text
stringlengths
3
1.05M
# Generated by Django 3.1.7 on 2021-03-30 23:42 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('ghostpost', '0002_auto_20210330_2341'), ] operations = [ migrations.RemoveField( model_name='rbmodel', name='privatesecret_key', ), ]
import logging
from collections import Counter
from time import time

from brownie import chain
from joblib import Parallel, delayed

import yearn.iearn
import yearn.ironbank
import yearn.special
import yearn.v1.registry
import yearn.v2.registry
from yearn.exceptions import UnsupportedNetwork
from yearn.networks import Network
from yearn.outputs.victoria import output_base, output_wallets
from yearn.prices import constants
from yearn.utils import contract_creation_block

logger = logging.getLogger(__name__)


class Yearn:
    """
    Can describe all products.

    Aggregates the per-product registries (earn/v1/v2/ib/special) available on
    the connected chain and exposes combined describe/TVL/export operations.
    """

    def __init__(self, load_strategies=True, load_harvests=False, load_transfers=False, watch_events_forever=True, exclude_ib_tvl=True) -> None:
        # NOTE(review): `load_transfers` is accepted but never used in this
        # constructor — confirm whether it is consumed elsewhere or is dead.
        start = time()
        # Registry availability depends on the connected network (brownie `chain`).
        if chain.id == Network.Mainnet:
            self.registries = {
                "earn": yearn.iearn.Registry(),
                "v1": yearn.v1.registry.Registry(),
                "v2": yearn.v2.registry.Registry(watch_events_forever=watch_events_forever),
                "ib": yearn.ironbank.Registry(exclude_ib_tvl=exclude_ib_tvl),
                "special": yearn.special.Registry(),
            }
        elif chain.id == Network.Fantom:
            self.registries = {
                "v2": yearn.v2.registry.Registry(watch_events_forever=watch_events_forever),
                "ib": yearn.ironbank.Registry(exclude_ib_tvl=exclude_ib_tvl),
            }
        elif chain.id == Network.Arbitrum:
            self.registries = {
                "v2": yearn.v2.registry.Registry(watch_events_forever=watch_events_forever),
                "ib": yearn.ironbank.Registry(exclude_ib_tvl=exclude_ib_tvl),
            }
        else:
            raise UnsupportedNetwork('yearn is not supported on this network')

        self.exclude_ib_tvl = exclude_ib_tvl

        # Optional eager loading of v2 strategy/harvest data.
        if load_strategies:
            self.registries["v2"].load_strategies()
        if load_harvests:
            self.registries["v2"].load_harvests()

        logger.info('loaded yearn in %.3fs', time() - start)

    def active_vaults_at(self, block=None):
        """Return all vaults active at `block` across every registry."""
        active = [
            vault
            for registry in self.registries.values()
            for vault in registry.active_vaults_at(block=block)
        ]
        # [yGov] Doesn't count for this context
        # NOTE(review): list.remove relies on Ygov instances comparing equal —
        # presumably Ygov defines __eq__; confirm.
        if chain.id == Network.Mainnet and (
            block is None or block > contract_creation_block(yearn.special.Ygov().vault.address)
        ):
            active.remove(yearn.special.Ygov())
        return active

    def describe(self, block=None):
        """Describe every registry at `block`, in parallel (4 threads).

        Returns a dict keyed by registry name ("earn", "v1", ...).
        """
        desc = Parallel(4, "threading")(
            delayed(self.registries[key].describe)(block=block) for key in self.registries
        )
        return dict(zip(self.registries, desc))

    def describe_wallets(self, block=None):
        """Per-registry wallet descriptions plus aggregated wallet stats."""
        from yearn.outputs.describers.registry import RegistryWalletDescriber
        # NOTE(review): each `registry` passed below is a (name, registry)
        # tuple from .items() — confirm RegistryWalletDescriber.describe_wallets
        # expects the pair rather than the registry object alone.
        data = Parallel(4,'threading')(delayed(RegistryWalletDescriber().describe_wallets)(registry, block=block) for registry in self.registries.items())
        data = {registry:desc for registry,desc in zip(self.registries,data)}
        # Sum each wallet's USD balance across all registries.
        wallet_balances = Counter()
        for registry, reg_desc in data.items():
            for wallet, usd_bal in reg_desc['wallet balances usd'].items():
                wallet_balances[wallet] += usd_bal
        agg_stats = {
            "agg_stats": {
                "total wallets": len(wallet_balances),
                # "active" means holding more than $50 total.
                "active wallets": sum(1 if balance > 50 else 0 for wallet, balance in wallet_balances.items()),
                "wallets > $5k": sum(1 if balance > 5000 else 0 for wallet, balance in wallet_balances.items()),
                "wallets > $50k": sum(1 if balance > 50000 else 0 for wallet, balance in wallet_balances.items()),
                "wallet balances usd": wallet_balances
            }
        }
        data.update(agg_stats)
        return data

    def total_value_at(self, block=None):
        """Total value per registry at `block`, computed in parallel."""
        desc = Parallel(4, "threading")(
            delayed(self.registries[key].total_value_at)(block=block) for key in self.registries
        )
        return dict(zip(self.registries, desc))

    def export(self, block, ts):
        """Describe all registries at `block` and export via victoria output."""
        start = time()
        data = self.describe(block)
        output_base.export(block, ts, data)
        products = list(data.keys())
        # Iron Bank TVL is excluded after the snapshot block when configured.
        if self.exclude_ib_tvl and block > constants.ib_snapshot_block:
            products.remove('ib')
        # Only dict-valued entries are vaults; other values are metadata.
        tvl = sum(vault['tvl'] for (product, product_values) in data.items() if product in products for vault in product_values.values() if type(vault) == dict)
        logger.info('exported block=%d tvl=%.0f took=%.3fs', block, tvl, time() - start)

    def export_wallets(self, block, ts):
        """Export aggregated wallet data for `block` via victoria output."""
        start = time()
        data = self.describe_wallets(block)
        output_wallets.export(ts,data)
        logger.info('exported block=%d took=%.3fs', block, time() - start)
import React, { Component } from 'react'; import { YMLBuilder } from 'common/builder/YMLBuilder'; export default class SchemaBuilder extends Component { static Config() { return { name: 'UIBuilder', }; } render() { return ( <YMLBuilder schema={this.props.uiSchema} script={this.props.script} /> ); } }
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import paddle.fluid as fluid from test_parallel_dygraph_dataparallel import TestMultipleGpus class TestDygraphShardingOptimizerStage2(TestMultipleGpus): # check sharding logic as well as the accuracy with single mode def test_dygraph_sharding_optimizer_stage2(self): self.run_mnist_2gpu('dygraph_sharding_optimizer_stage2.py') if __name__ == "__main__": unittest.main()
// NOTE: minified/generated RAMP build artifact (r.js/uglify output) — do not
// hand-edit. Defines the WMS-metadata popup module for the filter manager:
// h() registers the ".metadata-button" popup; i(target) opens a metadata
// subpanel, rendering the WMS legend template when available, otherwise
// fetching/transforming metadata XML or showing a "not found" message.
// NOTE(review): "tagget='_blank'" in the anchor markup looks like a typo for
// "target='_blank'" in the original source — fix upstream, not here.
/*! ramp-pcar 29-05-2015 14:06:43 : v. 5.4.0-10 * * RAMP GIS viewer - Elk; Sample of an implementation of RAMP **/ define(["ramp/eventManager","dojo/Deferred","dojo/topic","dojo/text!./templates/filter_wms_meta_Template.json","utils/popupManager","utils/tmplHelper","utils/util"],function(a,b,c,d,e,f,g){"use strict";function h(){j=e.registerPopup($("#layerList"),"click",function(a){j.isOpen(null,"any")?(a.reject(),j.close(),i(this.target)):(i(this.target),a.resolve())},{closeHandler:function(a){a.resolve()},handleSelector:".metadata-button",openOnly:!0,activeClass:"button-pressed"})}function i(b){var e=$(b),h=e.parents("legend");if(h.hasClass("selected-row"))c.publish(a.GUI.SUBPANEL_CLOSE,{origin:"filterManager"});else{var i,k=e.data("layer-id"),l=RAMP.layerRegistry[k],m=l?l.ramp.config:null;if(c.publish(a.GUI.SUBPANEL_OPEN,{panelName:i18n.t("filterManager.metadata"),title:h.find(".layer-name span").text(),content:null,target:h.find(".layer-details"),origin:"filterManager",guid:k,doOnOpen:function(){h.addClass("selected-row")},doOnHide:function(){j.isOpen(null,"any")&&j.close(),h.removeClass("selected-row")}}),m.layerInfo)if(m.legend){var n;tmpl.cache={},tmpl.templates=JSON.parse(f.stringifyTemplate(d)),n=tmpl("wms_meta_main",{legendUrl:m.legend.imageUrl,getCapabilitiesUrl:m.url+"&request=GetCapabilities",serviceEndPointUrl:m.url}),c.publish(a.GUI.SUBPANEL_OPEN,{content:$(n),origin:"filterManager",update:!0,guid:k})}else c.publish(a.GUI.SUBPANEL_OPEN,{content:"<p>"+i18n.t("filterManager.metadataNotFound")+"</p><b>Service End Point URL</b><br><a href='"+m.url+"' tagget='_blank'>"+m.url+"</a>",origin:"filterManager",update:!0,guid:k});else{var o=function(){c.publish(a.GUI.SUBPANEL_OPEN,{content:"<p>"+i18n.t("filterManager.metadataNotFound")+"</p><h5>"+i18n.t("filterManager.serviceEndPointLabel")+"</h5><p><a href='"+m.url+"' tagget='_blank'>"+m.url+"</a></p>",origin:"filterManager",update:!0,guid:k})};i=m.metadataUrl;var
p=null;m.catalogueUrl&&(p=[{key:"catalogue_url",value:m.catalogueUrl}]),i?g.transformXML(i,"assets/metadata/xstyle_default_"+RAMP.locale+".xsl",function(b,d){b?o():c.publish(a.GUI.SUBPANEL_OPEN,{content:$(d).append("<h5>"+i18n.t("filterManager.serviceEndPointLabel")+"</h5><p><a href='"+m.url+"' tagget='_blank'>"+m.url+"</a></p>"),origin:"filterManager",update:!0,guid:k})},null,p):o()}}}var j;return{init:function(){h()}}});
// Require Dependencies
const express = require('express');
const bodyParser = require('body-parser');

// Init App
const app = express();

// BodyParser Middleware
// parse application/x-www-form-urlencoded
app.use(bodyParser.urlencoded({ extended: true }));
// parse application/json
app.use(bodyParser.json());

// Require routes
const scraper = require('./routes/scraper');

// Use routes
app.use('/', scraper);

// Listen on `port` for connections (comment previously said 3000; the code
// has always used 5000).
const port = 5000;
app.listen(port, function () {
  // BUG FIX: the original used single quotes, so "${port}" was printed
  // literally — template interpolation requires backticks.
  console.log(`Server running on port ${port}`);
});
from typing import Dict, List
from decimal import Decimal

from rest_framework.request import Request

from polaris import settings
from polaris.models import Asset


def calculate_fee(
    fee_params: Dict, *_args: List, request: Request = None, **_kwargs: Dict
) -> Decimal:
    """
    .. _`/fee`: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0024.md#fee

    Calculate the fee to be charged for the transaction described by
    `fee_params`.

    Note that this endpoint only supports calculating fees expressed in units
    of a Stellar asset. If fees cannot be calculated using the `fee_params`
    passed, raise a ``ValueError`` for Polaris to return a 400 Bad Request to
    the client.

    Replace this function with another by passing it to
    ``register_integrations()`` as described in :ref:`api:Register
    Integrations` if the fees charged for transactions is not calculated using
    the asset's ``fee_fixed`` and ``fee_percent`` attributes.

    If replaced, `/info` responses will no longer contain the ``fee_fixed``
    and ``fee_percent`` attributes per-asset. This is because Polaris can no
    longer assume fees are determined using those attributes alone.

    `fee_params` will always contain the following key-value pairs:

    - `amount`: ``Decimal``
    - `asset_code`: ``str``
    - `operation`: ``str``
    - `type`: ``str``

    Each of these key-value pairs correspond to the associated parameter for
    the `/fee`_ endpoint. The Decimal returned will be used as the `fee` value
    in the response.
    """
    amount = fee_params["amount"]
    asset = Asset.objects.filter(code=fee_params["asset_code"]).first()
    if asset is None:
        # BUG FIX: an unknown asset_code previously fell through to attribute
        # access on None (AttributeError -> 500). Raising ValueError lets
        # Polaris return a 400 Bad Request, matching the contract above.
        raise ValueError("unable to calculate fees for the requested asset")

    # Pick the fee attributes matching the requested operation.
    if fee_params["operation"] == settings.OPERATION_WITHDRAWAL:
        fee_percent = asset.withdrawal_fee_percent
        fee_fixed = asset.withdrawal_fee_fixed
    elif fee_params["operation"] == settings.OPERATION_DEPOSIT:
        fee_percent = asset.deposit_fee_percent
        fee_fixed = asset.deposit_fee_fixed
    else:  # send
        fee_percent = asset.send_fee_percent
        fee_fixed = asset.send_fee_fixed

    if fee_fixed is None and fee_percent is None:
        raise ValueError("unable to calculate fees")

    # At most one of the two can still be None here (both-None raised above),
    # so the elif is sufficient: default the missing component to zero.
    if fee_fixed is None:
        fee_fixed = Decimal(0)
    elif fee_percent is None:
        fee_percent = Decimal(0)

    # fee = fixed + percent-of-amount, rounded to the asset's precision.
    return round(
        fee_fixed + (fee_percent / Decimal("100") * Decimal(amount)),
        asset.significant_decimals,
    )


registered_fee_func = calculate_fee
from __future__ import absolute_import, print_function import tweepy from tweepy import Stream from tweepy import OAuthHandler from tweepy.streaming import StreamListener import time import json from textblob import TextBlob ckey = 'QtjbyJiJ3NLR2nYuU5SVbaEMF' csecret = 'jDiECspRBGt9SdrieY3BBOtIDXFiFox3zZjAY4i7gCzAjSPd8y' atoken = '721453542519795712-LNBQIFa7V7uhzt2YyUC92yvX7twmbCl' asecret = '1iZRfVomihmYf7PWgbyjtJ3YStpfW5BwKKY5gjRxCRawz' # temp = input('Please enter a restaurant name: ') # locpoint = input('Please enter the locations: ') class listener(StreamListener): def on_data(self, data): decoded = json.loads(data) tweet = TextBlob(decoded["text"]) datastring=('@%s: %s' % (decoded['user']['screen_name'], decoded['text'].encode('ascii', 'ignore'))) print(datastring) print("\n") print("This is the Sentiment: ", tweet.sentiment) print('\n') print('Sentiment Polarity: ', tweet.sentiment.polarity) print('\n') savefile=open('twitterdata.txt','a') savefile.write(datastring) savefile.write('\n') savefile.write(str(tweet.sentiment.polarity)) savefile.write('\n') savefile.close() return True def on_error(self, status): print(status) if __name__ == '__main__': l = listener() auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken, asecret) # GEOBOX for the locations # api = tweepy.API(auth) # search_results = api.search(q=str(temp), count=100, "43.6532,79.3832,1mi") # for eachstring in search_results: # print(eachstring.text) # savefile=open('twitterdata.txt','a') # savefile.write(eachstring.text) # savefile.write('\n') # savefile.close() # teststring = ('@%s: %s' % (search_results['user']['screen_name'], search_results['text'].encode('ascii', 'ignore'))) # print(teststring) # GEOBOX = [-79, 43, -80, 44] stream = Stream(auth, l) # stream.filter(track=[str(temp)+' Restaurant'], locations=[], languages=['en']) stream.filter(track=['Restaurant'], languages=['en']) print("Counter: ", counter)
# Experiment configuration: BESS + CHPP + HWT integrated energy-system model,
# sample generation pipeline, loss definitions, and ANN meta-search space.
# NOTE: statement order matters — processor calls register columns in sequence.

import os

import numpy as np

from modules.simulation.individual.bess import BESS
from modules.simulation.individual.chpp import CHPP
from modules.simulation.individual.hwt import HWT
from modules.simulation.individual.demand import Demand
from modules.simulation.integrated.bess_chpp_hwt import BESS_CHPP_HWT

####
# general hyperparameters
####

# simulation
time_step = 15 * 60  # seconds

# training
cache_initialization_process_count = 24
training_generation_process_count = 8
evaluation_generation_process_count = 0
training_process_count = 2

batch_size = 1024 * 3
training_cache_size = batch_size * 1000
evaluation_cache_size = batch_size * 100
evaluation_batch_count = evaluation_cache_size / batch_size

# meta
meta_search_sample_count = 16
meta_search_fully_stored_count = 1
meta_search_parameter_space = {}  # see below

####
# model parameters and model
####

# read data
with open('data/crest_heat_demand.txt', 'r') as file:
    demand_series = np.loadtxt(file, delimiter='\t', skiprows=1)  # read file, dismiss header
demand_series = demand_series.transpose(1,0)  # dim 0 identifies the series
demand_series *= 1000  # kW -> W

allow_infeasible_actions = True

# hot water tank parameters
hwt_volume = 0.750
hwt_min_temp = 60.
hwt_max_temp = 80.
relative_loss = HWT.relative_loss_from_energy_label(12, 5.93, hwt_volume, 45)

# full action set in W; device constructors below take feasible subsets
actions = np.array(range(-10500,5001,100))

# tesla powerwall 2
# 13.5 kWh
# 5 kW continuous
# 0.9 roundtrip efficiency (=> ~ 0.95*0.95)
bess = BESS(time_step, actions[(actions<=5000) * (actions>=-5000)], 13500 * 60 * 60, 0.95, 0.95, 0, correct_infeasible=allow_infeasible_actions)

# senertec dachs
# (electrical, thermal) power per operating mode and transition
state_matrix = [
    [(0,0) , (-5500/2,-12500/2)],
    [(-5500/2,-12500/2) , (-5500,-12500)]
]
chpp = CHPP(time_step, actions[(actions<=0) * (actions>=-5500)], state_matrix, correct_infeasible=allow_infeasible_actions)

hwt = HWT(time_step, hwt_min_temp, hwt_max_temp, hwt_volume, 1, 1, relative_loss)
demand = Demand(time_step, demand_series)

model = BESS_CHPP_HWT(time_step, actions, bess, chpp, hwt, demand, 0.01, correct_infeasible=allow_infeasible_actions)

####
# sample generation parameters and ANN loss
####

import torch
import torch.nn as nn

# determine torch device
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

from modules.neuralnetwork.samplegenerators.statebased import BatchProcessor

# dwell times in seconds: 15 min .. 2 h
staying_times = [i * 900 for i in range(1,4*2+1)]

# input feature columns, registered in order
input_processor = BatchProcessor()
input_processor.normalize([0], [model.bess.capacity])  # soc
input_processor.none(2)  # soc_min, soc_max
input_processor.normalize([0], [10000])  # demand
input_processor.one_hot([i for i in range(len(state_matrix[0]))])  # chpp mode
input_processor.one_hot(staying_times)  # staying time
input_processor.one_hot(staying_times)  # min off
input_processor.one_hot(staying_times)  # min on
input_processor.normalize([hwt_min_temp, hwt.ambient_temperature], [(hwt_max_temp-hwt_min_temp), 1.])  # hwt temp, ambient temp
input_processor.one_hot(model.actions)  # action

# target (sample output) columns, registered in order
output_processor = BatchProcessor()
output_processor.normalize([0], [model.bess.capacity])  # soc
output_processor.none(2)  # soc_min, soc_max
output_processor.discretize_index([0])  # demand
output_processor.discretize_index([i for i in range(len(state_matrix[0]))])  # chpp Mode
output_processor.discretize_index(staying_times)  # staying time
output_processor.discretize_index(staying_times)  # min off
output_processor.discretize_index(staying_times)  # min on
output_processor.normalize([hwt_min_temp], [(hwt_max_temp-hwt_min_temp)])  # hwt temp
output_processor.discretize_index([0])  # ambient temp
output_processor.discretize_index(-model.actions)  # el. interaction [minus, since the interaction is the negative action (for a feasible action)]
output_processor.discretize_index([0])  # th. interaction

# one loss per output group: (criterion, number of columns it covers)
losses = []
losses.append((nn.MSELoss().to(device), 1))  # soc
losses.append((nn.MSELoss().to(device), 2))  # soc_min, soc_max
losses.append((nn.CrossEntropyLoss().to(device), 1))  # demand
losses.append((nn.CrossEntropyLoss().to(device), len(state_matrix[0])))  # chpp mode
losses.append((nn.CrossEntropyLoss().to(device), len(staying_times)))  # staying time
losses.append((nn.CrossEntropyLoss().to(device), len(staying_times)))  # min off
losses.append((nn.CrossEntropyLoss().to(device), len(staying_times)))  # min on
losses.append((nn.MSELoss().to(device), 1))  # hwt_temp
losses.append((nn.CrossEntropyLoss().to(device), 1))  # ambient temp
losses.append((nn.CrossEntropyLoss().to(device), len(model.actions)))  # el interaction
losses.append((nn.CrossEntropyLoss().to(device), 1))  # th interaction

# per-group loss weighting, same order as `losses`
loss_weights = [1e6,10,10,1e3,1e3,10,10,1e5,1,1,1]

# maps raw ANN outputs back to interpretable values
ann_output_processor = BatchProcessor()
ann_output_processor.denormalize([0], [model.bess.capacity]).clip(0, model.bess.capacity)  # soc
ann_output_processor.none(2)  # soc_min, soc_max
ann_output_processor.mode_of_distribution([0])  # demand
ann_output_processor.mode_of_distribution([i for i in range(len(state_matrix[0]))])  # chpp Mode
ann_output_processor.mode_of_distribution(staying_times)  # staying time
ann_output_processor.mode_of_distribution(staying_times)  # min off
ann_output_processor.mode_of_distribution(staying_times)  # min on
ann_output_processor.denormalize([hwt_min_temp], [(hwt_max_temp-hwt_min_temp)])  # hwt temp
ann_output_processor.mode_of_distribution([0])  # ambient temp
ann_output_processor.mode_of_distribution(-model.actions)  # el. interaction
ann_output_processor.mode_of_distribution([0])  # th. interaction
ann_output_processor.split_result([10])

sampling_parameters = {
    'soc_distribution': ([(0,0.25), (0.25,0.75), (0.75,1)], [3./8, 2./8, 3./8]),
    'min_off_times': staying_times,
    'min_on_times' : staying_times,
    'dwell_times': staying_times,
    'temp_distribution': ([(20,60), (60,80), (80,90)], [3/20, 14/20, 3/20]),
    'infeasible_chance': 1./2,
}

####
# sample generator
####

from modules.neuralnetwork.samplegenerators.statebased import SampleGenerator

sample_generator = SampleGenerator(model, input_processor, output_processor, sampling_parameters, False, True, True, ann_output_processor)

# draw one sample batch to discover input/output widths
sample_input, sample_output = next(sample_generator)
sample_input_size = sample_input.size(1)
sample_output_size = sample_output.size(1)

####
# ANN
####

from modules.neuralnetwork.loss import MixedLoss, SigmoidGatedL1RegularizationLoss, L1RegularizationLoss
from modules.neuralnetwork.training import EarlyStoppingCallback

# this is not necessarily the same as sample_output_size, since dimension of target (sample output) and estimation (ANN output) can differ
output_width = sum([loss[1] for loss in losses])

max_layers = 14
meta_search_parameter_space = {
    'batch_size': [batch_size],
    'loss': [MixedLoss(losses, loss_weights, device=device)],
    'regularization': [
        L1RegularizationLoss(2e-6, device),
        L1RegularizationLoss(2e-7, device),
        L1RegularizationLoss(2e-8, device),
    ],
    'learning_rate': [0.005, 0.001, 0.0005, 0.0001],
    'epoch_count': [1000],
    'batch_count': [training_cache_size / batch_size],
    'max_grad_norm': [1e6],
    'input_width': [sample_input_size],
    'output_width': [output_width],
    'output_activation': [None],
    'hidden_layer_count': np.arange(min(max_layers-1,4),max_layers),  # 'min(.,.)' is a failsafe
    'width': [2**i for i in range(7,11)],
    'width_interpolation_steps_input': [0],
    'width_interpolation_steps_output': [0,3,4,5],
    'betas': ([0.5, 1, 2], max_layers),
    'batch_norms': ([0]*14+[1], max_layers),
    'dropout': ([0], max_layers),  #([0]*28+[0.2]+[0.5], max_layers),
    'skips': ([0]*19+[1], (max_layers, max_layers)),
    'early_stopping_callback': [EarlyStoppingCallback({ }, 100)],
    #'lr_scheduler': [(torch.optim.lr_scheduler.StepLR,{'step_size':25,'gamma':0.75}), (torch.optim.lr_scheduler.StepLR,{'step_size':1,'gamma':0.99})]
    'lr_scheduler': [(torch.optim.lr_scheduler.StepLR,{'step_size':25,'gamma':0.5}), (torch.optim.lr_scheduler.StepLR,{'step_size':1,'gamma':0.95}), (torch.optim.lr_scheduler.StepLR,{'step_size':1,'gamma':0.99})]
}

####
output_location = '{}/output/{{}}_dfh_state'.format(os.path.dirname(__file__))
// Grunt build configuration for the color-pusher widget: lint, complexity
// checks, template caching, concat/minify, banner stamping, and gh-pages
// deployment. Project-specific paths come from ./build.config.js.
module.exports = function (grunt) {
  require('time-grunt')(grunt);

  var aged = require('aged');
  var pkg = grunt.file.readJSON('package.json');
  // used for templates
  var pkgData = { data: { pkg: pkg } };
  // auto-load every grunt-* dev dependency as a task
  var plugins = require('matchdep').filterDev('grunt-*');
  plugins.forEach(grunt.loadNpmTasks);
  var userConfig = require('./build.config.js');
  var versionTemplate = '<%= pkg.name %> - v<%= pkg.version %> - <%= grunt.template.today("yyyy-mm-dd") %>';
  var version = grunt.template.process(versionTemplate, pkgData);
  var copyrightTemplate = 'Copyright (c) <%= grunt.template.today("yyyy") %> <%= pkg.author %>';
  var copyright = grunt.template.process(copyrightTemplate, pkgData);

  var taskConfig = {
    pkg: pkg,
    clean: ['<%= destination_dir %>/bower_components', 'tmp'],
    meta: {
      // banner prepended to built js/css by usebanner
      banner: '/**\n' + ' * ' + version + '\n' + ' * ' + copyright + '\n' + ' */\n'
    },
    usebanner: {
      compile: {
        options: {
          banner: '<%= meta.banner %>',
          position: 'top',
          linebreak: true
        },
        files: {
          src: [
            '<%= destination_dir %>/<%= pkg.name %>*.js',
            '<%= destination_dir %>/<%= pkg.name %>*.css'
          ]
        }
      }
    },
    jshint: {
      all: userConfig.app_files.js,
      options: {
        jshintrc: '.jshintrc',
        reporter: require('jshint-stylish')
      },
    },
    // stricter complexity thresholds for recently-modified ("aged") files
    complexity: {
      fresh: {
        src: '<%= app_files.js %>',
        options: {
          errorsOnly: false,
          cyclomatic: 10,
          halstead: 20,
          maintainability: 100
        }
      },
      aged: {
        src: '<%= app_files.js %>',
        filter: aged(2, 'days'),
        options: {
          errorsOnly: false,
          cyclomatic: 5,
          halstead: 12,
          maintainability: 100
        }
      }
    },
    readme: {
      options: {
        templates: './docs',
        readme: './docs/README.tmpl.md',
        docs: '.'
      }
    },
    sync: {
      all: {
        options: {
          // sync only these options
          sync: ['author', 'description', 'name', 'version'],
          from: 'package.json',
          to: 'src/manifest.json'
        }
      }
    },
    jsonlint: {
      all: {
        src: [
          'package.json',
          'src/manifest.json'
        ]
      }
    },
    'nice-package': {
      all: {
        options: {
          license: function (value) {
            return value === 'MIT';
          }
        }
      }
    },
    'gh-pages': {
      options: {
        base: '<%= destination_dir %>'
      },
      src: [
        'index.html',
        'README.md',
        'favicon.png',
        'bower_components/angular/angular.js',
        'bower_components/bootstrap/dist/css/bootstrap.min.css',
        'bower_components/bootstrap/dist/js/bootstrap.min.js',
        'bower_components/bootstrap/dist/fonts/*',
        'bower_components/jquery/jquery.min.js',
        'bower_components/jquery/jquery.min.map',
        'color-pusher.min.js',
        'color-pusher.js',
        'color-pusher.min.css',
        'color-pusher.css',
        'jquery.minicolors.png'
      ]
    },
    /* convert AngularJs html templates to cached JavaScript */
    html2js: {
      main: {
        options: {
          base: 'src',
          module: 'color-pusher-widget.templates'
        },
        src: [ 'src/*.tpl.html' ],
        dest: 'tmp/<%= pkg.name %>.templates.js'
      }
    },
    concat: {
      js: {
        options: {},
        src: [
          '<%= vendor_files.js %>',
          'tmp/*.js',
          '<%= app_files.js %>'
        ],
        dest: '<%= destination_dir %>/<%= pkg.name %>.js'
      },
      css: {
        options: {},
        src: [
          '<%= vendor_files.css %>',
          '<%= app_files.css %>'
        ],
        dest: '<%= destination_dir %>/<%= pkg.name %>.css'
      }
    },
    uglify: {
      options: {
        report: 'min'
      },
      js: {
        files: {
          '<%= destination_dir %>/<%= pkg.name %>.min.js': '<%= destination_dir %>/<%= pkg.name %>.js'
        }
      }
    },
    cssmin: {
      options: {
        report: 'min'
      },
      css: {
        files: {
          '<%= destination_dir %>/<%= pkg.name %>.min.css': '<%= destination_dir %>/<%= pkg.name %>.css'
        }
      }
    },
    // make sure index.html example works inside destination folder
    copy: {
      all: {
        files: [
          {
            expand: true,
            src: [
              'bower_components/bootstrap/dist/css/bootstrap.min.css',
              'bower_components/bootstrap/dist/js/bootstrap.min.js',
              'bower_components/bootstrap/dist/fonts/*',
              'bower_components/jquery/jquery.min.js',
              'bower_components/jquery/jquery.min.map',
              'bower_components/angular/angular.js',
              'index.html',
              'favicon.png',
              'README.md'
            ],
            dest: '<%= destination_dir %>'
          },
          {
            src: 'bower_components/jquery-minicolors/jquery.minicolors.png',
            dest: '<%= destination_dir %>/jquery.minicolors.png'
          }
        ]
      }
    }
  };

  // project-specific userConfig entries override/extend the task config
  grunt.initConfig(grunt.util._.extend(taskConfig, userConfig));

  grunt.registerTask('build', ['clean', 'html2js', 'concat', 'copy', 'uglify', 'cssmin', 'usebanner']);
  grunt.registerTask('default', ['sync', 'jsonlint', 'nice-package', 'jshint', 'complexity']);
};
const express = require('express'); const genres = require('../routes/genres'); const customers = require('../routes/customers'); const movies = require('../routes/movies'); const rentals = require('../routes/rentals'); const users = require('../routes/users'); const auth = require('../routes/auth'); const returns = require('../routes/returns'); const error = require('../middleware/error'); module.exports = function (app) { app.use(express.json()); app.use('/api/genres', genres); app.use('/api/customers', customers); app.use('/api/movies', movies); app.use('/api/rentals', rentals); app.use('/api/users', users); app.use('/api/auth', auth); app.use('/api/returns', returns); //error middleware app.use(error); }
import * as React from "react"; import { ethers } from "ethers"; import './App.css'; export default function App() { const wave = () => { } return ( <div className="mainContainer"> <div className="dataContainer"> <div className="header"> 👋 Hey there! </div> <div className="bio"> I am SHA888 and I worked on self-driving cars so that's pretty cool right? Connect your Ethereum wallet and wave at me! </div> <button className="waveButton" onClick={wave}> Wave at Me </button> </div> </div> ); }
// Minimal express sub-app exposing the search index endpoint.
import express from "express"

import { index } from "api/apps/search/routes"
import { setUser, authenticated, adminOnly } from "api/apps/users/routes.coffee"

const app = (module.exports = express())

// GET /search — requires an authenticated admin user (middleware chain runs
// setUser -> authenticated -> adminOnly before the index handler).
app.get("/search", setUser, authenticated, adminOnly, index)
// Search-box directive: expands the input on button click / focus and
// collapses it again when it blurs while empty.
angular.module('kibibitCodeEditor')
  .directive('kbSearchProject', function() {
    return {
      scope: {},
      // two-way binds the directive's searchTerm to the outer ngModel
      bindToController: {
        searchTerm: '=ngModel'
      },
      controller: 'searchProjectController',
      controllerAs: 'searchProjectCtrl',
      templateUrl: 'app/components/searchProject/searchProjectTemplate.html',
      link: function(scope, element, attrs, searchProjectCtrl) {
        // NOTE(review): these selectors are global (document-wide), not
        // scoped to `element` — confirm only one search box exists per page.
        var searchBtn = angular.element('.search-btn');
        var input = angular.element('.search-input');

        /* OPEN SEARCH */
        searchBtn.click(openSearch);

        /* BLUR ON ESC */
        input.keyup(function(e) {
          // 27 == Escape
          if (e.keyCode == 27) {
            input.blur();
          }
        });

        /* CHANGE CLASSES ON FOCUS\BLUR */
        input.focus(function() {
          input.addClass('open');
          searchBtn.addClass('open');
          input.removeClass('blurred');
        });
        input.blur(function() {
          // keep the box open while it still contains text
          if (input.val() === '') {
            input.removeClass('open');
            searchBtn.removeClass('open');
          }
          input.addClass('blurred');
        });

        function openSearch() {
          if (!input.hasClass('open')) {
            input.focus();
          }
        }
      }
    };
  })
  .controller('searchProjectController', [
    function() {
      var vm = this;
    }
  ]);
/*
 * Copyright (C) 2002-2005 Roman Zippel <zippel@linux-m68k.org>
 * Copyright (C) 2002-2005 Sam Ravnborg <sam@ravnborg.org>
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#include "lkc.h"

/* file already present in list? If not add it */
struct file *file_lookup(const char *name)
{
	struct file *file;
	const char *file_name = sym_expand_string_value(name);

	for (file = file_list; file; file = file->next) {
		/* NOTE(review): compares the caller's unexpanded `name`, while
		 * the expanded `file_name` is what gets stored below — confirm
		 * lookups are always performed with pre-expansion names. */
		if (!strcmp(name, file->name)) {
			free((void *)file_name);
			return file;
		}
	}

	file = xmalloc(sizeof(*file));
	memset(file, 0, sizeof(*file));
	file->name = file_name;
	file->next = file_list;
	file_list = file;
	return file;
}

/* write a dependency file as used by kbuild to track dependencies */
int file_write_dep(const char *name)
{
	struct symbol *sym, *env_sym;
	struct expr *e;
	struct file *file;
	FILE *out;

	if (!name)
		name = ".kconfig.d";
	/* write to a temp file first, renamed over `name` on success */
	out = fopen("..config.tmp", "w");
	if (!out)
		return 1;
	fprintf(out, "deps_config := \\\n");
	for (file = file_list; file; file = file->next) {
		if (file->next)
			fprintf(out, "\t%s \\\n", file->name);
		else
			fprintf(out, "\t%s\n", file->name);
	}
	fprintf(out, "\n%s: \\\n"
		"\t$(deps_config)\n\n", conf_get_autoconfig_name());

	/* force regeneration whenever an environment symbol changes value */
	expr_list_for_each_sym(sym_env_list, e, sym) {
		struct property *prop;
		const char *value;

		prop = sym_get_env_prop(sym);
		env_sym = prop_get_symbol(prop);
		if (!env_sym)
			continue;
		value = getenv(env_sym->name);
		if (!value)
			value = "";
		fprintf(out, "ifneq \"$(%s)\" \"%s\"\n", env_sym->name, value);
		fprintf(out, "%s: FORCE\n", conf_get_autoconfig_name());
		fprintf(out, "endif\n");
	}

	fprintf(out, "\n$(deps_config): ;\n");
	fclose(out);
	remove(name);
	rename("..config.tmp", name);
	return 0;
}

/* Allocate initial growable string */
struct gstr str_new(void)
{
	struct gstr gs;
	gs.s = xmalloc(sizeof(char) * 64);
	gs.len = 64;
	gs.max_width = 0;
	strcpy(gs.s, "\0");
	return gs;
}

/* Allocate and assign growable string */
struct gstr str_assign(const char *s)
{
	struct gstr gs;
	/* NOTE(review): strdup return is not checked, unlike xmalloc paths */
	gs.s = strdup(s);
	gs.len = strlen(s) + 1;
	gs.max_width = 0;
	return gs;
}

/* Free storage for growable string */
void str_free(struct gstr *gs)
{
	if (gs->s)
		free(gs->s);
	gs->s = NULL;
	gs->len = 0;
}

/* Append to growable string */
void str_append(struct gstr *gs, const char *s)
{
	size_t l;
	if (s) {
		l = strlen(gs->s) + strlen(s) + 1;
		if (l > gs->len) {
			/* NOTE(review): realloc return is unchecked; on failure
			 * the old buffer leaks and gs->s becomes NULL */
			gs->s = realloc(gs->s, l);
			gs->len = l;
		}
		strcat(gs->s, s);
	}
}

/* Append printf formatted string to growable string */
void str_printf(struct gstr *gs, const char *fmt, ...)
{
	va_list ap;
	char s[10000]; /* big enough... */
	va_start(ap, fmt);
	vsnprintf(s, sizeof(s), fmt, ap);
	str_append(gs, s);
	va_end(ap);
}

/* Retrieve value of growable string */
const char *str_get(struct gstr *gs)
{
	return gs->s;
}

/* malloc that exits the process on allocation failure */
void *xmalloc(size_t size)
{
	void *p = malloc(size);
	if (p)
		return p;
	fprintf(stderr, "Out of memory.\n");
	exit(1);
}

/* calloc that exits the process on allocation failure */
void *xcalloc(size_t nmemb, size_t size)
{
	void *p = calloc(nmemb, size);
	if (p)
		return p;
	fprintf(stderr, "Out of memory.\n");
	exit(1);
}
// NOTE: minified/generated Vite build chunk — do not hand-edit. Defines two
// Vue components: `$`, an animated pop-up card positioned at (startX, startY),
// and `C` (exported as `_`), a user-info card that fetches profile data by
// userId or nickname from /manager/infoById and /manager/infoByNickname.
import{e as a,l as s,i as t,a as e,g as n,c as i}from"./index.4205cc56.js";import{d as l,f as c,r,w as d,t as o,u,o as p,c as m,b as v,B as y,h as f,q as x,C as g,x as k,p as _,a as b}from"./vendor.03637da4.js";const X=x();var $=l({expose:[],props:["startX","startY","size"],setup(s){const{proxy:t}=g();l({FullScreen:a});let e=c({}),n=c({}),i=r(.3),x=r(!1);function k(){i.value=0,e[`endAnimate${t.size}`]=!0,setTimeout((()=>{x.value=!1,e[`startAnimate${t.size}`]=!1,e[`endAnimate${t.size}`]=!1}),500)}return d((()=>t.startX),(()=>{n.left=t.startX+"px",n.top=t.startY+"px",i.value=.3,e[`startAnimate${t.size}`]=!0,x.value=!0})),(s,t)=>{const l=o("el-card");return u(x)?(p(),m(a,{key:0,opacity:u(i),onClickOutSide:k},{default:X((()=>[v(l,{class:["pop-card",u(e)],style:u(n)},{default:X((()=>[y(s.$slots,"default",{},void 0,!0)])),_:3},8,["style","class"])])),_:1},8,["opacity"])):f("",!0)}}});$.__scopeId="data-v-75769bc1";const z=x();_("data-v-cbc82bce");const I={class:"info"},Y={class:"photo"},h={class:"name"},A={class:"introduce"},w={class:"item day"},B=v("i",{class:"el-icon-sunny",style:{"margin-right":"3px"}},null,-1),M={class:"item email"},S={class:"email-box"},T={class:"item text"},j=v("span",null,"个人介绍",-1);b();var C=l({expose:[],props:["startX","startY","userId","nickname"],setup(r){l({FullScreen:a});const{proxy:o}=g();let y=c({});return d((()=>o.userId),(async a=>{const s=await n(`/manager/infoById?id=${a}`);200==s.code&&i(s.data,y)})),d((()=>o.nickname),(async a=>{const s=await n(`/manager/infoByNickname?nickname=${a}`);200==s.code&&i(s.data,y)})),(a,n)=>(p(),m($,{startX:r.startX,startY:r.startY,size:"Middle"},{default:z((()=>[v("div",{class:["expand",u(e)()]},[v("div",I,[v("div",Y,[v("img",{src:u(s)(u(y).headImg),alt:""},null,8,["src"])]),v("p",h,k(u(y).nickname),1)]),v("div",A,[v("div",w,[v("div",null,[B,v("span",null,k(u(y).createDuration),1)])]),v("div",M,[v("div",S,[v("img",{style:{"margin-right":"6px"},src:u(t)()?u("/assets/email-dark.1fded8d0.svg"):u("/assets/email.c7fc9d5e.svg"),alt:""},null,8,["src"]),v("span",null,k(u(y).email),1)])]),v("div",T,[v("div",null,[j,v("div",{innerHTML:u(y).introduce},null,8,["innerHTML"])])])])],2)])),_:1},8,["startX","startY"]))}});C.__scopeId="data-v-cbc82bce";export{C as _};
""" Question Source:Leetcode Level: Medium Topic: Stack Solver: Tayyrov Date: 18.03.2022 """ def removeDuplicateLetters(s: str) -> str: last_indices = {char: idx for idx, char in enumerate(s)} stack = [] added = set() for idx, char in enumerate(s): while stack and stack[-1] > char and last_indices[stack[-1]] > idx and char not in added: added.remove(stack.pop()) if char not in added: stack.append(char) added.add(char) return "".join(stack)
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyTreehash(PythonPackage): """Calculates a SHA256 (or, potentially, any other hashlib supported function) "tree" hash, as used by e.g. Amazon Glacier.""" homepage = "https://github.com/jdswinbank/treehash" url = "https://pypi.io/packages/source/t/treehash/TreeHash-1.0.2.tar.gz" version('1.0.2', sha256='fefcadd6a1e8ba2808897d776d5ae8bdae56ec3fe90ed385c1322357269f27a4')
from pathlib import Path
from io import StringIO

from jinja2 import Template

from ploomber.tasks.abc import Task
from ploomber.tasks.mixins import ClientMixin
from ploomber.sources import (SQLScriptSource, SQLQuerySource, FileSource)
from ploomber.products import (File, PostgresRelation, SQLiteRelation,
                               GenericSQLRelation, GenericProduct, SQLRelation)
from ploomber import io
from ploomber.util import requires
from ploomber.placeholders.placeholder import _add_globals
from ploomber.exceptions import SQLTaskBuildError


class SQLScript(ClientMixin, Task):
    """Execute a script in a SQL database to create a relation or view

    Parameters
    ----------
    source: str or pathlib.Path
        SQL script source, if str, the content is interpreted as the actual
        script, if pathlib.Path, the content of the file is loaded
    product: ploomber.products.product
        Product generated upon successful execution
    dag: ploomber.DAG
        A DAG to add this task to
    name: str
        A str to identify this task. Should not already exist in the dag
    client: ploomber.clients.{SQLAlchemyClient, DBAPIClient}, optional
        The client used to connect to the database. Only required if no
        dag-level client has been declared using dag.clients[class]
    params: dict, optional
        Parameters to pass to the script, by default, the callable will
        be executed with a "product" (which will contain the product object).
        It will also include a "upstream" parameter if the task has upstream
        dependencies along with any parameters declared here. The source
        code is converted to a jinja2.Template for passing parameters,
        refer to jinja2 documentation for details

    Examples
    --------
    Spec API:

    .. code-block:: yaml
        :class: text-editor
        :name: pipeline-yaml

        clients:
            SQLScript: clients.get
            SQLiteRelation: clients.get

        tasks:
            - source: script.sql
              product: [subset, table]

    Python API (SQLite):

    >>> import sqlite3
    >>> import pandas as pd
    >>> from ploomber import DAG
    >>> from ploomber.products import SQLiteRelation
    >>> from ploomber.tasks import SQLScript
    >>> from ploomber.clients import DBAPIClient
    >>> con_raw = sqlite3.connect(database='my_db.db')
    >>> df = pd.DataFrame({'a': range(100), 'b': range(100)})
    >>> _ = df.to_sql('numbers', con_raw, index=False)
    >>> dag = DAG()
    >>> client = DBAPIClient(sqlite3.connect, dict(database='my_db.db'),
    ...                      split_source=';')
    >>> dag.clients[SQLScript] = client
    >>> dag.clients[SQLiteRelation] = client
    >>> script = ('DROP TABLE IF EXISTS {{product}};'
    ...           'CREATE TABLE {{product}} AS '
    ...           'SELECT * FROM numbers LIMIT 3')
    >>> _ = SQLScript(script, SQLiteRelation(('subset', 'table')),
    ...               dag=dag, name='create-subset')
    >>> _ = dag.build()
    >>> df = pd.read_sql('SELECT * FROM subset', con_raw)
    >>> con_raw.close()
    >>> df.head(3)
       a  b
    0  0  0
    1  1  1
    2  2  2

    See Also
    --------
    ploomber.clients.SQLDump :
        A task to execute a ``SELECT`` statement and dump the output into
        a file
    """
    PRODUCT_CLASSES_ALLOWED = (PostgresRelation, SQLiteRelation,
                               GenericSQLRelation, SQLRelation)

    def __init__(self, source, product, dag, name=None, client=None,
                 params=None):
        params = params or {}
        # TODO: access self.client so it uses the dag-level if available
        try:
            split_source = client.split_source
        except AttributeError:
            # client not given yet (dag-level client) or it does not
            # support statement splitting
            split_source = None

        kwargs = dict(hot_reload=dag._params.hot_reload,
                      split_source=split_source)
        self._source = type(self)._init_source(source, kwargs)
        super().__init__(product, dag, name, params)
        self._client = client
        self.dag = dag

    def run(self):
        """Execute the rendered SQL script through the task's client.

        Raises
        ------
        SQLTaskBuildError
            If the client fails to execute the script; the original
            exception is chained as the cause.
        """
        source_code = str(self.source)

        try:
            return self.client.execute(source_code)
        except Exception as e:
            raise SQLTaskBuildError(type(self), source_code, e) from e

    def load(self, limit=10):
        """Load this task's product in a pandas.DataFrame

        Parameters
        ----------
        limit : int, default=10
            How many records to load, defaults to 10
        """
        import pandas as pd
        return pd.read_sql(f'SELECT * FROM {self.product} LIMIT {int(limit)}',
                           self.client)

    @staticmethod
    def _init_source(source, kwargs):
        return SQLScriptSource(source, **kwargs)


class SQLDump(io.FileLoaderMixin, ClientMixin, Task):
    """Dumps data from a SQL SELECT statement to a file(s)

    Parameters
    ----------
    source: str or pathlib.Path
        SQL script source, if str, the content is interpreted as the actual
        script, if pathlib.Path, the content of the file is loaded
    product: ploomber.products.product
        Product generated upon successful execution
    dag: ploomber.DAG
        A DAG to add this task to
    name: str
        A str to identify this task. Should not already exist in the dag
    client: ploomber.clients.{SQLAlchemyClient, DBAPIClient}, optional
        The client used to connect to the database. Only required if no
        dag-level client has been declared using dag.clients[class]
    params: dict, optional
        Parameters to pass to the script, by default, the callable will
        be executed with a "product" (which will contain the product object).
        It will also include a "upstream" parameter if the task has upstream
        dependencies along with any parameters declared here. The source
        code is converted to a jinja2.Template for passing parameters,
        refer to jinja2 documentation for details
    chunksize: int, optional
        Number of rows per file, otherwise download the entire dataset in
        a single one. If not None, the product becomes a directory
    io_handler: ploomber.io.CSVIO or ploomber.io.ParquetIO, optional
        io handler to use (which controls the output format), currently
        only csv and parquet are supported. If None, it tries to infer the
        handler from the product's extension if that doesn't work, it uses
        io.CSVIO

    Examples
    --------
    Spec API:

    .. code-block:: yaml
        :class: text-editor
        :name: pipeline-yaml

        clients:
            # define a get function in clients.py that returns the client
            SQLDump: clients.get

        tasks:
            # script with a SELECT statement
            - source: script.sql
              product: data.parquet

    `Full spec API example. <https://github.com/ploomber/projects/tree/master/cookbook/sql-dump>`_ # noqa

    Python API:

    >>> import sqlite3
    >>> import pandas as pd
    >>> from ploomber import DAG
    >>> from ploomber.products import File
    >>> from ploomber.tasks import SQLDump
    >>> from ploomber.clients import DBAPIClient
    >>> con_raw = sqlite3.connect(database='my_db.db')
    >>> df = pd.DataFrame({'a': range(100), 'b': range(100)})
    >>> _ = df.to_sql('numbers', con_raw, index=False)
    >>> con_raw.close()
    >>> dag = DAG()
    >>> client = DBAPIClient(sqlite3.connect, dict(database='my_db.db'))
    >>> _ = SQLDump('SELECT * FROM numbers', File('data.parquet'),
    ...             dag=dag, name='dump', client=client, chunksize=None)
    >>> _ = dag.build()
    >>> df = pd.read_parquet('data.parquet')
    >>> df.head(3)
       a  b
    0  0  0
    1  1  1
    2  2  2

    Notes
    -----
    The chunksize parameter is also set in cursor.arraysize object, this
    parameter can greatly speed up the dump for some databases when the
    driver uses cursors.arraysize as the number of rows to fetch on a
    single network trip, but this is driver-dependent, not all drivers
    implement this (cx_Oracle does it)

    See Also
    --------
    ploomber.clients.SQLScript :
        A task to execute a SQL script and create a table/view as product
    """
    PRODUCT_CLASSES_ALLOWED = (File, GenericProduct)

    def __init__(self, source, product, dag, name=None, client=None,
                 params=None, chunksize=10000, io_handler=None):
        params = params or {}
        kwargs = dict(hot_reload=dag._params.hot_reload)
        self._source = type(self)._init_source(source, kwargs)
        super().__init__(product, dag, name, params)
        self._client = client
        self.chunksize = chunksize

        if io_handler is None:
            # infer output format from the product's extension, default
            # to CSV when it cannot be determined
            if self.product._identifier._raw.endswith('.parquet'):
                self.io_handler = io.ParquetIO
            else:
                self.io_handler = io.CSVIO
        else:
            self.io_handler = io_handler

    @staticmethod
    def _init_source(source, kwargs):
        return SQLQuerySource(source, **kwargs)

    def run(self):
        """Execute the SELECT statement and stream results to the product.

        Raises
        ------
        SQLTaskBuildError
            If executing the query fails; the original exception is
            chained as the cause.
        """
        # render runtime parameters; [[ ]] delimiters are used so they do
        # not clash with the regular jinja2 {{placeholders}}
        template = Template(str(self.source),
                            variable_start_string='[[',
                            variable_end_string=']]')
        _add_globals(template.environment)
        source_code = template.render(upstream=self.params.get('upstream'))

        path = Path(str(self.params['product']))
        handler = self.io_handler(path, chunked=bool(self.chunksize))

        self._logger.debug('Code: %s', source_code)

        cursor = self.client.connection.cursor()

        # FIX: close the cursor even when execution, fetching or writing
        # fails; the original code only closed it on the success path,
        # leaking the cursor on any error
        try:
            try:
                cursor.execute(source_code)
            except Exception as e:
                raise SQLTaskBuildError(type(self), source_code, e) from e

            if self.chunksize:
                i = 1
                headers = None
                cursor.arraysize = self.chunksize

                while True:
                    self._logger.info('Fetching chunk {}...'.format(i))
                    data = cursor.fetchmany()
                    self._logger.info('Fetched chunk {}'.format(i))

                    if i == 1:
                        headers = [c[0] for c in cursor.description]

                    if not data:
                        break

                    handler.write(data, headers)

                    i = i + 1
            else:
                data = cursor.fetchall()
                headers = [c[0] for c in cursor.description]
                handler.write(data, headers)
        finally:
            cursor.close()


# FIXME: this can be a lot faster for clients that transfer chunksize
# rows over the network
class SQLTransfer(ClientMixin, Task):
    """Transfers data from a SQL database to another (Note: this relies on
    pandas, only use it for small to medium size datasets)

    Parameters
    ----------
    source: str or pathlib.Path
        SQL script source, if str, the content is interpreted as the actual
        script, if pathlib.Path, the content of the file is loaded
    product: ploomber.products.product
        Product generated upon successful execution. For SQLTransfer,
        usually product.client != task.client. task.client represents the
        data source while product.client represents the data destination
    dag: ploomber.DAG
        A DAG to add this task to
    name: str
        A str to identify this task. Should not already exist in the dag
    client: ploomber.clients.SQLAlchemyClient, optional
        The client used to connect to the database. Only required if no
        dag-level client has been declared using dag.clients[class]
    params: dict, optional
        Parameters to pass to the script, by default, the callable will
        be executed with a "product" (which will contain the product object).
        It will also include a "upstream" parameter if the task has upstream
        dependencies along with any parameters declared here. The source
        code is converted to a jinja2.Template for passing parameters,
        refer to jinja2 documentation for details
    chunksize: int, optional
        Number of rows to transfer on every chunk

    Notes
    -----
    This task is *not* intended to move large datasets, but a convenience
    way of transferring small to medium size datasets. It relies on pandas
    to read and write, which introduces a considerable overhead.
    """
    PRODUCT_CLASSES_ALLOWED = (PostgresRelation, SQLiteRelation,
                               GenericSQLRelation)

    @requires(['pandas'], 'SQLTransfer')
    def __init__(self, source, product, dag, name=None, client=None,
                 params=None, chunksize=10000):
        params = params or {}
        kwargs = dict(hot_reload=dag._params.hot_reload)
        self._source = type(self)._init_source(source, kwargs)
        super().__init__(product, dag, name, params)
        self._client = client
        self.chunksize = chunksize

    @staticmethod
    def _init_source(source, kwargs):
        # TODO: this should be a FileSource
        return SQLQuerySource(source, **kwargs)

    def run(self):
        """Read chunks from the source database and append them into the
        destination relation (first chunk replaces any existing table).
        """
        import pandas as pd
        source_code = str(self.source)
        product = self.params['product']

        # read from source_code, use connection from the Task
        self._logger.info('Fetching data...')
        dfs = pd.read_sql_query(source_code, self.client.engine,
                                chunksize=self.chunksize)
        self._logger.info('Done fetching data...')

        for i, df in enumerate(dfs):
            self._logger.info('Storing chunk {i}...'.format(i=i))
            df.to_sql(name=product.name,
                      con=product.client.engine,
                      schema=product.schema,
                      if_exists='replace' if i == 0 else 'append',
                      index=False)


class SQLUpload(ClientMixin, Task):
    """Upload data to a SQL database from a parquet or a csv file. Note:
    this task relies on pandas.to_sql which introduces some overhead.
    Only use it for small to medium size datasets. Each database usually
    come with a tool to upload data efficiently. If you are using
    PostgreSQL, check out the PostgresCopyFrom task.

    Parameters
    ----------
    source : str or pathlib.Path
        Path to parquet or a csv file to upload
    product : ploomber.products.product
        Product generated upon successful execution. The client for the
        product must be in the target database, where as task.client
        should be a client in the source database.
    dag : ploomber.DAG
        A DAG to add this task to
    name : str
        A str to identify this task. Should not already exist in the dag
    client: ploomber.clients.SQLAlchemyClient, optional
        The client used to connect to the database and where the data will
        be uploaded. Only required if no dag-level client has been
        declared using dag.clients[class]
    params : dict, optional
        Parameters to pass to the script, by default, the callable will
        be executed with a "product" (which will contain the product object).
        It will also include a "upstream" parameter if the task has upstream
        dependencies along with any parameters declared here. The source
        code is converted to a jinja2.Template for passing parameters,
        refer to jinja2 documentation for details
    chunksize : int, optional
        Number of rows to transfer on every chunk
    io_handler : callable, optional
        A Python callable to read the source file, if None, it will tried
        to be inferred from the source file extension
    to_sql_kwargs : dict, optional
        Keyword arguments passed to the pandas.DataFrame.to_sql function,
        one useful parameter is "if_exists", which determines if the task
        should fail ("fail"), the relation should be replaced ("replace")
        or rows appended ("append").

    Notes
    -----
    This task is *not* intended to move large datasets, but a convenience
    way of transferring small to medium size datasets. It relies on pandas
    to read and write, which introduces a considerable overhead.
    """
    PRODUCT_CLASSES_ALLOWED = (PostgresRelation, SQLiteRelation,
                               GenericSQLRelation)

    @requires(['pandas'], 'SQLUpload')
    def __init__(self, source, product, dag, name=None, client=None,
                 params=None, chunksize=None, io_handler=None,
                 to_sql_kwargs=None):
        params = params or {}
        kwargs = dict(hot_reload=dag._params.hot_reload)
        self._source = type(self)._init_source(source, kwargs)
        super().__init__(product, dag, name, params)
        self._client = client
        self.chunksize = chunksize
        self.io_handler = io_handler
        self.to_sql_kwargs = to_sql_kwargs or {}

    @staticmethod
    def _init_source(source, kwargs):
        return FileSource(str(source), **kwargs)

    def run(self):
        """Read the source file into a DataFrame and upload it with
        pandas.DataFrame.to_sql.

        Raises
        ------
        ValueError
            If io_handler is None and the reading function cannot be
            inferred from the file extension.
        """
        import pandas as pd

        product = self.params['product']

        path = str(self.source)

        mapping = {
            '.csv': pd.read_csv,
            '.parquet': pd.read_parquet,
        }

        if self.io_handler is None:
            extension = Path(path).suffix
            read_fn = mapping.get(extension)

            if not read_fn:
                # FIX: the original passed two positional arguments to
                # ValueError (comma instead of string concatenation),
                # splitting the message into two args
                raise ValueError('Could not infer reading function for '
                                 'file with extension: {}; pass the '
                                 'function directly in the io_handler '
                                 'argument'.format(extension))
        else:
            read_fn = self.io_handler

        self._logger.info('Reading data...')
        df = read_fn(path)
        self._logger.info('Done reading data...')

        df.to_sql(name=product.name,
                  con=self.client.engine,
                  schema=product.schema,
                  **self.to_sql_kwargs)


# TODO: provide more flexibility to configure the COPY statement
class PostgresCopyFrom(ClientMixin, Task):
    """Efficiently copy data to a postgres database using COPY FROM (faster
    alternative to SQLUpload for postgres). When using a SQLAlchemy client,
    the underlying driver must be psycopg2. Replaces the table if exists.

    Parameters
    ----------
    source: str or pathlib.Path
        Path to parquet file to upload
    client: ploomber.clients.SQLAlchemyClient, optional
        The client used to connect to the database and where the data will
        be uploaded. Only required if no dag-level client has been
        declared using dag.clients[class]

    Notes
    -----
    Although this task does not depend on pandas for data i/o, it still
    needs it to dynamically create the table, after the table is created
    the COPY statement is used to upload the data
    """
    PRODUCT_CLASSES_ALLOWED = (PostgresRelation, )

    @requires(['pandas', 'psycopg2'], 'PostgresCopyFrom')
    def __init__(self, source, product, dag, name=None, client=None,
                 params=None, columns=None):
        params = params or {}
        kwargs = dict(hot_reload=dag._params.hot_reload)
        self._source = type(self)._init_source(source, kwargs)
        super().__init__(product, dag, name, params)
        self._client = client
        # NOTE(review): columns is stored but not used by run() — verify
        # whether it is consumed elsewhere or dead
        self.columns = columns

    @staticmethod
    def _init_source(source, kwargs):
        return FileSource(str(source), **kwargs)

    def run(self):
        """Create (or replace) the target table and bulk-load the parquet
        source into it via COPY FROM STDIN.
        """
        import pandas as pd
        product = self.params['product']
        df = pd.read_parquet(str(self.source))

        # create the table (empty head() upload lets pandas infer the schema)
        self._logger.info('Creating table...')
        df.head(0).to_sql(name=product.name,
                          con=self.client.engine,
                          schema=product.schema,
                          if_exists='replace',
                          index=False)
        self._logger.info('Done creating table.')

        # serialize to a tab-separated in-memory buffer (\\N marks NULLs,
        # matching COPY's default text format)
        f = StringIO()
        df.to_csv(f, sep='\t', na_rep='\\N', header=False, index=False)
        f.seek(0)

        # upload using copy
        cur = self.client.connection.cursor()

        # FIX: release the buffer and cursor even if COPY fails; the
        # original only closed them on the success path
        try:
            self._logger.info('Copying data...')
            cur.copy_expert(f'COPY {product} FROM STDIN', f)
        finally:
            f.close()
            cur.close()
import versioneer
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

requires = [
    "matplotlib>=2.0.0",
    "pandas>=0.24",
    "Cython",
    "scipy<1.3",
    "seaborn",
    "scikit-learn>=0.21.3",
    "statsmodels",
    "natsort",
    "anndata",
    "numba",
    "numpy",
    "tables",
    "xlsxwriter",
    "loompy",
    "docopt",
    "setuptools",
    "plotly",
    "pybind11",
    "joblib",
    "scikit-misc",
    "pyarrow",
    "umap-learn>=0.3.9",
    "lightgbm==2.2.1",
    "python-igraph",
    "MulticoreTSNE-modified",
    "hnswlib",
    "fisher-modified",
    "louvain-github",
    "leidenalg",
    "forceatlas2-python",
    "scplot",
]

setup(
    name="sccloud",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="scRNA-Seq analysis tools that scale to millions of cells",
    long_description=long_description,
    url="https://github.com/klarman-cell-observatory/scCloudPy",
    author="Bo Li, Joshua Gould, Yiming Yang, Siranush Sarkizova",
    author_email="sccloud@googlegroups.com, sccloud@broadinstitute.org",
    classifiers=[
        # https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Framework :: Jupyter",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ],
    keywords="single cell/nucleus genomics analysis",
    packages=find_packages(),
    install_requires=requires,
    extras_require=dict(fitsne=["fitsne"], mkl=["mkl"]),
    python_requires="~=3.5",
    package_data={
        "sccloud.annotate_cluster": [
            "human_immune_cell_markers.json",
            "mouse_immune_cell_markers.json",
            "mouse_brain_cell_markers.json",
            "human_brain_cell_markers.json",
        ],
        # FIX: the key was "scCloud.check_sample_indexes", which does not
        # match the lowercase package name used everywhere else (name=,
        # entry point, the other package_data key), so the JSON file was
        # silently never included for that subpackage.
        "sccloud.check_sample_indexes": ["chromium-dna-sample-indexes-plate.json"],
    },
    entry_points={"console_scripts": ["sccloud=sccloud.__main__:main"]},
)
/* * jit-block.h - Functions for manipulating blocks. * * Copyright (C) 2004 Southern Storm Software, Pty Ltd. * * The libjit library is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation, either version 2.1 of * the License, or (at your option) any later version. * * The libjit library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the libjit library. If not, see * <http://www.gnu.org/licenses/>. */ #ifndef _JIT_BLOCK_H #define _JIT_BLOCK_H #include <jit/jit-common.h> #ifdef __cplusplus extern "C" { #endif jit_function_t jit_block_get_function(jit_block_t block) JIT_NOTHROW; jit_context_t jit_block_get_context(jit_block_t block) JIT_NOTHROW; jit_label_t jit_block_get_label(jit_block_t block) JIT_NOTHROW; jit_label_t jit_block_get_next_label(jit_block_t block, jit_label_t label) JIT_NOTHROW; jit_block_t jit_block_next(jit_function_t func, jit_block_t previous) JIT_NOTHROW; jit_block_t jit_block_previous(jit_function_t func, jit_block_t previous) JIT_NOTHROW; jit_block_t jit_block_from_label(jit_function_t func, jit_label_t label) JIT_NOTHROW; int jit_block_set_meta(jit_block_t block, int type, void *data, jit_meta_free_func free_data) JIT_NOTHROW; void *jit_block_get_meta(jit_block_t block, int type) JIT_NOTHROW; void jit_block_free_meta(jit_block_t block, int type) JIT_NOTHROW; int jit_block_is_reachable(jit_block_t block) JIT_NOTHROW; int jit_block_ends_in_dead(jit_block_t block) JIT_NOTHROW; int jit_block_current_is_dead(jit_function_t func) JIT_NOTHROW; #ifdef __cplusplus }; #endif #endif /* _JIT_BLOCK_H */
#!/usr/bin/env python3 from src.util import * # tag::starOne[] foods = read_file_to_list("input.txt") l = len(foods) dangerous = dict() while True: allergens = dict() for i in range(l): if len(foods[i]["allergens"]) == 1: allg = list(foods[i]["allergens"])[0] if allg not in allergens: allergens[allg] = set(foods[i]["ingredients"]) else: allergens[allg].intersection_update(foods[i]["ingredients"]) for j in range(i+1,l): ingr = foods[i]["ingredients"] & foods[j]["ingredients"] allerg = foods[i]["allergens"] & foods[j]["allergens"] if len(allerg) == 1: allg = list(allerg)[0] if allg not in allergens: allergens[allg] = set(ingr) else: allergens[allg].intersection_update(ingr) reduce = False for allg in allergens.keys(): if len(allergens[allg]) == 1: ingr = list(allergens[allg])[0] #print(ingr +" has "+allg) dangerous[allg] = ingr reduce = True for food in foods: food["ingredients"].discard(ingr) food["allergens"].discard(allg) if not reduce: break rest = [] for food in foods: if len (food["allergens"]) != 0: raise Exception(food) rest += list(food["ingredients"]) print(len(rest)) # end::starOne[] # tag::starTwo[] dangerousList = [] for k in sorted(dangerous.keys()): dangerousList.append(dangerous[k]) print(','.join(dangerousList)) # end::starTwo[]
#coding:utf-8 # # id: functional.procedure.create.02 # title: CREATE PROCEDURE - Input parameters # decription: CREATE PROCEDURE - Input parameters # # Dependencies: # CREATE DATABASE # tracker_id: # min_versions: [] # versions: 2.1 # qmid: functional.procedure.create.create_procedure_02 import pytest from firebird.qa import db_factory, isql_act, Action # version: 2.1 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) test_script_1 = """SET TERM ^; CREATE PROCEDURE test( p1 SMALLINT, p2 INTEGER, p3 FLOAT, p4 DOUBLE PRECISION, p5 DECIMAL(9,3), p6 NUMERIC(10,4), p7 DATE, p8 TIME, p9 TIMESTAMP, p10 CHAR(40), p11 VARCHAR(60), p12 NCHAR(70)) AS BEGIN POST_EVENT 'Test'; END ^ SET TERM ;^ commit; SHOW PROCEDURE test;""" act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """Procedure text: ============================================================================= BEGIN POST_EVENT 'Test'; END ============================================================================= Parameters: P1 INPUT SMALLINT P2 INPUT INTEGER P3 INPUT FLOAT P4 INPUT DOUBLE PRECISION P5 INPUT DECIMAL(9, 3) P6 INPUT NUMERIC(10, 4) P7 INPUT DATE P8 INPUT TIME P9 INPUT TIMESTAMP P10 INPUT CHAR(40) P11 INPUT VARCHAR(60) P12 INPUT CHAR(70) CHARACTER SET ISO8859_1""" @pytest.mark.version('>=2.1') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_stdout == act_1.clean_expected_stdout
import numpy as np
from scipy.integrate import odeint


class FirstOrderHold:
    """First-order-hold discretization of a continuous nonlinear system for
    successive convexification: x_{k+1} = A_bar x_k + B_bar u_k
    + C_bar u_{k+1} + z_bar, with the input linearly interpolated over each
    interval.
    """

    def __init__(self, m, K, sigma):
        # m: model object providing n_x, n_u and get_equations()
        #    (f, A, B callables) — assumed interface, defined elsewhere
        # K: number of temporal discretization nodes
        # sigma: total time; each of the K-1 intervals lasts sigma/(K-1)
        self.K = K
        self.m = m
        self.n_x = m.n_x
        self.n_u = m.n_u

        # discretization matrices, stored flattened column-wise per interval
        self.A_bar = np.zeros([m.n_x * m.n_x, K - 1])
        self.B_bar = np.zeros([m.n_x * m.n_u, K - 1])
        self.C_bar = np.zeros([m.n_x * m.n_u, K - 1])
        self.z_bar = np.zeros([m.n_x, K - 1])

        # vector indices for flat matrices: the ODE state V concatenates
        # [x, A_bar, B_bar, C_bar, z_bar] into one flat vector
        x_end = m.n_x
        A_bar_end = m.n_x * (1 + m.n_x)
        B_bar_end = m.n_x * (1 + m.n_x + m.n_u)
        C_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u)
        z_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u + 1)
        self.x_ind = slice(0, x_end)
        self.A_bar_ind = slice(x_end, A_bar_end)
        self.B_bar_ind = slice(A_bar_end, B_bar_end)
        self.C_bar_ind = slice(B_bar_end, C_bar_end)
        self.z_bar_ind = slice(C_bar_end, z_bar_end)

        self.f, self.A, self.B = m.get_equations()

        # integration initial condition: zero everywhere except the
        # A_bar slot, which starts at the identity (state transition at t=0)
        self.V0 = np.zeros((m.n_x * (1 + m.n_x + m.n_u + m.n_u + 1),))
        self.V0[self.A_bar_ind] = np.eye(m.n_x).reshape(-1)

        self.sigma = sigma
        self.dt = 1. / (K - 1) * sigma

    def calculate_discretization(self, X, U):
        """
        Calculate discretization for given states, inputs and total time.

        :param X: Matrix of states for all time points, shape (n_x, K)
        :param U: Matrix of inputs for all time points, shape (n_u, K)
        :return: The discretization matrices A_bar, B_bar, C_bar, z_bar
        """
        for k in range(self.K - 1):
            self.V0[self.x_ind] = X[:, k]
            # integrate the augmented ODE over one interval; [1, :] picks
            # the value at t=dt (odeint returns both requested times)
            V = np.array(odeint(self._ode_dVdt, self.V0, (0, self.dt),
                                args=(U[:, k], U[:, k + 1]))[1, :])

            # flatten matrices in column-major (Fortran) order for CVXPY
            Phi = V[self.A_bar_ind].reshape((self.n_x, self.n_x))
            self.A_bar[:, k] = Phi.flatten(order='F')
            # pre-multiply by Phi: see the note in _ode_dVdt about
            # integrating against Phi_A(xi, tau_k)^{-1}
            self.B_bar[:, k] = np.matmul(
                Phi,
                V[self.B_bar_ind].reshape((self.n_x, self.n_u))
            ).flatten(order='F')
            self.C_bar[:, k] = np.matmul(
                Phi,
                V[self.C_bar_ind].reshape((self.n_x, self.n_u))
            ).flatten(order='F')
            self.z_bar[:, k] = np.matmul(Phi, V[self.z_bar_ind])

        return self.A_bar, self.B_bar, self.C_bar, self.z_bar

    def _ode_dVdt(self, V, t, u_t0, u_t1):
        """
        ODE function to compute dVdt.

        :param V: Evaluation state V = [x, Phi_A, B_bar, C_bar, z_bar]
        :param t: Evaluation time
        :param u_t0: Input at start of interval
        :param u_t1: Input at end of interval
        :return: Derivative at current time and state dVdt
        """
        # first-order-hold interpolation weights for the two inputs
        alpha = (self.dt - t) / self.dt
        beta = t / self.dt
        x = V[self.x_ind]
        u = u_t0 + (t / self.dt) * (u_t1 - u_t0)

        # using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
        # and pre-multiplying with \Phi_A(\tau_{k+1},\tau_k) after integration
        Phi_A_xi = np.linalg.inv(V[self.A_bar_ind].reshape((self.n_x, self.n_x)))

        A_subs = self.A(x, u)
        B_subs = self.B(x, u)
        f_subs = self.f(x, u)

        dVdt = np.zeros_like(V)
        dVdt[self.x_ind] = f_subs.T
        dVdt[self.A_bar_ind] = np.matmul(
            A_subs, V[self.A_bar_ind].reshape((self.n_x, self.n_x))).reshape(-1)
        dVdt[self.B_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * alpha
        dVdt[self.C_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * beta
        # residual of the linearization: f(x,u) - A x - B u
        z_t = np.squeeze(f_subs) - np.matmul(A_subs, x) - np.matmul(B_subs, u)
        dVdt[self.z_bar_ind] = np.matmul(Phi_A_xi, z_t)

        return dVdt

    def integrate_nonlinear_piecewise(self, X_l, U):
        """
        Piecewise integration to verify accuracy of linearization.

        :param X_l: Linear state evolution
        :param U: Linear input evolution
        :return: The piecewise integrated dynamics
        """
        X_nl = np.zeros_like(X_l)
        X_nl[:, 0] = X_l[:, 0]

        # restart each interval from the *linear* trajectory so errors
        # do not accumulate across intervals
        for k in range(self.K - 1):
            X_nl[:, k + 1] = odeint(self._dx, X_l[:, k], (0, self.dt),
                                    args=(U[:, k], U[:, k + 1]))[1, :]

        return X_nl

    def integrate_nonlinear_full(self, x0, U):
        """
        Simulate nonlinear behavior given an initial state and an input over time.

        :param x0: Initial state
        :param U: Linear input evolution
        :return: The full integrated dynamics
        """
        X_nl = np.zeros([x0.size, self.K])
        X_nl[:, 0] = x0

        # chain intervals: each starts from the previously integrated state
        for k in range(self.K - 1):
            X_nl[:, k + 1] = odeint(self._dx, X_nl[:, k], (0, self.dt),
                                    args=(U[:, k], U[:, k + 1]))[1, :]

        return X_nl

    def _dx(self, x, t, u_t0, u_t1):
        # nonlinear dynamics with first-order-hold interpolated input
        u = u_t0 + (t / self.dt) * (u_t1 - u_t0)
        return np.squeeze(self.f(x, u))
/** * @file * JavaScript behaviors for webform custom options. */ (function ($, Drupal) { 'use strict'; Drupal.webformOptionsCustom = Drupal.webformOptionsCustom || {}; // @see http://api.jqueryui.com/tooltip/ Drupal.webformOptionsCustom.jQueryUiTooltip = Drupal.webformOptionsCustom.jQueryUiTooltip || {}; Drupal.webformOptionsCustom.jQueryUiTooltip.options = Drupal.webformOptionsCustom.jQueryUiTooltip.options || { tooltipClass: 'webform-options-custom-tooltip', track: true, // @see // https://stackoverflow.com/questions/18231315/jquery-ui-tooltip-html-with-links show: {delay: 300}, close: function (event, ui) { ui.tooltip.hover( function () { $(this).stop(true).fadeTo(400, 1); }, function () { $(this).fadeOut('400', function () { $(this).remove(); }); }); } }; // @see http://bootstrapdocs.com/v3.0.3/docs/javascript/#tooltips-usage Drupal.webformOptionsCustom.bootstrapTooltip = Drupal.webformOptionsCustom.bootstrapTooltip || {}; Drupal.webformOptionsCustom.bootstrapTooltip.options = Drupal.webformOptionsCustom.bootstrapTooltip.options || { delay: 200 }; // @see https://github.com/ariutta/svg-pan-zoom Drupal.webformOptionsCustom.panAndZoom = Drupal.webformOptionsCustom.panAndZoom || {}; Drupal.webformOptionsCustom.panAndZoom.options = Drupal.webformOptionsCustom.panAndZoom.options || { controlIconsEnabled: true, // Mouse event must be enable to allow keyboard accessibility to // continue to work. preventMouseEventsDefault: false, // Prevent scroll wheel zoom to allow users to scroll past the SVG graphic. mouseWheelZoomEnabled: false }; /** * Custom options. * * @type {Drupal~behavior} * * @prop {Drupal~behaviorAttach} attach * Attaches the behavior for the block settings summaries. 
*/ Drupal.behaviors.webformOptionsCustom = { attach: function (context) { $('.js-webform-options-custom', context).once('webform-options-custom').each(function () { var $element = $(this); var $select = $element.find('select'); var $template = $element.find('.webform-options-custom-template'); var $svg = $template.children('svg'); // Get select menu options. var descriptions = $element.attr('data-descriptions') ? JSON.parse($element.attr('data-descriptions')) : {}; var selectOptions = {}; $select.find('option').each(function () { selectOptions[this.value] = this; selectOptions[this.value].description = descriptions[this.value]; }); var hasMultiple = $select.is('[multiple]'); var hasFill = $element.is('[data-fill]'); var hasZoom = $element.is('[data-zoom]'); var hasTooltip = $element.is('[data-tooltip]'); var hasSelectHidden = $element.is('[data-select-hidden]'); var $templateOptions = $template.find('[data-option-value]'); var $focusableTemplateOptions = $templateOptions.not('text'); // If select is hidden set its tabindex to -1 to prevent focus. if (hasSelectHidden) { $select.attr('tabindex', '-1'); } // Initialize template options. $templateOptions.each(function () { var $templateOption = $(this); var value = $templateOption.attr('data-option-value'); var option = selectOptions[value]; // If select menu option is missing remove the // 'data-option-value' attribute. if (!option) { $templateOption.removeAttr('data-option-value'); return; } initializeSelectOption(option); initializeTemplateOption($templateOption, option); initializeTemplateTooltip($templateOption, option); }); // Pan and zoom. initializeZoom(); // Select event handling. $select.on('change', setSelectValue); // Template event handling. $template .on('click', setTemplateValue) .on('keydown', function (event) { var $templateOption = $(event.target); if (!$templateOption.is('[data-option-value]')) { return; } // Space or return. 
if (event.which === 32 || event.which === 13) { setTemplateValue(event); event.preventDefault(); return; } if (event.which >= 37 && event.which <= 40) { var $prev; var $next; $focusableTemplateOptions.each(function (index) { if (this === event.target) { $prev = $focusableTemplateOptions[index - 1] ? $($focusableTemplateOptions[index - 1]) : null; $next = $focusableTemplateOptions [index + 1] ? $($focusableTemplateOptions[index + 1]) : null; } }); if (event.which === 37 || event.which === 38) { if ($prev) { $prev.focus(); } } else if (event.which === 39 || event.which === 40) { if ($next) { $next.focus(); } } event.preventDefault(); return; } }); setSelectValue(); /* ****************************************************************** */ /* See select and template value callbacks. */ /* ****************************************************************** */ /** * Set select menu options value */ function setSelectValue() { var values = (hasMultiple) ? $select.val() : [$select.val()]; clearTemplateOptions(); $(values).each(function (index, value) { $template.find('[data-option-value="' + value + '"]') .attr('aria-checked', 'true'); }); setTemplateTabIndex(); } /** * Set template options value. * * @param {jQuery.Event} event * The event triggered. */ function setTemplateValue(event) { var $templateOption = $(event.target); if (!$templateOption.is('[data-option-value]')) { $templateOption = $templateOption.parents('[data-option-value]'); } if ($templateOption.is('[data-option-value]')) { setValue($templateOption.attr('data-option-value')); if ($templateOption.is('[href]')) { event.preventDefault(); } } setTemplateTabIndex(); } /** * Set template tab index. * * @see https://www.w3.org/TR/wai-aria-practices/#kbd_roving_tabindex */ function setTemplateTabIndex() { if (hasMultiple) { return; } // Remove existing tabindex. $template .find('[data-option-value][tabindex="0"]') .attr('tabindex', '-1'); // Find checked. 
var $checked = $template .find('[data-option-value][aria-checked="true"]'); if ($checked.length) { // Add tabindex to checked options. $checked.not('text').first().attr('tabindex', '0'); } else { // Add tabindex to the first not disabled and <text> // template option. $template .find('[data-option-value]') .not('[aria-disabled="true"], text') .first() .attr('tabindex', '0'); } } /** * Set the custom options value. * * @param {string} value * Custom option value. */ function setValue(value) { if (selectOptions[value].disabled) { return; } var $templateOption = $template.find('[data-option-value="' + value + '"]'); if ($templateOption.attr('aria-checked') === 'true') { selectOptions[value].selected = false; $template.find('[data-option-value="' + value + '"]') .attr('aria-checked', 'false'); } else { if (!hasMultiple) { clearTemplateOptions(); } selectOptions[value].selected = true; $template.find('[data-option-value="' + value + '"]') .attr('aria-checked', 'true'); } // Never alter SVG <text> elements. if ($templateOption[0].tagName === 'text') { $template .find('[data-option-value="' + value + '"]') .not('text') .first() .focus(); } $select.change(); } /* ****************************************************************** */ /* Initialize methods. */ /* ****************************************************************** */ /** * Initialize a select option. * * @param {object} option * The select option. */ function initializeSelectOption(option) { // Get description and set text. var text = option.text; var description = ''; if (text.indexOf(' -- ') !== -1) { var parts = text.split(' -- '); text = parts[0]; description = parts[1]; // Reset option text. option.text = text; option.description = description; } } /** * Initialize a template option. * * @param {object} $templateOption * The template option. * @param {object} option * The select option. */ function initializeTemplateOption($templateOption, option) { // Never alter SVG <text> elements. 
if ($templateOption[0].tagName === 'text') { return; } // Set ARIA attributes. $templateOption .attr('role', (hasMultiple) ? 'radio' : 'checkbox') .attr('aria-checked', 'false'); // Remove SVG fill style property so that we can change an option's // fill property via CSS. // @see webform_options_custom.element.css if (hasFill) { $templateOption.css('fill', ''); } // Set tabindex or disabled. if (option.disabled) { $templateOption.attr('aria-disabled', 'true'); } else { $templateOption.attr('tabindex', (hasMultiple) ? '0' : '-1'); } } /** * Initialize a template tooltip. * * @param {object} $templateOption * The template option. * @param {object} option * The select option. */ function initializeTemplateTooltip($templateOption, option) { if (!hasTooltip) { return; } var content = '<div class="webform-options-custom-tooltip--text" data-tooltip-value="' + Drupal.checkPlain(option.value) + '">' + Drupal.checkPlain(option.text) + '</div>'; if (option.description) { content += '<div class="webform-options-custom-tooltip--description">' + option.description + '</div>'; } if (typeof $.ui.tooltip !== 'undefined') { // jQuery UI tooltip support. var tooltipOptions = $.extend({ content: content, items: '[data-option-value]', open: function (event, ui) { $(ui.tooltip).on('click', function () { var value = $(this) .find('[data-tooltip-value]') .attr('data-tooltip-value'); setValue(value); }); } }, Drupal.webformOptionsCustom.jQueryUiTooltip.options); $templateOption.tooltip(tooltipOptions); } else if ((typeof $.fn.tooltip) !== 'undefined') { // Bootstrap tooltip support. var options = $.extend({ html: true, title: content }, Drupal.webformOptionsCustom.bootstrapTooltip.options); $templateOption .tooltip(options) .on('show.bs.tooltip', function (event) { $templateOptions.not($templateOption).tooltip('hide'); }); } } /** * Initialize SVG pan and zoom. 
*/ function initializeZoom() { if (!hasZoom || !window.svgPanZoom || !$svg.length) { return; } var options = $.extend({ }, Drupal.webformOptionsCustom.panAndZoom.options); var panZoom = window.svgPanZoom($svg[0], options); $(window).resize(function () { panZoom.resize(); panZoom.fit(); panZoom.center(); }); } /* ****************************************************************** */ /* Clear methods. */ /* ****************************************************************** */ /** * Clear all template options. */ function clearTemplateOptions() { $templateOptions.attr('aria-checked', 'false'); } }); } }; })(jQuery, Drupal);
// Minimal Redux usage demo: dispatch counter actions and log every state change.
import { increment, decrement, reset } from './actions/counter'
import store from './store'

// Log the initial state.
console.log(store.getState())

// Log on every state update.
// Note: subscribe() returns a function used to unregister the listener.
const unsubscribe = store.subscribe(() => console.log(store.getState()))

// Dispatch a series of actions.
store.dispatch(increment())
store.dispatch(decrement())
store.dispatch(reset())

// Stop listening for state updates.
unsubscribe()
# -*- coding: utf-8 -*- # mk42 # mk42/apps/core/migrations/0006_auto_20170701_1801.py # Generated by Django 1.11.2 on 2017-07-01 18:01 from __future__ import unicode_literals import uuid from django.db import ( migrations, models, ) import django.db.models.deletion import redactor.fields class Migration(migrations.Migration): dependencies = [ ("core", "0005_auto_20170618_1023"), ] operations = [ migrations.CreateModel( name="Event", fields=[ ("id", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name="ID")), ("name", models.CharField(db_index=True, max_length=256, unique=True, verbose_name="name")), ("description", redactor.fields.RedactorField(blank=True, db_index=True, null=True, verbose_name="description")), ("start", models.DateTimeField(auto_now=True, db_index=True, null=True, verbose_name="start date/time")), ("created", models.DateTimeField(auto_now_add=True, db_index=True, null=True, verbose_name="created date/time")), ], options={ "ordering": ["-created", "-start"], "verbose_name": "event", "verbose_name_plural": "events", }, ), migrations.AlterField( model_name="group", name="name", field=models.CharField(db_index=True, max_length=256, unique=True, verbose_name="name"), ), migrations.AddField( model_name="event", name="group", field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name="events", to="core.Group", verbose_name="group"), ), ]
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef BASE_NIX_XDG_UTIL_H_ #define BASE_NIX_XDG_UTIL_H_ // XDG refers to http://en.wikipedia.org/wiki/Freedesktop.org . // This file contains utilities found across free desktop environments. // // TODO(brettw) this file should be in app/x11, but is currently used by // net. We should have a net API to allow the embedder to specify the behavior // that it uses XDG for, and then move this file. #include "base/base_export.h" #ifdef nix #error asdf #endif namespace base { class Environment; class FilePath; namespace nix { // The default XDG config directory name. BASE_EXPORT extern const char kDotConfigDir[]; // The XDG config directory environment variable. BASE_EXPORT extern const char kXdgConfigHomeEnvVar[]; // Utility function for getting XDG directories. // |env_name| is the name of an environment variable that we want to use to get // a directory path. |fallback_dir| is the directory relative to $HOME that we // use if |env_name| cannot be found or is empty. |fallback_dir| may be NULL. // Examples of |env_name| are XDG_CONFIG_HOME and XDG_DATA_HOME. BASE_EXPORT FilePath GetXDGDirectory(Environment* env, const char* env_name, const char* fallback_dir); // Wrapper around xdg_user_dir_lookup() from src/base/third_party/xdg-user-dirs // This looks up "well known" user directories like the desktop and music // folder. Examples of |dir_name| are DESKTOP and MUSIC. BASE_EXPORT FilePath GetXDGUserDirectory(const char* dir_name, const char* fallback_dir); enum DesktopEnvironment { DESKTOP_ENVIRONMENT_OTHER, DESKTOP_ENVIRONMENT_CINNAMON, DESKTOP_ENVIRONMENT_GNOME, // KDE3, KDE4 and KDE5 are sufficiently different that we count // them as different desktop environments here. 
DESKTOP_ENVIRONMENT_KDE3, DESKTOP_ENVIRONMENT_KDE4, DESKTOP_ENVIRONMENT_KDE5, DESKTOP_ENVIRONMENT_PANTHEON, DESKTOP_ENVIRONMENT_UNITY, DESKTOP_ENVIRONMENT_XFCE, }; // Return an entry from the DesktopEnvironment enum with a best guess // of which desktop environment we're using. We use this to know when // to attempt to use preferences from the desktop environment -- // proxy settings, password manager, etc. BASE_EXPORT DesktopEnvironment GetDesktopEnvironment(Environment* env); // Return a string representation of the given desktop environment. // May return NULL in the case of DESKTOP_ENVIRONMENT_OTHER. BASE_EXPORT const char* GetDesktopEnvironmentName(DesktopEnvironment env); // Convenience wrapper that calls GetDesktopEnvironment() first. BASE_EXPORT const char* GetDesktopEnvironmentName(Environment* env); } // namespace nix } // namespace base #endif // BASE_NIX_XDG_UTIL_H_
# Unless explicitly stated otherwise all files in this repository are licensed # under the Apache License Version 2.0. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2020 Datadog, Inc. import base64 import os import boto3 import logging import re logger = logging.getLogger() logger.setLevel(logging.getLevelName(os.environ.get("DD_LOG_LEVEL", "INFO").upper())) def get_env_var(envvar, default, boolean=False): """ Return the value of the given environment variable with debug logging. When boolean=True, parse the value as a boolean case-insensitively. """ value = os.getenv(envvar, default=default) if boolean: value = value.lower() == "true" logger.debug(f"{envvar}: {value}") return value ## @param DD_API_KEY - String - conditional - default: none ## The Datadog API key associated with your Datadog Account ## It can be found here: ## ## * Datadog US Site: https://app.datadoghq.com/account/settings#api ## * Datadog EU Site: https://app.datadoghq.eu/account/settings#api ## ## Must be set if one of the following is not set: DD_API_KEY_SECRET_ARN, DD_API_KEY_SSM_NAME, DD_KMS_API_KEY # DD_API_KEY = "<YOUR_DATADOG_API_KEY>" ## @param DD_API_KEY_SECRET_ARN - String - optional - default: none ## ARN of Datadog API key stored in AWS Secrets Manager ## ## Supercedes: DD_API_KEY_SSM_NAME, DD_KMS_API_KEY, DD_API_KEY ## @param DD_API_KEY_SSM_NAME - String - optional - default: none ## Name of parameter containing Datadog API key in AWS SSM Parameter Store ## ## Supercedes: DD_KMS_API_KEY, DD_API_KEY ## @param DD_KMS_API_KEY - String - optional - default: none ## AWS KMS encrypted Datadog API key ## ## Supercedes: DD_API_KEY ## @param DD_FORWARD_LOG - boolean - optional - default: true ## Set this variable to `False` to disable log forwarding. ## E.g., when you only want to forward metrics and traces from logs. 
# These settings are *referenced* below (DD_API_URL, the TCP/HTTP branch, the
# PrivateLink override block), so they must be real assignments -- leaving them
# commented out raises NameError at import time.
DD_FORWARD_LOG = get_env_var("DD_FORWARD_LOG", "true", boolean=True)

## @param DD_USE_TCP - boolean - optional -default: false
## Change this value to `true` to send your logs and metrics using the TCP network client
## By default, it uses the HTTP client.
DD_USE_TCP = get_env_var("DD_USE_TCP", "false", boolean=True)

## @param DD_USE_COMPRESSION - boolean - optional -default: true
## Only valid when sending logs over HTTP
## Change this value to `false` to send your logs without any compression applied
## By default, compression is enabled.
DD_USE_COMPRESSION = get_env_var("DD_USE_COMPRESSION", "true", boolean=True)

## @param DD_COMPRESSION_LEVEL - integer - optional -default: 6
## Change this value to set the compression level.
## Values range from 0 (no compression) to 9 (best compression).
## By default, compression is set to level 6.
DD_COMPRESSION_LEVEL = int(os.getenv("DD_COMPRESSION_LEVEL", 6))

## @param DD_NO_SSL - boolean - optional -default: false
## Change this value to `true` to disable SSL
## Useful when you are forwarding your logs to a proxy.
DD_NO_SSL = get_env_var("DD_NO_SSL", "false", boolean=True)

## @param DD_SKIP_SSL_VALIDATION - boolean - optional -default: false
## Disable SSL certificate validation when forwarding logs via HTTP.
DD_SKIP_SSL_VALIDATION = get_env_var("DD_SKIP_SSL_VALIDATION", "false", boolean=True)

## @param DD_SITE - String - optional -default: datadoghq.com
## Define the Datadog Site to send your logs and metrics to.
## Set it to `datadoghq.eu` to send your logs and metrics to Datadog EU site.
DD_SITE = get_env_var("DD_SITE", default="datadoghq.com")

## @param DD_TAGS - list of comma separated strings - optional -default: none
## Pass custom tags as environment variable or through this variable.
## Ensure your tags are a comma separated list of strings with no trailing comma in the envvar!
DD_TAGS = get_env_var("DD_TAGS", "")

## @param DD_MAX_WORKERS - Max number of workers sending logs concurrently
DD_MAX_WORKERS = int(os.getenv("DD_MAX_WORKERS", 20))

## @param DD_API_URL - Url to use for validating the api key.
DD_API_URL = get_env_var(
    "DD_API_URL",
    default="{}://api.{}".format("http" if DD_NO_SSL else "https", DD_SITE),
)

## @param DD_TRACE_INTAKE_URL - Url for the trace intake.
DD_TRACE_INTAKE_URL = get_env_var(
    "DD_TRACE_INTAKE_URL",
    default="{}://trace.agent.{}".format("http" if DD_NO_SSL else "https", DD_SITE),
)

# Intake endpoint/port differ between the TCP and HTTP clients
# (and, for TCP, between the EU and other sites).
if DD_USE_TCP:
    DD_URL = get_env_var("DD_URL", default="lambda-intake.logs." + DD_SITE)
    try:
        if "DD_SITE" in os.environ and DD_SITE == "datadoghq.eu":
            DD_PORT = int(get_env_var("DD_PORT", default="443"))
        else:
            DD_PORT = int(get_env_var("DD_PORT", default="10516"))
    except Exception:
        DD_PORT = 10516
else:
    DD_URL = get_env_var("DD_URL", default="lambda-http-intake.logs." + DD_SITE)
    DD_PORT = int(get_env_var("DD_PORT", default="443"))

## @param DD_USE_VPC - boolean - optional -default: false
DD_USE_VPC = get_env_var("DD_USE_VPC", "false", boolean=True)

## @param DD_USE_PRIVATE_LINK - whether to forward logs via PrivateLink
## Overrides incompatible settings
DD_USE_PRIVATE_LINK = get_env_var("DD_USE_PRIVATE_LINK", "false", boolean=True)
if DD_USE_PRIVATE_LINK:
    logger.debug("Private link enabled, overriding configuration settings")
    # Only the US Datadog site is supported when PrivateLink is enabled
    DD_SITE = "datadoghq.com"
    # TCP isn't supported when PrivateLink is enabled
    DD_USE_TCP = False
    DD_NO_SSL = False
    DD_PORT = 443
    # Override URLs
    DD_URL = "api-pvtlink.logs.datadoghq.com"
    DD_API_URL = "https://pvtlink.api.datadoghq.com"
    DD_TRACE_INTAKE_URL = "https://trace-pvtlink.agent.datadoghq.com"


class ScrubbingRuleConfig(object):
    """A named scrubbing rule: regex ``pattern`` replaced by ``placeholder``."""

    def __init__(self, name, pattern, placeholder):
        self.name = name
        self.pattern = pattern
        self.placeholder = placeholder


# Scrubbing sensitive data
# Option to redact all pattern that looks like an ip address / email address / custom pattern
# Raw strings are required here: "\d" in a plain literal is an invalid
# escape sequence (DeprecationWarning, later SyntaxWarning).
SCRUBBING_RULE_CONFIGS = [
    ScrubbingRuleConfig(
        "REDACT_IP", r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", "xxx.xxx.xxx.xxx"
    ),
    ScrubbingRuleConfig(
        "REDACT_EMAIL",
        r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+",
        "xxxxx@xxxxx.com",
    ),
    ScrubbingRuleConfig(
        "DD_SCRUBBING_RULE",
        get_env_var("DD_SCRUBBING_RULE", default=None),
        get_env_var("DD_SCRUBBING_RULE_REPLACEMENT", default="xxxxx"),
    ),
]

# Filtering logs
# Option to include or exclude logs based on a pattern match
INCLUDE_AT_MATCH = get_env_var("INCLUDE_AT_MATCH", default=None)
EXCLUDE_AT_MATCH = get_env_var("EXCLUDE_AT_MATCH", default=None)

# DD API Key: resolved from (in order of precedence) Secrets Manager,
# SSM Parameter Store, KMS-encrypted env var, or a plain env var.
if "DD_API_KEY_SECRET_ARN" in os.environ:
    SECRET_ARN = os.environ["DD_API_KEY_SECRET_ARN"]
    DD_API_KEY = boto3.client("secretsmanager").get_secret_value(SecretId=SECRET_ARN)[
        "SecretString"
    ]
elif "DD_API_KEY_SSM_NAME" in os.environ:
    SECRET_NAME = os.environ["DD_API_KEY_SSM_NAME"]
    DD_API_KEY = boto3.client("ssm").get_parameter(
        Name=SECRET_NAME, WithDecryption=True
    )["Parameter"]["Value"]
elif "DD_KMS_API_KEY" in os.environ:
    ENCRYPTED = os.environ["DD_KMS_API_KEY"]
    DD_API_KEY = boto3.client("kms").decrypt(
        CiphertextBlob=base64.b64decode(ENCRYPTED)
    )["Plaintext"]
    if type(DD_API_KEY) is bytes:
        DD_API_KEY = DD_API_KEY.decode("utf-8")
elif "DD_API_KEY" in os.environ:
    DD_API_KEY = os.environ["DD_API_KEY"]
else:
    # Fail fast with a clear message instead of a NameError on the strip() below.
    raise Exception("Missing Datadog API key")

# Strip any trailing and leading whitespace from the API key
DD_API_KEY = DD_API_KEY.strip()
os.environ["DD_API_KEY"] = DD_API_KEY

# DD_MULTILINE_LOG_REGEX_PATTERN: Multiline Log Regular Expression Pattern
DD_MULTILINE_LOG_REGEX_PATTERN = get_env_var(
    "DD_MULTILINE_LOG_REGEX_PATTERN", default=None
)

# Reserved attribute names used when shipping log events.
DD_SOURCE = "ddsource"
DD_CUSTOM_TAGS = "ddtags"
DD_SERVICE = "service"
DD_HOST = "host"
DD_FORWARDER_VERSION = "3.28.4"

# Additional target lambda invoked async with event data
DD_ADDITIONAL_TARGET_LAMBDAS = get_env_var("DD_ADDITIONAL_TARGET_LAMBDAS", default=None)

# S3-backed tag cache configuration.
DD_S3_BUCKET_NAME = get_env_var("DD_S3_BUCKET_NAME", default=None)
DD_S3_CACHE_FILENAME = "cache.json"
DD_S3_CACHE_LOCK_FILENAME = "cache.lock"
DD_TAGS_CACHE_TTL_SECONDS = int(get_env_var("DD_TAGS_CACHE_TTL_SECONDS", default=300))
DD_S3_CACHE_LOCK_TTL_SECONDS = 60
# Smoke-test queries against the csi3 pipeline API.
# Python 2 syntax (`print` statements) -- do not run under Python 3.
import csi3

# Single-item lookups for the "mkk3" project.
print csi3.Project("mkk3")
print csi3.Shot("mkk3", "JYW_0200")
# Listing helpers: all projects, then sequences and shots under "TEMP".
print csi3.Projects()
print csi3.Seqs("TEMP")
print csi3.Shots("TEMP", "SS")
# Keyword search -- presumably returns items tagged/matching "fx";
# TODO confirm semantics against the csi3 API.
print csi3.searchwordItems("TEMP", "fx")
// Generated caniuse-lite support table for the "Service Workers" feature
// (see the trailing C:"Service Workers" title field). Outer keys are browser
// ids; inner keys are numeric support flags mapping to version lists --
// presumably 1 = supported, 2 = unsupported, 4 = partial, with larger codes
// for flagged/prefixed variants (confirm against caniuse-lite's packer docs).
// Do not edit by hand: regenerated from the caniuse database.
module.exports={A:{A:{"2":"J D E F A B oB"},B:{"1":"N O P Q R U V W X Y Z a b c d e f S H","2":"C K L","322":"G M"},C:{"1":"6 8 9 AB BB CB DB FB GB HB IB JB KB fB gB MB NB T OB PB QB SB TB UB VB WB XB YB ZB aB bB P Q R hB U V W X Y Z a b c d e f S H iB","2":"pB eB I g J D E F A B C K L G M N O h i j k l m n o p q r s t u qB rB","194":"0 1 2 3 4 5 v w x y z","513":"7 EB LB RB"},D:{"1":"7 8 9 AB BB CB DB EB FB GB HB IB JB KB fB LB gB MB NB T OB PB QB RB SB TB UB VB WB XB YB ZB aB bB P Q R U V W X Y Z a b c d e f S H iB sB tB","2":"0 1 I g J D E F A B C K L G M N O h i j k l m n o p q r s t u v w x y z","4":"2 3 4 5 6"},E:{"1":"C K L G cB dB zB 0B 1B lB 2B","2":"I g J D E F A B uB jB vB wB xB yB kB"},F:{"1":"0 1 2 3 4 5 6 7 8 9 u v w x y z AB BB CB DB EB FB GB HB IB JB KB LB MB NB T OB PB QB RB SB TB UB VB WB XB YB ZB aB bB P Q R hB","2":"F B C G M N O h i j k l m n o 3B 4B 5B 6B cB mB 7B dB","4":"p q r s t"},G:{"1":"IC JC KC LC MC NC OC PC QC RC lB","2":"E jB 8B nB 9B AC BC CC DC EC FC GC HC"},H:{"2":"SC"},I:{"2":"eB I TC UC VC WC nB XC YC","4":"H"},J:{"2":"D A"},K:{"2":"A B C cB mB dB","4":"T"},L:{"1":"H"},M:{"1":"S"},N:{"2":"A B"},O:{"1":"ZC"},P:{"1":"I aC bC cC dC eC kB fC gC hC iC jC"},Q:{"1":"kC"},R:{"4":"lC"},S:{"2":"mC"}},B:4,C:"Service Workers"};
import sys
import os

# Make the plugin package importable when running from the /tests directory.
sys.path.append(os.path.abspath('../'))

from unittest import TestCase
from komand_microsoft_atp.connection.connection import Connection
from komand_microsoft_atp.actions.run_antivirus_scan import RunAntivirusScan
import json
import logging


class TestRunAntivirusScan(TestCase):
    """Tests for the RunAntivirusScan action."""

    def test_integration_run_antivirus_scan(self):
        """
        TODO: Implement assertions at the end of this test case

        This is an integration test that will connect to the services your plugin uses. It should be used
        as the basis for tests below that can run independent of a "live" connection.

        This test assumes a normal plugin structure with a /tests directory. In that /tests directory should
        be json samples that contain all the data needed to run this test. To generate samples run:

        icon-plugin generate samples
        """
        log = logging.getLogger("Test")
        test_conn = Connection()
        test_action = RunAntivirusScan()
        test_conn.logger = log
        test_action.logger = log

        try:
            with open("../tests/run_antivirus_scan.json") as file:
                # json.load reads straight from the file object
                # (no intermediate file.read() string).
                test_json = json.load(file).get("body")
                connection_params = test_json.get("connection")
                action_params = test_json.get("input")
        except Exception:
            message = """
            Could not find or read sample tests from /tests directory

            An exception here likely means you didn't fill out your samples correctly in the /tests directory
            Please use 'icon-plugin generate samples', and fill out the resulting test files in the /tests directory
            """
            self.fail(message)

        test_conn.connect(connection_params)
        test_action.connection = test_conn
        results = test_action.run(action_params)

        # TODO: Remove this line
        self.fail("Unimplemented test case")

        # TODO: The following assert should be updated to look for data from your action
        # For example: self.assertEqual({"success": True}, results)
        # (assertEqual, not the deprecated assertEquals alias)
        self.assertEqual({}, results)

    def test_run_antivirus_scan(self):
        """
        TODO: Implement test cases here

        Here you can mock the connection with data returned from the above integration test.
        For information on mocking and unit testing please go here:

        https://docs.google.com/document/d/1PifePDG1-mBcmNYE8dULwGxJimiRBrax5BIDG_0TFQI/edit?usp=sharing

        You can either create a formal Mock for this, or you can create a fake connection class to pass to your
        action for testing.
        """
        self.fail("Unimplemented Test Case")
// Battleship client: registers a click handler on the shooting grid, talks to
// the game server over a WebSocket, and re-renders both boards from every
// server message (the server sends the full game state each time).

$(document).ready( function () {
    var containerDiv = document.getElementById("shootsC");
    containerDiv.addEventListener("click", shootHere, false);
});

// Click handler for the shooting grid. Cell ids presumably have a 3-char
// prefix followed by a 2-char coordinate (slice(3, 5)) -- TODO confirm the
// id format against the markup that builds the grid.
function shootHere(event) {
    var str = event.target.id;
    str = str.slice(3, 5);
    console.log("shooting on cell: "+str);
    var msg = { type: "shot", value: str };
    // NOTE(review): this log fires on EVERY click, not only on already-shot
    // cells -- it probably belongs in an else branch of the check below.
    console.log("cannot shot on already shoot cell");
    // Only forward the shot when the cell has not been hit or missed yet.
    if((event.target.className != "isHit")&&(event.target.className != "isMiss")){
        socket.send(JSON.stringify(msg));
    }
}

// NOTE(review): hard-coded endpoint; ws://0.0.0.0:3000 only works against a
// local server -- consider deriving the host from window.location.
var socket = new WebSocket("ws://0.0.0.0:3000");

socket.onmessage = function(event){
    console.log(event.data);
    var data = JSON.parse(event.data);
    console.log(data);

    // CHECK WIN (myStatus: 1 = this player won, 2 = this player lost)
    if(data.myStatus == 1) alert("You WON the game, reload the page to start a new one");
    if(data.myStatus == 2) alert("You LOST the game, reload the page to start a new one");

    // UPDATE UI: enable the shooting grid only on this player's turn.
    var userText = document.getElementById("userText");
    if(data['isPlaying']){
        userText.innerHTML = "It's Your Turn";
        document.getElementById("shootsC").style.pointerEvents = "auto";
        document.getElementById("shootsC").style.cursor = "crosshair";
    }
    else {
        userText.innerHTML = "It's The Opponent Turn";
        document.getElementById("shootsC").style.pointerEvents = "none";
        document.getElementById("shootsC").style.cursor = "not-allowed";
    }

    // UPDATE SHIPS NUMBER: count surviving (not sunk) ships per size
    // (size 2 = destroyer, 3 = submarine, 4 = carrier).
    if(data.enemyShips){
        var enemyDestroyer = 0;
        var enemySubmarine = 0;
        var enemyCarrier = 0;
        for(var i=0;i<data.enemyShips.length;i++){
            if((data.enemyShips[i].size == 2) && (data.enemyShips[i].sunk == false)){
                enemyDestroyer++;
            }
            else if((data.enemyShips[i].size == 3) && (data.enemyShips[i].sunk == false)){
                enemySubmarine++;
            }
            else if((data.enemyShips[i].size == 4) && (data.enemyShips[i].sunk == false)){
                enemyCarrier++;
            }
        }
        document.getElementById("enemyDestroyer").innerHTML = enemyDestroyer;
        document.getElementById("enemySubmarine").innerHTML = enemySubmarine;
        document.getElementById("enemyCarrier").innerHTML = enemyCarrier;

        var playerDestroyer = 0;
        var playerSubmarine = 0;
        var playerCarrier = 0;
        for(var i=0;i<data.ships.length;i++){
            if((data.ships[i].size == 2) && (data.ships[i].sunk == false)){
                playerDestroyer++;
            }
            else if((data.ships[i].size == 3) && (data.ships[i].sunk == false)){
                playerSubmarine++;
            }
            else if((data.ships[i].size == 4) && (data.ships[i].sunk == false)){
                playerCarrier++;
            }
        }
        document.getElementById("playerDestroyer").innerHTML = playerDestroyer;
        document.getElementById("playerSubmarine").innerHTML = playerSubmarine;
        document.getElementById("playerCarrier").innerHTML = playerCarrier;
    }

    // UPDATE SHIPS GRID -- shipsGrid cell codes: 2 = hit, 3 = miss
    // (presumably; inferred from the branches below, confirm with the server).
    var containerDiv = document.getElementById("shipsC");
    var innerDivs = containerDiv.getElementsByTagName("DIV");
    for(var i=0; i<innerDivs.length; i++) {
        if(data['shipsGrid'][i] == 3) innerDivs[i].className = "isBoatMiss ";
    }
    data.ships.forEach(function each(ship) {
        // orientation truthy = horizontal (cells x, x+1, ...);
        // falsy = vertical (cells x, x+10, ... on a 10-wide grid).
        if(ship.orientation){
            for(var i=0;i<ship.size;i++){
                switch(ship.size){
                    case 2 :
                        if(data['shipsGrid'][ship.x+i] == 2) {
                            innerDivs[ship.x+i].className = "isBoatHit isBoatSmall"+i;
                        }
                        else {
                            innerDivs[ship.x+i].className = "isBoatSmall"+i;
                        }
                        break;
                    case 3 :
                        if(data['shipsGrid'][ship.x+i] == 2){
                            innerDivs[ship.x+i].className = "isBoatHit isBoatMid"+i;
                        }
                        else {
                            innerDivs[ship.x+i].className = "isBoatMid"+i;
                        }
                        break;
                    case 4 :
                        if(data['shipsGrid'][ship.x+i] == 2){
                            innerDivs[ship.x+i].className = "isBoatHit isBoatBig"+i;
                        }
                        else {
                            innerDivs[ship.x+i].className = "isBoatBig"+i;
                        }
                        break;
                }
            }
        }
        else {
            for(var i=0;i<ship.size;i++){
                switch(ship.size){
                    case 2 :
                        if(data['shipsGrid'][ship.x+(i*10)] == 2) innerDivs[ship.x+(i*10)].className = "isBoatHit isBoatSmall"+i+" isBoatVertical";
                        else innerDivs[ship.x+(i*10)].className = "isBoatSmall"+i+" isBoatVertical";
                        break;
                    case 3 :
                        if(data['shipsGrid'][ship.x+(i*10)] == 2) innerDivs[ship.x+(i*10)].className = "isBoatHit isBoatMid"+i+" isBoatVertical";
                        else innerDivs[ship.x+(i*10)].className = "isBoatMid"+i+" isBoatVertical";
                        break;
                    case 4 :
                        if(data['shipsGrid'][ship.x+(i*10)] == 2) innerDivs[ship.x+(i*10)].className = "isBoatHit isBoatBig"+i+" isBoatVertical";
                        else innerDivs[ship.x+(i*10)].className = "isBoatBig"+i+" isBoatVertical";
                        break;
                }
            }
        }
    });

    // UPDATE SHOTS GRID -- shots[i]: 1 = hit, 2 = miss.
    var containerDiv = document.getElementById("shootsC");
    var innerDivs = containerDiv.getElementsByTagName("DIV");
    for(var i=0; i<innerDivs.length; i++) {
        if(data['shots'][i] == 1){
            innerDivs[i].className = "isHit";
            // NOTE(review): the click listener was added to the *container*,
            // not to the individual cells, so this removeEventListener is a
            // no-op; the isHit/isMiss class check in shootHere is what
            // actually prevents re-shooting a cell.
            innerDivs[i].removeEventListener("click", shootHere);
        }
        if(data['shots'][i] == 2){
            innerDivs[i].className = "isMiss";
            innerDivs[i].removeEventListener("click", shootHere);
        }
    }
}
from editor.attributes.player.player_attribute import (
    PlayerAttribute,
    PlayerAttributeTypes,
)


class PlayerAttributeSs(PlayerAttribute):
    """Positional "SS" attribute.

    Storage is delegated entirely to the parent attribute, whose value and
    label encode both SS and short-pass accuracy; this class exposes only
    the SS half of that pair.
    """

    @classmethod
    def att_class_name(cls):
        return "SS"

    @classmethod
    def att_class_type(cls):
        return PlayerAttributeTypes.Position

    def get_raw_value(self):
        # Raw and displayed values coincide for this attribute.
        return self.parent.get_value()

    def get_value(self):
        return self.parent.get_value()

    def get_label(self):
        """Return the SS part of the parent's label pair (SS is stored first)."""
        return self.parent.get_label()[0]

    def set_value(self, value):
        return self.parent.set_value(value)

    def set_value_from_label(self, label):
        """Rebuild the parent's (SS, short-pass-accuracy) label pair and store it."""
        pass_label = self.parent.short_pass_accuracy.get_label()
        return self.parent.set_value_from_label((label, pass_label))
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='tricircle') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical
import React from "react"
import { graphql } from "gatsby"

import LandingPageLayout from "../layouts/LandingPageLayout"

import HeroSection from "../sections/hero"
import ExchangeSection from "../sections/exchanges"
import BenefitSection from "../sections/benefit"
import FeaturesListSection from "../sections/features-list"
import ReviewsSection from "../sections/reviews/Reviews"
import CoinCryptorSection from "../sections/coin-cryptor"
import PeopleSayingSection from "../sections/people-saying"
import PeopleSayingSection2 from "../sections/people-saying2"
import FeatureCTASection from "../sections/feature-cta"
import PeopleSayingSection3 from "../sections/people-saying3"
import BestCryptorSection from "../sections/best-cryptor"
import LevelUpCTASection from "../sections/level-up-cta"
import TradingSoftwareSection from "../sections/trading-software"
import Seo from "../components/seo"

import {
  heroSectionData,
  benefitSection,
  benefitSectionData,
  featuresSection,
  featuresListData,
  reviewsData,
  testimonials1,
  testimonials2,
  benoistTestimonial,
  bestCryptorSection,
  softwareListData,
} from "../utils/staticData"

// Copy for the "Level Up" call-to-action section further down the page
// (not used by the <Seo> tag, which carries its own hard-coded title).
const title = `Level Up Now`
const description = `Overwhelmed with the complexity of the cryptocurrency world?
Bring your trading experience to another level with the power of cryptocurrency trading software Altrady, your best bitcoin trading platform choice`

// Landing page: stacks the marketing sections in order, feeding each one
// either static copy (utils/staticData) or Prismic content from the page
// query exported below.
export default function Home({ data }) {
  const blogCategories = data.allPrismicBlogCategory.nodes
  // Eight most recent posts (the query sorts by date, descending).
  const bestCryptorSectionData = data.allPrismicBlogPostApi.nodes.slice(0, 8)
  // Hide exchanges that are not live yet.
  const exchangeData = data?.allPrismicExchanges.nodes.filter(
    item => item.data.upcoming === false
  )

  return (
    <LandingPageLayout>
      <Seo title="Altrady Crypto Trading Software is Fast, Easy & Secure" />
      <HeroSection heroSectionData={heroSectionData} />
      <ExchangeSection exchangeData={exchangeData} />
      <BenefitSection
        title={benefitSection.title}
        content={benefitSection.content}
        benefitSectionData={benefitSectionData}
      />
      <FeaturesListSection
        title={featuresSection.title}
        content={featuresSection.content}
        featuresListData={featuresListData}
      />
      <ReviewsSection {...reviewsData} />
      <CoinCryptorSection />
      <PeopleSayingSection {...testimonials1} />
      <PeopleSayingSection2 {...testimonials2} />
      <FeatureCTASection />
      <PeopleSayingSection3 {...benoistTestimonial} />
      <BestCryptorSection
        title={bestCryptorSection.title}
        content={bestCryptorSection.content}
        categories={blogCategories}
        data={bestCryptorSectionData}
      />
      <LevelUpCTASection
        title={title}
        description={description}
        button="Why Altrady?"
      />
      <TradingSoftwareSection {...softwareListData} />
    </LandingPageLayout>
  )
}

// Page query: exchanges (ordered by position), all blog posts (newest first)
// and blog categories, all sourced from Prismic.
export const query = graphql`
  query Home {
    allPrismicExchanges(sort: { order: ASC, fields: data___position }) {
      nodes {
        data {
          name
          slug
          icon {
            url
          }
          partner
          upcoming
        }
      }
    }
    allPrismicBlogPostApi(sort: { fields: data___date, order: DESC }) {
      nodes {
        data {
          title
          slug
          featured
          date
          category {
            id
          }
          content {
            type
            text
            url
          }
          summary
          thumbnail {
            url
          }
        }
      }
    }
    allPrismicBlogCategory {
      nodes {
        data {
          color
          name
          slug
          meta_description
          meta_keywords
          meta_title
        }
        prismicId
      }
    }
  }
`
"""Optimization modules.

This package contains optimization algorithms used to tune image
processing parameters.
"""
import xadmin

from .models import Link, SideBar
from typeidea.base_admin import BaseOwnerAdmin


@xadmin.sites.register(Link)
class LinkAdmin(BaseOwnerAdmin):
    """xadmin configuration for sidebar links."""

    list_display = ('title', 'href', 'status', 'weight', 'created_time')
    fields = ('title', 'href', 'status', 'weight')

    def save_model(self, request, obj, form, change):
        # Stamp the editing user as owner before delegating to the base admin.
        obj.owner = request.user
        base = super(LinkAdmin, self)
        return base.save_model(request, obj, form, change)


@xadmin.sites.register(SideBar)
class SideBarAdmin(BaseOwnerAdmin):
    """xadmin configuration for sidebar widgets."""

    list_display = ('title', 'display_type', 'content', 'created_time')
    fields = ('title', 'display_type', 'content')

    def save_model(self, request, obj, form, change):
        # Stamp the editing user as owner before delegating to the base admin.
        obj.owner = request.user
        base = super(SideBarAdmin, self)
        return base.save_model(request, obj, form, change)
"""Package init: re-export the converter and pbivcs submodules plus version metadata."""
from . import converters, pbivcs
from . import version

# __all__ must contain *names* (strings), not module objects: with module
# objects, ``from package import *`` raises TypeError because the import
# machinery calls getattr(package, name) with each entry.
__all__ = [
    "converters",
    "pbivcs",
]

__version__ = version.version
__title__ = version.project
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.ic_app_settings_alt_outline = void 0; var ic_app_settings_alt_outline = { "viewBox": "0 0 24 24", "children": [{ "name": "g", "attribs": {}, "children": [{ "name": "rect", "attribs": { "fill": "none", "height": "24", "width": "24" }, "children": [{ "name": "rect", "attribs": { "fill": "none", "height": "24", "width": "24" }, "children": [] }] }, { "name": "rect", "attribs": { "fill": "none", "height": "1", "width": "10", "x": "7", "y": "20" }, "children": [{ "name": "rect", "attribs": { "fill": "none", "height": "1", "width": "10", "x": "7", "y": "20" }, "children": [] }] }, { "name": "rect", "attribs": { "fill": "none", "height": "1", "width": "10", "x": "7", "y": "3" }, "children": [{ "name": "rect", "attribs": { "fill": "none", "height": "1", "width": "10", "x": "7", "y": "3" }, "children": [] }] }, { "name": "path", "attribs": { "d": "M21.81,12.74l-0.82-0.63c0-0.09,0-0.13,0-0.22l0.8-0.63c0.16-0.12,0.2-0.34,0.1-0.51l-0.85-1.48 c-0.07-0.13-0.21-0.2-0.35-0.2c-0.05,0-0.1,0.01-0.15,0.03l-0.95,0.38c-0.08-0.05-0.11-0.07-0.19-0.11l-0.15-1.01 C19.22,8.15,19.05,8,18.85,8h-1.71c-0.2,0-0.37,0.15-0.4,0.34L16.6,9.35c-0.03,0.02-0.07,0.03-0.1,0.05 c-0.03,0.02-0.06,0.04-0.09,0.06l-0.95-0.38c-0.05-0.02-0.1-0.03-0.15-0.03c-0.14,0-0.27,0.07-0.35,0.2l-0.85,1.48 c-0.1,0.17-0.06,0.39,0.1,0.51l0.8,0.63c0,0.09,0,0.13,0,0.23l-0.8,0.63c-0.16,0.12-0.2,0.34-0.1,0.51l0.85,1.48 c0.07,0.13,0.21,0.2,0.35,0.2c0.05,0,0.1-0.01,0.15-0.03l0.95-0.37c0.08,0.05,0.12,0.07,0.2,0.11l0.15,1.01 c0.03,0.2,0.2,0.34,0.4,0.34h1.71c0.2,0,0.37-0.15,0.4-0.34l0.15-1.01c0.03-0.02,0.07-0.03,0.1-0.05c0.03-0.02,0.06-0.04,0.09-0.06 l0.95,0.38c0.05,0.02,0.1,0.03,0.15,0.03c0.14,0,0.27-0.07,0.35-0.2l0.85-1.48C22.01,13.08,21.97,12.86,21.81,12.74z M18,13.5 c-0.83,0-1.5-0.67-1.5-1.5c0-0.83,0.67-1.5,1.5-1.5s1.5,0.67,1.5,1.5C19.5,12.83,18.83,13.5,18,13.5z M17,18H7V6h10v1h2V3 
c0-1.1-0.9-2-2-2H7C5.9,1,5,1.9,5,3v18c0,1.1,0.9,2,2,2h10c1.1,0,2-0.9,2-2v-4h-2V18z M7,3h10v1H7V3z M17,21H7v-1h10V21z" }, "children": [{ "name": "path", "attribs": { "d": "M21.81,12.74l-0.82-0.63c0-0.09,0-0.13,0-0.22l0.8-0.63c0.16-0.12,0.2-0.34,0.1-0.51l-0.85-1.48 c-0.07-0.13-0.21-0.2-0.35-0.2c-0.05,0-0.1,0.01-0.15,0.03l-0.95,0.38c-0.08-0.05-0.11-0.07-0.19-0.11l-0.15-1.01 C19.22,8.15,19.05,8,18.85,8h-1.71c-0.2,0-0.37,0.15-0.4,0.34L16.6,9.35c-0.03,0.02-0.07,0.03-0.1,0.05 c-0.03,0.02-0.06,0.04-0.09,0.06l-0.95-0.38c-0.05-0.02-0.1-0.03-0.15-0.03c-0.14,0-0.27,0.07-0.35,0.2l-0.85,1.48 c-0.1,0.17-0.06,0.39,0.1,0.51l0.8,0.63c0,0.09,0,0.13,0,0.23l-0.8,0.63c-0.16,0.12-0.2,0.34-0.1,0.51l0.85,1.48 c0.07,0.13,0.21,0.2,0.35,0.2c0.05,0,0.1-0.01,0.15-0.03l0.95-0.37c0.08,0.05,0.12,0.07,0.2,0.11l0.15,1.01 c0.03,0.2,0.2,0.34,0.4,0.34h1.71c0.2,0,0.37-0.15,0.4-0.34l0.15-1.01c0.03-0.02,0.07-0.03,0.1-0.05c0.03-0.02,0.06-0.04,0.09-0.06 l0.95,0.38c0.05,0.02,0.1,0.03,0.15,0.03c0.14,0,0.27-0.07,0.35-0.2l0.85-1.48C22.01,13.08,21.97,12.86,21.81,12.74z M18,13.5 c-0.83,0-1.5-0.67-1.5-1.5c0-0.83,0.67-1.5,1.5-1.5s1.5,0.67,1.5,1.5C19.5,12.83,18.83,13.5,18,13.5z M17,18H7V6h10v1h2V3 c0-1.1-0.9-2-2-2H7C5.9,1,5,1.9,5,3v18c0,1.1,0.9,2,2,2h10c1.1,0,2-0.9,2-2v-4h-2V18z M7,3h10v1H7V3z M17,21H7v-1h10V21z" }, "children": [] }] }] }] }; exports.ic_app_settings_alt_outline = ic_app_settings_alt_outline;
/**
 * Cookie.js
 *
 * Copyright 2009, Moxiecode Systems AB
 * Released under LGPL License.
 *
 * License: http://tinymce.moxiecode.com/license
 * Contributing: http://tinymce.moxiecode.com/contributing
 */

(function() {
	var each = tinymce.each;

	/**
	 * This class contains simple cookie manangement functions.
	 *
	 * NOTE(review): escape/unescape are deprecated but kept for backward
	 * compatibility with cookies written by older versions of this class.
	 *
	 * @class tinymce.util.Cookie
	 * @static
	 * @example
	 * // Gets a cookie from the browser
	 * console.debug(tinymce.util.Cookie.get('mycookie'));
	 *
	 * // Gets a hash table cookie from the browser and takes out the x parameter from it
	 * console.debug(tinymce.util.Cookie.getHash('mycookie').x);
	 *
	 * // Sets a hash table cookie to the browser
	 * tinymce.util.Cookie.setHash({x : '1', y : '2'});
	 */
	tinymce.create('static tinymce.util.Cookie', {
		/**
		 * Parses the specified query string into an name/value object.
		 *
		 * @method getHash
		 * @param {String} n String to parse into a n Hashtable object.
		 * @return {Object} Name/Value object with items parsed from querystring, or undefined when the cookie is missing/empty.
		 */
		getHash : function(n) {
			var v = this.get(n), h;

			if (v) {
				each(v.split('&'), function(v) {
					v = v.split('=');
					h = h || {};

					h[unescape(v[0])] = unescape(v[1]);
				});
			}

			return h;
		},

		/**
		 * Sets a hashtable name/value object to a cookie.
		 *
		 * @method setHash
		 * @param {String} n Name of the cookie.
		 * @param {Object} v Hashtable object to set as cookie.
		 * @param {Date} e Optional date object for the expiration of the cookie.
		 * @param {String} p Optional path to restrict the cookie to.
		 * @param {String} d Optional domain to restrict the cookie to.
		 * @param {String} s Is the cookie secure or not.
		 */
		setHash : function(n, v, e, p, d, s) {
			var o = '';

			each(v, function(v, k) {
				o += (!o ? '' : '&') + escape(k) + '=' + escape(v);
			});

			this.set(n, o, e, p, d, s);
		},

		/**
		 * Gets the raw data of a cookie by name.
		 *
		 * @method get
		 * @param {String} n Name of cookie to retrieve.
		 * @return {String} Cookie data string.
		 */
		get : function(n) {
			var c = document.cookie, e, p = n + "=", b;

			// Strict mode
			if (!c)
				return;

			b = c.indexOf("; " + p);

			if (b == -1) {
				b = c.indexOf(p);

				// Only accept a match at the very start of the cookie string;
				// anything else would be a name that merely ends with n.
				if (b != 0)
					return null;
			} else
				b += 2;

			e = c.indexOf(";", b);

			if (e == -1)
				e = c.length;

			return unescape(c.substring(b + p.length, e));
		},

		/**
		 * Sets a raw cookie string.
		 *
		 * @method set
		 * @param {String} n Name of the cookie.
		 * @param {String} v Raw cookie data.
		 * @param {Date} e Optional date object for the expiration of the cookie.
		 * @param {String} p Optional path to restrict the cookie to.
		 * @param {String} d Optional domain to restrict the cookie to.
		 * @param {String} s Is the cookie secure or not.
		 */
		set : function(n, v, e, p, d, s) {
			document.cookie = n + "=" + escape(v) +
				((e) ? "; expires=" + e.toGMTString() : "") +
				((p) ? "; path=" + escape(p) : "") +
				((d) ? "; domain=" + d : "") +
				((s) ? "; secure" : "");
		},

		/**
		 * Removes/deletes a cookie by name.
		 *
		 * @method remove
		 * @param {String} n Cookie name to remove/delete.
		 * @param {String} p Optional path to remove the cookie from.
		 */
		remove : function(n, p) {
			var d = new Date();

			d.setTime(d.getTime() - 1000);

			// Expire the cookie. Bug fix: the Date object was previously also
			// passed as the 5th (domain) argument, which emitted a bogus
			// "domain=<date string>" pair and could prevent the removal.
			this.set(n, '', d, p);
		}
	});
})();
#include <stdio.h> #include <pthread.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <openssl/md5.h> #include <unistd.h> #include <stdbool.h> /* Program description: * Program is performing dictionary attack on given hashed passwords. It creates 8 threads in total, first thread (consumer) * receives cracked passwords from cracking threads and then prints it, this thread is communicating with other threads * by conditional value. Other threads generate passwords using given or default dictionary, every thread is generating * passwords using different method. Three basic cracking threads create passwords from different latter cases and adds * numbers before or/and after generated word. Another three threads generate two word passwords separated by " ", "2", "4" * or nothing, words are modified same as in basic threads. The last thread generates numeric passwords. Main loop is * reading user input. Program may be reset by typing a path to new passwords file. Program prints statistics after typing * "stats" or sending a SIGHUP signal also prints on program exit and passwords file reset. User may exit the program * typing "exit" (safest) or by sending SIGINT (CTRL + C) but main hangs on reading input so something must be typed to * break main loop. Main in the end is performing a cleaning especially memory freeing. 
*/ /* * dictionaries: https://web.archive.org/web/20120207113205/http://www.insidepro.com/eng/download.shtml * -> InsidePro (Mini) -> very good (1.9 MB) 140k passwords * -> Facebook (Words) -> better but heavy (19 MB) 2M passwords * openssl library: sudo apt-get install openssl-dev * compile flags: -lssl -lcrypto -pthread */ #define USAGE "usage: ./pass_cr passwords_file [dictionary_file]\n" #define DEFAULT_DICTIONARY "inside_pro_mini.dic" #define BUF_SIZE 64 #define NUM_THREADS 8 #define U_LONG_MAX 4294967295 /* mutex for thread count (thread id), conditional value and associated mutex for consumer cracking threads communication, * read write lock for threads reset after reading a new password file */ pthread_mutex_t count_mutex; pthread_mutex_t cond_mutex; pthread_cond_t pass_cracked_cv; pthread_rwlock_t tsd_rwlock; /* global flags */ static volatile bool running = true; static volatile bool show_stats = false; static volatile bool reset = false; /* structure containing all necessary data for threads, created in main and passed as an argument to all threads */ struct thread_shared_data{ char** dict_ptr; char** pass_ptr; char** br_pass_ptr; bool* is_cracked_ptr; unsigned long dict_size; unsigned long pass_size; unsigned long br_pass_size; unsigned int basic_thread_count; unsigned int two_word_thread_count; }; /* function that exits application, also handler for SIGINT (CTRL + C) */ void quit_program() { printf("\nQuiting\n"); running = false; show_stats = true; pthread_cond_broadcast(&pass_cracked_cv); } /* function that makes consumer thread print statistics, also handler for SIGHUP */ void print_statistics() { show_stats = true; pthread_mutex_lock(&cond_mutex); pthread_cond_broadcast(&pass_cracked_cv); pthread_mutex_unlock(&cond_mutex); } /* function that makes a hash of a given string using MD5 algorithm */ void md5_hash(char * in_str, char ** out_str) { if(*out_str != NULL) free(*out_str); *out_str = (char *)malloc(33*sizeof(char)); unsigned char digest[16]; 
MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, in_str, strlen(in_str)); MD5_Final(digest, &ctx); for(int n = 0; n < 16; ++n) sprintf(&(*out_str)[n*2],"%02x", (unsigned int)digest[n]); } /* thread that waits for broken passwords, when password is received thread prints password, this thread also * prints statistics that are reset after loading new passwords */ void *consumer_thread(void *arg) { struct thread_shared_data* my_tsd; my_tsd = (struct thread_shared_data*) arg; while(running) { pthread_mutex_lock(&cond_mutex); pthread_cond_wait(&pass_cracked_cv, &cond_mutex); if(!show_stats) { /* print cracked password */ if(my_tsd->br_pass_size > 0) printf("consumer thread: cracked password received: %s\n", my_tsd->br_pass_ptr[my_tsd->br_pass_size - 1]); } else if(show_stats) { /* print statistics */ printf("consumer thread: stats -> cracked %lu of %lu %.2f%%\n",my_tsd->br_pass_size, my_tsd->pass_size, ((double) my_tsd->br_pass_size / (double) my_tsd->pass_size * 100)); show_stats = false; } pthread_mutex_unlock(&cond_mutex); } printf("consumer thread: exit\n"); pthread_exit(NULL); } /* function that checks if generated passwords is the same as loaded */ void check_passwords(char * created_pass, struct thread_shared_data * my_tsd, unsigned int id) { char * hashed_pass = NULL; md5_hash(created_pass, &hashed_pass); for(unsigned long j = 0; j < my_tsd->pass_size && running; j++) { if(my_tsd->is_cracked_ptr[j] == true) continue; // skip already broken password if(!strcmp(my_tsd->pass_ptr[j], hashed_pass)) { /* password broken */ pthread_mutex_lock(&cond_mutex); printf("cracking thread %u: password broken -> %s\n", id, created_pass); my_tsd->is_cracked_ptr[j] = true; my_tsd->br_pass_ptr = realloc(my_tsd->br_pass_ptr, (++my_tsd->br_pass_size)*sizeof(char *)); my_tsd->br_pass_ptr[my_tsd->br_pass_size - 1] = (char*)malloc((strlen(created_pass)+1)*sizeof(char)); strcpy(my_tsd->br_pass_ptr[my_tsd->br_pass_size - 1], created_pass); pthread_cond_signal(&pass_cracked_cv); 
pthread_mutex_unlock(&cond_mutex); } } free(hashed_pass); } /* function that changes all string uppercase letters to lowercase letters */ void to_lowercase(char ** str) { for(int i=0; i < strlen(*str); i++) if((*str)[i] <= 'Z' && (*str)[i] >= 'A' ) (*str)[i] += 32; } /* function that changes all string lowercase letters to uppercase letters */ void to_uppercase(char ** str) { for(int i=0; i < strlen(*str); i++) if((*str)[i] <= 'z' && (*str)[i] >= 'a' ) (*str)[i] -= 32; } /* function change string depending on thread id * id == 0 -> all lowercase * id == 1 -> first uppercase * id == 2 -> all uppercase */ void change_string_by_id(unsigned int id, char ** str) { switch (id) { case 0: // all lowercase { to_lowercase(str); break; } case 1: // first letter uppercase { to_lowercase(str); unsigned int i=0; while((*str)[i] > 'z' || (*str)[i] < 'a') i++; (*str)[i] -= 32; break; } case 2: // all uppercase { to_uppercase(str); break; } } } /* function resets cracking threads depending on thread id * arguments: thread id and pointers to all values that needs to be reset * if thread doesnt have one of those values use NULL */ void reset_thread(unsigned int id, unsigned long * num, bool * first_loop, unsigned long * i) { printf("cracking thread %d: reset\n", id); if(id >= 0 && id <= 2) // cracking_thread_basic { *num = 0; *first_loop = true; } else if(id == 3) // cracking_thread_numbers { *num = 0; } else if(id >= 4 && id <= 6) // cracking_thread_two_words { *i = 0; } while(reset) sleep(1); printf("cracking thread %d: start\n", id); } /* thread that cracks passwords, thread adds numbers before or/and after every word (except first loop), * words from dictionary are modified depending on thread id * id == 0 -> all lowercase * id == 1 -> first uppercase * id == 2 -> all uppercase */ void *cracking_thread_basic(void *arg) { struct thread_shared_data* my_tsd; my_tsd = (struct thread_shared_data*) arg; pthread_mutex_lock(&count_mutex); unsigned int id = my_tsd->basic_thread_count; 
my_tsd->basic_thread_count++; pthread_mutex_unlock(&count_mutex); char * created_pass = NULL; unsigned long num = 0; bool first_loop = true; char num_buf[11]; // unsigned long range [0, 4294967295] -> 10 chars + '/0' while(running) { if(first_loop) { first_loop = false; for(unsigned long i = 0; i < my_tsd->dict_size && running; i++) { created_pass = (char *) malloc((strlen(my_tsd->dict_ptr[i]) + 1) * sizeof(char)); strcpy(created_pass, my_tsd->dict_ptr[i]); change_string_by_id(id, &created_pass); if(reset) { reset_thread(id, &num, &first_loop, NULL); free(created_pass); break; } pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, id); pthread_rwlock_unlock(&tsd_rwlock); free(created_pass); } } else { for(unsigned long i = 0; i < my_tsd->dict_size && running; i++) { if(reset) { reset_thread(id, &num, &first_loop, NULL); free(created_pass); break; } sprintf(num_buf, "%lu", num); created_pass = (char *)malloc((strlen(my_tsd->dict_ptr[i])+strlen(num_buf)+1)*sizeof(char)); /* dictionary word number */ strcat(created_pass, my_tsd->dict_ptr[i]); strcat(created_pass, num_buf); change_string_by_id(id, &created_pass); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, id); pthread_rwlock_unlock(&tsd_rwlock); /* clear buffer */ sprintf(created_pass, ""); /* number dictionary word */ strcat(created_pass, num_buf); strcat(created_pass, my_tsd->dict_ptr[i]); change_string_by_id(id, &created_pass); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, id); pthread_rwlock_unlock(&tsd_rwlock); /* number dictionary word number */ // not effective (zero passwords cracked) /*created_pass = (char *)realloc(created_pass ,(strlen(my_tsd->dict_ptr[i])+2*strlen(num_buf)+1)*sizeof(char)); strcat(created_pass, num_buf); change_string_by_id(id, &created_pass); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, id); pthread_rwlock_unlock(&tsd_rwlock); free(created_pass);*/ } if(num == U_LONG_MAX) num = 0; 
else num++; } } printf("cracking thread %u: exit\n", id); pthread_exit(NULL); } /* thread that cracks 2 word passwords, words are separated by space or 2 or 4 or nothing * words from dictionary are modified depending on thread id * id == 0 -> all lowercase * id == 1 -> first uppercase * id == 2 -> all uppercase */ // not effective (zero passwords cracked) void *cracking_thread_two_words(void *arg) { struct thread_shared_data* my_tsd; my_tsd = (struct thread_shared_data*) arg; pthread_mutex_lock(&count_mutex); unsigned int local_id = my_tsd->two_word_thread_count; my_tsd->two_word_thread_count++; pthread_mutex_unlock(&count_mutex); unsigned int global_id = local_id + 4; char * created_pass = NULL; char * first_word = NULL; char * second_word = NULL; char separators[6] = " 24"; while(running) { for(unsigned long i = 0; i < my_tsd->dict_size && running; i++) { unsigned long first_size = strlen(my_tsd->dict_ptr[i]); first_word = (char *)malloc(first_size*sizeof(char)+1); strcpy(first_word, my_tsd->dict_ptr[i]); for(unsigned long j = i; j < my_tsd->dict_size && running; j++) { if(reset) { reset_thread(global_id, NULL, NULL, &i); break; } unsigned long second_size = strlen(my_tsd->dict_ptr[j]); second_word = (char *)malloc(second_size*sizeof(char)+1); strcpy(second_word, my_tsd->dict_ptr[j]); created_pass = (char *)malloc((first_size + second_size + 2)*sizeof(char)); /* no separator */ /* first_word second_word */ change_string_by_id(local_id, &first_word); change_string_by_id(local_id, &second_word); strcat(created_pass, first_word); strcat(created_pass, second_word); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, global_id); pthread_rwlock_unlock(&tsd_rwlock); /* clear buffer */ sprintf(created_pass, ""); /* second_word first_word */ change_string_by_id(local_id, &first_word); change_string_by_id(local_id, &second_word); strcat(created_pass, first_word); strcat(created_pass, second_word); pthread_rwlock_rdlock(&tsd_rwlock); 
check_passwords(created_pass, my_tsd, global_id); pthread_rwlock_unlock(&tsd_rwlock); /* clear buffer */ sprintf(created_pass, ""); /* with separator */ for(int k = 0; k < strlen(separators) && running; k++) { /* first_word second_word */ change_string_by_id(local_id, &first_word); change_string_by_id(local_id, &second_word); strcat(created_pass, first_word); strncat(created_pass, &(separators[k]), 1); strcat(created_pass, second_word); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, global_id); pthread_rwlock_unlock(&tsd_rwlock); /* clear buffer */ sprintf(created_pass, ""); /* second_word first_word */ change_string_by_id(local_id, &first_word); change_string_by_id(local_id, &second_word); strcat(created_pass, first_word); strncat(created_pass, &(separators[k]), 1); strcat(created_pass, second_word); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(created_pass, my_tsd, global_id); pthread_rwlock_unlock(&tsd_rwlock); /* clear buffer */ sprintf(created_pass, ""); } free(second_word); } free(first_word); } } printf("cracking thread %u: exit\n", global_id); pthread_exit(NULL); } /* thread that cracks passwords, thread cracks passwords containing only numbers */ void *cracking_thread_numbers(void *arg) { struct thread_shared_data* my_tsd; my_tsd = (struct thread_shared_data*) arg; unsigned int id = 3; unsigned long num = 0; char num_buf[11]; // unsigned long range [0, 4294967295] -> 10 chars + '/0' while(running) { if(reset) reset_thread(id, &num, NULL, NULL); sprintf(num_buf, "%lu", num); pthread_rwlock_rdlock(&tsd_rwlock); check_passwords(num_buf, my_tsd, id); pthread_rwlock_unlock(&tsd_rwlock); if(num == U_LONG_MAX) num = 0; else num++; } } /* function opens a file of given name, create dynamic array containing all lines of file that can be accessed by * pointer given in argument, also size can be accessed similarly * returns 0 if succeed otherwise -1 */ int read_file(char * file_name, char *** container_ptr, unsigned long * size) { 
FILE * file = fopen(file_name, "r"); if(file == NULL) return -1; char *str = (char*)malloc(BUF_SIZE*sizeof(char)); *size = 0; while(fscanf(file, "%[^\r\n] ", str) != EOF) { *container_ptr = realloc(*container_ptr, (++(*size)) * sizeof(char *)); (*container_ptr)[*size - 1] = (char*)malloc((strlen(str)+1)*sizeof(char)); strcpy((*container_ptr)[*size - 1], str); } free(str); pclose(file); return 0; } int main(int argc, char * argv[]) { pthread_t threads[NUM_THREADS]; struct thread_shared_data tsd; tsd.dict_ptr = NULL; tsd.pass_ptr = NULL; tsd.br_pass_ptr = NULL; tsd.is_cracked_ptr = NULL; tsd.br_pass_size = 0; tsd.basic_thread_count = 0; tsd.two_word_thread_count = 0; /* check if arguments were given*/ if(argc <= 1) { printf("main: passwords_file argument not given\n"); printf(USAGE); exit(1); } /* read dictionary into memory */ /* check if dictionary was given in arguments */ if(argc >= 3) { if(read_file(argv[2], &tsd.dict_ptr, &tsd.dict_size) == -1) { printf("main: dictionary file: %s not found\n", argv[2]); exit(1); } } else /* read default dictionary */ if(read_file(DEFAULT_DICTIONARY, &tsd.dict_ptr, &tsd.dict_size) == -1) { printf("main: default dictionary file: %s not found\n", DEFAULT_DICTIONARY); printf(USAGE); exit(1); } /* read hashed passwords into memory */ if(read_file(argv[1], &tsd.pass_ptr, &tsd.pass_size) == -1) { printf("main: passwords file: %s not found\n", argv[1]); exit(1); } else /* allocate is password cracked array and initialize all bits to zero == false */ tsd.is_cracked_ptr = (bool *)calloc(tsd.pass_size, sizeof(bool)); /* signal handling */ signal(SIGINT, quit_program); signal(SIGHUP, print_statistics); /* create threads and initialise mutex, cond value and read write lock */ pthread_mutex_init(&cond_mutex, NULL); pthread_mutex_init(&count_mutex, NULL); pthread_cond_init (&pass_cracked_cv, NULL); pthread_rwlock_init(&tsd_rwlock, NULL); pthread_create(&threads[0], NULL, consumer_thread, (void*) &tsd); pthread_create(&threads[1], NULL, 
cracking_thread_numbers, (void *) &tsd); for(int i = 2; i < NUM_THREADS; i+=2) { pthread_create(&threads[i], NULL, cracking_thread_basic, (void *) &tsd); pthread_create(&threads[i + 1], NULL, cracking_thread_two_words, (void *) &tsd); } /* main loop reading user input */ char *input = (char*)malloc(BUF_SIZE*sizeof(char)); char ** tmp_ptr = NULL; unsigned long tmp_size = 0; while(running) { printf("main: waiting for input:\n"); scanf("%s", input); if(!strcmp(input, "exit")) quit_program(); else if(!strcmp(input, "stats")) print_statistics(); else { if(read_file(input, &tmp_ptr, &tmp_size) == -1) printf("main: passwords file %s not found\n", input); else { /* reset */ printf("main: new passwords file loaded\n"); reset = true; pthread_rwlock_wrlock(&tsd_rwlock); printf("main: reset\n"); print_statistics(); for(unsigned long i = 0; i < tsd.pass_size; i++) free(tsd.pass_ptr[i]); free(tsd.pass_ptr); for(int i = 0; i < tsd.br_pass_size; i++) free(tsd.br_pass_ptr[i]); free(tsd.br_pass_ptr); tsd.br_pass_ptr = NULL; tsd.pass_ptr = tmp_ptr; tsd.pass_size = tmp_size; tmp_ptr = NULL; free(tsd.is_cracked_ptr); tsd.is_cracked_ptr = (bool *)calloc(tsd.pass_size, sizeof(bool)); tsd.br_pass_size = 0; printf("main: start\n"); reset = false; pthread_rwlock_unlock(&tsd_rwlock); } } } free(input); /* cleaning the program */ pthread_mutex_destroy(&count_mutex); pthread_mutex_destroy(&cond_mutex); pthread_cond_destroy(&pass_cracked_cv); pthread_rwlock_destroy(&tsd_rwlock); printf("main: freeing memory\n"); /* free dictionary array */ for(unsigned long i = 0; i < tsd.dict_size; i++) free(tsd.dict_ptr[i]); free(tsd.dict_ptr); /* free passwords array*/ if(tsd.pass_ptr != NULL) { for(unsigned long i = 0; i < tsd.pass_size; i++) free(tsd.pass_ptr[i]); free(tsd.pass_ptr); } /* free broken passwords array */ if(tsd.br_pass_ptr != NULL) for(unsigned long i = 0; i < tsd.br_pass_size; i++) free(tsd.br_pass_ptr[i]); free(tsd.br_pass_ptr); /* free is broken array */ free(tsd.is_cracked_ptr); 
printf("main: exit\n"); pthread_exit(NULL); }
# Generated by Django 2.1.8 on 2019-07-02 17:44 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='GuestEmail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('email', models.EmailField(max_length=254)), ('active', models.BooleanField(default=True)), ('update', models.DateTimeField(auto_now=True)), ('timestamp', models.DateTimeField(auto_now_add=True)), ], ), ]
# Copyright 2020 BULL SAS All rights reserved
# from loguru import logger
from collections import Counter
from logflow.logsparser.Pattern import Pattern
from typing import Dict, List, Tuple
# NOTE(review): duplicate loguru import (see also the commented line above); Counter is imported
# but counting below is done with a plain dict -- candidates for cleanup.
from loguru import logger

# TODO: Test performance of static vs method
class Journal:
    """A journal is a list of logs files. It reads, parses and associates the logs and the pattern.

    Args:
        parser_message (function): Function to split the message part of the line.
        path (str): path to the data
        associated_pattern (bool, optional): Associate or discover the patterns. Note that if associated_pattern is True, dict_patterns must be provided. Defaults to False.
        dict_patterns (dict, optional): Dict of the patterns for the association. Defaults to {}.
        large_file (bool, optional): Optimization for the reading of one large file. Not implemented yet. Defaults to False.
        pointer (int, optional): Optimization for the reading of one large file. Not implemented yet. Defaults to -1.
        encoding (str, optional): Encoding of the files read. Defaults to "latin-1".
        sort_function (function, optional): Function to sort the logs. Defaults to "", means logs are not sorted.
        output (str, optional) : Set the output type. "logpai" to be usable with the benchmark provided by logpai. Defaults return only the ID of log.
    """

    # NOTE(review): dict_patterns = {} is a mutable default argument (shared across calls);
    # it is only read here, but a None default would be safer.
    def __init__(self, parser_message, path : str , associated_pattern=False, dict_patterns = {}, large_file=False, pointer=-1, encoding="latin-1", sort_function="", output=""):
        assert path != ""
        assert parser_message != ""
        if associated_pattern:
            # Association mode requires the previously discovered patterns.
            assert dict_patterns != {}
        # self.list_logs = []
        self.path = path
        if large_file:
            # large_file/pointer support is not implemented yet (see class docstring).
            assert pointer != -1
        self.pointer = pointer
        self.sort_function = sort_function
        self.parser_message = parser_message
        self.encoding = encoding
        # word -> descriptor-string cache used by filter_word.
        self.dict_words_descriptors : Dict[str, str]= {}
        # raw message tuple -> descriptor tuple cache used by count_log.
        self.dict_message : Dict[Tuple[str, ...], Tuple[str, ...]] = {}
        self.associated_pattern = associated_pattern
        self.dict_patterns = dict_patterns
        self.output = output

    def run(self) -> None:
        """Start the process: discover patterns or associate lines with known patterns."""
        if not self.associated_pattern:
            # We discover the patterns: the result is self.counter_logs
            # (descriptor tuple -> occurrence count); the helper caches are dropped afterwards.
            self.counter_logs = {}
            self.dict_message = {}
            self.read_file()
            del self.dict_words_descriptors
            del self.dict_message
            self.dict_words_descriptors = {}
            self.dict_message = {}
        else:
            # We associate lines and patterns: the result is self.list_patterns.
            self.dict_message_associated : Dict[Tuple[str, ...], Pattern]= {}
            self.list_patterns = []
            self.read_file()

    def count_log(self, line : str) -> None:
        """Count the number of same entries according to their descriptors. for space and computation optimization.
        Example using 3 entries :
            "Connexion of user Marc"
            "Connexion of user Marc"
            "Application failure node [1,0,0,2,4]"
        Counter_logs will be : {"Connexion of user Marc":2, "Application failure node [1,0,0,2,4]", 1}.
        To avoid useless computation, we use a dictionnary of line and line's descriptors. We do not compute the descriptors each time for each line.

        Args:
            line (str): line of log to add to the counter.
        """
        # Parse the message to have the descriptors.
        message : List[str] = self.parser_message(line=line)
        if len(message) > 0:
            # Get the frozen message because python can't used list as dictionnary key.
            frozen_message : Tuple[str, ...] = tuple(message)
            if frozen_message in self.dict_message:
                # If the message is already in the dict, get the associated descriptors and add +1
                self.counter_logs[self.dict_message[frozen_message]] += 1
            else:
                # Else, compute the descriptors, add the line and descriptors into the dict, and add the line of descriptors to the dict.
                frozen_message_descriptors = tuple([self.filter_word(word) for word in message])
                self.dict_message.setdefault(frozen_message, frozen_message_descriptors)
                # NOTE(review): a first occurrence is seeded at 1 and then incremented below,
                # so it ends up counted as 2 -- looks like an off-by-one (seed 0?); confirm
                # against the docstring example before relying on absolute counts.
                self.counter_logs.setdefault(frozen_message_descriptors, 1)
                self.counter_logs[self.dict_message[frozen_message]] += 1

    def associate_pattern(self, line : str) -> None:
        """Associate a line with a pattern. Add this pattern to the list of patterns.

        Args:
            line (str): line to be associated.
        """
        # Parse the message
        message = [self.filter_word(word) for word in self.parser_message(line=line)]
        if len(message) > 0:
            frozen_message = tuple(message)
            if frozen_message in self.dict_message_associated:
                # If we have already seen the message, we know the pattern.
                if self.output == "":
                    self.list_patterns.append(self.dict_message_associated[frozen_message].id)
                elif self.output == "logpai":
                    self.list_patterns.append({'Content': message, 'EventId': int(self.dict_message_associated[frozen_message].id), 'EventTemplate': self.dict_message_associated[frozen_message].pattern_str})
            else:
                # Else, compute it.
                best_pattern = Journal.find_pattern(message, self.dict_patterns)
                self.dict_message_associated[frozen_message] = best_pattern
                if self.output == "":
                    self.list_patterns.append(best_pattern.id)
                elif self.output == "logpai":
                    self.list_patterns.append({'Content': message, 'EventId': int(best_pattern.id), 'EventTemplate': best_pattern.pattern_str})

    def read_file(self) -> None:
        """Read the logs files.

        self.path may be a single path (str) or an iterable of paths; every line of every
        file is fed to associate_pattern or count_log depending on the mode.
        """
        if isinstance(self.path, str):
            # For only one file
            # NOTE(review): the bare except hides the real error type (IOError, decode error,
            # sort_function failure, ...) -- narrowing it would ease debugging.
            try:
                with open(self.path, "r", encoding=self.encoding) as file_open:
                    if self.associated_pattern:
                        if self.sort_function != "":
                            lines = self.sort_function(list(file_open.readlines()))
                            for line in lines:
                                self.associate_pattern(line)
                        else:
                            for line in file_open.readlines():
                                self.associate_pattern(line)
                    else:
                        for line in file_open.readlines():
                            self.count_log(line)
            except:
                logger.error("Error while reading the file: " +str(self.path))
        else:
            # For a list of files.
            for file_path in self.path:
                try:
                    with open(file_path, "r", encoding=self.encoding) as file_open:
                        if self.associated_pattern:
                            if self.sort_function != "":
                                lines = self.sort_function(list(file_open.readlines()))
                                for line in lines:
                                    self.associate_pattern(line)
                            else:
                                for line in file_open.readlines():
                                    self.associate_pattern(line)
                        else:
                            for line in file_open.readlines():
                                self.count_log(line)
                except:
                    logger.error("Error while reading the file: " +str(file_path))

    def filter_word(self, word : str) -> str:
        """Get the descriptors of the word

        Args:
            word (str): word to describe

        Returns:
            str: descriptors of the word. They use a string representation of a list.
        """
        if self.is_number(word):
            return "NB"
        elif word.isalpha() or len(word) == 1:
            # Purely alphabetic or single-character words are kept verbatim.
            return word
        else:
            # Mixed words are replaced by a cached descriptor vector.
            if word in self.dict_words_descriptors:
                return self.dict_words_descriptors[word]
            str_vector = Journal.create_vector(word)
            self.dict_words_descriptors.setdefault(word, str_vector)
            return str_vector

    def is_number(self, s : str) -> bool:
        """Detect if a string is a float.

        Args:
            s (str): string to parse

        Returns:
            bool: True if the string is a float, False else.
        """
        try:
            float(s)
            return True
        except ValueError:
            return False

    @staticmethod
    def find_pattern(message : List[str], dict_patterns : dict) -> Pattern:
        """Find the pattern associated to a log. The best pattern is the pattern with the maximum common words with the line.

        Args:
            message (List[str]): list of the words of the message part of the log.
            dict_patterns (dict): the dict of patterns.

        Returns:
            Pattern: the pattern associated to the line.
        """
        # Create a default pattern to compare it to the other ones to find the best pattern.
        best_pattern = Pattern(0, [], [])
        # Get the patterns with the same cardinality as the line. The cardinality of a pattern is the cardinality used for finding this pattern and not this number of words.
        # NOTE(review): raises KeyError when no pattern bucket exists for this cardinality -- confirm callers guarantee it.
        dict_patterns_size = dict_patterns[len(message)]
        # Get the descriptors
        # Begin by the bigger pattern to save time.
        for size_pattern in sorted(dict_patterns_size.keys(), reverse=True):
            for pattern in dict_patterns_size[size_pattern]:
                nb_word_match = 0
                # Compute the number of common words
                for i in range(len(pattern)):
                    if pattern.pattern_word[i] == message[pattern.pattern_index[i]]:
                        nb_word_match += 1
                # If we have more common words, then we have a new best pattern
                # NOTE(review): the match count is compared against len(best_pattern) (its word
                # count), not against the best match count -- verify this is the intended metric.
                if nb_word_match > len(best_pattern):
                    best_pattern = pattern
            # If new size if lower than the size of the actual best pattern, stop the detection.
            if len(best_pattern) > size_pattern:
                break
        return best_pattern

    @staticmethod
    def static_is_number(s : str) -> bool:
        """Detect if a string is a float.

        Args:
            s (str): string to parse

        Returns:
            bool: True if the string is a float, False else.
        """
        try:
            float(s)
            return True
        except ValueError:
            return False

    @staticmethod
    def static_filter_word(word : str) -> str:
        """Get the descriptors of the word

        Args:
            word (str): word to describe

        Returns:
            str: descriptors of the word. They use a string representation of a list.
        """
        # Same logic as filter_word but without the per-instance cache (see the TODO at class level).
        if Journal.static_is_number(word):
            return "NB"
        elif word.isalpha() or len(word) == 1:
            return word
        else:
            return Journal.create_vector(word)

    @staticmethod
    def create_vector(word : str) -> str:
        """Create the vector of descriptors associated to a word

        Args:
            word (str): the word to describe using descriptors

        Returns:
            str: the descriptors
        """
        # Flags: [has_upper, has_lower, has_non_alnum, has_digit, length].
        vector = ["0"]*5
        number = False
        lower = False
        upper = False
        alnum = False
        for letter in word:
            if letter.isdigit():
                vector[3] = "1"
                number = True
            elif letter.islower():
                vector[1] = "1"
                lower = True
            elif letter.isupper():
                vector[0] = "1"
                upper = True
            elif not letter.isalnum():
                vector[2] = "1"
                alnum = True
            # All four flags set: no further letter can change the vector except the length.
            if number and lower and upper and alnum:
                break
        vector[4] = str(len(word))
        str_vector = ''.join(vector)
        return str_vector
#!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import os
import numpy as np
import dlib
import cv2

from data import FaceData

#=============================================
class FaceDetector:
    """
    Implements the detector of faces (with their landmarks) in images.
    """

    _detector = None
    """
    Instance of the dlib's object used to detect faces in images, shared by
    all instances of this class.
    """

    _predictor = None
    """
    Instance of the dlib's object used to predict the positions of facial
    landmarks in images, shared by all instances of this class.
    """

    #---------------------------------------------
    def detect(self, image, downSampleRatio = None):
        """
        Tries to automatically detect a face in the given image.

        This method uses the face detector/predictor from the dlib package
        (with its default face model) to detect a face region and 68 facial
        landmarks. Even though dlib is able to detect more than one face in
        the image, for the current purposes of the fsdk project only a single
        face is needed. Hence, only the biggest face detected (estimated from
        the region size) is considered.

        Parameters
        ------
        image: numpy.array
            Image data where to search for the face.
        downSampleRatio: float
            Optional ratio by which the image is downscaled before the initial
            face detection, in order to speed it up. If None (the default),
            the detection runs on the image in full resolution.

        Returns
        ------
        result: bool
            Indication on the success or failure of the facial detection.
        face: FaceData
            Instance of the FaceData class with the region and landmarks of
            the detected face, or None if no face was detected.
        """

        #####################
        # Setup the detector
        #####################

        # Initialize the static detector and predictor if this is first use
        if FaceDetector._detector is None or FaceDetector._predictor is None:
            FaceDetector._detector = dlib.get_frontal_face_detector()

            faceModel = os.path.abspath('{}/models/face_model.dat' \
                        .format(os.path.dirname(__file__)))
            FaceDetector._predictor = dlib.shape_predictor(faceModel)

        #####################
        # Performance cues
        #####################

        # If requested, scale down the original image in order to improve
        # performance in the initial face detection
        if downSampleRatio is not None:
            detImage = cv2.resize(image, (0, 0), fx=1.0 / downSampleRatio,
                                                 fy=1.0 / downSampleRatio)
        else:
            detImage = image

        #####################
        # Face detection
        #####################

        # Detect faces in the image
        detectedFaces = FaceDetector._detector(detImage, 1)
        if len(detectedFaces) == 0:
            return False, None

        # As documented above, only the biggest face (by region area) is
        # considered. (Fix: the previous code took detectedFaces[0], i.e. the
        # first face returned by dlib, which is not necessarily the biggest.)
        region = max(detectedFaces, key=lambda r: r.width() * r.height())

        # If downscaling was requested, scale back the detected region so the
        # landmarks can be proper located on the image in full resolution.
        # The coordinates are truncated to int because dlib.rectangle does not
        # accept floats and downSampleRatio may be a non-integer ratio (fix:
        # previously the raw float products were passed straight through).
        if downSampleRatio is not None:
            region = dlib.rectangle(int(region.left() * downSampleRatio),
                                    int(region.top() * downSampleRatio),
                                    int(region.right() * downSampleRatio),
                                    int(region.bottom() * downSampleRatio))

        # Fit the shape model over the face region to predict the positions of
        # its facial landmarks
        faceShape = FaceDetector._predictor(image, region)

        #####################
        # Return data
        #####################

        face = FaceData()

        # Update the object data with the predicted landmark positions and
        # their bounding box (with a small margin of 10 pixels, clamped to the
        # image borders)
        face.landmarks = np.array([[p.x, p.y] for p in faceShape.parts()])

        margin = 10
        x, y, w, h = cv2.boundingRect(face.landmarks)
        face.region = (
            max(x - margin, 0),
            max(y - margin, 0),
            min(x + w + margin, image.shape[1] - 1),
            min(y + h + margin, image.shape[0] - 1)
        )

        return True, face
const assert = require("assert");
const expect = require("chai").expect;
const request = require("supertest");
// NOTE(review): `validator` is required but never used in this file.
const validator = require("validator");
const app = require("../../ExpressFunctionApp/app");

// Smoke tests for the plain hello endpoint: status code and JSON payload.
describe('GET /api/hello', () => {
  it('should return 200 status', () => {
    return request(app)
      .get('/api/hello')
      .then((response) => {
        expect(response.status).to.eql(200)
      })
  });

  it('should return json response', () => {
    return request(app)
      .get('/api/hello')
      .then((response) => {
        expect(response.body).to.eql({"hello": "world"});
        expect(response.headers['content-type']).to.include('application/json');
      })
  });
});

// Static file serving for the API docs: existing vs. missing file.
describe('GET /api/docs/', () => {
  it('should return 200 status for existing file', () => {
    return request(app)
      .get('/api/docs/swagger.json')
      .then((response) => {
        expect(response.status).to.eql(200)
      })
  });

  it('should return 404 status for non-existing file', () => {
    return request(app)
      .get('/api/docs/doesnexist.json')
      .then((response) => {
        expect(response.status).to.eql(404)
      })
  });
});

// Echo endpoint: reflects headers, query string, method, body and an
// optional status route parameter back to the caller.
describe('ALL /api/echo/:status?', () => {
  it('should return 200 status', () => {
    return request(app)
      .get('/api/echo')
      .then((response) => {
        expect(response.status).to.eql(200)
      })
  });

  it('should return request headers in echo-headers object, downcased keys', () => {
    return request(app)
      .get('/api/echo')
      .set('Custom-Echo-Header', 'Random-Value-123')
      .set('Another-Echo-Header', 'My value 456')
      .then((response) => {
        expect(response.body['echo-headers']['custom-echo-header']).to.eql('Random-Value-123');
        expect(response.body['echo-headers']['another-echo-header']).to.eql('My value 456');
      })
  });

  it('should return json response', () => {
    return request(app)
      .get('/api/echo')
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
      })
  });

  it('should return query strings in echo-qs object', () => {
    return request(app)
      .get('/api/echo?abc=def&ghi=jkl')
      .then((response) => {
        expect(response.body['echo-qs']['abc']).to.eql('def');
        expect(response.body['echo-qs']['ghi']).to.eql('jkl');
      })
  });

  // The original URL is taken from the x-waws-unencoded-url header
  // (set by the Azure front-end in production).
  it('should return orignal url in echo-originalurl property', () => {
    return request(app)
      .get('/api/echo?abc=def&ghi=jkl')
      .set('x-waws-unencoded-url', '/api/echo?abc=def&ghi=jkl')
      .then((response) => {
        expect(response.body['echo-originalurl']).to.eql('/api/echo?abc=def&ghi=jkl');
      })
  });

  ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'].forEach((method) => {
    it('should return ' + method + ' method in echo-method key', () => {
      return request(app)[method.toLowerCase()]('/api/echo')
        .then((response) => {
          expect(response.body['echo-method']).to.eql(method);
        })
    });
  });

  it('should return json request body in echo-body object', () => {
    return request(app)
      .post('/api/echo')
      .set('Content-Type', 'application/json')
      .send({'key1': 'value1', 'key2': 'value2'})
      .then((response) => {
        expect(response.body['echo-body-content-type']).to.include('application/json');
        expect(response.body['echo-body']).to.eql({'key1': 'value1', 'key2': 'value2'});
      })
  });

  // NOTE(review): the title mentions a 400 status but the assertion only
  // checks the error body, not response.status — confirm intended.
  it('should return 400 status for malformed json request body', () => {
    return request(app)
      .post('/api/echo')
      .set('Content-Type', 'application/json')
      .send('{"key1":}')
      .then((response) => {
        expect(response.body['error']['body']).to.eql('{\"key1\":}');
      })
  });

  [200, 400, 401, 403, 404, 405, 410, 500, 502, 503, 504].forEach((status) => {
    it('should return ' + status + ' status if supplied in route parameter', () => {
      return request(app)
        .post('/api/echo/' + status.toString())
        .then((response) => {
          expect(response.status).to.eql(status);
        })
    });
  });
});

// Echo variant that receives the payload as text and converts it to JSON
// when possible.
describe('ALL /api/echo-from-text/:status?', () => {
  it('should return 200 status', () => {
    return request(app)
      .get('/api/echo-from-text')
      .then((response) => {
        expect(response.status).to.eql(200)
      })
  });

  it('should return request headers in echo-headers object, downcased keys', () => {
    return request(app)
      .get('/api/echo-from-text')
      .set('Custom-Echo-Header', 'Random-Value-123')
      .set('Another-Echo-Header', 'My value 456')
      .then((response) => {
        expect(response.body['echo-headers']['custom-echo-header']).to.eql('Random-Value-123');
        expect(response.body['echo-headers']['another-echo-header']).to.eql('My value 456');
      })
  });

  it('should return json response', () => {
    return request(app)
      .get('/api/echo-from-text')
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
      })
  });

  it('should return query strings in echo-qs object', () => {
    return request(app)
      .get('/api/echo-from-text?abc=def&ghi=jkl')
      .then((response) => {
        expect(response.body['echo-qs']['abc']).to.eql('def');
        expect(response.body['echo-qs']['ghi']).to.eql('jkl');
      })
  });

  ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'].forEach((method) => {
    it('should return ' + method + ' method in echo-method key', () => {
      return request(app)[method.toLowerCase()]('/api/echo-from-text')
        .then((response) => {
          expect(response.body['echo-method']).to.eql(method);
        })
    });
  });

  it('should return text request body in echo-body-text property', () => {
    return request(app)
      .post('/api/echo-from-text')
      .set('Content-Type', 'text/plain')
      .send('this is a text')
      .then((response) => {
        expect(response.body['echo-body-text']).to.eql('this is a text');
      })
  });

  it('should convert text request body to json and return in body', () => {
    return request(app)
      .post('/api/echo-from-text')
      .set('Content-Type', 'text/plain')
      .send('{"key1": "value1", "key2": "value2"}')
      .then((response) => {
        expect(response.body).to.include({'key1': 'value1', 'key2': 'value2'});
      })
  });

  // Payload is a JSON-encoded string that itself contains escaped JSON.
  it('should convert escaped text request body to json and return in body', () => {
    return request(app)
      .post('/api/echo-from-text')
      .set('Content-Type', 'text/plain')
      .send("\"{\\\"abc\\\": true, \\\"def\\\": 123, \\\"message\\\": \\\"message1\\\"}\"")
      .then((response) => {
        expect(response.body).to.include({'abc': true, 'def': 123, 'message': 'message1'});
      })
  });

  [200, 400, 401, 403, 404, 405, 410, 500, 502, 503, 504].forEach((status) => {
    it('should return ' + status + ' status if supplied in route parameter', () => {
      return request(app)
        .post('/api/echo-from-text/' + status.toString())
        .then((response) => {
          expect(response.status).to.eql(status);
        })
    });
  });
});

// Error-status passthrough endpoint.
describe('GET /api/files/errors/:status', () => {
  [200, 400, 401, 403, 404, 405, 410, 500, 502, 503, 504].forEach((status) => {
    it('should return ' + status + ' status supplied in route parameter', () => {
      return request(app)
        .get('/api/files/errors/' + status.toString())
        .then((response) => {
          expect(response.status).to.eql(status);
        })
    });
  });
});

// Type round-trip endpoint: each allTypesInputs.* value is echoed back in
// outputs.* with the corresponding type semantics.
describe('POST /api/all-types', () => {
  it('should return request headers in inputs object, downcased keys', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .set('Custom-Echo-Header', 'Random-Value-123')
      .set('Another-Echo-Header', 'My value 456')
      .then((response) => {
        expect(response.body['inputs']['headers']['custom-echo-header']).to.eql('Random-Value-123');
        expect(response.body['inputs']['headers']['another-echo-header']).to.eql('My value 456');
      })
  });

  // NOTE(review): the title says "outputs object" but the assertion checks
  // inputs.body — confirm intended.
  it('should return request body in outputs object', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'any': {'key1': 'value1'}})
      .then((response) => {
        expect(response.body['inputs']['body']).to.eql({'any': {'key1': 'value1'}});
      })
  });

  it('should return json response', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
      })
  });

  it('should return text output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'textInput': 'abc'}})
      .then((response) => {
        expect(response.body['outputs']['textOutput']).to.eql('abc')
      })
  });

  it('should return empty string for empty text input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'textInput': ''}})
      .then((response) => {
        expect(response.body['outputs']['textOutput']).to.eql('')
      })
  });

  it('should return null for null text input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'textInput': null}})
      .then((response) => {
        expect(response.body['outputs']['textOutput']).to.eql(null)
      })
  });

  it('should return decimal output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'decimalInput': 123.45}})
      .then((response) => {
        expect(response.body['outputs']['decimalOutput']).to.eql(123.45)
      })
  });

  it('should not add decimal points for round decimal input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'decimalInput': 42}})
      .then((response) => {
        expect(response.body['outputs']['decimalOutput']).to.eql(42)
      })
  });

  it('should return null for null decimal input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'decimalInput': null}})
      .then((response) => {
        expect(response.body['outputs']['decimalOutput']).to.eql(null)
      })
  });

  it('should return integer output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'integerInput': -789}})
      .then((response) => {
        expect(response.body['outputs']['integerOutput']).to.eql(-789)
      })
  });

  it('should preserve decimals if sent for integer input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'integerInput': 67.89}})
      .then((response) => {
        expect(response.body['outputs']['integerOutput']).to.eql(67.89)
      })
  });

  it('should return null for null integer input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'integerInput': null}})
      .then((response) => {
        expect(response.body['outputs']['integerOutput']).to.eql(null)
      })
  });

  it('should return true boolean output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'booleanInput': true}})
      .then((response) => {
        expect(response.body['outputs']['booleanOutput']).to.eql(true)
      })
  });

  it('should return false boolean output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'booleanInput': false}})
      .then((response) => {
        expect(response.body['outputs']['booleanOutput']).to.eql(false)
      })
  });

  it('should return null for null boolean input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'booleanInput': null}})
      .then((response) => {
        expect(response.body['outputs']['booleanOutput']).to.eql(null)
      })
  });

  // A string 'true' is not coerced: non-boolean input yields null.
  it('should return null for incorrect boolean input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'booleanInput': 'true'}})
      .then((response) => {
        expect(response.body['outputs']['booleanOutput']).to.eql(null)
      })
  });

  it('should return datetime output with ISO 8601 Z format', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'datetimeInput': '2017-07-21T17:32:28Z'}})
      .then((response) => {
        expect(response.body['outputs']['datetimeOutput']).to.eql('2017-07-21T17:32:28Z')
      })
  });

  it('should return datetime output with ISO 8601 and time offset format', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'datetimeInput': '2017-07-21T17:32:28+0800'}})
      .then((response) => {
        expect(response.body['outputs']['datetimeOutput']).to.eql('2017-07-21T17:32:28+0800')
      })
  });

  it('should return null for null datetime input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'datetimeInput': null}})
      .then((response) => {
        expect(response.body['outputs']['datetimeOutput']).to.eql(null)
      })
  });

  it('should return collection output', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'collectionInput': ['abc', 'def', 'ghi']}})
      .then((response) => {
        expect(response.body['outputs']['collectionOutput']).to.eql(['abc', 'def', 'ghi'])
      })
  });

  it('should return empty collection for empty collection input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'collectionInput': []}})
      .then((response) => {
        expect(response.body['outputs']['collectionOutput']).to.eql([])
      })
  });

  it('should return null for non-collection input', () => {
    return request(app)
      .post('/api/all-types')
      .set('Content-Type', 'application/json')
      .send({'allTypesInputs': {'collectionInput': 'abc'}})
      .then((response) => {
        expect(response.body['outputs']['collectionOutput']).to.eql(null)
      })
  });
});

// Hardcoded-object endpoint: the server returns the same fixture both as a
// parsed object and as a string, selectable via the `expected` querystring.
describe('GET /api/all-types/object', () => {
  beforeEach(() => {
    this.hardcoded = {
      "text": "text1",
      "decimal": 123.546,
      "integer": 42,
      "boolean": true,
      "datetime": "2017-07-21T17:32:28Z",
      "collection": ["text2", -543.21, 24, true, "2020-12-31T17:56:57Z"],
      "object": {"key1": "value1", "key2": {"key3": "value3"}}
    };
  });

  it('should return request headers in inputs object, downcased keys', () => {
    return request(app)
      .get('/api/all-types/object')
      .set('Content-Type', 'application/json')
      .set('Custom-Echo-Header', 'Random-Value-123')
      .set('Another-Echo-Header', 'My value 456')
      .then((response) => {
        expect(response.body['inputs']['headers']['custom-echo-header']).to.eql('Random-Value-123');
        expect(response.body['inputs']['headers']['another-echo-header']).to.eql('My value 456');
      })
  });

  it('should return json response', () => {
    return request(app)
      .get('/api/all-types/object')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
      })
  });

  it('should return hardcoded body in asObject', () => {
    return request(app)
      .get('/api/all-types/object')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asObject']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString', () => {
    return request(app)
      .get('/api/all-types/object')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asObject if querystring is body', () => {
    return request(app)
      .get('/api/all-types/object?expected=body')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asObject']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring is body', () => {
    return request(app)
      .get('/api/all-types/object?expected=body')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  it('should return empty object in asObject if querystring is literal "empty"', () => {
    return request(app)
      .get('/api/all-types/object?expected=empty')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asObject']).to.eql({});
      })
  });

  it('should return empty object in asString if querystring is literal "empty"', () => {
    return request(app)
      .get('/api/all-types/object?expected=empty')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql({});
      })
  });

  it('should return hardcoded body in asObject if querystring is "" empty string', () => {
    return request(app)
      .get('/api/all-types/object?expected=')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asObject']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring is "" empty string', () => {
    return request(app)
      .get('/api/all-types/object?expected=')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded plaintext if requested in querystring', () => {
    return request(app)
      .get('/api/all-types/object?expected=plaintext')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.text).to.eql('this is a plaintext');
      })
  });

  it('should return hardcoded body in asObject if querystring not matching', () => {
    return request(app)
      .get('/api/all-types/object?expected=doesntexist')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asObject']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring not matching', () => {
    return request(app)
      .get('/api/all-types/object?expected=doesntexist')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });
});

// Same contract as the object endpoint, but with a hardcoded array fixture.
describe('GET /api/all-types/array', () => {
  beforeEach(() => {
    this.hardcoded = [
      "text1",
      123.546,
      42,
      true,
      "2017-07-21T17:32:28Z",
      {"key1": "value1", "key2": {"key3": "value3"}}
    ];
  });

  it('should return request headers in inputs object, downcased keys', () => {
    return request(app)
      .get('/api/all-types/array')
      .set('Content-Type', 'application/json')
      .set('Custom-Echo-Header', 'Random-Value-123')
      .set('Another-Echo-Header', 'My value 456')
      .then((response) => {
        expect(response.body['inputs']['headers']['custom-echo-header']).to.eql('Random-Value-123');
        expect(response.body['inputs']['headers']['another-echo-header']).to.eql('My value 456');
      })
  });

  it('should return json response', () => {
    return request(app)
      .get('/api/all-types/array')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
      })
  });

  it('should return hardcoded body in asArray', () => {
    return request(app)
      .get('/api/all-types/array')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asArray']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString', () => {
    return request(app)
      .get('/api/all-types/array')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asArray if querystring is body', () => {
    return request(app)
      .get('/api/all-types/array?expected=body')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asArray']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring is body', () => {
    return request(app)
      .get('/api/all-types/array?expected=body')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  // NOTE(review): expects an empty *object* ({}) even though this is the
  // array endpoint — confirm the server really returns {} and not [].
  it('should return empty object in asArray if querystring is literal "empty"', () => {
    return request(app)
      .get('/api/all-types/array?expected=empty')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asArray']).to.eql({});
      })
  });

  it('should return empty object in asString if querystring is literal "empty"', () => {
    return request(app)
      .get('/api/all-types/array?expected=empty')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql({});
      })
  });

  it('should return hardcoded body in asArray if querystring is "" empty string', () => {
    return request(app)
      .get('/api/all-types/array?expected=')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asArray']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring is "" empty string', () => {
    return request(app)
      .get('/api/all-types/array?expected=')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded plaintext if requested in querystring', () => {
    return request(app)
      .get('/api/all-types/array?expected=plaintext')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.text).to.eql('this is a plaintext');
      })
  });

  it('should return hardcoded body in asArray if querystring not matching', () => {
    return request(app)
      .get('/api/all-types/array?expected=doesntexist')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asArray']).to.eql(this.hardcoded);
      })
  });

  it('should return hardcoded body in asString if querystring not matching', () => {
    return request(app)
      .get('/api/all-types/array?expected=doesntexist')
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['outputs']['object']['asString']).to.eql(this.hardcoded);
      })
  });
});

// The endpoint returns the request body serialized back as a JSON string.
describe('POST /api/all-types-stringified', () => {
  it('should return valid json request body as string', () => {
    return request(app)
      .post('/api/all-types-stringified')
      .set('Content-Type', 'application/json')
      .send({'any': {'key1': 'value1'}})
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql('{"any":{"key1":"value1"}}');
      })
  });

  it('should return json request body with null property as string', () => {
    return request(app)
      .post('/api/all-types-stringified')
      .set('Content-Type', 'application/json')
      .send({'key1': null})
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql('{"key1":null}');
      })
  });

  it('should return empty request body as string', () => {
    return request(app)
      .post('/api/all-types-stringified')
      .set('Content-Type', 'application/json')
      .send({})
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql('{}');
      })
  });
});

// The odata querystring must be passed through verbatim (no decoding).
describe('POST /api/all-types-odata', () => {
  it('should return odata querystring as string', () => {
    return request(app)
      .post("/api/all-types-odata?odata=substringof('needle', haystack) and dec gt 0.001 and bool eq true")
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql("substringof('needle', haystack) and dec gt 0.001 and bool eq true");
      })
  });

  it('should return unmodified urlencoded odata querystring as string', () => {
    return request(app)
      .post("/api/all-types-odata?odata=substringof(%27needle%27%2Chaystack)%20and%20deci%20gt%0.001and%20bool%20eq%20true")
      .set('Content-Type', 'application/json')
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql("substringof(%27needle%27%2Chaystack)%20and%20deci%20gt%0.001and%20bool%20eq%20true");
      })
  });

  it('should return odata querystring with explicit null as string', () => {
    return request(app)
      .post('/api/all-types-odata?odata=decimal gt null and integer ge null and boolean eq null')
      .set('Content-Type', 'application/json')
      .send({})
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql('decimal gt null and integer ge null and boolean eq null');
      })
  });

  it('should return empty odata querystring as string', () => {
    return request(app)
      .post('/api/all-types-odata?odata=')
      .set('Content-Type', 'application/json')
      .send({'key1': null})
      .then((response) => {
        expect(response.body['allTypesOutputsStringified']).to.eql('');
      })
  });
});

// Fixture-selection endpoint for nullable outputs: the `expected` querystring
// picks between fully-populated, explicit-null and missing-property fixtures.
describe('GET /api/all-types-nullable', () => {
  beforeEach(() => {
    this.validValues = {
      "textOutput": "primitive text 1",
      "decimalOutput": 123456.789,
      "integerOutput": 111,
      "booleanOutput": true,
      "datetimeOutput": "2021-02-01T17:28:18.686Z",
      "textCollectionOutput": ["abc", "def", "ghi"],
      "decimalCollectionOutput": [1.1, 2.2, 3.3],
      "integerCollectionOutput": [4, 5, 6],
      "booleanCollectionOutput": [true, false, true],
      "datetimeCollectionOutput": [
        "2021-02-01T17:23:56.139Z",
        "2022-02-01T17:23:56.139Z",
        "2023-02-01T17:23:56.139Z",
      ],
      "ObjectCollectionOutput": [
        {
          "textOutput": "obj1",
          "decimalOutput": 7.7,
          "integerOutput": -1,
          "booleanOutput": true,
          "datetimeOutput": "2025-02-01T17:23:56.139Z"
        },
        {
          "textOutput": "obj2",
          "decimalOutput": 8.8,
          "integerOutput": -2,
          "booleanOutput": true,
          "datetimeOutput": "2026-02-01T17:23:56.139Z"
        }
      ]
    };
    this.explicitNull = {
      "textOutput": null,
      "decimalOutput": null,
      "integerOutput": null,
      "booleanOutput": null,
      "datetimeOutput": null,
      "textCollectionOutput": null,
      "decimalCollectionOutput": null,
      "integerCollectionOutput": null,
      "booleanCollectionOutput": null,
      "datetimeCollectionOutput": null,
      "ObjectCollectionOutput": null
    };
    this.missingProperty = {};
  });

  it('should return hardcoded validValues json response by default', () => {
    return request(app)
      .get('/api/all-types-nullable')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.validValues);
      })
  });

  it('should return hardcoded validValues json response if querystring is validValues', () => {
    return request(app)
      .get('/api/all-types-nullable?expected=validValues')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.validValues);
      })
  });

  it('should return hardcoded validValues json response if querystring is empty string', () => {
    return request(app)
      .get('/api/all-types-nullable?expected=')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.validValues);
      })
  });

  it('should return hardcoded validValues json response if querystring not matching', () => {
    return request(app)
      .get('/api/all-types-nullable?expected=doesntexist')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.validValues);
      })
  });

  it('should return hardcoded explicitNull json response if querystring is explicitNull', () => {
    return request(app)
      .get('/api/all-types-nullable?expected=explicitNull')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.explicitNull);
      })
  });

  it('should return hardcoded empty object json response if querystring is missingProperty', () => {
    return request(app)
      .get('/api/all-types-nullable?expected=missingProperty')
      .set('Content-Type', 'application/json')
      .send()
      .then((response) => {
        expect(response.headers['content-type']).to.include('application/json');
        expect(response.body['outputs']).to.eql(this.missingProperty);
      })
  });
});

// File output fixtures (base64-encoded PNG content). This describe block
// continues beyond this chunk.
describe('GET /api/file-nullable', () => {
  beforeEach(() => {
    this.validValues = {
      "fileOutput": {
        "fileContent": "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAACESURBVEhL7Y5BCsAwCATz/0+nYpZSkLoaTOnBOaRKNzsZ8zAtoLSAkhaMkbuSTCtYYiTSq13AHiMnuM84O4KUIxp9ltYL9NGHBZgU1UUdOwKhXmDBPwbP2a4lELC7kJDT8oUg4vAStKJAgOkFfQDL4GuIXBZozBNgYvxVUEULKC0gzHkBuvRcP4Oq7bUAAAAASUVORK5CYII=",
        "originalName": "CharA.png",
        "mimeType": "image/png",
        "md5": "8ccae3f262fbd8747735395a556229f7",
        "size": 239
      },
      "fileCollectionOutput": [
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAACySURBVEhL7ZPRDoAgCEW1//9nY+GYS5QL07VV5yVrdY8I5VJK2slRr9v4BSZKk3POdaXhHYqhYBTU6hGZLoC+nO5DiPcA2QTx2TGlBoCtcgs4mhbre9BGg+kENKaSy7cujAquTcfTCb2CuuoIaKAjYkTs0jiaTLkcPSmxxz2mrMEdboEAOoICPiuEeAUgDwnM88WbDP1o6jtgG+wfTXzykJ/EBSq3ssCvCFQQ5q1juoyUTo3XchvVQmDSAAAAAElFTkSuQmCC", "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAACVSURBVEhL7ZXBDsAgCENl///PromeDKKRgsmyd9lO7VqRSa21RPL0Zxi/wZKtQxaR/qZhK6wNmvrxsC0qcqoDy8CvDqYGFHVgJfCrg/Ax1Q3QD+XzwY0E7XhZ6AlY/YD0irj9ACUBsR+QXhGd0QD9ZIwpEd2AGELfOYOBZ652f5ngzGZ3axql2Qq0tTzjexeNTrBBKS/XbjwlavjApAAAAABJRU5ErkJggg==", "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsIAAA7CARUoSoAAAAC7SURBVFhH7ZfRCoAgDEW1///n6oIDsXRX3RDKA1EP5Y7XJRXPm7CQI52XsQXUHogxpqsnFu1DCdRuyeVGZaaWAEXlgEwrrRpmPZCL9GDehL1puLwFkACMRFMAA8hgvbDPuSQgMD3hKsDwD4HWMrgLaM34/SXQXmVXgemNyIJlPcDuoi4CTPSCWwLM7IGpAGbORi+YCUjsPcWBicBocWCWwEhx0PwqLtdTZloyWhyoAiUzxd7YP6eLBUK4ACJ7Yx0sF/V/AAAAAElFTkSuQmCC", ], "ObjectCollectionOutput": [ { "fileOutput": { "fileContent": "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsIAAA7CARUoSoAAAACZSURBVFhH7ZbdCoAwCEZn7//OlTAhqum3H1kjz826cO6AP0T7SZrIls9phADcA0SUv570tFGVwFuoJsZY6SGB0uMjiCb8voBn/ZkoQQiYi8hrBQuQQCnEWsOC9kSXAIJ1370HLPk1pgCtdQumQE/9EdYogSchYArIBNROAscjd+C/YkZLeE0jcUjqKgGNuxyadphAK3+fgpQO3X9VGPJG2gMAAAAASUVORK5CYII=", "originalName": "CharE.png", "mimeType": "image/png", "md5": "66da72ec3afafceb2a9caa73c8cacc8a", "size": 260 } }, { "fileOutput": { "fileContent": "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsIAAA7CARUoSoAAAAB5SURBVFhH7dTRCoAgDIXhrfd/50o0kGHNsckMzn9TF1If6OTzjhI72jMtANIB6iFk5vZWiz6zU4B+STTIDJBJUGkEfvvGJ0D7+SgNJAsHWMMYAgCA+SqWecd0yU34NINzA7zhEO4PKPuvTYInbAEA/wDgJlxZMoDoAgqtURHjBSxIAAAAAElFTkSuQmCC", 
"originalName": "CharF.png", "mimeType": "image/png", "md5": "add944ee2cd04a8af5d1bc6c54a2e9b3", "size": 228 } } ] }; this.explicitNull = { "fileOutput": null, "fileCollectionOutput": null, "ObjectCollectionOutput": null }; this.missingProperty = {}; }); it('should return hardcoded validValues json response by default', () => { return request(app) .get('/api/file-nullable') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); expect(response.body['outputs']).to.eql(this.validValues); }) }); it('should return hardcoded validValues json response if querystring is validValues', () => { return request(app) .get('/api/file-nullable?expected=validValues') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); expect(response.body['outputs']).to.eql(this.validValues); }) }); it('should return hardcoded validValues json response if querystring is empty string', () => { return request(app) .get('/api/file-nullable?expected=') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); expect(response.body['outputs']).to.eql(this.validValues); }) }); it('should return hardcoded validValues json response if querystring not matching', () => { return request(app) .get('/api/file-nullable?expected=doesntexist') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); expect(response.body['outputs']).to.eql(this.validValues); }) }); it('should return hardcoded explicitNull json response if querystring is explicitNull', () => { return request(app) .get('/api/file-nullable?expected=explicitNull') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); 
expect(response.body['outputs']).to.eql(this.explicitNull); }) }); it('should return hardcoded empty object json response if querystring is missingProperty', () => { return request(app) .get('/api/file-nullable?expected=missingProperty') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.headers['content-type']).to.include('application/json'); expect(response.body['outputs']).to.eql(this.missingProperty); }) }); }); describe('POST /api/all-parameter-types/:string_path/:integer_path/:boolean_path', () => { it('should return all parameters in output', () => { return request(app) .post('/api/all-parameter-types/something/777/true?string_query=mystringquery&integer_query=666&boolean_query=true') .set('Content-Type', 'application/json') .set('string_header', 'this is a string header') .set('integer_header', '555') .set('boolean_header', 'true') .send({'string_body': 'this is a string property'}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['allParameterTypesOutput']["querystring"]) .to.eql({'string_query': 'mystringquery', 'integer_query': '666', 'boolean_query': 'true'}); expect(response.body['allParameterTypesOutput']["headers"]["string_header"]) .to.eql('this is a string header'); expect(response.body['allParameterTypesOutput']["headers"]["integer_header"]) .to.eql('555'); expect(response.body['allParameterTypesOutput']["headers"]["boolean_header"]) .to.eql('true'); expect(response.body['allParameterTypesOutput']["path"]["string-path"]) .to.eql('something'); expect(response.body['allParameterTypesOutput']["path"]["integer-path"]) .to.eql('777'); expect(response.body['allParameterTypesOutput']["path"]["boolean-path"]) .to.eql('true'); expect(response.body['allParameterTypesOutput']['body']['string_body']) .to.eql('this is a string property'); }) }); }); describe('POST /api/path-encoding/:text', () => { it('should return spaces encoded as %20', () => { return request(app) 
.post('/api/path-encoding/text%20with%20spaces') .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['path']).to.eql('text%20with%20spaces') }) }); it('should return spaces encoded as +', () => { return request(app) .post('/api/path-encoding/text+with+spaces') .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['path']).to.eql('text+with+spaces') }) }); it('should return encoded special characters', () => { return request(app) .post("/api/path-encoding/%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%25%20") .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['path']).to.eql('%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%25%20') }) }); }); describe('POST /api/query-encoding', () => { it('should return spaces encoded as %20', () => { return request(app) .post('/api/query-encoding?string_query=text%20with%20spaces') .set('Content-Type', 'application/json') .set('x-waws-unencoded-url', '/api/query-encoding?string_query=text%20with%20spaces') .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['query']).to.eql('text%20with%20spaces') }) }); it('should return spaces encoded as +', () => { return request(app) .post('/api/query-encoding?string_query=text+with+spaces') .set('Content-Type', 'application/json') .set('x-waws-unencoded-url', '/api/query-encoding?string_query=text+with+spaces') .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['query']).to.eql('text+with+spaces') }) }); it('should return encoded special characters', () => { return request(app) .post("/api/query-encoding?string_query=%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%25%20") .set('Content-Type', 'application/json') .set('x-waws-unencoded-url', 
"/api/query-encoding?string_query=%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%25%20") .send({}) .then((response) => { expect(response.status).to.eql(200); expect(response.body['query']).to.eql('%3A%2F%3F%23%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%25%20') }) }); }); describe('POST /api/form-urlencoded/', () => { it('should return true for urlencoded content type', () => { return request(app) .post('/api/form-urlencoded/mytext/parsed') .set('Content-Type', 'application/x-www-form-urlencoded') .send() .then((response) => { expect(response.status).to.eql(200); expect(response.body['inputs']['x-www-form-urlencoded']).to.eql(true); }) }); it('should return false for application/json content type', () => { return request(app) .post('/api/form-urlencoded/mytext/parsed') .set('Content-Type', 'application/json') .send() .then((response) => { expect(response.status).to.eql(200); expect(response.body['inputs']['x-www-form-urlencoded']).to.eql(false); }) }); it('should return parsed data', () => { return request(app) .post('/api/form-urlencoded/mytext/parsed') .set('Content-Type', 'application/x-www-form-urlencoded') .send('string=abc&decimal=0.1&integer=23&boolean=true&datetime=2017-12-23T12:34:56Z') .then((response) => { expect(response.status).to.eql(200); expect(response.body['outputs']['textPathOutput']).to.eql('mytext'); expect(response.body['outputs']['textOutput']).to.eql('abc'); expect(response.body['outputs']['decimalOutput']).to.eql(0.1); expect(response.body['outputs']['integerOutput']).to.eql(23); expect(response.body['outputs']['booleanOutput']).to.eql(true); expect(response.body['outputs']['datetimeOutput']).to.eql('2017-12-23T12:34:56Z'); }) }); }) describe('POST /api/async-callback', () => { it('should return 202 if initialStatusCode parameter is absent', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(202); }) }); it('should return 202 
if initialStatusCode parameter is null', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'initialStatusCode': null}) .then((response) => { expect(response.status).to.eql(202); }) }); [200, 400, 401, 403, 404, 405, 410, 500, 502, 503, 504].forEach((status) => { it('should return ' + status + ' if supplied in initialStatusCode parameter ', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'initialStatusCode': status}) .then((response) => { expect(response.status).to.eql(status); }) }); }); it('should return random uuid as receipt id', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(202); expect(validator.isUUID(response.body['receiptId'])).to.eql(true) }) }); it('should return input callbackUrl', () => { return request(app) .post('/api/async-callback?callbackUrl=something') .then((response) => { expect(response.status).to.eql(202); expect(response.body['inputs']['callbackUrl']).to.eql('something') }) }); it('should return urldecoded output callbackUrl without status', () => { return request(app) .post('/api/async-callback?callbackUrl=https%3A%2F%2Fsub.domain.tld%2Fpath1%2Fpath2%2Foperation%3Fqs%3Dabc') .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['callbackUrl']).to.eql('https://sub.domain.tld/path1/path2/operation?qs=abc') }) }); it('should return urldecoded output callbackUrl with status', () => { return request(app) .post('/api/async-callback?callbackUrl=https%3A%2F%2Fsub.domain.tld%2Fpath1%2Fpath2%2Foperation%3Fqs%3Dabc') .set('Content-Type', 'application/json') .send({'resultStatus': 'mystatus'}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['callbackUrl']).to.eql('https://sub.domain.tld/path1/path2/operation?qs=abc&status=mystatus') }) }); it('should 
return urldecoded output callbackUrl without status if status is empty string', () => { return request(app) .post('/api/async-callback?callbackUrl=https%3A%2F%2Fsub.domain.tld%2Fpath1%2Fpath2%2Foperation%3Fqs%3Dabc') .set('Content-Type', 'application/json') .send({'resultStatus': ''}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['callbackUrl']).to.eql('https://sub.domain.tld/path1/path2/operation?qs=abc') }) }); it('should return text output', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'textInput': 'xyz'}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['textOutput']).to.eql('xyz') }) }); it('should return status as payload', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'payloadStatus': 'efg'}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['status']['status']).to.eql('efg') }) }); it('should return result status if supplied in request', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'resultStatus': 'something'}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['actualResultStatus']).to.eql('something') }) }); it('should return null result status if parameter is absent', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['actualResultStatus']).to.eql(null) }) }); it('should return null result status if input is empty string', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'resultStatus': ''}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['outputs']['actualResultStatus']).to.eql(null) }) }); 
it('should return error message if supplied in request', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'errorMessage': 'this is an error message'}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['error']).to.eql('this is an error message') }) }); it('should not return error message if input is empty string', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .send({'errorMessage': ''}) .then((response) => { expect(response.status).to.eql(202); expect(response.body['error']).to.eql(undefined) }) }); it('should not return null error message if parameter is absent', () => { return request(app) .post('/api/async-callback') .set('Content-Type', 'application/json') .then((response) => { expect(response.status).to.eql(202); expect(response.body['error']).to.eql(undefined) }) }); }); describe('GET /api/data/array/integer', () => { it('should return counter elements in array of integers', () => { return request(app) .get('/api/data/array/integer?elements=10') .then((response) => { expect(response.status).to.eql(200); expect(response.headers['content-type']).to.include('application/json'); expect(response.body).to.eql([1,2,3,4,5,6,7,8,9,10]); }) }); });
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Helper functions shared by the Node.js gRPC code generator: filename
// mangling for generated service stubs and comment extraction from
// protobuf descriptors.

#ifndef GRPC_INTERNAL_COMPILER_NODE_GENERATOR_HELPERS_H
#define GRPC_INTERNAL_COMPILER_NODE_GENERATOR_HELPERS_H

#include <algorithm>

#include "src/compiler/config.h"
#include "src/compiler/generator_helpers.h"

namespace grpc_node_generator {

// Maps a .proto filename to the name of the generated JS service file,
// e.g. "foo/bar.proto" -> "foo/bar_grpc_pb.js".
inline std::string GetJSServiceFilename(const std::string& filename) {
  return grpc_generator::StripProto(filename) + "_grpc_pb.js";
}

// Get leading or trailing comments in a string. Comment lines start with "// ".
// Leading detached comments are put in front of leading comments.
template <typename DescriptorType>
inline std::string GetNodeComments(const DescriptorType* desc, bool leading) {
  return grpc_generator::GetPrefixedComments(desc, leading, "//");
}

}  // namespace grpc_node_generator

#endif  // GRPC_INTERNAL_COMPILER_NODE_GENERATOR_HELPERS_H
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from typing import Any

from azure.core.configuration import Configuration
from azure.core.pipeline import policies

# SDK package version reported in telemetry; "unknown" until stamped by packaging.
VERSION = "unknown"


class AzureBlobStorageConfiguration(Configuration):
    """Configuration for AzureBlobStorage.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    Builds the default HTTP pipeline policy set for the Azure Blob Storage
    client; any policy may be overridden via the matching keyword argument
    (e.g. ``retry_policy=...``).

    :param url: The URL of the service account, container, or blob that is the target of the desired
     operation.
    :type url: str
    """

    def __init__(
        self,
        url: str,
        **kwargs: Any
    ) -> None:
        # Explicit None check (rather than truthiness) so an empty string is
        # still passed through to the service for its own validation.
        if url is None:
            raise ValueError("Parameter 'url' must not be None.")
        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)

        self.url = url
        # Fixed service REST API version targeted by this generated client.
        self.version = "2020-10-02"
        kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Populate each pipeline policy, preferring a caller-supplied override
        # from kwargs over the default azure.core implementation.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
        # NOTE(review): Async* retry/redirect policies are used here, which only
        # work in the async pipeline — this module presumably lives under an
        # `aio` package; confirm, since a sync pipeline needs RetryPolicy /
        # RedirectPolicy instead.
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        # No default credential policy: authentication must be supplied by the
        # caller (or remains None for anonymous access).
        self.authentication_policy = kwargs.get('authentication_policy')
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from enum import Enum __all__ = [ 'IpAddressType', 'LoadBalancerType', ] class IpAddressType(str, Enum): IPV4 = "ipv4" DUALSTACK = "dualstack" class LoadBalancerType(str, Enum): APPLICATION = "application" NETWORK = "network"
function SvgNoFlashRound(props) { return ( <svg xmlns='http://www.w3.org/2000/svg' height='1em' viewBox='0 0 24 24' width='1em' className='svg-icon' {...props}> <path fill='none' d='M0 0h24v24H0z' /> <path d='M3.16 3.16a.996.996 0 10-1.41 1.41l4.6 4.6-.21.23H3.6c-.88 0-1.6.72-1.6 1.6v9.4c0 .88.72 1.6 1.6 1.6h12.8c.75 0 1.38-.52 1.55-1.22l1.47 1.47a.996.996 0 101.41-1.41L3.16 3.16zM10 20c-2.21 0-4-1.79-4-4 0-1.95 1.4-3.57 3.25-3.92l1.57 1.57c-.26-.09-.53-.15-.82-.15a2.5 2.5 0 000 5 2.5 2.5 0 002.5-2.5c0-.29-.06-.56-.15-.82l1.57 1.57A3.993 3.993 0 0110 20zm8-4.83L10.83 8h.87c.56 0 1.1.24 1.48.65l.69.75h2.54c.88 0 1.6.72 1.6 1.6v4.17zm2.4-9.57h.75c.38 0 .62.41.44.74L19 11V7h-.5c-.28 0-.5-.22-.5-.5v-4c0-.28.22-.5.5-.5h2.73c.36 0 .6.37.46.7L20.4 5.6z' /> </svg> ); } export default SvgNoFlashRound;
#include <JavaScriptCore/RetainPtr.h>
// // VROKnuthPlassFormatter.hpp // ViroRenderer // // Created by Raj Advani on 12/2/16. // Copyright © 2016 Viro Media. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef VROKnuthPlassFormatter_hpp
#define VROKnuthPlassFormatter_hpp

#include <stdio.h>
#include <string>
#include <vector>
#include <list>
#include <memory>

// Sentinel "infinite" penalty/demerit value; defined in the implementation file.
extern int kInfinity;

// The three node kinds of the Knuth-Plass model: stretchable space (Glue),
// fixed-width content (Box), and potential break locations (Penalty).
enum class KPNodeType {
    Glue, Box, Penalty
};

// Running totals of width, stretchability and shrinkability over a node range.
struct KPSum {
    float width, stretch, shrink;

    KPSum() : width(0), stretch(0), shrink(0) {}
    KPSum(float width, float stretch, float shrink) :
        width(width), stretch(stretch), shrink(shrink) {}
    virtual ~KPSum() {}
};

// Component-wise difference of two sums (lhs taken by value and mutated).
inline KPSum operator-(KPSum lhs, const KPSum& rhs) {
    lhs.width -= rhs.width;
    lhs.stretch -= rhs.stretch;
    lhs.shrink -= rhs.shrink;
    return lhs;
}

// Base class for all nodes fed to the formatter; value holds the text (if any)
// the node represents.
struct KPNode {
    KPNodeType type;
    std::wstring value;

    KPNode(KPNodeType type, std::wstring value) :
        type(type), value(value) {}
    virtual ~KPNode() {}
};

// Stretchable/shrinkable inter-word space.
struct KPGlue : public KPNode {
    float width, stretch, shrink;

    KPGlue(float width, float stretch, float shrink, std::wstring value) :
        KPNode(KPNodeType::Glue, value),
        width(width), stretch(stretch), shrink(shrink) {}
    virtual ~KPGlue() {}
};

// Unbreakable fixed-width content (typically one word or glyph cluster).
struct KPBox : public KPNode {
    float width;

    KPBox(float width, std::wstring value) :
        KPNode(KPNodeType::Box, value),
        width(width) {}
    virtual ~KPBox() {}
};

// A permissible break location with an associated aesthetic penalty.
struct KPPenalty : public KPNode {
    float width, penalty;
    // NOTE(review): flagged is conceptually a boolean marker (e.g. hyphen
    // breaks) in Knuth-Plass but is stored as float here — confirm intent.
    float flagged;

    KPPenalty(float width, float penalty, float flagged) :
        KPNode(KPNodeType::Penalty, L""),
        width(width), penalty(penalty), flagged(flagged) {}
    virtual ~KPPenalty() {}
};

// A chosen breakpoint in the dynamic program; `previous` chains back to the
// breakpoint of the prior line, forming the solution path.
struct KPBreakpoint {
    int position;       // index of the node broken at
    int demerits;       // accumulated badness of the path ending here
    float ratio;        // adjustment ratio of the line ending here
    int line;           // line number of this breakpoint
    int fitnessClass;   // tightness class used to penalize adjacent mismatches
    KPSum totals;       // running width/stretch/shrink totals at this node
    std::shared_ptr<KPBreakpoint> previous;

    KPBreakpoint(int position, int demerits, float ratio, int line,
                 int fitnessClass, KPSum sum, std::shared_ptr<KPBreakpoint> previous) :
        position(position), demerits(demerits), ratio(ratio), line(line),
        fitnessClass(fitnessClass), totals(sum), previous(previous) {}
    virtual ~KPBreakpoint() {}
};

// Transient candidate considered while scanning for the best parent breakpoint.
struct KPBreakpointCandidate {
    std::shared_ptr<KPBreakpoint> parent;
    int demerits;
    float ratio;

    // Constructs a parentless candidate (used as an initial/worst marker).
    KPBreakpointCandidate(int demerits) : demerits(demerits), ratio(0) {}
    KPBreakpointCandidate(std::shared_ptr<KPBreakpoint> parent, int demerits, float ratio) :
        parent(parent), demerits(demerits), ratio(ratio) {}
    virtual ~KPBreakpointCandidate() {}
};

// Final output: where to break, and the glue adjustment ratio for that line.
struct VROBreakpoint {
    int position;
    float ratio;

    VROBreakpoint(int position, float ratio) :
        position(position), ratio(ratio) {}
    virtual ~VROBreakpoint() {}
};

// Demerit weights used when scoring candidate breaks.
struct KPDemerits {
    int line;
    int flagged;
    int fitness;
};

// Formatter tuning parameters.
struct KPOptions {
    KPDemerits demerits;
    int tolerance;  // maximum acceptable adjustment ratio
};

/*
 Formats (justifies) text according to the Knuth Plass dynamic programming
 algorithm. See here for details on the algorithm:

 http://defoe.sourceforge.net/folio/knuth-plass.html
 */
class VROKnuthPlassFormatter {

public:

    VROKnuthPlassFormatter(std::vector<std::shared_ptr<KPNode>> &nodes,
                           std::vector<float> &lineLengths,
                           float tolerance);

    // Runs the dynamic program and returns the optimal breakpoints in order.
    std::vector<VROBreakpoint> run();

private:

    std::vector<std::shared_ptr<KPNode>> _nodes;
    std::vector<float> _lineLengths;
    KPOptions _options;

    /*
     Find all the candidate breakpoints for the given node. There will be at
     most one candidate created per existing parent breakpoint. The found
     candidates will be added to the breakpoint list. Existing breakpoints in
     the list that are no longer optimal will be removed from the list.
     */
    void findCandidateBreakpoints(std::shared_ptr<KPNode> &node, int nodeIndex,
                                  KPSum &sum,
                                  std::list<std::shared_ptr<KPBreakpoint>> &breakpoints) const;

    // Totals from the given sum up to (but excluding) the next box after breakpointIndex.
    KPSum computeSum(const KPSum &sum, int breakpointIndex) const;

    // Adjustment-ratio cost of a line from parent's break to the current node.
    float computeCost(const KPSum &sumFromParentToNode,
                      std::shared_ptr<KPBreakpoint> &parent,
                      int currentLine) const;

};

#endif /* VROKnuthPlassFormatter_hpp */
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg CLI as SessionRunHook."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.training import session_run_hook

# The prefix for GRPC endpoint URLs.
_GRPC_ENDPOINT_PREFIX = "grpc://"


class LocalCLIDebugHook(session_run_hook.SessionRunHook,
                        local_cli_wrapper.LocalCLIDebugWrapperSession):
  """Command-line-interface debugger hook.

  Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
  `tf.contrib.learn`'s `Estimator`s and `Experiment`s.
  """

  def __init__(self, ui_type="curses"):
    """Create a local debugger command-line interface (CLI) hook.

    Args:
      ui_type: (str) user-interface type.
    """

    self._ui_type = ui_type
    # The wrapper-session base class is initialized lazily in before_run()
    # because the tf.Session object is only available then.
    self._wrapper_initialized = False
    # Filters registered before the wrapper session exists are buffered here
    # and flushed into the wrapper on first before_run().
    self._pending_tensor_filters = {}

  def add_tensor_filter(self, filter_name, tensor_filter):
    """Add a tensor filter.

    See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
    Override default behavior to accommodate the possibility of this method
    being called prior to the initialization of the underlying
    `LocalCLIDebugWrapperSession` object.

    Args:
      filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`
        for details.
      tensor_filter: See doc of
        `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
    """

    if self._wrapper_initialized:
      local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
          self, filter_name, tensor_filter)
    else:
      self._pending_tensor_filters[filter_name] = tensor_filter

  def begin(self):
    # No graph-construction-time work is needed; everything happens per run.
    pass

  def before_run(self, run_context):
    # Lazily initialize the underlying wrapper session on the first run,
    # when run_context.session becomes available.
    if not self._wrapper_initialized:
      local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
          self, run_context.session, ui_type=self._ui_type)

      # Actually register tensor filters registered prior to the construction
      # of the underlying LocalCLIDebugWrapperSession object.
      for filter_name in self._pending_tensor_filters:
        local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
            self, filter_name, self._pending_tensor_filters[filter_name])

      self._wrapper_initialized = True

    # Increment run call counter.
    self._run_call_count += 1

    # Adapt run_context to an instance of OnRunStartRequest for invoking
    # superclass on_run_start().
    on_run_start_request = framework.OnRunStartRequest(
        run_context.original_args.fetches, run_context.original_args.feed_dict,
        None, None, self._run_call_count)

    on_run_start_response = self.on_run_start(on_run_start_request)
    # Remembered so after_run() can report what action the CLI user chose.
    self._performed_action = on_run_start_response.action

    run_args = session_run_hook.SessionRunArgs(
        None, feed_dict=None, options=config_pb2.RunOptions())
    if self._performed_action == framework.OnRunStartAction.DEBUG_RUN:
      self._decorate_options_for_debug(run_args.options,
                                       run_context.session.graph)
    elif self._performed_action == framework.OnRunStartAction.INVOKE_STEPPER:
      # The _finalized property must be set to False so that the NodeStepper
      # can insert ops for retrieving TensorHandles.
      # pylint: disable=protected-access
      run_context.session.graph._finalized = False
      # pylint: enable=protected-access

      with stepper.NodeStepper(
          run_context.session,
          run_context.original_args.fetches,
          run_context.original_args.feed_dict) as node_stepper:
        self.invoke_node_stepper(
            node_stepper, restore_variable_values_on_exit=True)

    return run_args

  def after_run(self, run_context, run_values):
    # Adapt run_context and run_values to OnRunEndRequest and invoke superclass
    # on_run_end()
    on_run_end_request = framework.OnRunEndRequest(self._performed_action,
                                                   run_values.run_metadata)
    self.on_run_end(on_run_end_request)

  def _decorate_options_for_debug(self, options, graph):
    """Modify RunOptions.debug_options.debug_tensor_watch_opts for debugging.

    Args:
      options: (config_pb2.RunOptions) The RunOptions instance to be modified.
      graph: A TensorFlow Graph object.
    """

    debug_utils.watch_graph(
        options, graph, debug_urls=self._get_run_debug_urls())
    # Partition graphs are needed by the CLI to map watched tensors back to ops.
    options.output_partition_graphs = True


class DumpingDebugHook(session_run_hook.SessionRunHook,
                       dumping_wrapper.DumpingDebugWrapperSession):
  """A debugger hook that dumps debug data to filesystem.

  Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
  `tf.contrib.learn`'s `Estimator`s and `Experiment`s.
  """

  def __init__(self, session_root, watch_fn=None, log_usage=True):
    """Create a local debugger command-line interface (CLI) hook.

    Args:
      session_root: See doc of
        `dumping_wrapper.DumpingDebugWrapperSession.__init__`.
      watch_fn: See doc of
        `dumping_wrapper.DumpingDebugWrapperSession.__init__`.
      log_usage: (bool) Whether usage is to be logged.
    """

    self._session_root = session_root
    self._watch_fn = watch_fn
    self._log_usage = log_usage
    # As in LocalCLIDebugHook, the wrapper base class is initialized lazily
    # in before_run() once the session exists.
    self._wrapper_initialized = False

  def begin(self):
    pass

  def before_run(self, run_context):
    if not self._wrapper_initialized:
      # TODO(cais): Make this hook have a DumpingDebugWrapperSession property
      # instead of subclassing DumpingDebugWrapperSession.
      dumping_wrapper.DumpingDebugWrapperSession.__init__(
          self,
          run_context.session,
          self._session_root,
          watch_fn=self._watch_fn,
          log_usage=self._log_usage)
      self._wrapper_initialized = True

    self._run_call_count += 1

    debug_urls, watch_options = self._prepare_run_watch_config(
        run_context.original_args.fetches, run_context.original_args.feed_dict)
    run_options = config_pb2.RunOptions()
    # Instrument the graph so every watched tensor is dumped to debug_urls.
    debug_utils.watch_graph(
        run_options,
        run_context.session.graph,
        debug_urls=debug_urls,
        debug_ops=watch_options.debug_ops,
        node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
        op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
        tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
        tolerate_debug_op_creation_failures=(
            watch_options.tolerate_debug_op_creation_failures))

    run_args = session_run_hook.SessionRunArgs(
        None, feed_dict=None, options=run_options)
    return run_args

  def after_run(self, run_context, run_values):
    pass


class GrpcDebugHook(session_run_hook.SessionRunHook):
  """A hook that streams debugger-related events to any grpc_debug_server.

  For example, the debugger data server is a grpc_debug_server. The debugger
  data server writes debugger-related events it receives via GRPC to logdir.
  This enables debugging features in Tensorboard such as health pills.

  When the arguments of debug_utils.watch_graph changes, strongly consider
  changing arguments here too so that features are available to tflearn users.

  Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
  `tf.contrib.learn`'s `Estimator`s and `Experiment`s.
  """

  def __init__(self, grpc_debug_server_addresses, watch_fn=None,
               log_usage=True):
    """Constructs a GrpcDebugHook.

    Args:
      grpc_debug_server_addresses: (`list` of `str`) A list of the gRPC debug
        server addresses, in the format of <host:port>, without the "grpc://"
        prefix. For example: ["localhost:7000", "192.168.0.2:8000"]
      watch_fn: A function that allows for customizing which ops to watch at
        which specific steps. See doc of
        `dumping_wrapper.DumpingDebugWrapperSession.__init__` for details.
      log_usage: (bool) Whether usage is to be logged.

    Raises:
      ValueError: if any debugger server addresses start with grpc://.
    """

    for address in grpc_debug_server_addresses:
      if address.startswith(_GRPC_ENDPOINT_PREFIX):
        raise ValueError(
            ("Debug server address %r starts with %r. It should not because "
             "the hook already automatically adds the prefix.") % (
                 address, _GRPC_ENDPOINT_PREFIX))

    # A wrapper session responsible for GRPC communication.
    self._grpc_debug_wrapper_session = None

    self._grpc_debug_server_addresses = grpc_debug_server_addresses
    self._watch_fn = watch_fn
    self._log_usage = log_usage

  def before_run(self, run_context):
    """Called right before a session is run.

    Args:
      run_context: A session_run_hook.SessionRunContext. Encapsulates
        information on the run.

    Returns:
      A session_run_hook.SessionRunArgs object.
    """

    # Lazily create the GRPC wrapper session on the first run.
    if not self._grpc_debug_wrapper_session:
      self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(
          run_context.session,
          self._grpc_debug_server_addresses,
          watch_fn=self._watch_fn,
          log_usage=self._log_usage)

    fetches = run_context.original_args.fetches
    feed_dict = run_context.original_args.feed_dict
    # NOTE(review): watch_fn defaults to None in __init__ but is called
    # unconditionally here, which would raise TypeError — confirm that callers
    # of this hook are required to pass a watch_fn.
    watch_options = self._watch_fn(fetches, feed_dict)
    run_options = config_pb2.RunOptions()
    debug_utils.watch_graph(
        run_options,
        run_context.session.graph,
        debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(
            fetches, feed_dict),
        debug_ops=watch_options.debug_ops,
        node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
        op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
        tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
        tolerate_debug_op_creation_failures=(
            watch_options.tolerate_debug_op_creation_failures))

    return session_run_hook.SessionRunArgs(
        None, feed_dict=None, options=run_options)
/* eslint-disable func-names */
import $ from 'jquery';
import createFlash from '~/flash';
import Api from './api';
import { loadCSSFile } from './lib/utils/css_utils';
import { s__ } from './locale';
import ProjectSelectComboButton from './project_select_combo_button';

// Turns every `.ajax-project-select` element on the page into a select2
// dropdown that searches projects -- and optionally groups -- through the API.
const projectSelect = async () => {
  // select2 styles are loaded on demand so pages without the widget don't
  // pay for them.
  await loadCSSFile(gon.select2_css_path);

  $('.ajax-project-select').each(function (i, select) {
    let placeholder;
    // `simpleFilter` makes the option id the raw project id instead of a
    // serialized {name, url} payload, and skips the combo-button wrapper.
    const simpleFilter = $(select).data('simpleFilter') || false;
    const isInstantiated = $(select).data('select2');
    // NOTE: `this` is the current DOM element inside jQuery's each(); the
    // data-* options are stashed on it so the `query` arrow function below,
    // which closes over `this`, can read them.
    this.groupId = $(select).data('groupId');
    this.userId = $(select).data('userId');
    this.includeGroups = $(select).data('includeGroups');
    this.allProjects = $(select).data('allProjects') || false;
    this.orderBy = $(select).data('orderBy') || 'id';
    this.withIssuesEnabled = $(select).data('withIssuesEnabled');
    this.withMergeRequestsEnabled = $(select).data('withMergeRequestsEnabled');
    // Shared projects are included unless the attribute explicitly says no.
    this.withShared =
      $(select).data('withShared') === undefined ? true : $(select).data('withShared');
    this.includeProjectsInSubgroups = $(select).data('includeProjectsInSubgroups') || false;
    this.allowClear = $(select).data('allowClear') || false;

    placeholder = s__('ProjectSelect|Search for project');
    if (this.includeGroups) {
      placeholder += s__('ProjectSelect| or group');
    }

    $(select).select2({
      placeholder,
      minimumInputLength: 0,
      // select2 calls this with the typed term; results are delivered
      // asynchronously through query.callback.
      query: (query) => {
        let projectsCallback;
        const finalCallback = function (projects) {
          const data = {
            results: projects,
          };
          return query.callback(data);
        };

        if (this.includeGroups) {
          // Prepend matching groups to the project results before handing
          // everything to select2.
          projectsCallback = function (projects) {
            const groupsCallback = function (groups) {
              const data = groups.concat(projects);
              return finalCallback(data);
            };
            return Api.groups(query.term, {}, groupsCallback);
          };
        } else {
          projectsCallback = finalCallback;
        }

        // Scope the search: group projects, then user projects, then all
        // projects the current user can see.
        if (this.groupId) {
          return Api.groupProjects(
            this.groupId,
            query.term,
            {
              with_issues_enabled: this.withIssuesEnabled,
              with_merge_requests_enabled: this.withMergeRequestsEnabled,
              with_shared: this.withShared,
              include_subgroups: this.includeProjectsInSubgroups,
              order_by: 'similarity',
              simple: true,
            },
            projectsCallback,
          ).catch(() => {
            createFlash({
              message: s__('ProjectSelect|Something went wrong while fetching projects'),
            });
          });
        } else if (this.userId) {
          return Api.userProjects(
            this.userId,
            query.term,
            {
              with_issues_enabled: this.withIssuesEnabled,
              with_merge_requests_enabled: this.withMergeRequestsEnabled,
              with_shared: this.withShared,
              include_subgroups: this.includeProjectsInSubgroups,
            },
            projectsCallback,
          );
        }
        return Api.projects(
          query.term,
          {
            order_by: this.orderBy,
            with_issues_enabled: this.withIssuesEnabled,
            with_merge_requests_enabled: this.withMergeRequestsEnabled,
            membership: !this.allProjects,
          },
          projectsCallback,
        );
      },
      id(project) {
        if (simpleFilter) return project.id;
        return JSON.stringify({
          name: project.name,
          url: project.web_url,
        });
      },
      text(project) {
        return project.name_with_namespace || project.name;
      },
      // Resolves a pre-selected value (the element's current val) back into
      // project data for display.
      initSelection(el, callback) {
        return Api.project(el.val()).then(({ data }) => callback(data));
      },
      allowClear: this.allowClear,
      dropdownCssClass: 'ajax-project-dropdown',
    });
    // Already-initialized or simple-filter selects don't get the combo button.
    if (isInstantiated || simpleFilter) return select;
    return new ProjectSelectComboButton(select);
  });
};

export default () => {
  // Lazily pull in the select2 bundle only when the page has a matching
  // element; failures to load are deliberately ignored.
  if ($('.ajax-project-select').length) {
    import(/* webpackChunkName: 'select2' */ 'select2/select2')
      .then(projectSelect)
      .catch(() => {});
  }
};
import React from 'react'; import Modal from '../../UI/Modal/Modal'; const confirmDelete = props => { return ( <Modal modalClosed={props.cancelConfirmDelete} show={props.showConfirmDelete}> <div className="panel-group modal-panel"> <div className="panel panel-danger"> <div className="panel-heading">Confirmer</div> <div className="panel-body"> <p>Etes-vous sûr de vouloir supprimer "{props.objectName}"</p> <button className="btn btn-success" onClick={props.onDelete}> Confirmer </button> <button className="btn btn-default" onClick={props.cancelConfirmDelete}> Annuler </button> </div> </div> </div> </Modal> ); }; export default confirmDelete;
import sys
import unittest
from test import support
from test.support import import_helper

# Skips the whole module on platforms without the pwd module (e.g. Windows).
pwd = import_helper.import_module('pwd')

@unittest.skipUnless(hasattr(pwd, 'getpwall'), 'Does not have getpwall()')
class PwdTest(unittest.TestCase):
    """Tests for the pwd module's password-database accessors."""

    def test_values(self):
        """Each getpwall() entry is a 7-tuple whose indices match the named fields."""
        entries = pwd.getpwall()

        for e in entries:
            self.assertEqual(len(e), 7)
            self.assertEqual(e[0], e.pw_name)
            self.assertIsInstance(e.pw_name, str)
            self.assertEqual(e[1], e.pw_passwd)
            self.assertIsInstance(e.pw_passwd, str)
            self.assertEqual(e[2], e.pw_uid)
            self.assertIsInstance(e.pw_uid, int)
            self.assertEqual(e[3], e.pw_gid)
            self.assertIsInstance(e.pw_gid, int)
            self.assertEqual(e[4], e.pw_gecos)
            self.assertIsInstance(e.pw_gecos, str)
            self.assertEqual(e[5], e.pw_dir)
            self.assertIsInstance(e.pw_dir, str)
            self.assertEqual(e[6], e.pw_shell)
            self.assertIsInstance(e.pw_shell, str)

            # The following won't work, because of duplicate entries
            # for one uid:
            #   self.assertEqual(pwd.getpwuid(e.pw_uid), e)
            # instead of this collect all entries for one uid
            # and check afterwards (done in test_values_extended)

    def test_values_extended(self):
        """getpwnam()/getpwuid() agree with some getpwall() entry for each name/uid."""
        entries = pwd.getpwall()
        entriesbyname = {}
        entriesbyuid = {}

        if len(entries) > 1000:  # Huge passwd file (NIS?) -- skip this test
            self.skipTest('passwd file is huge; extended test skipped')

        # Group entries by name and by uid, since both may have duplicates.
        for e in entries:
            entriesbyname.setdefault(e.pw_name, []).append(e)
            entriesbyuid.setdefault(e.pw_uid, []).append(e)

        # check whether the entry returned by getpwuid()
        # for each uid is among those from getpwall() for this uid
        for e in entries:
            if not e[0] or e[0] == '+':
                continue # skip NIS entries etc.
            self.assertIn(pwd.getpwnam(e.pw_name), entriesbyname[e.pw_name])
            self.assertIn(pwd.getpwuid(e.pw_uid), entriesbyuid[e.pw_uid])

    def test_errors(self):
        """Wrong argument types raise TypeError; unknown names/uids raise KeyError."""
        self.assertRaises(TypeError, pwd.getpwuid)
        self.assertRaises(TypeError, pwd.getpwuid, 3.14)
        self.assertRaises(TypeError, pwd.getpwnam)
        self.assertRaises(TypeError, pwd.getpwnam, 42)
        self.assertRaises(TypeError, pwd.getpwall, 42)

        # try to get some errors
        bynames = {}
        byuids = {}
        for (n, p, u, g, gecos, d, s) in pwd.getpwall():
            bynames[n] = u
            byuids[u] = n

        allnames = list(bynames.keys())
        namei = 0
        fakename = allnames[namei]
        # Perturb an existing name character-by-character until it no longer
        # appears in the database, producing a guaranteed-unknown user name.
        while fakename in bynames:
            chars = list(fakename)
            for i in range(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'
                    break
                elif chars[i] == 'Z':
                    continue
                else:
                    chars[i] = chr(ord(chars[i]) + 1)
                    break
            else:
                # Every character was exhausted; move on to the next name.
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            fakename = ''.join(chars)

        self.assertRaises(KeyError, pwd.getpwnam, fakename)

        # In some cases, byuids isn't a complete list of all users in the
        # system, so if we try to pick a value not in byuids (via a perturbing
        # loop, say), pwd.getpwuid() might still be able to find data for that
        # uid. Using sys.maxint may provoke the same problems, but hopefully
        # it will be a more repeatable failure.
        fakeuid = sys.maxsize
        self.assertNotIn(fakeuid, byuids)
        self.assertRaises(KeyError, pwd.getpwuid, fakeuid)

        # -1 shouldn't be a valid uid because it has a special meaning in many
        # uid-related functions
        self.assertRaises(KeyError, pwd.getpwuid, -1)
        # should be out of uid_t range
        self.assertRaises(KeyError, pwd.getpwuid, 2**128)
        self.assertRaises(KeyError, pwd.getpwuid, -2**128)

if __name__ == "__main__":
    unittest.main()
/*! pinpoint - v1.5.2 */ !function(){"use strict";angular.module("pinpointApp").filter("iconUrl",function(){return function(a){var b="/images/icons/";if(angular.isString(a)){switch(a){case"UNKNOWN_GROUP":b+="UNKNOWN.png";break;default:b+=a+".png"}return b}return""}})}(),function(){"use strict";angular.module("pinpointApp").filter("applicationNameToClassName",function(){return function(a){return a.replace(/[\.\^:]/gi,"_")}})}(),function(){"use strict";pinpointApp.factory("TimeSliderVoService",function(){return function(){this._nFrom=!1,this._nTo=!1,this._nInnerFrom=!1,this._nInnerTo=!1,this._nCount=!1,this._nTotal=!1,this.setFrom=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setFrom, It should be number. "+a);if(angular.isNumber(this._nTo)&&a>=this._nTo)throw"timeSliderVoService:setFrom, It should be smaller than To value.";return this._nFrom=a,this}.bind(this),this.getFrom=function(){return this._nFrom}.bind(this),this.setTo=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setTo It should be number. "+a);if(angular.isNumber(a)&&this._nFrom>=a)throw"timeSliderVoService:setTo, It should be bigger than From value.";return this._nTo=a,this}.bind(this),this.getTo=function(){return this._nTo}.bind(this),this.setInnerFrom=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVo:setInnerFrom It should be number. "+a);if(angular.isNumber(this._nInnerTo)&&a>=this._nInnerTo)throw"timeSliderVo:setInnerFrom, It should be smaller than InnerTo value.";return this._nInnerFrom=a,this}.bind(this),this.getInnerFrom=function(){return this._nInnerFrom}.bind(this),this.setInnerTo=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setInnerTo It should be number. 
"+a);if(angular.isNumber(this._nInnerFrom)&&this._nInnerFrom>=a)throw"timeSliderVoService:setInnerTo, It should be bigger than InnerFrom value.";return this._nInnerTo=a,this}.bind(this),this.getInnerTo=function(){return this._nInnerTo}.bind(this),this.setCount=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setCount It should be number. "+a);if(angular.isNumber(this._nTotal)&&a>this._nTotal)throw"timeSliderVoService:setCount, It should be smaller than Total value.";return this._nCount=a,this}.bind(this),this.addCount=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setCount It should be number. "+a);if(angular.isNumber(this._nTotal)&&a+this._nCount>this._nTotal)throw"timeSliderVoService:setCount, It should be smaller than Total value.";return this._nCount+=a,this}.bind(this),this.getCount=function(){return this._nCount}.bind(this),this.setTotal=function(a){if(a=parseInt(a,10),!angular.isNumber(a))throw new Error("timeSliderVoService:setTotal It should be number. 
"+a);if(angular.isNumber(this._nCount)&&this._nCount>a)throw"timeSliderVoService:setTotal, It should be bigger than Count value.";return this._nTotal=a,this}.bind(this),this.getTotal=function(){return this._nTotal}.bind(this),this.getReady=function(){return this._nFrom&&this._nTo&&this._nInnerFrom&&this._nInnerTo}.bind(this)}})}(),function(){"use strict";pinpointApp.factory("AlertsService",["$timeout",function(a){return function(b){this.$parent=b||null,this.setParent=function(a){return this.$parent=a,this}.bind(this),this.getParent=function(){return this.$parent}.bind(this),this.showError=function(b){a(function(){this.getElement(".error").show(),"string"==typeof b?this.getElement(".error .msg").text(b):(this.getElement(".error .msg").text(b.message),this.getElement(".error .method").text(b.request.method),this.getElement(".error .header").html(this._transTableFormat(b.request.heads)),this.getElement(".error .parameters").html(this._transTableFormat(b.request.parameters)),this.getElement(".error .url").text(b.request.url),this.getElement(".error .stacktrace").text(b.stacktrace))}.bind(this),300)}.bind(this),this.hideError=function(){a(function(){this.getElement(".error").hide()}.bind(this))}.bind(this),this.showWarning=function(b){a(function(){this.getElement(".warning").show(),this.getElement(".warning .msg").text(b)}.bind(this),300)}.bind(this),this.hideWarning=function(){a(function(){this.getElement(".warning").hide()}.bind(this))}.bind(this),this.showInfo=function(b){a(function(){this.getElement(".info").show(),this.getElement(".info .msg").html(b)}.bind(this),300)}.bind(this),this.hideInfo=function(){a(function(){this.getElement(".info").hide()}.bind(this))}.bind(this),this.getElement=function(a){return this.$parent?$(a,this.$parent):$(a)}.bind(this),this._transTableFormat=function(a){var b=["<table>"];for(var c in a)b.push("<tr>"),b.push("<td>"+c+"</td><td style='padding-left:20px'>"+a[c]+"</td>"),b.push("</tr>");return 
b.push("</table>"),b.join("")}}}])}(),function(){"use strict";pinpointApp.factory("ProgressBarService",["$timeout","$window","$location","UserLocalesService",function(a,b,c,d){var e=["ko","en"],f=5,g="__HIDE_LOADING_TIP";return function(h){this.$parent=h||null,this.nPercentage=0,this.bAutoIncrease=!0,this.nTimePromise=null,this.setParent=function(a){return this.$parent=a,this}.bind(this),this.getParent=function(){return this.$parent}.bind(this),this.startLoading=function(d){var e=!0;if(b.localStorage){var f=b.localStorage.getItem(g)||"-";e="-"===f?!0:!((new Date).valueOf()<parseInt(f))}this.bAutoIncrease=d||!0,this.setLoading(0),a(function(){/^\/main/.test(c.path())&&e&&(this.showBackground(),this.showTip()),this.getProgress().show(),this.autoIncrease()}.bind(this))}.bind(this),this.stopLoading=function(){a.cancel(this.nTimePromise),a(function(){this.getProgress().hide(),this.hideTip(),this.hideBackground()}.bind(this),300)}.bind(this),this.setLoading=function(a){return this.nPercentage=a,this.getProgressBar().width(a+"%"),this}.bind(this),this.getProgress=function(){return this.$parent?$(".progress",this.$parent):$(".progress")}.bind(this),this.getProgressBar=function(){return this.$parent?$(".progress .bar",this.$parent):$(".progress .bar")}.bind(this),this.autoIncrease=function(){if(this.bAutoIncrease!==!1){var b=_.random(1,4);this.nPercentage+b<=99&&(this.setLoading(this.nPercentage+b),this.nTimePromise=a(function(){this.autoIncrease()}.bind(this),500))}}.bind(this),this.showBackground=function(){}.bind(this),this.showTip=function(){}.bind(this),this._getRandomNum=function(){var a=parseInt(Math.random()*(f+1));return 10>a?"0"+a:a+""}.bind(this),this._getLocale=function(){return-1==e.indexOf(d.userLocale)?d.defaultLocale:d.userLocale}.bind(this),this.hideBackground=function(){this.$parent&&$(".progress-back",this.$parent).hide()}.bind(this),this.hideTip=function(){this.$parent&&$(".progress-tip",this.$parent).hide()}.bind(this)}}])}(),function(){"use 
strict";pinpointApp.factory("NavbarVoService",["PreferenceService",function(a){return function(){var b=this;this._sApplication=!1,this._periodType="",this._nPeriod=!1,this._nQueryEndTime=!1,this._sFilter=!1,this._sAgentId=!1,this._nQueryPeriod=!1,this._nQueryStartTime=!1,this._sReadablePeriod=!1,this._sQueryEndDateTime=!1,this._nCalleeRange=a.getCallee(),this._nCallerRange=a.getCaller(),this._sHint=!1,this._sDateTimeFormat="YYYY-MM-DD-HH-mm-ss",this.setApplication=function(a){return angular.isString(a)&&a.indexOf("@")>0&&(b._sApplication=a),b},this.getApplication=function(){return b._sApplication},this.setPeriod=function(a){return angular.isNumber(a)&&a>0&&(b._nPeriod=a),b},this.getPeriod=function(){return b._nPeriod},this.setQueryEndTime=function(a){return angular.isNumber(a)&&a>0&&(b._nQueryEndTime=a),b},this.getQueryEndTime=function(){return b._nQueryEndTime},this.getQueryPeriod=function(){return b._nQueryPeriod},this.getApplicationName=function(){return b._sApplication.split("@")[0]},this.getServiceTypeName=function(){return b._sApplication.split("@")[1]},this.getCalleeRange=function(){return b._nCalleeRange},this.getCallerRange=function(){return b._nCallerRange},this.setCalleeRange=function(a){b._nCalleeRange=a},this.setCallerRange=function(a){b._nCallerRange=a},this.setQueryStartTime=function(a){return angular.isNumber(a)&&a>0&&(b._nQueryStartTime=a),b},this.getQueryStartTime=function(){return b._nQueryStartTime},this.getReady=function(){return b._sApplication&&b._nPeriod&&b._nQueryEndTime},this.setFilter=function(a){return angular.isString(a)&&(b._sFilter=a),b},this.getFilter=function(){return b._sFilter},this.getFilterAsJson=function(){return JSON.parse(b._sFilter)},this.setHint=function(a){return angular.isString(a)&&(b._sHint=a),b},this.getHint=function(){return b._sHint},this.setAgentId=function(a){return angular.isString(a)&&(b._sAgentId=a),b},this.getAgentId=function(){return b._sAgentId},this.setReadablePeriod=function(a){var 
c=/^(\d)+(s|m|h|d|w|M|y)$/,d=c.exec(a);if(d){b._sReadablePeriod=a;var e=parseInt(a,10);switch(d[2]){case"m":b.setPeriod(60*e);break;case"h":b.setPeriod(60*e*60);break;case"d":b.setPeriod(60*e*60*24);break;case"w":b.setPeriod(60*e*60*24*7);break;case"M":b.setPeriod(60*e*60*24*30);break;case"y":b.setPeriod(60*e*60*24*30*12);break;default:b.setPeriod(e)}}else"realtime"===a&&(b.setPeriodType("realtime"),b._sReadablePeriod="1m",b.setPeriod(300));return b},this.isRealtime=function(){return"realtime"===this._periodType},this.getReadablePeriod=function(){return b._sReadablePeriod},this.setQueryEndDateTime=function(a){var c=/^(19[7-9][0-9]|20\d{2})-(0[0-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])-(0[0-9]|1[0-9]|2[0-3])-([0-5][0-9])-([0-5][0-9])$/;return c.test(a)&&(b._sQueryEndDateTime=a,b.setQueryEndTime(b._parseQueryEndDateTimeToTimestamp(a))),b},this.getQueryEndDateTime=function(){return b._sQueryEndDateTime},this._parseQueryEndDateTimeToTimestamp=function(a){return moment(a,b._sDateTimeFormat).valueOf()},this.autoCalculateByQueryEndTimeAndPeriod=function(){return b._nQueryPeriod=1e3*b._nPeriod*60,b._nQueryStartTime=b._nQueryEndTime-b._nQueryPeriod,b},this.autoCalcultateByQueryStartTimeAndQueryEndTime=function(){return b._nQueryPeriod=b._nQueryEndTime-b._nQueryStartTime,b._nPeriod=b._nQueryPeriod/1e3/60,b._sReadablePeriod=b._nQueryPeriod/1e3/60+"m",b._sQueryEndDateTime=moment(b._nQueryEndTime).format(b._sDateTimeFormat),b},this.autoCalculateByQueryEndDateTimeAndReadablePeriod=function(){return b._nQueryPeriod=1e3*b._nPeriod,b._nQueryStartTime=b._nQueryEndTime-b._nQueryPeriod,b},this.getPartialURL=function(a,c){return(a?b.getApplication()+"/":"")+b.getReadablePeriod()+"/"+b.getQueryEndDateTime()+(c&&b.getFilter()?"/"+b.getFilter():"")},this.setPeriodType=function(a){this._periodType=a},this.getPeriodType=function(){return this._periodType}}}])}(),function(){"use 
strict";pinpointApp.constant("TransactionDaoServiceConfig",{transactionInfoUrl:"/transactionInfo.pinpoint"}),pinpointApp.service("TransactionDaoService",["TransactionDaoServiceConfig","$timeout","$window",function(a,b,c){b(function(){c.transactionData={}}),this.addData=function(a,b,d){c.transactionData[a]=b},this.getDataByName=function(a,b){angular.isFunction(b)&&b(opener.transactionData[a]||{})},this.getTransactionDetail=function(b,c,d){jQuery.ajax({type:"GET",url:a.transactionInfoUrl,cache:!1,dataType:"json",data:{traceId:b,focusTimestamp:c},success:function(a){angular.isFunction(d)&&d(null,a)},error:function(a,b,c){angular.isFunction(d)&&d("ERROR",{})}})}}])}(),function(){"use strict";pinpointApp.factory("locationService",["$location","$route","$rootScope",function(a,b,c){return a.skipReload=function(){var d=b.current,e=c.$on("$locationChangeSuccess",function(){b.current=d,e()});return a},a}])}(),function(){"use strict";pinpointApp.constant("serverMapDaoServiceConfig",{serverMapDataUrl:"/getServerMapData.pinpoint",filteredServerMapDataUrl:"/getFilteredServerMapDataMadeOfDotGroup.pinpoint",filtermapUrl:"/filtermap.pinpoint",lastTransactionListUrl:"/lastTransactionList.pinpoint",transactionListUrl:"/transactionList.pinpoint",FILTER_DELIMETER:"^",FILTER_ENTRY_DELIMETER:"|",FILTER_FETCH_LIMIT:5e3}),pinpointApp.service("ServerMapDaoService",["serverMapDaoServiceConfig","PreferenceService",function(a,b){var c=this;this.getServerMapData=function(b,c){var d={applicationName:b.applicationName,from:b.from,to:b.to,callerRange:b.callerRange,calleeRange:b.calleeRange};isNaN(parseInt(b.serviceTypeName))?d.serviceTypeName=b.serviceTypeName:d.serviceTypeCode=b.serviceTypeName,jQuery.ajax({type:"GET",url:a.serverMapDataUrl,cache:!1,dataType:"json",data:d,success:function(a){angular.isFunction(c)&&c(null,b,a)},error:function(a,d,e){angular.isFunction(c)&&c(e,b,{})}})},this.getFilteredServerMapData=function(b,c){var 
d={applicationName:b.applicationName,from:b.from,to:b.to,originTo:b.originTo,filter:b.filter,limit:a.FILTER_FETCH_LIMIT,callerRange:b.callerRange,calleeRange:b.calleeRange,v:3,xGroupUnit:987,yGroupUnit:57};isNaN(parseInt(b.serviceTypeName))?d.serviceTypeName=b.serviceTypeName:d.serviceTypeCode=b.serviceTypeName,b.hint&&(d.hint=b.hint),jQuery.ajax({type:"GET",url:a.filteredServerMapDataUrl,cache:!1,dataType:"json",data:d,success:function(a){angular.isFunction(c)&&c(null,b,a)},error:function(a,d,e){angular.isFunction(c)&&c(e,b,{})}})},this.mergeFilteredMapData=function(a,b){return 0===a.linkDataArray.length&&0===a.nodeDataArray.length?(a.linkDataArray=b.linkDataArray,a.nodeDataArray=b.nodeDataArray):(angular.forEach(b.nodeDataArray,function(b,c){var d=this.findExistingNodeKeyFromLastMapData(a,b);d>=0?a=this.mergeNodeData(a,d,b):a.nodeDataArray.push(b)},this),angular.forEach(b.linkDataArray,function(b,c){var d=this.findExistingLinkFromLastMapData(a,b);d>=0?a=this.mergeLinkData(a,d,b):a.linkDataArray.push(b)},this)),a},this.findExistingNodeKeyFromLastMapData=function(a,b){for(var c in a.nodeDataArray)if(a.nodeDataArray[c].applicationName===b.applicationName&&a.nodeDataArray[c].serviceTypeCode===b.serviceTypeCode)return c;return-1},this.findExistingLinkFromLastMapData=function(a,b){for(var c in a.linkDataArray)if(a.linkDataArray[c].from===b.from&&a.linkDataArray[c].to===b.to)return c;return-1},this.addFilterProperty=function(a,b){var c=this.parseFilterText(a,b);return angular.forEach(b.nodeDataArray,function(a){angular.isDefined(_.findWhere(c,{nodeKey:a.key}))?a.isFiltered=!0:a.isFiltered=!1}),angular.forEach(b.linkDataArray,function(a,b){angular.isDefined(_.findWhere(c,{fromKey:a.from,toKey:a.to}))?a.isFiltered=!0:a.isFiltered=!1},this),b},this.parseFilterText=function(a,b){var c=[];return 
angular.forEach(a,function(a){c.push({fromServiceType:a.fst,fromApplication:a.fa,fromKey:this.findNodeKeyByApplicationName(a.fa,b),toServiceType:a.tst,toApplication:a.ta,toKey:this.findNodeKeyByApplicationName(a.ta,b),nodeKey:""})},this),c},this.findNodeKeyByApplicationName=function(a,b){var c=_.findWhere(b.nodeDataArray,{applicationName:a});return angular.isDefined(c)?c.key:!1},this.mergeNodeData=function(a,b,c){if(angular.isUndefined(a.nodeDataArray[b]))return a.nodeDataArray[b]=c,a;var d,e,f=a.nodeDataArray[b];if(f.errorCount+=c.errorCount,f.slowCount+=c.slowCount,f.totalCount+=c.totalCount,c.hasAlert&&(f.hasAlert=c.hasAlert),angular.isDefined(c.histogram))if(angular.isDefined(f.histogram))for(d in c.histogram)angular.isDefined(f.histogram[d])?f.histogram[d]+=c.histogram[d]:f.histogram[d]=c.histogram[d];else f.histogram=c.histogram;if(angular.isDefined(c.agentHistogram))for(d in c.agentHistogram)if(angular.isDefined(f.agentHistogram[d]))for(e in c.agentHistogram[d])angular.isDefined(f.agentHistogram[d][e])?f.agentHistogram[d][e]+=c.agentHistogram[d][e]:f.agentHistogram[d][e]=c.agentHistogram[d][e];else f.agentHistogram[d]=c.agentHistogram[d];if(angular.isDefined(c.timeSeriesHistogram))for(d in c.timeSeriesHistogram)if(angular.isDefined(f.timeSeriesHistogram)){var g=[];a:for(e in c.timeSeriesHistogram[d].values){for(var h in f.timeSeriesHistogram[d].values)if(f.timeSeriesHistogram[d].values[h][0]===c.timeSeriesHistogram[d].values[e][0]){f.timeSeriesHistogram[d].values[h][1]+=c.timeSeriesHistogram[d].values[e][1];continue a}g.push(c.timeSeriesHistogram[d].values[e])}f.timeSeriesHistogram[d].values=g.concat(f.timeSeriesHistogram[d].values)}else f.timeSeriesHistogram=c.timeSeriesHistogram;if(angular.isDefined(c.agentTimeSeriesHistogram))for(d in c.agentTimeSeriesHistogram)if(angular.isDefined(f.agentTimeSeriesHistogram))if(angular.isDefined(f.agentTimeSeriesHistogram[d]))for(e in 
c.agentTimeSeriesHistogram[d])angular.isDefined(f.agentTimeSeriesHistogram[d][e])?f.agentTimeSeriesHistogram[d][e].values=c.agentTimeSeriesHistogram[d][e].values.concat(f.agentTimeSeriesHistogram[d][e].values):f.agentTimeSeriesHistogram[d][e]=c.agentTimeSeriesHistogram[d][e];else f.agentTimeSeriesHistogram[d]=c.agentTimeSeriesHistogram[d];else f.agentTimeSeriesHistogram=c.agentTimeSeriesHistogram;if(angular.isDefined(c.serverList))for(d in c.serverList)if(f.serverList[d])for(e in c.serverList[d].instanceList)if(f.serverList[d].instanceList[e])for(var i in c.serverList[d].instanceList[e].histogram)f.serverList[d].instanceList[e].histogram[i]?f.serverList[d].instanceList[e].histogram[i]+=c.serverList[d].instanceList[e].histogram[i]:f.serverList[d].instanceList[e].histogram[i]=c.serverList[d].instanceList[e].histogram[i];else f.serverList[d].instanceList[e]=c.serverList[d].instanceList[e];else f.serverList[d]=c.serverList[d];return a},this.mergeLinkData=function(a,b,c){if(angular.isUndefined(a.linkDataArray[b]))return a.linkDataArray[b]=c,a;var d,e,f=a.linkDataArray[b];if(f.errorCount+=c.errorCount,f.slowCount+=c.slowCount,f.totalCount+=c.totalCount,c.hasAlert&&(f.hasAlert=c.hasAlert),angular.isDefined(c.histogram))for(d in c.histogram)f.histogram[d]?f.histogram[d]+=c.histogram[d]:f.histogram[d]=c.histogram[d];if(angular.isDefined(c.timeSeriesHistogram))for(d in c.timeSeriesHistogram)if(angular.isDefined(f.timeSeriesHistogram)){var g=[];a:for(e in c.timeSeriesHistogram[d].values){for(var h in f.timeSeriesHistogram[d].values)if(f.timeSeriesHistogram[d].values[h][0]===c.timeSeriesHistogram[d].values[e][0]){f.timeSeriesHistogram[d].values[h][1]+=c.timeSeriesHistogram[d].values[e][1];continue a}g.push(c.timeSeriesHistogram[d].values[e])}f.timeSeriesHistogram[d].values=g.concat(f.timeSeriesHistogram[d].values)}else f.timeSeriesHistogram=c.timeSeriesHistogram;if(angular.isDefined(c.sourceHistogram))for(d in c.sourceHistogram)if(angular.isDefined(f.sourceHistogram[d]))for(e 
in c.sourceHistogram[d])f.sourceHistogram[d][e]?f.sourceHistogram[d][e]+=c.sourceHistogram[d][e]:f.sourceHistogram[d][e]=c.sourceHistogram[d][e];else f.sourceHistogram[d]=c.sourceHistogram[d];if(angular.isDefined(c.targetHistogram))for(d in c.targetHistogram)if(angular.isDefined(f.targetHistogram[d]))for(e in c.targetHistogram[d])f.targetHistogram[d][e]?f.targetHistogram[d][e]+=c.targetHistogram[d][e]:f.targetHistogram[d][e]=c.targetHistogram[d][e];else f.targetHistogram[d]=c.targetHistogram[d];return a},this.mergeTimeSeriesResponses=function(a,b){return angular.forEach(b.values,function(b,c){angular.isUndefined(a.timeSeriesResponses.values[c])?a.timeSeriesResponses.values[c]=b:a.timeSeriesResponses.values[c]=_.union(b,a.timeSeriesResponses.values[c])},this),a.timeSeriesResponses.time=_.union(b.time,a.timeSeriesResponses.time),a},this.mergeGroup=function(a,b){var c=this,d=a.nodeDataArray,e=a.linkDataArray,f=c._getInboundCountMap(d,e);return b.forEach(function(a){var b=a+"_GROUP",g=[],h=[],i={},j={};d.forEach(function(k,l){var m,n,o=b+"_"+k.key,p=0;e.forEach(function(b,c){b.from==k.key&&b.targetInfo.serviceType==a&&f[b.to]&&1==f[b.to].toCount&&p++}),2>p||(e.forEach(function(e,g){e.targetInfo.serviceType==a&&(f[e.to]&&f[e.to].toCount>1||e.from==k.key&&(m||(m=c._createNewNode(o,b)),n||(n=c._createNewLink(k.key,o)),c._addToSubNode(m,c._getNodeByApplicationName(d,e.targetInfo.applicationName,a),function(){}),c._mergeLinkData(n,e),n.unknownLinkGroup.push(e),i[e.to]=null,j[e.key]=null))}),m&&(m.unknownNodeGroup.sort(function(a,b){return b.totalCount-a.totalCount}),g.push(m)),n&&(n.unknownLinkGroup.sort(function(a,b){return b.totalCount-a.totalCount}),h.push(n)))}),c._addToOriginal(d,g),c._addToOriginal(e,h),c._removeByKey(d,i),c._removeByKey(e,j)}),a},this._addToOriginal=function(a,b){b.forEach(function(b){a.push(b)})},this._getInboundCountMap=function(a,b){var c={};return a.forEach(function(a){var 
d={toCount:0,fromCount:0,totalCallCount:0};b.forEach(function(b){b.to===a.key&&(d.toCount++,d.totalCallCount+=b.totalCount),b.from===a.key&&d.fromCount++}),c[a.key]=d}),c},this._selectMergeTarget=function(a,b){var c=[];return a.forEach(function(a,d){b[a.key].fromCount>0||b[a.key].toCount<2||c.push(a)}),c},this._getFromNodes=function(a,b){var c=[];return a.forEach(function(a){a.to===b&&c.push(a.from)}),c},this.mergeMultiLinkGroup=function(a,b){var c=this,d=a.nodeDataArray,e=a.linkDataArray,f=this._getInboundCountMap(d,e),g=this._selectMergeTarget(d,f),h={},i=[],j=[],k={},l={};return g.forEach(function(a,d){if(h[a.key]!==!0||-1!=b.indexOf(a.serviceType)){h[a.key]=!0;var f=a.serviceType+"_GROUP",m=null,n=null,o=f+"_"+a.key,p=c._getFromNodes(e,a.key);g.forEach(function(d,g){if(h[d.key]!==!0&&a.serviceType===d.serviceType&&-1!=b.indexOf(d.serviceType)){var i=c._getFromNodes(e,d.key);if(p.length===i.length){var j=0;for(j=0;j<p.length;j++)if(-1===i.indexOf(p[j]))return;if(h[d.key]=!0,null===m&&(m=c._createNewMultiGroupNode(o,f)),null===n)for(n=[],j=0;j<i.length;j++)n.push(c._createNewLink(i[j],o));c._addToSubNode(m,d,function(a){k[a]=null}),c._addToSubLink(e,n,d.key,function(a,b){l[a]=null;var c=/(.*)\^(.*)/.exec(b.from),d=/(.*)\^(.*)/.exec(b.to),e=!1,f=null,g=0;m.subGroup.forEach(function(a,b){a.applicationName===c[1]&&(e=!0,f=a,g=b)}),e===!1&&(f={applicationName:c[1],groups:[],isLast:!1},m.subGroup.push(f),g=m.subGroup.length-1),f.groups.push({applicationName:d[1],hasAlert:b.hasAlert,totalCount:b.totalCount,serviceType:d[2],key:b.to,idx:g})})}}}),null!==m&&c._addToSubNode(m,a,function(a){k[a]=null}),null!==n&&c._addToSubLink(e,n,a.key,function(a,b){l[a]=null;var 
c=/(.*)\^(.*)/.exec(b.from),d=/(.*)\^(.*)/.exec(b.to),e=!1,f=null,g=0;m.subGroup.forEach(function(a,b){a.applicationName===c[1]&&(e=!0,f=a,g=b)}),e===!1&&(f={applicationName:c[1],groups:[],isLast:!1},m.subGroup.push(f),g=m.subGroup.length-1),f.groups.push({applicationName:d[1],hasAlert:b.hasAlert,totalCount:b.totalCount,serviceType:d[2],key:b.to,idx:g})}),m&&m.subGroup&&(m.subGroup[m.subGroup.length-1].isLast=!0),null!==m&&null!==n&&(i.push(m),n.forEach(function(a){j.push(a)})),m=null,n=null}}),c._addToOriginal(d,i),c._addToOriginal(e,j),c._removeByKey(d,k),c._removeByKey(e,l),a},this._createNewLink=function(a,b){return{key:a+"-"+b,from:a,to:b,sourceInfo:{},targetInfo:[],totalCount:0,errorCount:0,slowCount:0,hasAlert:!1,unknownLinkGroup:[],histogram:{}}},this._createNewNode=function(a,b){return{key:a,unknownNodeGroup:[],serviceType:b,category:b,instanceCount:0,isMultiGroup:!1,isCollapse:!0}},this._createNewMultiGroupNode=function(a,b){return{key:a,unknownNodeGroup:[],subGroup:[],serviceType:b,category:b,instanceCount:0,isMultiGroup:!0,isCollapse:!0}},this._removeByKey=function(a,b){$.each(b,function(b,c){a.forEach(function(c,d){c.key==b&&a.splice(d,1)})})},this._addToSubNode=function(a,b,c){delete b.category,a.instanceCount+=b.instanceCount,a.unknownNodeGroup.push(b),c(b.key)},this._addToSubLink=function(a,b,c,d){var e=this;a.forEach(function(a){a.to===c&&(b.forEach(function(b){b.from==a.from&&(e._mergeLinkData(b,a),b.unknownLinkGroup.push(a))}),d(a.key,a))})},this._mergeLinkData=function(a,b){a.totalCount+=b.totalCount,a.errorCount+=b.errorCount,a.slowCount+=b.slowCount,a.sourceInfo=b.sourceInfo,b.hasAlert&&(a.hasAlert=b.hasAlert)},this._getNodeByApplicationName=function(a,b,c){for(var d in a)if(b===a[d].applicationName&&c===a[d].serviceType)return a[d];return!1},this.getNodeDataByKey=function(a,b){var c=a.nodeDataArray,d=!1;return c.forEach(function(a){a.key===b&&(d=a)}),d},this.getLinkNodeDataByNodeKey=function(a,b,c){var d=a.linkDataArray,e=!1;return 
d.forEach(function(a){a.to===b&&-1!=a.from.indexOf(c)&&(e=a)}),e},this.getLinkDataByKey=function(a,b){var c=a.linkDataArray,d=!1;return c.forEach(function(a){a.key===b&&(d=a)}),d},this.getUnknownNodeDataByUnknownNodeGroup=function(a,b){for(var d in b)b[d]=c.getNodeDataByKey(a,b[d].key);return b},this.getUnknownLinkDataByUnknownLinkGroup=function(a,b){for(var d in b)b[d]=c.getLinkDataByKey(a,b[d].key),b[d].fromNode=c.getNodeDataByKey(a,b[d].from),b[d].toNode=c.getNodeDataByKey(a,b[d].to);return b},this.extractDataFromApplicationMapData=function(a){var b=["applicationName","category","errorCount","hasAlert","instanceCount","isWas","key","slowCount","serviceType","totalCount","histogram"],c=["errorCount","from","hasAlert","key","sourceInfo","slowCount","to","targetInfo","totalCount"],d={nodeDataArray:[],linkDataArray:[]};return angular.forEach(a.nodeDataArray,function(a,c){d.nodeDataArray.push({}),angular.forEach(b,function(b){d.nodeDataArray[c][b]=a[b]})}),angular.forEach(a.linkDataArray,function(a,b){d.linkDataArray.push({}),angular.forEach(c,function(c){d.linkDataArray[b][c]=a[c]})}),d}}])}(),function(){"use strict";pinpointApp.constant("agentDaoServiceConfig",{agentStatUrl:"/getAgentStat.pinpoint"}),pinpointApp.service("AgentDaoService",["agentDaoServiceConfig",function(a){this.getAgentStat=function(b,c){jQuery.ajax({type:"GET",url:a.agentStatUrl,cache:!1,dataType:"json",data:b,success:function(a){angular.isFunction(c)&&c(null,a)},error:function(a,b,d){angular.isFunction(c)&&c(d,{})}})},this.getSampleRate=function(a){var b=100,c=a/5,d=Math.floor(c/b);return b>=c?1:d},this.parseMemoryChartDataForAmcharts=function(a,b){var c=[],d=b.charts.JVM_GC_OLD_TIME.points,e=b.charts.JVM_GC_OLD_COUNT.points;if(d.length!==e.length)throw new Error("assertion error","time.length != count.length");for(var f,g,h,i,j=0;j<e.length;++j){var k={time:moment(d[j].timestamp).format("YYYY-MM-dd HH:mm:ss")};for(var l in a.line)if(a.line[l].isFgc){var 
m=0,n=0;if(f=d[j].maxVal,g=e[j].maxVal,h&&i){var o=g-i,p=f-h,q=Math.abs(o)>0&&Math.abs(p)>0,r=0>o&&0>p;q&&(r?(m=g,n=f):(m=g-i,n=f-h),i=g,h=f)}else h=f,i=g;m>0&&n>0&&(k[a.line[l].key+"Count"]=m,k[a.line[l].key+"Time"]=n)}else{var s=b.charts[a.line[l].id].points[j].maxVal;s>=0&&(k[a.line[l].key]=s)}c.push(k)}return c},this.parseCpuLoadChartDataForAmcharts=function(a,b){var c=b.charts.CPU_LOAD_JVM,d=b.charts.CPU_LOAD_SYSTEM;if(c||d){a.isAvailable=!0;var e=[],f=c.points,g=d.points;if(f.length!==g.length)throw new Error("assertion error","jvmCpuLoad.length != systemCpuLoad.length");for(var h=0;h<f.length;++h){if(f[h].timestamp!==g[h].timestamp)throw new Error("assertion error","timestamp mismatch between jvmCpuLoad and systemCpuLoad");var i={time:moment(f[h].timestamp).toString("YYYY-MM-dd HH:mm:ss"),maxCpuLoad:100},j="number"==typeof b.charts.CPU_LOAD_JVM.points[h].maxVal?b.charts.CPU_LOAD_JVM.points[h].maxVal.toFixed(2):0,k="number"==typeof b.charts.CPU_LOAD_SYSTEM.points[h].maxVal?b.charts.CPU_LOAD_SYSTEM.points[h].maxVal.toFixed(2):0;j>=0&&(i.jvmCpuLoad=j),k>=0&&(i.systemCpuLoad=k),e.push(i)}return e}},this.parseTpsChartDataForAmcharts=function(a,b){var c=b.charts.TPS_SAMPLED_CONTINUATION.points,d=b.charts.TPS_SAMPLED_NEW.points,e=b.charts.TPS_UNSAMPLED_CONTINUATION.points,f=b.charts.TPS_UNSAMPLED_NEW.points,g=b.charts.TPS_TOTAL.points,h=g.length;if(h>0){a.isAvailable=!0;for(var i=[],j=-1,k=0;h>k;k++){var l={time:moment(c[k].timestamp).toString("YYYY-MM-dd HH:mm:ss")},m="number"==typeof c[k].avgVal?c[k].avgVal.toFixed(2):0,n="number"==typeof d[k].avgVal?d[k].avgVal.toFixed(2):0,o="number"==typeof e[k].avgVal?e[k].avgVal.toFixed(2):0,p="number"==typeof f[k].avgVal?f[k].avgVal.toFixed(2):0,q="number"==typeof g[k].avgVal?g[k].avgVal.toFixed(2):0;m!=j&&(l.sampledContinuationTps=m),n!=j&&(l.sampledNewTps=n),o!=j&&(l.unsampledContinuationTps=o),p!=j&&(l.unsampledNewTps=p),q!=j&&(l.totalTps=q),i.push(l)}return i}}}])}(),function(){"use 
strict";pinpointApp.factory("SidebarTitleVoService",[function(){return function(){var a=this;this._sImage=!1,this._sImageType=!1,this._sTitle=!1,this._sImage2=!1,this._sImageType2=!1,this._sTitle2=!1,this.setImageType=function(b){if(!angular.isString(b))throw"ImageType should be string in SidebarTitleVo";return a._sImageType=b,a._sImage=a._parseImageTypeToImageUrl(b),a},this._parseImageTypeToImageUrl=function(a){if(angular.isString(a)){var b="/images/icons/";switch(a){case"UNKNOWN_GROUP":b+="UNKNOWN.png";break;default:b+=a+".png"}return b}throw"ImageType should be string in SidebarTitleVo"},this.getImageType=function(){return a._sImageType},this.getImage=function(){return a._sImage},this.setTitle=function(b){if(!angular.isString(b))throw"Title should be string in SidebarTitleVo";return a._sTitle=b,a},this.getTitle=function(){return a._sTitle},this.setImageType2=function(b){if(!angular.isString(b))throw"ImageType2 should be string in SidebarTitleVo";return a._sImageType2=b,a._sImage2=a._parseImageTypeToImageUrl(b),a},this.getImageType2=function(){return a._sImageType2},this.getImage2=function(){return a._sImage2},this.setTitle2=function(b){if(!angular.isString(b))throw"Title2 should be string in SidebarTitleVo";return a._sTitle2=b,a},this.getTitle2=function(){return a._sTitle2}}}])}(),function(){"use strict";pinpointApp.factory("filteredMapUtilService",["filterConfig","ServerMapFilterVoService","$window",function(a,b,c){return{mergeFilters:function(a,b){var c=[];if(a.getFilter()){var d=JSON.parse(a.getFilter());if(angular.isArray(d)){c=d;var e=this.findFilterInNavbarVo(b.getFromApplication(),b.getFromServiceType(),b.getToApplication(),b.getToServiceType(),a);if(e)return c[e.index]=b.toJson(),c}}return c.push(b.toJson()),c},mergeHints:function(a,b){var c={},d=this.parseShortHintToLongHint(JSON.parse(a.getHint())),e=b.getHint();if(d)if(e){var 
f=_.keys(e)[0],g=e[f];c=angular.copy(d),angular.isDefined(c[f])?(c[f]=_.union(c[f],g),c[f]=this.uniqueHintValue(c[f])):c[f]=g}else c=d;else c=b.getHint();return c},uniqueHintValue:function(a){for(var b=0;b<a.length;++b)for(var c=b+1;c<a.length;++c)a[b].rpc===a[c].rpc&&a[b].rpcServiceTypeCode===a[c].rpcServiceTypeCode&&a.splice(c--,1);return a},parseShortHintToLongHint:function(a){var b=angular.copy(a);return angular.forEach(b,function(a,c){for(var d=[],e=0;e<a.length;e+=2)d.push({rpc:a[e],rpcServiceTypeCode:a[e+1]});b[c]=d}),b},parseLongHintToShortHint:function(a){var b=angular.copy(a);return angular.forEach(b,function(a,c){var d=[];for(var e in a)d.push(a[e].rpc),d.push(a[e].rpcServiceTypeCode);b[c]=d}),b},getStartValueForFilterByLabel:function(a,b){var c=function(){for(var c in b)if(b[c].label===a)return c;return!1}(),d=0;return c>0&&(d=parseInt(b[c-1].label,10)),d},getFilteredMapUrlWithFilterVo:function(a,b,d){var e=this.mergeFilters(a,b),f=b.getMainApplication()+"@"+b.getMainServiceTypeName(),g="#/filteredMap/"+f+"/"+a.getReadablePeriod()+"/"+a.getQueryEndDateTime()+"/"+c.encodeURIComponent(JSON.stringify(e));if(a.getHint()||d.getHint()){var h=this.mergeHints(a,d),i=this.parseLongHintToShortHint(h);g+="/"+c.encodeURIComponent(JSON.stringify(i))}return g},findFilterInNavbarVo:function(a,b,c,d,e){var f=JSON.parse(e.getFilter()),g=!1;return"USER"===b&&(a="USER"),angular.isArray(f)&&angular.forEach(f,function(e,f){var h=new ServerMapFilterVo(e);a===h.getFromApplication()&&c===h.getToApplication()&&b===h.getFromServiceType()&&d===h.getToServiceType()&&(g={oServerMapFilterVoService:h,index:f})}),g},doFiltersHaveUnknownNode:function(a){for(var b in a)if("UNKNOWN"===a[b].tst)return!0;return!1}}}])}(),function(){"use strict";pinpointApp.constant("filterConfig",{FILTER_DELIMETER:"^",FILTER_ENTRY_DELIMETER:"|"})}(),function(){"use strict";pinpointApp.factory("ServerMapFilterVoService",[function(){return function(a){var 
b=this;this._sMainApplication=null,this._nMainServiceTypeCode=null,this._sMainServiceTypeName=null,this._sFromApplication=null,this._sFromServiceType=null,this._sFromAgentName=null,this._sToApplication=null,this._sToServiceType=null,this._sToAgentName=null,this._sResponseFrom=0,this._sResponseTo="max",this._bIncludeException=null,this._sRequestUrlPattern="", this.setMainApplication=function(a){if(!angular.isString(a))throw new Error("mainApplication should be string in ServerMapFilterVo. : ",a);return b._sMainApplication=a,b},this.getMainApplication=function(){return b._sMainApplication},this.setMainServiceTypeCode=function(a){if(!angular.isNumber(a))throw new Error("mainServiceTypeCode should be number in ServerMapFilterVo. : ",a);return b._nMainServiceTypeCode=a,b},this.setMainServiceTypeName=function(a){if(!angular.isString(a))throw new Error("mainServiceTypeName should be string in ServerMapFilterVo. : ",a);return b._sMainServiceTypeName=a,b},this.getMainServiceTypeCode=function(){return b._nMainServiceTypeCode},this.getMainServiceTypeName=function(){return b._sMainServiceTypeName},this.setFromApplication=function(a){if(!angular.isString(a))throw new Error("fromApplication should be string in ServerMapFilterVo. : ",a);return b._sFromApplication=a,b},this.getFromApplication=function(){return b._sFromApplication},this.setFromServiceType=function(a){if(!angular.isString(a))throw new Error("fromServiceType should be string in ServerMapFilterVo. : ",a);return b._sFromServiceType=a,b},this.getFromServiceType=function(){return b._sFromServiceType},this.setFromAgentName=function(a){if(!angular.isString(a))throw new Error("fromAgentName should be string in ServerMapFilterVo. : ",a);return b._sFromAgentName=a,b},this.getFromAgentName=function(){return b._sFromAgentName},this.setToApplication=function(a){if(!angular.isString(a))throw new Error("toApplication should be string in ServerMapFilterVo. 
: ",a);return b._sToApplication=a,b},this.getToApplication=function(){return b._sToApplication},this.setToServiceType=function(a){if(!angular.isString(a))throw new Error("toServiceType should be string in ServerMapFilterVo. : ",a);return b._sToServiceType=a,b},this.getToServiceType=function(){return b._sToServiceType},this.setToAgentName=function(a){if(!angular.isString(a))throw new Error("toAgentName should be string in ServerMapFilterVo. : ",a);return b._sToAgentName=a,b},this.getToAgentName=function(){return b._sToAgentName},this.setResponseFrom=function(a){if(angular.isString(a))b._sResponseFrom=parseInt(a,10);else{if(!angular.isNumber(a))throw new Error("responseFrom should be string in ServerMapFilterVo. : ",a);b._sResponseFrom=a}return b},this.getResponseFrom=function(){return b._sResponseFrom},this.setResponseTo=function(a){if("max"===a)b._sResponseTo="max";else{if(!angular.isNumber(a)&&!angular.isString(a))throw new Error("responseTo should be string in ServerMapFilterVo.");a=parseInt(a,10),a>=3e4?b._sResponseTo="max":b._sResponseTo=a}return b},this.getResponseTo=function(){return b._sResponseTo},this.setIncludeException=function(a){if(!angular.isDefined(a))throw new Error("includeException should be defined in ServerMapFilterVo.");return b._bIncludeException=a,b},this.getIncludeException=function(){return b._bIncludeException},this.setRequestUrlPattern=function(a){return angular.isString(a)&&(b._sRequestUrlPattern=a),b},this.getRequestUrlPattern=function(){return b._sRequestUrlPattern},this.toJson=function(){var a={fa:b._sFromApplication,fst:b._sFromServiceType,ta:b._sToApplication,tst:b._sToServiceType,ie:b._bIncludeException};return 
0===b._sResponseFrom&&"max"===b._sResponseTo||(a.rf=b._sResponseFrom,a.rt=b._sResponseTo),b._sRequestUrlPattern&&(a.url=b._sRequestUrlPattern),b._sFromAgentName&&(a.fan=b._sFromAgentName),b._sToAgentName&&(a.tan=b._sToAgentName),a},a&&angular.isObject(a)&&(this.setFromApplication(a.fa).setFromServiceType(a.fst).setToApplication(a.ta).setToServiceType(a.tst).setIncludeException(a.ie),angular.isNumber(a.rf)&&a.rt&&this.setResponseFrom(a.rf).setResponseTo(a.rt),a.url&&this.setRequestUrlPattern(a.url),a.fan&&this.setFromAgentName(a.fan),a.tan&&this.setToAgentName(a.tan))}}])}(),function(a){"use strict";pinpointApp.constant("AlarmAjaxServiceConfig",{group:"/userGroup.pinpoint",groupMember:"/userGroup/member.pinpoint",pinpointUser:"/user.pinpoint",alarmRule:"/alarmRule.pinpoint",alarmRuleSet:"/alarmRule/checker.pinpoint"}),pinpointApp.service("AlarmAjaxService",["AlarmAjaxServiceConfig","$http",function(b,c){function d(a,b,d){c.post(a,b).then(function(a){d(a.data)},function(a){d(a)})}function e(a,b,d){c.put(a,b).then(function(a){d(a.data)},function(a){d(a)})}function f(b,c,d){a.ajax(b,{type:"DELETE",data:JSON.stringify(c),contentType:"application/json"}).done(function(a){d(a)}).fail(function(a){d(a)})}function g(a,b,d){var e="?";for(var f in 
b)e+=("?"==e?"":"&")+f+"="+b[f];c.get(a+e).then(function(a){d(a.data)},function(a){d(a)})}this.getUserGroupList=function(a,c){g(b.group,a,c)},this.createUserGroup=function(a,c){d(b.group,a,c)},this.updateUserGroup=function(a,c){e(b.group,a,c)},this.removeUserGroup=function(a,c){f(b.group,a,c)},this.addMemberInGroup=function(a,c){d(b.groupMember,a,c)},this.getGroupMemberListInGroup=function(a,c){g(b.groupMember,a,c)},this.removeMemberInGroup=function(a,c){f(b.groupMember,a,c)},this.getPinpointUserList=function(a,c){g(b.pinpointUser,a,c)},this.createPinpointUser=function(a,c){d(b.pinpointUser,a,c)},this.updatePinpointUser=function(a,c){e(b.pinpointUser,a,c)},this.removePinpointUser=function(a,c){f(b.pinpointUser,a,c)},this.getRuleList=function(a,c){g(b.alarmRule,a,c)},this.createRule=function(a,c){d(b.alarmRule,a,c)},this.updateRule=function(a,c){e(b.alarmRule,a,c)},this.removeRule=function(a,c){f(b.alarmRule,a,c)},this.getRuleSet=function(a,c){g(b.alarmRuleSet,a,c)}}])}(jQuery),function(a){"use strict";pinpointApp.constant("AlarmUtilServiceConfig",{hideClass:"hide-me",hasNotEditClass:"has-not-edit"}),pinpointApp.service("AlarmUtilService",["AlarmUtilServiceConfig","AlarmAjaxService",function(a,b){var c=this;this.show=function(b){b.removeClass(a.hideClass)},this.hide=function(){for(var b=0;b<arguments.length;b++)arguments[b].addClass(a.hideClass)},this.showLoading=function(b,c){b[c?"removeClass":"addClass"](a.hasNotEditClass),b.removeClass(a.hideClass)},this.showAlert=function(b,c){b.find(".message").html(c).end().removeClass(a.hideClass).animate({height:300},500,function(){})},this.sendCRUD=function(a,d,e,f,g){b[a](d,function(a){a.errorCode||a.status?(c.showAlert(g,a.errorMessage||a.statusText),f(a)):e(a)})},this.setTotal=function(a,b){a.html("("+b+")")},this.setFilterBackground=function(a){a.css("background-color","#FFFFF1")},this.unsetFilterBackground=function(a){a.css("background-color","#FFF")},this.hasDuplicateItem=function(a,b){for(var 
c=(a.length,!1),d=0;d<a.length;d++)if(b(a[d])){c=!0;break}return c},this.closeAlert=function(a,b){a.animate({height:50},100,function(){c.hide(a,b)})},this.extractID=function(a){return a.prop("id").split("_")[1]}}])}(jQuery),function(a){"use strict";pinpointApp.service("AlarmBroadcastService",["$rootScope",function(a){var b=this;this.sendInit=function(a,c){b.sendReloadWithUserGroupID(a,c)},this.sendLoadPinpointUser=function(b){a.$broadcast("alarmPinpointUser.configuration.load",b)},this.sendReloadWithUserGroupID=function(b,c){a.$broadcast("alarmGroupMember.configuration.load",b,c),a.$broadcast("alarmRule.configuration.load",b)},this.sendSelectionEmpty=function(){a.$broadcast("alarmGroupMember.configuration.selectNone"),a.$broadcast("alarmRule.configuration.selectNone")},this.sendCallbackAddedUser=function(b){a.$broadcast("alarmPinpointUser.configuration.addUserCallback",b)},this.sendUserAdd=function(b){a.$broadcast("alarmGroupMember.configuration.addUser",b)},this.sendUserUpdated=function(b){a.$broadcast("alarmGroupMember.configuration.updateUser",b)},this.sendUserRemoved=function(b){a.$broadcast("alarmGroupMember.configuration.removeUser",b)}}])}(jQuery),function(){"use strict";pinpointApp.factory("ServerMapHintVoService",[function(){return function(a){var b=this;this._sApplicationName=!1,this._aHint=!1,this.setHint=function(a,b){this._sApplicationName=a,this._aHint=b},this.getHint=function(){var a={};return b._sApplicationName?a[b._sApplicationName]=b._aHint:a=!1,a}}}])}(),function(){"use strict";pinpointApp.factory("isVisibleService",function(){function a(d,e,f,g,h,i,j,k){var l=d.parentNode,m=2;if(!c(d))return!1;if(e||0===e||(e=99999),9===l.nodeType)return!0;if("0"===b(d,"opacity")||"none"===b(d,"display")||"hidden"===b(d,"visibility"))return!1;if("undefined"!=typeof f&&"undefined"!=typeof g&&"undefined"!=typeof h&&"undefined"!=typeof i&&"undefined"!=typeof j&&"undefined"!=typeof 
k||(f=d.offsetTop,i=d.offsetLeft,h=f+d.offsetHeight,g=i+d.offsetWidth,j=d.offsetWidth,k=d.offsetHeight),l){var n=b(l,"overflow");return("hidden"===n||"scroll"===n||"auto"===n)&&e>0&&(e-=1,i+m>l.offsetWidth+l.scrollLeft||i+j-m<l.scrollLeft||f+m>l.offsetHeight+l.scrollTop||f+k-m<l.scrollTop)?!1:(d.offsetParent===l&&(i+=l.offsetLeft,f+=l.offsetTop),a(l,e,f,g,h,i,j,k))}return!0}function b(a,b){return window.getComputedStyle?document.defaultView.getComputedStyle(a,null)[b]:a.currentStyle?a.currentStyle[b]:void 0}function c(a){for(;a=a.parentNode;)if(a==document)return!0;return!1}return a})}(),function(){"use strict";pinpointApp.factory("UserLocalesService",["$window",function(a){var b="en",c=a.navigator.userLanguage||a.navigator.language;return c="string"===$.type(c)&&c.length>=2?c.substring(0,2):b,{userLocale:c,defaultLocale:b}}])}(),function(){"use strict";var a={configuration:{general:{warning:"(User configuration is stored in browser cache. Server-side storage will be supported in a future release.)",empty:"Favorite list empty"},alarmRules:{mainStyle:"",title:"Alarm Rule Type",desc:"The following types of alarm rules are supported by Pinpoint.",category:[{title:"[Type]",items:[{name:"SLOW COUNT",desc:"Sends an alarm when the number of slow requests sent by the application exceeds the configured threshold."},{name:"SLOW RATE",desc:"Sends an alarm when the percentage(%) of slow requests sent by the application exceeds the configured threshold."},{name:"ERROR COUNT",desc:"Sends an alarm when the number of failed requests sent by the application exceeds the configured threshold."},{name:"ERROR RATE",desc:"Sends an alarm when the percentage(%) of failed requests sent by the application exceeds the configured threshold."},{name:"TOTAL COUNT",desc:"Sends an alarm when the number of all requests sent by the application exceeds the configured threshold."},{name:"SLOW COUNT TO CALLEE",desc:"Sends an alarm when the number of slow responses returned by the application exceeds 
the configured threshold."},{name:"SLOW RATE TO CALLEE",desc:"Sends an alarm when the percentage(%) of slow responses returned by the application exceeds the configured threshold."},{name:"ERROR COUNT TO CALLEE",desc:"Sends an alarm when the number of failed responses returned by the application exceeds the configured threshold."},{name:"ERROR RATE TO CALLEE",desc:"Sends an alarm when the percentage(%) of failed responses returned by the application exceeds the configured threshold."},{name:"TOTAL COUNT TO CALLEE",desc:"Sends an alarm when the number of all remote calls sent to the application exceeds the configured threshold."},{name:"HEAP USAGE RATE",desc:"Sends an alarm when the application's heap usage(%) exceeds the configured threshold."},{name:"JVM CPU USAGE RATE",desc:"Sends an alarm when the application's CPU usage(%) exceeds the configured threshold."}]}]}},navbar:{searchPeriod:{guide:"Search duration may not be greater than {{day}} days."},applicationSelector:{mainStyle:"",title:"Application List",desc:"Shows the list of applications with Pinpoint installed.",category:[{title:"[Legend]",items:[{name:"Icon",desc:"Application Type"},{name:"Text",desc:"Application Name. 
The value set using <code>-Dpinpoint.applicationName</code> when launching Pinpoint agent."}]}]},depth:{mainStyle:"",title:'<img src="images/inbound.png" width="22px" height="22px" style="margin-top:-4px;"> Inbound 와 <img src="images/outbound.png" width="22px" height="22px" style="margin-top:-4px"> Outbound',desc:"Search-depth of server map.",category:[{title:"[범례]",items:[{name:"Inbound",desc:"Number of depth to render for requests coming in to the selected node."},{name:"Outbound",desc:"Number of depth to render for requests going out from the selected node"}]}]},periodSelector:{mainStyle:"",title:"Period Selector",desc:"Selects the time period for querying data.",category:[{title:"[Usage]",items:[{name:"<button type='button' class='btn btn-success btn-xs'><span class='glyphicon glyphicon-th-list'></span></button>",desc:"Query for data traced during the most recent selected time-period.<br/>Auto-refresh is supported for 5m, 10m, 3h time-period."},{name:"<button type='button' class='btn btn-success btn-xs'><span class='glyphicon glyphicon-calendar'></span></button>",desc:"Query for data traced between the two selected times for a maximum of 48 hours."}]}]}},servermap:{"default":{mainStyle:"width:560px;",title:"Server Map",desc:"Displays a topological view of the distributed server map.",category:[{title:"[Node]",list:["Each node is a logical unit of application.","The value on the top-right corner represents the number of server instances assigned to that application. 
(Not shown when there is only one such instance)","An alarm icon is displayed on the top-left corner if an error/exception is detected in one of the server instances.","Clicking a node shows information on all incoming transactions on the right-hand side of the screen."]},{title:"[Arrow]",list:["Each arrow represents a transaction flow.","The number shows the transaction count and is displayed in red for transactions with error.","<span class='glyphicon glyphicon-filter' style='color:green;'></span> is shown when a filter is applied.","Clicking an arrow shows information on all transactions passing through the selected section on the right-hand side of the screen."]},{title:"[Applying Filter]",list:["Right-clicking on an arrow displays a filter menu.","'Filter' filters the server map to only show transactions that has passed through the selected section.","'Filter Wizard' allows additional filter configurations."]},{title:"[Chart Configuration]",list:["Right-clicking on an empty area displays a chart configuration menu.","Node Setting / Merge Unknown : Groups all agent-less applications into a single node.","Double-clicking on an empty resets the zoom level of the server map."]}]}},realtime:{"default":{mainStyle:"",title:"Realtime Active Thread Chart",desc:"Shows the Active Thread count of each agent in realtime.",category:[{title:"[Error Messages]",items:[{name:"UNSUPPORTED VERSION",desc:"Agent version too old. (Please upgrade the agent to 1.5.0+)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"CLUSTER OPTION NOTSET",desc:"Option disabled by agent. (Please set profiler.pinpoint.activethread to true in profiler.config)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"TIMEOUT",desc:"Agent connection timed out receiving active thread count. 
Please contact the administrator if problem persists.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"NOT FOUND",desc:"Agent not found. (If you get this message while the agent is running, please set profiler.tcpdatasender.command.accept.enable to true in profiler.config)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"CLUSTER CHANNEL CLOSED",desc:"Agent session expired.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"PINPOINT INTERNAL ERROR",desc:"Pinpoint internal error. Please contact the administrator.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"No Active Thread",desc:"The agent has no threads that are currently active.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"No Response",desc:"No response from Pinpoint Web. 
Please contact the administrator.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"}]}]}},scatter:{"default":{mainStyle:"",title:"Response Time Scatter Chart",desc:"",category:[{title:"[Legend]",items:[{name:"<span class='glyphicon glyphicon-stop' style='color:#2ca02c'></span>",desc:"Successful Transaction"},{name:"<span class='glyphicon glyphicon-stop' style='color:#f53034'></span>",desc:"Failed Transaction"},{name:"X-axis",desc:"Transaction Timestamp (hh:mm)"},{name:"Y-axis",desc:"Response Time (ms)"}]},{title:"[Usage]",image:"<img src='/images/help/scatter_01.png' width='200px' height='125px'>",items:[{name:"<span class='glyphicon glyphicon-plus'></span>",desc:"Drag on the scatter chart to show detailed information on selected transactions."},{name:"<span class='glyphicon glyphicon-cog'></span>",desc:"Set the min/max value of the Y-axis (Response Time)."},{name:"<span class='glyphicon glyphicon-download-alt'></span>",desc:"Download the chart as an image file."},{name:"<span class='glyphicon glyphicon-fullscreen'></span>",desc:"Open the chart in a new window."}]}]}},nodeInfoDetails:{responseSummary:{mainStyle:"",title:"Response Summary Chart",desc:"",category:[{title:"[Legend]",items:[{name:"X-Axis",desc:"Response Time"},{name:"Y-Axis",desc:"Transaction Count"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"No. of Successful transactions (less than 1 second)"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"No. of Successful transactions (1 ~ 3 seconds)"},{name:"<span style='color:#f8c731'>5s</span>",desc:"No. of Successful transactions (3 ~ 5 seconds)"},{name:"<span style='color:#f69124'>Slow</span>",desc:"No. of Successful transactions (greater than 5 seconds)"},{name:"<span style='color:#f53034'>Error</span>",desc:"No. 
of Failed transactions regardless of response time"}]}]},load:{mainStyle:"",title:"Load Chart",desc:"",category:[{title:"[Legend]",items:[{name:"X-Axis",desc:"Transaction Timestamp (in minutes)"},{name:"Y-Axis",desc:"Transaction Count"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"No. of Successful transactions (less than 1 second)"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"No. of Successful transactions (1 ~ 3 seconds)"},{name:"<span style='color:#f8c731'>5s</span>",desc:"No. of Successful transactions (3 ~ 5 seconds)"},{name:"<span style='color:#f69124'>Slow</span>",desc:"No. of Successful transactions (greater than 5 seconds)"},{name:"<span style='color:#f53034'>Error</span>",desc:"No. of Failed transactions regardless of response time"}]},{title:"[Usage]",list:["Clicking on a legend item shows/hides all transactions within the selected group.","Dragging on the chart zooms in to the dragged area."]}]},nodeServers:{mainStyle:"width:400px;",title:"Server Information",desc:"List of physical servers and their server instances.",category:[{title:"[Legend]",items:[{name:"<span class='glyphicon glyphicon-home'></span>",desc:"Hostname of the physical server"},{name:"<span class='glyphicon glyphicon-hdd'></span>",desc:"AgentId of the Pinpoint agent installed on the server instance running on the physical server"}]},{title:"[Usage]",items:[{name:"<button type='button' class='btn btn-default btn-xs'>Inspector</button>",desc:"Open a new window with detailed information on the WAS with Pinpoint installed."},{name:"<span class='glyphicon glyphicon-record' style='color:#3B99FC'></span>",desc:"Display statistics on transactions carried out by the server instance."},{name:"<span class='glyphicon glyphicon-hdd' style='color:red'></span>",desc:"Display statistics on transactions (with error) carried out by the server instance."}]}]},unknownList:{mainStyle:"",title:"UnknownList",desc:"From the chart's top-right 
icon",category:[{title:"[Usage]",items:[{name:"1st",desc:"Toggle between Response Summary Chart / Load Chart"},{name:"2nd",desc:"Show Node Details"}]}]},searchAndOrder:{mainStyle:"",title:"Search and Fliter",desc:"Filter by server name or total count.Clicking Name or Count sorts the list in ascending/descending order."}},linkInfoDetails:{responseSummary:{mainStyle:"",title:"Response Summary Chart",desc:"",category:[{title:"[Legend]",items:[{name:"X-Axis",desc:"Response Time"},{name:"Y-Axis",desc:"Transaction Count"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"No. of Successful transactions (less than 1 second)"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"No. of Successful transactions (1 ~ 3 seconds)"},{name:"<span style='color:#f8c731'>5s</span>",desc:"No. of Successful transactions (3 ~ 5 seconds)"},{name:"<span style='color:#f69124'>Slow</span>",desc:"No. of Successful transactions (greater than 5 seconds)"},{name:"<span style='color:#f53034'>Error</span>",desc:"No. of Failed transactions regardless of response time"}]},{title:"[Usage]",list:["Click on the bar to query for transactions within the selected response time."]}]},load:{mainStyle:"",title:"Load Chart",desc:"",category:[{title:"[Legend]",items:[{name:"X-Axis",desc:"Transaction Timestamp (in minutes)"},{name:"Y-Axis",desc:"Transaction Count"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"No. of Successful transactions (less than 1 second)"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"No. of Successful transactions (1 ~ 3 seconds)"},{name:"<span style='color:#f8c731'>5s</span>",desc:"No. of Successful transactions (3 ~ 5 seconds)"},{name:"<span style='color:#f69124'>Slow</span>",desc:"No. of Successful transactions (greater than 5 seconds)"},{name:"<span style='color:#f53034'>Error</span>",desc:"No. 
of Failed transactions regardless of response time"}]},{title:"[Usage]",list:["Clicking on a legend item shows/hides all transactions within the selected group.","Dragging on the chart zooms in to the dragged area."]}]},linkServers:{mainStyle:"width:350px;",title:"Server Information",desc:"List of physical servers and their server instances.",category:[{title:"[Legend]",items:[{name:"<span class='glyphicon glyphicon-home'></span>",desc:"Hostname of the physical server"},{name:"<span class='glyphicon glyphicon-hdd'></span>",desc:"AgentId of the Pinpoint agent installed on the server instance running on the physical server"}]},{title:"[Usage]",items:[{name:"<button type='button' class='btn btn-default btn-xs'>Inspector</button>",desc:"Open a new window with detailed information on the WAS with Pinpoint installed."},{name:"<button type='button' class='btn btn-default btn-xs'><span class='glyphicon glyphicon-plus'></span></button>",desc:"Display statistics on transactions carried out by the server instance."},{name:"<button type='button' class='btn btn-danger btn-xs'><span class='glyphicon glyphicon-plus'></span></button>",desc:"Display statistics on transactions (with error) carried out by the server instance."}]}]},unknownList:{mainStyle:"",title:"UnknownList",desc:"From the chart's top-right icon,",category:[{title:"[Usage]",items:[{name:"1st",desc:"Toggle between Response Summary Chart"},{name:"2dn",desc:"Show Node Details"}]}]},searchAndOrder:{mainStyle:"",title:"Search and Filter",desc:"Filter by server name or total count.<br/>Clicking Name or Count sorts the list in ascending/descending order."}},inspector:{list:{mainStyle:"",title:"Agent list",desc:"List of agents registered under the current Application Name",category:[{title:"[Legend]",items:[{name:"<span class='glyphicon glyphicon-home'></span>",desc:"Hostname of the agent's machine"},{name:"<span class='glyphicon glyphicon-hdd'></span>",desc:"Agent-id of the installed agent"},{name:"<span class='glyphicon 
glyphicon-ok-sign' style='color:#40E340'></span>",desc:"Agent was running at the time of query"},{name:"<span class='glyphicon glyphicon-minus-sign' style='color:#F00'></span>",desc:"Agent was shutdown at the time of query"},{name:"<span class='glyphicon glyphicon-remove-sign' style='color:#AAA'></span>",desc:"Agent was disconnected at the time of query"},{name:"<span class='glyphicon glyphicon-question-sign' style='color:#AAA'></span>",desc:"Agent status was unknown at the time of query"}]}]},heap:{mainStyle:"",title:"Heap",desc:"JVM's heap information and full garbage collection times(if any)",category:[{title:"[Legend]",items:[{name:"Max",desc:"Maximum heap size"},{name:"Used",desc:"Heap currently in use"},{name:"FCG",desc:"Full garbage collection duration (number of FGCs in parenthesis if it occurred more than once)"}]}]},permGen:{mainStyle:"",title:"PermGen",desc:"JVM's PermGen information and full garbage collection times(if any)",category:[{title:"[Legend]",items:[{name:"Max",desc:"Maximum heap size"},{name:"Used",desc:"Heap currently in use"},{name:"FCG",desc:"Full garbage collection duration (number of FGCs in parenthesis if it occurred more than once)"}]}]},cpuUsage:{mainStyle:"",title:"Cpu Usage",desc:"JVM/System's CPU Usage - For multi-core CPUs, displays the average CPU usage of all the cores",category:[{title:"[Legend]",items:[{name:"Java 1.6",desc:"Only the JVM's CPU usage is collected"},{name:"Java 1.7+",desc:"Both the JVM's and the system's CPU usage are collected"}]}]},tps:{mainStyle:"",title:"TPS",desc:"Transactions per second received by the server",category:[{title:"[Legend]",items:[{name:"Sampled New (S.N)",desc:"Profiled transactions that started from the current agent"},{name:"Sampled Continuation (S.C)",desc:"Profiled transactions that started from another agent"},{name:"Unsampled New (U.N)",desc:"Unprofiled transactions that started from the current agent"},{name:"Unsampled Continuation (U.C)",desc:"Unprofiled transactions that started 
from another agent"},{name:"Total",desc:"All transactions"}]}]},wrongApp:["<div style='font-size:12px'>The agent is currently registered under {{application2}} due to the following:<br>","1. The agent has moved from {{application1}} to {{application2}}<br>","2. A different agent with the same agent id has been registered to {{application2}}<hr>","For the former case, you should delete the mapping between {{application1}} and {{agentId}}.<br>","For the latter case, the agent id of the duplicate agent must be changed.</div>"].join("")},callTree:{column:{mainStyle:"",title:"Call Tree",desc:"",category:[{title:"[Column]",items:[{name:"Gap",desc:"Time elapsed between the start of the previous method and entry of this method"},{name:"Exec",desc:"The overall duration of the method call from method entry until method exit"},{name:"Exec(%)",desc:"<img src='/images/help/callTree_01.png'/>"},{name:"",desc:"<span style='background-color:#FFFFFF;color:#5bc0de'>Light blue</span> The execution time of the method call as a percentage of the total execution time of the transaction"},{name:"",desc:"<span style='background-color:#FFFFFF;color:#4343C8'>Dark blue</span> A percentage of the self execution time"},{name:"Self",desc:"The time that was used for execution of this method only, excluding time consumed in nested methods call"}]}]}},transactionTable:{log:{}},transactionList:{openError:{noParent:"Scatter data of parent window had been changed.\r\nso can't scan the data any more.",noData:"There is no {{application}} scatter data in parent window."}}};pinpointApp.constant("helpContent-en",a)}(),
/* helpContent-ko: Korean-locale counterpart of the helpContent-en constant registered just above.
 * Same object shape (configuration / navbar / servermap / realtime / scatter / ... categories with
 * title/desc/items entries holding HTML snippets); rendered through the helpContentTemplate
 * Handlebars template and selected per-locale by helpContentService.
 * NOTE(review): this is a minified build artifact — edit the unminified sources, not this file. */
function(){"use strict";var a={configuration:{general:{warning:"(설정 정보는 브라우저 캐쉬에 저장합니다. 
서버 측 저장은 추후 지원 할 예정입니다.)",empty:"등록된 목록이 없습니다."},alarmRules:{mainStyle:"",title:"알람 룰의 종류",desc:"Pinpoint에서 지원하는 Alarm rule의 종류는 아래와 같습니다.",category:[{title:"[항목]",items:[{name:"SLOW COUNT",desc:"application 내에서 외부서버를 호출한 요청 중 slow 호출의 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"SLOW RATE",desc:"application 내에서 외부서버를 호출한 요청 중 slow 호출의 비율(%)이 임계치를 초과한 경우 알람이 전송된다."},{name:"ERROR COUNT",desc:"application 내에서 외부서버를 호출한 요청 중 error 가 발생한 호출의 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"ERROR RATE",desc:"application 내에서 외부서버를 호출한 요청 중 error 가 발생한 호출의 비율이 임계치를 초과한 경우 알람이 전송된다."},{name:"TOTAL COUNT",desc:"application 내에서 외부서버를 호출한 요청의 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"SLOW COUNT TO CALLEE",desc:"외부에서 application을 호출한 요청 중에 외부서버로 응답을 늦게 준 요청의 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"SLOW RATE TO CALLEE",desc:"외부에서 application을 호출한 요청 중에 외부서버로 응답을 늦게 준 요청의 비율(%)이 임계치를 초과한 경우 알람이 전송된다."},{name:"ERROR COUNT TO CALLEE",desc:"외부에서 application을 호출한 요청 중에 에러가 발생한 요청의 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"ERROR RATE TO CALLEE",desc:"외부에서 application을 호출한 요청 중에 에러가 발생한 요청의 비율(%)이 임계치를 초과한 경우 알람이 전송된다."},{name:"TOTAL COUNT TO CALLEE",desc:"외부에서 application을 호출한 요청 개수가 임계치를 초과한 경우 알람이 전송된다."},{name:"HEAP USAGE RATE",desc:"heap의 사용률이 임계치를 초과한 경우 알람이 전송된다."},{name:"JVM CPU USAGE RATE",desc:"applicaiton의 CPU 사용률이 임계치를 초과한 경우 알람이 전송된다."}]}]}},navbar:{searchPeriod:{guide:"한번에 검색 할 수 있는 최대 기간은 {{day}}일 입니다."},applicationSelector:{mainStyle:"",title:"응용프로그램 목록",desc:"핀포인트가 설치된 응용프로그램 목록입니다.",category:[{title:"[범례]",items:[{name:"아이콘",desc:"응용프로그램의 종류"},{name:"텍스트",desc:"응용프로그램의 이름입니다. 
Pinpoint agent 설정에서 applicationName에 지정한 값입니다."}]}]},depth:{mainStyle:"",title:'<img src="images/inbound.png" width="22px" height="22px" style="margin-top:-4px;"> Inbound 와 <img src="images/outbound.png" width="22px" height="22px" style="margin-top:-4px"> Outbound',desc:"서버맵의 탐색 깊이를 설정합니다.",category:[{title:"[범례]",items:[{name:"Inbound",desc:"선택된 노드를 기준으로 들어오는 탐색깊이"},{name:"Outbound",desc:"선택된 노드를 기준으로 나가는 탐색 깊이"}]}]},periodSelector:{mainStyle:"",title:"조회 시간 설정",desc:"데이터 조회 시간을 선택합니다.",category:[{title:"[범례]",items:[{name:"<button type='button' class='btn btn-success btn-xs'><span class='glyphicon glyphicon-th-list'></span></button>",desc:"현재 시간을 기준으로 선택한 시간 이전부터 현재시간 사이에 수집된 데이터를 조회합니다.<br/>최근 5m, 10m, 3h조회는 자동 새로고침 기능을 지원합니다."},{name:"<button type='button' class='btn btn-success btn-xs'><span class='glyphicon glyphicon-calendar'></span></button>",desc:"지정된 시간 사이에 수집된 데이터를 조회합니다. 조회 시간은 분단위로 최대 48시간을 지정할 수 있습니다."}]}]}},servermap:{"default":{mainStyle:"width:560px;",title:"서버맵",desc:"분산된 서버를 도식화 한 지도 입니다.",category:[{title:"[박스]",list:["박스는 응용프로그램 그룹을 나타냅니다.","우측의 숫자는 응용프로그램 그룹에 속한 서버 인스턴스의 개수입니다.(한 개일 때에는 숫자를 보여주지 않습니다.)","좌측 빨간 알람은 임계값을 초과한 모니터링 항목이 있을 때 나타납니다."]},{title:"[화살표]",list:["화살표는 트랜잭션의 흐름을 나타냅니다.","화살표의 숫자는 호출 수 입니다. 
임계치 이상의 에러를 포함하면 빨간색으로 보여집니다.","<span class='glyphicon glyphicon-filter' style='color:green;'></span> : 필터가 적용되면 아이콘이 표시됩니다."]},{title:"[박스의 기능]",list:["박스를 선택하면 어플리케이션으로 유입된 트랜잭션 정보를 화면 우측에 보여줍니다."]},{title:"[화살표의 기능]",list:["화살표를 선택하여 선택된 구간을 통과하는 트랜잭션의 정보를 화면 우측에 보여줍니다.","Context menu의 Filter는 선택된 구간을 통과하는 트랜잭션만 모아서 보여줍니다.","Filter wizard는 보다 상세한 필터 설정을 할 수 있습니다.","필터가 적용되면 화살표에 <span class='glyphicon glyphicon-filter' style='color:green;'></span>아이콘이 표시됩니다."]},{title:"[차트 설정]",list:["비어있는 부분을 마우스 오른쪽 클릭하여 context menu를 열면 차트 설정메뉴가 보입니다.","Node Setting / Merge Unknown : agent가 설치되어있지 않은 응용프로그램을 하나의 박스로 보여줍니다.","비어있는 부분 더블클릭 : 줌을 초기화 합니다."]}]}},realtime:{"default":{mainStyle:"",title:"Realtime Active Thread Chart",desc:"각 Agent의 Active Thread 갯수를 실시간으로 보여줍니다.",category:[{title:"[에러 메시지 설명]",items:[{name:"UNSUPPORTED VERSION",desc:"해당 에이전트의 버전에서는 지원하지 않는 기능입니다. (1.5.0 이상 버전으로 업그레이드하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"CLUSTER OPTION NOTSET",desc:"해당 에이전트의 설정에서 기능이 비활성화되어 있습니다. 
(pinpoint 설정 파일에서 profiler.pinpoint.activethread 항목을 true로 변경하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"TIMEOUT",desc:"해당 에이전트에서 일시적으로 활성화 된 스레드 개수를 받지 못하였습니다.(오래 지속될 경우 담당자에게 문의하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"NOT FOUND",desc:"해당 에이전트를 찾을 수 없습니다.(에이전트가 활성화 되어 있는 경우에 해당 메시지가 발생한다면 pinpoint 설정 파일에서 profiler.tcpdatasender.command.accept.enable 항목을 true로 변경하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"CLUSTER CHANNEL CLOSED",desc:"해당 에이전트와의 세션이 종료되었습니다.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"PINPOINT INTERNAL ERROR",desc:"핀포인트 내부 에러가 발생하였습니다.(담당자에게 문의하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray" },{name:"No Active Thread",desc:"현재 해당 에이전트는 활성화된 스레드가 존재하지 않습니다.",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"},{name:"No Response",desc:"핀포인트 웹 서버로부터의 응답을 받지 못하였습니다.(담당자에게 문의하세요.)",nameStyle:"width:120px;border-bottom:1px solid gray",descStyle:"border-bottom:1px solid gray"}]}]}},scatter:{"default":{mainStyle:"",title:"Response Time Scatter Chart",desc:"수집된 트랜잭션의 응답시간 분포도입니다.",category:[{title:"[범례]",items:[{name:"<span class='glyphicon glyphicon-stop' style='color:#2ca02c'></span>",desc:"에러가 없는 트랜잭션 (Success)"},{name:"<span class='glyphicon glyphicon-stop' style='color:#f53034'></span>",desc:"에러를 포함한 트랜잭션 (Failed)"},{name:"X축",desc:"트랜잭션이 실행된 시간 (시:분)"},{name:"Y축",desc:"트랜잭션의 응답 속도 (ms)"}]},{title:"[기능]",image:"<img src='/images/help/scatter_01.png' width='200px' height='125px'>",items:[{name:"<span class='glyphicon glyphicon-plus'></span>",desc:"마우스로 영역을 드래그하여 드래그 된 영역에 속한 트랜잭션의 상세정보를 조회할 수 있습니다."},{name:"<span class='glyphicon glyphicon-cog'></span>",desc:"에러를 포함한 트랜잭션 (Failed)"},{name:"X축",desc:"응답시간(Y축)의 
최대 또는 최소값을 변경할 수 있습니다."},{name:"<span class='glyphicon glyphicon-download-alt'></span>",desc:"차트를 새창으로 크게 보여줍니다."}]}]}},nodeInfoDetails:{responseSummary:{mainStyle:"",title:"Response Summary Chart",desc:"응답결과 요약 입니다.",category:[{title:"[범례]",items:[{name:"X축",desc:"트랜잭션 응답시간 요약 단위"},{name:"Y축",desc:"트랜잭션의 개수"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"<span class='label label-info'>0초 <= 응답시간 < 1초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"<span class='label label-info'>1초 <= 응답시간 < 3초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f8c731'>5s</span>",desc:"<span class='label label-info'>3초 <= 응답시간 < 5초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f69124'>Slow</span>",desc:"<span class='label label-info'>5초 <= 응답시간</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f53034'>Error</span>",desc:"응답시간과 무관하게 실패한 트랜잭션의 수"}]}]},load:{mainStyle:"",title:"Load Chart",desc:"시간별 트랜잭션의 응답 결과입니다.",category:[{title:"[범례]",items:[{name:"X축",desc:"트랜잭션이 실행된 시간 (분단위)"},{name:"Y축",desc:"트랜잭션의 개수"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"<span class='label label-info'>0초 <= 응답시간 < 1초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"<span class='label label-info'>1초 <= 응답시간 < 3초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f8c731'>5s</span>",desc:"<span class='label label-info'>3초 <= 응답시간 < 5초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f69124'>Slow</span>",desc:"<span class='label label-info'>5초 <= 응답시간</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f53034'>Error</span>",desc:"응답시간과 무관하게 실패한 트랜잭션의 수"}]},{title:"[기능]",list:["범례를 클릭하여 해당 응답시간에 속한 트랜잭션을 차트에서 제외하거나 포함 할 수 있습니다.","마우스로 드래그하여 드래그 한 범위를 확대할 수 있습니다."]}]},nodeServers:{mainStyle:"width:400px;",title:"Server Information",desc:"물리서버와 해당 서버에서 동작중인 서버 인스턴스의 정보를 보여줍니다.",category:[{title:"[범례]",items:[{name:"<span class='glyphicon glyphicon-home'></span>",desc:"물리서버의 호스트이름입니다."},{name:"<span 
class='glyphicon glyphicon-hdd'></span>",desc:"물리서버에 설치된 서버 인스턴스에서 동작중인 Pinpoint의 agentId입니다."}]},{title:"[기능]",items:[{name:"<button type='button' class='btn btn-default btn-xs'>Inspector</button>",desc:"Pinpoint가 설치된 WAS의 상세한 정보를 보여줍니다."},{name:"<span class='glyphicon glyphicon-record' style='color:#3B99FC'></span>",desc:"해당 인스턴스에서 처리된 트랜잭션 통계를 조회할 수 있습니다."},{name:"<span class='glyphicon glyphicon-hdd' style='color:red'></span>",desc:"에러를 발생시킨 트랜잭션이 포함되어있다는 의미입니다."}]}]},unknownList:{mainStyle:"",title:"UnknownList",desc:"차트 오른쪽 상단의 아이콘부터",category:[{title:"[기능]",items:[{name:"첫번째",desc:"Response Summary"},{name:"두번째",desc:"해당 노드 상세보기"}]}]},searchAndOrder:{mainStyle:"",title:"검색과 필터링",desc:"서버 이름과 Count로 검색이 가능합니다.",category:[{title:"[기능]",items:[{name:"Name",desc:"이름을 오름/내림차순 정렬 합니다."},{name:"Count",desc:"갯수를 오름/내림차순 정렬 합니다."}]}]}},linkInfoDetails:{responseSummary:{mainStyle:"",title:"Response Summary Chart",desc:"응답결과 요약 입니다.",category:[{title:"[범례]",items:[{name:"X축",desc:"트랜잭션 응답시간 요약 단위"},{name:"Y축",desc:"트랜잭션의 개수"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"<span class='label label-info'>0초 <= 응답시간 < 1초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#3c81fa'>3s</span>",desc:"<span class='label label-info'>1초 <= 응답시간 < 3초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f8c731'>5s</span>",desc:"<span class='label label-info'>3초 <= 응답시간 < 5초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f69124'>Slow</span>",desc:"<span class='label label-info'>5초 <= 응답시간</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f53034'>Error</span>",desc:"응답시간과 무관하게 실패한 트랜잭션의 수"}]},{title:"[기능]",list:["바(bar)를 클릭하면 해당 응답시간에 속한 트랜잭션의 목록을 조회합니다."]}]},load:{mainStyle:"",title:"Load Chart",desc:"시간별 트랜잭션의 응답 결과입니다.",category:[{title:"[범례]",items:[{name:"X축",desc:"트랜잭션이 실행된 시간 (분단위)"},{name:"Y축",desc:"트랜잭션의 개수"},{name:"<spanstyle='color:#2ca02c'>1s</span>",desc:"<span class='label label-info'>0초 <= 응답시간 < 1초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span 
style='color:#3c81fa'>3s</span>",desc:"<span class='label label-info'>1초 <= 응답시간 < 3초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f8c731'>5s</span>",desc:"<span class='label label-info'>3초 <= 응답시간 < 5초</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f69124'>Slow</span>",desc:"<span class='label label-info'>5초 <= 응답시간</span> 에 해당하는 성공한 트랜잭션의 수"},{name:"<span style='color:#f53034'>Error</span>",desc:"응답시간과 무관하게 실패한 트랜잭션의 수"}]},{title:"[기능]",list:["범례를 클릭하여 해당 응답시간에 속한 트랜잭션을 차트에서 제외하거나 포함 할 수 있습니다.","마우스로 드래그하여 드래그 한 범위를 확대할 수 있습니다."]}]},linkServers:{mainStyle:"width:350px;",title:"Server Information",desc:"해당 구간을 통과하는 트랜잭션을 호출한 서버 인스턴스의 정보입니다. (호출자)",category:[{title:"[범례]",items:[{name:"<span class='glyphicon glyphicon-hdd'></span>",desc:"물리서버에 설치된 서버 인스턴스에서 동작중인 Pinpoint의 agentId입니다."}]},{title:"[기능]",items:[{name:"<button type='button' class='btn btn-default btn-xs'>Inspector</button>",desc:"Pinpoint가 설치된 WAS의 상세한 정보를 보여줍니다."},{name:"<button type='button' class='btn btn-default btn-xs'><span class='glyphicon glyphicon-plus'></span></button>",desc:"해당 인스턴스에서 처리된 트랜잭션 통계를 조회할 수 있습니다."},{name:"<button type='button' class='btn btn-danger btn-xs'><span class='glyphicon glyphicon-plus'></span></button>",desc:"에러를 발생시킨 트랜잭션이 포함되어있다는 의미입니다."}]}]},unknownList:{mainStyle:"",title:"UnknownList",desc:"차트 오른쪽 상단의 아이콘부터",category:[{title:"[기능]",items:[{name:"첫번째",desc:"Response Summary"},{name:"두번째",desc:"해당 노드 상세보기"}]}]},searchAndOrder:{mainStyle:"",title:"검색과 필터링",desc:"서버 이름과 Count로 검색이 가능합니다.",category:[{title:"[기능]",items:[{name:"Name",desc:"이름을 오름/내림차순 정렬 합니다."},{name:"Count",desc:"갯수를 오름/내림차순 정렬 합니다."}]}]}},inspector:{list:{mainStyle:"",title:"Agent 리스트",desc:"현 Appliation Name에 등록된 agent 리스트입니다.",category:[{title:"[범례]",items:[{name:"<span class='glyphicon glyphicon-home'></span>",desc:"Agent가 설치된 장비의 호스트 이름"},{name:"<span class='glyphicon glyphicon-hdd'></span>",desc:"설치된 agent의 agent-id"},{name:"<span class='glyphicon glyphicon-ok-sign' 
style='color:#40E340'></span>",desc:"정상적으로 실행중인 agent 상태 표시"},{name:"<span class='glyphicon glyphicon-minus-sign' style='color:#F00'></span>",desc:"Shutdown 된 agent 상태 표시"},{name:"<span class='glyphicon glyphicon-remove-sign' style='color:#AAA'></span>",desc:"연결이 끊긴 agent 상태 표시"},{name:"<span class='glyphicon glyphicon-question-sign' style='color:#AAA'></span>",desc:"알수 없는 상태의 agent 상태 표시"}]}]},heap:{mainStyle:"",title:"Heap",desc:"JVM의 heap 정보와 full garbage collection 소요 시간",category:[{title:"[범례]",items:[{name:"Max",desc:"최대 heap 사이즈"},{name:"Used",desc:"현재 사용 중인 heap 사이즈"},{name:"FCG",desc:"Full garbage collection의 총 소요 시간(2번 이상 발생 시, 괄호 안에 발생 횟수 표시)"}]}]},permGen:{mainStyle:"",title:"PermGen",desc:"JVM의 PermGen 정보와 full garbage collection 소요 시간",category:[{title:"[범례]",items:[{name:"Max",desc:"최대 heap 사이즈"},{name:"Used",desc:"현재 사용 중인 heap 사이즈"},{name:"FCG",desc:"Full garbage collection의 총 소요 시간(2번 이상 발생 시, 괄호 안에 발생 횟수 표시)"}]}]},cpuUsage:{mainStyle:"",title:"Cpu Usage",desc:"JVM과 시스템의 CPU 사용량 - 멀티코어 CPU의 경우, 전체 코어 사용량의 평균입니다.",category:[{title:"[범례]",items:[{name:"Java 1.6",desc:"JVM의 CPU 사용량만 수집됩니다."},{name:"Java 1.7+",desc:"JVM과 전체 시스템의 CPU 사용량 모두 수집됩니다."}]}]},tps:{mainStyle:"",title:"TPS",desc:"서버로 인입된 초당 트랜잭션 수",category:[{title:"[범례]",items:[{name:"Sampled New (S.N)",desc:"선택된 agent에서 시작한 샘플링된 트랜잭션"},{name:"Sampled Continuation (S.C)",desc:"다른 agent에서 시작한 샘플링된 트랜잭션"},{name:"Unsampled New (U.N)",desc:"선택된 agent에서 시작한 샘플링되지 않은 트랜잭션"},{name:"Unsampled Continuation (U.C)",desc:"다른 agent에서 시작한 샘플링되지 않은 트랜잭션"},{name:"Total",desc:"모든 트랜잭션"}]}]},wrongApp:["<div style='font-size:12px'>해당 agent는 {{application1}}이 아닌 {{application2}}에 포함되어 있습니다.<br>","원인은 다음 중 하나입니다.<hr>","1. 
해당 agent가 {{application1}}에서 {{application2}}으로 이동한 경우<br>","2.{{agentId}}의 agent가 {{application2}}에도 등록 된 경우<hr>","1의 경우 {{application1}}과 {{agentId}}간의 매핑 저보를 삭제해야 합니다<br>","2의 경우 중복 등록 된 agent의 id를 변경해야 합니다.</div>"].join("")},callTree:{column:{mainStyle:"",title:"Call Tree",desc:"Call Tree의 컬럼명을 설명합니다.",category:[{title:"[컬럼]",items:[{name:"Gap",desc:"이전 메소드가 시작된 후 현재 메소드를 실행하기 까지의 지연 시간"},{name:"Exec",desc:"메소드 시작부터 종료까지의 시간"},{name:"Exec(%)",desc:"<img src='/images/help/callTree_01.png'/>"},{name:"",desc:"<span style='background-color:#FFFFFF;color:#5bc0de'>옅은 파란색</span><br/>트랜잭션 전체 실행 시간 대 exec 시간의 비율"},{name:"",desc:"<span style='background-color:#FFFFFF;color:#4343C8'>진한 파란색</span><br/>self 시간 비율"},{name:"Self",desc:"메소드 자체의 시작부터 종료까지의 시간으로 하위의 메소드가 실행된 시간을 제외한 값"}]}]}},transactionTable:{log:{}},transactionList:{openError:{noParent:"부모 윈도우의 scatter chart 정보가 변경되어 더 이상 transaction 정보를 표시할 수 없습니다.",noData:"부모 윈도우에 {{application}} scatter chart 정보가 없습니다."}}};pinpointApp.constant("helpContent-ko",a)}(),
/* helpContentTemplate: Handlebars template (compiled once here) that renders one helpContent entry
 * — title/desc plus optional category tables and lists — into the tooltip HTML. */
function(){"use strict";var a=['<div class="pinpoint-tooltip" style="{{mainStyle}}">',"<dl>","<dt>{{{title}}}</dt>","<dd>{{{desc}}}</dd>","</dl>","{{#if category}}<hr/>{{/if}}","{{#each category}}","<p>{{title}}</p>",'{{#if image}}<p style="text-align:center">{{{image}}}</p>{{/if}}',"{{#if items}}","<table>","{{#each items}}","<tr>",'<td style="{{nameStyle}}">{{{name}}}</td>','<td style="{{descStyle}}">{{{desc}}}</td>',"</tr>","{{/each}}","</table>","{{/if}}","{{#if list}}","<ul>","{{#each list}}","<li>{{{this}}}</li>","{{/each}}","</ul>","{{/if}}","{{/each}}","</div>"].join(""),b=Handlebars.compile(a);pinpointApp.constant("helpContentTemplate",b)}(),
/* helpContentService: returns the "helpContent-<userLocale>" constant if registered,
 * otherwise falls back to "helpContent-<defaultLocale>" (lookup via $injector). */
function(){"use strict";pinpointApp.factory("helpContentService",["$window","$injector","UserLocalesService",function(a,b,c){var d="helpContent-"+c.userLocale,e="helpContent-"+c.defaultLocale;return b.has(d)?b.get(d):b.get(e)}])}(),
/* AnalyticsService (continues below): wraps Google Analytics ga() — no-op when ga is undefined
 * or globalConfig.sendAllowed === false — and defines the CONST event-name catalogue. */
function(a){"use 
strict";pinpointApp.service("AnalyticsService",["$rootScope","globalConfig",function(a,b){"undefined"!=typeof ga&&(this.send=function(a,c,d,e,f){b.sendAllowed!==!1&&(1==arguments.length?ga("send","pageview",arguments[0]):ga("send","event",a,c,d,e,f))}),this.CONST={},this.CONST.MAIN="Main",this.CONST.CONTEXT="Context",this.CONST.CALLSTACK="CallStack",this.CONST.MIXEDVIEW="MixedView",this.CONST.INSPECTOR="Inspector",this.CONST.CLK_APPLICATION="ClickApplication",this.CONST.CLK_TIME="ClickTime",this.CONST.CLK_SEARCH_NODE="ClickSearchNode",this.CONST.CLK_CLEAR_SEARCH="ClickClearSearch",this.CONST.CLK_NODE="ClickNode",this.CONST.CLK_LINK="ClickLink",this.CONST.CLK_UPDATE_TIME="ClickUpdateTime",this.CONST.CLK_HELP="ClickHelp",this.CONST.CLK_SCATTER_SETTING="ClickScatterSetting",this.CONST.CLK_DOWNLOAD_SCATTER="ClickDownloadScatter",this.CONST.CLK_RESPONSE_GRAPH="ClickResponseGraph",this.CONST.CLK_LOAD_GRAPH="ClickLoadGraph",this.CONST.CLK_SHOW_GRAPH="ClickShowGraph",this.CONST.CLK_SHOW_SERVER_LIST="ClickShowServerList",this.CONST.CLK_OPEN_INSPECTOR="ClickOpenInspector",this.CONST.CLK_FILTER_TRANSACTION="ClickFilterTransaction",this.CONST.CLK_FILTER_TRANSACTION_WIZARD="ClickFilterTransactionWizard",this.CONST.CLK_MORE="ClickMore",this.CONST.CLK_DISTRIBUTED_CALL_FLOW="ClickDistributedCallFlow",this.CONST.CLK_SERVER_MAP="ClickServerMap",this.CONST.CLK_RPC_TIMELINE="ClickRPCTimeline",this.CONST.CLK_CALL="ClickCall",this.CONST.CLK_TRANSACTION="ClickTransaction",this.CONST.CLK_HEAP="ClickHeap",this.CONST.CLK_PERM_GEN="ClickPermGen",this.CONST.CLK_CPU_LOAD="ClickCpuLoad",this.CONST.CLK_REFRESH="ClickRefresh",this.CONST.CLK_CALLEE_RANGE="ClickCalleeRange",this.CONST.CLK_CALLER_RANGE="ClickCallerRange",this.CONST.CLK_REALTIME_CHART_HIDE="ClickRealtimeChartHide",this.CONST.CLK_REALTIME_CHART_SHOW="ClickRealtimeChartShow",this.CONST.CLK_REALTIME_CHART_PIN_ON="ClickRealtimeChartPinOn",this.CONST.CLK_REALTIME_CHART_PIN_OFF="ClickRealtimeChartPinOff",this.CONST.CLK_SHOW_SERVER_TYPE_DETA
IL="ClickShowServerTypeDetail",this.CONST.CLK_CHANGE_AGENT_INSPECTOR="ClickChangeAgentInspector",this.CONST.CLK_CHANGE_AGENT_MAIN="ClickChangeAgentMain",this.CONST.CLK_START_REALTIME="ClickStartRealtime",this.CONST.CLK_CONFIGURATION="ClickConfiguration",this.CONST.CLK_GENERAL="ClickConfigurationGeneral",this.CONST.CLK_ALARM="ClickConfigurationAlarm",this.CONST.CLK_HELP="ClickConfigurationHelp",this.CONST.CLK_GENERAL_SET_DEPTH="ClickGeneralSetDepth",this.CONST.CLK_GENERAL_SET_PERIOD="ClickGeneralSetPeriod",this.CONST.CLK_GENERAL_SET_FAVORITE="ClickGeneralSetFavorite",this.CONST.CLK_ALARM_CREATE_USER_GROUP="ClickAlarmCreateUserGroup",this.CONST.CLK_ALARM_REFRESH_USER_GROUP="ClickAlarmRefreshUserGroup",this.CONST.CLK_ALARM_FILTER_USER_GROUP="ClickAlarmFilterUserGroup",this.CONST.CLK_ALARM_ADD_USER="ClickAlarmAddUser",this.CONST.CLK_ALARM_REFRESH_USER="ClickAlarmRefreshUser",this.CONST.CLK_ALARM_FILTER_USER="ClickAlarmFilterUser",this.CONST.CLK_ALARM_CREATE_PINPOINT_USER="ClickAlarmCreatePinpointUser",this.CONST.CLK_ALARM_REFRESH_PINPOINT_USER="ClickAlarmRefreshPinpointUser",this.CONST.CLK_ALARM_FILTER_PINPOINT_USER="ClickAlarmFilterPinpointUser",this.CONST.CLK_ALARM_CREATE_RULE="ClickAlarmCreateUserGroup",this.CONST.CLK_ALARM_REFRESH_RULE="ClickAlarmRefreshUserGroup",this.CONST.CLK_ALARM_FILTER_RULE="ClickAlarmFilterUserGroup",this.CONST.TG_DATE="ToggleDate",this.CONST.TG_UPDATE_ON="ToggleUpdateOn",this.CONST.TG_UPDATE_OFF="ToggleUpdateOff",this.CONST.TG_NODE_VIEW="ToggleNodeView",this.CONST.TG_SCATTER_SUCCESS="ToggleScatterSuccess",this.CONST.TG_SCATTER_FAILED="ToggleScatterFailed",this.CONST.TG_MERGE_TYPE="ToggleMergeType",this.CONST.TG_CALL_COUNT="ToggleCallCount",this.CONST.TG_TPS="ToggleTPS",this.CONST.TG_ROUTING="ToggleRouting",this.CONST.TG_CURVE="ToggleCurve",this.CONST.TG_REALTIME_CHART_RESIZE="ToggleRealtimeChartResize",this.CONST.ST_="Sort",this.CONST.ASCENDING="ascending",this.CONST.DESCENDING="descending",this.CONST.ON="on",this.CONST.OFF="off",this.CONST.
MAIN_PAGE="/main.page",this.CONST.FILTEREDMAP_PAGE="/filteredMap.page",this.CONST.INSPECTOR_PAGE="/inspector.page",this.CONST.SCATTER_FULL_SCREEN_PAGE="/scatterFullScreen.page",this.CONST.TRANSACTION_DETAIL_PAGE="/transactionDetail.page",this.CONST.TRANSACTION_LIST_PAGE="/transactionList.page",this.CONST.TRANSACTION_VIEW_PAGE="/transactionView.page"}])}(jQuery),
/* RealtimeWebsocketService: manages the active-thread WebSocket (open/close/send), a 1s watchdog
 * that calls the caller's ondelay() after wsTimeout of silence, and a bounded reconnect
 * (maxRetryCount, only within retryTimeout of a never-messaged connection). Callbacks supplied
 * via open({onopen,onmessage,onclose,ondelay,retry}). NOTE(review): minified — edit upstream sources. */
function(){"use strict";pinpointApp.constant("RealtimeWebsocketServiceConfig",{wsUrl:"/agent/activeThread.pinpointws",wsTimeout:1e4,retryTimeout:3e3,maxRetryCount:1}),pinpointApp.service("RealtimeWebsocketService",["RealtimeWebsocketServiceConfig",function(a){function b(){i=new WebSocket("ws://"+location.host+a.wsUrl),i.onopen=function(a){g=h=Date.now(),c(),f.onopen(a)},i.onmessage=function(a){h=Date.now(),f.onmessage(JSON.parse(a.data))},i.onclose=function(a){console.log("onClose websocket",a),i=null,d(),f.onclose(a),e()}}function c(){j=setInterval(function(){null!==h&&(Date.now()-h<a.wsTimeout||f.ondelay())},1e3)}function d(){clearInterval(j)}function e(){null!==g&&g===h&&Date.now()-g<a.retryTimeout&&k<a.maxRetryCount&&(console.log("retry websocket connection"),k++,f.retry())}var f,g=null,h=null,i=null,j=null,k=0;this.open=function(a){return i=null,f=a,angular.isDefined(WebSocket)?(b(),!0):!1},this.isOpened=function(){return null!==i},this.close=function(){null!==i&&i.close(),i=null},this.send=function(a){null!==i&&i.send(a)},this.stopReceive=function(a){null!==i&&i.send(a),d()}}])}(),function(){"use 
strict";pinpointApp.constant("PreferenceServiceConfig",{names:{favorite:"preference.favorite"},defaults:{callee:1,caller:1,period:"5m"},list:[{name:"caller",type:"number"},{name:"callee",type:"number"},{name:"period",type:"string"}],cst:{periodTypes:["5m","20m","1h","3h","6h","12h","1d","2d"],depthList:[1,2,3,4],maxFavorite:5e3,maxPeriod:2,realtimeScatterPeriod:3e5,responseType:["1s","3s","5s","Slow","Error"],responseTypeColor:["#2ca02c","#3c81fa","#f8c731","#f69124","#f53034"],agentAllStr:"All"}}),pinpointApp.service("PreferenceService",["PreferenceServiceConfig","webStorage",function(a,b){function c(){b.add(a.names.favorite,JSON.stringify(g))}function d(){jQuery.each(a.list,function(c,d){var g=d.name;switch(f[g]=b.get(g)||a.defaults[g],d.type){case"number":f[g]=parseInt(f[g])}var h=g.substring(0,1).toUpperCase()+g.substring(1);e["get"+h]=function(){return f[g]},e["set"+h]=function(a){b.add(g,a),f[g]=a}}),g=b.get(a.names.favorite)||[]}var e=this,f={},g=[];d(),this.addFavorite=function(b){g.length!=a.cst.maxFavorite&&-1===g.indexOf(b)&&(g.push(b),c())},this.removeFavorite=function(a){var b=g.indexOf(a);-1!==b&&(g.splice(b,1),c())},this.getFavoriteList=function(){return g},this.getDepthList=function(){return a.cst.depthList},this.getPeriodTypes=function(){return a.cst.periodTypes},this.getMaxPeriod=function(){return a.cst.maxPeriod},this.getRealtimeScatterXRange=function(){return a.cst.realtimeScatterPeriod},this.getRealtimeScatterXRangeStr=function(){return a.cst.realtimeScatterPeriod/1e3/60+"m"},this.getResponseTypeColor=function(){return a.cst.responseTypeColor},this.getAgentAllStr=function(){return a.cst.agentAllStr},this.getResponseTypeFormat=function(){var b={};return jQuery.each(a.cst.responseType,function(a,c){b[c]=0}),b},this.getCalleeByApp=function(a){return angular.isUndefined(a)?this.getCallee():b.get(a+"+callee")||this.getCallee()},this.getCallerByApp=function(a){return 
angular.isUndefined(a)?this.getCaller():b.get(a+"+caller")||this.getCaller()},this.setDepthByApp=function(a,c){angular.isUndefined(a)||null===a||angular.isUndefined(c)||null===c||b.add(a,c)}}])}(),
/* CommonAjaxService: shared HTTP helpers — getServerTime (falls back to client Date.now() on
 * error), getApplicationList, getResponseTimeHistogramData, and a jQuery-based getSQLBind POST. */
function(){"use strict";pinpointApp.constant("CommonAjaxServiceConfig",{serverTimeUrl:"/serverTime.pinpoint",applicationListUrl:"/applications.pinpoint",realtimeSummaryNLoadDataUrl:"/getResponseTimeHistogramData.pinpoint"}),pinpointApp.service("CommonAjaxService",["CommonAjaxServiceConfig","$http",function(a,b){function c(a){var b=[];for(var c in a)b.push(c+"="+a[c]);return b.join("&")}this.getSQLBind=function(a,b,c){jQuery.ajax({type:"POST",url:a,data:b,cache:!1,dataType:"json",success:function(a){angular.isFunction(c)&&c(a)},error:function(a,b,d){angular.isFunction(c)&&c(d)}})},this.getServerTime=function(c){b.get(a.serverTimeUrl).success(function(a){c(a.currentServerTime)}).error(function(){c(Date.now())})},this.getApplicationList=function(c,d){b.get(a.applicationListUrl).success(function(a){c(a)}).error(function(){d()})},this.getResponseTimeHistogramData=function(d,e,f){b({url:a.realtimeSummaryNLoadDataUrl+"?"+c(d),method:"GET"}).then(function(a){e(a.data)},function(){f()})}}])}(),
/* AgentAjaxService (continues below): GET wrappers for the per-agent endpoints
 * (list, info, events, stat-chart data); getEventList pre-sets exclude="10199". */
function(a){"use strict";pinpointApp.constant("AgentAjaxServiceConfig",{agentList:"/getAgentList.pinpoint",agentInfo:"/getAgentInfo.pinpoint",agetEvent:"/getAgentEvent.pinpoint",agentStatus:"/getAgentStatus.pinpoint",agentEventList:"/getAgentEvents.pinpoint",agentStateForChart:"/getAgentStat.pinpoint"}),pinpointApp.service("AgentAjaxService",["AgentAjaxServiceConfig","$http",function(a,b){function c(a,c,e){b.get(a+d(c)).then(function(a){e(a.data)},function(a){e(a)})}function d(a){var b="?";for(var c in a)b+=("?"==b?"":"&")+c+"="+a[c];return 
b}this.getAgentList=function(b,d){c(a.agentList,b,d)},this.getAgentStateForChart=function(b,d){c(a.agentStateForChart,b,d)},this.getAgentInfo=function(b,d){c(a.agentInfo,b,d)},this.getEventList=function(b,d){b.exclude="10199",c(a.agentEventList,b,d)},this.getEvent=function(b,d){c(a.agetEvent,b,d)}}])}(jQuery),
/* TooltipService: binds a Tooltipster tooltip to elements with class "<id>Tooltip", rendering the
 * matching helpContent section through helpContentTemplate; position/trigger come from
 * TooltipServiceConfig per tooltip id. NOTE(review): minified — edit upstream sources. */
function(a){"use strict";pinpointApp.constant("TooltipServiceConfig",{scatter:{position:"bottom",trigger:"click"},navbar:{position:"bottom",trigger:"click"},agentList:{position:"bottom",trigger:"click"},heap:{position:"top",trigger:"click"},permGen:{position:"top",trigger:"click"},cpuUsage:{position:"top",trigger:"click"},tps:{position:"top",trigger:"click"},responseSummaryChart:{position:"top",trigger:"click"},loadChart:{position:"top",trigger:"click"},serverList:{position:"bottom",trigger:"click"},callTree:{position:"bottom",trigger:"click"},realtime:{position:"top",trigger:"click"},alarmRules:{position:"top",trigger:"click"}}),pinpointApp.service("TooltipService",["TooltipServiceConfig","helpContentTemplate","helpContentService",function(b,c,d){function e(a){switch(a){case"scatter":return function(){return c(d.scatter["default"])};case"navbar":return function(){return c(d.navbar.applicationSelector)+c(d.navbar.depth)+c(d.navbar.periodSelector)};case"agentList":return function(){return c(d.inspector.list)};case"heap":return function(){return c(d.inspector.heap)};case"permGen":return function(){return c(d.inspector.permGen)};case"cpuUsage":return function(){return c(d.inspector.cpuUsage)};case"tps":return function(){return c(d.inspector.tps)};case"responseSummaryChart":return function(){return c(d.nodeInfoDetails.responseSummary)};case"loadChart":return function(){return c(d.nodeInfoDetails.load)};case"serverList":return function(){return c(d.nodeInfoDetails.nodeServers)};case"callTree":return function(){return c(d.callTree.column)};case"realtime":return function(){return c(d.realtime["default"])};case"alarmRules":return function(){return 
c(d.configuration.alarmRules)}}}this.init=function(c){a("."+c+"Tooltip").tooltipster({content:e(c),position:b[c].position,trigger:b[c].trigger})}}])}(jQuery),
/* navbarDirective (continues past this chunk): top navigation bar — application selector,
 * inbound/outbound depth, period/calendar pickers, auto-refresh. */
function(a){"use strict";pinpointApp.constant("cfg",{periodTypePrefix:".navbar.periodType",periodType:{RANGE:"range",LAST:"last",REALTIME:"realtime"}}),pinpointApp.directive("navbarDirective",["cfg","$route","$rootScope","$http","$document","$timeout","$window","webStorage","helpContentService","AnalyticsService","PreferenceService","TooltipService","CommonAjaxService",function(b,c,d,e,f,g,h,i,j,k,l,m,n){return{restrict:"EA",replace:!0,templateUrl:"features/navbar/navbar.html?v="+G_BUILD_TIME,link:function(c,e){function o(){a("#navbar_depth div").on("show.bs.dropdown",function(){}).on("hide.bs.dropdown",function(a){T===!0?a.preventDefault():U===!1&&c.$apply(function(){c.cancelDepth(!1)}),T=!1,U=!1}),a("#navbar_depth .dropdown-menu").on("click",function(){T=!0})}var p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T=!1,U=!1;c.showNavbar=!1,c.periodDelay=!1,c.aReadablePeriodList=l.getPeriodTypes(),c.autoUpdate=!1,c.timeLeft=10,c.timeCountDown=10,c.timeList=[{time:10,label:"10 seconds"},{time:20,label:"20 seconds"},{time:30,label:"30 seconds"},{time:60,label:"1 minute"}],c.callee=u=l.getCalleeByApp(c.application),c.caller=v=l.getCallerByApp(c.application),c.rangeList=l.getDepthList(),c.applications=[{text:"Select an 
application.",value:""}],e.bind("selectstart",function(a){return!1}),m.init("navbar"),o(),w=function(a){s=a,c.periodType=I(),c.showNavbar=!0,c.showStaticApplication=!1,c.showStatic=!c.showApplication,p=e.find(".application"),c.applications=[{text:"Loading...",value:""}],c.application=s.getApplication()||"",c.callee=u=l.getCalleeByApp(c.application),c.caller=v=l.getCallerByApp(c.application),c.disableApplication=!0,c.readablePeriod=s.getReadablePeriod()||l.getPeriod(),c.periodCalendar=s.getReadablePeriod()||l.getPeriod(),c.queryEndTime=s.getQueryEndTime()||"",y(),x(),C()},H=function(b){s=b,c.periodType=I(),c.showNavbar=!0,c.showStaticApplication=!0,p=e.find(".application"),c.application=s.getApplication()||"",c.applicationName=s.getApplicationName()||"",c.readablePeriod=s.getReadablePeriod()||l.getPeriod(),c.periodCalendar=s.getReadablePeriod()||l.getPeriod(),c.queryEndTime=s.getQueryEndTime()||"",a("#ui-datepicker-div").remove()},x=function(){t=a("#ui-datepicker-div"),t.find(".guide").html(j.navbar.searchPeriod.guide.replace(/\{\{day\}\}/,l.getMaxPeriod())),t.find("button.ui-datepicker-close").on("click",function(){t.hide()}),q=e.find("#from-picker"),q.datetimepicker({altField:"#from-picker-alt",altFieldTimeOnly:!1,dateFormat:"yy-mm-dd",timeFormat:"HH:mm",controlType:"select",showButtonPanel:!1,onSelect:function(){var a=moment(K(q)),b=moment(K(r));if(b.isAfter(moment(K(q)).add(l.getMaxPeriod(),"days"))||a.isAfter(b)){var c=R();z(r,a.add(c[0],c[1]).format())}},onClose:function(a,b){""!==r.val()?q.datetimepicker("getDate")>r.datetimepicker("getDate")&&r.datetimepicker("setDate",q.datetimepicker("getDate")):r.val(a)}}),z(q,s.getQueryStartTime()||moment().subtract(20,"minute").valueOf()),r=e.find("#to-picker"),r.datetimepicker({altField:"#to-picker-alt",altFieldTimeOnly:!1,dateFormat:"yy-mm-dd",timeFormat:"HH:mm",controlType:"select",showButtonPanel:!1,onSelect:function(){var 
a=moment(K(q)),b=moment(K(r));if(a.isBefore(moment(K(r)).subtract(l.getMaxPeriod(),"days"))||a.isAfter(b)){var c=R();z(q,b.subtract(c[0],c[1]).format())}},onClose:function(a,b){""!==q.val()?q.datetimepicker("getDate")>r.datetimepicker("getDate")&&q.datetimepicker("setDate",r.datetimepicker("getDate")):q.val(a)}}),z(r,s.getQueryEndTime()),a("#from-picker-alt").on("click",function(){Q()}),a("#to-picker-alt").on("click",function(){Q()}),a(document).mousedown(function(b){t.is(":visible")!==!1&&0===a(b.target).parents("div#ui-datepicker-div").length&&t.hide()})},R=function(){var a=[],b=c.periodCalendar.substring(c.periodCalendar.length-1);return a[0]=parseInt(c.periodCalendar),a[1]="d"==b?"days":"h"==b?"hours":"minutes",a},Q=function(){t.is(":visible")?t.hide():(t.css("left",a("#navbar_period").offset().left),t.show())},K=function(a){return a.datetimepicker("getDate")},I=function(){if(s.isRealtime())return b.periodType.REALTIME;var a=b.periodType.LAST;return a=h.name&&i.get(h.name+b.periodTypePrefix)?i.get(h.name+b.periodTypePrefix):s.getApplication()?b.periodType.RANGE:b.periodType.LAST,s.getReadablePeriod()&&_.indexOf(c.aReadablePeriodList,s.getReadablePeriod())<0&&(a=b.periodType.RANGE),a},J=function(){h.name=h.name||"window."+_.random(1e5,999999),i.add(h.name+b.periodTypePrefix,c.periodType)},z=function(a,b){a.datetimepicker("setDate",b?new Date(b):new 
Date)},B=function(){c.application&&(s.setApplication(c.application),c.callee=u=l.getCalleeByApp(c.application),c.caller=v=l.getCallerByApp(c.application),s.setCalleeRange(c.callee),s.setCallerRange(c.caller),c.periodType===b.periodType.LAST&&c.readablePeriod?(s.setPeriodType(b.periodType.LAST),A(function(a){s.setReadablePeriod(c.readablePeriod),s.setQueryEndDateTime(moment(a).format("YYYY-MM-DD-HH-mm-ss")),s.autoCalculateByQueryEndDateTimeAndReadablePeriod(),G(),z(q,s.getQueryStartTime()),z(r,s.getQueryEndTime())})):c.periodType===b.periodType.REALTIME?(s.setPeriodType(b.periodType.REALTIME),A(function(a){s.setReadablePeriod(l.getRealtimeScatterXRangeStr()),s.setQueryEndDateTime(moment(a).format("YYYY-MM-DD-HH-mm-ss")),s.autoCalculateByQueryEndDateTimeAndReadablePeriod(),G(),z(q,s.getQueryStartTime()),z(r,s.getQueryEndTime())})):D()&&E()&&(s.setPeriodType(b.periodType.RANGE),s.setQueryStartTime(D()),s.setQueryEndTime(E()),s.autoCalcultateByQueryStartTimeAndQueryEndTime(),G()))},G=function(){d.$broadcast("realtimeChartController.close"),J(),c.$emit("navbarDirective.changed",s)},A=function(a){n.getServerTime(function(b){a(b)})},C=function(){n.getApplicationList(function(a){angular.isArray(a)===!1||0===a.length?(c.applications[0].text="Application not found.",d.$broadcast("alarmRule.applications.set",c.applications),d.$broadcast("configuration.general.applications.set",c.applications)):(S=a,F(S,function(){c.disableApplication=!1,g(function(){s.getApplication()?(p.select2("val",s.getApplication()),c.application=s.getApplication()):p.select2("open")}),d.$broadcast("alarmRule.applications.set",c.applications),d.$broadcast("configuration.general.applications.set",c.applications)})),c.hideFakeApplication=!0},function(){c.applications[0].text="Application error.",c.hideFakeApplication=!0})},D=function(){return q.datetimepicker("getDate").getTime()},E=function(){return r.datetimepicker("getDate").getTime()},F=function(a,b){var 
d=l.getFavoriteList();c.favoriteCount=d.length,c.applications=[{text:"",value:""}];var e=[],f=[];angular.forEach(a,function(a,b){var c=a.applicationName+"@"+a.serviceType;-1===d.indexOf(c)?f.push({text:c,value:a.applicationName+"@"+a.code}):e.push({text:c,value:a.applicationName+"@"+a.code})}),c.applications=e.concat(f),angular.isFunction(b)&&b.apply(c)},y=function(){function a(a){if(!a.id)return a.text;var b=a.text.split("@");if(b.length>1){var c=f.get(0).createElement("img");return c.src="/images/icons/"+b[1]+".png",c.style.height="25px",c.style.paddingRight="3px",c.outerHTML+b[0]}return a.text}p.select2({placeholder:"Select an application",searchInputPlaceholder:"Input your application name",allowClear:!1,formatResult:a,formatSelection:a,escapeMarkup:function(a){return a}}).on("change",function(a){k.send(k.CONST.MAIN,k.CONST.CLK_APPLICATION),c.application=a.val,c.$digest(),B()})},N=function(a){var b=parseInt(a);switch(a.substring(a.length-1)){case"m":b*=6e4;break;case"h":b*=36e5;break;case"d":b*=864e5}return b},O=function(a){c.periodType===b.periodType.LAST?(s.setQueryEndDateTime(moment(s.getQueryEndTime()+a).format("YYYY-MM-DD-HH-mm-ss")),s.autoCalculateByQueryEndDateTimeAndReadablePeriod(),G(),z(q,s.getQueryStartTime()),z(r,s.getQueryEndTime())):(z(q,s.getQueryStartTime()+a),z(r,s.getQueryEndTime()+a),s.setQueryStartTime(D()),s.setQueryEndTime(E()),s.autoCalcultateByQueryStartTimeAndQueryEndTime(),G())},P=function(a){k.send(k.CONST.MAIN,k.CONST.CLK_TIME,a),c.periodDelay=!0,c.readablePeriod=a,c.autoUpdate=!1,B(),g(function(){c.periodDelay=!1,c.$$phase||c.$digest()},1e3)},c.search=function(){B()},c.setPeriod=function(a){c.periodType=b.periodType.LAST,P(a)},c.getPreviousClass=function(){return""},c.getNextClass=function(){return""},c.getPeriodClassInCalendar=function(a){return c.periodCalendar===a?"btn-success":""},c.getPeriodClass=function(a){var d="";return c.periodType!==b.periodType.LAST?d:(c.readablePeriod===a&&(d+="btn-info"),c.periodDelay&&(d+=" 
wait"),d)},c.showUpdate=function(){return!!(c.periodType===b.periodType.LAST&&_.indexOf(["5m","20m","1h","3h"],c.readablePeriod)>=0&&c.application)},c.changeUpdateSetting=function(){k.send(k.CONST.MAIN,c.autoUpdate?k.CONST.TG_UPDATE_OFF:k.CONST.TG_UPDATE_ON)},L=function(){c.autoUpdate&&(c.timeLeft-=1,0===c.timeLeft?(c.update(),c.timeLeft=c.timeCountDown):g(L,1e3))},M=function(){c.timeLeft=c.timeCountDown},c.setPeriodForCalendar=function(a){c.periodCalendar=a},c.setAutoUpdateTime=function(a){k.send(k.CONST.MAIN,k.CONST.CLK_UPDATE_TIME,a+"s"),c.timeCountDown=a,c.timeLeft=a},c.setCallee=function(a){c.callee=a},c.setCaller=function(a){c.caller=a},c.setDepth=function(){T=!1,U=!0,a("#navbar_depth .dropdown-menu").trigger("click.bs.dropdown"),console.log("previous :",u,v,", current :",c.callee,c.caller),u===c.callee&&v===c.caller||(k.send(k.CONST.MAIN,k.CONST.CLK_CALLEE_RANGE,c.callee),k.send(k.CONST.MAIN,k.CONST.CLK_CALLER_RANGE,c.caller),u=c.callee,v=c.caller,l.setDepthByApp(c.application+"+callee",c.callee),l.setDepthByApp(c.application+"+caller",c.caller),window.location.reload(!0))},c.cancelDepth=function(b){c.callee=u,c.caller=v,b&&(T=!1,U=!0,a("#navbar_depth .dropdown-menu").trigger("click.bs.dropdown"))},c.update=function(){var a=c.autoUpdate;c.autoUpdate=!1,c.periodDelay=!0,B(),g(function(){c.periodDelay=!1,M(),c.autoUpdate=a,c.$$phase||c.$digest()},1e3)},c.togglePeriod=function(a){k.send(k.CONST.MAIN,k.CONST.TG_DATE,a),c.periodType=a,c.autoUpdate=!1},c.setRealtime=function(){c.periodType!==b.periodType.REALTIME&&(k.send(k.CONST.MAIN,k.CONST.CLK_START_REALTIME), c.periodType=b.periodType.REALTIME,c.autoUpdate=!1,B())},c.isRealtime=function(){return"undefined"==typeof 
s||null===s?!1:s.isRealtime()},c.showConfig=function(){d.$broadcast("configuration.show")},c.$watch("autoUpdate",function(a,b){a?g(L,1e3):M()}),c.$on("navbarDirective.initialize",function(a,b){w(b)}),c.$on("navbarDirective.initialize.andReload",function(a,d){w(d),c.periodType=b.periodType.LAST,P(l.getPeriod())}),c.$on("navbarDirective.initialize.realtime.andReload",function(a,d){w(d),c.periodType=b.periodType.REALTIME,P(l.getPeriod())}),c.$on("navbarDirective.initializeWithStaticApplication",function(a,b){H(b)}),c.$on("navbarDirective.moveToPast",function(a){O(c.periodType===b.periodType.LAST?-N(c.readablePeriod):-(s.getQueryEndTime()-s.getQueryStartTime()))}),c.$on("navbarDirective.moveToFuture",function(a){O(c.periodType===b.periodType.LAST?N(c.readablePeriod):s.getQueryEndTime()-s.getQueryStartTime())}),c.$on("navbarDirective.changedFavorite",function(a){F(S,function(){c.disableApplication=!1,g(function(){s.getApplication()&&(p.select2("val",s.getApplication()),c.application=s.getApplication())})})})}}}])}(jQuery),function(){"use strict";pinpointApp.constant("serverMapDirectiveConfig",{options:{sContainerId:"servermap",sOverviewId:"servermapOverview",sBigFont:"11pt Lato,NanumGothic,ng,dotum,AppleGothic,sans-serif",sSmallFont:"11pt Lato,NanumGothic,ng,dotum,AppleGothic,sans-serif",sImageDir:"/images/servermap/",htLinkType:{sRouting:"Normal",sCurve:"JumpGap"},htLinkTheme:{"default":{backgroundColor:"#ffffff",borderColor:"#c5c5c5",fontFamily:"11pt Lato,NanumGothic,ng,dotum,AppleGothic,sans-serif",fontColor:"#000000",fontAlign:"center",margin:1,strokeWidth:1},bad:{backgroundColor:"#ffc9c9",borderColor:"#7d7d7d",fontFamily:"11pt 
Lato,NanumGothic,ng,dotum,AppleGothic,sans-serif",fontColor:"#FF1300",fontAlign:"center",margin:1,strokeWidth:1}}}}),pinpointApp.directive("serverMapDirective",["serverMapDirectiveConfig","$rootScope","ServerMapDaoService","AlertsService","ProgressBarService","SidebarTitleVoService","$filter","ServerMapFilterVoService","filteredMapUtilService","$base64","ServerMapHintVoService","$timeout","$location","$window","helpContentTemplate","helpContentService","AnalyticsService",function(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q){return{restrict:"EA",replace:!0,templateUrl:"features/serverMap/serverMap.html?v="+G_BUILD_TIME,link:function(r,s,t){var u,v,w,x,y,z,A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W;z=new d(s),A=new e(s),E=!1,y=null,B={applicationMapData:{linkDataArray:[],nodeDataArray:[]},lastFetchedTimestamp:[],timeSeriesResponses:{values:{},time:[]}},w={},C={},D={},r.oNavbarVoService=null,r.totalRequestCount=!0,r.bShowServerMapStatus=!1,r.linkRouting=a.options.htLinkType.sRouting,r.linkCurve=a.options.htLinkType.sCurve,r.searchNodeQuery="",r.searchNodeIndex=0,r.searchNodeList=[],J=s.find(".serverMapTime"),F=s.find(".fromAgentName"),G=s.find(".toAgentName"),F.select2(),G.select2(),H=!1,r.mergeTypeList=[],r.mergeStatus={},r.showAntStyleHint=!1,W=function(a){a.nodeDataArray.forEach(function(a){a.isWas===!1&&"USER"!==a.serviceType&&angular.isUndefined(r.mergeStatus[a.serviceType])&&(r.mergeTypeList.push(a.serviceType),r.mergeStatus[a.serviceType]=!0)})},M=function(){r.nodeContextMenuStyle="",r.linkContextMenuStyle="",r.backgroundContextMenuStyle="",r.urlPattern="",r.responseTime={from:0,to:3e4},r.includeFailed=null,$("#filterWizard").modal("hide"),"$apply"!=r.$$phase&&"$digest"!=r.$$phase&&"$apply"!=r.$root.$$phase&&"$digest"!=r.$root.$$phase&&r.$digest()},K=function(a,b,d,e,f,g,h,j){A.startLoading(),z.hideError(),z.hideWarning(),z.hideInfo(),y&&y.clear(),A.setLoading(10),w={applicationName:a,serviceTypeName:b,from:d-e,to:d,originTo:r.oNavbarVoService.getQueryEndTime(),callerR
ange:r.oNavbarVoService.getCallerRange(),calleeRange:r.oNavbarVoService.getCalleeRange(),period:e,filter:n.encodeURIComponent(f),hint:g?n.encodeURIComponent(g):!1},f?c.getFilteredServerMapData(w,function(a,b,d){if(a)return A.stopLoading(),z.showError("There is some error."),!1;A.setLoading(50),b.from===d.lastFetchedTimestamp?r.$emit("serverMapDirective.allFetched",d):(B.lastFetchedTimestamp=d.lastFetchedTimestamp-1,r.$emit("serverMapDirective.fetched",B.lastFetchedTimestamp,d));var e=JSON.parse(f);B.applicationMapData=c.mergeFilteredMapData(B.applicationMapData,d.applicationMapData);var g=c.extractDataFromApplicationMapData(B.applicationMapData);if(g=c.addFilterProperty(e,g),W(g),i.doFiltersHaveUnknownNode(e))for(var k in r.mergeStatus)r.mergeStatus[k]=!1;N(B),Q(b,g,h,j)}):c.getServerMapData(w,function(a,b,d){if(a||d.exception)return A.stopLoading(),a?z.showError("There is some error."):z.showError(d.exception),r.$emit("serverMapDirective.hasNoData"),!1;A.setLoading(50),N(d),B=d;var e=c.extractDataFromApplicationMapData(d.applicationMapData);W(e),Q(b,e,h,j)})},N=function(a){0===a.applicationMapData.nodeDataArray.length?r.$emit("serverMapDirective.hasNoData"):r.$emit("serverMapDirective.hasData")},L=function(a,b,c,d){r.inspectApplicationName=c,r.inspectCategory=d,r.nodeContextMenuStyle={display:"block",top:a,left:b,"z-index":9999999},r.$digest()},O=function(a,b){r.linkContextMenuStyle={display:"block",top:a,left:b,"z-index":9999999},r.$digest()},P=function(a,b){r.backgroundContextMenuStyle={display:"block",top:a,left:b,"z-index":9999999},r.$digest()},V=function(){var a=[];for(var b in r.mergeStatus)r.mergeStatus[b]===!0&&a.push(b);return a},Q=function(d,e,f,h){var i=V();if(I=c.mergeMultiLinkGroup(c.mergeGroup(e,i),i),A.setLoading(80),0!==I.nodeDataArray.length){I.linkDataArray=R(I.linkDataArray,f,h),A.setLoading(90);var k=a.options;k.fOnNodeSubGroupClicked=function(a,b,d,e){var 
f=c.getLinkNodeDataByNodeKey(B.applicationMapData,d,e);f.fromNode=c.getNodeDataByKey(B.applicationMapData,f.from),f.toNode=c.getNodeDataByKey(B.applicationMapData,f.to),k.fOnLinkClicked(a,f)},k.fOnNodeClicked=function(a,d,e,f){var g;angular.isDefined(d.unknownNodeGroup)&&!e?d.unknownNodeGroup=c.getUnknownNodeDataByUnknownNodeGroup(B.applicationMapData,d.unknownNodeGroup):g=c.getNodeDataByKey(B.applicationMapData,e||d.key),g&&(d=g),E="node",D=d,r.$emit("serverMapDirective.nodeClicked",a,w,d,I,f),r.oNavbarVoService&&r.oNavbarVoService.isRealtime()&&b.$broadcast("realtimeChartController.initialize",d.isWas,d.applicationName,r.oNavbarVoService.getApplication()+"/"+r.oNavbarVoService.getReadablePeriod()+"/"+r.oNavbarVoService.getQueryEndDateTime()+"/"+r.oNavbarVoService.getCallerRange()),M()},k.fOnNodeDoubleClicked=function(a,b,c){a.diagram.zoomToRect(b.actualBounds,1.2)},k.fOnNodeContextClicked=function(a,b){if(!r.oNavbarVoService.isRealtime()){M();var d=c.getNodeDataByKey(B.applicationMapData,b.key);d&&(b=d),D=b,u&&b.isWas===!0&&L(a.event.layerY,a.event.layerX,b.applicationName,b.category)}},k.fOnLinkClicked=function(a,b){if(!r.oNavbarVoService.isRealtime()){var d;angular.isDefined(b.unknownLinkGroup)?b.unknownLinkGroup=c.getUnknownLinkDataByUnknownLinkGroup(B.applicationMapData,b.unknownLinkGroup):d=c.getLinkDataByKey(B.applicationMapData,b.key),d&&(d.fromNode=b.fromNode,d.toNode=b.toNode,b=d),E="link",C=b,M(),r.$emit("serverMapDirective.linkClicked",a,w,b,I)}},k.fOnLinkContextClicked=function(a,b){var 
f=c.getLinkDataByKey(B.applicationMapData,b.key);f&&(f.fromNode=b.fromNode,f.toNode=b.toNode,b=f),M(),C=b,v&&!angular.isArray(b.targetInfo)&&(O(a.event.layerY,a.event.layerX),r.$emit("serverMapDirective.linkContextClicked",a,d,b,e))},k.fOnBackgroundClicked=function(a){r.$emit("serverMapDirective.backgroundClicked",a,d),M()},k.fOnBackgroundDoubleClicked=function(a){S()},k.fOnBackgroundContextClicked=function(a){r.$emit("serverMapDirective.backgroundContextClicked",a,d),M(),x&&P(a.diagram.lastInput.event.layerY,a.diagram.lastInput.event.layerX)};var l;try{l=_.find(I.nodeDataArray,function(a){return a.applicationName===d.applicationName&&angular.isUndefined(d.serviceType)?!0:a.applicationName===d.applicationName&&a.serviceTypeCode===d.serviceTypeCode}),l&&(k.sBoldKey=l.key)}catch(n){z.showError("There is some error while selecting a node."),console.log(n)}A.setLoading(100),null===y?y=new ServerMap(k,m,q):y.option(k),y.load(I),A.stopLoading(),T(l)}else if(A.stopLoading(),r.oNavbarVoService.getFilter()){var o=r.oNavbarVoService.getFilterAsJson(),p=[];p.push("<p>There is no data with the filter below.</p>"),angular.forEach(o,function(a,b){p.push("<p><b>Filter"),o.length>1&&p.push(" #"+(b+1)),p.push(" : "+a.fa+"("+a.fst+") ~ "+a.ta+"("+a.tst+")</b><br>"),p.push("<ul>"),a.url&&p.push("<li>Url Pattern : "+j.decode(a.url)+"</li>"),a.rf&&a.rt&&p.push("<li>Response Time : "+g("number")(a.rf)+" ms ~ "+g("number")(a.rt)+" ms</li>"),p.push("<li>Transaction Result : "+(a.ie?"Failed Only":"Success + Failed")+"</li>"),p.push("</ul></p>")}),z.showInfo(p.join(""))}else z.showInfo("There is no data.")},R=function(a,b,c){var d=a;for(var e in d)d[e].from===d[e].to?d[e].routing="AvoidsNodes":d[e].routing=b,d[e].curve=c;return d},U=function(){M();var a=new 
f;"USER"===C.fromNode.serviceType?a.setImageType("USER").setTitle("USER"):a.setImageType(C.fromNode.serviceType).setTitle(C.fromNode.applicationName),a.setImageType2(C.toNode.serviceType).setTitle2(C.toNode.applicationName),r.fromAgent=C.fromAgent||[],r.toAgent=C.toAgent||[],r.sourceInfo=C.sourceInfo,r.targetInfo=C.targetInfo,r.fromApplicationName=C.fromNode.applicationName,r.toApplicationName=C.toNode.applicationName,F.select2("val",""),G.select2("val",""),r.fromAgentName="",r.toAgentName="",r.$broadcast("sidebarTitleDirective.initialize.forServerMap",a),$("#filterWizard").modal("show"),H||(H=!0,$("#filterWizard").on("shown.bs.modal",function(){if($("slider",this).addClass("auto"),setTimeout(function(){$("#filterWizard slider").removeClass("auto")},500),r.oNavbarVoService.getFilter()){var a=i.findFilterInNavbarVo(C.fromNode.applicationName,C.fromNode.serviceType,C.toNode.applicationName,C.toNode.serviceType,r.oNavbarVoService);if(a){r.urlPattern=a.oServerMapFilterVoService.getRequestUrlPattern(),r.responseTime.from=a.oServerMapFilterVoService.getResponseFrom();var b=a.oServerMapFilterVoService.getResponseTo();r.responseTime.to="max"===b?3e4:b,r.includeFailed=a.oServerMapFilterVoService.getIncludeException(),F.select2("val",a.oServerMapFilterVoService.getFromAgentName()),G.select2("val",a.oServerMapFilterVoService.getToAgentName())}else r.responseTime={from:0,to:3e4}}else r.responseTime={from:0,to:3e4};r.$$phase||r.$digest()}))},r.passingTransactionResponseToScatterChart=function(){r.$emit("serverMapDirective.passingTransactionResponseToScatterChart",D),M()},r.passingTransactionList=function(){q.send(q.CONST.CONTEXT,q.CONST.CLK_FILTER_TRANSACTION);var a=new 
h;a.setMainApplication(C.filterApplicationName).setMainServiceTypeName(C.filterApplicationServiceTypeName).setMainServiceTypeCode(C.filterApplicationServiceTypeCode).setFromApplication(C.fromNode.applicationName).setFromServiceType(C.fromNode.serviceType).setToApplication(C.toNode.applicationName).setToServiceType(C.toNode.serviceType);var b=new k;C.sourceInfo.isWas&&C.targetInfo.isWas&&b.setHint(C.toNode.applicationName,C.filterTargetRpcList),r.$broadcast("serverMapDirective.openFilteredMap",a,b),M()},r.openFilterWizard=function(){q.send(q.CONST.CONTEXT,q.CONST.CLK_FILTER_TRANSACTION_WIZARD),U()},S=function(){y&&y.zoomToFit()},T=function(a){l(function(){"node"===E&&D?y.highlightNodeByKey(D.key):"link"===E&&C?y.highlightLinkByFromTo(C.from,C.to):a&&y.highlightNodeByKey(a.key)})},r.responseTimeFormatting=function(a){return 3e4==a?"30,000+ ms":g("number")(a)+" ms"},r.passingTransactionMap=function(){var a=new h;a.setMainApplication(C.filterApplicationName).setMainServiceTypeCode(C.filterApplicationServiceTypeCode).setMainServiceTypeName(C.filterApplicationServiceTypeName).setFromApplication(C.fromNode.applicationName).setFromServiceType(C.fromNode.serviceType).setToApplication(C.toNode.applicationName).setToServiceType(C.toNode.serviceType).setResponseFrom(r.responseTime.from).setResponseTo(r.responseTime.to).setIncludeException(r.includeFailed).setRequestUrlPattern(j.encode(r.urlPattern)),r.fromAgentName&&a.setFromAgentName(r.fromAgentName),r.toAgentName&&a.setToAgentName(r.toAgentName);var b=new 
k;C.sourceInfo.isWas&&C.targetInfo.isWas&&b.setHint(C.toNode.applicationName,C.filterTargetRpcList),r.$broadcast("serverMapDirective.openFilteredMap",a,b),M()},r.toggleMergeGroup=function(a){q.send(q.CONST.CONTEXT,q.CONST.TG_MERGE_TYPE,a),r.mergeStatus[a]=!r.mergeStatus[a],Q(w,c.extractDataFromApplicationMapData(B.applicationMapData),r.linkRouting,r.linkCurve),M()},r.toggleLinkLableTextType=function(a){"tps"===a?(q.send(q.CONST.CONTEXT,q.CONST.TG_TPS),r.totalRequestCount=!1,r.tps=!0):(q.send(q.CONST.CONTEXT,q.CONST.TG_CALL_COUNT),r.totalRequestCount=!0,r.tps=!1),r.totalRequestCount="tps"!==a,r.tps="tps"===a,Q(w,I,r.linkRouting,r.linkCurve),M()},r.toggleLinkRouting=function(b){q.send(q.CONST.CONTEXT,q.CONST.TG_ROUTING,b),r.linkRouting=a.options.htLinkType.sRouting=b,Q(w,I,r.linkRouting,r.linkCurve),M()},r.toggleLinkCurve=function(b){q.send(q.CONST.CONTEXT,q.CONST.TG_CURVE,b),r.linkCurve=a.options.htLinkType.sCurve=b,Q(w,I,r.linkRouting,r.linkCurve),M()},r.refresh=function(){q.send(q.CONST.CONTEXT,q.CONST.CLK_REFRESH),y&&y.refresh(),M()},r.$on("serverMapDirective.initialize",function(a,b){r.oNavbarVoService&&w.applicationName!==b.getApplicationName()&&(E=!1),r.oNavbarVoService=b,r.oNavbarVoService.getQueryEndTime()===!1||r.oNavbarVoService.getQueryStartTime()===!1?r.bShowServerMapStatus=!1:r.bShowServerMapStatus=!0,v=x=!0,u=!1,K(b.getApplicationName(),b.getServiceTypeName(),b.getQueryEndTime(),b.getQueryPeriod(),b.getFilter(),b.getHint(),r.linkRouting,r.linkCurve)}),r.$on("serverMapDirective.fetch",function(a,b,c){K(r.oNavbarVoService.getApplicationName(),r.oNavbarVoService.getServiceTypeName(),c,b,r.oNavbarVoService.getFilter(),r.oNavbarVoService.getHint(),r.linkRouting,r.linkCurve)}),r.$on("serverMapDirective.initializeWithMapData",function(a,b,d,e){M(),r.bShowServerMapStatus=!1,x=!0,u=b,v=!1,w={applicationName:d.applicationId},B=d,r.oNavbarVoService=e,Q(w,c.extractDataFromApplicationMapData(B.applicationMapData),r.linkRouting,r.linkCurve)}),r.$on("serverMapDirectiv
e.zoomToFit",function(a){S()}),r.$on("serverMapDirective.openFilterWizard",function(a,b){C=b,U()}),r.searchNodeByEnter=function(a){13==a.keyCode&&r.searchNode()},r.searchNodeWithCategory=function(a){y&&(r.searchNodeIndex=a,y.searchNode(r.searchNodeList[a].applicationName,r.searchNodeList[a].serviceType))},r.searchNode=function(){y&&""!==r.searchNodeQuery&&(q.send(q.CONST.MAIN,q.CONST.CLK_SEARCH_NODE),r.searchNodeIndex=0,r.searchNodeList=y.searchNode(r.searchNodeQuery),jQuery(s).find(".search-result").show().find(".count").html("Result : "+r.searchNodeList.length))},r.clearSearchNode=function(){q.send(q.CONST.MAIN,q.CONST.CLK_CLEAR_SEARCH),y.clearQuery(),r.searchNodeIndex=0,r.searchNodeQuery="",r.searchNodeList=[],jQuery(s).find(".search-result").hide()},r.toggleShowAntStyleHint=function(){r.showAntStyleHint=!r.showAntStyleHint},r.moveToPast=function(){r.$emit("navbarDirective.moveToPast"),J.effect("highlight",{color:"#FFFF00"},1e3)},r.moveToFuture=function(){r.$emit("navbarDirective.moveToFuture"),J.effect("highlight",{color:"#FFFF00"},1e3)},r.toggleToolbar=function(){var a=jQuery(s).find(".servermap-toolbar"),b=jQuery(s).find(".servermap-toolbar-handle span");-1==parseInt(a.css("top"))?(a.find(".search-result").hide(),a.animate({top:-55},"fast",function(){b.addClass("glyphicon-chevron-down").removeClass("glyphicon-chevron-up")})):a.animate({top:-1},"fast",function(){r.searchNodeList.length>0&&a.find(".search-result").show(),b.addClass("glyphicon-chevron-up").removeClass("glyphicon-chevron-down")})},jQuery(".serverMapTooltip").tooltipster({content:function(){return o(p.servermap["default"])},position:"bottom-right",trigger:"click"})}}}])}(),function(){"use 
strict";pinpointApp.constant("realtimeChartDirectiveConfig",{params:{TIMEOUT_MAX_COUNT:"timeoutMaxCount",SHOW_EXTRA_INFO:"showExtraInfo",REQUEST_LABEL:"requestLabel",REQUEST_COLOR:"requestColor",CHART_COLOR:"chartColor",NAMESPACE:"namespace",XCOUNT:"xcount",HEIGHT:"height",WIDTH:"width"},responseCode:{ERROR_BLACK:111,TIMEOUT:211},consts:{maxDealyCount:5,verticalGridCount:5},message:{NO_ACTIVE_THREAD:"No Active Thread",NO_RESPONSE:"No Response"}}),pinpointApp.directive("realtimeChartDirective",["realtimeChartDirectiveConfig",function(a){return{restrict:"EA",replace:!0,template:'<svg width="" height=""></svg>',link:function(b,c,d){function e(){L=d3.select(c.get(0)).attr("width",y.width).attr("height",y.height).append("g").attr("class","base").attr("transform","translate("+(y.showExtraInfo?z.margin.left:0)+","+z.margin.top+")"),L.append("defs").append("clipPath").attr("id","clip-"+y.namespace).append("rect").attr("x",1).attr("y",0).attr("width",A).attr("height",B)}function f(){G.length=0;for(var a=Date.now(),b=0;b<y.requestLabel.length;b++)G.push(d3.range(y.xAxisCount).map(function(){return{y:0,d:a}}))}function g(){M=d3.scale.linear().domain(z.domain).range([0,A]),y.showExtraInfo?L.append("g").attr("class","y axis"):(P=L.append("g").attr("class","v-grid"),i()),O=L.append("g").attr("class","h-grid"),h()}function h(){N=d3.scale.linear().domain([0,C]).range([B,0]),y.showExtraInfo&&L.selectAll("g.y.axis").call(d3.svg.axis().scale(N).ticks(3).orient("left").tickFormat(z.yAxisFormat)),j()}function i(){for(var b=[],c=0;c<a.consts.verticalGridCount;c++)b.push(J*c);Q=P.selectAll("line").data(b).enter().append("line").attr("class","grid").attr({x1:function(a){return a},x2:function(a){return a},y1:-10,y2:B})[0]}function j(){var a=[],b=L.select("g.y.axis").selectAll("g.tick");if(0!==b.length){jQuery.each(b[0],function(b,c){a.push(parseFloat(c.getAttribute("transform").replace(/translate\(0,(-?[0-9.]*)\)/,"$1")))});var 
c=O.selectAll("line").data(a);c.enter().append("line").attr("class","grid").transition().attr("x1",0).attr("x2",A).attr("y1",function(a,b){return a}).attr("y2",function(a,b){return a}),c.transition().attr("x1",0).attr("x2",A).attr("y1",function(a){return a}).attr("y2",function(a){return a}),c.exit().remove()}}function k(){S=d3.svg.area().interpolate(z.interpolation).x(function(a,b){return M(b)}).y0(function(a,b){return N(a.y0)}).y1(function(a,b){return N(a.y+a.y0)})}function l(){R=L.append("g").attr("class","pathArea").attr("clip-path","url(#clip-"+y.namespace+")").selectAll("path").data(G).enter().append("path").attr("d",S).attr("class","area ").attr("fill",function(a,b){return y.requestColor[b]})}function m(){V=L.append("g").attr("class","chart-tooltip").attr("transform","translate(-1000, 0)"),V.append("line").attr("x1",z.tooltipWidth).attr("y1",10).attr("x2",z.tooltipWidth).attr("y2",B).attr("class","guideLine"),W=V.append("g").attr("transform","translate(0, 10)"),W.append("rect").attr("width",40).attr("height",90).attr("fill","#000").attr("fill-opacity","0.7"),W.selectAll("text").data(y.requestLabel).enter().append("text").attr("x",function(a,b){return 30}).attr("y",function(a,b){return 20*(y.requestColor.length-b-1)+20+"px"}).attr("fill",function(a,b){return y.requestColor[b]}).attr("text-anchor","end").text(function(a,b){return y.requestLabel[b]}),X=L.append("text").attr("class","date").attr("x","29%").attr("y","6px").text("")}function n(){y.showExtraInfo!==!1&&L.on("mouseout",function(){V.attr("transform","translate(-1000, 0)"),X.text(""),E=-1,D=-1}).on("mousemove",function(a){D=parseInt(M.invert(d3.mouse(this)[0])),E=parseInt(d3.mouse(this)[0]),E=E>A?-1e3:E,V.attr("transform","translate("+(E-z.tooltipWidth)+",0)"),p(o(D))})}function o(a){var b=[];if(-1===a)return b;for(var c=0;c<G.length;c++)b.push({y:G[c][a]?G[c][a].y:0,d:G[c][a]?G[c][a].d:Date.now()});return b}function 
p(a){y.showExtraInfo!==!1&&-1!==E&&-1!==D&&(W.selectAll("text").text(function(b,c){return a[c].y}),X.text(z.timeFormat(new Date(a[0].d))))}function q(){y.showExtraInfo!==!1&&(L.append("g").attr("transform",function(a,b){return"translate("+(y.width-z.margin.left-z.margin.right+4+32)+",10)"}).attr("class","request-label").append("text").attr("text-anchor","end").attr("fill","#000").style("font-size","14px").style("font-weight","bold").attr("y","0%").text("Total"),L.append("g").attr("transform",function(a,b){return"translate("+(y.width-z.margin.left-z.margin.right+4+32)+",10)"}).attr("class","request-label").selectAll("text").data(y.requestLabel).enter().append("text").attr("text-anchor","end").attr("fill","#9B9B9B").attr("y",function(a,b){return 20*(4-b)+"%"}).text(function(a,b){return y.requestLabel[b]}),U=L.append("g").attr("transform",function(a,b){return"translate("+(y.width-z.margin.left-z.margin.right+4+70)+",10)"}).attr("class","request-count").append("text").attr("text-anchor","end").attr("fill","#000").attr("y","0%").text("0"),T=L.append("g").attr("transform",function(a,b){return"translate("+(y.width-z.margin.left-z.margin.right+4+70)+",10)"}).attr("class","request-count").selectAll("text").data(y.requestLabel).enter().append("text").attr("text-anchor","end").attr("y",function(a,b){return 20*(4-b)+"%"}).attr("fill",function(a,b){return y.requestColor[b]}).text("0"))}function r(){Y=L.append("text").attr("class","error").attr("y","40%").attr("x","50%"),Z=Y.append("tspan").attr("x","50%").attr("dy","0%"),$=Y.append("tspan").attr("x","50%").attr("dy","14px")}function s(a,b){Y.style("fill",a?"#F00":"#000"),b.length>1?(Y.attr("y","20%"),Z.text(b[0]),$.text(b[1])):(Y.attr("y","40%"),Z.text(b[0]),$.text(""))}function t(a){if(y.showExtraInfo!==!1){var b=0;T.data(a).text(function(a,c){return"undefined"!=typeof a.y?(b+=a.y,a.y):""}),U.text(b)}}function u(){I=0}function v(){aa=aa.each(function(){if(I++,0===F.length)return 
void(I>a.consts.maxDealyCount&&y.showExtraInfo===!1&&s(!1,[a.message.NO_RESPONSE]));u();var b=F.shift();t(b),h(),y.showExtraInfo===!1&&w();var c=0;for(c=0;c<G.length;c++)G[c].push(b[c]);for(_(G),k(),x(),c=0;c<G.length;c++)G[c].shift();p(o(D))}).transition().each("start",function(){v()})}function w(){for(var a=0,b=0;b<Q.length;b++)a=Math.max(a,parseInt(d3.select(Q[b]).attr("x1")));var c=M(0)*K++,d=a+J;P.transition().attr("transform","translate("+c+")").each("end",function(){for(var a=0;a<Q.length;a++){var b=d3.select(Q[a]),e=parseInt(b.attr("x1"));e<Math.abs(c)&&(b.attr({x1:d,x2:d}),d+=J)}})}function x(){R.data(G).attr("d",S).attr("transform",null).transition().attr("transform","translate("+M(0)+")")}var y={timeoutMaxCount:parseInt(d[a.params.TIMEOUT_MAX_COUNT]),showExtraInfo:"true"===d[a.params.SHOW_EXTRA_INFO],requestLabel:b[d[a.params.REQUEST_LABEL]],requestColor:b[d[a.params.CHART_COLOR]],xAxisCount:parseInt(d[a.params.XCOUNT]),namespace:d[a.params.NAMESPACE],height:parseInt(d[a.params.HEIGHT]),width:parseInt(d[a.params.WIDTH])},z={domain:[1,y.xAxisCount-2],margin:{top:6,left:y.showExtraInfo?38:20,right:80,bottom:6},timeFormat:d3.time.format("%Y.%m.%d %H:%M:%S"),transaction:{duration:1e3,ease:"linear"},yAxisFormat:d3.format("d"),tooltipWidth:50,interpolation:"basis"},A=y.width-(y.showExtraInfo?z.margin.left+z.margin.right:0),B=y.height-z.margin.top-z.margin.bottom,C=0,D=-1,E=-1,F=[],G=[],H=0,I=0,J=parseInt(A/(a.consts.verticalGridCount-1)),K=1;f();var L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,$,_=d3.layout.stack().y(function(a){return 
a.y}),aa=d3.select({}).transition().duration(z.transaction.duration).ease(z.transaction.ease);_(G),e(),g(),k(),l(),m(),n(),q(),r(),v(),b.$on("realtimeChartDirective.onData."+y.namespace,function(a,b,c,d,e){C=d,H=0,s(!1,["",""]),e===!1&&F.push(b.map(function(a){return{y:parseInt(a),d:c}}))}),b.$on("realtimeChartDirective.onError."+y.namespace,function(b,c,d,e){C=e,c.code===a.responseCode.TIMEOUT?(H<y.timeoutMaxCount?F.push(G[G.length-1]):(u(),s(!0,c.message.split("_"))),H++):(u(),s(c.code!==a.responseCode.ERROR_BLACK,c.message.split("_")))}),b.$on("realtimeChartDirective.clear."+y.namespace,function(a,b,d){F.length=0,f(),c.html(""),t([{},{},{},{}]),_(G),e(),g(),k(),l(),m(),n(),q(),r()})}}}])}(),function(){"use strict";pinpointApp.constant("scatterDirectiveConfig",{scatterDataUrl:"/getScatterData.pinpoint",template:"<div id='scatter'></div>",images:{config:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MDk0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNS4xIFdpbmRvd3MiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6QTI2NzMzRDI2QTlGMTFFM0E1RENBRjZGODkwRDBCMEIiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6QTI2NzMzRDM2QTlGMTFFM0E1RENBRjZGODkwRDBCMEIiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpBMjY3MzNEMDZBOUYxMUUzQTVEQ0FGNkY4OTBEMEIwQiIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpBMjY3MzNEMTZBOUYxMUUzQTVEQ0FGNkY4OTBEMEIwQiIvPiA8L3JkZjpEZXNjcmlwdG
lvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pn/ejkcAAAFwSURBVHjanFKxSsNQFE2CFEtaTNsMCbSQ4lA6GSghi2igo/o/GVz6AfmD/EFGJ7cKDiVLO0hFEByzpG1EA5LUxntC80jFKhi4LzfnnnPfzXmPz7KM+9ezT6iq6gnF+T4Nny88/110VK1WbwRB4OM4vgyC4PVXIQlW9JqRwGo2mzm2XC65zWYzplSnBo1CeFDu1Ov1XhaLhVWpVBimKAqXJInVarWmJGQ42xHjybIcQSSK4rNt29cgOI5jR1Gkk5gLw1DC2LkvWGBCu90eDwaDDOF53hXhhwjf908LHByYBo2ArqZpfnY6HbEYw3XdJ5riAzEajR4LHJxut6syhyhq5c6apt1hdER5EnCIKzFzqPM7fUwkSZrhf+r1+lmaphFqjUZuJIeaYRgT4q7ZqFvxmn7+GAQYBDcRyIGhBs6PN4dyjKLP5/OL4XA4RSAHhlpZs3OO1PF+W3ggwxQ6u7d+v3+7s9Nfd3VrQm3fXf0SYADyptv3yy4A0QAAAABJRU5ErkJggg==",download:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MDk0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNS4xIFdpbmRvd3MiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6RUU3MjQyMUQ2QTlGMTFFM0IxRTY4MjI3MUU5MUUyMzMiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6RUU3MjQyMUU2QTlGMTFFM0IxRTY4MjI3MUU5MUUyMzMiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpFRTcyNDIxQjZBOUYxMUUzQjFFNjgyMjcxRTkxRTIzMyIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpFRTcyNDIxQzZBOUYxMUUzQjFFNjgyMjcxRTkxRTIzMyIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PlnySxAAAADPSURBVHjaxFLBDYMwDLQjMgEf4M8E4Y0YgzHaTTpGFuDfARAdoH3DiwmQSLmqRIZSpPLpSTH4krOjc9g5R0cQIDDzgozj2Ffrum6xOTcKtqolSUJCuNlR0UH8WTiZcpPGzEaB3xVWVXVO0/QhOeTgP1rKOU7/QdM0RZ7nd2OMwxc5eHkeixEm+71aKUVh
GJLWmoZhoL7vaRxHX7xtW/ZzlHOTgDiKIirL8oTcWnuZ914dsyzbfXfoCuAmdV3z15ezBgRr8Nuc4ocRXhGeAgwAFHJVgfQ6KdUAAAAASUVORK5CYII=",fullscreen:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MDk0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNS4xIFdpbmRvd3MiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6QkMwOTc3QTg2QTlGMTFFMzk5MzhBOTM4OEFCMzg3MTciIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6QkMwOTc3QTk2QTlGMTFFMzk5MzhBOTM4OEFCMzg3MTciPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDpCQzA5NzdBNjZBOUYxMUUzOTkzOEE5Mzg4QUIzODcxNyIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDpCQzA5NzdBNzZBOUYxMUUzOTkzOEE5Mzg4QUIzODcxNyIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PvZjiPIAAAE/SURBVHjanFKxaoRAEHXhbM4UopViGWzv0C5VUvsJqdP7A/6Dfb7DMmCVSuRCunClaKUYSKUQM2+zY1SsMjDuuDtv9u3ME9M0af+xAz6u687opmnENslxnFX1uq6FhhvDMKRluofvVeczuMr9vREmhMhRmRy/F3IukhOjM7Mh4B9VNkqQq67rR9u25VnbtsdxHPkZ6zcWRfFAN2qGYVzx3/e9X1XVC2LLsnzTNK8MQK5kCL4AwcqylPTiOH4m8C1igNI0fUIcBEGu3rwGRlFkK3qvRM9XtD+I9h3iLMvaXaDneRdF78T0cHPXdW+Iif6ZgavmALClB9q0nBRw3RyMAa0mWnJzGIbvJEneOeb9hRgEK0e2mjtG9kX+qeJH8hs163lkh2UlVWAruYIlp8ShzeNYqEQqaE9ym638R4ABADZiqF446UJLAAAAAElFTkSuQmCC",error:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAABmJLR0QA/wD/AP+gvaeTAAADvUlEQVRoge2ZQW8bRRTHf2+2tMTrkCYFkQ9QqBBKC03aOOkldqtKUcUFqeUbkAN3pFSIGxQOFRx6IN8AJeIYtVIc1xdqHAdCE3EgKeeqNAlV8
dIm8u7j0FasHW+8u7E3Rcrv9mbe7P7/mtHsmx044IA9Ie14iI4fP1LdPjYKZEEHBE4g9KOkAUVwUO4rrIIuI9bt9CvrJbl5b2uv796TASc7POgJEyJ6BeiJOPyRotNGrSm78NMvcTXEMuDkMqfVeNdQLsZ9cQO3MDqZnqv8GnVgJAM6MtLlpLyvQD8BrKgva0EN0Rt2zZ6UYvFp2EGhDTzODp0wRmZABuLpC81d18jlnrnyWpjkUAac82fOqMoswht70xaaTU/1g9cKlTutElsa+CebGVbj5RXS7dEWDoGqqHc+VVhcaJEXzLNlY34EjrVVXXjWXSOjuy0nE9ShY2OvGmO+Z//EA7xued4POjLSFZQQaMCxnnwNvNcRWZGQAafL/SKwt1mjk8ucVvEWaP9WGRcXo0PNvhNNZ0CNd42XRzyAhUrTWdgxA052eFCNLnZeU2RUxBu084tL/sYdM+AJE8lpioR4mI93NPoDHT9+xNnue0D0wiwZhL/sXqdfZn7bftFUNwPVrb5zvKziAZTe6kYq42+qX0IiY0nqiYWRXF1Y1yneqUTFxEE56Q8P+QNReSvsc+x8uV2SAHAuDIfKE3jbHzfuQm+2S1AHqdPYaCDRijMm3f4gsBb6v9BooLovKqKgPPaHjQYeJCglHsKf/rBuF1JYFXgnzHPC7hrtRlR+98cNM6B3kxQTD132Rw0GTDFBJbFwhYI/rjOQPrxxB3iUqKJobHb3OXVf0DoDcvPelqrMJKspPCpM+ytRaPIdMMh3yUmKhIroVGPjDgPPf7TeSkRSNGZDn4kR61Og1mlFEajh8VmzjqYG0vnSCqI3OqspAqLfpm8vNN3iA2shu2ZPAktB/QmybDuHPg/qDDQgxeJT18hHwHpHZIVBeVhT90MplZ4EpexajfbMldeMepdkH4o8gaqIXjpa+PmP3fJaltPP/g5rDuVh++S1ZFPgoj1fqbRKDHUesOcrFdeSc0AStdJSTd2zqfmFUpjk0Aeanrnymu2mMijf0JkttgZy3XZTo62WjZ9Yl3zV7NlTWHyJMh73GT4UmEWsq+l8aSXq4L1ds14Yet9TMyHCFZTeiMM3VZgW0ak4t5MvaM9F9+V3D1c3UhmM5FBOPv/10c9/B/C/Ue4LsuoZXVFlvrvPKTcWZgccsA/8C1bvKjPyfF8QAAAAAElFTkSuQmCC"},options:{containerId:"",containerClass:"bigscatterchart",width:400,height:250,minX:0,maxX:1e3,minY:0,maxY:1e4,minZ:0,maxZ:5,bubbleSize:3,labelX:"",labelY:"(ms)",realtime:!1,chartAnimationTime:300,gridAxisStyle:{lineWidth:1,lineDash:[1,0],globalAlpha:1,strokeStyle:"#e3e3e3"},padding:{top:40,left:50,right:40,bottom:30},typeInfo:{0:["Failed","#d62728",20],1:["Success","#2ca02c",10]},propertyIndex:{x:0,y:1,meta:2,transactionId:3,type:4,groupCount:5},checkBoxImage:{checked:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MD
k0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNS4xIFdpbmRvd3MiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6ODk0MjRENUI2Qjk2MTFFM0E3NkNCRkIyQTkxMjZFQjMiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6ODk0MjRENUM2Qjk2MTFFM0E3NkNCRkIyQTkxMjZFQjMiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo4OTQyNEQ1OTZCOTYxMUUzQTc2Q0JGQjJBOTEyNkVCMyIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo4OTQyNEQ1QTZCOTYxMUUzQTc2Q0JGQjJBOTEyNkVCMyIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/PkJ02akAAAEfSURBVHjalJI/aoRQEMbnrU8RRVYsBAXJASxEtLGRXEDIJXKTpPcOHiDl1gsp1QvEIiD2AbGLmpm3f7Is68YMjOOb9/0Y/Rg2zzPEcTzDP6IsS7Y5QXTAsibFIH4BQVVVi1Mcx9liyTFN13W/+JpPI0iSpL1hGEHf9+E0TSBAxtifkGVZgSzLgBkMwwCbNZNOEIVpmo3v+78gigLMt+O/3IR0XW/yPH/uuu4AEqQoComeSIznhyUoDMP3cRwPoKZpLyjaqqoKJOacfy5B6Mc39QRYFMUrOtbQO4lt24Z70BlMkqSkSxJdmrMEiYiiSGwOrh6v6/oxTdMP6lGlM/Wv3RYMPY55hrMs292DKNn1kqOb4HketG0L5N5CsB8BBgCZjoUNsxfiYwAAAABJRU5ErkJggg==", 
unchecked:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMC1jMDYxIDY0LjE0MDk0OSwgMjAxMC8xMi8wNy0xMDo1NzowMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNS4xIFdpbmRvd3MiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6OEQxQ0ZERUQ2Qjk2MTFFMzg5MjNGMjAzRjdCQ0FEMjkiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6OEQxQ0ZERUU2Qjk2MTFFMzg5MjNGMjAzRjdCQ0FEMjkiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo4RDFDRkRFQjZCOTYxMUUzODkyM0YyMDNGN0JDQUQyOSIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo4RDFDRkRFQzZCOTYxMUUzODkyM0YyMDNGN0JDQUQyOSIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pq1+Js8AAABTSURBVHjaYvz//z+DiYnJfwYSwJkzZxgZjY2NYZoYidQHVs9Eoia4WhY0JxDUBfQWA7KNRGlCVsfEQCYY1UhLjf9hEUtEAgAnOUZyEjlIH0CAAQDn/BlKI9rDJAAAAABJRU5ErkJggg=="}}}),pinpointApp.directive("scatterDirective",["scatterDirectiveConfig","$rootScope","$compile","$timeout","webStorage","$window","$http","CommonAjaxService","TooltipService","AnalyticsService","PreferenceService",function(a,b,c,d,e,f,g,h,i,j,k){return{template:a.template,restrict:"EA",replace:!0,link:function(c,g,l){function m(c,g,h,l,m){var n=v.getQueryStartTime(),o=v.getQueryEndTime(),p=v.getFilter(),q=g.split("^")[0],r={};angular.copy(a.options,r),r.sPrefix="BigScatterChart2-"+parseInt(1e5*Math.random()),r.containerId=c,r.width=h?h:400,r.height=l?l:250,r.minX=n,r.maxX=o,r.errorImage=a.images.error,r.realtime=u();var s=new 
BigScatterChart2(r,t(m),[new BigScatterChart2.SettingPlugin(a.images.config).addCallback(function(a,b){e.add("scatter-y-min",b.min),e.add("scatter-y-max",b.max),a.changeRangeOfY(b),a.redraw()}),new BigScatterChart2.DownloadPlugin(a.images.download,"PNG").addCallback(function(a){j.send(j.CONST.MAIN,j.CONST.CLK_DOWNLOAD_SCATTER)}),new BigScatterChart2.WideOpenPlugin(a.images.fullscreen).addCallback(function(){var a=v.isRealtime()?"realtime/"+v.getQueryEndDateTime():v.getPartialURL(!1,!0);f.open("#/scatterFullScreenMode/"+x.applicationName+"@"+x.serviceType+"/"+a+"/"+t().join(","),"width=900, height=700, resizable=yes")}),new BigScatterChart2.HelpPlugin(i)],{sendAnalytics:function(a,b){j.send(j.CONST.MAIN,j.CONST["Success"===a?"TG_SCATTER_SUCCESS":"TG_SCATTER_FAILED"],j.CONST[b?"ON":"OFF"])},loadFromStorage:function(a){return e.get(a)},onSelect:function(a,b){if(3===arguments.length)f.open("#/transactionList/"+v.getPartialURL(!0,!1),g+"|"+arguments[0]+"|"+arguments[1]+"|"+arguments[2]);else{var c=g+"|"+b.fromX+"|"+b.toX+"|"+b.fromY+"|"+b.toY;f.open("#/transactionList/"+v.getPartialURL(!0,!1),c)}},onError:function(){}},k.getAgentAllStr());return d(function(){angular.isUndefined(m)?s.drawWithDataSource(new BigScatterChart2.DataLoadManager(q,p,{url:a.scatterDataUrl,realtime:u(),realtimeInterval:2e3,realtimeDefaultTimeGap:3e3,realtimeResetTimeGap:2e4,fetchLimit:5e3,fetchingInterval:2e3,useIntervalForFetching:!1},function(a,c,d){v.setQueryEndDateTime(c),b.$broadcast("responseTimeChartDirective.loadRealtime",q,s.getCurrentAgent(),a.min,a.max)})):s.addBubbleAndMoveAndDraw(s.createDataBlock(m)),f.htoScatter[g]=s},100),s}function n(a,b,c){g.children().hide(),r(),angular.isDefined(w[a])?(w[a].target.show(),u()?h.getServerTime(function(b){w[a].scatter.resume(b-k.getRealtimeScatterXRange(),b),w[a].scatter.selectAgent(k.getAgentAllStr(),!0)}):(w[a].scatter.resume(),w[a].scatter.selectAgent(k.getAgentAllStr(),!0))):p(a,b,c)}function 
o(a,b,c,d){angular.isDefined(w[a])?w[a].scatter.addBubbleAndMoveAndDraw(w[a].scatter.createDataBlocK(d)):p(a,b,c,d).hide()}function p(b,c,d,e){var f=angular.element(a.template),h=m(f,b,c,d,e);return w[b]={target:f,scatter:h},g.append(f),f}function q(a){g.children().hide(),angular.isDefined(w[a])&&(w[a].target.show(),w[a].scatter.selectAgent(k.getAgentAllStr(),!0))}function r(){angular.forEach(w,function(a,b){a.scatter.pause()})}function s(){for(var a in w)w[a].scatter.abort();w={}}function t(a){var b,c=[];if("undefined"!=typeof a)return $.each(a.scatter.metadata,function(a,b){c.push(b[0])}),c;if(x.agentList)return x.agentList;if(x.serverList)for(b in x.serverList){var d=x.serverList[b].instanceList;for(var e in d)c.push(d[e].name)}return c}function u(){return"realtime"===v.getPeriodType()}var v=null,w={},x=null;c.$on("scatterDirective.initialize",function(a,b){v=b,s(),g.empty()}),c.$on("scatterDirective.initializeWithNode",function(a,b,d,e){c.currentAgent=k.getAgentAllStr(),x=b,n(b.key,d,e)}),c.$on("scatterDirective.initializeWithData",function(a,b,d){c.currentAgent=k.getAgentAllStr();var e=b.split("^");x={applicationName:e[0],serviceType:e[1],key:b},o(b,null,null,d)}),c.$on("scatterDirective.showByNode",function(a,b){x=b,q(b.key)}),c.$on("responseTimeChartDirective.showErrorTransacitonList",function(b,c){f.htoScatter[x.key].selectType("Failed").fireDragEvent({animate:function(){},css:function(b){return"left"===b?a.options.padding.left+"px":"top"===b?a.options.padding.top+"px":"0px"},width:function(){return a.options.width-a.options.padding.left-a.options.padding.right},height:function(){return a.options.height-a.options.padding.top-a.options.padding.bottom}})}),c.$on("changedCurrentAgent",function(a,b){w[x.key].scatter.selectAgent(b)})}}}])}(),function(){"use 
strict";pinpointApp.constant("nodeInfoDetailsDirectiveConfig",{maxTimeToShowLoadAsDefaultForUnknown:43200}),pinpointApp.directive("nodeInfoDetailsDirective",["nodeInfoDetailsDirectiveConfig","$filter","$timeout","isVisibleService","$window","AnalyticsService","PreferenceService","TooltipService","CommonAjaxService",function(a,b,c,d,e,f,g,h,i){return{restrict:"EA",replace:!0,templateUrl:"features/nodeInfoDetails/nodeInfoDetails.html?v="+G_BUILD_TIME,scope:{},link:function(j,k){function l(a){var b=g.getResponseTypeFormat();return $.each(a,function(a,c){$.each(c,function(a,c){b[a]+=c})}),b}function m(a){var b=[];return $.each(a,function(a,c){for(var d=0;d<c.length;d++){var e=c[d];b.length<d+1&&(b[d]={key:e.key,values:[]});for(var f=0;f<e.values.length;f++)b[d].values.length<f+1&&(b[d].values[f]=[e.values[f][0],0]),b[d].values[f][1]+=e.values[f][1]}}),b}var n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F=!1;t=!1,j.htLastUnknownNode=!1,angular.element(e).bind("resize",function(a){t&&/_GROUP$/.test(o.category)&&y(o)}),k.find(".unknown-list").bind("scroll",function(a){y(o)}),w=function(){p={},q={},s={},r=!1,j.currentAgent=g.getAgentAllStr(),j.showNodeInfoDetails=!1,j.node=!1,j.unknownNodeGroup=null,j.hosts=null,j.showNodeServers=!1,j.agents=null,j.showAgents=!1,j.showNodeResponseSummaryForUnknown=!1,j.showNodeResponseSummary=!1,j.showNodeLoad=!1,j.agentHistogram=!1,j.nodeOrderBy="totalCount",j.nodeOrderByNameClass="",j.nodeOrderByCountClass="glyphicon-sort-by-order-alt",j.nodeOrderByDesc=!0,"$apply"!=j.$$phase&&"$digest"!=j.$$phase&&"$apply"!=j.$root.$$phase&&"$digest"!=j.$root.$$phase&&j.$digest()},x=function(b){j.showNodeInfoDetails=!0,j.node=b,j.unknownNodeGroup=b.unknownNodeGroup,j.serverList=b.serverList,j.showNodeServers=!_.isEmpty(j.serverList),j.agentHistogram=b.agentHistogram,j.serverCount=0,j.errorServerCount=0;for(var d in j.serverList){var e=j.serverList[d].instanceList;for(var f in 
e)j.serverCount++,j.agentHistogram[e[f].name]&&j.agentHistogram[e[f].name].Error>0&&j.errorServerCount++}/_GROUP$/.test(b.serviceType)===!1?(j.showNodeResponseSummary=!0,j.showNodeLoad=!0,B("forNode",b.applicationName,b.histogram,"100%","150px"),D("forNode",b.applicationName,b.timeSeriesHistogram,"100%","220px",!0)):/_GROUP$/.test(b.serviceType)&&(j.showNodeResponseSummaryForUnknown=!(j.oNavbarVoService.getPeriod()<=a.maxTimeToShowLoadAsDefaultForUnknown),y(b),j.htLastUnknownNode=b,c(function(){k.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()})),"$apply"!=j.$$phase&&"$digest"!=j.$$phase&&"$apply"!=j.$root.$$phase&&"$digest"!=j.$root.$$phase&&j.$digest()},y=function(a){c(function(){angular.forEach(a.unknownNodeGroup,function(a){var c=a.applicationName,e=b("applicationNameToClassName")(c);if(!angular.isDefined(p[c])&&!angular.isDefined(q[c])){var f=".nodeInfoDetails .summaryCharts_"+e,g=angular.element(f),h=d(g.get(0),1);h&&(j.showNodeResponseSummaryForUnknown?(p[c]=!0,B(null,c,a.histogram,"360px","180px")):(q[c]=!0,D(null,c,a.timeSeriesHistogram,"360px","200px",!0)))}})})},B=function(a,c,d,e,f){var g=b("applicationNameToClassName")(c);a=a||"forNode_"+g,j.$broadcast("responseTimeChartDirective.initAndRenderWithData."+a,d,e,f,!1,!0)},C=function(a,c,d,e,f){var g=b("applicationNameToClassName")(c);a=a||"forNode_"+g,j.$broadcast("responseTimeChartDirective.updateData."+a,d)},D=function(a,c,d,e,f,g){var h=b("applicationNameToClassName")(c);a=a||"forNode_"+h,j.$broadcast("loadChartDirective.initAndRenderWithData."+a,d,e,f,g)},E=function(a,c,d,e,f,g){var h=b("applicationNameToClassName")(c);a=a||"forNode_"+h,j.$broadcast("loadChartDirective.updateData."+a,d)},v=function(a){for(var b=null,c=0;c<o.unknownNodeGroup.length;c++)if(o.unknownNodeGroup[c].key===a){b=o.unknownNodeGroup[c];break}return 
b},z=function(){t=!1,k.hide()},A=function(){t=!0,k.show()},j.showNodeDetailInformation=function(a){o=v(a),x(o),j.$emit("nodeInfoDetail.showDetailInformationClicked",r,o)},j.goBackToUnknownNode=function(){o=j.htLastUnknownNode,p={},q={},x(o),j.$emit("nodeInfoDetail.showDetailInformationClicked",r,o)},j.renderNodeResponseSummary=function(a,b){angular.isUndefined(p[a])&&(p[a]=!0,B(null,a,v(b).histogram,"360px","180px"))},j.renderNodeLoad=function(a,b){angular.isUndefined(q[a])&&(q[a]=!0,D(null,a,v(b).timeSeriesHistogram,"360px","200px",!0))},j.renderNodeAgentCharts=function(a){f.send(f.CONST.MAIN,f.CONST.CLK_SHOW_GRAPH),angular.isDefined(s[a])||(s[a]=!0,B(null,a,o.agentHistogram[a],"100%","150px"),D(null,a,o.agentTimeSeriesHistogram[a],"100%","200px",!0))},j.nodeSearchChange=function(){y(o)},j.nodeOrderByName=function(){"applicationName"===j.nodeOrderBy?(j.nodeOrderByDesc=!j.nodeOrderByDesc,"glyphicon-sort-by-alphabet-alt"===j.nodeOrderByNameClass?j.nodeOrderByNameClass="glyphicon-sort-by-alphabet":j.nodeOrderByNameClass="glyphicon-sort-by-alphabet-alt"):(j.nodeOrderByNameClass="glyphicon-sort-by-alphabet-alt",j.nodeOrderByCountClass="",j.nodeOrderByDesc=!0,j.nodeOrderBy="applicationName"),y(o)},j.nodeOrderByCount=function(){"totalCount"===j.nodeOrderBy?(j.nodeOrderByDesc=!j.nodeOrderByDesc,"glyphicon-sort-by-order-alt"===j.nodeOrderByCountClass?j.nodeOrderByCountClass="glyphicon-sort-by-order":j.nodeOrderByCountClass="glyphicon-sort-by-order-alt"):(j.nodeOrderByCountClass="glyphicon-sort-by-order-alt",j.nodeOrderByNameClass="",j.nodeOrderByDesc=!0,j.nodeOrderBy="totalCount"),y(o)},j.showUnknownNodeBy=function(a,b){return 
a?b.applicationName.indexOf(a)>-1||b.totalCount.toString().indexOf(a)>-1:!0},j.showServerList=function(){f.send(f.CONST.MAIN,f.CONST.CLK_SHOW_SERVER_LIST),j.$emit("serverListDirective.show",!0,o,j.oNavbarVoService)},j.$on("nodeInfoDetailsDirective.initialize",function(a,b,c,d,e,f,g,h){A(),w(),r=c,u=d.key,o=d,j.htLastUnknownNode=!1,j.oNavbarVoService=f,j.nodeSearch=h||"",n=e,x(d)}),j.$on("nodeInfoDetailsDirective.hide",function(a){z()}),j.$on("nodeInfoDetailsDirective.lazyRendering",function(a,b){y(o)}),j.$on("responseTimeChartDirective.itemClicked.forNode",function(a,b){}),j.$on("responseTimeChartDirective.loadRealtime",function(a,b,c,d,e){angular.isUndefined(j.node)||j.node.applicationName!==b||F===!1&&(F=!0,i.getResponseTimeHistogramData({applicationName:j.node.applicationName,serviceTypeName:j.node.category,from:d,to:e},function(a){c===g.getAgentAllStr()?(C("forNode",j.node.applicationName,l(a.summary)),E("forNode",j.node.applicationName,m(a.timeSeries))):(C("forNode",j.node.applicationName,a.summary[c]),E("forNode",j.node.applicationName,a.timeSeries[c])),F=!1},function(){F=!1}))}),j.$on("changedCurrentAgent",function(a,b){var c=null,d=null;b===g.getAgentAllStr()?(c=j.node.histogram,d=j.node.timeSeriesHistogram):(c=j.node.agentHistogram[b],d=j.node.agentTimeSeriesHistogram[b]),B("forNode",j.node.applicationName,c,"100%","150px"),D("forNode",j.node.applicationName,d,"100%","220px",!0)}),h.init("responseSummaryChart"),h.init("loadChart")}}}])}(),function(){"use strict";pinpointApp.constant("linkInfoDetailsDirectiveConfig",{maxTimeToShowLoadAsDefaultForUnknown:43200}),pinpointApp.directive("linkInfoDetailsDirective",["linkInfoDetailsDirectiveConfig","$filter","ServerMapFilterVoService","filteredMapUtilService","$timeout","isVisibleService","ServerMapHintVoService","AnalyticsService","$window",function(a,b,c,d,e,f,g,h,i){return{restrict:"EA",replace:!0,templateUrl:"features/linkInfoDetails/linkInfoDetails.html?v="+G_BUILD_TIME,scope:!0,link:function(j,k,l){var 
m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D;j.linkSearch="",s=!1,t=!1,q=!1,j.htLastUnknownLink=!1,angular.element(i).bind("resize",function(a){q&&n.targetRawData&&z(n)}),k.find(".unknown-list").bind("scroll",function(a){z(n)}),v=function(){m=!1,n=!1,o={},r={},p={},j.linkCategory=null,j.unknownLinkGroup=null,j.showLinkInfoDetails=!1,j.showLinkResponseSummary=!1,j.showLinkLoad=!1,j.showLinkServers=!1,j.linkSearch="",j.linkOrderBy="totalCount",j.linkOrderByNameClass="",j.linkOrderByCountClass="glyphicon-sort-by-order-alt",j.linkOrderByDesc=!0,j.sourceApplicationName="",j.sourceHistogram=!1,j.namespace=null,j.$$phase||j.$digest()},w=function(b){if(j.link=b,b.unknownLinkGroup)j.unknownLinkGroup=b.unknownLinkGroup,j.htLastUnknownLink=b,j.showLinkResponseSummaryForUnknown=!(j.oNavbarVoService.getPeriod()<=a.maxTimeToShowLoadAsDefaultForUnknown),z(b),e(function(){k.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()});else{j.showLinkResponseSummary=!0,j.showLinkLoad=!0,y("forLink",b.targetInfo.applicationName,b.histogram,"100%","150px"),x("forLink",b.targetInfo.applicationName,b.timeSeriesHistogram,"100%","220px",!0),j.showLinkServers=!_.isEmpty(b.sourceHistogram),j.sourceApplicationName=b.sourceInfo.applicationName,j.sourceHistogram=b.sourceHistogram,j.fromNode=b.fromNode,j.serverCount=0,j.errorServerCount=0;for(var c in j.sourceHistogram)j.serverCount++,j.sourceHistogram[c].Error>0&&j.errorServerCount++}j.showLinkInfoDetails=!0,j.$$phase||j.$digest()},z=function(a){e(function(){angular.forEach(a.unknownLinkGroup,function(a,c){var d=a.targetInfo.applicationName,e=b("applicationNameToClassName")(d);if(!angular.isDefined(o[d])&&!angular.isDefined(p[d])){var g=".linkInfoDetails .summaryCharts_"+e,h=angular.element(g),i=f(h.get(0),1);i&&(j.showLinkResponseSummaryForUnknown?(o[d]=!0,C(null,a,"360px","180px")):(p[d]=!0,x(null,d,a.timeSeriesHistogram,"360px","200px",!0)))}})})},y=function(a,e,f,h,i){var 
k=b("applicationNameToClassName")(e);a=a||"forLink_"+k,"forLink"===a&&s?j.$broadcast("responseTimeChartDirective.updateData."+a,f):("forLink"===a&&(s=!0),j.$broadcast("responseTimeChartDirective.initAndRenderWithData."+a,f,h,i,!0,!0),j.$on("responseTimeChartDirective.itemClicked."+a,function(a,b){var e=b.responseTime,f=b.count,h=new c;h.setMainApplication(n.filterApplicationName).setMainServiceTypeCode(n.filterApplicationServiceTypeCode),"USER"===n.sourceInfo.serviceType?h.setFromApplication("USER").setFromServiceType("USER"):h.setFromApplication(n.sourceInfo.applicationName).setFromServiceType(n.sourceInfo.serviceType),h.setToApplication(n.targetInfo.applicationName).setToServiceType(n.targetInfo.serviceType),"error"===e.toLowerCase()?h.setIncludeException(!0):"slow"===e.toLowerCase()?h.setResponseFrom(1e3*d.getStartValueForFilterByLabel(e,f)).setIncludeException(!1).setResponseTo("max"):h.setResponseFrom(1e3*d.getStartValueForFilterByLabel(e,f)).setIncludeException(!1).setResponseTo(1e3*parseInt(e,10));var i=new g;n.sourceInfo.isWas&&n.targetInfo.isWas&&i.setHint(n.targetInfo.applicationName,n.filterTargetRpcList),j.$emit("linkInfoDetailsDirective.ResponseSummary.barClicked",h,i)}))},C=function(a,e,f,h){var i=b("applicationNameToClassName")(e.targetInfo.applicationName);a=a||"forLink_"+i,"forLink"===a&&s?j.$broadcast("responseTimeChartDirective.updateData."+a,e.histogram):("forLink"===a&&(s=!0),j.$broadcast("responseTimeChartDirective.initAndRenderWithData."+a,e.histogram,f,h,!0,!0),j.$on("responseTimeChartDirective.itemClicked."+a,function(a,b){var f=b.responseTime,h=b.count,i=new 
c;i.setMainApplication(e.filterApplicationName).setMainServiceTypeCode(e.filterApplicationServiceTypeCode),"USER"===e.sourceInfo.serviceType?i.setFromApplication("USER").setFromServiceType("USER"):i.setFromApplication(e.sourceInfo.applicationName).setFromServiceType(e.sourceInfo.serviceType),i.setToApplication(e.targetInfo.applicationName).setToServiceType(e.targetInfo.serviceType),"error"===f.toLowerCase()?i.setIncludeException(!0):"slow"===f.toLowerCase()?i.setResponseFrom(1e3*d.getStartValueForFilterByLabel(f,h)).setIncludeException(!1).setResponseTo("max"):i.setResponseFrom(1e3*d.getStartValueForFilterByLabel(f,h)).setIncludeException(!1).setResponseTo(1e3*parseInt(f,10));var k=new g;e.sourceInfo.isWas&&e.targetInfo.isWas&&k.setHint(e.targetInfo.applicationName,e.filterTargetRpcList),j.$emit("linkInfoDetailsDirective.ResponseSummary.barClicked",i,k)}))},x=function(a,c,d,e,f,g){var h=b("applicationNameToClassName")(c);a=a||"forLink_"+h,"forLink"===a&&t?j.$broadcast("loadChartDirective.updateData."+a,d):("forLink"===a&&(t=!0),j.$broadcast("loadChartDirective.initAndRenderWithData."+a,d,e,f,g))},D=function(a){for(var b=null,c=0;c<n.unknownLinkGroup.length;c++)if(n.unknownLinkGroup[c].key===a){b=n.unknownLinkGroup[c];break}return 
b},j.showLinkDetailInformation=function(a){n=D(a),w(n),j.$emit("linkInfoDetail.showDetailInformationClicked",m,n)},j.goBackToUnknownLink=function(){n=j.htLastUnknownLink,o={},p={},w(n),j.$emit("linkInfoDetail.showDetailInformationClicked",m,n)},j.renderLinkResponseSummary=function(a,b){angular.isUndefined(o[a])&&(o[a]=!0,C(null,D(b),"360px","180px"))},j.renderLinkLoad=function(a,b){angular.isUndefined(p[a])&&(p[a]=!0,x(null,a,D(b).timeSeriesHistogram,"360px","200px",!0))},A=function(){q=!1,k.hide()},B=function(){q=!0,k.show()},j.showServerList=function(){h.send(h.CONST.MAIN,h.CONST.CLK_SHOW_SERVER_LIST),j.$emit("serverListDirective.show",!1,n,j.oNavbarVoService)},j.linkOrderByName=function(){"targetInfo.applicationName"===j.linkOrderBy?(j.linkOrderByDesc=!j.linkOrderByDesc,"glyphicon-sort-by-alphabet-alt"===j.linkOrderByNameClass?j.linkOrderByNameClass="glyphicon-sort-by-alphabet":j.linkOrderByNameClass="glyphicon-sort-by-alphabet-alt"):(j.linkOrderByNameClass="glyphicon-sort-by-alphabet-alt",j.linkOrderByCountClass="",j.linkOrderByDesc=!0,j.linkOrderBy="targetInfo.applicationName"),z(n)},j.linkOrderByCount=function(){"totalCount"===j.linkOrderBy?(j.linkOrderByDesc=!j.linkOrderByDesc,"glyphicon-sort-by-order-alt"===j.linkOrderByCountClass?j.linkOrderByCountClass="glyphicon-sort-by-order":j.linkOrderByCountClass="glyphicon-sort-by-order-alt"):(j.linkOrderByCountClass="glyphicon-sort-by-order-alt",j.linkOrderByNameClass="",j.linkOrderByDesc=!0,j.linkOrderBy="totalCount"),z(n)},j.showUnknownLinkBy=function(a,b){return a?b.targetInfo.applicationName.indexOf(a)>-1||b.totalCount.toString().indexOf(a)>-1:!0},j.passingTransactionMapFromLinkInfoDetails=function(a){var b=D(a),d=new c;d.setMainApplication(b.filterApplicationName).setMainServiceTypeCode(b.filterApplicationServiceTypeCode).setFromApplication(b.sourceInfo.applicationName).setFromServiceType(b.sourceInfo.serviceType).setToApplication(b.targetInfo.applicationName).setToServiceType(b.targetInfo.serviceType);var 
e=new g;b.sourceInfo.isWas&&b.targetInfo.isWas&&e.setHint(b.toNode.applicationName,b.filterTargetRpcList),j.$emit("linkInfoDetailsDirective.openFilteredMap",d,e)},j.openFilterWizard=function(a){var b=D(a);j.$emit("linkInfoDetailsDirective.openFilterWizard",b)},j.renderLinkAgentCharts=function(a){angular.isDefined(r[a])||(r[a]=!0,y(null,a,n.sourceHistogram[a],"100%","150px"),x(null,a,n.sourceTimeSeriesHistogram[a],"100%","200px",!0))},j.linkSearchChange=function(){z(n)},j.$on("linkInfoDetailsDirective.hide",function(a){A()}),j.$on("linkInfoDetailsDirective.initialize",function(a,b,c,d,e,f,g){B(),v(),m=c,u=d.key,n=d,j.htLastUnknownLink=!1,j.oNavbarVoService=f,w(d)}),j.$on("linkInfoDetailsDirective.lazyRendering",function(a,b){z(n)})}}}])}(),function(){"use strict";pinpointApp.constant("agentListConfig",{}),pinpointApp.directive("agentListDirective",["agentListConfig","$rootScope","AgentAjaxService","TooltipService","AnalyticsService",function(a,b,c,d,e){return{restrict:"EA",replace:!0,templateUrl:"features/agentList/agentList.html?v="+G_BUILD_TIME,link:function(a,b,f){d.init("agentList");var g={sign:{100:"ok-sign",200:"minus-sign",201:"minus-sign",300:"remove-sign","-1":"question-sign"},color:{100:"#40E340",200:"#F00",201:"#F00",300:"#AAA","-1":"#AAA"}},h=function(b,d,e,f,g){c.getAgentList({application:b,from:e,to:f},function(b){b.errorCode||b.status||(a.agentGroup=b,a.select(i(g)))})},i=function(b){for(var c in a.agentGroup)for(var d in a.agentGroup[c])if(a.agentGroup[c][d].agentId===b)return a.agentGroup[c][d];return!1};a.select=function(b){e.send(e.CONST.INSPECTOR,e.CONST.CLK_CHANGE_AGENT_INSPECTOR),a.currentAgent=b,a.$emit("agentListDirective.agentChanged",b)},a.getState=function(a){return g.sign[a+""]},a.getStateColor=function(a){return g.color[a+""]},a.$on("agentListDirective.initialize",function(a,b){h(b.getApplicationName(),b.getServiceTypeName(),b.getQueryStartTime(),b.getQueryEndTime(),b.getAgentId())})}}}])}(),function(a){"use 
strict";pinpointApp.constant("agentInfoConfig",{agentStatUrl:"/getAgentStat.pinpoint"}),pinpointApp.directive("agentInfoDirective",["agentInfoConfig","$timeout","AlertsService","ProgressBarService","AgentDaoService","AgentAjaxService","TooltipService","AnalyticsService","helpContentService",function(b,c,d,e,f,g,h,i,j){return{restrict:"EA",replace:!0,templateUrl:"features/agentInfo/agentInfo.html?v"+G_BUILD_TIME,link:function(b,k,l){b.agentInfoTemplate="features/agentInfo/agentInfoReady.html?v="+G_BUILD_TIME,b.showEventInfo=!1,b.showDetail=!1,b.selectTime=-1;var m,n=null,o=!1,p=new d,q=new e,r=null,s=function(c){null===r&&(r=a("#target-picker"),r.datetimepicker({dateFormat:"yy-mm-dd",timeFormat:"HH:mm:ss",controlType:"select",showButtonPanel:!0,onSelect:function(){},onClose:function(a){var c=moment(a,"YYYY-MM-DD HH:mm:ss").valueOf();b.selectTime!==c&&n.setSelectTime(c)}}),a("#ui-datepicker-div").addClass("inspector-datepicker")),t(c)},t=function(a){r.datetimepicker("setDate",new Date(a))},u=function(c,d){null!==n?n.emptyData():n=new TimeSlider("timeSlider",{width:a("#timeSlider").get(0).getBoundingClientRect().width,height:90,handleSrc:"images/handle.png",timeSeries:d?d:A(c),handleTimeSeries:c,selectTime:c[1],eventData:[]}).addEvent("clickEvent",function(a){z(a[2])}).addEvent("selectTime",function(a){b.selectTime=a,y(a),w(),t(a)}).addEvent("changeSelectionZone",function(a){x(b.agent.agentId,a,v(a[0],a[1]),function(){})}).addEvent("changeSliderTimeSeries",function(a){})},v=function(a,b){return(b-a)/1e3/60},w=function(){n.setDefaultStateLineColor(TimeSlider.EventColor[100==b.agent.status.state.code?"10100":"10200"])},x=function(a,d,e,h){q.startLoading(),q.setLoading(40),g.getAgentStateForChart({agentId:a,from:d[0],to:d[1],sampleRate:f.getSampleRate(e)},function(a){return a.errorCode||a.status?(q.stopLoading(),void p.showError("There is some 
error.")):(b.agentStat=a,angular.isDefined(a.type)&&a.type&&(b.agent.jvmGcType=a.type),q.setLoading(80),D(a),c(function(){q.setLoading(100),q.stopLoading()},700),void h())})},y=function(a){q.startLoading(),q.setLoading(40),g.getAgentInfo({agentId:b.agent.agentId,timestamp:a},function(a){var c=b.agent.jvmGcType;q.setLoading(80),b.agent=a,b.agent.jvmGcType=c,b.currentServiceInfo=C(a),q.setLoading(100),q.stopLoading()})},z=function(a){g.getEvent({agentId:b.agent.agentId,eventTimestamp:a.eventTimestamp,eventTypeCode:a.eventTypeCode},function(a){a.errorCode||a.status?p.showError("There is some error."):(b.eventInfo=a,b.showEventInfo=!0)})},A=function(a){var b=a[0],c=a[1],d=1728e5,e=c-b;return e>d?[c-d,c]:[c-3*e,c]},B=function(){o===!1&&(h.init("heap"),h.init("permGen"),h.init("cpuUsage"),h.init("tps"),o=!0)},C=function(a){if(a.serverMetaData&&a.serverMetaData.serviceInfos)for(var b=a.serverMetaData.serviceInfos,c=0;c<b.length;++c)if(b[c].serviceLibs.length>0)return b[c]},D=function(a){var c={id:"heap",title:"Heap Usage",span:"span12",line:[{id:"JVM_MEMORY_HEAP_USED",key:"Used",values:[],isFgc:!1},{id:"JVM_MEMORY_HEAP_MAX",key:"Max",values:[],isFgc:!1},{id:"fgc",key:"FGC",values:[],bar:!0,isFgc:!0}]},d={id:"nonheap",title:"PermGen Usage",span:"span12",line:[{id:"JVM_MEMORY_NON_HEAP_USED",key:"Used",values:[],isFgc:!1},{id:"JVM_MEMORY_NON_HEAP_MAX",key:"Max",values:[],isFgc:!1},{id:"fgc",key:"FGC",values:[],bar:!0,isFgc:!0}]},e={id:"cpuLoad",title:"JVM/System Cpu Usage",span:"span12",isAvailable:!1},g={id:"tps",title:"Transactions Per 
Second",span:"span12",isAvailable:!1};b.memoryGroup=[c,d],b.cpuLoadChart=e,b.tpsChart=g,b.$broadcast("jvmMemoryChartDirective.initAndRenderWithData.forHeap",f.parseMemoryChartDataForAmcharts(c,a),"100%","270px"),b.$broadcast("jvmMemoryChartDirective.initAndRenderWithData.forNonHeap",f.parseMemoryChartDataForAmcharts(d,a),"100%","270px"),b.$broadcast("cpuLoadChartDirective.initAndRenderWithData.forCpuLoad",f.parseCpuLoadChartDataForAmcharts(e,a),"100%","270px"),b.$broadcast("tpsChartDirective.initAndRenderWithData.forTps",f.parseTpsChartDataForAmcharts(g,a),"100%","270px")},E=function(a,b){g.getEventList({agentId:a,from:b[0],to:b[1]},function(a){a.errorCode||a.status?p.showError("There is some error."):n.addEventData(a)})},F=function(a,c){b.cpuLoadChart.isAvailable&&b.$broadcast("cpuLoadChartDirective.showCursorAt.forCpuLoad",c.index)},G=function(a,c){b.tpsChart.isAvailable&&b.$broadcast("tpsChartDirective.showCursorAt.forTps",c.index)};b.toggleHelp=function(){a("._wrongApp").popover({title:"<span class='label label-info'>"+m.getApplicationName()+"</span> <span class='glyphicon glyphicon-resize-horizontal'></span> <span class='label label-info'>"+b.agent.applicationName+"</span>",content:j.inspector.wrongApp.replace(/\{\{application1\}\}/g,m.getApplicationName()).replace(/\{\{application2\}\}/g,b.agent.applicationName).replace(/\{\{agentId\}\}/g,b.agent.agentId),html:!0}).popover("toggle")},b.isSameApplication=function(){return b.agent.applicationName===m.getApplicationName()},b.formatDate=function(a){return moment(a).format("YYYY.MM.DD HH:mm:ss")},b.hideEventInfo=function(){b.showEventInfo=!1},b.zoomInTimeSlider=function(){n.zoomIn()},b.zoomOutTimeSlider=function(){n.zoomOut();var a=n.getSliderTimeSeries();E(b.agent.agentId,a)},b.toggleShowDetail=function(){i.send(i.CONST.INSPECTOR,i.CONST.CLK_SHOW_SERVER_TYPE_DETAIL),b.showDetail=!b.showDetail},b.hasDuplicate=function(a,c){for(var 
d=b.currentServiceInfo.serviceLibs.length,e=!1,f=0;d>f;f++)if(b.currentServiceInfo.serviceLibs[f]==a&&f!=c){e=!0;break}return e?"color:red":""},b.selectServiceInfo=function(a){a.serviceLibs.length>0&&(b.currentServiceInfo=a)},b.$on("agentInfoDirective.initialize",function(a,d,e){m=d,b.agentInfoTemplate="features/agentInfo/agentInfoMain.html?v="+G_BUILD_TIME,b.agent=e,b.chartGroup=null,b.currentServiceInfo=C(e);var f,g,h=[];null===n?(h[0]=m.getQueryStartTime(),h[1]=m.getQueryEndTime(),g=m.getPeriod()):(h=n.getSelectionTimeSeries(),f=n.getSliderTimeSeries(),g=v(h[0],h[1])),-1===b.selectTime?b.selectTime=m.getQueryEndTime():b.selectTime!==m.getQueryEndTime()&&y(b.selectTime),c(function(){x(e.agentId,h,g,function(){s(b.selectTime),B(),u(h,f),w(),E(e.agentId,f||A(h))}),b.$apply()})}),b.$on("jvmMemoryChartDirective.cursorChanged.forHeap",function(a,c){b.$broadcast("jvmMemoryChart.showCursorAt.forNonHeap",c.index),F(a,c),G(a,c)}),b.$on("jvmMemoryChartDirective.cursorChanged.forNonHeap",function(a,c){b.$broadcast("jvmMemoryChartDirective.showCursorAt.forHeap",c.index),F(a,c),G(a,c)}),b.$on("cpuLoadChartDirective.cursorChanged.forCpuLoad",function(a,c){b.$broadcast("jvmMemoryChartDirective.showCursorAt.forHeap",c.index),b.$broadcast("jvmMemoryChartDirective.showCursorAt.forNonHeap",c.index),G(a,c)}),b.$on("tpsChartDirective.cursorChanged.forTps",function(a,c){b.$broadcast("jvmMemoryChartDirective.showCursorAt.forHeap",c.index),b.$broadcast("jvmMemoryChartDirective.showCursorAt.forNonHeap",c.index),F(a,c)})}}}])}(jQuery),function(){"use strict";pinpointApp.constant("timeSliderDirectiveConfig",{scaleCount:10}),pinpointApp.directive("timeSliderDirective",["timeSliderDirectiveConfig","$timeout","AnalyticsService",function(a,b,c){return{restrict:"EA",replace:!0,templateUrl:"features/timeSlider/timeSlider.html?v="+G_BUILD_TIME,link:function(d,e,f){var 
g,h,i,j,k,l,m;g=e.find(".timeslider_input"),d.oTimeSliderVoService=null,d.disableMore=!1,d.done=!1,h=function(a){d.oTimeSliderVoService=a,d.oTimeSliderVoService.getReady()!==!1&&(b(function(){g.jslider({from:d.oTimeSliderVoService.getFrom(),to:d.oTimeSliderVoService.getTo(),scale:j(i()),skin:"round_plastic",calculate:function(a){return k(a)},beforeMouseDown:function(a,b){return!1},beforeMouseMove:function(a,b){return!1}}),e.find(".jslider-pointer-from").addClass("jslider-transition"),e.find(".jslider-bg .v").addClass("jslider-transition")},100),m())},m=function(){d.oTimeSliderVoService.getCount()&&d.oTimeSliderVoService.getTotal()&&d.oTimeSliderVoService.getCount()>=d.oTimeSliderVoService.getTotal()&&(d.disableMore=!0)},i=function(){var b=d.oTimeSliderVoService.getFrom(),c=d.oTimeSliderVoService.getTo(),e=c-b,f=e/(a.scaleCount-1),g=[];return _.times(a.scaleCount,function(a){g.push(b+f*a)}),g},j=function(a){var b=[];return _.each(a,function(a){b.push(k(a))}),b},k=function(a){return moment(a).format("HH:mm")},l=function(a,b){g.jslider("value",a,b)},d.more=function(){c.send(c.CONST.CALLSTACK,c.CONST.CLK_MORE),d.$emit("timeSliderDirective.moreClicked",d.oTimeSliderVoService)},d.$on("timeSliderDirective.initialize",function(a,b){h(b),m()}),d.$on("timeSliderDirective.setInnerFromTo",function(a,b){d.oTimeSliderVoService=b,l(b.getInnerFrom(),b.getInnerTo()),m()}),d.$on("timeSliderDirective.enableMore",function(a){d.disableMore=!1}),d.$on("timeSliderDirective.disableMore",function(a){d.disableMore=!0}),d.$on("timeSliderDirective.changeMoreToDone",function(a){d.done=!0}),d.$on("timeSliderDirective.changeDoneToMore",function(a){d.done=!1})}}}])}(),function(){"use strict";pinpointApp.directive("transactionTableDirective",["$window","helpContentTemplate","helpContentService","AnalyticsService",function(a,b,c,d){return{restrict:"EA",replace:!0,templateUrl:"features/transactionTable/transactionTable.html?v="+G_BUILD_TIME,link:function(e,f,g){var 
h,i,j;e.transactionList=[],e.currentTransaction=null,e.transactionReverse=!1,h=function(){e.transactionList=[]},i=function(a){e.transactionList.length>0?(e.transactionOrderBy="",e.transactionReverse=!1,f.find("table tbody tr:last-child")[0].scrollIntoView(!0)):(e.transactionOrderBy="elapsed",e.transactionReverse=!0),e.transactionList=e.transactionList.concat(a),j(),f.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()},j=function(){var a=1;angular.forEach(e.transactionList,function(b,c){b.index=a++})},e.traceByApplication=function(a){d.send(d.CONST.CALLSTACK,d.CONST.CLK_TRANSACTION),e.currentTransaction=a,e.$emit("transactionTableDirective.applicationSelected",a)},e.openTransactionView=function(b){a.open("#/transactionView/"+b.agentId+"/"+b.traceId+"/"+b.collectorAcceptTime)},e.transactionOrder=function(a){e.transactionOrderBy===a?e.transactionReverse=!e.transactionReverse:e.transactionReverse=!1,d.send(d.CONST.CALLSTACK,d.CONST.ST_+a.charAt(0).toUpperCase()+a.substring(1),e.transactionReverse?d.CONST.DESCENDING:d.CONST.ASCENDING), e.transactionOrderBy=a},e.$on("transactionTableDirective.appendTransactionList",function(a,b){i(b)}),e.$on("transactionTableDirective.clear",function(a){h()}),e.initTooltipster=function(){jQuery(".neloTooltip").tooltipster({content:function(){return b(c.transactionTable.log)},position:"bottom",trigger:"click",interactive:!0})}}}}])}(),function(a){"use strict";pinpointApp.directive("timelineDirective",["AnalyticsService",function(b){return{restrict:"EA",replace:!0,templateUrl:"features/timeline/timeline.html?v="+G_BUILD_TIME,link:function(c,d,e){var f,g,h,i,j,k=["#66CCFF","#FFCCFF","#66CC00","#FFCC33","#669999","#FF9999","#6666FF","#FF6633","#66FFCC","#006666","#FFFF00","#66CCCC","#FFCCCC","#6699FF","#FF99FF","#669900","#FF9933","#66FFFF","#996600","#66FF00"],l=[],m=new InfiniteCircularScroll({scroller:a(d),wrapper:a(d).find("div"),elementHeight:21,template:['<div class="timeline-bar">','<div class="clickable-bar">','<div 
class="timeline-bar-frame">',"<span>",'<span class="before" style="position:absolute;text-align:right;white-space:nowrap;left:-70px;display:none;"><span class="glyphicon glyphicon-resize-horizontal" aria-hidden="true"></span> <span class="startTime"></span>ms</span>','<span class="nameType"></span>','<span class="after" style="white-space:nowrap;display:none;">( <span class="glyphicon glyphicon-resize-horizontal" aria-hidden="true"></span> <span class="startTime"></span>ms )</span>',"</span>","</div>","</div>","</div>"].join("")});m.renderFunc(function(a,b,d){var e=o(d);a[b==this._selectedRow?"addClass":"removeClass"]("timeline-bar-selected").find("div.clickable-bar").css({width:p(d)+"px",backgroundColor:n(d[c.key.applicationName]),marginLeft:e+"px"}).find("span.nameType").html(d[c.key.applicationName]+"/"+d[c.key.apiType]+"("+(d[c.key.end]-d[c.key.begin])+"ms)"),e>=68?a.find("span.before").show().end().find("span.after").hide().end().find("span.before .startTime").html(q(d)):a.find("span.before").hide().end().find("span.after").show().end().find("span.after .startTime").html(q(d))}),g=function(){var a=[];return angular.forEach(c.timeline.callStack,function(b){b[c.key.isMethod]&&!b[c.key.excludeFromTimeline]&&""!==b[c.key.service]&&a.push(b)}),a},f=function(b){c.timeline=b,c.key=b.callStackIndex,c.barRatio=1e3/(b.callStack[0][c.key.end]-b.callStack[0][c.key.begin]),c.newCallStacks=g(),m.source(c.newCallStacks).viewAreaHeight(a(d).parentsUntil("div.wrapper").height()-70).selectedRow(-1,angular.noop).reset(),c.maxHeight=m.contentsAreaHeight(),i=0,c.$digest()};var n=function(a){var b=l.indexOf(a);return-1==b&&(l.push(a),b=l.length-1),k[b>=k.length?0:b]},o=function(a){return(a[c.key.begin]-c.timeline.callStackStart)*c.barRatio+.9},p=function(a){return(a[c.key.end]-a[c.key.begin])*c.barRatio+.9},q=function(a){return 
a[c.key.begin]-c.timeline.callStackStart};a(d).on("click",".clickable-bar",function(){b.send(b.CONST.CALLSTACK,b.CONST.CLK_CALL),c.$emit("transactionDetail.selectDistributedCallFlowRow",c.newCallStacks[parseInt(a(this).parent().attr("data-index"))][6])}),a(d).on("mouseenter",".timeline-bar-frame",function(b){a(this).parent().css({"box-shadow":"6px 6px 2px -2px rgba(0,0,0,0.75)","font-weight":"bold"})}),a(d).on("mouseleave",".timeline-bar-frame",function(b){a(this).parent().css({"box-shadow":"none","font-weight":"normal"})}),c.$on("timelineDirective.initialize",function(a,b){f(b)}),c.$on("timelineDirective.resize",function(b){m.resize(a(d).parentsUntil("div.wrapper").height()-70)}),c.$on("timelineDirective.searchCall",function(a,b,d){var e=h(i,-1,b);-1==e?0===i?c.$emit("transactionDetail.timelineSearchCallResult","No call took longer than {time}ms."):(e=h(0,i,b),-1==e?c.$emit("transactionDetail.timelineSearchCallResult","No call took longer than {time}ms."):j(e,"Loop")):j(e,"")}),h=function(a,b,d){return m.searchRow(a,b,function(a){return a[c.key.end]-a[c.key.begin]>=d})},j=function(a,b){m.selectedRow(a,function(a){this.$wrapper.find("div[data-index="+this._selectedRow+"]").removeClass("timeline-bar-selected"),this.$wrapper.find("div[data-index="+a+"]").addClass("timeline-bar-selected")}).moveByRow(a),i=a+1,c.$emit("transactionDetail.timelineSearchCallResult",b)}}}}])}(jQuery),function(){"use strict";pinpointApp.constant("agentChartGroupConfig",{POINTS_TIMESTAMP:0,POINTS_MIN:1,POINTS_MAX:2,POINTS_AVG:3}),pinpointApp.directive("agentChartGroupDirective",["agentChartGroupConfig","$timeout","AgentDaoService","AnalyticsService",function(a,b,c,d){return{restrict:"EA",replace:!0,templateUrl:"features/agentChartGroup/agentChartGroup.html?v="+G_BUILD_TIME,scope:{namespace:"@"},link:function(a,b,e){var f,g,h,i,j,k,l,m;a.showChartGroup=!1,h=function(e){f={Heap:!1,PermGen:!1,CpuLoad:!1},g=null,a.showChartGroup=!0,a.$digest(),c.getAgentStat(e,function(a,b){return a?void 
console.log("error",a):(f.Heap===!1&&i(b),void(g=b))}),b.tabs({activate:function(b,c){var e=c.newTab.text();return"Heap"==e?(d.send(d.CONST.MIXEDVIEW,d.CONST.CLK_HEAP),void(f.Heap===!1?i(g):a.$broadcast("jvmMemoryChartDirective.resize.forHeap_"+a.namespace))):"PermGen"==e?(d.send(d.CONST.MIXEDVIEW,d.CONST.CLK_PERM_GEN),void(f.PermGen===!1?j(g):a.$broadcast("jvmMemoryChartDirective.resize.forNonHeap_"+a.namespace))):"CpuLoad"==e?(d.send(d.CONST.MIXEDVIEW,d.CONST.CLK_CPU_LOAD),void(f.CpuLoad===!1?k(g):a.$broadcast("cpuLoadChartDirective.resize.forCpuLoad_"+a.namespace))):void 0}}),b.tabs("paging")},i=function(b){f.Heap=!0;var d={id:"heap",title:"Heap",span:"span12",line:[{id:"JVM_MEMORY_HEAP_USED",key:"Used",values:[],isFgc:!1},{id:"JVM_MEMORY_HEAP_MAX",key:"Max",values:[],isFgc:!1},{id:"fgc",key:"FGC",values:[],isFgc:!0}]};a.$broadcast("jvmMemoryChartDirective.initAndRenderWithData.forHeap_"+a.namespace,c.parseMemoryChartDataForAmcharts(d,b),"100%","100%")},j=function(b){f.PermGen=!0;var d={id:"nonheap",title:"PermGen",span:"span12",line:[{id:"JVM_MEMORY_NON_HEAP_USED",key:"Used",values:[],isFgc:!1},{id:"JVM_MEMORY_NON_HEAP_MAX",key:"Max",values:[],isFgc:!1},{id:"fgc",key:"FGC",values:[],isFgc:!0}]};a.$broadcast("jvmMemoryChartDirective.initAndRenderWithData.forNonHeap_"+a.namespace,c.parseMemoryChartDataForAmcharts(d,b),"100%","100%")},k=function(b){f.CpuLoad=!0;var d={id:"cpuLoad",title:"JVM/System Cpu 
Usage",span:"span12",isAvailable:!1};a.$broadcast("cpuLoadChartDirective.initAndRenderWithData.forCpuLoad_"+a.namespace,c.parseCpuLoadChartDataForAmcharts(d,b),"100%","100%")},l=function(b){f.Heap&&a.$broadcast("jvmMemoryChartDirective.showCursorAt.forHeap_"+a.namespace,b),f.PermGen&&a.$broadcast("jvmMemoryChartDirective.showCursorAt.forNonHeap_"+a.namespace,b),f.CpuLoad&&a.$broadcast("cpuLoadChartDirective.showCursorAt.forCpuLoad_"+a.namespace,b)},m=function(){f.Heap&&a.$broadcast("jvmMemoryChartDirective.resize.forHeap_"+a.namespace),f.PermGen&&a.$broadcast("jvmMemoryChartDirective.resize.forNonHeap_"+a.namespace),f.CpuLoad&&a.$broadcast("cpuLoadChartDirective.resize.forCpuLoad_"+a.namespace)},a.$on("agentChartGroupDirective.initialize."+a.namespace,function(a,b){h(b)}),a.$on("agentChartGroupDirective.showCursorAt."+a.namespace,function(a,b){l(b)}),a.$on("agentChartGroupDirective.resize."+a.namespace,function(a){m()})}}}])}(),function(){"use strict";pinpointApp.directive("sidebarTitleDirective",["$timeout","$rootScope","PreferenceService","AnalyticsService",function(a,b,c,d){return{restrict:"E",replace:!0,templateUrl:"features/sidebar/title/sidebarTitle.html?v="+G_BUILD_TIME,scope:{namespace:"@"},link:function(e,f,g){function h(a){if(e.currentAgent=c.getAgentAllStr(),"undefined"==typeof a)return void(e.agentList=[]);var b=[];if(a.serverList)for(var d in a.serverList){var f=a.serverList[d].instanceList;for(var g in f)b.push(g)}e.agentList=b}function i(b,c){e.isWas=angular.isDefined(c)&&angular.isDefined(c.isWas)?c.isWas:!1,e.stImage=b.getImage(),e.stImageShow=!!b.getImage(),e.stTitle=b.getTitle(),e.stImage2=b.getImage2(),e.stImage2Show=!!b.getImage2(),e.stTitle2=b.getTitle2(),a(function(){f.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()})}function 
j(){e.currentAgent=c.getAgentAllStr(),e.stImage=!1,e.stImageShow=!1,e.stTitle=!1,e.stImage2=!1,e.stTitle2=!1,e.stImage2Show=!1,e.isWas=!1,e.agentList=[]}e.agentList=[],a(function(){j()}),e.changeAgent=function(){d.send(d.CONST.INSPECTOR,d.CONST.CLK_CHANGE_AGENT_MAIN),b.$broadcast("changedCurrentAgent",e.currentAgent)},e.$on("sidebarTitleDirective.initialize."+e.namespace,function(a,b,c){i(b,c),h(c)}),e.$on("sidebarTitleDirective.empty."+e.namespace,function(a){j()})}}}])}(),function(){"use strict";pinpointApp.directive("filterInformationDirective",["$filter","$base64",function(a,b){return{restrict:"EA",replace:!0,templateUrl:"features/sidebar/filter/filterInformation.html?v="+G_BUILD_TIME,scope:{namespace:"@"},link:function(c,d,e){var f,g;f=function(d){if(g(),oServerMapFilterVo.getRequestUrlPattern()&&(c.urlPattern=b.decode(d.getRequestUrlPattern())),c.includeException=v.getIncludeException()?"Failed Only":"Success + Failed",angular.isNumber(d.getResponseFrom())&&oServerMapFilterVo.getResponseTo()){var e=[];e.push(a("number")(d.getResponseFrom())),e.push("ms"),e.push("~"),"max"===d.getResponseTo()?e.push("30,000+"):e.push(a("number")(d.getResponseTo())),e.push("ms"),c.responseTime=e.join(" ")}var f=d.getFromAgentName(),h=d.getToAgentName();f||h?c.agentFilterInfo=(f||"all")+" -> "+(h||"all"):c.agentFilterInfo=!1},g=function(){c.urlPattern="none",c.responseTime="none",c.includeException="none"},c.$on("filterInformationDirective.initialize."+c.namespace,function(a,b){f(b)})}}}])}(),function(){"use strict";pinpointApp.directive("distributedCallFlowDirective",["$filter","$timeout","CommonAjaxService",function(a,b,c){return{restrict:"E",replace:!0,templateUrl:"features/distributedCallFlow/distributedCallFlow.html?v=${buildTime}",scope:{namespace:"@"},link:function(d,e,f){var g,h,i,j,k,l,m,n,o,p,q,r,s,t,u;window.callStacks=[],o=function(a){var 
b=0,c=0,d="#";for(b=0,c=0;b<a.length;c=a.charCodeAt(b++)+((c<<5)-c));for(b=0,d="#";3>b;d+=("00"+(c>>8*b++&255).toString(16)).slice(-2));return d},k=function(a,b,c,d,e){var f=[];c=c.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;");var g=h.getItemById(e.id);i=g.agent?g.agent:i;var j=o(i),k=h.getIdxById(e.id),l="dcf-popover";if(g.hasException?l+=" has-exception":g.isMethod||(l+=" not-method"),f.push('<div class="'+l+'" data-container=".grid-canvas" data-toggle="popover" data-trigger="manual" data-placement="right" data-content="'+c+'">'),f.push("<div style='position:absolute;top:0;left:0;bottom:0;width:5px;background-color:"+j+"'></div>"),f.push("<span style='display:inline-block;height:1px;width:"+15*e.indent+"px'></span>"),window.callStacks[k+1]&&window.callStacks[k+1].indent>window.callStacks[k].indent?e._collapsed?f.push(" <span class='toggle expand'></span>&nbsp;"):f.push(" <span class='toggle collapse'></span>&nbsp;"):f.push(" <span class='toggle'></span>&nbsp;"),g.hasException)f.push('<span class="glyphicon glyphicon-fire"></span>&nbsp;');else if(g.isMethod){var m=parseInt(g.methodType);switch(m){case 100:f.push('<i class="xi-shipping"></i>&nbsp;');break;case 200:f.push('<span class="glyphicon glyphicon-transfer"></span>&nbsp;');break;case 900:f.push('<i class="xi-info-triangle" style="color:#FF6600"></i>&nbsp;')}}else"SQL"===g.method?f.push('<button type="button" class="btn btn-default btn-xs btn-success sql" style="padding:0px 2px 0px 2px"><span class="glyphicon glyphicon-eye-open sql"></span></button>&nbsp;'):f.push('<span class="glyphicon glyphicon-info-sign"></span>&nbsp;');return f.push(c),f.push("</div>"),f.join("")},l=function(a){var b=!0;if(angular.isDefined(a.parent)&&null!==a.parent)for(var c=window.callStacks[a.parent];c;)c._collapsed&&(b=!1),c=window.callStacks[c.parent];return b},q=function(a,b,c,d,e){var f=[];return f.push('<div class="dcf-popover" data-container=".grid-canvas" data-toggle="popover" data-trigger="manual" 
data-placement="right" data-content="'+c+'">'),f.push(c),f.push("</div>"),f.join("")},r=function(a,b,c,d,e){if(c&&0!==c.length){var f=[];f.push('<a class="btn btn-default btn-xs"'),f.push('href="'),f.push(c),f.push('" target="_blank">');var g=h.getItemById(e.id),i=g.logButtonName;return f.push(i),f.push("</a>"),f.join("")}},n=function(b,c,d,e,f){return a("date")(d,"HH:mm:ss sss")},p=function(a,b,c,d,e){if(angular.isUndefined(c)||null===c||""===c||0===c)return"";var f;return f="#5bc0de","<span class='percent-complete-bar' style='background:"+f+";width:"+c+"%'><span class='percent-complete-bar' style='background-color:#4343C8;width:"+e.execPer+"%;height:4px;float:left;margin-top:1px;'></span></span>"},m=function(a,b){var c=[],d=100/(b[0][a.end]-b[0][a.begin]);return angular.forEach(b,function(b,e){c.push({id:"id_"+e,parent:b[a.parentId]?b[a.parentId]-1:null,indent:b[a.tab],method:b[a.title],argument:b[a.arguments],execTime:b[a.begin]>0?b[a.begin]:null,gapMs:b[a.gap],timeMs:b[a.elapsedTime],timePer:b[a.elapsedTime]?(b[a.end]-b[a.begin])*d+.9:null,"class":b[a.simpleClassName],methodType:b[a.methodType],apiType:b[a.apiType],agent:b[a.agent],applicationName:b[a.applicationName],hasException:b[a.hasException],isMethod:b[a.isMethod],logLink:b[a.logPageUrl],logButtonName:b[a.logButtonName],isFocused:b[a.isFocused],execMilli:b[a.executionMilliseconds],execPer:b[a.elapsedTime]&&b[a.executionMilliseconds]?parseInt(b[a.executionMilliseconds].replace(/,/gi,""))/parseInt(b[a.elapsedTime].replace(/,/gi,""))*100:0})}),c},j=function(a){window.callStacks=m(a.callStackIndex,a.callStack);var f={enableCellNavigation:!0,enableColumnReorder:!0,enableTextSelectionOnCells:!0,topPanelHeight:30,rowHeight:25};h=new Slick.Data.DataView({inlineFilters:!0}),h.beginUpdate(),h.setItems(window.callStacks),h.setFilter(l),h.getItemMetadata=function(a){var b=h.getItemByIdx(a),c={cssClasses:""};return b.hasException===!0&&(c.cssClasses+=" error-point"),b.isFocused===!0&&(c.cssClasses+=" 
entry-point"),null!==b.execTime&&(c.cssClasses+=" id_"+(a+1)),c},h.endUpdate();var i=[{id:"method",name:"Method",field:"method",width:400,formatter:k},{id:"argument",name:"Argument",field:"argument",width:300,formatter:q},{id:"exec-time",name:"Start Time",field:"execTime",width:90,formatter:n},{id:"gap-ms",name:"Gap(ms)",field:"gapMs",width:70,cssClass:"right-align"},{id:"time-ms",name:"Exec(ms)",field:"timeMs",width:70,cssClass:"right-align"},{id:"time-per",name:"Exec(%)",field:"timePer",width:100,formatter:p},{id:"exec-milli",name:"Self(ms)",field:"execMilli",width:75,cssClass:"right-align"},{id:"class",name:"Class",field:"class",width:120},{id:"api-type",name:"API",field:"apiType",width:90},{id:"agent",name:"Agent",field:"agent",width:130},{id:"application-name",name:"Application",field:"applicationName",width:150}];g=new Slick.Grid(e.get(0),h,i,f),g.setSelectionModel(new Slick.RowSelectionModel);var j=!0,o=!1;g.onClick.subscribe(function(a,d){var f;if($(a.target).hasClass("toggle")&&(f=h.getItem(d.row),f&&(f._collapsed?f._collapsed=!1:f._collapsed=!0,h.updateItem(f.id,f)),a.stopImmediatePropagation()),$(a.target).hasClass("sql")){f=h.getItem(d.row);var g=h.getItem(d.row+1),i="sql="+encodeURIComponent(f.argument);angular.isDefined(g)&&"SQL-BindValue"===g.method?(i+="&bind="+encodeURIComponent(g.argument),c.getSQLBind("/sqlBind.pinpoint",i,function(a){$("#customLogPopup").find("h4").html("SQL").end().find("div.modal-body").html('<h4>Binded SQL <button class="btn btn-default btn-xs sql">Copy</button></h4><div style="position:absolute;left:10000px">'+a+'</div><pre class="prettyprint lang-sql" style="margin-top:0px">'+a.replace(/\t\t/g,"")+'</pre><hr><h4>Original SQL <button class="btn btn-default btn-xs sql">Copy</button></h4><div style="position:absolute;left:10000px">'+f.argument+'</div><pre class="prettyprint lang-sql" style="margin-top:0px">'+f.argument.replace(/\t\t/g,"")+'</pre><h4>SQL Bind Value <button class="btn btn-default btn-xs 
sql">Copy</button></h4><div style="position:absolute;left:10000px">'+g.argument+'</div><pre class="prettyprint lang-sql" style="margin-top:0px">'+g.argument+"</pre>").end().modal("show"),prettyPrint()})):($("#customLogPopup").find("h4").html("SQL").end().find("div.modal-body").html('<h4>Original SQL <button class="btn btn-default btn-xs sql">Copy</button></h4><div style="position:absolute;left:10000px">'+f.argument+'</div><pre class="prettyprint lang-sql" style="margin-top:0px">'+f.argument.replace(/\t\t/g,"")+"</pre>").end().modal("show"),prettyPrint())}o||(o=b(function(){j&&e.find(".dcf-popover").popover("hide"),j=!0,o=!1},300))}),g.onDblClick.subscribe(function(a,b){j=!1,$(a.target).popover("toggle")}),g.onCellChange.subscribe(function(a,b){h.updateItem(b.item.id,b.item)}),g.onActiveCellChanged.subscribe(function(a,b){d.$emit("distributedCallFlowDirective.rowSelected."+d.namespace,b.grid.getDataItem(b.row))}),s=function(a){var b=h.getItem(a+1);return!(!b||a!==b.parent)},g.onKeyDown.subscribe(function(a,b){var c=h.getItem(b.row);if(37==a.which)if(s(b.row))c._collapsed=!0,h.updateItem(c.id,c);else{var d=h.getItem(b.row-1);d&&g.setActiveCell(b.row-1,0)}else if(39==a.which){if(c._collapsed)c._collapsed=!1;else{var e=h.getItem(b.row+1);e&&g.setActiveCell(b.row+1,0)}h.updateItem(c.id,c)}}),h.onRowCountChanged.subscribe(function(a,b){g.updateRowCount(),g.render()}),h.onRowsChanged.subscribe(function(a,b){g.invalidateRows(b.rows),g.render()})},$("#customLogPopup").on("click","button",function(){var a=document.createRange();a.selectNode($(this).parent().next().get(0)),window.getSelection().addRange(a);try{document.execCommand("copy")}catch(b){console.log("unable to copy :",b)}window.getSelection().removeAllRanges()}),d.$on("distributedCallFlowDirective.initialize."+d.namespace,function(a,b){j(b)}),d.$on("distributedCallFlowDirective.resize."+d.namespace,function(a){g&&g.resizeCanvas()}),d.$on("distributedCallFlowDirective.selectRow."+d.namespace,function(a,b){var 
c=b-1;g.setSelectedRows([c]),g.setActiveCell(c,0),g.scrollRowToTop(c)}),d.$on("distributedCallFlowDirective.searchCall."+d.namespace,function(a,b,c){var e=t(b,c);-1==e?c>0?(u(t(b,0)),d.$emit("transactionDetail.calltreeSearchCallResult","Loop")):d.$emit("transactionDetail.calltreeSearchCallResult","No call took longer than {time}ms."):(u(e),d.$emit("transactionDetail.calltreeSearchCallResult",""))}),t=function(a,b){for(var c=0,d=-1,e=0;e<window.callStacks.length;e++)if(parseInt(window.callStacks[e].execMilli.replace(/,/gi,""))>=a){if(c==b){d=e;break}c++}return d},u=function(a){g.setSelectedRows([a]),g.setActiveCell(a,0),g.scrollRowIntoView(a,!0)}}}}])}(),function(){"use strict";pinpointApp.constant("responseTimeChartDirectiveConfig",{myColors:["#2ca02c","#3c81fa","#f8c731","#f69124","#f53034"]}),pinpointApp.directive("responseTimeChartDirective",["responseTimeChartDirectiveConfig","$timeout","AnalyticsService","PreferenceService",function(a,b,c,d){var e=d.getResponseTypeColor();return{template:"<div></div>",replace:!0,restrict:"EA",scope:{namespace:"@"},link:function(a,f,g){var h,i,j,k,l,m,n,o;j=function(){h="responseTimeId-"+a.namespace,f.attr("id",h)},k=function(a,b){f.css("width",a||"100%"),f.css("height",b||"150px")},l=function(d,e,f){b(function(){var b={type:"serial",theme:"none",dataProvider:d,startDuration:0,valueAxes:[{gridAlpha:.1,usePrefixes:!0}],graphs:[{balloonText:e?"[[category]] 
filtering":"",colorField:"color",labelText:"[[value]]",fillAlphas:.3,alphaField:"alpha",lineAlpha:.8,lineColor:"#787779",type:"column",valueField:"count"}],categoryField:"responseTime",categoryAxis:{gridAlpha:0}};f&&(b.chartCursor={fullWidth:!0,categoryBalloonAlpha:.7,cursorColor:"#000000",cursorAlpha:0,zoomable:!1}),i=AmCharts.makeChart(h,b),i.addListener("clickGraphItem",function(b){"Error"==b.item.category&&(c.send(c.CONST.MAIN,c.CONST.CLK_RESPONSE_GRAPH),a.$emit("responseTimeChartDirective.showErrorTransacitonList",b.item.category)),e&&a.$emit("responseTimeChartDirective.itemClicked."+a.namespace,b.item.serialDataItem.dataContext)}),e&&(i.addListener("clickGraphItem",m),i.addListener("rollOverGraphItem",function(a){a.event.target.style.cursor="pointer"}))})},m=function(b){a.$emit("responseTimeChartDirective.itemClicked."+a.namespace,b.item.serialDataItem.dataContext)},n=function(a){i.dataProvider=a,b(function(){i.validateData()})},o=function(a){angular.isUndefined(a)&&(a=d.getResponseTypeFormat());var b=[],c=[.2,.3,.4,.6,.6],f=0;for(var g in a)b.push({responseTime:g,count:a[g],color:e[f],alpha:c[f++]});return b},a.$on("responseTimeChartDirective.initAndRenderWithData."+a.namespace,function(a,b,c,d,e,f){j(),k(c,d),l(o(b),e,f)}),a.$on("responseTimeChartDirective.updateData."+a.namespace,function(a,b){n(o(b))})}}}])}(),function(){"use strict";pinpointApp.constant("loadChartDirectiveConfig",{}),pinpointApp.directive("loadChartDirective",["loadChartDirectiveConfig","$timeout","AnalyticsService","PreferenceService",function(a,b,c,d){var e=d.getResponseTypeColor();return{template:'<div style="text-align:center"></div>',replace:!0,restrict:"EA",scope:{namespace:"@"},link:function(a,d,f){var g,h,i,j,k,l,m,n,o,p;j=function(){g="loadId-"+a.namespace,d.attr("id",g)},k=function(a,b){a&&d.css("width",a),b&&d.css("height",b)},l=function(a,d){b(function(){var 
b={type:"serial",theme:"light",legend:{autoMargins:!1,align:"right",borderAlpha:0,equalWidths:!0,horizontalGap:0,verticalGap:0,markerSize:10,useGraphSettings:!1,valueWidth:0,spacing:0,markerType:"circle",position:"top"},dataProvider:a,valueAxes:[{stackType:"regular",axisAlpha:1,usePrefixes:!0,gridAlpha:.1}],categoryField:"time",categoryAxis:{startOnAxis:!0,gridPosition:"start",labelFunction:function(a,b,c){var d=a.indexOf("-"),e=a.indexOf(" ");return a.substring(d+1,e)+"\n"+a.substring(e+1)}},balloon:{fillAlpha:1,borderThickness:1},graphs:[{balloonText:"[[title]] : <b>[[value]]</b>",fillAlphas:.2,fillColors:e[0],lineAlpha:.8,lineColor:"#787779",title:h[0],type:"step",legendColor:e[0],valueField:h[0]},{balloonText:"[[title]] : <b>[[value]]</b>",fillAlphas:.3,fillColors:e[1],lineAlpha:.8,lineColor:"#787779",title:h[1],type:"step",legendColor:e[1],valueField:h[1]},{balloonText:"[[title]] : <b>[[value]]</b>",fillAlphas:.4,fillColors:e[2],lineAlpha:.8,lineColor:"#787779",title:h[2],type:"step",legendColor:e[2],valueField:h[2]},{balloonText:"[[title]] : <b>[[value]]</b>",fillAlphas:.6,fillColors:e[3],lineAlpha:.8,lineColor:"#787779",title:h[3],type:"step",legendColor:e[3],valueField:h[3]},{balloonText:"[[title]] : <b>[[value]]</b>",fillAlphas:.6,fillColors:e[4],lineAlpha:.8,lineColor:"#787779",title:h[4],type:"step",legendColor:e[4],valueField:h[4]}]};d&&(b.chartCursor={cursorPosition:"mouse",categoryBalloonAlpha:.7,categoryBalloonDateFormat:"H:NN"}),i=AmCharts.makeChart(g,b),i.addListener("clickGraph",function(a){c.send(c.CONST.MAIN,c.CONST.CLK_LOAD_GRAPH)})})},o=function(a,c){b(function(){var b={type:"serial",pathToImages:"./components/amcharts/images/",theme:"light",dataProvider:a,valueAxes:[{stackType:"regular",axisAlpha:0,gridAlpha:0,labelsEnabled:!1}],categoryField:"time",categoryAxis:{startOnAxis:!0,gridPosition:"start",labelFunction:function(a,b,c){return 
moment(a).format("HH:mm")}},chartScrollbar:{graph:"AmGraph-1"},graphs:[{id:"AmGraph-1",fillAlphas:.2,fillColors:e[0],lineAlpha:.8,lineColor:"#787779",type:"step",valueField:h[0]},{id:"AmGraph-2",fillAlphas:.3,fillColors:e[1],lineAlpha:.8,lineColor:"#787779",type:"step",valueField:h[1]},{id:"AmGraph-3",fillAlphas:.4,fillColors:e[2],lineAlpha:.8,lineColor:"#787779",type:"step",valueField:h[2]},{id:"AmGraph-4",fillAlphas:.6,fillColors:e[3],lineAlpha:.8,lineColor:"#787779",type:"step",valueField:h[3]},{id:"AmGraph-5",fillAlphas:.6,fillColors:e[4],lineAlpha:.8,lineColor:"#787779",type:"step",valueField:h[4]}]};c&&(b.chartCursor={avoidBalloonOverlapping:!1}),i=AmCharts.makeChart(g,b),i.addListener("changed",function(a){})})},p=function(){d.append("<h4 style='padding-top:25%'>No Data</h4>")},n=function(a){angular.isUndefined(i)?0!==a.length&&l(a,!0):(i.dataProvider=a,b(function(){i.validateData()}))},m=function(a){function b(a){for(var b in c)if(moment(a).format("YYYY-MM-DD HH:mm")===c[b].time)return b;return-1}if(angular.isUndefined(a))return[];h=[];for(var c=[],d=0;d<a.length;d++){var e=a[d];h.push(e.key);for(var f=0;f<e.values.length;f++){var g=e.values[f],i=b(g[0]);if(i>-1)c[i][e.key]=g[1];else{var j={time:moment(g[0]).format("YYYY-MM-DD HH:mm")};j[e.key]=g[1],c.push(j)}}}return c},a.$on("loadChartDirective.initAndRenderWithData."+a.namespace,function(a,b,c,d,e){j(),k(c,d);var f=m(b);0===f.length?p():l(f,e)}),a.$on("loadChartDirective.updateData."+a.namespace,function(a,b){n(m(b))}),a.$on("loadChartDirective.initAndSimpleRenderWithData."+a.namespace,function(a,b,c,d,e){j(),k(c,d);var f=m(b);0===f.length?p():o(f,e)})}}}])}(),function(){"use strict";angular.module("pinpointApp").directive("jvmMemoryChartDirective",["$timeout",function(a){return{template:"<div></div>",replace:!0,restrict:"E",scope:{namespace:"@"},link:function(b,c,d){var 
e,f,g,h,i,j,k;g=function(){e="multipleValueAxesId-"+b.namespace,c.attr("id",e)},h=function(a,b){a&&c.css("width",a),b&&c.css("height",b)},i=function(c){var d={type:"serial",theme:"light",autoMargins:!1,marginTop:10,marginLeft:70,marginRight:70,marginBottom:30,legend:{useGraphSettings:!0,autoMargins:!1,align:"right",position:"top",valueWidth:70},usePrefixes:!0,dataProvider:c,valueAxes:[{id:"v1",gridAlpha:0,axisAlpha:1,position:"right",title:"Full GC (ms)",minimum:0},{id:"v2",gridAlpha:0,axisAlpha:1,position:"left",title:"Memory (bytes)",minimum:0}],graphs:[{valueAxis:"v2",balloonText:"[[value]]B",legendValueText:"[[value]]B",lineColor:"rgb(174, 199, 232)",title:"Max",valueField:"Max",fillAlphas:0,connect:!1},{valueAxis:"v2",balloonText:"[[value]]B",legendValueText:"[[value]]B",lineColor:"rgb(31, 119, 180)",fillColor:"rgb(31, 119, 180)",title:"Used",valueField:"Used",fillAlphas:.4,connect:!1},{valueAxis:"v1",balloonFunction:function(a,b){var c=a.serialDataItem.dataContext,d=c.FGCTime+"ms",e=c.FGCCount;return e>1&&(d+=" ("+e+")"),d},legendValueText:"[[value]]ms",lineColor:"#FF6600",title:"FGC",valueField:"FGCTime",type:"column",fillAlphas:.3,connect:!1}],chartCursor:{categoryBalloonAlpha:.7,fullWidth:!0,cursorAlpha:.1},categoryField:"time",categoryAxis:{axisColor:"#DADADA",startOnAxis:!0,gridPosition:"start",labelFunction:function(a,b,c){return a.substring(a.indexOf(" 
")+1)}}};a(function(){f=AmCharts.makeChart(e,d),f.chartCursor.addListener("changed",function(a){b.$emit("jvmMemoryChartDirective.cursorChanged."+b.namespace,a)})})},j=function(a){a?(angular.isNumber(a)&&(a=f.dataProvider[a].time),f.chartCursor.showCursorAt(a)):f.chartCursor.hideCursor()},k=function(){f&&(f.validateNow(),f.validateSize())},b.$on("jvmMemoryChartDirective.initAndRenderWithData."+b.namespace,function(a,b,c,d){g(),h(c,d),i(b)}),b.$on("jvmMemoryChartDirective.showCursorAt."+b.namespace,function(a,b){j(b)}),b.$on("jvmMemoryChartDirective.resize."+b.namespace,function(a){k()})}}}])}(),function(){"use strict";angular.module("pinpointApp").directive("cpuLoadChartDirective",["$timeout",function(a){return{template:"<div></div>",replace:!0,restrict:"E",scope:{namespace:"@"},link:function(b,c,d){var e,f,g,h,i,j,k;g=function(){e="multipleValueAxesId-"+b.namespace,c.attr("id",e)},h=function(a,b){a&&c.css("width",a),b&&c.css("height",b)},i=function(c){var d={type:"serial",theme:"light",autoMargins:!1,marginTop:10,marginLeft:70,marginRight:70,marginBottom:30,legend:{useGraphSettings:!0,autoMargins:!0,align:"right",position:"top",valueWidth:70},usePrefixes:!0,dataProvider:c,valueAxes:[{id:"v1",gridAlpha:0,axisAlpha:1,position:"left",title:"Cpu Usage (%)",maximum:100,minimum:0}],graphs:[{valueAxis:"v1",balloonText:"[[value]]%",legendValueText:"[[value]]%",lineColor:"rgb(31, 119, 180)",fillColor:"rgb(31, 119, 180)",title:"JVM",valueField:"jvmCpuLoad",fillAlphas:.4,connect:!1},{valueAxis:"v1",balloonText:"[[value]]%",legendValueText:"[[value]]%",lineColor:"rgb(174, 199, 232)",fillColor:"rgb(174, 199, 
232)",title:"System",valueField:"systemCpuLoad",fillAlphas:.4,connect:!1},{valueAxis:"v1",showBalloon:!1,lineColor:"#FF6600",title:"Max",valueField:"maxCpuLoad",fillAlphas:0,visibleInLegend:!1}],chartCursor:{categoryBalloonAlpha:.7,fullWidth:!0,cursorAlpha:.1},categoryField:"time",categoryAxis:{axisColor:"#DADADA",startOnAxis:!0,gridPosition:"start",labelFunction:function(a,b,c){return moment(a).format("HH:mm:ss")}}};a(function(){f=AmCharts.makeChart(e,d),f.chartCursor.addListener("changed",function(a){b.$emit("cpuLoadChartDirective.cursorChanged."+b.namespace,a)})})},j=function(a){a?(angular.isNumber(a)&&(a=f.dataProvider[a].time),f.chartCursor.showCursorAt(a)):f.chartCursor.hideCursor()},k=function(){f&&(f.validateNow(),f.validateSize())},b.$on("cpuLoadChartDirective.initAndRenderWithData."+b.namespace,function(a,b,c,d){g(),h(c,d),i(b)}),b.$on("cpuLoadChartDirective.showCursorAt."+b.namespace,function(a,b){j(b)}),b.$on("cpuLoadChartDirective.resize."+b.namespace,function(a){k()})}}}])}(),function(){"use strict";angular.module("pinpointApp").directive("tpsChartDirective",["$timeout",function(a){return{template:"<div></div>",replace:!0,restrict:"E",scope:{namespace:"@"},link:function(b,c,d){var e,f,g,h,i,j,k;g=function(){e="multipleValueAxesId-"+b.namespace,c.attr("id",e)},h=function(a,b){a&&c.css("width",a),b&&c.css("height",b)},i=function(c){var d={type:"serial",theme:"light",autoMargins:!1,marginTop:10,marginLeft:70,marginRight:70,marginBottom:30,legend:{useGraphSettings:!0,autoMargins:!0,align:"right",position:"top",valueWidth:70},usePrefixes:!0,dataProvider:c,valueAxes:[{stackType:"regular",gridAlpha:0,axisAlpha:1,position:"left",title:"TPS",minimum:0}],graphs:[{balloonText:"Sampled Continuation : [[value]]",legendValueText:"[[value]]",lineColor:"rgb(214, 141, 8)",fillColor:"rgb(214, 141, 8)",title:"S.C",valueField:"sampledContinuationTps",fillAlphas:.4,connect:!0},{balloonText:"Sampled New : [[value]]",legendValueText:"[[value]]",lineColor:"rgb(252, 178, 
65)",fillColor:"rgb(252, 178, 65)",title:"S.N",valueField:"sampledNewTps",fillAlphas:.4,connect:!0},{balloonText:"Unsampled Continuation : [[value]]",legendValueText:"[[value]]",lineColor:"rgb(90, 103, 166)",fillColor:"rgb(90, 103, 166)",title:"U.C",valueField:"unsampledContinuationTps",fillAlphas:.4,connect:!0},{balloonText:"Unsampled New : [[value]]",legendValueText:"[[value]]",lineColor:"rgb(160, 153, 255)",fillColor:"rgb(160, 153, 255)",title:"U.N",valueField:"unsampledNewTps",fillAlphas:.4,connect:!0},{balloonText:"Total : [[value]]",legendValueText:"[[value]]",lineColor:"rgba(31, 119, 180, 0)",fillColor:"rgba(31, 119, 180, 0)",valueField:"totalTps",fillAlphas:.4,connect:!0}],chartCursor:{categoryBalloonAlpha:.7,fullWidth:!0,cursorAlpha:.1},categoryField:"time",categoryAxis:{axisColor:"#DADADA",startOnAxis:!0,gridPosition:"start",labelFunction:function(a,b,c){return moment(a).format("HH:mm:ss")}}};a(function(){f=AmCharts.makeChart(e,d),f.chartCursor.addListener("changed",function(a){b.$emit("tpsChartDirective.cursorChanged."+b.namespace,a)})})},j=function(a){a?(angular.isNumber(a)&&(a=f.dataProvider[a].time),f.chartCursor.showCursorAt(a)):f.chartCursor.hideCursor()},k=function(){f&&(f.validateNow(),f.validateSize())},b.$on("tpsChartDirective.initAndRenderWithData."+b.namespace,function(a,b,c,d){g(),h(c,d),i(b)}),b.$on("tpsChartDirective.showCursorAt."+b.namespace,function(a,b){j(b)}),b.$on("tpsChartDirective.resize."+b.namespace,function(a){k()})}}}])}(),function(){"use strict";pinpointApp.directive("loadingDirective",["$timeout","$templateCache","$compile",function(a,b,c){return{restrict:"A",scope:{showLoading:"=loadingDirective",loadingMessage:"@"},link:function(a,d,e){a.loadingMessage||(a.loadingMessage="Please Wait...");var f=b.get(e.loadingDirective);"static"===d.css("position")&&d.css("position","relative"),d.append(c(f)(a))}}}])}(),function(a){"use 
strict";pinpointApp.constant("ConfigurationConfig",{menu:{GENERAL:"general",ALARM:"alarm",HELP:"help"}}),pinpointApp.controller("ConfigurationCtrl",["$scope","$element","ConfigurationConfig","AnalyticsService",function(b,c,d,e){var f=c.find(".modal-body");b.descriptionOfCurrentTab="Set your option", b.currentTab=d.menu.GENERAL;for(var g in d.menu)!function(a){var c="is"+a.substring(0,1).toUpperCase()+a.substring(1).toLowerCase();b[c]=function(){return b.currentTab==d.menu[a]}}(g);a(c).on("hidden.bs.modal",function(a){b.currentTab=d.menu.GENERAL,b.$broadcast("configuration.alarm.initClose")}),b.setCurrentTab=function(a){if(b.currentTab!=a)switch(b.currentTab=a,a){case d.menu.GENERAL:e.send(e.CONST.MAIN,e.CONST.CLK_GENERAL),f.css("background-color","#e9eaed"),b.descriptionOfCurrentTab="Set your option",b.$broadcast("general.configuration.show");break;case d.menu.ALARM:e.send(e.CONST.MAIN,e.CONST.CLK_ALARM),f.css("background-color","#e9eaed"),b.descriptionOfCurrentTab="Set your alarm rules",b.$broadcast("alarmUserGroup.configuration.show");break;case d.menu.HELP:e.send(e.CONST.MAIN,e.CONST.CLK_HELP),f.css("background-color","#FFF"),b.descriptionOfCurrentTab=""}},b.$on("configuration.show",function(){e.send(e.CONST.MAIN,e.CONST.CLK_CONFIGURATION),c.modal("show")})}])}(jQuery),function(a){"use strict";pinpointApp.controller("HelpCtrl",["$scope","$element",function(a,b){a.enHelpList=[{title:"Quick start guide",link:"https://github.com/naver/pinpoint/blob/master/quickstart/README.md"},{title:"Technical Overview of Pinpoint",link:"https://github.com/naver/pinpoint/wiki/Technical-Overview-Of-Pinpoint"},{title:"Using Pinpont with Docker",link:"http://yous.be/2015/05/05/using-pinpoint-with-docker/"},{title:"Notes on Jetty Plugin for Pinpoint ",link:"https://github.com/cijung/Docs/blob/master/JettyPluginNotes.md"},{title:"About Alarm",link:"https://github.com/naver/pinpoint/blob/master/doc/alarm.md#alarm"}],a.koHelpList=[{title:"Pinpoint 개발자가 작성한 Pinpoint 
기술문서",link:"http://helloworld.naver.com/helloworld/1194202"},{title:"소개 및 설치 가이드",link:"http://dev2.prompt.co.kr/33"},{title:"Pinpoint 사용 경험",link:"http://www.barney.pe.kr/blog/category/development/page/2/"},{title:"설치 가이드 동영상 강좌 1",link:"https://www.youtube.com/watch?v=hrvKaEaDEGs"},{title:"설치 가이드 동영상 강좌 2",link:"https://www.youtube.com/watch?v=fliKPGHGXK4"},{title:"AWS Ubuntu 14.04 설치 가이드 ",link:"http://lky1001.tistory.com/132"},{title:"Alarm 가이드",link:"https://github.com/naver/pinpoint/blob/master/doc/alarm.md#alarm-1"}]}])}(jQuery),function(a){"use strict";pinpointApp.constant("GeneralConfig",{menu:{GENERAL:"general",ALRAM:"alram"}}),pinpointApp.controller("GeneralCtrl",["GeneralConfig","$scope","$rootScope","$element","$document","PreferenceService","AnalyticsService","helpContentService",function(a,b,c,d,e,f,g,h){function i(a){g.send(g.CONST.MAIN,g.CONST.CLK_GENERAL_SET_FAVORITE),f.addFavorite(a),b.$apply(function(){b.favoriteList=f.getFavoriteList(),c.$broadcast("navbarDirective.changedFavorite")})}function j(a){if(!a.id)return a.text;var b=a.text.split("@");if(b.length>1){var c=e.get(0).createElement("img");return c.src="/images/icons/"+b[1]+".png",c.style.height="25px",c.style.paddingRight="3px",c.outerHTML+b[0]}return a.text}function k(){var a=!1;l.select2({placeholder:"Select an application.",searchInputPlaceholder:"Input your application name.",allowClear:!1,formatResult:j,formatSelection:j,escapeMarkup:function(a){return 
a}}).on("select2-selecting",function(b){a=!0}).on("select2-close",function(b){a===!0&&setTimeout(function(){i(l.select2("val"))},0),a=!1})}d.find("span.general-warning").html(h.configuration.general.warning),d.find("div.favorite-empty").html(h.configuration.general.empty),b.$on("general.configuration.show",function(){}),b.depthList=f.getDepthList(),b.periodTypes=f.getPeriodTypes(),b.caller=f.getCaller(),b.callee=f.getCallee(),b.period=f.getPeriod(),b.favoriteList=f.getFavoriteList(),b.changeCaller=function(){g.send(g.CONST.MAIN,g.CONST.CLK_GENERAL_SET_DEPTH,b.caller),f.setCaller(b.caller)},b.changeCallee=function(){g.send(g.CONST.MAIN,g.CONST.CLK_GENERAL_SET_DEPTH,b.callee),f.setCallee(b.callee)},b.changePeriod=function(){g.send(g.CONST.MAIN,g.CONST.CLK_GENERAL_SET_PERIOD,b.period),f.setPeriod(b.period)},b.removeFavorite=function(a){g.send(g.CONST.MAIN,g.CONST.CLK_GENERAL_SET_FAVORITE),f.removeFavorite(a),b.favoriteList=f.getFavoriteList(),c.$broadcast("navbarDirective.changedFavorite")};var l=d.find(".applicationList");b.$on("configuration.general.applications.set",function(a,c){b.applications=c,k()})}])}(jQuery),function(a){"use strict";pinpointApp.directive("alarmUserGroupDirective",["$rootScope","$timeout","helpContentTemplate","helpContentService","AlarmUtilService","AlarmBroadcastService","AnalyticsService","globalConfig",function(b,c,d,e,f,g,h,i){return{restrict:"EA",replace:!0,templateUrl:"features/configuration/alarm/alarmUserGroup.html?v="+G_BUILD_TIME,scope:!0,link:function(b,d){function e(c){""!==r&&a("#"+b.prefix+r).removeClass("selected"),a("#"+b.prefix+c).addClass("selected"),r=c}function j(){f.unsetFilterBackground(x),B.val("")}function k(a){f.showLoading(z,!1),p(f.extractID(a),a.find("span.contents").html())}function l(a){a.find("span.right").remove().end().find("span.remove").show().end().removeClass("remove"),t=!1}function m(a){t!==!0&&(e(f.extractID(a)),g.sendReloadWithUserGroupID(a.find("span.contents").html()))}function 
n(a){f.sendCRUD("createUserGroup",{id:a},function(b){v.push({number:b.number,id:a}),i.userId?g.sendInit(a,{userId:i.userId,name:i.userName,department:i.userDepartment}):g.sendInit(a),c(function(){e(b.number)}),f.setTotal(y,v.length),f.hide(z,C)},function(a){},A)}function o(a,b){f.sendCRUD("updateUserGroup",{number:a,id:b},function(c){for(var d=0;d<v.length;d++)v[d].number==a&&(v[d].id=b);f.hide(z,C)},function(a){},A)}function p(a,d){f.sendCRUD("removeUserGroup",{id:d},function(d){b.$apply(function(){for(var b=0;b<v.length;b++)if(v[b].number==a){v.splice(b,1);break}v.length>0?(c(function(){e(v[0].number)}),g.sendReloadWithUserGroupID(v[0].id)):g.sendSelectionEmpty()}),f.setTotal(y,v.length),f.hide(z),t=!1},function(a){},A)}function q(a,d){f.sendCRUD("getUserGroupList",angular.isUndefined(d)||""===d?{userId:i.userId||""}:{userGroupId:d},function(d){u=!0,v=b.userGroupList=d,f.setTotal(y,v.length),f.hide(z),v.length>0&&(a&&(g.sendInit(v[0].id),r=v[0].number),c(function(){e(r)})),i.userId&&g.sendLoadPinpointUser(i.userDepartment)},function(a){},A)}b.prefix="alarmUserGroup_";var r="",s=!0,t=!1,u=!1,v=b.userGroupList=[],w=d,x=w.find(".wrapper"),y=w.find(".total"),z=w.find(".some-loading"),A=w.find(".some-alert"),B=w.find("div.filter-input input"),C=(w.find("div.filter-input button.trash"),w.find(".some-edit-content")),D=C.find("input"),E=C.find(".title-message"),F=a(['<span class="right">','<button class="btn btn-danger confirm-cancel"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span></button>','<button class="btn btn-danger confirm-remove" style="margin-left:2px;"><span class="glyphicon glyphicon-ok" aria-hidden="true"></span></button>',"</span>"].join("")),G=w.find(".some-list-content ul");G.on("click",function(c){var d=a(c.toElement||c.target),e=d.get(0).tagName.toLowerCase(),f=d.parents("li");if("button"===e)d.hasClass("edit")?b.onUpdate(c):d.hasClass("confirm-remove")?k(f):d.hasClass("confirm-cancel")&&revmoeCancel(f);else 
if("span"===e)if(d.hasClass("remove")){if(t===!0)return;t=!0,f.addClass("remove").find("span.remove").hide().end().append(F)}else d.hasClass("contents")?m(f):d.hasClass("glyphicon-edit")?b.onUpdate(c):d.hasClass("glyphicon-remove")?l(f):d.hasClass("glyphicon-ok")&&k(f);else"li"===e&&m(d)}),b.onRefresh=function(){t!==!0&&(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_REFRESH_USER_GROUP),j(),f.showLoading(z,!1),q(!1,a.trim(B.val())))},b.onCreate=function(){t!==!0&&(s=!0,E.html("Create new alarm group"),D.val(""),f.show(C),D.focus())},b.onUpdate=function(b){if(t!==!0){s=!1;var c=a(b.toElement||b.target).parents("li"),d=f.extractID(c),e=c.find("span.contents").html();E.html('Input new name of "'+e+'".'),D.val(e).prop("id","updateUserGroup_"+d),f.show(C),D.focus().select()}},b.onSearch=function(){if(t!==!0){var b=a.trim(B.val());if(0!==b.length&&b.length<3)return f.showLoading(z,!1),void f.showAlert(A,"You must enter at least three characters.");h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_FILTER_USER_GROUP),f.showLoading(z,!1),q(!1,b)}},b.onInputEdit=function(a){13==a.keyCode?b.onApplyEdit():27==a.keyCode&&(b.onCancelEdit(),a.stopPropagation())},b.onCancelEdit=function(){f.hide(C)},b.onApplyEdit=function(){var b=a.trim(D.val());return""===b?(f.showLoading(z,!0),void f.showAlert(A,"Input group id.")):(f.showLoading(z,!0),f.hasDuplicateItem(v,function(a){return a.id==b})&&s===!0?void f.showAlert(A,"Exist a same group name in the lists."):void(s?(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_CREATE_USER_GROUP),n(b)):o(f.extractID(D),b)))},b.onCloseAlert=function(){f.closeAlert(A,z)},b.$on("alarmUserGroup.configuration.show",function(){u===!1&&q(!0)})}}}])}(jQuery),function(a){"use 
strict";pinpointApp.directive("alarmGroupMemberDirective",["$rootScope","$timeout","helpContentTemplate","helpContentService","AlarmUtilService","AlarmBroadcastService","AnalyticsService",function(b,c,d,e,f,g,h){return{restrict:"EA",replace:!0,templateUrl:"features/configuration/alarm/alarmGroupMember.html?v="+G_BUILD_TIME,scope:!0,link:function(b,c){function d(a){f.showLoading(t,!1),m(f.extractID(a))}function e(a){a.find("span.right").remove().end().find("span.remove").show().end().removeClass("remove"),A=!1}function i(a){var c=0;if(b.groupMemberList!=B)for(c=0;c<B.length;c++)if(B[c].memberId==a.userId){B[c].name=a.name,B[c].department=a.department;break}for(c=0;c<b.groupMemberList.length;c++)if(b.groupMemberList[c].memberId==a.userId){b.groupMemberList[c].name=a.name,b.groupMemberList[c].department=a.department;break}}function j(a){var c=0;if(b.groupMemberList!=B)for(c=0;c<B.length;c++)if(B[c].memberId==a){B.splice(c,1);break}for(c=0;c<b.groupMemberList.length;c++)if(b.groupMemberList[c].memberId==a){b.groupMemberList.splice(c,1);break}}function k(){A===!0&&q.find("li.remove").each(function(){e(a(this))}),f.unsetFilterBackground(r),v.val("")}function l(a){f.sendCRUD("addMemberInGroup",{userGroupId:y,memberId:a.userId},function(c){b.groupMemberList.push({userGroupId:y,memberId:a.userId,name:a.name,department:a.department}),f.setTotal(s,B.length),f.hide(t),g.sendCallbackAddedUser(!0)},function(a){},u)}function m(a){f.sendCRUD("removeMemberInGroup",{userGroupId:y,memberId:a},function(c){b.$apply(function(){j(a)}),f.setTotal(s,B.length),f.hide(t),A=!1},function(a){},u)}function n(a){f.sendCRUD("getGroupMemberListInGroup",{userGroupId:y},function(c){z=!0,B=b.groupMemberList=c,f.setTotal(s,B.length),f.hide(t),angular.isDefined(a)&&l(a)},function(a){},u)}function o(c){return a("#"+b.prefix+c).length>0}function p(){return""===y?(f.showLoading(t,!1),f.showAlert(u,"Not selected User Group.",!0),!1):!0}var 
q=a(c),r=q.find(".wrapper"),s=q.find(".total"),t=q.find(".some-loading"),u=q.find(".some-alert"),v=q.find("div.filter-input input"),w=q.find("div.filter-input button.trash"),x=a(['<span class="position:absolute;right;0px">','<button class="btn btn-danger confirm-cancel"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span></button>','<button class="btn btn-danger confirm-remove" style="margin-left:2px;"><span class="glyphicon glyphicon-ok" aria-hidden="true"></span></button>',"</span>"].join("")),y="",z=!1,A=!1,B=b.groupMemberList=[],C=q.find(".some-list-content ul");C.on("click",function(b){var c=a(b.toElement||b.target),f=c.get(0).tagName.toLowerCase(),g=c.parents("li");if("button"==f)c.hasClass("confirm-cancel")?e(g):c.hasClass("confirm-remove")&&d(g);else if("span"==f)if(c.hasClass("remove")){if(A===!0)return;A=!0,g.addClass("remove").find("span.remove").hide().end().append(x)}else c.hasClass("glyphicon-remove")?e(g):c.hasClass("glyphicon-ok")&&d(g)}),b.prefix="alarmGroupMember_",b.onRefresh=function(){A!==!0&&p()!==!1&&(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_REFRESH_USER),v.val(""),f.showLoading(t,!1),n())},b.onInputFilter=function(c){return A!==!0?13==c.keyCode?void b.onFilterGroup():void(a.trim(v.val()).length>=3?w.removeClass("disabled"):w.addClass("disabled")):void 0},b.onFilterGroup=function(){if(A!==!0&&p()!==!1){var c=a.trim(v.val());if(0!==c.length&&c.length<3)return f.showLoading(t,!1),void f.showAlert(u,"You must enter at least three characters.");if(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_FILTER_USER),""===c)b.groupMemberList.length!=B.length&&(b.groupMemberList=B,f.unsetFilterBackground(r)),w.addClass("disabled");else{for(var 
d=[],e=(B.length,0);e<B.length;e++)-1!=B[e].memberId.indexOf(c)&&d.push(B[e]);b.groupMemberList=d,f.setFilterBackground(r)}}},b.onFilterEmpty=function(){A!==!0&&""!==a.trim(v.val())&&(v.val(""),b.onFilterGroup())},b.onCloseAlert=function(){f.closeAlert(u,t)},b.$on("alarmGroupMember.configuration.load",function(a,b,c){y=b,k(),n(c)}),b.$on("alarmGroupMember.configuration.selectNone",function(a){y="",B=b.groupMemberList=[]}),b.$on("alarmGroupMember.configuration.addUser",function(a,b){return p()===!1?void g.sendCallbackAddedUser(!1):(f.showLoading(t,!1),void(o(b.userId)===!1?(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_ADD_USER),k(),l(b)):(f.showAlert(u,"Exist a same user in the lists.",!0),g.sendCallbackAddedUser(!0))))}),b.$on("alarmGroupMember.configuration.updateUser",function(a,b){o(b.userId)&&(k(),i(b))}),b.$on("alarmGroupMember.configuration.removeUser",function(a,c){o(c)&&(k(),b.$apply(function(){j(c)}))})}}}])}(jQuery),function(a){"use strict";pinpointApp.directive("alarmPinpointUserDirective",["$rootScope","$timeout","helpContentTemplate","helpContentService","AlarmUtilService","AlarmBroadcastService","AnalyticsService","globalConfig",function(b,c,d,e,f,g,h,i){return{restrict:"EA",replace:!0,templateUrl:"features/configuration/alarm/alarmPinpointUser.html?v="+G_BUILD_TIME,scope:!0,link:function(b,c){function d(a){f.showLoading(t,!1),g.sendUserAdd(k(f.extractID(a)))}function e(a){f.showLoading(t,!1),n(f.extractID(a))}function j(a){a.find("span.right").remove().end().find("span.remove").show().end().find("button.move").removeClass("disabled").end().removeClass("remove"),H=!1}function k(a){for(var b,c=I.length,d=0;c>d;d++)if(I[d].userId==a){b=I[d];break}return b}function l(a,c,d,e,g){var h={userId:a,name:c,department:d,phoneNumber:e,email:g};f.sendCRUD("createPinpointUser",h,function(a){f.hide(t,x),v.val("userName"),w.val(c),b.onSearch()},function(a){},u)}function m(a,b,c,d,e){var 
h={userId:a,name:b,department:c,phoneNumber:d,email:e};f.sendCRUD("updatePinpointUser",h,function(i){for(var j=0;j<I.length;j++)I[j].userId==a&&(I[j].name=b,I[j].department=c,I[j].phoneNumber=d,I[j].email=e);f.hide(t,x),g.sendUserUpdated(h)},function(a){},u)}function n(a){f.sendCRUD("removePinpointUser",{userId:a},function(c){b.$apply(function(){for(var b=0;b<I.length;b++)if(I[b].userId==a){I.splice(b,1);break}}),f.setTotal(s,I.length),f.hide(t),H=!1,g.sendUserRemoved(a)},function(a){},u)}function o(a){f.sendCRUD("getPinpointUserList",a||{},function(a){I=b.pinpointUserList=a,f.setTotal(s,I.length),f.hide(t)},function(a){},u)}function p(a){var b=/^(([^<>()[\]\.,;:\s@\"]+(\.[^<>()[\]\.,;:\s@\"]+)*)|(\".+\"))@(([^<>()[\]\.,;:\s@\"]+\.)+[^<>()[\]\.,;:\s@\"]{2,})$/i;return b.test(a)}function q(a){var b=/^\d+$/;return b.test(a)}var r=a(c),s=(r.find(".some-list-header button"),r.find(".wrapper"),r.find(".empty-list"),r.find(".total")),t=r.find(".some-loading"),u=r.find(".some-alert"),v=r.find("select"),w=r.find("div.filter-input input"),x=r.find(".some-edit-content"),y=x.find("input[name=userID]"),z=x.find("input[name=name]"),A=x.find("input[name=department]"),B=x.find("input[name=phone]"),C=x.find("input[name=email]"),D=x.find(".title-message"),E=a(['<span class="position:absolute;right:0px">','<button class="btn btn-danger confirm-cancel"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span></button>','<button class="btn btn-danger confirm-remove" style="margin-left:2px;"><span class="glyphicon glyphicon-ok" aria-hidden="true"></span></button>',"</span>"].join("")),F=!1,G=!0,H=!1,I=b.pinpointUserList=[],J=r.find(".some-list-content ul");J.on("click",function(c){var f=a(c.toElement||c.target),g=f.get(0).tagName.toLowerCase(),h=f.parents("li");if("button"==g)f.hasClass("confirm-cancel")?j(h):f.hasClass("confirm-cancel")?e(h):f.hasClass("move")?d(h):f.hasClass("edit-user")&&b.onUpdate(c);else 
if("span"==g)if(f.hasClass("remove")){if(H===!0)return;H=!0,h.addClass("remove").find("span.remove").hide().end().find("button.move").addClass("disabled").end().append(E)}else f.hasClass("contents")||(f.hasClass("glyphicon-edit")?b.onUpdate(c):f.hasClass("glyphicon-remove")?j(h):f.hasClass("glyphicon-ok")?e(h):f.hasClass("glyphicon-chevron-left")&&d(h))}),b.isAllowedCreate=i.editUserInfo,b.onCreate=function(){H!==!0&&(G=!0,D.html("Create new pinpoint user"),y.prop("disabled",""),y.val(""),z.val(""),A.val(""),B.val(""),C.val(""),f.show(x),y.focus())},b.onUpdate=function(b){if(H!==!0){G=!1;var c=a(b.toElement||b.target).parents("li"),d=k(f.extractID(c));D.html("Update pinpoint user data."),y.prop("disabled","disabled"),y.val(d.userId),z.val(d.name),A.val(d.department),B.val(d.phoneNumber),C.val(d.email),f.show(x),z.focus().select()}},b.onInputSearch=function(a){return H!==!0&&13==a.keyCode?void b.onSearch():void 0},b.onSearch=function(){if(H!==!0){var b=v.val(),c=a.trim(w.val());if(0!==c.length&&c.length<3)return f.showLoading(t,!1),void f.showAlert(u,"You must enter at least three characters.");h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_FILTER_PINPOINT_USER),f.showLoading(t,!1);var d={};d[b]=c,o(d)}},b.onInputEdit=function(a){13==a.keyCode?b.onApplyEdit():27==a.keyCode&&(b.onCancelEdit(),a.stopPropagation())},b.onCancelEdit=function(){f.hide(x)},b.onApplyEdit=function(){var b=a.trim(y.val()),c=a.trim(z.val()),d=a.trim(A.val()),e=a.trim(B.val()),g=a.trim(C.val());return""===b||""===c?(f.showLoading(t,!0),void f.showAlert(u,"You must input user id and user name.")):(f.showLoading(t,!0),f.hasDuplicateItem(I,function(a){return a.userId==b})&&G===!0?void f.showAlert(u,"Exist a same user id in the lists."):q(e)===!1?void f.showAlert(u,"You can only input numbers."):p(g)===!1?void f.showAlert(u,"Invalid email 
format."):void(G?(h.send(h.CONST.MAIN,h.CONST.CLK_ALARM_CREATE_PINPOINT_USER),l(b,c,d,e,g)):m(b,c,d,e,g)))},b.onCloseAlert=function(){f.closeAlert(u,t)},b.$on("alarmPinpointUser.configuration.load",function(a,b){F===!1&&o(""===b?{}:{department:b})}),b.$on("alarmPinpointUser.configuration.addUserCallback",function(a){f.hide(t)})}}}])}(jQuery),function(a){"use strict";pinpointApp.directive("alarmRuleDirective",["$rootScope","$document","$timeout","AlarmUtilService","AnalyticsService","TooltipService",function(b,c,d,e,f,g){return{restrict:"EA",replace:!0,templateUrl:"features/configuration/alarm/alarmRule.html?v="+G_BUILD_TIME,scope:!0,link:function(b,h){function i(){O===!0&&u.find("tr.remove").each(function(){k(a(this))}),b.onCancelEdit(),e.unsetFilterBackground(v),z.val(""),A.val("")}function j(a){e.showLoading(x,!1),r(e.extractID(a))}function k(a){a.removeClass("remove").find("span.removeTemplate").remove().end().find("span.remove").show().end().find("td").removeClass("remove"),O=!1}function l(){a('[data-toggle="alarm-popover"]').popover()}function m(a){if(!a.id)return a.text;var b=a.text.split("@");if(b.length>1){var d=c.get(0).createElement("img");return d.src="/images/icons/"+b[1]+".png",d.style.height="25px",d.style.paddingRight="3px",d.outerHTML+b[0]}return a.text}function n(){D.select2({placeholder:"Select an application.",searchInputPlaceholder:"Input your application name.",allowClear:!1,formatResult:m,formatSelection:m,escapeMarkup:function(a){return a}}).on("change",function(a){}),E.select2({searchInputPlaceholder:"Input your rule name.",placeholder:"Select an rule.",allowClear:!1,formatResult:m,formatSelection:m,escapeMarkup:function(a){return a}}).on("change",function(a){})}function o(a){for(var b,c=P.length,d=0;c>d;d++)if(P[d].ruleId==a){b=P[d];break}return b}function p(a,b,c,f,g,h,i){var 
j={applicationId:a,serviceType:b,userGroupId:L,checkerName:c,threshold:f,smsSend:g,emailSend:h,notes:i};e.sendCRUD("createRule",j,function(a){j.ruleId=a.ruleId,P.push(j),e.setTotal(w,P.length),d(function(){l()}),e.hide(x,C)},function(a){},y)}function q(a,b,c,f,g,h,i,j){var k={ruleId:a,applicationId:b,serviceType:c,userGroupId:L,checkerName:f,threshold:parseInt(g,10),smsSend:h,emailSend:i,notes:j};e.sendCRUD("updateRule",k,function(k){for(var m=0;m<P.length;m++)P[m].ruleId==a&&(P[m].applicationId=b,P[m].serviceType=c,P[m].checkerName=f,P[m].threshold=g,P[m].smsSend=h,P[m].emailSend=i,P[m].notes=j);e.hide(x,C),d(function(){l()})},function(a){},y)}function r(a){e.sendCRUD("removeRule",{ruleId:a},function(c){b.$apply(function(){for(var b=0;b<P.length;b++)if(P[b].ruleId==a){P.splice(b,1);break}}),e.setTotal(w,P.length),e.hide(x),O=!1},function(a){},y)}function s(a){e.sendCRUD("getRuleList",{userGroupId:L},function(a){M=!0,P=b.ruleList=a,e.setTotal(w,P.length),e.hide(x),d(function(){l()})},function(a){},y)}function t(){b.ruleSets.length>1||e.sendCRUD("getRuleSet",{},function(a){for(var c=0;c<a.length;c++)b.ruleSets.push({text:a[c]})},function(a){},y)}var u=a(h),v=u.find(".wrapper"),w=u.find(".total"),x=u.find(".some-loading"),y=u.find(".some-alert"),z=u.find("div.filter-input input[name=filterApplication]"),A=u.find("div.filter-input input[name=filterRule]"),B=u.find("div.filter-input button.trash"),C=u.find(".some-edit-content"),D=C.find("select[name=application]"),E=C.find("select[name=rule]"),F=C.find("input[name=threshold]"),G=C.find("input[name=sms]"),H=C.find("input[name=email]"),I=C.find("textarea"),J=C.find(".title-message"),K=a(['<span class="removeTemplate">','<button class="btn btn-danger confirm-cancel"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span></button>','<button class="btn btn-danger confirm-remove" style="margin-left:2px;"><span class="glyphicon glyphicon-ok" 
aria-hidden="true"></span></button>',"</span>"].join("")),L="",M=!1,N=!0,O=!1,P=b.ruleList=[],Q=u.find(".some-list-content .wrapper tbody");Q.on("click",function(c){var d=a(c.toElement||c.target),e=d.get(0).tagName.toLowerCase(),f=d.parents("tr");if("button"==e)d.hasClass("edit")?b.onUpdate(c):d.hasClass("confirm-remove")?j(f):d.hasClass("confirm-cancel")&&k(f);else if("span"==e)if(d.hasClass("remove")){if(O===!0)return;O=!0,f.addClass("remove").find("td:last-child").addClass("remove").find("span.remove").hide().end().append(K)}else d.hasClass("glyphicon-edit")?b.onUpdate(c):d.hasClass("glyphicon-remove")?k(f):d.hasClass("glyphicon-ok")&&j(f)}),g.init("alarmRules"),b.ruleSets=[{text:""}],b.onRefresh=function(){O!==!0&&(f.send(f.CONST.MAIN,f.CONST.CLK_ALARM_REFRESH_RULE),i(),e.showLoading(x,!1),s(!1))},b.onCreate=function(){O!==!0&&(N=!0,J.html("Create new pinpoint user"),D.select2("val",""),E.select2("val",""),F.val("0"),G.prop("checked",""),H.prop("checked",""),I.val(""),e.show(C))},b.onUpdate=function(b){if(O!==!0){N=!1;var c=a(b.toElement||b.target).parents("tr"),d=e.extractID(c),f=o(d);J.html("Update rule data."),D.select2("val",f.applicationId+"@"+f.serviceType).parent().prop("id","updateRule_"+d),E.select2("val",f.checkerName),F.val(f.threshold),G.prop("checked",f.smsSend),H.prop("checked",f.emailSend),I.val(f.notes),e.show(C)}},b.onInputFilter=function(c){return O!==!0?13==c.keyCode?void b.onFilterGroup():void(a.trim(z.val()).length>=3||a.trim(A.val()).length?B.removeClass("disabled"):B.addClass("disabled")):void 0},b.onFilterGroup=function(){if(O!==!0){var c=a.trim(z.val()),d=a.trim(A.val());if(0!==c.length&&c.length<3||0!==d.length&&d.length<3)return e.showLoading(x,!1),void e.showAlert(y,"You must enter at least three characters.");if(f.send(f.CONST.MAIN,f.CONST.CLK_ALARM_FILTER_RULE),""===c&&""===d)b.ruleList.length!=P.length&&(b.ruleList=P,e.unsetFilterBackground(v)),B.addClass("disabled");else{for(var 
g=[],h=(P.length,0);h<P.length;h++)""===c?-1!=P[h].checkerName.indexOf(d)&&g.push(P[h]):""===d?-1!=P[h].applicationId.indexOf(c)&&g.push(P[h]):-1!=P[h].applicationId.indexOf(c)&&-1!=P[h].checkerName.indexOf(d)&&g.push(P[h]);b.ruleList=g,e.setFilterBackground(v)}}},b.onFilterEmpty=function(){O!==!0&&(""===a.trim(z.val())&&""===a.trim(A.val())||(z.val(""),A.val(""),b.onFilterGroup()))},b.onInputEdit=function(a){27==a.keyCode&&(b.onCancelEdit(),a.stopPropagation())},b.onCancelEdit=function(){e.hide(C)},b.onApplyEdit=function(){var a=D.select2("val").split("@"),b=E.select2("val"),c=F.val(),d=G.prop("checked"),g=H.prop("checked"),h=I.val();return""===a[0]||""===b?(e.showLoading(x,!0),void e.showAlert(y,"Select application name and rule.")):(e.showLoading(x,!0),e.hasDuplicateItem(P,function(c){return c.applicationId===a[0]&&c.checkerName===b})&&N===!0?void e.showAlert(y,"Exist a same rule set in the lists"):void(N?(f.send(f.CONST.MAIN,f.CONST.CLK_ALARM_CREATE_RULE),p(a[0],a[1],b,c,d,g,h)):q(e.extractID(D.parent()),a[0],a[1],b,c,d,g,h)))},b.onCloseAlert=function(){e.closeAlert(y,x)},b.$on("alarmRule.configuration.load",function(a,b){L=b,i(),s(!0),t()}),b.$on("alarmRule.configuration.selectNone",function(a){i(),L="",P=b.ruleList=[]}),b.$on("alarmRule.applications.set",function(a,c){b.applications=c,n()})}}}])}(jQuery),function(a){"use strict";pinpointApp.constant("RealtimeChartCtrlConfig",{sendPrefix:"applicationName=",keys:{CODE:"code",TYPE:"type",RESULT:"result",STATUS:"status",COMMAND:"command",MESSAGE:"message",TIME_STAMP:"timeStamp",PARAMETERS:"parameters",APPLICATION_NAME:"applicationName",ACTIVE_THREAD_COUNTS:"activeThreadCounts"},values:{PING:"PING",PONG:"PONG",REQUEST:"REQUEST",RESPONSE:"RESPONSE",ACTIVE_THREAD_COUNT:"activeThreadCount"},template:{agentChart:'<div class="agent-chart"><div></div></div>',chartDirective:Handlebars.compile('<realtime-chart-directive timeout-max-count="{{timeoutMaxCount}}" chart-color="{{chartColor}}" xcount="{{xAxisCount}}" 
show-extra-info="{{showExtraInfo}}" request-label="requestLabelNames" namespace="{{namespace}}" width="{{width}}" height="{{height}}"></realtime-chart-directive>')},css:{borderWidth:2,height:180,navbarHeight:70,titleHeight:30},sumChart:{width:260,height:120},otherChart:{width:120,height:60},"const":{MIN_Y:10}}),pinpointApp.controller("RealtimeChartCtrl",["RealtimeChartCtrlConfig","$scope","$element","$rootScope","$compile","$timeout","$window","globalConfig","$location","RealtimeWebsocketService","AnalyticsService","TooltipService",function(b,c,d,e,f,g,h,i,j,k,l,m){function n(){O=d.find("div.agent-sum-chart"),P=d.find("div.agent-sum-chart div:first-child span:first-child"),Q=d.find("div.agent-sum-chart div:first-child span:last-child"),R=d.find("div.agent-chart-list"),S=d.find(".connection-message"),T=d.find(".handle .glyphicon"),U=d.find(".glyphicon-pushpin"),S.hide(),P.html(""),Q.html("0")}function o(){q("sum")===!1&&(O.append(f(b.template.chartDirective({width:b.sumChart.width,height:b.sumChart.height,namespace:"sum",chartColor:"sumChartColor",xAxisCount:W,showExtraInfo:"true",timeoutMaxCount:V}))(c)),_.sum=-1)}function p(){angular.isDefined(_.sum)?(_={},_.sum=-1):_={}}function q(a){return angular.isDefined(_[a])}function r(d){var e=a(b.template.agentChart).append(f(b.template.chartDirective({width:b.otherChart.width,height:b.otherChart.height,namespace:$.length,chartColor:"agentChartColor",xAxisCount:W,showExtraInfo:"false",timeoutMaxCount:V}))(c));R.append(e),x(d,$.length),$.push(e)}function s(){var a=k.open({onopen:function(a){E()},onmessage:function(a){t(a)},onclose:function(a){c.$apply(function(){I()})},ondelay:function(){k.close()},retry:function(){c.retryConnection()}});a&&o()}function t(a){switch(S.hide(),a[b.keys.TYPE]){case b.values.PING:k.send(ga);break;case b.values.RESPONSE:var c=a[b.keys.RESULT];if(c[b.keys.APPLICATION_NAME]!==Z)return;var d=c[b.keys.ACTIVE_THREAD_COUNTS],e=B(d);C(e),u(d,e,c[b.keys.TIME_STAMP])}}function u(a,d,e){var 
f=Math.max(D(),b["const"].MIN_Y),g=0,h=!0;for(var i in a)w(i,g),a[i][b.keys.CODE]===X?(h=!1,c.$broadcast("realtimeChartDirective.onData."+_[i],a[i][b.keys.STATUS],e,f,h)):c.$broadcast("realtimeChartDirective.onError."+_[i],a[i],e,f),z(g),g++;c.$broadcast("realtimeChartDirective.onData.sum",d,e,f,h),Q.html(g)}function v(a){return ha[b.keys.PARAMETERS][b.keys.APPLICATION_NAME]=a,JSON.stringify(ha)}function w(a,b){q(a)===!1&&(y(b)?x(a,b):r(a)),A(b,a)}function x(a,b){_[a]=b}function y(a){return $.length>a}function z(a){$[a].show()}function A(a,b){$[a].find("div").html(b)}function B(a){var c=[0,0,0,0];for(var d in a)a[d][b.keys.CODE]===X&&jQuery.each(a[d][b.keys.STATUS],function(a,b){c[a]+=b});return c}function C(a){aa.push(a.reduce(function(a,b){return a+b})),aa.length>W&&aa.shift()}function D(){return d3.max(aa,function(a){return a})}function E(){k.send(v(Z))}function F(){k.isOpened()===!1?s():E(),ea=!0}function G(){ea=!1,k.stopReceive(v(""))}function H(){e.$broadcast("realtimeChartDirective.clear.sum"),a.each($,function(a,b){e.$broadcast("realtimeChartDirective.clear."+a),b.hide()})}function I(){S.css("background-color","rgba(200, 200, 200, 0.9)"),S.find("h4").css("color","red").html("Closed connection.<br/><br/>Select node again."),S.find("button").show(),S.show()}function J(){S.css("background-color","rgba(138, 171, 136, 0.5)"),S.find("h4").css("color","blue").html("Waiting Connection..."),S.find("button").hide(),S.show()}function K(){d.animate({bottom:-fa,left:0},500,function(){T.removeClass("glyphicon-chevron-down").addClass("glyphicon-chevron-up")})}function L(){d.animate({bottom:0,left:0},500,function(){T.removeClass("glyphicon-chevron-up").addClass("glyphicon-chevron-down")})}function M(){d.innerWidth(d.parent().width()-b.css.borderWidth+"px")}function N(){U.css("color",ba?"red":"")}d=a(d);var O,P,Q,R,S,T,U,V=10,W=10,X=0,Y="",Z="",$=[],_={},aa=[0],ba=!0,ca=!1,da=!1,ea=!0,fa=b.css.height,ga=function(){var a={};return 
a[b.keys.TYPE]=b.values.PONG,JSON.stringify(a)}(),ha=function(){var a={};return a[b.keys.TYPE]=b.values.REQUEST,a[b.keys.COMMAND]=b.values.ACTIVE_THREAD_COUNT,a[b.keys.PARAMETERS]={},a}(),ia=null;m.init("realtime"),c.sumChartColor=["rgba(44, 160, 44, 1)","rgba(60, 129, 250, 1)","rgba(248, 199, 49, 1)","rgba(246, 145, 36, 1)"],c.agentChartColor=["rgba(44, 160, 44, .8)","rgba(60, 129, 250, .8)","rgba(248, 199, 49, .8)","rgba(246, 145, 36, .8)"],c.requestLabelNames=["1s","3s","5s","Slow"],c.bInitialized=!1,a(document).on("visibilitychange",function(){switch(document.visibilityState){case"hidden":ia=g(function(){k.close(),ia=null},6e4);break;case"visible":null!==ia?g.cancel(ia):c.retryConnection(),ia=null}}),n(),c.$on("realtimeChartController.close",function(){K();var a=ea;c.closePopup(),ea=a,N()}),c.$on("realtimeChartController.initialize",function(a,b,d,e){if((ba!==!0||Y!==e)&&/^\/main/.test(j.path())!==!1&&(ca=angular.isUndefined(b)?!1:b,d=angular.isUndefined(d)?"":d,Y=e,n(),P.html(Z=d),i.useRealTime!==!1&&ea!==!1)){if(ca===!1)return void K();p(),M(),c.bInitialized=!0,L(),c.closePopup(),P.html(Z=d),J(),F(),N()}}),c.retryConnection=function(){J(),F()},c.pin=function(){ba=!ba,l.send(l.CONST.MAIN,ba?l.CONST.CLK_REALTIME_CHART_PIN_ON:l.CONST.CLK_REALTIME_CHART_PIN_OFF),N()},c.resizePopup=function(){l.send(l.CONST.MAIN,l.CONST.TG_REALTIME_CHART_RESIZE),da?(fa=b.css.height,d.css({height:b.css.height+"px",bottom:"0px"}),R.css("height","150px")):(fa=h.innerHeight-b.css.navbarHeight,d.css({height:fa+"px",bottom:"0px"}),R.css("height",fa-b.css.titleHeight+"px")),da=!da},c.closePopup=function(){G(),H(),S.hide(),P.html(Z=""),Q.html("0")},a(h).on("resize",function(){M()})}])}(jQuery),function(){"use 
strict";pinpointApp.controller("MainCtrl",["filterConfig","$scope","$timeout","$routeParams","locationService","NavbarVoService","$window","SidebarTitleVoService","filteredMapUtilService","$rootElement","AnalyticsService","PreferenceService",function(a,b,c,d,e,f,g,h,i,j,k,l){k.send(k.CONST.MAIN_PAGE);var m,n,o,p,q,r;b.hasScatter=!1,g.htoScatter={},n=!0,o=!1,b.sidebarLoading=!0,c(function(){m=new f,d.application&&m.setApplication(d.application),d.readablePeriod&&m.setReadablePeriod(d.readablePeriod),d.queryEndDateTime&&m.setQueryEndDateTime(d.queryEndDateTime),m.setCalleeRange(l.getCalleeByApp(d.application)), m.setCallerRange(l.getCallerByApp(d.application)),m.isRealtime()?b.$broadcast("navbarDirective.initialize.realtime.andReload",m):angular.isDefined(d.application)&&angular.isUndefined(d.readablePeriod)?b.$broadcast("navbarDirective.initialize.andReload",m):(g.$routeParams=d,m.autoCalculateByQueryEndDateTimeAndReadablePeriod(),b.$broadcast("navbarDirective.initialize",m),b.$broadcast("scatterDirective.initialize",m),b.$broadcast("serverMapDirective.initialize",m))},500),p=function(){return e.path().split("/")[1]||"main"},q=function(){var a="/"+p()+"/"+m.getApplication()+"/";m.isRealtime()?(a+=m.getPeriodType(),g.$routeParams={application:m.getApplication(),readablePeriod:m.getPeriodType()}):a+=m.getReadablePeriod()+"/"+m.getQueryEndDateTime(),e.path()!==a&&("/main"===e.path()?e.path(a).replace():e.skipReload().path(a).replace(),g.$routeParams={application:m.getApplication(),readablePeriod:m.getReadablePeriod().toString(),queryEndDateTime:m.getQueryEndDateTime().toString()},b.$$phase||b.$apply())},r=function(a,b){var c=i.getFilteredMapUrlWithFilterVo(m,a,b);g.open(c,"")},b.getMainContainerClass=function(){return o?"no-data":""},b.getInfoDetailsClass=function(){var a=[];return b.hasScatter&&a.push("has-scatter"),b.hasFilter&&a.push("has-filter"),a.join(" 
")},b.$on("serverMapDirective.hasData",function(a){o=!1,b.sidebarLoading=!1}),b.$on("serverMapDirective.hasNoData",function(a){o=!0,b.sidebarLoading=!1}),b.$on("navbarDirective.changed",function(a,c){o=!1,m=c,q(m),g.htoScatter={},b.hasScatter=!1,b.sidebarLoading=!0,m.isRealtime()&&b.$broadcast("realtimeChartController.close"),b.$broadcast("sidebarTitleDirective.empty.forMain"),b.$broadcast("nodeInfoDetailsDirective.hide"),b.$broadcast("linkInfoDetailsDirective.hide"),b.$broadcast("scatterDirective.initialize",m),b.$broadcast("serverMapDirective.initialize",m),b.$broadcast("sidebarTitleDirective.empty.forMain")}),b.$on("serverMapDirective.passingTransactionResponseToScatterChart",function(a,c){b.$broadcast("scatterDirective.initializeWithNode",c)}),b.$on("serverMapDirective.nodeClicked",function(a,c,d,e,f,g){n=!0;var i=new h;i.setImageType(e.serviceType),e.isWas===!0?(b.hasScatter=!0,i.setTitle(e.applicationName),b.$broadcast("scatterDirective.initializeWithNode",e)):e.unknownNodeGroup?(i.setTitle(e.serviceType.replace("_"," ")),b.hasScatter=!1):(i.setTitle(e.applicationName),b.hasScatter=!1),b.hasFilter=!1,b.$broadcast("sidebarTitleDirective.initialize.forMain",i,e),b.$broadcast("nodeInfoDetailsDirective.initialize",c,d,e,f,m,null,g),b.$broadcast("linkInfoDetailsDirective.hide")}),b.$on("serverMapDirective.linkClicked",function(a,c,d,e,f){n=!1;var g=new h;e.unknownLinkGroup?g.setImageType(e.sourceInfo.serviceType).setTitle("Unknown Group from "+e.sourceInfo.applicationName):g.setImageType(e.sourceInfo.serviceType).setTitle(e.sourceInfo.applicationName).setImageType2(e.targetInfo.serviceType).setTitle2(e.targetInfo.applicationName),b.hasScatter=!1;var 
j=i.findFilterInNavbarVo(e.sourceInfo.applicationName,e.sourceInfo.serviceType,e.targetInfo.applicationName,e.targetInfo.serviceType,m);j?(b.hasFilter=!0,b.$broadcast("filterInformationDirective.initialize.forMain",j.oServerMapFilterVoService)):b.hasFilter=!1,b.$broadcast("sidebarTitleDirective.initialize.forMain",g),b.$broadcast("nodeInfoDetailsDirective.hide"),b.$broadcast("linkInfoDetailsDirective.initialize",c,d,e,f,m)}),b.$on("serverMapDirective.openFilteredMap",function(a,b,c){r(b,c)}),b.$on("linkInfoDetailsDirective.openFilteredMap",function(a,b,c){r(b,c)}),b.$on("linkInfoDetailsDirective.openFilterWizard",function(a,c,d){b.$broadcast("serverMapDirective.openFilterWizard",c,d)}),b.$on("linkInfoDetailsDirective.ResponseSummary.barClicked",function(a,b){r(b)}),b.$on("linkInfoDetailsDirective.showDetailInformationClicked",function(a,c,d){b.hasScatter=!1;var e=new h;e.setImageType(d.sourceInfo.serviceType).setTitle(d.sourceInfo.applicationName).setImageType2(d.targetInfo.serviceType).setTitle2(d.targetInfo.applicationName),b.$broadcast("sidebarTitleDirective.initialize.forMain",e),b.$broadcast("nodeInfoDetailsDirective.hide")}),b.$on("nodeInfoDetailDirective.showDetailInformationClicked",function(a,c,d){b.hasScatter=!1;var e=new h;e.setImageType(d.serviceType),d.unknownNodeGroup?(e.setTitle(d.serviceType.replace("_"," ")),b.hasScatter=!1):(e.setTitle(d.applicationName),b.hasScatter=!1),b.$broadcast("sidebarTitleDirective.initialize.forMain",e),b.$broadcast("linkInfoDetailsDirective.hide")}),b.loadingOption={hideTip:"init"},b.$watch("loadingOption.hideTip",function(a){if("init"!=a&&g.localStorage){var b=new Date;b.setDate(b.getDate()+30),g.localStorage.setItem("__HIDE_LOADING_TIP",a?b.valueOf():"-")}})}])}(),function(){"use strict";pinpointApp.controller("InspectorCtrl",["$scope","$timeout","$routeParams","locationService","NavbarVoService","AnalyticsService",function(a,b,c,d,e,f){f.send(f.CONST.INSPECTOR_PAGE);var g,h,i,j,k,l;b(function(){g=new 
e,c.application&&g.setApplication(c.application),c.readablePeriod&&g.setReadablePeriod(c.readablePeriod),c.queryEndDateTime&&g.setQueryEndDateTime(c.queryEndDateTime),c.agentId&&g.setAgentId(c.agentId),g.autoCalculateByQueryEndDateTimeAndReadablePeriod(),a.$emit("navbarDirective.initializeWithStaticApplication",g),a.$emit("agentListDirective.initialize",g)},500),a.$on("navbarDirective.changed",function(a,b){g=b,j()}),a.$on("agentListDirective.agentChanged",function(b,c){h=c,g.setAgentId(c.agentId),l()&&j(),h&&a.$emit("agentInfoDirective.initialize",g,h)}),i=function(){var a=d.path().split("/");return a[1]||"inspector"},j=function(){var b=k();l()&&("/inspector"===d.path()||d.skipReload().path(b).replace(),a.$emit("navbarDirective.initializeWithStaticApplication",g),a.$emit("agentListDirective.initialize",g))},k=function(){var a="/"+i()+"/"+g.getApplication()+"/"+g.getReadablePeriod()+"/"+g.getQueryEndDateTime();return g.getAgentId()&&(a+="/"+g.getAgentId()),a},l=function(){var a=k();return d.path()!==a}}])}(),function(){"use strict";pinpointApp.constant("TransactionListConfig",{applicationUrl:"/transactionmetadata.pinpoint",MAX_FETCH_BLOCK_SIZE:100}),pinpointApp.controller("TransactionListCtrl",["TransactionListConfig","$scope","$location","$routeParams","$rootScope","$timeout","$window","$http","webStorage","TimeSliderVoService","TransactionDaoService","AnalyticsService","helpContentService",function(a,b,c,d,e,f,g,h,i,j,k,l,m){l.send(l.CONST.TRANSACTION_LIST_PAGE);var n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F,G,H;f(function(){n=1,o=0,b.transactionDetailUrl="index.html#/transactionDetail",b.sidebarLoading=!0;var a=E(),c=F(),e=!angular.isUndefined(d.transactionInfo);if(e){var 
h=d.transactionInfo.lastIndexOf("-"),i=d.transactionInfo.lastIndexOf("-",h-1);s=[d.transactionInfo.substring(0,i),d.transactionInfo.substring(i+1,h),d.transactionInfo.substring(h+1)]}a&&c?(p=A(g.name),B(p.applicationName)?(q=C(p),G(e)):H(m.transactionList.openError.noData.replace(/\{\{application\}\}/,p.applicationName))):e===!1?H(m.transactionList.openError.noParent):(p=D(),q=[[s[1],s[2],s[0]]],G(e)),f(function(){$("#main-container").layout({north__minSize:20,north__size:(window.innerHeight-40)/2,center__maskContents:!0})},100)},100),H=function(a){alert(a),g.location.replace(g.location.href.replace("transactionList","main"))},G=function(a){r=new j,r.setTotal(q.length),t(a)},E=function(){return angular.isDefined(g.opener)},F=function(){if(angular.isUndefined(g.opener)||null===g.opener)return!1;var a=g.opener.$routeParams;if(angular.isDefined(d)&&angular.isDefined(a))if("realtime"===a.readablePeriod){if(angular.equals(d.application,a.application))return!0}else if(angular.equals(d.application,a.application)&&angular.equals(d.readablePeriod,a.readablePeriod)&&angular.equals(d.queryEndDateTime,a.queryEndDateTime))return!0;return!1},A=function(a){var b=a.split("|");return 4===b.length?{applicationName:b[0],type:b[1],min:b[2],max:b[3]}:{applicationName:b[0],nXFrom:b[1],nXTo:b[2],nYFrom:b[3],nYTo:b[4]}},D=function(){return{applicationName:d.application.split("@")[0],nXFrom:parseInt(s[1])-1e3,nXTo:parseInt(s[1])+1e3,nYFrom:0,nYTo:0}},B=function(a){return angular.isDefined(g.opener.htoScatter[a])},C=function(a){var b=g.opener.htoScatter[a.applicationName];return a.type?b.getDataByRange(a.type,a.min,a.max):b.getDataByXY(a.nXFrom,a.nXTo,a.nYFrom,a.nYTo)},w=function(a){b.$emit("transactionTableDirective.appendTransactionList",a.metadata)},x=function(){if(!q)return g.alert("Query failed - Query parameter cache deleted.\n\nPossibly due to scatter chart being refreshed."),!1;for(var 
b=[],c=o,d=0;c<a.MAX_FETCH_BLOCK_SIZE*n&&c<q.length;c++,d++)c>0&&b.push("&"),b=b.concat(["I",d,"=",q[c][0]]),b=b.concat(["&T",d,"=",q[c][1]]),b=b.concat(["&R",d,"=",q[c][2]]),o++;return n++,b},u=function(){y(x(),function(c){return 0===c.metadata.length?(b.$emit("timeSliderDirective.disableMore"),b.$emit("timeSliderDirective.changeMoreToDone"),!1):(c.metadata.length<a.MAX_FETCH_BLOCK_SIZE||r.getTotal()===c.metadata.length+r.getCount()?(b.$emit("timeSliderDirective.disableMore"),b.$emit("timeSliderDirective.changeMoreToDone"),r.setInnerFrom(p.nXFrom)):(b.$emit("timeSliderDirective.enableMore"),r.setInnerFrom(_.last(c.metadata).collectorAcceptTime)),w(c),r.addCount(c.metadata.length),b.$emit("timeSliderDirective.setInnerFromTo",r),void(b.sidebarLoading=!1))})},v=function(){a.MAX_FETCH_BLOCK_SIZE=1e8,u()},t=function(c){var d=x();y(d,function(d){return 0===d.metadata.length?(b.$emit("timeSliderDirective.disableMore"),b.$emit("timeSliderDirective.changeMoreToDone"),!1):(d.metadata.length<a.MAX_FETCH_BLOCK_SIZE||r.getTotal()===d.metadata.length?(b.$emit("timeSliderDirective.disableMore"),b.$emit("timeSliderDirective.changeMoreToDone"),r.setInnerFrom(p.nXFrom)):(b.$emit("timeSliderDirective.enableMore"),r.setInnerFrom(_.last(d.metadata).collectorAcceptTime)),w(d),r.setFrom(p.nXFrom),r.setTo(p.nXTo),r.setInnerTo(p.nXTo),r.setCount(d.metadata.length),b.$emit("timeSliderDirective.initialize",r),b.sidebarLoading=!1,void(c&&z({traceId:s[0],collectorAcceptTime:s[1],elapsed:s[2]})))})},y=function(b,c){h.post(a.applicationUrl,b.join(""),{headers:{"Content-Type":"application/x-www-form-urlencoded"}}).success(function(a,b){angular.isFunction(c)&&c(a)}).error(function(a,b){g.alert("Failed to fetching the request information.")})},z=function(a){c.path("/transactionList/"+d.application+"/"+d.readablePeriod+"/"+d.queryEndDateTime+"/"+a.traceId+"-"+a.collectorAcceptTime+"-"+a.elapsed,!1);var 
e="index.html#/transactionDetail";a.traceId&&a.collectorAcceptTime&&(e+="/"+g.encodeURIComponent(a.traceId)+"/"+a.collectorAcceptTime,f(function(){b.transactionDetailUrl=e}))},b.$on("transactionTableDirective.applicationSelected",function(a,b){z(b)}),b.$on("timeSliderDirective.moreClicked",function(a){b.sidebarLoading=!0,b.$emit("timeSliderDirective.disableMore"),f(function(){u()},1e3)})}])}(),function(){"use strict";pinpointApp.constant("TransactionDetailConfig",{applicationUrl:"/transactionInfo.pinpoint"}),pinpointApp.controller("TransactionDetailCtrl",["TransactionDetailConfig","$scope","$rootScope","$routeParams","$timeout","$rootElement","AlertsService","ProgressBarService","TransactionDaoService","$window","$location","AnalyticsService","TooltipService",function(a,b,c,d,e,f,g,h,i,j,k,l,m){l.send(l.CONST.TRANSACTION_DETAIL_PAGE);var n,o,p,q,r,s,t,u=!0;p=!1,c.wrapperClass="no-navbar",c.wrapperStyle={"padding-top":"70px"},n=new g(f),o=new h(f),$("#customLogPopup").modal("hide"),e(function(){d.traceId&&d.focusTimestamp&&(o.startLoading(),o.setLoading(30),i.getTransactionDetail(d.traceId,d.focusTimestamp,function(a,b){a||b.exception?(o.stopLoading(),a?n.showError("There is some error while downloading the data."):n.showError(b.exception)):(o.setLoading(70),q(b),r(),e(function(){o.setLoading(100),o.stopLoading()},100))}))}),q=function(a){b.transactionDetail=a,b.logLinkEnable=a.logLinkEnable||!1,b.loggingTransactionInfo=a.loggingTransactionInfo||!1,b.logButtonName=a.logButtonName||"",b.logPageUrl=a.logPageUrl||"",b.logDisableMessage=a.disableButtonMessage||"",b.completeStateClass=s(a.completeState),b.$digest(),f.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()},s=function(a){var 
b="label-important";return"Complete"===a?b="label-success":"Progress"===a&&(b="label-warning"),b},r=function(){p===!1&&(p=!0,b.$broadcast("distributedCallFlowDirective.initialize.forTransactionDetail",b.transactionDetail))},t=function(){b.searchMinTime=1e3,b.timelineSearchIndex=0,b.calltreeSearchIndex=0,b.searchMessage=""},b.calltreeSearchIndex=0,b.timelineSearchIndex=0,b.searchMinTime=1e3,b.searchMessage="",b.searchCall=function(){$("#CallStacks").is(":visible")?b.$broadcast("distributedCallFlowDirective.searchCall.forTransactionDetail",parseInt(b.searchMinTime),parseInt(b.calltreeSearchIndex)):b.$broadcast("timelineDirective.searchCall",parseInt(b.searchMinTime),parseInt(b.timelineSearchIndex))},b.viewLog=function(a){return b.loggingTransactionInfo===!1?($("#customLogPopup").find("h4").html("Notice").end().find("div.modal-body").html(b.logDisableMessage).end().modal("show"),!1):void window.open(a)},b.$watch("searchMinTime",function(a){b.calltreeSearchIndex=0,b.timelineSearchIndex=0}),b.openInNewWindow=function(){j.open(k.absUrl())},window.onresize=function(a){b.$broadcast("distributedCallFlowDirective.resize.forTransactionDetail"),b.$broadcast("timelineDirective.resize")},b.openTransactionView=function(){j.open("/#/transactionView/"+b.transactionDetail.agentId+"/"+b.transactionDetail.transactionId+"/"+b.transactionDetail.callStackStart)},b.$on("transactionDetail.selectDistributedCallFlowRow",function(a,c){l.send(l.CONST.CALLSTACK,l.CONST.CLK_DISTRIBUTED_CALL_FLOW),$("#traceTabs li:nth-child(1) 
a").trigger("click"),b.$broadcast("distributedCallFlowDirective.selectRow.forTransactionDetail",c)}),b.$on("transactionDetail.calltreeSearchCallResult",function(a,c){"Loop"==c?b.calltreeSearchIndex=1:(b.searchMessage=c.replace("{time}",b.searchMinTime),""===c&&b.calltreeSearchIndex++)}),b.$on("transactionDetail.timelineSearchCallResult",function(a,c){"Loop"===c?b.timelineSearchIndex=1:(b.searchMessage=c.replace("{time}",b.searchMinTime),""===c&&b.timelineSearchIndex++)}),$('#traceTabs li a[data-toggle="tab"]').on("shown.bs.tab",function(a){-1!=a.target.href.indexOf("#CallStacks")&&l.send(l.CONST.CALLSTACK,l.CONST.CLK_DISTRIBUTED_CALL_FLOW)}),$("#traceTabs li a").bind("click",function(a){a.preventDefault()}),$("#traceTabs li:nth-child(2) a").bind("click",function(a){l.send(l.CONST.CALLSTACK,l.CONST.CLK_SERVER_MAP),t(),b.$broadcast("serverMapDirective.initializeWithMapData",!1,b.transactionDetail)});$("#traceTabs li:nth-child(3) a").bind("click",function(a){l.send(l.CONST.CALLSTACK,l.CONST.CLK_RPC_TIMELINE),t(),u&&(b.$broadcast("timelineDirective.initialize",b.transactionDetail),u=!1)}),m.init("callTree")}])}(),function(){"use strict";pinpointApp.controller("FilteredMapCtrl",["filterConfig","$scope","$routeParams","$timeout","TimeSliderVoService","NavbarVoService","$window","SidebarTitleVoService","filteredMapUtilService","$rootElement","AnalyticsService",function(a,b,c,d,e,f,g,h,i,j,k){k.send(k.CONST.FILTEREDMAP_PAGE);var l,m,n,o,p,q,r,s;b.hasScatter=!1,g.htoScatter={},o=!0,p=!1,q=!1,b.sidebarLoading=!1,d(function(){l=new f,c.application&&l.setApplication(c.application),c.readablePeriod&&l.setReadablePeriod(c.readablePeriod),c.queryEndDateTime&&l.setQueryEndDateTime(c.queryEndDateTime),c.filter&&l.setFilter(c.filter),c.hint&&l.setHint(c.hint),g.$routeParams=c,l.autoCalculateByQueryEndDateTimeAndReadablePeriod(),m=(new 
e).setFrom(l.getQueryStartTime()).setTo(l.getQueryEndTime()).setInnerFrom(l.getQueryEndTime()-1).setInnerTo(l.getQueryEndTime()),d(function(){b.$broadcast("timeSliderDirective.initialize",m),b.$broadcast("serverMapDirective.initialize",l),b.$broadcast("scatterDirective.initialize",l)})},500),r=function(a,b){var c=i.getFilteredMapUrlWithFilterVo(l,a,b);g.open(c,"")},s=function(a){angular.isDefined(a)&&angular.forEach(a,function(a,c){b.$broadcast("scatterDirective.initializeWithData",c,a)})},b.getMainContainerClass=function(){return o?"no-data":""},b.getInfoDetailsClass=function(){var a=[];return b.hasScatter&&a.push("has-scatter"),b.hasFilter&&a.push("has-filter"),a.join(" ")},b.$on("serverMapDirective.hasData",function(a){o=!1,b.sidebarLoading=!1}),b.$on("serverMap.hasNoData",function(a){o=!0,b.sidebarLoading=!1}),b.$on("serverMapDirective.fetched",function(a,c,e){m.setInnerFrom(c),p=!0,q=!0,b.$broadcast("timeSliderDirective.setInnerFromTo",m),s(e.applicationScatterData),0===e.applicationMapData.nodeDataArray.length&&0===e.applicationMapData.linkDataArray.length?d(function(){b.$broadcast("timeSliderDirective.moreClicked")},500):b.$broadcast("timeSliderDirective.enableMore")}),b.$on("serverMapDirective.allFetched",function(a,c){m.setInnerFrom(m.getFrom()),p=!0,q=!0,b.$broadcast("timeSliderDirective.setInnerFromTo",m),b.$broadcast("timeSliderDirective.changeMoreToDone"),b.$broadcast("timeSliderDirective.disableMore"),s(c.applicationScatterData)}),b.$on("timeSliderDirective.moreClicked",function(a){var c=new f;c.setApplication(l.getApplication()).setQueryStartTime(l.getQueryStartTime()).setQueryEndTime(m.getInnerFrom()).autoCalcultateByQueryStartTimeAndQueryEndTime(),b.sidebarLoading=!0,b.$broadcast("timeSliderDirective.disableMore"),b.$broadcast("serverMapDirective.fetch",c.getQueryPeriod(),c.getQueryEndTime())}),b.$on("serverMapDirective.nodeClicked",function(a,c,d,e,f){n=!0;var g=new 
h;g.setImageType(e.serviceType),e.isWas===!0?(b.hasScatter=!0,g.setTitle(e.applicationName),b.$broadcast("scatterDirective.showByNode",e)):e.unknownNodeGroup?(g.setTitle(e.serviceType.replace("_"," ")),b.hasScatter=!1):(g.setTitle(e.applicationName),b.hasScatter=!1),b.hasFilter=!1,b.$broadcast("sidebarTitleDirective.initialize.forFilteredMap",g,e),b.$broadcast("nodeInfoDetailsDirective.initialize",c,d,e,f,l,p),b.$broadcast("linkInfoDetailsDirective.hide",c,d,e,f,l),p=!1}),b.$on("serverMapDirective.linkClicked",function(a,c,d,e,f){n=!1;var g=new h;e.unknownLinkGroup?g.setImageType(e.sourceInfo.serviceType).setTitle("Unknown Group from "+e.sourceInfo.applicationName):g.setImageType(e.sourceInfo.serviceType).setTitle(e.sourceInfo.applicationName).setImageType2(e.targetInfo.serviceType).setTitle2(e.targetInfo.applicationName),b.hasScatter=!1;var j=i.findFilterInNavbarVo(e.sourceInfo.applicationName,e.sourceInfo.serviceType,e.targetInfo.applicationName,e.targetInfo.serviceType,l);j?(b.hasFilter=!0,b.$broadcast("filterInformationDirective.initialize.forFilteredMap",j.oServerMapFilterVoService)):b.hasFilter=!1,b.$broadcast("sidebarTitleDirective.initialize.forFilteredMap",g),b.$broadcast("nodeInfoDetailsDirective.hide"),b.$broadcast("linkInfoDetailsDirective.initialize",c,d,e,f,l,q),q=!1}),b.$on("serverMapDirective.openFilteredMap",function(a,b,c){r(b,c)}),b.$on("linkInfoDetailsDirective.openFilteredMap",function(a,b,c){r(b,c)}),b.$on("linkInfoDetailsDirective.openFilterWizard",function(a,c,d){b.$broadcast("serverMapDirective.openFilterWizard",c,d)}),b.$on("linkInfoDetailsDirective.ResponseSummary.barClicked",function(a,b,c){r(b,c)}),b.$on("linkInfoDetail.showDetailInformationClicked",function(a,c,d){b.hasScatter=!1;var e=new 
h;e.setImageType(d.sourceInfo.serviceType).setTitle(d.sourceInfo.applicationName).setImageType2(d.targetInfo.serviceType).setTitle2(d.targetInfo.applicationName),b.$broadcast("sidebarTitleDirective.initialize.forFilteredMap",e),b.$broadcast("nodeInfoDetailsDirective.hide")}),b.$on("nodeInfoDetail.showDetailInformationClicked",function(a,c,d){b.hasScatter=!1;var e=new h;e.setImageType(d.serviceType).setTitle(d.applicationName),b.$broadcast("sidebarTitleDirective.initialize.forMain",e,d),b.$broadcast("linkInfoDetailsDirective.hide")})}])}(),function(){"use strict";pinpointApp.constant("TransactionViewConfig",{applicationUrl:"/transactionInfo.pinpoint"}),pinpointApp.controller("TransactionViewCtrl",["TransactionViewConfig","$scope","$rootScope","$rootElement","AlertsService","ProgressBarService","$timeout","$routeParams","NavbarVoService","TransactionDaoService","AgentDaoService","AnalyticsService","PreferenceService",function(a,b,c,d,e,f,g,h,i,j,k,l,m){l.send(l.CONST.TRANSACTION_VIEW_PAGE);var n,o,p,q,r,s,t,u;c.wrapperClass="no-navbar",c.wrapperStyle={"padding-top":"30px"},n=new e(d),o=new f(d),g(function(){h.agentId&&h.traceId&&h.focusTimestamp&&(o.startLoading(),o.setLoading(30),j.getTransactionDetail(h.traceId,h.focusTimestamp,function(a,c){(a||c.exception)&&(o.stopLoading(),a?n.showError("There is some error while downloading the 
data."):n.showError(c.exception)),o.setLoading(70),p(c),r(),s(),g(function(){o.setLoading(100),o.stopLoading(),$("#main-container").layout({north__minSize:50,north__size:210,onload_end:function(){b.$broadcast("distributedCallFlowDirective.resize.forTransactionView")},onresize_end:function(a){"center"===a&&(b.$broadcast("distributedCallFlowDirective.resize.forTransactionView"),b.$broadcast("agentChartGroupDirective.resize.forTransactionView"),b.$broadcast("serverMapDirective.zoomToFit"))},center__maskContents:!0})},100)}),t(h.agentId,h.focusTimestamp))},500),p=function(a){b.transactionDetail=a,b.completeStateClass=q(a.completeState),b.$digest(),d.find('[data-toggle="tooltip"]').tooltip("destroy").tooltip()},q=function(a){var b="label-important";return"Complete"===a?b="label-success":"Progress"===a&&(b="label-warning"),b},r=function(){b.$broadcast("distributedCallFlowDirective.initialize.forTransactionView",b.transactionDetail)},s=function(){var a=new i;a.setReadablePeriod(m.getPeriodTypes()[0]),a.setQueryEndDateTime(moment(parseInt(h.focusTimestamp)).format("YYYY-MM-DD-HH-mm-ss")),b.$broadcast("serverMapDirective.initializeWithMapData",!0,b.transactionDetail,a)},t=function(a,c){c=parseInt(c,10);var d={agentId:a,from:c-6e5,to:c+6e5,sampleRate:k.getSampleRate(20)};b.$broadcast("agentChartGroupDirective.initialize.forTransactionView",d)},u=function(a){b.$broadcast("agentChartGroupDirective.showCursorAt.forTransactionView",a)},b.$on("distributedCallFlowDirective.rowSelected.forTransactionView",function(a,b){var c;if(b.execTime){var d=5e3,e=new Date(b.execTime);c=moment(Math.floor(e.getTime()/d)*d).format("YYYY-MM-DD HH:mm:ss")}else c=!1;u(c)})}])}(),function(){"use strict";pinpointApp.controller("ScatterFullScreenModeCtrl",["$scope","$rootScope","$window","$routeParams","$timeout","NavbarVoService","AnalyticsService",function(a,b,c,d,e,f,g){g.send(g.CONST.SCATTER_FULL_SCREEN_PAGE),c.htoScatter=c.htoScatter||{},c.$routeParams=c.$routeParams||d;var 
h;b.wrapperClass="no-navbar",b.wrapperStyle={"padding-top":"0px"},e(function(){h=new f,d.application&&h.setApplication(d.application),d.readablePeriod&&h.setReadablePeriod(d.readablePeriod),d.queryEndDateTime&&h.setQueryEndDateTime(d.queryEndDateTime),h.autoCalculateByQueryEndDateTimeAndReadablePeriod(),a.$emit("scatterDirective.initialize",h),a.$emit("scatterDirective.initializeWithNode",{key:h.getApplicationName()+"^"+h.getServiceTypeName(),serviceType:h.getServiceTypeName(),applicationName:h.getApplicationName(),agentList:d.agentList.split(",")},800,600)},500)}])}(); //# sourceMappingURL=pinpoint.min.js.map
# Generated by Django 2.1.3 on 2018-11-15 11:35 from django.db import migrations import s3direct.fields class Migration(migrations.Migration): dependencies = [ ('cms', '0007_auto_20181114_1420'), ] operations = [ migrations.AlterField( model_name='report', name='media_original', field=s3direct.fields.S3DirectField(blank=True, null=True, verbose_name='Medien-Anhang'), ), migrations.AlterField( model_name='reporttranslation', name='media_original', field=s3direct.fields.S3DirectField(blank=True, null=True, verbose_name='Medien-Anhang'), ), ]
#include <std.h>

inherit VOTING;

// Voting booth room for the Clan of Kindly Woodspeople government vote.
// All ballot handling comes from the inherited VOTING base room; this file
// only sets up the room description and exits.
// Fix: removed the unused local `object ob;` that was declared but never used.
void create(){
    ::create();
    set_light(1);
    set_indoors(1);
    // %^...%^ sequences are mud colour codes and must stay exactly as written.
    set_short(" %^BOLD%^%^WHITE%^A voting booth ");
    set_long( @SEETH
%^CYAN%^You enter into a small room, and pull the curtains closed
behind you, so as to maintain your privacy. In front of you is a
large sign detailing various forms of government being suggested
for the %^BLUE%^Clan of Kindly Woodspeople. %^CYAN%^There is a
small wooden box below the sign in which you may place your vote.
Remember to choose wisely, for you will only have one chance.
SEETH
    );
    set_exits(([
        "out":"/d/shadow/room/city/cguild/ranger/rooms/hall1.c",
    ]));
}
import math from typing import List from typing import Optional from typing import Tuple from optuna.logging import get_logger from optuna.study import Study from optuna.study import StudyDirection from optuna.trial import FrozenTrial from optuna.trial import TrialState from optuna.visualization.plotly_imports import _imports from optuna.visualization.utils import _is_log_scale if _imports.is_successful(): from optuna.visualization.plotly_imports import Contour from optuna.visualization.plotly_imports import go from optuna.visualization.plotly_imports import make_subplots from optuna.visualization.plotly_imports import plotly from optuna.visualization.plotly_imports import Scatter logger = get_logger(__name__) def plot_contour(study: Study, params: Optional[List[str]] = None) -> "go.Figure": """Plot the parameter relationship as contour plot in a study. Note that, If a parameter contains missing values, a trial with missing values is not plotted. Example: The following code snippet shows how to plot the parameter relationship as contour plot. .. testcode:: import optuna def objective(trial): x = trial.suggest_uniform('x', -100, 100) y = trial.suggest_categorical('y', [-1, 0, 1]) return x ** 2 + y study = optuna.create_study() study.optimize(objective, n_trials=10) optuna.visualization.plot_contour(study, params=['x', 'y']) .. raw:: html <iframe src="../_static/plot_contour.html" width="100%" height="500px" frameborder="0"> </iframe> Args: study: A :class:`~optuna.study.Study` object whose trials are plotted for their objective values. params: Parameter list to visualize. The default is all parameters. Returns: A :class:`plotly.graph_objs.Figure` object. 
""" _imports.check() return _get_contour_plot(study, params) def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure": layout = go.Layout(title="Contour Plot",) trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE] if len(trials) == 0: logger.warning("Your study does not have any completed trials.") return go.Figure(data=[], layout=layout) all_params = {p_name for t in trials for p_name in t.params.keys()} if params is None: sorted_params = sorted(list(all_params)) elif len(params) <= 1: logger.warning("The length of params must be greater than 1.") return go.Figure(data=[], layout=layout) else: for input_p_name in params: if input_p_name not in all_params: raise ValueError("Parameter {} does not exist in your study.".format(input_p_name)) sorted_params = sorted(list(set(params))) param_values_range = {} for p_name in sorted_params: values = [t.params[p_name] for t in trials if p_name in t.params] param_values_range[p_name] = (min(values), max(values)) if len(sorted_params) == 2: x_param = sorted_params[0] y_param = sorted_params[1] sub_plots = _generate_contour_subplot(trials, x_param, y_param, study.direction) figure = go.Figure(data=sub_plots, layout=layout) figure.update_xaxes(title_text=x_param, range=param_values_range[x_param]) figure.update_yaxes(title_text=y_param, range=param_values_range[y_param]) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, type="log") if _is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type="log") else: figure = make_subplots( rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True ) figure.update_layout(layout) showscale = True # showscale option only needs to be specified once for x_i, x_param in enumerate(sorted_params): for y_i, y_param in enumerate(sorted_params): if 
x_param == y_param: figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1) else: sub_plots = _generate_contour_subplot( trials, x_param, y_param, study.direction ) contour = sub_plots[0] scatter = sub_plots[1] contour.update(showscale=showscale) # showscale's default is True if showscale: showscale = False figure.add_trace(contour, row=y_i + 1, col=x_i + 1) figure.add_trace(scatter, row=y_i + 1, col=x_i + 1) figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1) figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, x_param): log_range = [math.log10(p) for p in param_values_range[x_param]] figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) if _is_log_scale(trials, y_param): log_range = [math.log10(p) for p in param_values_range[y_param]] figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1) if x_i == 0: figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1) if y_i == len(sorted_params) - 1: figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1) return figure def _generate_contour_subplot( trials: List[FrozenTrial], x_param: str, y_param: str, direction: StudyDirection ) -> Tuple["Contour", "Scatter"]: x_indices = sorted(list({t.params[x_param] for t in trials if x_param in t.params})) y_indices = sorted(list({t.params[y_param] for t in trials if y_param in t.params})) if len(x_indices) < 2: logger.warning("Param {} unique value length is less than 2.".format(x_param)) return go.Contour(), go.Scatter() if len(y_indices) < 2: logger.warning("Param {} unique value length is less than 2.".format(y_param)) return go.Contour(), go.Scatter() z = [[float("nan") for _ in range(len(x_indices))] for _ in range(len(y_indices))] x_values = [] y_values = [] for trial in trials: if x_param not in trial.params or y_param not in trial.params: continue x_values.append(trial.params[x_param]) y_values.append(trial.params[y_param]) x_i = 
x_indices.index(trial.params[x_param]) y_i = y_indices.index(trial.params[y_param]) if isinstance(trial.value, int): value = float(trial.value) elif isinstance(trial.value, float): value = trial.value else: raise ValueError( "Trial{} has COMPLETE state, but its value is non-numeric.".format(trial.number) ) z[y_i][x_i] = value # TODO(Yanase): Use reversescale argument to reverse colorscale if Plotly's bug is fixed. # If contours_coloring='heatmap' is specified, reversesecale argument of go.Contour does not # work correctly. See https://github.com/pfnet/optuna/issues/606. colorscale = plotly.colors.PLOTLY_SCALES["Blues"] if direction == StudyDirection.MINIMIZE: colorscale = [[1 - t[0], t[1]] for t in colorscale] colorscale.reverse() contour = go.Contour( x=x_indices, y=y_indices, z=z, colorbar={"title": "Objective Value"}, colorscale=colorscale, connectgaps=True, contours_coloring="heatmap", hoverinfo="none", line_smoothing=1.3, ) scatter = go.Scatter( x=x_values, y=y_values, marker={"color": "black"}, mode="markers", showlegend=False ) return (contour, scatter)
# -*- coding: utf-8 -*- """ Created on Fri Feb 12 22:05:13 2021 @author: Gary """ import subprocess import shutil import datetime import pandas as pd import numpy as np update_fn = './sources/upload_dates.csv' today_str = '2021-03-12' make_final = True today = datetime.date.today() s= 'jupyter nbconvert --template=nbextensions --ExecutePreprocessor.allow_errors=True --ExecutePreprocessor.timeout=-1 --execute weekly_report.ipynb --to=html ' print(subprocess.run(s)) if make_final: print('copying result to Google Drive webshare') shutil.copyfile('weekly_report.html', f'c:/Users/Gary/Google Drive/webshare/weekly_reports/{today_str}.html') print('updating date file') updates = pd.read_csv(update_fn) updates.weekly_report = np.where(updates.weekly_report.isna(), today, updates.weekly_report) updates.to_csv(update_fn,index=False)
/* Highcharts JS v8.0.0 (2019-12-10) Exporting module (c) 2010-2019 Torstein Honsi License: www.highcharts.com/license */ (function(c){"object"===typeof module&&module.exports?(c["default"]=c,module.exports=c):"function"===typeof define&&define.amd?define("highcharts/modules/export-data",["highcharts","highcharts/modules/exporting"],function(q){c(q);c.Highcharts=q;return c}):c("undefined"!==typeof Highcharts?Highcharts:void 0)})(function(c){function q(b,a,c,F){b.hasOwnProperty(a)||(b[a]=F.apply(null,c))}c=c?c._modules:{};q(c,"mixins/ajax.js",[c["parts/Globals.js"],c["parts/Utilities.js"]],function(b,a){var c=a.objectEach; b.ajax=function(a){var h=b.merge(!0,{url:!1,type:"get",dataType:"json",success:!1,error:!1,data:!1,headers:{}},a);a={json:"application/json",xml:"application/xml",text:"text/plain",octet:"application/octet-stream"};var f=new XMLHttpRequest;if(!h.url)return!1;f.open(h.type.toUpperCase(),h.url,!0);h.headers["Content-Type"]||f.setRequestHeader("Content-Type",a[h.dataType]||a.text);c(h.headers,function(a,b){f.setRequestHeader(b,a)});f.onreadystatechange=function(){if(4===f.readyState){if(200===f.status){var a= f.responseText;if("json"===h.dataType)try{a=JSON.parse(a)}catch(n){h.error&&h.error(f,n);return}return h.success&&h.success(a)}h.error&&h.error(f,f.responseText)}};try{h.data=JSON.stringify(h.data)}catch(e){}f.send(h.data||!0)};b.getJSON=function(a,c){b.ajax({url:a,success:c,dataType:"json",headers:{"Content-Type":"text/plain"}})}});q(c,"mixins/download-url.js",[c["parts/Globals.js"]],function(b){var a=b.win,c=a.navigator,q=a.document,h=a.URL||a.webkitURL||a,f=/Edge\/\d+/.test(c.userAgent);b.dataURLtoBlob= function(b){if((b=b.match(/data:([^;]*)(;base64)?,([0-9A-Za-z+/]+)/))&&3<b.length&&a.atob&&a.ArrayBuffer&&a.Uint8Array&&a.Blob&&h.createObjectURL){var c=a.atob(b[3]),e=new a.ArrayBuffer(c.length);e=new a.Uint8Array(e);for(var d=0;d<e.length;++d)e[d]=c.charCodeAt(d);b=new a.Blob([e],{type:b[1]});return 
h.createObjectURL(b)}};b.downloadURL=function(e,h){var p=q.createElement("a");if("string"===typeof e||e instanceof String||!c.msSaveOrOpenBlob){if(f||2E6<e.length)if(e=b.dataURLtoBlob(e),!e)throw Error("Failed to convert to blob"); if("undefined"!==typeof p.download)p.href=e,p.download=h,q.body.appendChild(p),p.click(),q.body.removeChild(p);else try{var d=a.open(e,"chart");if("undefined"===typeof d||null===d)throw Error("Failed to open window");}catch(A){a.location.href=e}}else c.msSaveOrOpenBlob(e,h)}});q(c,"modules/export-data.src.js",[c["parts/Globals.js"],c["parts/Utilities.js"]],function(b,a){function c(b,a){var c=n.navigator,w=-1<c.userAgent.indexOf("WebKit")&&0>c.userAgent.indexOf("Chrome"),l=n.URL||n.webkitURL||n;try{if(c.msSaveOrOpenBlob&& n.MSBlobBuilder){var e=new n.MSBlobBuilder;e.append(b);return e.getBlob("image/svg+xml")}if(!w)return l.createObjectURL(new n.Blob(["\ufeff"+b],{type:a}))}catch(G){}}var q=a.defined,h=a.extend,f=a.isObject,e=a.pick,n=b.win,p=n.document,d=b.seriesTypes,A=b.downloadURL,C=b.fireEvent;b.setOptions({exporting:{csv:{columnHeaderFormatter:null,dateFormat:"%Y-%m-%d %H:%M:%S",decimalPoint:null,itemDelimiter:null,lineDelimiter:"\n"},showTable:!1,useMultiLevelHeaders:!0,useRowspanHeaders:!0},lang:{downloadCSV:"Download CSV", downloadXLS:"Download XLS",openInCloud:"Open in Highcharts Cloud",viewData:"View data table"}});b.addEvent(b.Chart,"render",function(){this.options&&this.options.exporting&&this.options.exporting.showTable&&!this.options.chart.forExport&&this.viewData()});b.Chart.prototype.setUpKeyToAxis=function(){d.arearange&&(d.arearange.prototype.keyToAxis={low:"y",high:"y"});d.gantt&&(d.gantt.prototype.keyToAxis={start:"x",end:"x"})};b.Chart.prototype.getDataRows=function(a){var c=this.hasParallelCoordinates, h=this.time,w=this.options.exporting&&this.options.exporting.csv||{},l=this.xAxis,d={},f=[],p=[],n=[],y,z=function(k,c,g){if(w.columnHeaderFormatter){var d=w.columnHeaderFormatter(k,c,g);if(!1!==d)return d}return k?k 
instanceof b.Axis?k.options.title&&k.options.title.text||(k.isDatetimeAxis?"DateTime":"Category"):a?{columnTitle:1<g?c:k.name,topLevelColumnTitle:k.name}:k.name+(1<g?" ("+c+")":""):"Category"},D=function(a,c,g){var k={},d={};c.forEach(function(c){var e=(a.keyToAxis&&a.keyToAxis[c]||c)+ "Axis";e=b.isNumber(g)?a.chart[e][g]:a[e];k[c]=e&&e.categories||[];d[c]=e&&e.isDatetimeAxis});return{categoryMap:k,dateTimeValueAxisMap:d}},g=[];var u=0;this.setUpKeyToAxis();this.series.forEach(function(k){var t=k.options.keys||k.pointArrayMap||["y"],r=t.length,v=!k.requireSorting&&{},f=l.indexOf(k.xAxis),B=D(k,t),m;if(!1!==k.options.includeInDataExport&&!k.options.isInternal&&!1!==k.visible){b.find(g,function(a){return a[0]===f})||g.push([f,u]);for(m=0;m<r;)y=z(k,t[m],t.length),n.push(y.columnTitle|| y),a&&p.push(y.topLevelColumnTitle||y),m++;var q={chart:k.chart,autoIncrement:k.autoIncrement,options:k.options,pointArrayMap:k.pointArrayMap};k.options.data.forEach(function(a,b){c&&(B=D(k,t,b));var g={series:q};k.pointClass.prototype.applyOptions.apply(g,[a]);a=g.x;var l=k.data[b]&&k.data[b].name;m=0;k.xAxis&&"name"!==k.exportKey||(a=l);v&&(v[a]&&(a+="|"+b),v[a]=!0);d[a]||(d[a]=[],d[a].xValues=[]);d[a].x=g.x;d[a].name=l;for(d[a].xValues[f]=g.x;m<r;)b=t[m],l=g[b],d[a][u+m]=e(B.categoryMap[b][l], B.dateTimeValueAxisMap[b]?h.dateFormat(w.dateFormat,l):null,l),m++});u+=m}});for(r in d)Object.hasOwnProperty.call(d,r)&&f.push(d[r]);var r=a?[p,n]:[n];for(u=g.length;u--;){var t=g[u][0];var v=g[u][1];var x=l[t];f.sort(function(a,b){return a.xValues[t]-b.xValues[t]});var E=z(x);r[0].splice(v,0,E);a&&r[1]&&r[1].splice(v,0,E);f.forEach(function(a){var b=a.name;x&&!q(b)&&(x.isDatetimeAxis?(a.x instanceof Date&&(a.x=a.x.getTime()),b=h.dateFormat(w.dateFormat,a.x)):b=x.categories?e(x.names[a.x],x.categories[a.x], a.x):a.x);a.splice(v,0,b)})}r=r.concat(f);C(this,"exportData",{dataRows:r});return r};b.Chart.prototype.getCSV=function(a){var 
b="",c=this.getDataRows(),d=this.options.exporting.csv,l=e(d.decimalPoint,","!==d.itemDelimiter&&a?(1.1).toLocaleString()[1]:"."),h=e(d.itemDelimiter,","===l?";":","),f=d.lineDelimiter;c.forEach(function(a,d){for(var e,m=a.length;m--;)e=a[m],"string"===typeof e&&(e='"'+e+'"'),"number"===typeof e&&"."!==l&&(e=e.toString().replace(".",l)),a[m]=e;b+=a.join(h);d<c.length-1&&(b+= f)});return b};b.Chart.prototype.getTable=function(a){var b='<table id="highcharts-data-table-'+this.index+'">',c=this.options,d=a?(1.1).toLocaleString()[1]:".",h=e(c.exporting.useMultiLevelHeaders,!0);a=this.getDataRows(h);var f=0,l=h?a.shift():null,q=a.shift(),n=function(a,b,c,h){var g=e(h,"");b="text"+(b?" "+b:"");"number"===typeof g?(g=g.toString(),","===d&&(g=g.replace(".",d)),b="number"):h||(b="empty");return"<"+a+(c?" "+c:"")+' class="'+b+'">'+g+"</"+a+">"};!1!==c.exporting.tableCaption&&(b+= '<caption class="highcharts-table-caption">'+e(c.exporting.tableCaption,c.title.text?c.title.text.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;").replace(/'/g,"&#x27;").replace(/\//g,"&#x2F;"):"Chart")+"</caption>");for(var p=0,z=a.length;p<z;++p)a[p].length>f&&(f=a[p].length);b+=function(a,b,e){var d="<thead>",f=0;e=e||b&&b.length;var g,l=0;if(g=h&&a&&b){a:if(g=a.length,b.length===g){for(;g--;)if(a[g]!==b[g]){g=!1;break a}g=!0}else g=!1;g=!g}if(g){for(d+="<tr>";f< e;++f){g=a[f];var m=a[f+1];g===m?++l:l?(d+=n("th","highcharts-table-topheading",'scope="col" colspan="'+(l+1)+'"',g),l=0):(g===b[f]?c.exporting.useRowspanHeaders?(m=2,delete b[f]):(m=1,b[f]=""):m=1,d+=n("th","highcharts-table-topheading",'scope="col"'+(1<m?' 
valign="top" rowspan="'+m+'"':""),g))}d+="</tr>"}if(b){d+="<tr>";f=0;for(e=b.length;f<e;++f)"undefined"!==typeof b[f]&&(d+=n("th",null,'scope="col"',b[f]));d+="</tr>"}return d+"</thead>"}(l,q,Math.max(f,q.length));b+="<tbody>";a.forEach(function(a){b+= "<tr>";for(var c=0;c<f;c++)b+=n(c?"td":"th",null,c?"":'scope="row"',a[c]);b+="</tr>"});b+="</tbody></table>";a={html:b};C(this,"afterGetTable",a);return a.html};b.Chart.prototype.downloadCSV=function(){var a=this.getCSV(!0);A(c(a,"text/csv")||"data:text/csv,\ufeff"+encodeURIComponent(a),this.getFilename()+".csv")};b.Chart.prototype.downloadXLS=function(){var a='<html xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:x="urn:schemas-microsoft-com:office:excel" xmlns="http://www.w3.org/TR/REC-html40"><head>\x3c!--[if gte mso 9]><xml><x:ExcelWorkbook><x:ExcelWorksheets><x:ExcelWorksheet><x:Name>Ark1</x:Name><x:WorksheetOptions><x:DisplayGridlines/></x:WorksheetOptions></x:ExcelWorksheet></x:ExcelWorksheets></x:ExcelWorkbook></xml><![endif]--\x3e<style>td{border:none;font-family: Calibri, sans-serif;} .number{mso-number-format:"0.00";} .text{ mso-number-format:"@";}</style><meta name=ProgId content=Excel.Sheet><meta charset=UTF-8></head><body>'+ this.getTable(!0)+"</body></html>";A(c(a,"application/vnd.ms-excel")||"data:application/vnd.ms-excel;base64,"+n.btoa(unescape(encodeURIComponent(a))),this.getFilename()+".xls")};b.Chart.prototype.viewData=function(){this.dataTableDiv||(this.dataTableDiv=p.createElement("div"),this.dataTableDiv.className="highcharts-data-table",this.renderTo.parentNode.insertBefore(this.dataTableDiv,this.renderTo.nextSibling));this.dataTableDiv.innerHTML=this.getTable();C(this,"afterViewData",this.dataTableDiv)};b.Chart.prototype.openInCloud= function(){function a(b){Object.keys(b).forEach(function(c){"function"===typeof b[c]&&delete b[c];f(b[c])&&a(b[c])})}var c=b.merge(this.userOptions);a(c);c={name:c.title&&c.title.text||"Chart 
title",options:c,settings:{constructor:"Chart",dataProvider:{csv:this.getCSV()}}};var d=JSON.stringify(c);(function(){var a=p.createElement("form");p.body.appendChild(a);a.method="post";a.action="https://cloud-api.highcharts.com/openincloud";a.target="_blank";var b=p.createElement("input");b.type="hidden";b.name= "chart";b.value=d;a.appendChild(b);a.submit();p.body.removeChild(a)})()};if(a=b.getOptions().exporting)h(a.menuItemDefinitions,{downloadCSV:{textKey:"downloadCSV",onclick:function(){this.downloadCSV()}},downloadXLS:{textKey:"downloadXLS",onclick:function(){this.downloadXLS()}},viewData:{textKey:"viewData",onclick:function(){this.viewData()}},openInCloud:{textKey:"openInCloud",onclick:function(){this.openInCloud()}}}),a.buttons&&a.buttons.contextButton.menuItems.push("separator","downloadCSV","downloadXLS", "viewData","openInCloud");d.map&&(d.map.prototype.exportKey="name");d.mapbubble&&(d.mapbubble.prototype.exportKey="name");d.treemap&&(d.treemap.prototype.exportKey="name")});q(c,"masters/modules/export-data.src.js",[],function(){})}); //# sourceMappingURL=export-data.js.map
var bcrypt = require('bcryptjs') module.exports = { friendlyName: 'Signup', description: 'Signup to the movie club.', inputs: { firstName: { type: 'string' }, lastName: { type: 'string' }, email: { type: 'string' }, password: { type: 'string' }, confirmPassword: { type: 'string', required: true }, isKid: { type: 'boolean', }, }, exits: { redirect: { responseType: 'redirect' }, problem: { responseType: 'badCombo' } }, fn: async function ({ firstName, lastName, email, password, confirmPassword, isKid }) { sails.log(firstName, lastName, email, password, confirmPassword, isKid); this.email = email.toLowerCase(); // this propably works var isUser = await User.findOne({ email: email }); if (isUser) throw { problem: '<h2> Email already in use! </h2>' } else { if (password === confirmPassword) { let newUser = await User.create({ firstName, lastName, email, password: await bcrypt.hash(password, 12), isKid }) throw { redirect: '/login' }; } else { throw { problem: '<h1>Passwords not match!!!</h1>' } } } } };
import React from 'react'; import createSvgIcon from './utils/createSvgIcon'; export default createSvgIcon( <React.Fragment><path d="M20.84 4.22c-.05-.12-.11-.23-.18-.34-.14-.21-.33-.4-.54-.54-.11-.07-.22-.13-.34-.18-.24-.1-.5-.16-.78-.16h-1V1h-2v2H8V1H6v2H5c-.42 0-.8.13-1.12.34-.21.14-.4.33-.54.54-.07.11-.13.22-.18.34-.1.24-.16.5-.16.78v14a2 2 0 0 0 2 2h14c.28 0 .54-.06.78-.16.12-.05.23-.11.34-.18.21-.14.4-.33.54-.54.21-.32.34-.71.34-1.12V5c0-.28-.06-.54-.16-.78zM5 19V5h14v14H5z" /><path d="M12 12.88c-2.03 0-6 1.08-6 3.58V18h12v-1.53c0-2.51-3.97-3.59-6-3.59zM8.31 16c.69-.56 2.38-1.12 3.69-1.12s3.01.56 3.69 1.12H8.31zM12 12c1.65 0 3-1.35 3-3s-1.35-3-3-3-3 1.35-3 3 1.35 3 3 3zm0-4c.55 0 1 .45 1 1s-.45 1-1 1-1-.45-1-1 .45-1 1-1z" /></React.Fragment> , 'PermContactCalendarOutlined');
# cython: language_level=3 # distutils: language = c++ # -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2016-2020, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. 
# ***************************************************************************** """ Interface of the counting function of the dpnp Notes ----- This module is a face or public interface file for the library it contains: - Interface functions - documentation for the functions - The functions parameters check """ import numpy from dpnp.dpnp_algo.dpnp_algo import * # TODO need to investigate why dpnp.dpnp_algo can not be used from dpnp.dparray import dparray from dpnp.dpnp_utils import * __all__ = [ 'count_nonzero' ] def count_nonzero(in_array1, axis=None, *, keepdims=False): """ Counts the number of non-zero values in the array ``in_array1``. For full documentation refer to :obj:`numpy.count_nonzero`. Limitations ----------- Parameter ``in_array1`` is supported as :obj:`dpnp.ndarray`. Otherwise the function will be executed sequentially on CPU. Parameter ``axis`` is supported only with default value `None`. Parameter ``keepdims`` is supported only with default value `False`. Examples -------- >>> import dpnp as np >>> np.count_nonzero(np.array([1, 0, 3, 0, 5]) 3 >>> np.count_nonzero(np.array([[1, 0, 3, 0, 5],[0, 9, 0, 7, 0]])) 5 """ is_dparray1 = isinstance(in_array1, dparray) if (not use_origin_backend(in_array1) and is_dparray1): if axis is not None: checker_throw_value_error("count_nonzero", "axis", type(axis), None) if keepdims is not False: checker_throw_value_error("count_nonzero", "keepdims", keepdims, False) result = dpnp_count_nonzero(in_array1) # scalar returned if result.shape == (1,): return result.dtype.type(result[0]) return result return numpy.count_nonzero(in_array1, axis, keepdims=keepdims)
import { hash } from "./core/hash"; import { compile } from "./core/compile"; import { getSheet } from "./core/get-sheet"; /** * css entry * @param {String} str */ function css(str) { const defs = [].slice.call(arguments, 1); const ctx = this || {}; return hash( str.map ? compile(str, defs, ctx.p) : str, getSheet(ctx.target), ctx.g ); } /** * CSS Global function to declare global styes */ const glob = css.bind({ g: 1 }); export { css, glob };
from django.contrib import admin from .models import Node from author.models import Author import random import string # Register your models here. class NodeAdmin(admin.ModelAdmin): # Our Node form in the admin panel model = Node list_display = ["server_username", "server_password", "hostname", "api_url", "node_auth_username", "node_auth_password"] readonly_fields = ['auth_user'] fields = [('auth_user'), ("server_username"), ("server_password"), ("hostname"), ("api_url"), ("node_auth_username"), ("node_auth_password")] def save_model(self, request, obj, form, change): # We need to create a new Author object and such displayName = obj.server_username password = obj.server_password hostname = obj.hostname # Email has to be unique. So we generate a new email. email = ''.join(random.choice(string.ascii_lowercase) for i in range(10)) + "@node.com" if len(Author.objects.filter(displayName=displayName)) == 1: # User exists, we are updating our node new_author = Author.objects.get(displayName=displayName) else: new_author = Author.objects.create_user( displayName=displayName, password=password, first_name="NODE", last_name="NODE", email=email) new_author.host = hostname new_author.is_node = True new_author.is_active = True new_author.save() obj.auth_user = new_author super().save_model(request, obj, form, change) def delete_model(self, request, obj): # We just delete the Author object we are attatched to # It will cascade node_user = Author.objects.get(displayName=obj.server_username) node_user.delete() # We want to delete all authors that belong to this host on deletion, which will cascade anything else they have touched. authors = Author.objects.filter(host__contains=obj.hostname) for author in authors: author.delete() # And sometimes the hostname on an author has no backslash: print(obj.hostname[:-1]) authors = Author.objects.filter(host=obj.hostname[:-1]) for author in authors: author.delete() super().delete_model(request, obj) admin.site.register(Node, NodeAdmin)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import import functools import inspect import os import pkg_resources import re import six import types from abc import abstractmethod from io import BytesIO from collections import defaultdict from botocore.config import Config from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse from distutils.version import LooseVersion from six.moves.urllib.parse import urlparse from werkzeug.wrappers import Request from moto import settings import responses from moto.packages.httpretty import HTTPretty from moto.compat import patch from .utils import ( convert_httpretty_response, convert_regex_to_flask_path, convert_flask_to_responses_response, ) ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") RESPONSES_VERSION = pkg_resources.get_distribution("responses").version class BaseMockAWS(object): nested_count = 0 mocks_active = False def __init__(self, backends): from moto.instance_metadata import instance_metadata_backend from moto.core import moto_api_backend self.backends = backends self.backends_for_urls = {} default_backends = { "instance_metadata": instance_metadata_backend, "moto_api": moto_api_backend, } self.backends_for_urls.update(self.backends) self.backends_for_urls.update(default_backends) self.FAKE_KEYS = { "AWS_ACCESS_KEY_ID": "foobar_key", "AWS_SECRET_ACCESS_KEY": "foobar_secret", } self.ORIG_KEYS = {} self.default_session_mock = patch("boto3.DEFAULT_SESSION", None) if self.__class__.nested_count == 0: self.reset() def __call__(self, func, reset=True): if inspect.isclass(func): return self.decorate_class(func) return self.decorate_callable(func, reset) def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self, reset=True): if not self.__class__.mocks_active: self.default_session_mock.start() self.mock_env_variables() self.__class__.mocks_active = True self.__class__.nested_count 
+= 1 if reset: for backend in self.backends.values(): backend.reset() self.enable_patching() def stop(self): self.__class__.nested_count -= 1 if self.__class__.nested_count < 0: raise RuntimeError("Called stop() before start().") if self.__class__.nested_count == 0: if self.__class__.mocks_active: try: self.default_session_mock.stop() except RuntimeError: # We only need to check for this exception in Python 3.6 and 3.7 # https://bugs.python.org/issue36366 pass self.unmock_env_variables() self.__class__.mocks_active = False self.disable_patching() def decorate_callable(self, func, reset): def wrapper(*args, **kwargs): self.start(reset=reset) try: result = func(*args, **kwargs) finally: self.stop() return result functools.update_wrapper(wrapper, func) wrapper.__wrapped__ = func return wrapper def decorate_class(self, klass): for attr in dir(klass): if attr.startswith("_"): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue # Check if this is a classmethod. If so, skip patching if inspect.ismethod(attr_value) and attr_value.__self__ is klass: continue # Check if this is a staticmethod. 
If so, skip patching for cls in inspect.getmro(klass): if attr_value.__name__ not in cls.__dict__: continue bound_attr_value = cls.__dict__[attr_value.__name__] if not isinstance(bound_attr_value, staticmethod): break else: # It is a staticmethod, skip patching continue try: setattr(klass, attr, self(attr_value, reset=False)) except TypeError: # Sometimes we can't set this for built-in types continue return klass def mock_env_variables(self): # "Mock" the AWS credentials as they can't be mocked in Botocore currently # self.env_variables_mocks = mock.patch.dict(os.environ, FAKE_KEYS) # self.env_variables_mocks.start() for k, v in self.FAKE_KEYS.items(): self.ORIG_KEYS[k] = os.environ.get(k, None) os.environ[k] = v def unmock_env_variables(self): # This doesn't work in Python2 - for some reason, unmocking clears the entire os.environ dict # Obviously bad user experience, and also breaks pytest - as it uses PYTEST_CURRENT_TEST as an env var # self.env_variables_mocks.stop() for k, v in self.ORIG_KEYS.items(): if v: os.environ[k] = v else: del os.environ[k] class HttprettyMockAWS(BaseMockAWS): def reset(self): HTTPretty.reset() def enable_patching(self): if not HTTPretty.is_enabled(): HTTPretty.enable() for method in HTTPretty.METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): HTTPretty.register_uri( method=method, uri=re.compile(key), body=convert_httpretty_response(value), ) def disable_patching(self): HTTPretty.disable() HTTPretty.reset() RESPONSES_METHODS = [ responses.GET, responses.DELETE, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT, ] class CallbackResponse(responses.CallbackResponse): """ Need to subclass so we can change a couple things """ def get_response(self, request): """ Need to override this so we can pass decode_content=False """ if not isinstance(request, Request): url = urlparse(request.url) if request.body is None: body = None elif isinstance(request.body, 
six.text_type): body = six.BytesIO(six.b(request.body)) elif hasattr(request.body, "read"): body = six.BytesIO(request.body.read()) else: body = six.BytesIO(request.body) req = Request.from_values( path="?".join([url.path, url.query]), input_stream=body, content_length=request.headers.get("Content-Length"), content_type=request.headers.get("Content-Type"), method=request.method, base_url="{scheme}://{netloc}".format( scheme=url.scheme, netloc=url.netloc ), headers=[(k, v) for k, v in six.iteritems(request.headers)], ) request = req headers = self.get_headers() result = self.callback(request) if isinstance(result, Exception): raise result status, r_headers, body = result body = responses._handle_body(body) headers.update(r_headers) return responses.HTTPResponse( status=status, reason=six.moves.http_client.responses.get(status), body=body, headers=headers, preload_content=False, # Need to not decode_content to mimic requests decode_content=False, ) def _url_matches(self, url, other, match_querystring=False): """ Need to override this so we can fix querystrings breaking regex matching """ if not match_querystring: other = other.split("?", 1)[0] if responses._is_string(url): if responses._has_unicode(url): url = responses._clean_unicode(url) if not isinstance(other, six.text_type): other = other.encode("ascii").decode("utf8") return self._url_matches_strict(url, other) elif isinstance(url, responses.Pattern) and url.match(other): return True else: return False botocore_mock = responses.RequestsMock( assert_all_requests_are_fired=False, target="botocore.vendored.requests.adapters.HTTPAdapter.send", ) responses_mock = responses.RequestsMock(assert_all_requests_are_fired=False) # Add passthrough to allow any other requests to work # Since this uses .startswith, it applies to http and https requests. 
responses_mock.add_passthru("http") def _find_first_match_legacy(self, request): for i, match in enumerate(self._matches): if match.matches(request): return match return None def _find_first_match(self, request): match_failed_reasons = [] for i, match in enumerate(self._matches): match_result, reason = match.matches(request) if match_result: return match, match_failed_reasons else: match_failed_reasons.append(reason) return None, match_failed_reasons # Modify behaviour of the matcher to only/always return the first match # Default behaviour is to return subsequent matches for subsequent requests, which leads to https://github.com/spulec/moto/issues/2567 # - First request matches on the appropriate S3 URL # - Same request, executed again, will be matched on the subsequent match, which happens to be the catch-all, not-yet-implemented, callback # Fix: Always return the first match if LooseVersion(RESPONSES_VERSION) < LooseVersion("0.12.1"): responses_mock._find_match = types.MethodType( _find_first_match_legacy, responses_mock ) else: responses_mock._find_match = types.MethodType(_find_first_match, responses_mock) BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] class MockRawResponse(BytesIO): def __init__(self, input): if isinstance(input, six.text_type): input = input.encode("utf-8") super(MockRawResponse, self).__init__(input) def stream(self, **kwargs): contents = self.read() while contents: yield contents contents = self.read() class BotocoreStubber(object): def __init__(self): self.enabled = False self.methods = defaultdict(list) def reset(self): self.methods.clear() def register_response(self, method, pattern, response): matchers = self.methods[method] matchers.append((pattern, response)) def __call__(self, event_name, request, **kwargs): if not self.enabled: return None response = None response_callback = None found_index = None matchers = self.methods.get(request.method) base_url = request.url.split("?", 1)[0] for i, 
(pattern, callback) in enumerate(matchers): if pattern.match(base_url): if found_index is None: found_index = i response_callback = callback else: matchers.pop(found_index) break if response_callback is not None: for header, value in request.headers.items(): if isinstance(value, six.binary_type): request.headers[header] = value.decode("utf-8") status, headers, body = response_callback( request, request.url, request.headers ) body = MockRawResponse(body) response = AWSResponse(request.url, status, headers, body) return response botocore_stubber = BotocoreStubber() BUILTIN_HANDLERS.append(("before-send", botocore_stubber)) def not_implemented_callback(request): status = 400 headers = {} response = "The method is not implemented" return status, headers, response class BotocoreEventMockAWS(BaseMockAWS): def reset(self): botocore_stubber.reset() responses_mock.reset() def enable_patching(self): botocore_stubber.enabled = True for method in BOTOCORE_HTTP_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): pattern = re.compile(key) botocore_stubber.register_response(method, pattern, value) if not hasattr(responses_mock, "_patcher") or not hasattr( responses_mock._patcher, "target" ): responses_mock.start() for method in RESPONSES_METHODS: # for backend in default_backends.values(): for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): responses_mock.add( CallbackResponse( method=method, url=re.compile(key), callback=convert_flask_to_responses_response(value), stream=True, match_querystring=False, ) ) responses_mock.add( CallbackResponse( method=method, url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, ) ) botocore_mock.add( CallbackResponse( method=method, url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, ) ) def disable_patching(self): 
botocore_stubber.enabled = False self.reset() try: responses_mock.stop() except RuntimeError: pass MockAWS = BotocoreEventMockAWS class ServerModeMockAWS(BaseMockAWS): def reset(self): import requests requests.post("http://localhost:5000/moto-api/reset") def enable_patching(self): if self.__class__.nested_count == 1: # Just started self.reset() from boto3 import client as real_boto3_client, resource as real_boto3_resource def fake_boto3_client(*args, **kwargs): region = self._get_region(*args, **kwargs) if region: if "config" in kwargs: kwargs["config"].__dict__["user_agent_extra"] += " region/" + region else: config = Config(user_agent_extra="region/" + region) kwargs["config"] = config if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) def fake_boto3_resource(*args, **kwargs): if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_resource(*args, **kwargs) def fake_httplib_send_output(self, message_body=None, *args, **kwargs): def _convert_to_bytes(mixed_buffer): bytes_buffer = [] for chunk in mixed_buffer: if isinstance(chunk, six.text_type): bytes_buffer.append(chunk.encode("utf-8")) else: bytes_buffer.append(chunk) msg = b"\r\n".join(bytes_buffer) return msg self._buffer.extend((b"", b"")) msg = _convert_to_bytes(self._buffer) del self._buffer[:] if isinstance(message_body, bytes): msg += message_body message_body = None self.send(msg) # if self._expect_header_set: # read, write, exc = select.select([self.sock], [], [self.sock], 1) # if read: # self._handle_expect_response(message_body) # return if message_body is not None: self.send(message_body) self._client_patcher = patch("boto3.client", fake_boto3_client) self._resource_patcher = patch("boto3.resource", fake_boto3_resource) if six.PY2: self._httplib_patcher = patch( "httplib.HTTPConnection._send_output", fake_httplib_send_output ) self._client_patcher.start() self._resource_patcher.start() 
if six.PY2: self._httplib_patcher.start() def _get_region(self, *args, **kwargs): if "region_name" in kwargs: return kwargs["region_name"] if type(args) == tuple and len(args) == 2: service, region = args return region return None def disable_patching(self): if self._client_patcher: self._client_patcher.stop() self._resource_patcher.stop() if six.PY2: self._httplib_patcher.stop() class Model(type): def __new__(self, clsname, bases, namespace): cls = super(Model, self).__new__(self, clsname, bases, namespace) cls.__models__ = {} for name, value in namespace.items(): model = getattr(value, "__returns_model__", False) if model is not False: cls.__models__[model] = name for base in bases: cls.__models__.update(getattr(base, "__models__", {})) return cls @staticmethod def prop(model_name): """ decorator to mark a class method as returning model values """ def dec(f): f.__returns_model__ = model_name return f return dec model_data = defaultdict(dict) class InstanceTrackerMeta(type): def __new__(meta, name, bases, dct): cls = super(InstanceTrackerMeta, meta).__new__(meta, name, bases, dct) if name == "BaseModel": return cls service = cls.__module__.split(".")[1] if name not in model_data[service]: model_data[service][name] = cls cls.instances = [] return cls @six.add_metaclass(InstanceTrackerMeta) class BaseModel(object): def __new__(cls, *args, **kwargs): instance = super(BaseModel, cls).__new__(cls) cls.instances.append(instance) return instance # Parent class for every Model that can be instantiated by CloudFormation # On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser class CloudFormationModel(BaseModel): @staticmethod @abstractmethod def cloudformation_name_type(): # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html # This must be implemented as a staticmethod with no parameters # Return None for resources that do not have a name property pass @staticmethod @abstractmethod def 
cloudformation_type(): # This must be implemented as a staticmethod with no parameters # See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html return "AWS::SERVICE::RESOURCE" @abstractmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json # and return an instance of the resource class pass @abstractmethod def update_from_cloudformation_json( cls, original_resource, new_resource_name, cloudformation_json, region_name ): # This must be implemented as a classmethod with parameters: # cls, original_resource, new_resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json, # delete the old resource and return the new one. Optionally inspect # the change in parameters and no-op when nothing has changed. pass @abstractmethod def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json # and delete the resource. Do not include a return statement. 
pass class BaseBackend(object): def _reset_model_refs(self): # Remove all references to the models stored for service, models in model_data.items(): for model_name, model in models.items(): model.instances = [] def reset(self): self._reset_model_refs() self.__dict__ = {} self.__init__() @property def _url_module(self): backend_module = self.__class__.__module__ backend_urls_module_name = backend_module.replace("models", "urls") backend_urls_module = __import__( backend_urls_module_name, fromlist=["url_bases", "url_paths"] ) return backend_urls_module @property def urls(self): """ A dictionary of the urls to be mocked with this service and the handlers that should be called in their place """ url_bases = self._url_module.url_bases unformatted_paths = self._url_module.url_paths urls = {} for url_base in url_bases: for url_path, handler in unformatted_paths.items(): url = url_path.format(url_base) urls[url] = handler return urls @property def url_paths(self): """ A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place """ unformatted_paths = self._url_module.url_paths paths = {} for unformatted_path, handler in unformatted_paths.items(): path = unformatted_path.format("") paths[path] = handler return paths @property def url_bases(self): """ A list containing the url_bases extracted from urls.py """ return self._url_module.url_bases @property def flask_paths(self): """ The url paths that will be used for the flask server """ paths = {} for url_path, handler in self.url_paths.items(): url_path = convert_regex_to_flask_path(url_path) paths[url_path] = handler return paths def decorator(self, func=None): if settings.TEST_SERVER_MODE: mocked_backend = ServerModeMockAWS({"global": self}) else: mocked_backend = MockAWS({"global": self}) if func: return mocked_backend(func) else: return mocked_backend def deprecated_decorator(self, func=None): if func: return HttprettyMockAWS({"global": self})(func) else: 
return HttprettyMockAWS({"global": self}) # def list_config_service_resources(self, resource_ids, resource_name, limit, next_token): # """For AWS Config. This will list all of the resources of the given type and optional resource name and region""" # raise NotImplementedError() class ConfigQueryModel(object): def __init__(self, backends): """Inits based on the resource type's backends (1 for each region if applicable)""" self.backends = backends def list_config_service_resources( self, resource_ids, resource_name, limit, next_token, backend_region=None, resource_region=None, aggregator=None, ): """For AWS Config. This will list all of the resources of the given type and optional resource name and region. This supports both aggregated and non-aggregated listing. The following notes the difference: - Non-Aggregated Listing - This only lists resources within a region. The way that this is implemented in moto is based on the region for the resource backend. You must set the `backend_region` to the region that the API request arrived from. resource_region can be set to `None`. - Aggregated Listing - This lists resources from all potential regional backends. For non-global resource types, this should collect a full list of resources from all the backends, and then be able to filter from the resource region. This is because an aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation from all resources in all regions for a given resource type*. The `backend_region` should be set to `None` for these queries, and the `resource_region` should optionally be set to the `Filters` region parameter to filter out resources that reside in a specific region. For aggregated listings, pagination logic should be set such that the next page can properly span all the region backends. As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter from there. 
It may be valuable to make this a concatenation of the region and resource name. :param resource_ids: A list of resource IDs :param resource_name: The individual name of a resource :param limit: How many per page :param next_token: The item that will page on :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a non-aggregated query. :param aggregator: If the query is an aggregated query, *AND* the resource has "non-standard" aggregation logic (mainly, IAM), you'll need to pass aggregator used. In most cases, this should be omitted/set to `None`. See the conditional logic under `if aggregator` in the moto/iam/config.py for the IAM example. :return: This should return a list of Dicts that have the following fields: [ { 'type': 'AWS::The AWS Config data type', 'name': 'The name of the resource', 'id': 'The ID of the resource', 'region': 'The region of the resource -- if global, then you may want to have the calling logic pass in the aggregator region in for the resource region -- or just us-east-1 :P' } , ... ] """ raise NotImplementedError() def get_config_resource( self, resource_id, resource_name=None, backend_region=None, resource_region=None ): """For AWS Config. This will query the backend for the specific resource type configuration. This supports both aggregated, and non-aggregated fetching -- for batched fetching -- the Config batching requests will call this function N times to fetch the N objects needing to be fetched. - Non-Aggregated Fetching - This only fetches a resource config within a region. The way that this is implemented in moto is based on the region for the resource backend. You must set the `backend_region` to the region that the API request arrived from. `resource_region` should be set to `None`. 
- Aggregated Fetching - This fetches resources from all potential regional backends. For non-global resource types, this should collect a full list of resources from all the backends, and then be able to filter from the resource region. This is because an aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation from all resources in all regions for a given resource type*. ... :param resource_id: :param resource_name: :param backend_region: :param resource_region: :return: """ raise NotImplementedError() class base_decorator(object): mock_backend = MockAWS def __init__(self, backends): self.backends = backends def __call__(self, func=None): if self.mock_backend != HttprettyMockAWS and settings.TEST_SERVER_MODE: mocked_backend = ServerModeMockAWS(self.backends) else: mocked_backend = self.mock_backend(self.backends) if func: return mocked_backend(func) else: return mocked_backend class deprecated_base_decorator(base_decorator): mock_backend = HttprettyMockAWS class MotoAPIBackend(BaseBackend): def reset(self): import moto.backends as backends for name, backends_ in backends.named_backends(): if name == "moto_api": continue for region_name, backend in backends_.items(): backend.reset() self.__init__() moto_api_backend = MotoAPIBackend()
(function() { let sourceNameIdx = 0; /** * Builder for creating a sequence of actions */ function Actions() { this.sourceTypes = new Map([["key", KeySource], ["pointer", PointerSource], ["general", GeneralSource]]); this.sources = new Map(); this.sourceOrder = []; for (let sourceType of this.sourceTypes.keys()) { this.sources.set(sourceType, new Map()); } this.currentSources = new Map(); for (let sourceType of this.sourceTypes.keys()) { this.currentSources.set(sourceType, null); } this.createSource("general"); this.tickIdx = 0; } Actions.prototype = { ButtonType: { LEFT: 0, MIDDLE: 1, RIGHT: 2, BACK: 3, FORWARD: 4, }, /** * Generate the action sequence suitable for passing to * test_driver.action_sequence * * @returns {Array} Array of WebDriver-compatible actions sequences */ serialize: function() { let actions = []; for (let [sourceType, sourceName] of this.sourceOrder) { let source = this.sources.get(sourceType).get(sourceName); let serialized = source.serialize(this.tickIdx + 1); if (serialized) { serialized.id = sourceName; actions.push(serialized); } } return actions; }, /** * Generate and send the action sequence * * @returns {Promise} fulfilled after the sequence is executed, * rejected if any actions fail. */ send: function() { let actions; try { actions = this.serialize(); } catch(e) { return Promise.reject(e); } return test_driver.action_sequence(actions); }, /** * Get the action source with a particular source type and name. * If no name is passed, a new source with the given type is * created. * * @param {String} type - Source type ('general', 'key', or 'pointer') * @param {String?} name - Name of the source * @returns {Source} Source object for that source. 
*/ getSource: function(type, name) { if (!this.sources.has(type)) { throw new Error(`${type} is not a valid action type`); } if (name === null || name === undefined) { name = this.currentSources.get(type); } if (name === null || name === undefined) { return this.createSource(type, null); } return this.sources.get(type).get(name); }, setSource: function(type, name) { if (!this.sources.has(type)) { throw new Error(`${type} is not a valid action type`); } if (!this.sources.get(type).has(name)) { throw new Error(`${name} is not a valid source for ${type}`); } this.currentSources.set(type, name); return this; }, /** * Add a new key input source with the given name * * @param {String} name - Name of the key source * @param {Bool} set - Set source as the default key source * @returns {Actions} */ addKeyboard: function(name, set=true) { this.createSource("key", name); if (set) { this.setKeyboard(name); } return this; }, /** * Set the current default key source * * @param {String} name - Name of the key source * @returns {Actions} */ setKeyboard: function(name) { this.setSource("key", name); return this; }, /** * Add a new pointer input source with the given name * * @param {String} type - Name of the key source * @param {String} pointerType - Type of pointing device * @param {Bool} set - Set source as the default key source * @returns {Actions} */ addPointer: function(name, pointerType="mouse", set=true) { this.createSource("pointer", name, {pointerType: pointerType}); if (set) { this.setPointer(name); } return this; }, /** * Set the current default pointer source * * @param {String} name - Name of the pointer source * @returns {Actions} */ setPointer: function(name) { this.setSource("pointer", name); return this; }, createSource: function(type, name, parameters={}) { if (!this.sources.has(type)) { throw new Error(`${type} is not a valid action type`); } let sourceNames = new Set(); for (let [_, name] of this.sourceOrder) { sourceNames.add(name); } if (!name) { do { name = 
"" + sourceNameIdx++; } while (sourceNames.has(name)) } else { if (sourceNames.has(name)) { throw new Error(`Alreay have a source of type ${type} named ${name}.`); } } this.sources.get(type).set(name, new (this.sourceTypes.get(type))(parameters)); this.currentSources.set(type, name); this.sourceOrder.push([type, name]); return this.sources.get(type).get(name); }, /** * Insert a new actions tick * * @param {Number?} duration - Minimum length of the tick in ms. * @returns {Actions} */ addTick: function(duration) { this.tickIdx += 1; if (duration) { this.pause(duration); } return this; }, /** * Add a pause to the current tick * * @param {Number?} duration - Minimum length of the tick in ms. * @returns {Actions} */ pause: function(duration) { this.getSource("general").addPause(this, duration); return this; }, /** * Create a keyDown event for the current default key source * * @param {String} key - Key to press * @param {String?} sourceName - Named key source to use or null for the default key source * @returns {Actions} */ keyDown: function(key, {sourceName=null}={}) { let source = this.getSource("key", sourceName); source.keyDown(this, key); return this; }, /** * Create a keyDown event for the current default key source * * @param {String} key - Key to release * @param {String?} sourceName - Named key source to use or null for the default key source * @returns {Actions} */ keyUp: function(key, {sourceName=null}={}) { let source = this.getSource("key", sourceName); source.keyUp(this, key); return this; }, /** * Create a pointerDown event for the current default pointer source * * @param {String} button - Button to press * @param {String?} sourceName - Named pointer source to use or null for the default * pointer source * @returns {Actions} */ pointerDown: function(x, y, {origin="viewport", button=this.ButtonType.LEFT, sourceName=null}={}) { let source = this.getSource("pointer", sourceName); source.pointerDown(this, button, x, y, origin); return this; }, /** * Create a 
pointerUp event for the current default pointer source * * @param {String} button - Button to release * @param {String?} sourceName - Named pointer source to use or null for the default pointer * source * @returns {Actions} */ pointerUp: function({button=this.ButtonType.LEFT, sourceName=null}={}) { let source = this.getSource("pointer", sourceName); source.pointerUp(this, button); return this; }, /** * Create a move event for the current default pointer source * * @param {Number} x - Destination x coordinate * @param {Number} y - Destination y coordinate * @param {String|Element} origin - Origin of the coordinate system. * Either "pointer", "viewport" or an Element * @param {Number?} duration - Time in ms for the move * @param {String?} sourceName - Named pointer source to use or null for the default pointer * source * @returns {Actions} */ pointerMove: function(x, y, {origin="viewport", duration, sourceName=null}={}) { let source = this.getSource("pointer", sourceName); source.pointerMove(this, x, y, duration, origin); return this; }, }; function GeneralSource() { this.actions = new Map(); } GeneralSource.prototype = { serialize: function(tickCount) { if (!this.actions.size) { return undefined; } let actions = []; let data = {"type": "none", "actions": actions}; for (let i=0; i<tickCount; i++) { if (this.actions.has(i)) { actions.push(this.actions.get(i)); } else { actions.push({"type": "pause"}); } } return data; }, addPause: function(actions, duration) { let tick = actions.tickIdx; if (this.actions.has(tick)) { throw new Error(`Already have a pause action for the current tick`); } this.actions.set(tick, {type: "pause", duration: duration}); }, }; function KeySource() { this.actions = new Map(); } KeySource.prototype = { serialize: function(tickCount) { if (!this.actions.size) { return undefined; } let actions = []; let data = {"type": "key", "actions": actions}; for (let i=0; i<tickCount; i++) { if (this.actions.has(i)) { actions.push(this.actions.get(i)); } 
else { actions.push({"type": "pause"}); } } return data; }, keyDown: function(actions, key) { let tick = actions.tickIdx; if (this.actions.has(tick)) { tick = actions.addTick().tickIdx; } this.actions.set(tick, {type: "keyDown", value: key}); }, keyUp: function(actions, key) { let tick = actions.tickIdx; if (this.actions.has(tick)) { tick = actions.addTick().tickIdx; } this.actions.set(tick, {type: "keyUp", value: key}); }, }; function PointerSource(parameters={pointerType: "mouse"}) { let pointerType = parameters.pointerType || "mouse"; if (!["mouse", "pen", "touch"].includes(pointerType)) { throw new Error(`Invalid pointerType ${pointerType}`); } this.type = pointerType; this.actions = new Map(); } PointerSource.prototype = { serialize: function(tickCount) { if (!this.actions.size) { return undefined; } let actions = []; let data = {"type": "pointer", "actions": actions, "parameters": {"pointerType": this.type}}; for (let i=0; i<tickCount; i++) { if (this.actions.has(i)) { actions.push(this.actions.get(i)); } else { actions.push({"type": "pause"}); } } return data; }, pointerDown: function(actions, button, x, y, origin) { let tick = actions.tickIdx; if (this.actions.has(tick)) { tick = actions.addTick().tickIdx; } this.actions.set(tick, {type: "pointerDown", button, x, y, origin}); }, pointerUp: function(actions, button) { let tick = actions.tickIdx; if (this.actions.has(tick)) { tick = actions.addTick().tickIdx; } this.actions.set(tick, {type: "pointerUp", button}); }, pointerMove: function(actions, x, y, duration, origin) { let tick = actions.tickIdx; if (this.actions.has(tick)) { tick = actions.addTick().tickIdx; } this.actions.set(tick, {type: "pointerMove", x, y, origin}); if (duration) { this.actions.get(tick).duration = duration; } }, }; test_driver.Actions = Actions; })();
// Copyright (c) 2014-2019 The PRiVCY Core developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef PRIV_STACKTRACES_H #define PRIV_STACKTRACES_H #include <string> #include <sstream> #include <exception> #include <cxxabi.h> #include <tinyformat.h> std::string DemangleSymbol(const std::string& name); std::string GetPrettyExceptionStr(const std::exception_ptr& e); std::string GetCrashInfoStrFromSerializedStr(const std::string& ciStr); template<typename T> std::string GetExceptionWhat(const T& e); template<> inline std::string GetExceptionWhat(const std::exception& e) { return e.what(); } // Default implementation template<typename T> inline std::string GetExceptionWhat(const T& e) { std::ostringstream s; s << e; return s.str(); } void RegisterPrettyTerminateHander(); void RegisterPrettySignalHandlers(); #endif//PRIV_STACKTRACES_H
/**
 * JS linting — runs ESLint over the project sources as a Mocha test suite.
 * Each listed path becomes a set of generated Mocha test cases that fail
 * when a file has lint errors.
 */
const lintPaths = [
  'assets/js',
  'config',
  'models',
  'routes',
  'test',
  'app.js'
];

require('mocha-eslint')(lintPaths);
import json
from threading import Thread

import numpy as np

from acconeer.exptool import configs, utils
from acconeer.exptool.clients import SocketClient, UARTClient


class Detector(Thread):
    """Base class for streaming detector threads.

    Connects to an Acconeer sensor (socket or UART, chosen from the demo
    controller's streaming client args), streams sweeps, runs each sweep
    through :meth:`process_data` and forwards the JSON result to the demo
    controller.

    Subclasses must assign ``self.config`` (a service config object) before
    the thread is started, and override :meth:`process_data`.
    """

    connection_checked = False

    def __init__(self, demo_ctrl, name):
        super().__init__(name=name)
        self.terminating = False  # set by stop() to end the streaming loop
        self._demo_ctrl = demo_ctrl
        self.alive = False

    def run(self):
        """Thread body: connect, stream sweeps until stop() is requested."""
        print("#### New thread for %s" % self.name)
        args = self._demo_ctrl.streaming_client_args
        print("args:", args)
        utils.config_logging(args)

        if args.socket_addr:
            client = SocketClient(args.socket_addr)
        else:
            port = args.serial_port or utils.autodetect_serial_port()
            client = UARTClient(port)

        try:
            client.connect()
        except Exception as e:
            # Bail out instead of continuing with an unusable client
            # (previously the code fell through and crashed in
            # setup_session with a less helpful error).
            print("Got exception:", e)
            return

        session_info = client.setup_session(self.config)
        print("Session info:\n", session_info, "\n")
        client.start_session()

        while not self.terminating:
            _, sweep_data = client.get_next()
            d = self.process_data(sweep_data)
            if d is not None:
                self._demo_ctrl.put_cmd(str(d))

        client.disconnect()

    def stop(self):
        """Request termination of the streaming loop and wait for it."""
        self.alive = False
        self.terminating = True
        self.join()
        print("%s stream stopped" % self.name)

    def process_data(self, line):
        # Fallback hook, used only if a subclass forgets to override it.
        print("default data processing")
        return {"unknown": line}

    def update_config(self, params):
        """Apply the common request parameters to ``self.config``.

        Missing keys keep their defaults (update_rate defaults to 10 Hz);
        unknown keys are ignored.
        """
        args = self._demo_ctrl.streaming_client_args
        self.config.sensor = args.sensors
        self.config.update_rate = 10
        if "range_start" in params and "range_end" in params:
            self.config.range_interval = [
                float(params["range_start"]),
                float(params["range_end"]),
            ]
        if "frequency" in params:
            self.config.update_rate = float(params["frequency"])
        if "gain" in params:
            self.config.gain = float(params["gain"])
        if "average" in params:
            self.config.running_average_factor = float(params["average"])


class PowerBinHandler(Detector):
    """Streams the power-bins service; emits ``{"powerbins": [...]}``."""

    detector_name = "powerbins"

    def __init__(self, demo_ctrl, params):
        super().__init__(demo_ctrl, self.detector_name)
        self.config = configs.PowerBinServiceConfig()
        self.update_config(params)
        if "bins" in params:
            self.config.bin_count = int(params["bins"])

    def process_data(self, a):
        data = [round(float(x)) for x in a]
        return json.dumps({"powerbins": data})


class EnvelopeHandler(Detector):
    """Streams the envelope service; emits ``{"envelope": [...]}``."""

    detector_name = "envelope"

    def __init__(self, demo_ctrl, params):
        super().__init__(demo_ctrl, self.detector_name)
        self.config = configs.EnvelopeServiceConfig()
        self.update_config(params)
        if "profile" in params:
            # BUG FIX: the original elif-chain compared against "2" for
            # profiles 3-5, making them unreachable.
            profiles = {
                "1": configs.EnvelopeServiceConfig.Profile.PROFILE_1,
                "2": configs.EnvelopeServiceConfig.Profile.PROFILE_2,
                "3": configs.EnvelopeServiceConfig.Profile.PROFILE_3,
                "4": configs.EnvelopeServiceConfig.Profile.PROFILE_4,
                "5": configs.EnvelopeServiceConfig.Profile.PROFILE_5,
            }
            try:
                self.config.profile = profiles[params["profile"]]
            except KeyError:
                print("Unknown profile")

    def process_data(self, a):
        data = [int(x) for x in a]
        return json.dumps({"envelope": data})


class IQHandler(Detector):
    """Streams the IQ service; emits ``{"iq": [{"re": .., "im": ..}, ...]}``."""

    detector_name = "iq"

    def __init__(self, demo_ctrl, params):
        super().__init__(demo_ctrl, self.detector_name)
        self.config = configs.IQServiceConfig()
        self.update_config(params)

    def process_data(self, a):
        # Downsample: keep every 18th complex sample; .real/.imag on the
        # Python complex values yield plain floats that json.dumps accepts.
        response = [
            {"re": z.real, "im": z.imag}
            for z in a.tolist()[0::18]
        ]
        return json.dumps({"iq": response})


class SparseHandler(Detector):
    """Streams the sparse service; emits ``{"sparse": [[...], ...]}``."""

    detector_name = "sparse"

    def __init__(self, demo_ctrl, params):
        super().__init__(demo_ctrl, self.detector_name)
        self.config = configs.SparseServiceConfig()
        self.config.sampling_mode = configs.SparseServiceConfig.SamplingMode.A
        self.update_config(params)

    def process_data(self, a):
        data = [[int(j) for j in i] for i in a]
        return json.dumps({"sparse": data})
// Load required packages var mongoose = require('mongoose'); // Define our user schema var UserSchema = new mongoose.Schema({ name: {type: String, required: true}, email: {type: String, required: true, unique: true}, pendingTasks: {type: [String], default: []}, dateCreated: {type: Date, default: Date.now}, }, { versionKey: false }); // Export the Mongoose model module.exports = mongoose.model('User', UserSchema);
//
//  ABI39_0_0RNSharedElementContent_h
//  ABI39_0_0React-native-shared-element
//

#ifndef ABI39_0_0RNSharedElementContent_h
#define ABI39_0_0RNSharedElementContent_h

#import <UIKit/UIKit.h>
#import "ABI39_0_0RNSharedElementTypes.h"

// Immutable wrapper around the visual content captured from a view that
// participates in a shared-element transition.
@interface ABI39_0_0RNSharedElementContent : NSObject
// Captured payload (e.g. an image or snapshot); concrete class depends on `type`.
@property (nonatomic, readonly) id data;
// Discriminator describing what `data` holds.
@property (nonatomic, readonly) ABI39_0_0RNSharedElementContentType type;
// Edge insets to apply when laying out the content.
@property (nonatomic, readonly) UIEdgeInsets insets;

// Designated initializer; stores the given payload, type and insets.
- (instancetype)initWithData:(id) data
                        type:(ABI39_0_0RNSharedElementContentType)type
                      insets:(UIEdgeInsets)insets;

// Human-readable name for `type` (debugging/logging).
- (NSString*) typeName;

// Returns YES when the view is (or wraps) an image view.
+ (BOOL) isKindOfImageView:(UIView*) view;
// Extracts the underlying UIImageView from a view, if any.
+ (UIImageView*) imageViewFromView:(UIView*) view;
// Computes the frame for `content` inside `layout` honoring the content
// mode; `reverse` inverts the mapping.
+ (CGRect) layoutForRect:(CGRect)layout content:(ABI39_0_0RNSharedElementContent*) content contentMode:(UIViewContentMode) contentMode reverse:(BOOL)reverse;

@end

#endif
# Copyright 2020 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo # (either separately or in combination, "QuantumBlack Trademarks") are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. """investor """ __version__ = "0.1"
'''
Testing tech
'''
import pytest
import numpy as np

from freelunch.base import *
from freelunch.util import UnpicklableObjectiveFunction
from freelunch.darwin import rand_1
from freelunch.benchmarks import exponential


def test_hyp_parse():
    """parse_hyper resolves a strategy class to an instance and rejects
    non-callables."""
    opt = optimiser(exponential())
    assert rand_1.__name__ == opt.parse_hyper(rand_1).__class__.__name__
    with pytest.raises(AttributeError):
        opt.parse_hyper('Not a function')


def test_no_optimiser():
    """The optimiser base class cannot be constructed without an objective,
    nor run directly."""
    with pytest.raises(TypeError):
        optimiser()
    with pytest.raises(AttributeError):
        optimiser(lambda x: None).run()
    with pytest.raises(AttributeError):
        optimiser(lambda x: None).run_mp()


def test_unpicklable():
    """Multiprocess runs must reject objectives that cannot be pickled."""
    # Deliberately a lambda: lambdas are unpicklable, which is exactly the
    # condition under test -- do not replace with a module-level def.
    obj = lambda x: None
    x = optimiser(obj)
    with pytest.raises(UnpicklableObjectiveFunction):
        x(n_runs=2, n_workers=2)


def test_naughty_obj():
    """Objectives returning NaN/inf/non-numeric values must be sanitized
    to None by the wrapped obj()."""
    opt = optimiser(obj=lambda x: np.random.choice([np.nan, np.inf, 'a string']))
    for _ in range(20):
        # Identity comparison (`is None`) per PEP 8, not `== None`.
        assert opt.obj([]) is None
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test read functionality for OGR OGDI driver. # Author: Even Rouault <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2010-2011, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
###############################################################################

import os
import sys

from osgeo import ogr

sys.path.append( '../pymod' )

import gdaltest
import ogrtest

###############################################################################
# Open the OGDI test dataset, check layer inventory and one geometry.


def ogr_ogdi_1():

    ogrtest.ogdi_ds = None

    # Skip tests when -fsanitize is used because of memleaks in libogdi
    if 'TRAVIS_BRANCH' in os.environ and \
       os.environ['TRAVIS_BRANCH'].find('sanitize') >= 0:
        print('Skipping because of memory leaks in OGDI')
        ogrtest.ogdi_drv = None
        return 'skip'

    try:
        ogrtest.ogdi_drv = ogr.GetDriverByName('OGDI')
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate.
    except Exception:
        ogrtest.ogdi_drv = None

    if ogrtest.ogdi_drv is None:
        return 'skip'

    if not gdaltest.download_file('http://freefr.dl.sourceforge.net/project/ogdi/OGDI_Test_Suite/3.1/ogdits-3.1.0.zip', 'ogdits-3.1.0.zip'):
        return 'skip'

    try:
        os.stat('tmp/cache/ogdits-3.1')
    # os.stat raises OSError when the path is missing (was a bare except).
    except OSError:
        try:
            gdaltest.unzip( 'tmp/cache', 'tmp/cache/ogdits-3.1.0.zip')
            try:
                os.stat('tmp/cache/ogdits-3.1')
            except OSError:
                return 'skip'
        except Exception:
            return 'skip'

    url_name ='gltp:/vrf/' + os.getcwd()+ '/tmp/cache/ogdits-3.1/data/vpf/vm2alv2/texash'

    ds = ogr.Open(url_name)
    ogrtest.ogdi_ds = ds

    if ds is None:
        gdaltest.post_reason('cannot open ' + url_name)
        return 'fail'

    if ds.GetLayerCount() != 57:
        print(ds.GetLayerCount())
        gdaltest.post_reason('did not get expected layer count')
        return 'fail'

    # (layer name, expected geometry type, expected feature count)
    layers = [ ('libref@libref(*)_line', ogr.wkbLineString, 15),
               ('libreft@libref(*)_text', ogr.wkbPoint, 4),
               ('markersp@bnd(*)_point', ogr.wkbPoint, 40),
               ('polbnda@bnd(*)_area', ogr.wkbPolygon, 6)]

    for l in layers:
        lyr = ds.GetLayerByName(l[0])
        if lyr.GetLayerDefn().GetGeomType() != l[1]:
            return 'fail'
        if lyr.GetFeatureCount() != l[2]:
            print(lyr.GetFeatureCount())
            return 'fail'
        #if l[1] != ogr.wkbNone:
        #    if lyr.GetSpatialRef().ExportToWkt().find('WGS 84') == -1:
        #        return 'fail'

    lyr = ds.GetLayerByName('libref@libref(*)_line')
    feat = lyr.GetNextFeature()

    wkt = 'LINESTRING (-97.570159912109375 31.242000579833984,-97.569938659667969 31.242116928100586,-97.562828063964844 31.245765686035156,-97.558868408203125 31.247797012329102,-97.555778503417969 31.249361038208008,-97.55413818359375 31.250171661376953)'
    ref_geom = ogr.CreateGeometryFromWkt(wkt)
    if ogrtest.check_feature_geometry(feat, ref_geom) != 0:
        print(feat.GetGeometryRef().ExportToWkt())
        return 'fail'

    return 'success'

###############################################################################
# Run test_ogrsf


def ogr_ogdi_2():

    if ogrtest.ogdi_ds is None:
        return 'skip'

    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        return 'skip'

    url_name ='gltp:/vrf/' + os.getcwd()+ '/tmp/cache/ogdits-3.1/data/vpf/vm2alv2/texash'

    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' --config OGR_OGDI_LAUNDER_LAYER_NAMES YES -ro "' + url_name + '" markersp_bnd contourl_elev polbnda_bnd extractp_ind')

    if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
        print(ret)
        return 'fail'

    return 'success'

###############################################################################
# Test GetFeature()


def ogr_ogdi_3():

    if ogrtest.ogdi_ds is None:
        return 'skip'

    # Read reference features sequentially, then fetch the same features by
    # FID (deliberately out of order) and check they match.
    lyr0 = ogrtest.ogdi_ds.GetLayer(0)
    lyr0.ResetReading()
    feat00_ref = lyr0.GetNextFeature()
    feat01_ref = lyr0.GetNextFeature()
    feat02_ref = lyr0.GetNextFeature()

    lyr1 = ogrtest.ogdi_ds.GetLayer(1)
    lyr1.ResetReading()
    feat10_ref = lyr1.GetNextFeature()
    feat11_ref = lyr1.GetNextFeature()

    feat02 = lyr0.GetFeature(2)
    feat00 = lyr0.GetFeature(0)
    feat01 = lyr0.GetFeature(1)

    feat10 = lyr1.GetFeature(0)
    feat11 = lyr1.GetFeature(1)

    if not feat00.Equal(feat00_ref):
        gdaltest.post_reason('features not equal')
        return 'fail'

    if not feat01.Equal(feat01_ref):
        gdaltest.post_reason('features not equal')
        return 'fail'

    if not feat02.Equal(feat02_ref):
        gdaltest.post_reason('features not equal')
        return 'fail'

    if not feat10.Equal(feat10_ref):
        gdaltest.post_reason('features not equal')
        return 'fail'

    if not feat11.Equal(feat11_ref):
        gdaltest.post_reason('features not equal')
        return 'fail'

    return 'success'

###############################################################################
# Extract of full dataset


def ogr_ogdi_4():

    if ogrtest.ogdi_drv is None:
        return 'skip'

    url_name ='gltp:/vrf/' + os.getcwd()+ '/data/vm2alv2_texash/texash'

    ds = ogr.Open(url_name)

    if ds is None:
        gdaltest.post_reason('cannot open dataset')
        return 'fail'

    if ds.GetLayerCount() != 6:
        print(ds.GetLayerCount())
        gdaltest.post_reason('did not get expected layer count')
        return 'fail'

    layers = [ ('polbnda@bnd(*)_area', ogr.wkbPolygon, 6)]

    for l in layers:
        lyr = ds.GetLayerByName(l[0])
        if lyr.GetLayerDefn().GetGeomType() != l[1]:
            return 'fail'
        if lyr.GetFeatureCount() != l[2]:
            print(lyr.GetFeatureCount())
            return 'fail'

    lyr = ds.GetLayerByName('polbnda@bnd(*)_area')
    feat = lyr.GetNextFeature()

    if feat['id'] != 1 or feat['f_code'] != 'FA001' or feat['acc'] != 1:
        gdaltest.post_reason('bad attributes')
        feat.DumpReadable()
        return 'fail'

    wkt = 'POLYGON ((-97.6672973632812 31.250171661377,-97.5832977294922 31.250171661377,-97.5780029296875 31.250171661377,-97.5780029296875 31.250171661377,-97.5780944824219 31.2494583129883,-97.5779724121094 31.2492084503174,-97.577751159668 31.24880027771,-97.5776443481445 31.2484683990479,-97.5775451660156 31.2482070922852,-97.5774078369141 31.2479457855225,-97.5772705078125 31.2477989196777,-97.5771331787109 31.2477321624756,-97.5768661499023 31.2476787567139,-97.5766830444336 31.2476959228516,-97.5763168334961 31.2477016448975,-97.576042175293 31.247673034668,-97.5757141113281 31.2475509643555,-97.5754852294922 31.2473278045654,-97.5752792358398 31.2470207214356,-97.5751190185547 31.2467250823975,-97.5750122070312 31.2465076446533,-97.5748443603516 31.2462825775147,-97.5746002197266 31.2460918426514,-97.5742874145508 31.2459144592285,-97.5739288330078 31.2458171844482,-97.5736083984375 31.2457542419434,-97.5731201171875 31.2456817626953,-97.5728302001953 31.245641708374,-97.5724792480469 31.2455806732178,-97.5721817016602 31.2454471588135,-97.5719223022461 31.2453022003174,-97.5717086791992 31.2450218200684,-97.5715408325195 31.2446899414062,-97.5713882446289 31.2445201873779,-97.5711669921875 31.2442722320557,-97.5710678100586 31.2440910339355,-97.5711975097656 31.2438926696777,-97.5713577270508 31.2437191009521,-97.5718154907227 31.2434253692627,-97.5724258422852 31.2431831359863,-97.5726470947266 31.2430419921875,-97.5728530883789 31.2427291870117,-97.5728759765625 31.2424869537354,-97.57275390625 31.2423858642578,-97.5727996826172 31.2423534393311,-97.5712738037109 31.2422771453857,-97.5710067749023 31.2422466278076,-97.5707092285156 31.2421951293945,-97.5702285766602 31.2420444488525,-97.5701599121094 31.242000579834,-97.5701599121094 31.242000579834,-97.5794296264648 31.2372093200684,-97.5909194946289 31.2314224243164,-97.6050415039062 31.2241363525391,-97.6213302612305 31.2157878875732,-97.6490707397461 31.201566696167,-97.6662445068359 31.1928386688232,-97.6803207397461 31.1855792999268,-97.6936721801758 31.1787204742432,-97.7042617797852 31.1732997894287,-97.7107391357422 31.1699485778809,-97.7178192138672 31.1663246154785,-97.7325134277344 31.1587982177734,-97.7502975463867 31.1499614715576,-97.7502975463867 31.1499614715576,-97.7502975463867 31.1671733856201,-97.7502975463867 31.1671733856201,-97.7502975463867 31.250171661377,-97.6672973632812 31.250171661377))'
    ref_geom = ogr.CreateGeometryFromWkt(wkt)
    if ogrtest.check_feature_geometry(feat, ref_geom) != 0:
        print(feat.GetGeometryRef().ExportToWkt())
        return 'fail'

    ds = None

    # Test opening one single layer
    ds = ogr.Open(url_name +':polbnda@bnd(*):area')

    if ds is None:
        gdaltest.post_reason('cannot open dataset')
        return 'fail'

    if ds.GetLayerCount() != 1:
        print(ds.GetLayerCount())
        gdaltest.post_reason('did not get expected layer count')
        return 'fail'

    return 'success'

###############################################################################
# Run test_ogrsf


def ogr_ogdi_5():

    if ogrtest.ogdi_drv is None:
        return 'skip'

    import test_cli_utilities
    if test_cli_utilities.get_test_ogrsf_path() is None:
        return 'skip'

    url_name ='gltp:/vrf/' + os.getcwd()+ '/data/vm2alv2_texash/texash'

    ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' --config OGR_OGDI_LAUNDER_LAYER_NAMES YES -ro "' + url_name + '"')

    if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
        print(ret)
        return 'fail'

    return 'success'

###############################################################################


def ogr_ogdi_cleanup():

    if ogrtest.ogdi_ds is None:
        return 'skip'

    ogrtest.ogdi_ds = None
    return 'success'


gdaltest_list = [
    ogr_ogdi_1,
    ogr_ogdi_2,
    ogr_ogdi_3,
    ogr_ogdi_4,
    ogr_ogdi_5,
    ogr_ogdi_cleanup]


if __name__ == '__main__':

    gdaltest.setup_run( 'ogr_ogdi' )

    gdaltest.run_tests( gdaltest_list )

    gdaltest.summarize()
""" logistics.py A module for a single class LogisticSystem. Refer to LogisticSystem documentation for more information. """ import copy from typing import List from order import Vehicle from order import Order from order import Item class LogisticSystem: """ A class used to represent logistics system. Attributes ---------- orders : list of orders all orders known to logistics system vehicles : list of vehicles all vehicles known to logistics system Methods ------- placeOrder(order) assign an order a vehicle>>> from order import * trackOrder(order_id) print information about an order with passed order id """ def __init__(self, vehicles: List[Vehicle]) -> None: """Initialize logistics system with vehicles andd orders (empty list). >>> vehicles = [Vehicle(1), Vehicle(10)] >>> logistics_sys = LogisticSystem(vehicles) >>> logistics_sys.placeOrder(Order("Bohdan", "Lviv", 2, [Item("Arduino", 200)])) Your order number is 0. >>> logistics_sys.trackOrder(0) Your order #0 is sent to Lviv. Total price: 200 UAH. """ self.orders = [] self.vehicles = copy.copy(vehicles) def placeOrder(self, order: Order) -> None: """Assign a vehicle to an order and add an order to orders of logistics system. >>> vehicles = [Vehicle(1)] >>> logistics_sys = LogisticSystem(vehicles) >>> logistics_sys.placeOrder(Order("Jack", "London", 4, [Item("Arduino mega", 200)])) Your order number is 1. >>> logistics_sys.placeOrder(Order("Tom", "Kyiv", 31, [Item("Cap", 120)])) Your order number is 2. There is no available vehicle to deliver an order. 
>>> logistics_sys.orders #doctest: +ELLIPSIS [<order.Order object at 0x...>] """ try: vehicle = next(vehicle for vehicle in self.vehicles if vehicle.is_available) order.assign_vehicle(vehicle) self.orders.append(order) vehicle.is_available = False except StopIteration: print("There is no available vehicle to deliver an order.") def trackOrder(self, order_id: int) -> str: """Print information (order id, city of delivery, price in UAH) about the order with passed in order id. >>> vehicles = [Vehicle(1)] >>> logistics_sys = LogisticSystem(vehicles) >>> logistics_sys.placeOrder(Order("Jack", "London", 4, [Item("Arduino mega", 200)])) Your order number is 3. >>> logistics_sys.trackOrder(3) Your order #3 is sent to London. Total price: 200 UAH. """ try: order = next(order for order in self.orders if order.order_id == order_id) order_id = order.order_id city = order.location.city amount = order.calculate_amount() print(f"Your order #{order_id} is sent to {city}. Total price: {amount} UAH.") except StopIteration: print("No such order.") if __name__ == "__main__": import doctest doctest.testmod()
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { __, sprintf } from '@wordpress/i18n'; /** * External dependencies */ import PropTypes from 'prop-types'; /** * Internal dependencies */ import { FIELD_TYPES, SCALE_DIRECTION } from '../../constants'; import { AnimationInputPropTypes } from '../types'; export const ZoomEffectInputPropTypes = { zoomFrom: PropTypes.shape(AnimationInputPropTypes), }; export default { zoomDirection: { label: __('Direction', 'web-stories'), tooltip: sprintf( /* translators: 1: scaleIn. 2: scaleOut */ __('Valid values are %1$s or %2$s', 'web-stories'), 'zoomIn', 'zoomOut' ), type: FIELD_TYPES.DIRECTION_PICKER, values: [SCALE_DIRECTION.SCALE_IN, SCALE_DIRECTION.SCALE_OUT], defaultValue: SCALE_DIRECTION.SCALE_OUT, }, };
import React from 'react';
import Dropzone from 'react-dropzone'
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import * as actions from '../actions/actions';

// Drag-and-drop upload widget.  Shows a dropzone; after a drop it previews
// the dropped file locally and dispatches its preview URL to the store.
// NOTE(review): relies on `file.preview`, which older react-dropzone
// versions attached automatically -- confirm the installed version still
// does this.
class Upload extends React.Component {
  constructor() {
    super()
    this.state = {
      files: []  // files dropped in the current session
    }
  }

  // Store the dropped files and publish the first file's preview URL.
  onDrop(files) {
    this.setState({
      files
    });
    this.props.actions.uploadedItem(files[0].preview);
  }

  render() {
    const { img } = this.props;
    return (
      <section>
        <div className="dropzone">
          <Dropzone onDrop={this.onDrop.bind(this)}>
            <p>Try dropping some files here, or click to select files to upload.</p>
          </Dropzone>
        </div>
        <aside>
          <div>
            {
              // Prefer the freshly dropped files; otherwise fall back to
              // the `img` prop coming from the store.
              (this.state.files.length > 0) ?
                (this.state.files.map(f => <div key={f.name}>
                    <p>{f.name}</p>
                    <p> {f.size} bytes </p>
                    <img style={{width:'100%'}} src={f.preview} />
                  </div>))
                :
                (<img style={{ width: '100%' }}src={img} />)
            }
          </div>
        </aside>
      </section>
    );
  }
}

Upload.propTypes = {
  actions: PropTypes.object.isRequired,
};

function mapStateToProps(state) {
  const { items } = state;
  return {
    items
  };
}

function mapDispatchToProps(dispatch) {
  return {
    actions: bindActionCreators(actions, dispatch)
  };
}

export default connect(
  mapStateToProps,
  mapDispatchToProps
)(Upload);
import { mapGetters, mapActions } from "vuex";

// Renderless provider component: initializes the current panel view from a
// site-wide default, then exposes panel state to its default scoped slot.
export default {
  props: {
    // Fallback panel view used until the store has one.
    siteDefaultPanelView: {
      type: String,
      required: true,
    },
  },
  computed: {
    // Panel state proxied from the "panels" Vuex module.
    ...mapGetters("panels", ["currentPanel", "currentPanelView"]),
  },
  methods: {
    ...mapActions("panels", ["initCurrentPanelView"]),
  },
  created() {
    // Seed the store once, before first render.
    this.initCurrentPanelView(this.siteDefaultPanelView);
  },
  render() {
    // No markup of its own -- delegates rendering to the scoped slot.
    return this.$scopedSlots.default({
      currentPanel: this.currentPanel,
      currentPanelView: this.currentPanelView,
    });
  },
};
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Or at https://developers.google.com/open-source/licenses/bsd from __future__ import absolute_import import os from trainer2 import train_ml_helpers from trainer2.stopwords import STOP_WORDS def GenerateTopWords(word_dict): """Requires ./stopwords.txt exist in folder for the function to run. """ stop_words = [s.encode('utf-8') for s in STOP_WORDS] sorted_words = sorted(word_dict, key=word_dict.get, reverse=True) top_words = [] index = 0 while len(top_words) < train_ml_helpers.COMPONENT_FEATURES: if sorted_words[index] not in stop_words: top_words.append(sorted_words[index]) index += 1 return top_words def parse_words_from_content(contents): """Returns given list of strings, extract the top (most common) words. """ word_dict = {} for content in contents: words = content.encode('utf-8').split() for word in words: if word in word_dict: word_dict[word] += 1 else: word_dict[word] = 1 return GenerateTopWords(word_dict) def make_top_words_list(contents, job_dir): """Returns the top (most common) words in the entire dataset for component prediction. If a file is already stored in job_dir containing these words, the words from the file are simply returned. Otherwise, the most common words are determined and written to job_dir, before being returned. Returns: A list of the most common words in the dataset (the number of them determined by train_ml_helpers.COMPONENT_FEATURES). """ if not os.path.exists(job_dir): os.mkdir(job_dir) if os.access(job_dir + 'topwords.txt', os.R_OK): print("Found topwords.txt") with open(job_dir + 'topwords.txt', 'rb') as f: top_words = f.read().split() else: top_words = parse_words_from_content(contents) with open(job_dir + 'topwords.txt', 'w') as f: for word in top_words: f.write('%s\n' % word.decode('utf-8')) return top_words
/*
 * Copyright (c) 2005-2017 Lev Walkin <vlm@lionet.info>.
 * All rights reserved.
 * Redistribution and modifications are permitted subject to BSD license.
 */
#include <asn_system.h>
#include <asn_internal.h>
#include <asn_bit_data.h>

/*
 * Create a contiguous non-refillable bit data structure.
 * Can be freed by FREEMEM().
 */
asn_bit_data_t* asn_bit_data_new_contiguous(const void* data, size_t size_bits) {
    size_t size_bytes = (size_bits + 7) / 8;
    asn_bit_data_t* pd;
    uint8_t* bytes;

    /* Get the extensions map */
    /* One allocation: header + payload copy + one zero guard byte. */
    pd = CALLOC(1, sizeof(*pd) + size_bytes + 1);
    if (!pd) {
        return NULL;
    }
    bytes = (void*) (((char*) pd) + sizeof(*pd));
    memcpy(bytes, data, size_bytes);
    bytes[size_bytes] = 0; /* guard byte past the payload */
    pd->buffer = bytes;
    pd->nboff = 0;
    pd->nbits = size_bits;
    return pd;
}

/*
 * Render the stream position for debug logging.
 * NOTE(review): rotates between two static buffers, so only the two most
 * recent results stay valid, and the function is not thread-safe.
 */
char* asn_bit_data_string(asn_bit_data_t* pd) {
    static char buf[2][32];
    static int n;
    n = (n + 1) % 2;
    snprintf(
        buf[n], sizeof(buf[n]),
        "{m=%" ASN_PRI_SIZE " span %" ASN_PRI_SIZE "[%" ASN_PRI_SIZE
        "..%" ASN_PRI_SIZE "] (%" ASN_PRI_SIZE ")}",
        pd->moved, ((uintptr_t)(pd->buffer) & 0xf), pd->nboff, pd->nbits,
        pd->nbits - pd->nboff);
    return buf[n];
}

/*
 * Rewind the read position by nbits.
 * NOTE(review): when nboff < nbits the assert condition equals the
 * if-condition and therefore always holds (no abort); the undo request is
 * then silently ignored.  Verify this is the intended behavior upstream.
 */
void asn_get_undo(asn_bit_data_t* pd, int nbits) {
    if ((ssize_t) pd->nboff < nbits) {
        assert((ssize_t) pd->nboff < nbits);
    } else {
        pd->nboff -= nbits;
        pd->moved -= nbits;
    }
}

/*
 * Extract a small number of bits (<= 31) from the specified PER data pointer.
 * Returns the bits as a non-negative value, or -1 on failure/underflow.
 */
int32_t asn_get_few_bits(asn_bit_data_t* pd, int nbits) {
    size_t off; /* Next after last bit offset */
    ssize_t nleft; /* Number of bits left in this stream */
    uint32_t accum;
    const uint8_t* buf;

    if (nbits < 0) return -1;

    nleft = pd->nbits - pd->nboff;
    if (nbits > nleft) {
        int32_t tailv, vhead;
        /* Without a refill callback (or for >31 bits) this is underflow. */
        if (!pd->refill || nbits > 31) return -1;
        /* Accumulate unused bytes before refill */
        ASN_DEBUG("Obtain the rest %d bits (want %d)", (int) nleft, (int) nbits);
        tailv = asn_get_few_bits(pd, nleft);
        if (tailv < 0) return -1;
        /* Refill (replace pd contents with new data) */
        if (pd->refill(pd)) return -1;
        nbits -= nleft;
        vhead = asn_get_few_bits(pd, nbits);
        /* Combine the rest of previous pd with the head of new one */
        tailv = (tailv << nbits) | vhead; /* Could == -1 */
        return tailv;
    }

    /*
     * Normalize position indicator.
     */
    if (pd->nboff >= 8) {
        pd->buffer += (pd->nboff >> 3);
        pd->nbits -= (pd->nboff & ~0x07);
        pd->nboff &= 0x07;
    }
    pd->moved += nbits;
    pd->nboff += nbits;
    off = pd->nboff;
    buf = pd->buffer;

    /*
     * Extract specified number of bits.
     * The branch is chosen by the end offset so at most 4 bytes are read.
     */
    if (off <= 8)
        accum = nbits ? (buf[0]) >> (8 - off) : 0;
    else if (off <= 16)
        accum = ((buf[0] << 8) + buf[1]) >> (16 - off);
    else if (off <= 24)
        accum = ((buf[0] << 16) + (buf[1] << 8) + buf[2]) >> (24 - off);
    else if (off <= 31)
        accum = (((uint32_t) buf[0] << 24) + (buf[1] << 16) + (buf[2] << 8)
                 + (buf[3])) >> (32 - off);
    else if (nbits <= 31) {
        /* 25..31 bits that straddle 5 bytes: fetch in two passes on a
         * throwaway copy of the position, 24 bits + remainder. */
        asn_bit_data_t tpd = *pd;
        /* Here are we with our 31-bits limit plus 1..7 bits offset. */
        asn_get_undo(&tpd, nbits);
        /* The number of available bits in the stream allow
         * for the following operations to take place without
         * invoking the ->refill() function */
        accum = asn_get_few_bits(&tpd, nbits - 24) << 24;
        accum |= asn_get_few_bits(&tpd, 24);
    } else {
        asn_get_undo(pd, nbits);
        return -1;
    }

    accum &= (((uint32_t) 1 << nbits) - 1);

    ASN_DEBUG(
        " [PER got %2d<=%2d bits => span %d %+ld[%d..%d]:%02x (%d) => 0x%x]",
        (int) nbits, (int) nleft, (int) pd->moved,
        (((long) pd->buffer) & 0xf), (int) pd->nboff, (int) pd->nbits,
        ((pd->buffer != NULL) ? pd->buffer[0] : 0),
        (int) (pd->nbits - pd->nboff), (int) accum);

    return accum;
}

/*
 * Extract a large number of bits from the specified PER data pointer.
 * With `alright`, the first partial byte is right-aligned; otherwise the
 * output is left-aligned.  Returns 0 on success, -1 on failure.
 */
int asn_get_many_bits(asn_bit_data_t* pd, uint8_t* dst, int alright, int nbits) {
    int32_t value;

    if (alright && (nbits & 7)) {
        /* Perform right alignment of a first few bits */
        value = asn_get_few_bits(pd, nbits & 0x07);
        if (value < 0) return -1;
        *dst++ = value; /* value is already right-aligned */
        nbits &= ~7;
    }

    while (nbits) {
        if (nbits >= 24) {
            value = asn_get_few_bits(pd, 24);
            if (value < 0) return -1;
            *(dst++) = value >> 16;
            *(dst++) = value >> 8;
            *(dst++) = value;
            nbits -= 24;
        } else {
            value = asn_get_few_bits(pd, nbits);
            if (value < 0) return -1;
            if (nbits & 7) { /* implies left alignment */
                value <<= 8 - (nbits & 7), nbits += 8 - (nbits & 7);
                if (nbits > 24) *dst++ = value >> 24;
            }
            if (nbits > 16) *dst++ = value >> 16;
            if (nbits > 8) *dst++ = value >> 8;
            *dst++ = value;
            break;
        }
    }

    return 0;
}

/*
 * Put a small number of bits (<= 31).
 * Flushes completed bytes through po->output() when the staging buffer
 * would overflow.  Returns 0 on success, -1 on failure.
 */
int asn_put_few_bits(asn_bit_outp_t* po, uint32_t bits, int obits) {
    size_t off; /* Next after last bit offset */
    size_t omsk; /* Existing last byte meaningful bits mask */
    uint8_t* buf;

    if (obits <= 0 || obits >= 32) return obits ? -1 : 0;

    ASN_DEBUG(
        "[PER put %d bits %x to %p+%d bits]", obits, (int) bits,
        (void*) po->buffer, (int) po->nboff);

    /*
     * Normalize position indicator.
     */
    if (po->nboff >= 8) {
        po->buffer += (po->nboff >> 3);
        po->nbits -= (po->nboff & ~0x07);
        po->nboff &= 0x07;
    }

    /*
     * Flush whole-bytes output, if necessary.
     */
    if (po->nboff + obits > po->nbits) {
        size_t complete_bytes;
        if (!po->buffer) po->buffer = po->tmpspace;
        complete_bytes = (po->buffer - po->tmpspace);
        ASN_DEBUG(
            "[PER output %ld complete + %ld]", (long) complete_bytes,
            (long) po->flushed_bytes);
        if (po->output(po->tmpspace, complete_bytes, po->op_key) < 0)
            return -1;
        /* Carry the partially-filled byte over to the fresh buffer. */
        if (po->nboff) po->tmpspace[0] = po->buffer[0];
        po->buffer = po->tmpspace;
        po->nbits = 8 * sizeof(po->tmpspace);
        po->flushed_bytes += complete_bytes;
    }

    /*
     * Now, due to sizeof(tmpspace), we are guaranteed large enough space.
     */
    buf = po->buffer;
    omsk = ~((1 << (8 - po->nboff)) - 1);
    off = (po->nboff + obits);

    /* Clear data of debris before meaningful bits */
    bits &= (((uint32_t) 1 << obits) - 1);

    ASN_DEBUG(
        "[PER out %d %u/%x (t=%d,o=%d) %x&%x=%x]", obits, (int) bits,
        (int) bits, (int) po->nboff, (int) off, buf[0], (int) (omsk & 0xff),
        (int) (buf[0] & omsk));

    if (off <= 8) /* Completely within 1 byte */
        po->nboff = off, bits <<= (8 - off),
        buf[0] = (buf[0] & omsk) | bits;
    else if (off <= 16)
        po->nboff = off, bits <<= (16 - off),
        buf[0] = (buf[0] & omsk) | (bits >> 8), buf[1] = bits;
    else if (off <= 24)
        po->nboff = off, bits <<= (24 - off),
        buf[0] = (buf[0] & omsk) | (bits >> 16), buf[1] = bits >> 8,
        buf[2] = bits;
    else if (off <= 31)
        po->nboff = off, bits <<= (32 - off),
        buf[0] = (buf[0] & omsk) | (bits >> 24), buf[1] = bits >> 16,
        buf[2] = bits >> 8, buf[3] = bits;
    else {
        /* Too wide for one pass: emit high 24 bits, then the rest. */
        if (asn_put_few_bits(po, bits >> (obits - 24), 24)) return -1;
        if (asn_put_few_bits(po, bits, obits - 24)) return -1;
    }

    ASN_DEBUG(
        "[PER out %u/%x => %02x buf+%ld]", (int) bits, (int) bits, buf[0],
        (long) (po->buffer - po->tmpspace));

    return 0;
}

/*
 * Output a large number of bits, 24 at a time, then the remainder.
 * Returns 0 on success, -1 on failure.
 */
int asn_put_many_bits(asn_bit_outp_t* po, const uint8_t* src, int nbits) {
    while (nbits) {
        uint32_t value;

        if (nbits >= 24) {
            value = (src[0] << 16) | (src[1] << 8) | src[2];
            src += 3;
            nbits -= 24;
            if (asn_put_few_bits(po, value, 24)) return -1;
        } else {
            value = src[0];
            if (nbits > 8) value = (value << 8) | src[1];
            if (nbits > 16) value = (value << 8) | src[2];
            if (nbits & 0x07) value >>= (8 - (nbits & 0x07));
            if (asn_put_few_bits(po, value, nbits)) return -1;
            break;
        }
    }

    return 0;
}

/*
 * Pad the output to a byte boundary with zero bits and flush everything
 * buffered so far through po->output().  Returns 0 on success.
 */
int asn_put_aligned_flush(asn_bit_outp_t* po) {
    uint32_t unused_bits = (0x7 & (8 - (po->nboff & 0x07)));
    size_t complete_bytes =
        (po->buffer ? po->buffer - po->tmpspace : 0) + ((po->nboff + 7) >> 3);

    if (unused_bits) {
        /* Zero out the tail of the last, partially filled byte. */
        po->buffer[po->nboff >> 3] &= ~0u << unused_bits;
    }

    if (po->output(po->tmpspace, complete_bytes, po->op_key) < 0) {
        return -1;
    } else {
        po->buffer = po->tmpspace;
        po->nboff = 0;
        po->nbits = 8 * sizeof(po->tmpspace);
        po->flushed_bytes += complete_bytes;
        return 0;
    }
}
/**
 * Migration: create/drop the `shifts` table.
 * A shift belongs to a mentor and spans startTime–endTime on `day`.
 */
export function up(queryInterface, Sequelize) {
  return queryInterface.createTable('shifts', {
    id: {
      type: Sequelize.INTEGER,
      primaryKey: true,
      autoIncrement: true,
    },
    mentorId: {
      type: Sequelize.INTEGER,
      allowNull: false,
      // Foreign key to mentors.id.
      references: { model: 'mentors', key: 'id' },
    },
    day: Sequelize.STRING,
    startTime: Sequelize.TIME,
    endTime: Sequelize.TIME,
    createdAt: Sequelize.DATE,
    updatedAt: Sequelize.DATE,
  });
}

export function down(queryInterface) {
  // BUG FIX: the promise must be returned so the migration runner awaits
  // the drop and can report failures (the original fired and forgot it).
  return queryInterface.dropTable('shifts');
}
import csv
import os

from dagster import (
    EventMetadataEntry,
    Field,
    Materialization,
    Selector,
    String,
    execute_pipeline,
    input_hydration_config,
    output_materialization_config,
    pipeline,
    seven,
    solid,
    usable_as_dagster_type,
)


@input_hydration_config(Selector({'csv': Field(String)}))
def less_simple_data_frame_input_hydration_config(context, selector):
    """Hydrate a LessSimpleDataFrame from a csv path relative to this module."""
    csv_path = os.path.join(os.path.dirname(__file__), selector['csv'])
    with open(csv_path, 'r') as fd:
        lines = [row for row in csv.DictReader(fd)]

    context.log.info('Read {n_lines} lines'.format(n_lines=len(lines)))
    return LessSimpleDataFrame(lines)


@output_materialization_config(
    {
        'csv': Field(
            {
                'path': String,
                'sep': Field(String, is_required=False, default_value=','),
            },
            is_required=False,
        ),
        'json': Field({'path': String}, is_required=False),
    }
)
def less_simple_data_frame_output_materialization_config(
    context, config, value
):
    """Materialize a LessSimpleDataFrame as csv and/or json per output config.

    Both formats are optional in the config schema (``is_required=False``), so
    each one is written only when its key is actually present in ``config`` --
    the previous code accessed both keys unconditionally and raised KeyError
    whenever only one format was configured.
    """
    if 'csv' in config:
        # os.path.abspath already resolves a relative path; joining it onto
        # dirname(__file__) was a no-op because os.path.join discards the left
        # side when the right side is absolute. Resolve directly instead.
        csv_path = os.path.abspath(config['csv']['path'])
        os.makedirs(os.path.dirname(csv_path), exist_ok=True)
        with open(csv_path, 'w') as fd:
            # Guard against an empty data frame: there is no first row to
            # take the field names from.
            fieldnames = list(value[0].keys()) if value else []
            writer = csv.DictWriter(
                fd, fieldnames, delimiter=config['csv']['sep']
            )
            writer.writeheader()
            writer.writerows(value)
        context.log.debug(
            'Wrote dataframe as .csv to {path}'.format(path=csv_path)
        )
        yield Materialization(
            # BUG FIX: label was '1data_frame_csv', inconsistent with the
            # json materialization's 'data_frame_json'.
            'data_frame_csv',
            'LessSimpleDataFrame materialized as csv',
            [
                EventMetadataEntry.path(
                    path=csv_path,
                    label='data_frame_csv_path',
                    description='LessSimpleDataFrame written to csv format',
                )
            ],
        )

    if 'json' in config:
        json_path = os.path.abspath(config['json']['path'])
        # Create the target directory, mirroring the csv branch above.
        os.makedirs(os.path.dirname(json_path), exist_ok=True)
        with open(json_path, 'w') as fd:
            json_value = seven.json.dumps([dict(row) for row in value])
            fd.write(json_value)
        context.log.debug(
            'Wrote dataframe as .json to {path}'.format(path=json_path)
        )
        yield Materialization(
            'data_frame_json',
            'LessSimpleDataFrame materialized as json',
            [
                EventMetadataEntry.path(
                    path=json_path,
                    label='data_frame_json_path',
                    description='LessSimpleDataFrame written to json format',
                )
            ],
        )


@usable_as_dagster_type(
    name='LessSimpleDataFrame',
    description='A more sophisticated data frame that type checks its structure.',
    input_hydration_config=less_simple_data_frame_input_hydration_config,
    output_materialization_config=less_simple_data_frame_output_materialization_config,
)
class LessSimpleDataFrame(list):
    # A list of dict rows; hydration/materialization behavior is attached
    # via the decorator above.
    pass


@solid
def sort_by_calories(
    context, cereals: LessSimpleDataFrame
) -> LessSimpleDataFrame:
    """Return the cereals sorted by ascending integer 'calories' value."""
    sorted_cereals = sorted(
        cereals, key=lambda cereal: int(cereal['calories'])
    )
    context.log.info(
        'Least caloric cereal: {least_caloric}'.format(
            least_caloric=sorted_cereals[0]['name']
        )
    )
    context.log.info(
        'Most caloric cereal: {most_caloric}'.format(
            most_caloric=sorted_cereals[-1]['name']
        )
    )
    return LessSimpleDataFrame(sorted_cereals)


@pipeline
def output_materialization_pipeline():
    sort_by_calories()


if __name__ == '__main__':
    execute_pipeline(
        output_materialization_pipeline,
        {
            'solids': {
                'sort_by_calories': {
                    'inputs': {'cereals': {'csv': 'cereal.csv'}},
                    'outputs': [
                        {
                            'result': {
                                'csv': {'path': 'output/cereal_out.csv'},
                                'json': {'path': 'output/cereal_out.json'},
                            }
                        }
                    ],
                }
            }
        },
    )
'''
Created on 24 Feb 2012

@author: Qasim

Note: The from_string methods actually require a list, ["string", "string2"],
with no newline characters. The to_string methods will actually return a
string like "bu7\nbu8u8"

A typical DNA string is 'bu7bw9bv1u7u7bu2'.
'''
import random
from collections import namedtuple

# Tower colours map to the single characters used in DNA strings.
Tower = namedtuple('Tower', 'blue green yellow')('u', 'v', 'w')

_tls = 'l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15 l16 l17 l18'
_tls += ' l19 l20 l21 l22 l23 l24 l25 l26 l27 l28 l29 l30 l31 l32'
# The 32 valid tower locations, numbered 1..32 (TowerLocation[i] == i + 1).
TowerLocation = namedtuple('TowerLocation', _tls)(*range(1, 33))


class BaseBuildInvalidStringError(Exception):
    """Raised when a BaseBuild cannot be parsed; `location` is the bad index."""

    def __init__(self, s, location):
        self.s = s
        self.location = location

    def __str__(self):
        err = "Location {loc} in the string '{s}' is invalid. {msg}"
        if self.location == 0:
            return err.format(loc=self.location, s=self.s,
                              msg="It must 'b'.")
        elif self.location == 1:
            return err.format(loc=self.location, s=self.s,
                              msg="It must be 'u', 'v', or 'w'")
        elif self.location == 2:
            return err.format(loc=self.location, s=self.s,
                              msg="It must be a number 1, 2, ..., 9")
        elif self.location >= 3:
            return err.format(loc="3+", s=self.s,
                              msg="The string is too long.")


class BaseBuild(namedtuple('BaseBuild', 'tower_type location')):
    """A 'build tower' instruction, e.g. 'bu7' = build a blue tower at 7."""
    __slots__ = ()

    def __new__(cls, *args):
        # A single argument is a DNA fragment string; two arguments are
        # (tower_type, location) as for a plain namedtuple.
        if len(args) == 1:
            return cls.from_string(*args)
        else:
            return super(BaseBuild, cls).__new__(cls, *args)

    @classmethod
    def from_string(cls, s):
        """Return new BaseBuild instance built from the entire string.

        Raise BaseBuildInvalidStringError for invalid strings."""
        if len(s) >= 5:
            raise BaseBuildInvalidStringError(s, 3)
        if s[0] != 'b':
            raise BaseBuildInvalidStringError(s, 0)
        tower_type = s[1]
        try:
            t = {'u': Tower.blue, 'v': Tower.green,
                 'w': Tower.yellow}[tower_type]
        except KeyError:
            raise BaseBuildInvalidStringError(s, 1)
        location1 = s[2]
        try:
            location2 = s[3]
        except IndexError:
            location2 = ''
        try:
            location = int(location1 + location2)
        except ValueError:
            raise BaseBuildInvalidStringError(s, 2)
        if location <= 0:
            raise BaseBuildInvalidStringError(s, 2)
        try:
            l = TowerLocation[location - 1]
        except IndexError:
            raise BaseBuildInvalidStringError(s, 2)
        return BaseBuild(t, l)

    @classmethod
    def from_string_info(cls, s):
        """Return new BaseBuild instance and number of characters used.

        Raise BaseBuildInvalidStringError for failure."""
        if len(s) < 3:
            raise BaseBuildInvalidStringError(s, len(s))
        # Peek at a fourth character to decide between a one- and
        # two-digit location.
        try:
            int(s[3])
        except (ValueError, IndexError):
            return cls.from_string(s[:3]), 3
        else:
            return cls.from_string(s[:4]), 4

    def __repr__(self):
        return 'b{0}{1}'.format(self.tower_type, self.location)


class BaseUpgradeInvalidStringError(Exception):
    """Raised when a BaseUpgrade cannot be parsed from a string."""

    def __init__(self, s):
        self.s = s

    def __str__(self):
        err = "Error building a BaseUpgrade from the string '{}'."
        return err.format(self.s)


class BaseUpgrade(namedtuple('BaseUpgrade', 'location')):
    """An 'upgrade tower' instruction, e.g. 'u7' = upgrade the tower at 7."""
    __slots__ = ()

    @classmethod
    def from_string(cls, s):
        """Return a BaseUpgrade instance built from the entire string.

        Raise BaseUpgradeInvalidStringError for invalid strings"""
        if len(s) not in (2, 3) or s[0] != 'u':
            raise BaseUpgradeInvalidStringError(s)
        location1 = s[1]
        try:
            location2 = s[2]
        except IndexError:
            location2 = ''
        try:
            location = int(location1 + location2)
        except ValueError:
            # BUG FIX: the exception was constructed but never raised, so
            # execution fell through and hit an UnboundLocalError on
            # `location` below.
            raise BaseUpgradeInvalidStringError(s)
        if location <= 0:
            raise BaseUpgradeInvalidStringError(s)
        try:
            loc = TowerLocation[location - 1]
        except IndexError:
            raise BaseUpgradeInvalidStringError(s)
        return BaseUpgrade(loc)

    @classmethod
    def from_string_info(cls, s):
        """Return new BaseUpgrade instance and number of characters used.

        Raise BaseUpgradeInvalidStringError for failure."""
        if len(s) < 2:
            # BUG FIX: previously raised BaseBuildInvalidStringError, which
            # contradicts this docstring and escaped the
            # `except BaseUpgradeInvalidStringError` handler in
            # DNA.from_string.
            raise BaseUpgradeInvalidStringError(s)
        try:
            int(s[2])
        except (IndexError, ValueError):
            return cls.from_string(s[:2]), 2
        else:
            return cls.from_string(s[:3]), 3

    def __repr__(self):
        return "u{0}".format(self.location)


def get_random_base():
    """Return a random BaseBuild (random type) or BaseUpgrade instruction."""
    random_location = random.choice(TowerLocation)
    randbase = random.choice([BaseBuild, BaseUpgrade])
    if randbase == BaseBuild:
        tower_type = random.choice(Tower)
        return BaseBuild(tower_type, random_location)
    elif randbase == BaseUpgrade:
        return BaseUpgrade(random_location)


class DNAException(Exception):
    pass


class DNAInvalidCreationString(DNAException):
    """No base instruction could be parsed at position `loc` of string `s`."""

    def __init__(self, s, loc):
        self.s = s
        self.loc = loc

    def __str__(self):
        err = "In string '{0}', there was an error at location {1}"
        return err.format(self.s, self.loc)


class DNAValidationError(DNAException):
    """A base at index `base_loc` (0-based) of `dna` breaks a game rule."""

    def __init__(self, dna, base_loc, reason):
        self.dna = dna
        self.base_loc = base_loc
        self.reason = reason

    def __str__(self):
        s = "The base at location(0+) {1} is invalid. {2} DNA: {0}"
        return s.format(self.dna, self.base_loc, self.reason)


class DNAInvalidBuildLocation(DNAValidationError):
    """There is already a tower at that location"""

    def __init__(self, dna, base_loc,
                 reason="There is already a tower at this location."):
        super().__init__(dna, base_loc, reason)


class DNAInvalidUpgradeLocation(DNAValidationError):
    """There is no tower at that location to upgrade."""

    def __init__(self, dna, base_loc,
                 reason="There is no tower at that location to upgrade"):
        super().__init__(dna, base_loc, reason)


class DNAInvalidInitialTower(DNAInvalidBuildLocation):
    """You cannot have the yellow tower at the start"""

    def __init__(self, dna, base_loc,
                 reason="First tower must be blue(u) or green(v) due to cost."):
        super().__init__(dna, base_loc, reason)


class DNA(tuple):
    """A tuple of DNA Base instructions (Base1, Base2, Base3, ..., BaseN)

    You can make the DNA from a list of base sequences, another DNA object or
    from a string.
    """
    __slots__ = ()

    def __new__(cls, str_or_lst=""):
        if isinstance(str_or_lst, str):
            return cls.from_string(str_or_lst)
        else:
            # Validate before constructing so an invalid DNA never exists.
            cls.validate_list(str_or_lst)
            return tuple.__new__(cls, str_or_lst[:])

    def mutate_remove(self, location):
        """Return DNA with the base at the location (0+) removed (and validates)"""
        dna_list = list(self)
        del dna_list[location]
        return DNA(dna_list)

    def mutate_insert(self, location, base):
        """Return DNA with the base added at the location (0+) (and validates)"""
        dna_list = list(self)
        dna_list.insert(location, base)
        return DNA(dna_list)

    def mutate_replace(self, location, base):
        """Return DNA with base at given location replaced with new base (and validates)"""
        dna_list = list(self)
        dna_list[location] = base
        return DNA(dna_list)

    def mutate(self):
        """Return a new, mutated DNA object based on the current DNA.

        The mutation is guaranteed to be valid, different and non-empty."""
        # Ensure all positions have a tower in them
        unbuilt_locs = [loc for loc in TowerLocation]
        for base in self:
            if isinstance(base, BaseBuild):
                unbuilt_locs.remove(base.location)
        new_dna = list(self)
        for unbuilt_loc in unbuilt_locs:
            towertype = random.choice(Tower)
            # NOTE(review): insert positions are drawn from the ORIGINAL
            # length even as new_dna grows -- presumably intentional bias
            # towards the front; confirm before changing.
            insert_loc = random.randrange(0, len(self) + 1)
            new_dna.insert(insert_loc, BaseBuild(towertype, unbuilt_loc))
        # First tower must be green or blue.
        if new_dna[0].tower_type not in (Tower.blue, Tower.green):
            tt = random.choice((Tower.blue, Tower.green))
            new_dna[0] = BaseBuild(tt, new_dna[0].location)
        new_dna = DNA(new_dna)
        # Now mutate
        while True:
            choices = [self.mutate_insert] * 3 + [self.mutate_remove] + \
                [self.mutate_replace] * 2
            mutation = random.choice(choices)
            if mutation == self.mutate_insert:
                # Insert an upgrade tower somewhere (after its build, which
                # is guaranteed to exist because every location was built
                # above).
                upgrade_loc = random.choice(TowerLocation)
                for i, base in enumerate(new_dna):
                    if base.location == upgrade_loc:
                        start_loc = i + 1
                        break
                insert_loc = random.randrange(start_loc, len(new_dna) + 1)
                base = BaseUpgrade(upgrade_loc)
                new_dna = new_dna.mutate_insert(insert_loc, base)
                return new_dna
            elif mutation == self.mutate_remove:
                # Remove a random upgrade tower
                upgrade_poses = []
                for i, base in enumerate(new_dna):
                    if isinstance(base, BaseUpgrade):
                        upgrade_poses.append(i)
                if not upgrade_poses:
                    # No upgrades to remove; pick a different mutation.
                    continue
                new_dna = new_dna.mutate_remove(random.choice(upgrade_poses))
                return new_dna
            elif mutation == self.mutate_replace:
                # Replace a build tower with a different tower type
                build_poses = []
                for i, base in enumerate(new_dna):
                    if isinstance(base, BaseBuild):
                        build_poses.append(i)
                random_pos = random.choice(build_poses)
                random_tower = new_dna[random_pos]
                if random_pos == 0:
                    tt_choices = [Tower.blue, Tower.green]
                else:
                    tt_choices = list(Tower)
                tt_choices.remove(random_tower.tower_type)
                tt = random.choice(tt_choices)
                new_build = BaseBuild(tt, random_tower.location)
                new_dna = new_dna.mutate_replace(random_pos, new_build)
                return new_dna

    def validate(self):
        return self.validate_list(self)

    @classmethod
    def validate_list(cls, lst):
        """Return whether a sequence of bases is a valid DNA string

        Raise various DNAValidationError stating what is incorrect"""
        if len(lst) == 0:
            return True
        locs = []  # Locations that towers have been built
        # Not enough money for this tower initially
        if isinstance(lst[0], BaseBuild):
            if lst[0].tower_type not in (Tower.blue, Tower.green):
                raise DNAInvalidInitialTower(lst, 0)
        for i, base in enumerate(lst):  # i starts at 0
            if isinstance(base, BaseBuild):
                if base.location in locs:
                    raise DNAInvalidBuildLocation(lst, i)
                locs.append(base.location)
            elif isinstance(base, BaseUpgrade):
                if base.location not in locs:
                    raise DNAInvalidUpgradeLocation(lst, i)
        return True

    @classmethod
    def from_string(cls, s):
        """Return a DNA instance built from a string."""
        recorded_dna = []
        s_used_total = 0
        while s != "":
            # Try a build instruction first, then an upgrade.
            fail = True
            try:
                new_base, s_used = BaseBuild.from_string_info(s)
            except BaseBuildInvalidStringError:
                pass
            else:
                fail = False
            if fail:
                try:
                    new_base, s_used = BaseUpgrade.from_string_info(s)
                except BaseUpgradeInvalidStringError:
                    pass
                else:
                    fail = False
            if fail:
                # No base matched the string
                raise DNAInvalidCreationString(s, s_used_total)
            else:
                recorded_dna.append(new_base)
                s_used_total += s_used
                s = s[s_used:]
        d = DNA(recorded_dna)  # Create DNA by a list
        return d

    def __str__(self):
        s = ""
        for base in self:
            s += str(base)
        return s


class Village:
    """String format for a village is
    123:bu7u7
    98:bw8
    :bw9u9
    (score: DNA string)
    A score of "" (empty) means the DNA has not been scored.

    The planet is used to check if a DNA is scored before using score_func in
    order to score it.

    Internal: self.dnas = [((DNA Object, Score), ...]
    """

    def __init__(self, str_or_lst="", planet=None, score_func=None):
        if isinstance(str_or_lst, str):
            self.dnas = self.from_string(str_or_lst).dnas
        else:
            self.dnas = str_or_lst[:]  # Must make copy of list of (dnas, score)
        self.planet = planet
        self.score_func = score_func

    def set_planet(self, planet):
        self.planet = planet

    def set_score_func(self, score_func):
        self.score_func = score_func

    def step(self):
        """Do a step of the village - THIS IS THE MAIN FUNCTION

        This will score DNAs which are not already scored and also develop
        the next generation of DNAs when appropriate.

        Returns:
            True - There is an unscored DNA left
            False - The next step will develop next generation
        """
        def get_unscored():
            num_unscored = 0
            for dna, score in self.dnas:
                if score is None:
                    num_unscored += 1
            return num_unscored

        if get_unscored() == 0:
            self.develop_next_gen()
            return True
        else:
            # Score exactly one unscored DNA per step, preferring a cached
            # score from the planet over recomputing with score_func.
            for i, dna_info in enumerate(self.dnas):
                dna, score = dna_info
                if score is None:
                    if self.planet is None:
                        score = self.score_func(dna)
                    else:
                        prior_score = self.planet.check_against(dna)
                        if prior_score is False:
                            score = self.score_func(dna)
                            if score is not None:
                                self.planet.update(dna, score)
                        else:
                            score = prior_score
                    break
            self.dnas[i] = (dna, score)
            if get_unscored() == 0:
                return False
            else:
                return True

    def develop_next_gen(self):
        """Develop the next generation of DNAs

        This will take the top 50% of DNAs, mutate them. The next generation
        is the top 50% plus the mutated 50%. For an odd number of DNAs, less
        than half of the village will be conserved.

        Following has been altered. (with a 10% chance of a large mutation
        occurring). The last ranking top DNA will be replaced by its
        mutation. A large mutation is 15 standard mutations. The mutated DNA
        is definitely to be different from the original DNA.

        Note: Every DNA must have a score otherwise the village cannot be
        sorted.
        """
        village_size = len(self.dnas)
        self.dnas.sort(key=lambda tup: tup[1], reverse=True)  # Descending
        if village_size % 2 == 0:
            mutate_num = int(village_size / 2)
            top_dnas = self.dnas[:mutate_num]
        else:
            mutate_num = int((village_size + 1) / 2)
            top_dnas = self.dnas[:mutate_num]
            # Remove the last top dna at the end
        new_dnas = []
        for i in range(mutate_num):
            base_dna = top_dnas[i][0]
            new_dna = base_dna.mutate()
            # NOTE(review): `< 1` is always true, so every offspring gets a
            # "large" mutation (15 total) -- per the docstring the original
            # 10% rule "has been altered"; left as designed.
            if random.random() < 1:  # Large mutation
                for j in range(14):
                    new_dna = new_dna.mutate()
            if random.random() < 0.1:
                for j in range(99):
                    new_dna = new_dna.mutate()
            new_dnas.append((new_dna, None))
        if village_size % 2 == 0:
            self.dnas = top_dnas
        else:
            self.dnas = top_dnas[:-1]
        self.dnas.extend(new_dnas)

    @classmethod
    def from_string(cls, s):
        """Build a Village from newline-separated 'score:dna' lines."""
        if s == "":
            return Village()
        str_lines = s.split("\n")
        dna_score_list = []
        for line in str_lines:
            score, dna = line.split(":")
            try:
                scorenum = int(score)
            except ValueError:
                scorenum = None
            d = DNA.from_string(dna)
            dna_score_list.append((d, scorenum))
        return Village(dna_score_list)

    def __repr__(self):
        s = ""
        for i, dna_info in enumerate(self.dnas):
            dna, score = dna_info
            if score is None:
                scoretxt = ""
            else:
                scoretxt = str(score)
            # Last line should not have newline char
            if i != len(self.dnas) - 1:
                s += "{0}:{1}\n".format(scoretxt, str(dna))
            else:
                s += "{0}:{1}".format(scoretxt, str(dna))
        return s


class VillageFile(Village):
    """A Village persisted to a file; rewritten after every step()."""

    def __init__(self, file_location, planet=None, score_func=None):
        self.file_location = file_location
        with open(file_location) as f:
            village_str = f.read()
        super(VillageFile, self).__init__(village_str, planet, score_func)

    def step(self):
        retval = super(VillageFile, self).step()
        with open(self.file_location, 'w') as f:
            f.write(str(self))
        return retval


class PlanetException(Exception):
    pass


class PlanetEmptyScore(PlanetException):
    """The list used to create the planet is invalid due to an unscored DNA."""

    def __init__(self, given_list, incorrect_item):
        self.given_list = given_list
        self.incorrect_item = incorrect_item

    def __str__(self):
        s = ("The invalid item was (due to empty score): {0}.\n\n"
             "The complete input was:\n\n{1}")
        return s.format(self.incorrect_item, self.given_list)


class PlanetInvalidUpdate(PlanetException):
    def __init__(self, dna, score):
        self.dna = dna
        self.score = score

    def __str__(self):
        s = "This update {0}:{1} is invalid because the score must be a number."
        s = s.format(self.score, self.dna)
        return s


class Planet:
    """A global storage of DNAs with known scores"""

    def __init__(self, str_or_lst=""):
        if isinstance(str_or_lst, str):
            self.validate_string(str_or_lst)
            v = Village.from_string(str_or_lst)
            self.known_dnas = v.dnas[:]
        else:
            self.validate_list(str_or_lst)
            self.known_dnas = str_or_lst[:]

    def update(self, dna, score):
        """Return whether the planet was updated with the new data."""
        if score is None:
            raise PlanetInvalidUpdate(dna, score)
        if dna not in [dna for dna, score in self.known_dnas]:
            self.known_dnas.append((dna, score))
            return True
        return False

    def check_against(self, dna):
        """Return the score if it is stored otherwise False"""
        for stored_dna, score in self.known_dnas:
            if stored_dna == dna:
                return score
        return False

    @classmethod
    def validate_list(cls, l):
        for i, dna_info in enumerate(l):
            score = dna_info[1]
            if score is None:
                raise PlanetEmptyScore(l[:], dna_info)
        return True

    @classmethod
    def validate_string(cls, s):
        v = Village.from_string(s)
        # Ensure every dna has a valid score
        for i, dna_info in enumerate(v.dnas):
            score = dna_info[1]
            if score is None:
                # BUG FIX: previously passed s[i] (a single character of the
                # source string) as the offending item; pass the unscored
                # (dna, score) tuple, matching validate_list.
                raise PlanetEmptyScore(s, dna_info)
        return True

    def __str__(self):
        v = Village(self.known_dnas)
        return str(v)


class PlanetFile(Planet):
    """A global storage of scored DNAs into a file.

    This will update the file as appropriate automatically."""

    def __init__(self, file_location):
        self.file_location = file_location
        with open(file_location) as planet_file:
            s = planet_file.read()
        super(PlanetFile, self).__init__(s)

    def update(self, dna, score):
        """Return whether an update with the data provided occurred

        Updates the file as appropriate."""
        did_update = super(PlanetFile, self).update(dna, score)
        if did_update:
            with open(self.file_location, 'a') as pf:
                pf.write("\n{0}:{1}".format(score, str(dna)))
        return did_update
r"""Importing this file must **not** initialize CUDA context. test_distributed relies on this assumption to properly run. This means that when this is imported no CUDA calls shall be made, including torch.cuda.device_count(), etc. torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported. """ import sys import os import platform import re import gc import types import math from functools import partial import inspect import io import copy import operator import argparse import unittest import warnings import random import contextlib import shutil import threading from pathlib import Path import socket import subprocess import time from collections import OrderedDict from collections.abc import Sequence from contextlib import contextmanager, closing from functools import wraps from itertools import product from copy import deepcopy from numbers import Number import tempfile import json import __main__ # type: ignore[import] import errno import ctypes from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union, List, TypeVar from unittest.mock import MagicMock import numpy as np import expecttest from .._core import \ (_compare_tensors_internal, _compare_scalars_internal, _compare_return_type) import torch import torch.cuda from torch.testing import make_tensor from torch._utils_internal import get_writable_path from torch._six import string_classes from torch import Tensor import torch.backends.cudnn import torch.backends.mkl from enum import Enum from statistics import mean import functools from .composite_compliance import no_dispatch torch.backends.disable_global_flags() FILE_SCHEMA = "file://" if sys.platform == 'win32': FILE_SCHEMA = "file:///" # Environment variable `IN_CI` is set in `.jenkins/common.sh`. 
IS_IN_CI = os.getenv('IN_CI') == '1' IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle' IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1' IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1' RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1' OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1' MAX_NUM_RETRIES = 3 DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' SLOW_TESTS_FILE = '.pytorch-slow-tests.json' slow_tests_dict: Optional[Dict[str, Any]] = None disabled_tests_dict: Optional[Dict[str, Any]] = None NATIVE_DEVICES = ('cpu', 'cuda', 'meta') class _TestParametrizer(object): """ Decorator class for parametrizing a test function, yielding a set of new tests spawned from the original generic test, each specialized for a specific set of test inputs. For example, parametrizing a test across the set of ops will result in a test function per op. The decision of how to parametrize / what to parametrize over is intended to be implemented by each derived class. In the details, the decorator adds a 'parametrize_fn' property to the test function that is called during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this, there is no need to parametrize over device type, as that is already handled separately. If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new composite 'parametrize_fn' will be created that generates tests with the product of the parameters generated by the old and new parametrize_fns. This allows for convenient composability of decorators. """ def _parametrize_test(self, test, generic_cls, device_cls): """ Parametrizes the given test function across whatever dimension is specified by the derived class. Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all ops, all modules, or all ops + their associated dtypes. 
Args: test (fn): Test function to parametrize over generic_cls (class): Generic test class object containing tests (e.g. TestFoo) device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None if the tests are not part of a device-specific set Returns: Generator object returning 3-tuples of: test (fn): Parametrized test function; must support a device arg and args for any params test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to the base name of the test param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64}) """ raise NotImplementedError def __call__(self, fn): if hasattr(fn, 'parametrize_fn'): # Do composition with the product of args. old_parametrize_fn = fn.parametrize_fn new_parametrize_fn = self._parametrize_test fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn) else: fn.parametrize_fn = self._parametrize_test return fn def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn): """ Returns a parametrize_fn that parametrizes over the product of the parameters handled by the given parametrize_fns. Each given parametrize_fn should each have the signature f(test, generic_cls, device_cls). The test names will be a combination of the names produced by the parametrize_fns in "<new_name>_<old_name>" order. This order is done to match intuition for constructed names when composing multiple decorators; the names will be built in top to bottom order when stacking parametrization decorators. Args: old_parametrize_fn (callable) - First parametrize_fn to compose. new_parametrize_fn (callable) - Second parametrize_fn to compose. 
""" def composite_fn(test, generic_cls, device_cls, old_parametrize_fn=old_parametrize_fn, new_parametrize_fn=new_parametrize_fn): old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in old_parametrize_fn(test, generic_cls, device_cls)] for (old_test, old_test_name, old_param_kwargs) in old_tests: for (new_test, new_test_name, new_param_kwargs) in \ new_parametrize_fn(old_test, generic_cls, device_cls): redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys()) if redundant_params: raise RuntimeError('Parametrization over the same parameter by multiple parametrization ' 'decorators is not supported. For test "{}", the following parameters ' 'are handled multiple times: {}'.format( test.__name__, redundant_params)) full_param_kwargs = {**old_param_kwargs, **new_param_kwargs} merged_test_name = '{}{}{}'.format(new_test_name, '_' if old_test_name != '' and new_test_name != '' else '', old_test_name) yield (new_test, merged_test_name, full_param_kwargs) return composite_fn def instantiate_parametrized_tests(generic_cls): """ Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by parametrized tests with specialized names. Args: generic_cls (class): Generic test class object containing tests (e.g. TestFoo) """ for attr_name in tuple(dir(generic_cls)): class_attr = getattr(generic_cls, attr_name) if not hasattr(class_attr, 'parametrize_fn'): continue # Remove the generic test from the test class. delattr(generic_cls, attr_name) # Add parametrized tests to the test class. 
def instantiate_test_helper(cls, name, test, param_kwargs): @wraps(test) def instantiated_test(self, param_kwargs=param_kwargs): test(self, **param_kwargs) assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name) setattr(generic_cls, name, instantiated_test) for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn( class_attr, generic_cls=generic_cls, device_cls=None): full_name = '{}_{}'.format(test.__name__, test_suffix) instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs) class subtest(object): """ Explicit subtest case for use with test parametrization. Allows for explicit naming of individual subtest cases as well as applying decorators to the parametrized test. Args: arg_values (iterable): Iterable of arg values (e.g. range(10)) or tuples of arg values (e.g. [(1, 2), (3, 4)]). name (str): Optional name to use for the test. decorators (iterable): Iterable of decorators to apply to the generated test. """ __slots__ = ['arg_values', 'name', 'decorators'] def __init__(self, arg_values, name=None, decorators=None): self.arg_values = arg_values self.name = name self.decorators = decorators if decorators else [] class parametrize(_TestParametrizer): """ Decorator for applying generic test parametrizations. The interface for this decorator is modeled after `@pytest.mark.parametrize`. Basic usage between this decorator and pytest's is identical. The first argument should be a string containing comma-separated names of parameters for the test, and the second argument should be an iterable returning values or tuples of values for the case of multiple parameters. Beyond this basic usage, the decorator provides some additional functionality that pytest does not. 1. Parametrized tests end up as generated test functions on unittest test classes. Since this differs from how pytest works, this decorator takes on the additional responsibility of naming these test functions. 
The default test names consists of the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"), but custom names can be defined using `name_fn` or the `subtest` structure (see below). 2. The decorator specially handles parameter values of type `subtest`, which allows for more fine-grained control over both test naming and test execution. In particular, it can be used to tag subtests with explicit test names or apply arbitrary decorators (see examples below). Examples:: @parametrize("x", range(5)) def test_foo(self, x): ... @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) def test_bar(self, x, y): ... @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')], name_fn=lambda x, y: '{}_{}'.format(x, y)) def test_bar_custom_names(self, x, y): ... @parametrize("x, y", [subtest((1, 2), name='double'), subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]), subtest((1, 4), name='quadruple')]) def test_baz(self, x, y): ... Args: arg_str (str): String of arg names separate by commas (e.g. "x,y"). arg_values (iterable): Iterable of arg values (e.g. range(10)) or tuples of arg values (e.g. [(1, 2), (3, 4)]). name_fn (callable): Optional function that takes in parameters and returns subtest name. """ def __init__(self, arg_str, arg_values, name_fn=None): self.arg_names = arg_str.split(',') self.arg_values = arg_values self.name_fn = name_fn def _formatted_str_repr(self, name, value): """ Returns a string representation for the given arg that is suitable for use in test function names. """ if isinstance(value, torch.dtype): return dtype_name(value) elif isinstance(value, torch.device): return str(value) # Can't use isinstance as it would cause a circular import elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo': return value.formatted_name else: # Include name and value separated by underscore. 
return '{}_{}'.format(name, str(value).replace('.', '_')) def _default_subtest_name(self, values): return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)]) def _get_subtest_name(self, values, explicit_name=None): if explicit_name: subtest_name = explicit_name elif self.name_fn: subtest_name = self.name_fn(*values) else: subtest_name = self._default_subtest_name(values) return subtest_name def _parametrize_test(self, test, generic_cls, device_cls): if len(self.arg_names) == 0: # No additional parameters needed for the test. test_name = '' yield (test, test_name, {}) else: # Each "values" item is expected to be either: # * A tuple of values with one for each arg. For a single arg, a single item is expected. # * A subtest instance with arg_values matching the previous. for values in self.arg_values: maybe_name = None if isinstance(values, subtest): sub = values values = sub.arg_values maybe_name = sub.name # Apply decorators. @wraps(test) def test_wrapper(*args, **kwargs): return test(*args, **kwargs) for decorator in sub.decorators: test_wrapper = decorator(test_wrapper) gen_test = test_wrapper else: gen_test = test values = list(values) if len(self.arg_names) > 1 else [values] if len(values) != len(self.arg_names): raise RuntimeError('Expected # values == # arg names, but got: {} ' 'values and {} names for test "{}"'.format( len(values), len(self.arg_names), test.__name__)) param_kwargs = { name: value for name, value in zip(self.arg_names, values) } test_name = self._get_subtest_name(values, explicit_name=maybe_name) if '.' 
in test_name: raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name)) yield (gen_test, test_name, param_kwargs) class ProfilingMode(Enum): LEGACY = 1 SIMPLE = 2 PROFILING = 3 def cppProfilingFlagsToProfilingMode(): old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._jit_set_profiling_mode(True) torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._jit_set_profiling_mode(old_prof_mode_state) if old_prof_exec_state: if old_prof_mode_state: return ProfilingMode.PROFILING else: return ProfilingMode.SIMPLE else: return ProfilingMode.LEGACY @contextmanager def enable_profiling_mode_for_profiling_tests(): if GRAPH_EXECUTOR == ProfilingMode.PROFILING: old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._jit_set_profiling_mode(True) try: yield finally: if GRAPH_EXECUTOR == ProfilingMode.PROFILING: torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._jit_set_profiling_mode(old_prof_mode_state) @contextmanager def enable_profiling_mode(): old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._jit_set_profiling_mode(True) try: yield finally: torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._jit_set_profiling_mode(old_prof_mode_state) @contextmanager def num_profiled_runs(num_runs): old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs) try: yield finally: torch._C._jit_set_num_profiled_runs(old_num_runs) func_call = torch._C.ScriptFunction.__call__ meth_call = torch._C.ScriptMethod.__call__ def prof_callable(callable, *args, **kwargs): if 'profile_and_replay' in kwargs: del kwargs['profile_and_replay'] if GRAPH_EXECUTOR == ProfilingMode.PROFILING: with enable_profiling_mode_for_profiling_tests(): callable(*args, **kwargs) return callable(*args, **kwargs) return callable(*args, **kwargs) def prof_func_call(*args, **kwargs): return prof_callable(func_call, *args, 
**kwargs) def prof_meth_call(*args, **kwargs): return prof_callable(meth_call, *args, **kwargs) # TODO fix when https://github.com/python/mypy/issues/2427 is address torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment] torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment] def _get_test_report_path(): # allow users to override the test file location. We need this # because the distributed tests run the same test file multiple # times with different configurations. override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE') test_source = override if override is not None else 'python-unittest' return os.path.join('test-reports', test_source) parser = argparse.ArgumentParser() parser.add_argument('--subprocess', action='store_true', help='whether to run each test in a subprocess') parser.add_argument('--seed', type=int, default=1234) parser.add_argument('--accept', action='store_true') parser.add_argument('--jit_executor', type=str) parser.add_argument('--repeat', type=int, default=1) parser.add_argument('--test_bailouts', action='store_true') parser.add_argument('--save-xml', nargs='?', type=str, const=_get_test_report_path(), default=_get_test_report_path() if IS_IN_CI else None) parser.add_argument('--discover-tests', action='store_true') parser.add_argument('--log-suffix', type=str, default="") parser.add_argument('--run-parallel', type=int, default=1) parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE) parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE) # Only run when -h or --help flag is active to display both unittest and parser help messages. 
def run_unittest_help(argv):
    # Runs unittest's own --help handler; used below only when -h/--help is
    # present, so the user sees unittest's options in addition to argparse's.
    unittest.main(argv=argv)

if '-h' in sys.argv or '--help' in sys.argv:
    # unittest.main() with -h prints help and exits the process; run it in a
    # separate thread so the argparse help below still gets a chance to print.
    help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
    help_thread.start()
    help_thread.join()

args, remaining = parser.parse_known_args()
# Select the JIT graph executor mode from --jit_executor, falling back to
# whatever the C++ profiling flags currently say.
if args.jit_executor == 'legacy':
    GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
    GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
    GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
    # infer flags based on the default settings
    GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()

# Module-level test-run configuration derived from the parsed CLI flags.
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
    expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)

# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))

def wait_for_process(p):
    """Wait for subprocess `p` to exit and return its exit status.

    On KeyboardInterrupt, gives `p` a grace period to handle SIGINT itself
    before killing it; on any other exception, kills `p` and re-raises.
    """
    try:
        return p.wait()
    except KeyboardInterrupt:
        # Give `p` a chance to handle KeyboardInterrupt. Without this,
        # `pytest` can't print errors it collected so far upon KeyboardInterrupt.
        # NOTE(review): Popen.wait(timeout=5) raises subprocess.TimeoutExpired
        # on timeout rather than returning None, so the `else` branch below
        # looks unreachable — confirm the intended timeout handling.
        exit_status = p.wait(timeout=5)
        if exit_status is not None:
            return exit_status
        else:
            p.kill()
            raise
    except:  # noqa: B001,E722, copied from python core library
        p.kill()
        raise
    finally:
        # Always call p.wait() to ensure exit
        p.wait()

def shell(command, cwd=None, env=None):
    # Flush our own streams first so subprocess output interleaves correctly.
    sys.stdout.flush()
    sys.stderr.flush()
    # The following cool snippet is copied from Py3 core library subprocess.call
    # only the with
    # 1. `except KeyboardInterrupt` block added for SIGINT handling.
    # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
    # `p.wait()` in a `final` block for the code to be portable.
# # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens" p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env) return wait_for_process(p) # Used to run the same test with different tensor types def repeat_test_for_types(dtypes): def repeat_helper(f): @wraps(f) def call_helper(self, *args): for dtype in dtypes: with TestCase.subTest(self, dtype=dtype): f(self, *args, dtype=dtype) return call_helper return repeat_helper def discover_test_cases_recursively(suite_or_case): if isinstance(suite_or_case, unittest.TestCase): return [suite_or_case] rc = [] for element in suite_or_case: print(element) rc.extend(discover_test_cases_recursively(element)) return rc def get_test_names(test_cases): return ['.'.join(case.id().split('.')[-2:]) for case in test_cases] def _print_test_names(): suite = unittest.TestLoader().loadTestsFromModule(__main__) test_cases = discover_test_cases_recursively(suite) for name in get_test_names(test_cases): print(name) def chunk_list(lst, nchunks): return [lst[i::nchunks] for i in range(nchunks)] # sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api def sanitize_test_filename(filename): # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed if filename.startswith(CI_TEST_PREFIX): filename = filename[len(CI_TEST_PREFIX) + 1:] strip_py = re.sub(r'.py$', '', filename) return re.sub('/', r'.', strip_py) def lint_test_case_extension(suite): succeed = True for test_case_or_suite in suite: test_case = test_case_or_suite if isinstance(test_case_or_suite, unittest.TestSuite): first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None if first_test is not None and isinstance(first_test, unittest.TestSuite): return succeed and 
lint_test_case_extension(test_case_or_suite) test_case = first_test if test_case is not None: test_class = test_case.id().split('.', 1)[1].split('.')[0] if not isinstance(test_case, TestCase): err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't." print(f"{test_class} - failed. {err}") succeed = False return succeed def run_tests(argv=UNITTEST_ARGS): # import test files. if IMPORT_SLOW_TESTS: if os.path.exists(IMPORT_SLOW_TESTS): global slow_tests_dict with open(IMPORT_SLOW_TESTS, 'r') as fp: slow_tests_dict = json.load(fp) else: print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}') if IMPORT_DISABLED_TESTS: if os.path.exists(IMPORT_DISABLED_TESTS): global disabled_tests_dict with open(IMPORT_DISABLED_TESTS, 'r') as fp: disabled_tests_dict = json.load(fp) else: print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}') # Determine the test launch mechanism if TEST_DISCOVER: _print_test_names() return # Before running the tests, lint to check that every test class extends from TestCase suite = unittest.TestLoader().loadTestsFromModule(__main__) if not lint_test_case_extension(suite): sys.exit(1) if TEST_IN_SUBPROCESS: failed_tests = [] test_cases = discover_test_cases_recursively(suite) for case in test_cases: test_case_full_name = case.id().split('.', 1)[1] other_args = [] if IMPORT_DISABLED_TESTS: other_args.append('--import-disabled-tests') if IMPORT_SLOW_TESTS: other_args.append('--import-slow-tests') cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name] string_cmd = " ".join(cmd) exitcode = shell(cmd) if exitcode != 0: # This is sort of hacky, but add on relevant env variables for distributed tests. 
if 'TestDistBackendWithSpawn' in test_case_full_name: backend = os.environ.get("BACKEND", "") world_size = os.environ.get("WORLD_SIZE", "") env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}" string_cmd = env_prefix + " " + string_cmd # Log the command to reproduce the failure. print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}") failed_tests.append(test_case_full_name) assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format( len(failed_tests), '\n\t'.join(failed_tests)) elif RUN_PARALLEL > 1: test_cases = discover_test_cases_recursively(suite) test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL) processes = [] for i in range(RUN_PARALLEL): command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i] processes.append(subprocess.Popen(command, universal_newlines=True)) failed = False for p in processes: failed |= wait_for_process(p) != 0 assert not failed, "Some test shards have failed" elif TEST_SAVE_XML is not None: # import here so that non-CI doesn't need xmlrunner installed import xmlrunner # type: ignore[import] test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1))) test_report_path = TEST_SAVE_XML + LOG_SUFFIX test_report_path = os.path.join(test_report_path, test_filename) os.makedirs(test_report_path, exist_ok=True) verbose = '--verbose' in argv or '-v' in argv if verbose: print('Test results will be stored in {}'.format(test_report_path)) unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1)) elif REPEAT_COUNT > 1: for _ in range(REPEAT_COUNT): if not unittest.main(exit=False, argv=argv).result.wasSuccessful(): sys.exit(-1) else: unittest.main(argv=argv) IS_LINUX = sys.platform == "linux" IS_WINDOWS = sys.platform == "win32" IS_MACOS = sys.platform == "darwin" IS_PPC = platform.machine() == "ppc64le" def is_avx512_vnni_supported(): if sys.platform != 'linux': return 
False with open("/proc/cpuinfo", encoding="ascii") as f: lines = f.read() return "vnni" in lines IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported() if IS_WINDOWS: @contextmanager def TemporaryFileName(*args, **kwargs): # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile # opens the file, and it cannot be opened multiple times in Windows. To support Windows, # close the file after creation and try to remove it manually if 'delete' in kwargs: if kwargs['delete'] is not False: raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.") else: kwargs['delete'] = False f = tempfile.NamedTemporaryFile(*args, **kwargs) try: f.close() yield f.name finally: os.unlink(f.name) else: @contextmanager # noqa: T484 def TemporaryFileName(*args, **kwargs): with tempfile.NamedTemporaryFile(*args, **kwargs) as f: yield f.name if IS_WINDOWS: @contextmanager def TemporaryDirectoryName(suffix=None): # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely, # so we first create the directory using mkdtemp and then remove it manually try: dir_name = tempfile.mkdtemp(suffix=suffix) yield dir_name finally: shutil.rmtree(dir_name) else: @contextmanager # noqa: T484 def TemporaryDirectoryName(suffix=None): with tempfile.TemporaryDirectory(suffix=suffix) as d: yield d IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8' def _check_module_exists(name: str) -> bool: r"""Returns if a top-level module with :attr:`name` exists *without** importing it. This is generally safer than try-catch block around a `import X`. It avoids third party libraries breaking assumptions of some of our tests, e.g., setting multiprocessing start method when imported (see librosa/#747, torchvision/#544). 
""" try: import importlib.util spec = importlib.util.find_spec(name) return spec is not None except ImportError: return False TEST_NUMPY = _check_module_exists('numpy') TEST_SCIPY = _check_module_exists('scipy') TEST_MKL = torch.backends.mkl.is_available() TEST_NUMBA = _check_module_exists('numba') TEST_DILL = _check_module_exists('dill') TEST_LIBROSA = _check_module_exists('librosa') BUILD_WITH_CAFFE2 = _check_module_exists("caffe2.python.caffe2_pybind11_state") # Python 2.7 doesn't have spawn NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1' TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1' TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1' TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1' TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1' TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1' # TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen # See #64427 TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1' # Enables tests that are slow to run (disabled by default) TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1' # Disables non-slow tests (these tests enabled by default) # This is usually used in conjunction with TEST_WITH_SLOW to # run *only* slow tests. (I could have done an enum, but # it felt a little awkward. TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1' # Disables noarch tests; all but one CI configuration disables these. We don't # disable them for local runs because you still want to run them # (unlike slow tests!) TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1' # Determine whether to enable cuda memory leak check. # CUDA mem leak check is expensive and thus we don't want to execute it on every # test case / configuration. # If this is True then CUDA memory leak checks are skipped. 
# If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'

# Disables tests for when on Github Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'

# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")

# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
    np.bool_      : torch.bool,
    np.uint8      : torch.uint8,
    np.int8       : torch.int8,
    np.int16      : torch.int16,
    np.int32      : torch.int32,
    np.int64      : torch.int64,
    np.float16    : torch.float16,
    np.float32    : torch.float32,
    np.float64    : torch.float64,
    np.complex64  : torch.complex64,
    np.complex128 : torch.complex128
}

if IS_WINDOWS:
    # Size of `np.intc` is platform defined.
    # It is returned by functions like `bitwise_not`.
    # On Windows `int` is 32-bit
    # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
    numpy_to_torch_dtype_dict[np.intc] = torch.int

# Dict of torch dtype -> NumPy dtype (inverse of the mapping above)
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}

# Floating-point dtypes commonly iterated over by generated tests.
ALL_TENSORTYPES = [torch.float, torch.double, torch.half]

# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
    ALL_TENSORTYPES2 = [torch.float, torch.double, torch.half, torch.bfloat16]
else:
    ALL_TENSORTYPES2 = ALL_TENSORTYPES

def skipIfRocm(fn):
    """Decorator: skip the wrapped test when running on the ROCm stack."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        else:
            fn(*args, **kwargs)
    return wrapper

# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
    """Decorator factory: skip unless ROCm is available at >= `version`.

    `version` is an iterable of ints (e.g. (4, 2)); compared against
    torch.version.hip with any trailing git-sha suffix stripped.
    """
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if not TEST_WITH_ROCM:
                reason = "ROCm not available"
                raise unittest.SkipTest(reason)
            rocm_version = str(torch.version.hip)
            rocm_version = rocm_version.split("-")[0]    # ignore git sha
            rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
            # NOTE(review): `rocm_version_tuple is None` can never be true here
            # (tuple(...) always returns a tuple) — looks vestigial; confirm.
            if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
                reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)
        return wrap_fn
    return dec_fn

def skipIfNotMiopenSuggestNHWC(fn):
    """Decorator: skip unless PYTORCH_MIOPEN_SUGGEST_NHWC is enabled."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
            raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
        else:
            fn(*args, **kwargs)
    return wrapper

# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
    def __init__(self, deterministic, *, warn_only=False):
        self.deterministic = deterministic
        self.warn_only = warn_only

    def __enter__(self):
        # Snapshot current global determinism settings so __exit__ can restore.
        self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
        self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
        torch.use_deterministic_algorithms(
            self.deterministic,
            warn_only=self.warn_only)

    def __exit__(self, exception_type, exception_value, traceback):
        torch.use_deterministic_algorithms(
            self.deterministic_restore,
            warn_only=self.warn_only_restore)

# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
    def __init__(self, sync_debug_mode):
        self.mode = sync_debug_mode

    def __enter__(self):
        # Snapshot the current mode so __exit__ can restore it.
        self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
        torch.cuda.set_sync_debug_mode(self.mode)

    def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore) # This decorator can be used for API tests that call # torch.use_deterministic_algorithms(). When the test is finished, it will # restore the previous deterministic flag setting. # # If CUDA >= 10.2, this will set the environment variable # CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that # setting is not thrown during the test unless the test changes that variable # on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be # restored once the test is finished. # # Note that if a test requires CUDA to actually register the changed # CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because # CUDA only checks the variable when the runtime initializes. Tests can be # run inside a subprocess like so: # # import subprocess, sys, os # script = ''' # # Test code should go here # ''' # try: # subprocess.check_output( # [sys.executable, '-c', script], # stderr=subprocess.STDOUT, # cwd=os.path.dirname(os.path.realpath(__file__)), # env=os.environ.copy()) # except subprocess.CalledProcessError as e: # error_message = e.output.decode('utf-8') # # Handle exceptions raised by the subprocess here # def wrapDeterministicFlagAPITest(fn): @wraps(fn) def wrapper(*args, **kwargs): with DeterministicGuard( torch.are_deterministic_algorithms_enabled(), warn_only=torch.is_deterministic_algorithms_warn_only_enabled()): class CuBLASConfigGuard: cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' def __enter__(self): self.is_cuda10_2_or_higher = ( (torch.version.cuda is not None) and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) if self.is_cuda10_2_or_higher: self.cublas_config_restore = os.environ.get(self.cublas_var_name) os.environ[self.cublas_var_name] = ':4096:8' def __exit__(self, exception_type, exception_value, traceback): if self.is_cuda10_2_or_higher: cur_cublas_config = os.environ.get(self.cublas_var_name) if self.cublas_config_restore is None: if cur_cublas_config 
is not None: del os.environ[self.cublas_var_name] else: os.environ[self.cublas_var_name] = self.cublas_config_restore with CuBLASConfigGuard(): fn(*args, **kwargs) return wrapper def skipIfCompiledWithoutNumpy(fn): # Even if the numpy module is present, if `USE_NUMPY=0` is used during the # build, numpy tests will fail numpy_support = TEST_NUMPY if numpy_support: try: # The numpy module is present, verify that PyTorch is compiled with # numpy support torch.from_numpy(np.array([2, 2])) except RuntimeError: numpy_support = False @wraps(fn) def wrapper(*args, **kwargs): if not numpy_support: raise unittest.SkipTest("PyTorch was compiled without numpy support") else: fn(*args, **kwargs) return wrapper def _test_function(fn, device): def run_test_function(self): return fn(self, device) return run_test_function def skipIfNoLapack(fn): @wraps(fn) def wrapper(*args, **kwargs): if not torch._C.has_lapack: raise unittest.SkipTest('PyTorch compiled without Lapack') else: fn(*args, **kwargs) return wrapper def skipIfNotRegistered(op_name, message): """Wraps the decorator to hide the import of the `core`. Args: op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`. message: message to fail with. 
    Usage:
        @skipIfNotRegistered('MyOp', 'MyOp is not linked!')

        This will check if 'MyOp' is in the caffe2.python.core
    """
    if not BUILD_WITH_CAFFE2:
        return unittest.skip("Pytorch is compiled without Caffe2")
    try:
        from caffe2.python import core
        skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, message)
    except ImportError:
        # caffe2 not importable at all — skip unconditionally.
        skipper = unittest.skip("Cannot import `caffe2.python.core`")
    return skipper

def skipIfNoSciPy(fn):
    """Decorator: skip the wrapped test when SciPy is not importable."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_SCIPY:
            raise unittest.SkipTest("test require SciPy, but SciPy not found")
        else:
            fn(*args, **kwargs)
    return wrapper

def skipIfOnGHA(fn):
    """Decorator: skip the wrapped test when running on GitHub Actions."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if ON_GHA:
            raise unittest.SkipTest("Test disabled for GHA")
        else:
            fn(*args, **kwargs)
    return wrapper

def skipIfTBB(message="This test makes TBB sad"):
    """Decorator factory: skip when PyTorch was built with TBB (IS_TBB)."""
    def dec_fn(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if IS_TBB:
                raise unittest.SkipTest(message)
            else:
                fn(*args, **kwargs)
        return wrapper
    return dec_fn

def slowTest(fn):
    """Mark a test as slow: skipped unless PYTORCH_TEST_WITH_SLOW=1.

    Also tags the wrapper with `slow_test` so runners (e.g. TEST_SKIP_FAST
    filtering) can detect slow tests without running them.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_SLOW:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
        else:
            fn(*args, **kwargs)
    wrapper.__dict__['slow_test'] = True
    return wrapper

# noarch tests are tests that should be only run on one CI configuration,
# because they don't exercise any interesting platform specific code
# and so if run once, indicate the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743 def noarchTest(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_SKIP_NOARCH: raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH") else: fn(*args, **kwargs) return wrapper def slowAwareTest(fn): fn.__dict__['slow_test'] = True return fn def skipCUDAMemoryLeakCheckIf(condition): def dec(fn): if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True fn._do_cuda_memory_leak_check = not condition return fn return dec def skipCUDANonDefaultStreamIf(condition): def dec(fn): if getattr(fn, '_do_cuda_non_default_stream', True): # if current True fn._do_cuda_non_default_stream = not condition return fn return dec def suppress_warnings(fn): @wraps(fn) def wrapper(*args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore") fn(*args, **kwargs) return wrapper def to_gpu(obj, type_map=None): if type_map is None: type_map = {} if isinstance(obj, torch.Tensor): assert obj.is_leaf t = type_map.get(obj.dtype, obj.dtype) with torch.no_grad(): res = obj.clone().to(dtype=t, device="cuda") res.requires_grad = obj.requires_grad return res elif torch.is_storage(obj): return obj.new().resize_(obj.size()).copy_(obj) elif isinstance(obj, list): return [to_gpu(o, type_map) for o in obj] elif isinstance(obj, tuple): return tuple(to_gpu(o, type_map) for o in obj) else: return deepcopy(obj) def get_function_arglist(func): return inspect.getfullargspec(func).args def set_rng_seed(seed): torch.manual_seed(seed) random.seed(seed) if TEST_NUMPY: np.random.seed(seed) @contextlib.contextmanager def freeze_rng_state(): # no_dispatch needed for test_composite_compliance # Some OpInfos use freeze_rng_state for rng determinism, but # test_composite_compliance overrides dispatch for all torch functions # which we need to disable to get and set rng state with no_dispatch(): rng_state = torch.get_rng_state() if torch.cuda.is_available(): cuda_rng_state = 
torch.cuda.get_rng_state() try: yield finally: with no_dispatch(): if torch.cuda.is_available(): torch.cuda.set_rng_state(cuda_rng_state) torch.set_rng_state(rng_state) @contextlib.contextmanager def set_default_dtype(dtype): saved_dtype = torch.get_default_dtype() torch.set_default_dtype(dtype) try: yield finally: torch.set_default_dtype(saved_dtype) def iter_indices(tensor): if tensor.dim() == 0: return range(0) if tensor.dim() == 1: return range(tensor.size(0)) return product(*(range(s) for s in tensor.size())) def is_iterable(obj): try: iter(obj) return True except TypeError: return False def is_iterable_of_tensors(iterable, include_empty=False): """ Returns True if iterable is an iterable of tensors and False o.w. If the iterable is empty, the return value is :attr:`include_empty` """ # Tensor itself is iterable so we check this first if isinstance(iterable, torch.Tensor): return False try: if len(iterable) == 0: return include_empty for t in iter(iterable): if not isinstance(t, torch.Tensor): return False except TypeError as te: return False return True class CudaNonDefaultStream(): def __enter__(self): # Before starting CUDA test save currently active streams on all # CUDA devices and set new non default streams to all CUDA devices # to ensure CUDA tests do not use default stream by mistake. beforeDevice = torch.cuda.current_device() self.beforeStreams = [] for d in range(torch.cuda.device_count()): self.beforeStreams.append(torch.cuda.current_stream(d)) deviceStream = torch.cuda.Stream(device=d) torch._C._cuda_setStream(deviceStream._cdata) torch._C._cuda_setDevice(beforeDevice) def __exit__(self, exec_type, exec_value, traceback): # After completing CUDA test load previously active streams on all # CUDA devices. 
beforeDevice = torch.cuda.current_device() for d in range(torch.cuda.device_count()): torch._C._cuda_setStream(self.beforeStreams[d]._cdata) torch._C._cuda_setDevice(beforeDevice) class CudaMemoryLeakCheck(): def __init__(self, testcase, name=None): self.name = testcase.id() if name is None else name self.testcase = testcase # initialize context & RNG to prevent false positive detections # when the test is the first to initialize those from torch.testing._internal.common_cuda import initialize_cuda_context_rng initialize_cuda_context_rng() # Stores CUDA memory data provided by PyTorch's caching allocator and # the CUDA driver. # # NOTE: The undocumented torch.cuda.mem_get_info() returns # (#free bytes, #total bytes available) on the GPU def __enter__(self): self.caching_allocator_befores = [] self.driver_befores = [] # Performs a gc if required (required if any CUDA memory is held) num_devices = torch.cuda.device_count() for i in range(num_devices): caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) # NOTE: gc is based exclusively on caching allocator memory # because the driver will always have some bytes in use (context size?) 
if caching_allocator_mem_allocated > 0: gc.collect() torch.cuda.empty_cache() break # Acquires caching allocator and driver statistics before the test is run for i in range(num_devices): self.caching_allocator_befores.append(torch.cuda.memory_allocated(i)) bytes_free, bytes_total = torch.cuda.mem_get_info(i) driver_mem_allocated = bytes_total - bytes_free self.driver_befores.append(driver_mem_allocated) def __exit__(self, exec_type, exec_value, traceback): # Don't check for leaks if an exception was thrown if exec_type is not None: return # Compares caching allocator before/after statistics # An increase in allocated memory is a discrepancy indicating a possible # memory leak discrepancy_detected = False num_devices = torch.cuda.device_count() for i in range(num_devices): caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: discrepancy_detected = True break # Short-circuits if no discrepancy detected if not discrepancy_detected: return # Validates the discrepancy persists after garbage collection and # is confirmed by the driver API # NOTE: driver API iscrepancies alone are ignored because with the jiterator # some tests may permanently increase the CUDA context size and # that will appear as a driver memory leak but is the expected behavior. 
# GCs and clears the cache gc.collect() torch.cuda.empty_cache() for i in range(num_devices): caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) bytes_free, bytes_total = torch.cuda.mem_get_info(i) driver_mem_allocated = bytes_total - bytes_free caching_allocator_discrepancy = False driver_discrepancy = False if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: caching_allocator_discrepancy = True if driver_mem_allocated > self.driver_befores[i]: driver_discrepancy = True if caching_allocator_discrepancy and not driver_discrepancy: # Just raises a warning if the leak is not validated by the # driver API # NOTE: this may be a problem with how the caching allocator collects its # statistics or a leak too small to trigger the allocation of an # additional block of memory by the CUDA driver msg = ("CUDA caching allocator reports a memory leak not " "verified by the driver API in {}! " "Caching allocator allocated memory was {} and is now reported as {} " "on device {}. " "CUDA driver allocated memory was {} and is now {}.").format( self.name, self.caching_allocator_befores[i], caching_allocator_mem_allocated, i, self.driver_befores[i], driver_mem_allocated) warnings.warn(msg) elif caching_allocator_discrepancy and driver_discrepancy: # A caching allocator discrepancy validated by the driver API is a # failure (except on ROCm, see below) msg = ("CUDA driver API confirmed a leak in {}! " "Caching allocator allocated memory was {} and is now reported as {} " "on device {}. 
" "CUDA driver allocated memory was {} and is now {}.").format( self.name, self.caching_allocator_befores[i], caching_allocator_mem_allocated, i, self.driver_befores[i], driver_mem_allocated) # See #62533 # ROCM: Sometimes the transient memory is reported as leaked memory if TEST_WITH_ROCM: warnings.warn(msg) else: raise RuntimeError(msg) @contextmanager def skip_exception_type(exc_type): try: yield except exc_type as e: raise unittest.SkipTest(f"not implemented: {e}") from e # "min_satisfying_examples" setting has been deprecated in hypythesis # 3.56.0 and removed in hypothesis 4.x try: import hypothesis def settings(*args, **kwargs): if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0): kwargs.pop('min_satisfying_examples') return hypothesis.settings(*args, **kwargs) hypothesis.settings.register_profile( "pytorch_ci", settings( derandomize=True, suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=50, verbosity=hypothesis.Verbosity.normal)) hypothesis.settings.register_profile( "dev", settings( suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=10, verbosity=hypothesis.Verbosity.normal)) hypothesis.settings.register_profile( "debug", settings( suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=1000, verbosity=hypothesis.Verbosity.verbose)) hypothesis.settings.load_profile( "pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev') ) except ImportError: print('Fail to import hypothesis in common_utils, tests are not derandomized') def check_if_enable(test: unittest.TestCase): test_suite = str(test.__class__).split('\'')[1] test_name = f'{test._testMethodName} ({test_suite})' if slow_tests_dict is not None and test_name in slow_tests_dict: getattr(test, test._testMethodName).__dict__['slow_test'] = True if not TEST_WITH_SLOW: raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable 
test") if not IS_SANDCASTLE and disabled_tests_dict is not None: if test_name in disabled_tests_dict: issue_url, platforms = disabled_tests_dict[test_name] platform_to_conditional: Dict = { "mac": IS_MACOS, "macos": IS_MACOS, "win": IS_WINDOWS, "windows": IS_WINDOWS, "linux": IS_LINUX, "rocm": TEST_WITH_ROCM, "asan": TEST_WITH_ASAN } if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]): raise unittest.SkipTest( f"Test is disabled because an issue exists disabling it: {issue_url}" + f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " + "If you're seeing this on your local machine and would like to enable this test, " + "please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.") if TEST_SKIP_FAST: if not getattr(test, test._testMethodName).__dict__.get('slow_test', False): raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST") # Acquires the comparison dtype, required since isclose # requires both inputs have the same dtype, and isclose is not supported # for some device x dtype combinations. # NOTE: Remaps bfloat16 to float32 since neither the CPU or CUDA device types # support needed bfloat16 comparison methods. # NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't # support needed float16 comparison methods. # TODO: Update this once bfloat16 and float16 are better supported. def get_comparison_dtype(a, b): # TODO: update this when promote_types supports bfloat16 and/or # isclose supports bfloat16. 
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype compare_dtype = torch.promote_types(a_dtype, b_dtype) # non-CUDA (CPU, for example) float16 -> float32 # TODO: update this when isclose is implemented for CPU float16 if (compare_dtype is torch.float16 and (a.device != b.device or a.device.type != 'cuda' or b.device.type != 'cuda')): compare_dtype = torch.float32 return compare_dtype # This implements a variant of assertRaises/assertRaisesRegex where we first test # if the exception is NotImplementedError, and if so just skip the test instead # of failing it. # # This is implemented by inheriting from the (private) implementation of # assertRaises from unittest.case, and slightly tweaking it for this new # behavior. The year is 2021: this private class hierarchy hasn't changed since # 2010, seems low risk to inherit from. class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext): def __exit__(self, exc_type, exc_value, tb): if exc_type is not None and issubclass(exc_type, NotImplementedError): self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined] return super().__exit__(exc_type, exc_value, tb) @contextmanager def set_warn_always_context(new_val: bool): old_val = torch.is_warn_always_enabled() torch.set_warn_always(new_val) try: yield finally: torch.set_warn_always(old_val) class TestCase(expecttest.TestCase): # NOTE: "precision" lets classes and generated tests set minimum # atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for # example. # NOTE: "rel_tol" lets classes and generated tests set minimum # rtol values when comparing tensors. Used by @toleranceOverride, for example. _precision: float = 0 _rel_tol: float = 0 # checker to early terminate test suite if unrecoverable failure occurs. 
    def _should_stop_test_suite(self):
        # Returns True when the CUDA context is already broken (a device-side
        # error surfaces as RuntimeError from synchronize); running further
        # tests in that state would produce cascading failures.
        if torch.cuda.is_initialized():
            # CUDA device side error will cause subsequence test cases to fail.
            # stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
            try:
                torch.cuda.synchronize()
            except RuntimeError as rte:
                # NOTE(review): `rte` is unused; the exception itself is the signal.
                return True
            return False
        else:
            return False

    @property
    def precision(self) -> float:
        return self._precision

    @precision.setter
    def precision(self, prec: float) -> None:
        self._precision = prec

    @property
    def rel_tol(self) -> float:
        return self._rel_tol

    @rel_tol.setter
    def rel_tol(self, prec: float) -> None:
        self._rel_tol = prec

    _do_cuda_memory_leak_check = False
    _do_cuda_non_default_stream = False

    # When True, if a test case raises a NotImplementedError, instead of failing
    # the test, skip it instead.
    _ignore_not_implemented_error = False

    def __init__(self, method_name='runTest'):
        super().__init__(method_name)

        test_method = getattr(self, method_name, None)
        if test_method is not None:
            # Wraps the tested method if we should do CUDA memory check.
            if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
                # &= so a per-method opt-out (attribute set False) wins over the class flag.
                self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
                # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
                if self._do_cuda_memory_leak_check and not IS_WINDOWS:
                    self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)

            # Wraps the tested method if we should enforce non default CUDA stream.
            self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
            if self._do_cuda_non_default_stream and not IS_WINDOWS:
                self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)

            if self._ignore_not_implemented_error:
                self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))

    def assertLeaksNoCudaTensors(self, name=None):
        name = self.id() if name is None else name
        return CudaMemoryLeakCheck(self, name)

    def enforceNonDefaultStream(self):
        return CudaNonDefaultStream()

    def wrap_with_cuda_policy(self, method_name, policy):
        test_method = getattr(self, method_name)
        # the import below may initialize CUDA context, so we do it only if
        # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
        # is True.
        # TODO: sure looks like we unconditionally initialize the context here
        # -- ezyang
        from torch.testing._internal.common_cuda import TEST_CUDA
        fullname = self.id().lower()  # class_name.method_name
        # Only wrap tests whose name suggests they touch the GPU.
        if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
            setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))

    def wrap_with_policy(self, method_name, policy):
        test_method = getattr(self, method_name)
        setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))

    # A policy is a zero-argument function that returns a context manager.
    # We don't take the context manager directly as it may be necessary to
    # construct it once per test method
    def wrap_method_with_policy(self, method, policy):
        # Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
        #       alive, so this cannot be done in setUp and tearDown because
        #       tearDown is run unconditionally no matter whether the test
        #       passes or not. For the same reason, we can't wrap the `method`
        #       call in try-finally and always do the check.
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            # policy() builds a fresh context manager per invocation (see note
            # on wrap_method_with_policy above).
            with policy():
                method(*args, **kwargs)
        return types.MethodType(wrapper, self)

    def wrap_with_cuda_memory_check(self, method):
        return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)

    # Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
    # termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
    # When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
    # show up red).
    # Otherwise, the flaky test will show up green while its stats are captured by test reports.
    def _run_with_retry(self, result=None, num_runs_left=0, report_only=True):
        if num_runs_left == 0:
            return

        using_unittest = isinstance(result, unittest.TestResult)
        if using_unittest:
            failures_before = 0 if result is None else len(result.failures)  # num tests marked as failed before starting
            errors_before = 0 if result is None else len(result.errors)  # num tests marked as errored before starting

        super().run(result=result)

        # Early terminate test if necessary.
        # NOTE(review): this is not guarded by `using_unittest`; if result is
        # None and the suite should stop, result.stop() would raise — TODO confirm
        # whether a None result can reach this point in practice.
        if self._should_stop_test_suite():
            result.stop()

        if not RETRY_TEST_CASES or not using_unittest:
            return

        err = sys.exc_info()
        num_retries_left = num_runs_left - 1
        # A new entry in result.failures/errors means this run failed/errored.
        if failures_before < len(result.failures):
            print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
            if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
                # Convert the recorded failure into an expected failure so the
                # retry (below) determines the final signal.
                result.failures.pop(-1)
                result.addExpectedFailure(self, err)
            self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
        elif errors_before < len(result.errors):
            print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
            if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
                result.errors.pop(-1)
                result.addExpectedFailure(self, err)
            self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
        elif report_only and num_retries_left < MAX_NUM_RETRIES:
            # A retry (not the first attempt) succeeded: record the flake.
            print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
            result.addUnexpectedSuccess(self)
            self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)

    def run(self, result=None):
        num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
        self._run_with_retry(result=result, num_runs_left=num_runs, report_only=not OVERRIDE_FLAKY_SIGNAL)

    def setUp(self):
        check_if_enable(self)
        set_rng_seed(SEED)

    @staticmethod
    def _make_crow_indices(n_rows, n_cols, nnz,
                           *, device, dtype, random=True):
        """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
        the number of specified elements nnz.

        If random is True, the column counts of rows are in random
        order. Otherwise, the column counts of rows are defined by the
        used sampling method.

        Sampling method
        ---------------

        The used sampling method was introduced in
        https://pearu.github.io/csr_sampling.html, and here we give only
        an overall description of the method.

        Notice that crow_indices can be defined as cumsum(counts)
        where counts is a sequence of non-negative integers satisfying
        the following conditions:

          len(counts) == n_rows + 1
          counts.max() <= n_cols

        while counts[i + 1] is interpreted as the number of specified
        elements in the i-th row.

        The used sampling method aims at increasing the diversity of
        CSR samples, that is, a CSR sample should contain (i) rows
        that are all filled, (ii) rows with no elements at all, and
        (iii) rows that are partially filled. At the same time and for
        the given total number of specified elements (nnz), there
        should be minimal preference to rows with a given number of
        elements.

        To achieve this, the sampling method is built-up on using a
        sawteeth model for counts. In the simplest case, we would have

          counts = arange(n_rows + 1) % (n_cols + 1)

        that has equal number of all possible column counts per row.
        This formula can be used only for specific input values of
        n_rows, n_cols, and nnz. To generalize this model to any
        combinations of inputs, the counts model above is extended
        with an incomplete sawtooth, and the right and lower
        rectangular parts that will guarantee that

          counts.sum() == nnz

        for any combination of n_rows, n_cols, and nnz. Basically,
        we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
        that is able to hold a sequence of sawteeth and so-called
        final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
        """
        assert 0 <= nnz <= n_rows * n_cols

        def sawteeth(n, m):
            # return the total number of counts in the sequence of
            # sawteeth where n and m define a window in (n_rows+1,
            # n_cols+1) rectangle where the sequence of sawteeth
            # perfectly fit.
            M = (n_cols - m) * (n_cols - m + 1) // 2
            K = (n_rows - n) % (n_cols - m + 1)
            return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2

        # Different from the original method description, here counts
        # has leading 0 required by crow_indices:
        counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))

        n = m = 0
        N = sawteeth(n, m)
        if N and nnz >= max(N, n_cols):
            # determine the width of the sawteeth window. We use bisection to solve
            #   N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
            # for n
            n_left = n
            n_right = n_rows - 1
            N_right = sawteeth(n_right, m)
            while n_right - n_left > 1:
                n_middle = (n_left + n_right) // 2
                N_middle = sawteeth(n_middle, m)
                if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
                    n_right, N_right = n_middle, N_middle
                else:
                    n_left = n_middle
            n, N = n_right, N_right
            # fill the right rectangle with counts:
            assert n
            counts[-n:].fill_(n_cols)

        if N and nnz - n * n_cols >= max(N, n_rows - n):
            # determine the height of the sawteeth window. We use bisection to solve
            #   N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
            # for m.
            m_left = m
            m_right = n_cols - 1
            N_right = sawteeth(n, m_right)
            while m_right - m_left > 1:
                m_middle = (m_left + m_right) // 2
                N_middle = sawteeth(n, m_middle)
                if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
                    m_right, N_right = m_middle, N_middle
                else:
                    m_left = m_middle
            m, N = m_right, N_right
            # fill the bottom rectangle with counts:
            assert m
            counts[1:n_rows - n + 1].fill_(m)

        if N:
            # fill the sawteeth window with counts
            q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
                          (n_cols - m) * (n_cols - m + 1) // 2)
            p = 1 + q * (n_cols - m + 1)
            if sys.version_info >= (3, 8):
                k = math.isqrt(2 * r)
            else:
                # math.isqrt(x) is available starting from Python 3.8.
                # Here we use int(math.sqrt(x)) as an approximation
                # that appears to give exact result for all x values
                # less than 2**35, at least, the upper limit of x is
                # TBD.
                k = int(math.sqrt(2 * r))
                if k * (k + 1) > 2 * r:
                    k -= 1
            corr = r - k * (k + 1) // 2
            assert not ((p > 1) and (m > 0))  # full sawteeth are never on top of a bottom rectangle

            # sequence of full sawteeth:
            counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)

            # incomplete sawtooth:
            counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
        else:
            # given input does not support sawteeth
            p = 1
            corr = nnz - n * n_cols - m * (n_rows - n)

        # correction that will guarantee counts.sum() == nnz:
        counts[p] += corr

        if random:
            # randomize crow_indices by shuffling the sawteeth
            # sequence:
            perm = torch.randperm(n_rows, device=counts.device)
            counts[1:] = counts[1:][perm]

        # compute crow_indices:
        crow_indices = counts
        crow_indices.cumsum_(dim=0)
        return crow_indices.to(device=device)

    def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
        # Builds a random CSR tensor; size must be exactly 2-D.
        sparse_dim = 2
        assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
        assert len(size) == sparse_dim

        def random_sparse_csr(n_rows, n_cols, nnz):
            crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
            col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
            for i in range(n_rows):
                count = crow_indices[i + 1] - crow_indices[i]
                # Sorted, distinct column indices for row i.
                col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
                    torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
            values = make_tensor([nnz], device=device, dtype=dtype, low=-1, high=1)
            return values, crow_indices, col_indices

        values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
        return torch.sparse_csr_tensor(crow_indices, col_indices, values, size=size, dtype=dtype, device=device)

    def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
        # Assert not given impossible combination, where the sparse dims have
        # empty numel, but nnz > 0 makes the indices containing values.
        assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'

        v_size = [nnz] + list(size[sparse_dim:])
        v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
        # Random indices scaled into the sparse-dim extents.
        i = torch.rand(sparse_dim, nnz, device=device)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)
        if is_uncoalesced:
            # Duplicate every index so the tensor is guaranteed uncoalesced.
            v = torch.cat([v, torch.randn_like(v)], 0)
            i = torch.cat([i, i], 1)

        x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)

        if not is_uncoalesced:
            x = x.coalesce()
        else:
            # FIXME: `x` is a sparse view of `v`. Currently rebase_history for
            #        sparse views is not implemented, so this workaround is
            #        needed for inplace operations done on `x`, e.g., copy_().
            #        Remove after implementing something equivalent to CopySlice
            #        for sparse views.
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
            x = x.detach().clone()
        return x, x._indices().clone(), x._values().clone()

    def safeToDense(self, t):
        return t.coalesce().to_dense()

    # Compares a torch function with a reference function for a given sample input (object of SampleInput)
    # Note: only values are compared, type comparison is not done here
    def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
        n_inp, n_args, n_kwargs = sample_input.numpy()
        t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs

        actual = torch_fn(t_inp, *t_args, **t_kwargs)
        expected = ref_fn(n_inp, *n_args, **n_kwargs)

        self.assertEqual(actual, expected, exact_device=False)

    # Compares the given Torch and NumPy functions on the given tensor-like object.
    # NOTE: both torch_fn and np_fn should be functions that take a single
    #   tensor (array). If the torch and/or NumPy function require additional
    #   arguments then wrap the function in a lambda or pass a partial function.
    # TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
    def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
                           device=None, dtype=None, **kwargs):
        assert TEST_NUMPY

        if isinstance(tensor_like, torch.Tensor):
            assert device is None
            assert dtype is None
            t_cpu = tensor_like.detach().cpu()
            # NumPy has no bfloat16, so compare via float32.
            if t_cpu.dtype is torch.bfloat16:
                t_cpu = t_cpu.float()
            a = t_cpu.numpy()
            t = tensor_like
        else:
            d = copy.copy(torch_to_numpy_dtype_dict)
            d[torch.bfloat16] = np.float32
            a = np.array(tensor_like, dtype=d[dtype])
            t = torch.tensor(tensor_like, device=device, dtype=dtype)

        np_result = np_fn(a)
        torch_result = torch_fn(t).cpu()

        # Converts arrays to tensors
        if isinstance(np_result, np.ndarray):
            try:
                np_result = torch.from_numpy(np_result)
            except Exception:
                # NOTE: copying an array before conversion is necessary when,
                #   for example, the array has negative strides.
                np_result = torch.from_numpy(np_result.copy())
            if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
                torch_result = torch_result.to(torch.float)

        self.assertEqual(np_result, torch_result, **kwargs)

    # Some analysis of tolerance by logging tests from test_torch.py can be found
    # in https://github.com/pytorch/pytorch/pull/32538.
    # dtype name : (rtol, atol)
    dtype_precisions = {
        torch.float16    : (0.001, 1e-5),
        torch.bfloat16   : (0.016, 1e-5),
        torch.float32    : (1.3e-6, 1e-5),
        torch.float64    : (1e-7, 1e-7),
        torch.complex32  : (0.001, 1e-5),
        torch.complex64  : (1.3e-6, 1e-5),
        torch.complex128 : (1e-7, 1e-7),
    }

    # Returns the "default" rtol and atol for comparing scalars or
    # tensors of the given dtypes.
    def _getDefaultRtolAndAtol(self, dtype0, dtype1):
        # Takes the looser (larger) tolerance of the two dtypes; unknown
        # (e.g. integral) dtypes fall back to (0, 0), i.e. exact comparison.
        rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
                   self.dtype_precisions.get(dtype1, (0, 0))[0])
        atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
                   self.dtype_precisions.get(dtype1, (0, 0))[1])
        return rtol, atol

    # Checks if two dense tensors are equal(-ish), returning (True, None)
    # when they are and (False, debug_msg) when they are not.
    # If exact_dtype is true both tensors must have the same dtype.
    # If exact_device is true both tensors must be on the same device.
    # See the "Test Framework Tensor 'Equality'" note for more details.
    # NOTE: tensors on different devices are moved to the CPU to be compared when
    #   exact_device is False.
    # NOTE: this function checks the tensors' devices, sizes, and dtypes
    #  and acquires the appropriate device, dtype, rtol and atol to compare
    #  them with. It then calls _compare_tensors_internal.
    def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
                        exact_dtype=True, exact_device=False) -> _compare_return_type:
        # rtol and atol must be given together or not at all.
        assert (atol is None) == (rtol is None)
        if not isinstance(a, torch.Tensor):
            return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
        if not isinstance(b, torch.Tensor):
            return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))

        # Validates tensors are on the same device
        if exact_device and a.device != b.device:
            return (False, ("Attempted to compare equality of tensors on "
                            "different devices! Got devices {0} and "
                            "{1}.".format(a.device, b.device)))

        # Compares tensors of different devices on the CPU
        if a.device != b.device:
            a = a.cpu()
            b = b.cpu()

        # Checks size matches
        if a.size() != b.size():
            return (False, ("Attempted to compare equality of tensors with "
                            "different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))

        # Checks dtype (if exact_dtype)
        if exact_dtype and a.dtype is not b.dtype:
            return (False, ("Attempted to compare equality of tensors with "
                            "different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))

        # Acquires rtol and atol
        if rtol is None:
            rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)

        # Per-test overrides (precision/rel_tol) only ever loosen tolerances.
        atol = max(atol, self.precision)
        rtol = max(rtol, self.rel_tol)

        # Converts to comparison dtype
        dtype = get_comparison_dtype(a, b)
        a = a.to(dtype)
        b = b.to(dtype)

        return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)

    # Checks if two scalars are equal(-ish), returning (True, None)
    # when they are and (False, debug_msg) when they are not.
    # NOTE: this function just acquires rtol and atol
    #   before calling _compare_scalars_internal.
    def _compareScalars(self, a, b, *,
                        rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
        # Acquires rtol and atol
        assert (atol is None) == (rtol is None)
        if rtol is None:
            # Complex/float scalars borrow the default tensor tolerances;
            # everything else (ints, bools) is compared exactly.
            if isinstance(a, complex) or isinstance(b, complex):
                rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
            elif isinstance(a, float) or isinstance(b, float):
                rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
            else:
                rtol, atol = 0, 0
        rtol = cast(float, rtol)
        atol = cast(float, atol)
        assert atol is not None
        atol = max(atol, self.precision)
        rtol = max(rtol, self.rel_tol)

        return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)

    # Construct assert messages based on internal debug message and user provided message.
    def _get_assert_msg(self, msg, debug_msg=None):
        # Combines the framework's debug message with the user-supplied one.
        if msg is None:
            return debug_msg
        else:
            return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"

    def assertEqualIgnoreType(self, *args, **kwargs) -> None:
        # If you are seeing this function used, that means test is written wrongly
        # and deserves detailed investigation
        return self.assertEqual(*args, exact_dtype=False, **kwargs)

    def _is_dict(self, obj):
        return isinstance(obj, (dict, torch._C.ScriptDict))  # type: ignore[attr-defined]

    # Compares x and y by dispatching on their types (tensor/ndarray/scalar/
    # string/set/dict/type/iterable/bool), recursing for containers.
    # TODO: default exact_device to True
    def assertEqual(self, x, y, msg: Optional[str] = None, *,
                    atol: Optional[float] = None, rtol: Optional[float] = None,
                    equal_nan=True, exact_dtype=True, exact_device=False) -> None:
        assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
        debug_msg: Optional[str] = None

        if x is None or y is None:
            self.assertTrue(x is None and y is None)
        # Tensor x Number and Number x Tensor comparisons
        elif isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x np.bool
        elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
            self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
            self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
                             exact_dtype=exact_dtype, exact_device=exact_device)
        # Tensor x Tensor
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            debug_msg = ("Attempted to compare with different is_sparse settings: "
                         f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
            super().assertEqual(x.is_sparse, y.is_sparse,
                                msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
            debug_msg = ("Attempted to compare with different is_quantized settings: "
                         f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
            super().assertEqual(x.is_quantized, y.is_quantized,
                                msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
            if x.is_sparse:
                if x.size() != y.size():
                    debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
                                        f"Expected: {x.size()}; Actual: {y.size()}.")
                    super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))

                # Coalesce so indices/values are in canonical form before comparing.
                x = x.coalesce()
                y = y.coalesce()
                indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
                                                                         rtol=rtol, atol=atol,
                                                                         equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                         exact_device=exact_device)

                if not indices_result:
                    assert debug_msg_indices is not None
                    debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
                super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))

                values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
                                                                       rtol=rtol, atol=atol,
                                                                       equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                       exact_device=exact_device)

                if not values_result:
                    assert debug_msg_values is not None
                    debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
                super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
            elif x.is_quantized and y.is_quantized:
                self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype, exact_device=exact_device)

                if x.qscheme() == torch.per_tensor_affine:
                    self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype, exact_device=exact_device)
                    self.assertEqual(x.q_zero_point(), y.q_zero_point(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                elif x.qscheme() == torch.per_channel_affine:
                    self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
                                     msg=msg, exact_dtype=exact_dtype, exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)
                    self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
                                     atol=atol, rtol=rtol, msg=msg,
                                     exact_dtype=exact_dtype, exact_device=exact_device)

                # Compare the underlying integer representations.
                result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
                                                                 y.int_repr().to(torch.int32),
                                                                 atol=atol, rtol=rtol,
                                                                 exact_dtype=exact_dtype,
                                                                 exact_device=exact_device)

                if not result:
                    assert debug_msg_compare is not None
                    debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
                super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
            else:
                result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
                                                                 equal_nan=equal_nan, exact_dtype=exact_dtype,
                                                                 exact_device=exact_device)

                if not result:
                    assert debug_msg_generic is not None
                    debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
                super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
        elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
            def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
                if not isinstance(a, np.ndarray):
                    return a

                try:
                    return torch.from_numpy(a)
                except TypeError:
                    # This happens if the dtype is non-numeric or not supported by torch
                    return a

            def maybe_to_list(a: Any) -> Any:
                if not isinstance(a, (np.ndarray, torch.Tensor)):
                    return a

                return a.tolist()

            x = maybe_to_tensor(x)
            y = maybe_to_tensor(y)

            if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
                self.assertEqual(
                    x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
                )
            else:
                # In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
                self.assertEqual(
                    maybe_to_list(x),
                    maybe_to_list(y),
                    atol=atol,
                    rtol=rtol,
                    msg=msg,
                    exact_dtype=exact_dtype,
                    exact_device=exact_device
                )
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            debug_msg = ("Attempted to compare [string] types: "
                         f"Expected: {repr(x)}; Actual: {repr(y)}.")
            super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
        elif type(x) == set and type(y) == set:
            debug_msg = ("Attempted to compare [set] types: "
                         f"Expected: {x}; Actual: {y}.")
            super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
        elif self._is_dict(x) and self._is_dict(y):
            if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
                # Ordered dicts must also agree on insertion order.
                self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
            else:
                self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
                                 msg=msg, exact_dtype=exact_dtype,
                                 exact_device=exact_device)
                key_list = list(x.keys())
                self.assertEqual([x[k] for k in key_list],
                                 [y[k] for k in key_list],
                                 atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, type) and isinstance(y, type):
            # See TestTorch.test_assert_equal_generic_meta
            debug_msg = ("Attempted to compare [type] types: "
                         f"Expected: {x}; Actual: {y}.")
            super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
        elif is_iterable(x) and is_iterable(y):
            debug_msg = ("Attempted to compare the lengths of [iterable] types: "
                         f"Expected: {len(x)}; Actual: {len(y)}.")
            super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
                                 exact_dtype=exact_dtype, exact_device=exact_device)
        elif isinstance(x, bool) and isinstance(y, bool):
            super().assertTrue(x == y, msg=msg)

        # Scalar x Scalar
        elif isinstance(x, Number) and isinstance(y, Number):
            result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
                                                             equal_nan=equal_nan)
            if not result:
                assert debug_msg_scalars is not None
                debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
            super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
        else:
            super().assertEqual(x, y, msg=msg)

    def assertNotEqual(self, x, y, msg: Optional[str] = None, *,  # type: ignore[override]
                       atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
        # Inverts assertEqual: passes iff assertEqual would raise.
        with self.assertRaises(AssertionError, msg=msg):
            self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)

    def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate deprecated x.type() == y.type()
        self.assertEqual(x.device, y.device)
        self.assertEqual(x.dtype, y.dtype)
        self.assertEqual(x.is_sparse, y.is_sparse)

    def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
        # Identity (is) membership, not equality membership.
        for elem in iterable:
            if id(obj) == id(elem):
                return
        raise AssertionError("object not found in iterable")

    # Reimplemented to provide special behavior when
    # _ignore_not_implemented_error is True
    def assertRaises(self, expected_exception, *args, **kwargs):
        if self._ignore_not_implemented_error:
            context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
                AssertRaisesContextIgnoreNotImplementedError(expected_exception, self)  # type: ignore[call-arg]
            try:
                return context.handle('assertRaises', args, kwargs)  # type: ignore[union-attr]
            finally:
                # see https://bugs.python.org/issue23890
                context = None
        else:
            return super().assertRaises(expected_exception, *args, **kwargs)

    # Reimplemented to provide special behavior when
    # _ignore_not_implemented_error is True
    def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
        # Verifies that an exception with the type expected_exception and message
        # matching the regular expression defined by expected_regex is thrown.
        # If the test is instantiated for a non-native device type (like XLA)
        # then the message is not validated.
        # Checks whether the test is instantiated for a device type by testing
        # if the test class has defined the device_type attribute and,
        # if so, tests whether the instantiated device type is native or not
        if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES:  # type: ignore[attr-defined]
            # empty string matches any string
            expected_regex = ''

        if self._ignore_not_implemented_error:
            context = AssertRaisesContextIgnoreNotImplementedError(  # type: ignore[call-arg]
                expected_exception, self, expected_regex)
            return context.handle('assertRaisesRegex', args, kwargs)  # type: ignore[attr-defined]
        else:
            return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)

    # TODO: Support context manager interface
    # NB: The kwargs forwarding to callable robs the 'subname' parameter.
    # If you need it, manually apply your callable in a lambda instead.
    def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
        subname = None
        if 'subname' in kwargs:
            subname = kwargs['subname']
            del kwargs['subname']
        try:
            callable(*args, **kwargs)
        except exc_type as e:
            self.assertExpected(str(e), subname)
            return
        # Don't put this in the try block; the AssertionError will catch it
        self.fail(msg="Did not raise when expected to")

    def assertNotWarn(self, callable, msg=''):
        r"""
        Test if :attr:`callable` does not raise a warning.
        """
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            with set_warn_always_context(True):
                callable()
            self.assertTrue(len(ws) == 0, msg)

    @contextmanager
    def assertWarnsOnceRegex(self, category, regex=''):
        """Context manager for code that *must always* warn

        This filters expected warnings from the test and fails if
        the expected warning is not caught. It uses set_warn_always() to force
        TORCH_WARN_ONCE to behave like TORCH_WARN
        """
        pattern = re.compile(regex)
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            with set_warn_always_context(True):
                yield
            if len(ws) == 0:
                self.fail('no warning caught')
            # Both checks must hold: some warning of the right category, and
            # some warning whose message matches the pattern.
            self.assertTrue(any([type(w.message) is category for w in ws]))
            self.assertTrue(
                any([re.match(pattern, str(w.message)) for w in ws]),
                f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')

    def assertExpected(self, s, subname=None):
        r"""
        Test that a string matches the recorded contents of a file
        derived from the name of this test and subname.  The file
        is placed in the 'expect' directory in the same directory
        as the test script. You can automatically update the recorded test
        output using --accept.

        If you call this multiple times in a single function, you
        must give a unique subname each time.
        """
        if not isinstance(s, str):
            raise TypeError("assertExpected is strings only")

        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix):]
            return text
        # NB: we take __file__ from the module that defined the test
        # class, so we place the expect directory where the test script
        # lives, NOT where test/common_utils.py lives.  This doesn't matter in
        # PyTorch where all test scripts are in the same directory as
        # test/common_utils.py, but it matters in onnx-pytorch
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        expected_file = os.path.join(os.path.dirname(test_file),
                                     "expect",
                                     munged_id)

        subname_output = ""
        if subname:
            expected_file += "-" + subname
            subname_output = " ({})".format(subname)
        expected_file += ".expect"
        expected = None

        def accept_output(update_type):
            # Rewrites the expect file with the current output.
            print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
            with open(expected_file, 'w') as f:
                # Adjust for producer_version, leave s unmodified
                s_tag = re.sub(r'(producer_version): "[0-9.]*"',
                               r'\1: "CURRENT_VERSION"', s)
                f.write(s_tag)

        try:
            with open(expected_file) as f:
                expected = f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            elif expecttest.ACCEPT:
                return accept_output("output")
            else:
                raise RuntimeError(
                    ("I got this output for {}{}:\n\n{}\n\n"
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None

        # a hack for JIT tests
        if IS_WINDOWS:
            expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
            s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)

        # Adjust for producer_version
        expected = expected.replace(
            'producer_version: "CURRENT_VERSION"',
            'producer_version: "{}"'.format(torch.onnx.producer_version)
        )
        if expecttest.ACCEPT:
            if expected != s:
                return accept_output("updated output")
        else:
            if hasattr(self, "assertMultiLineEqual"):
                # Python 2.7 only
                # NB: Python considers lhs "old" and rhs "new".
                self.assertMultiLineEqual(expected, s)
            else:
                self.assertEqual(s, expected)

    def assertExpectedStripMangled(self, s, subname=None):
        # Strips TorchScript name mangling before comparing against the expect file.
        s = re.sub(r'__torch__[^ ]+', '', s)
        self.assertExpected(s, subname)

    def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """Assert that ``first`` is greater than or almost equal to ``second``.

        The equality of ``first`` and ``second`` is determined in a similar way to
        the ``assertAlmostEqual`` function of the standard library.
        """
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")

        if first >= second:
            return

        diff = second - first
        if delta is not None:
            if diff <= delta:
                return
            standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
        else:
            if places is None:
                places = 7
            if round(diff, places) == 0:
                return
            standardMsg = f"{first} not greater than or equal to {second} within {places} places"

        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)

    # run code in subprocess and capture exceptions.
    @staticmethod
    def run_process_no_exception(code, env=None):
        import subprocess

        popen = subprocess.Popen(
            [sys.executable, '-c', code],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env)
        (stdout, stderr) = popen.communicate()
        return (stdout, stderr)

    # returns captured stderr
    @staticmethod
    def runWithPytorchAPIUsageStderr(code):
        env = os.environ.copy()
        env["PYTORCH_API_USAGE_STDERR"] = "1"
        # remove IN_CI flag since this is a wrapped test process.
        # IN_CI flag should be set in the parent process only.
        if "IN_CI" in env.keys():
            del env["IN_CI"]
        (stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
        return stderr.decode('ascii')


def download_file(url, binary=True):
    # Downloads (and caches) a test fixture into the writable data directory;
    # skips the calling test if the download fails.
    from urllib.parse import urlsplit
    from urllib import request, error

    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    path = os.path.join(data_dir, filename)

    if os.path.exists(path):
        return path
    try:
        data = request.urlopen(url, timeout=15).read()
        with open(path, 'wb' if binary else 'w') as f:
            f.write(data)
        return path
    except error.URLError as e:
        msg = "could not download test file '{}'".format(url)
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg) from e


def find_free_port():
    """
    Finds an available port and returns that port number.

    NOTE: If this function is being used to allocate a port to Store (or
    indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator as there is a potential
    race condition where the allocated port may become unavailable before it can be used
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 0))
        _, port = sock.getsockname()
        return port


# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."


# NOTE(review): the default `(ADDRESS_IN_USE)` is a parenthesized string, not a
# 1-tuple (no trailing comma) — presumably harmless if matching uses substring
# containment, but verify against the decorator body before relying on it.
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)):
    """Reruns a test if the test returns a RuntimeError and the exception
    contains one of the strings in connect_errors."""
    # This if block is executed when using this function as a decorator with arguments.
if func is None: return partial(retry_on_connect_failures, connect_errors=connect_errors) @wraps(func) def wrapper(*args, **kwargs): n_retries = 10 tries_remaining = n_retries while True: try: return func(*args, **kwargs) except RuntimeError as error: if any(connect_error in str(error) for connect_error in connect_errors): tries_remaining -= 1 if tries_remaining == 0: raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}") time.sleep(random.random()) continue raise return wrapper # Decorator to retry upon certain Exceptions. def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False): def deco_retry(f): @wraps(f) def f_retry(*args, **kwargs): mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except ExceptionToCheck as e: msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) print(msg) time.sleep(mdelay) mtries -= 1 try: return f(*args, **kwargs) except ExceptionToCheck as e: raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e return f_retry # true decorator return deco_retry # Methods for matrix generation def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'): assert rank <= l A = torch.randn(l, l, dtype=dtype, device=device) u, s, vh = torch.linalg.svd(A, full_matrices=False) for i in range(l): if i >= rank: s[i] = 0 elif s[i] == 0: s[i] = 1 return (u * s.to(dtype).unsqueeze(-2)) @ vh def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001): """ Returns a random rectangular matrix (batch of matrices) with singular values sampled from a Gaussian with mean `mean` and standard deviation `sigma`. The smaller the `sigma`, the better conditioned the output matrix is. 
""" primitive_dtype = { torch.float: torch.float, torch.double: torch.double, torch.cfloat: torch.float, torch.cdouble: torch.double } x = torch.rand(shape, dtype=dtype, device=device) m = x.size(-2) n = x.size(-1) u, _, vh = torch.linalg.svd(x, full_matrices=False) s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \ .sort(-1, descending=True).values.to(dtype) return (u * s.unsqueeze(-2)) @ vh # Returns a noncontiguous (tensor with the same shape and values as t # The noncontiguous tensor is constructed such that elements in the innermost # dimension are separated by zeros or (whenever possible) nans # TODO: consider more complicated noncontiguity schemes def noncontiguous_like(t): # Short-circuits if t is already noncontiguous if not t.is_contiguous(): return t # Special-cases 0-dim tensors if t.ndim == 0: result = t.detach().unsqueeze(0).repeat_interleave(2, dim=-1) if t.dtype.is_floating_point or t.dtype.is_complex: result[0] = math.nan else: result[0] = 0 result.set_(result.storage(), 1, t.size(), ()) result.requires_grad_(t.requires_grad) return result # 1+ dim tensor case result = torch.repeat_interleave(t.detach(), 2, dim=-1) if t.dtype.is_floating_point or t.dtype.is_complex: result[..., 1::2] = math.nan else: result[..., 1::2] = 0 strides = list(result.stride()) strides[-1] = strides[-1] * 2 result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides)) result.requires_grad_(t.requires_grad) return result # TODO: remove this (prefer make_symmetric_matrices below) def random_symmetric_matrix(l, *batches, **kwargs): dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) A = (A + A.mT).div_(2) return A # Creates a symmetric matrix or batch of symmetric matrices # Shape must be a square matrix or batch of square matrices def make_symmetric_matrices(*shape, device, dtype): assert shape[-1] == 
shape[-2] t = make_tensor(shape, device=device, dtype=dtype) t = (t + t.mT).div_(2) return t def random_hermitian_matrix(l, *batches, **kwargs): dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) A = (A + A.mH).div_(2) return A def random_symmetric_psd_matrix(l, *batches, **kwargs): """ Returns a batch of random symmetric positive-semi-definite matrices. The shape of the result is batch_dims + (matrix_size, matrix_size) The following example creates a tensor of size 2 x 4 x 3 x 3 >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device) """ dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) return A @ A.mT def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'): """ Returns a batch of random Hermitian positive-semi-definite matrices. The shape of the result is batch_dims + (matrix_size, matrix_size) The following example creates a tensor of size 2 x 4 x 3 x 3 >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device) """ A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) return A @ A.mH # TODO: remove this (prefer make_symmetric_pd_matrices below) def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs): dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) return torch.matmul(A, A.mT) \ + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5 # Creates a symmetric positive-definite matrix or batch of # such matrices def make_symmetric_pd_matrices(*shape, device, dtype): assert shape[-1] == shape[-2] t = make_tensor(shape, device=device, dtype=dtype) i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5 return t @ t.mT + i def 
random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device): """ Returns a batch of random Hermitian positive-definite matrices. The shape of the result is batch_dims + (matrix_size, matrix_size) The following example creates a tensor of size 2 x 4 x 3 x 3 >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device) """ A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device) # TODO: remove this (prefer make_fullrank_matrices_with_distinct_singular_values below) def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims, **kwargs): dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') silent = kwargs.get("silent", False) if silent and not torch._C.has_lapack: return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device) A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device) u, _, vh = torch.linalg.svd(A, full_matrices=False) real_dtype = A.real.dtype if A.dtype.is_complex else A.dtype s = torch.arange(1., matrix_size + 1, dtype=real_dtype, device=device).mul_(1.0 / (matrix_size + 1)) return (u * s.to(A.dtype)) @ vh # Creates a full rank matrix with distinct signular values or # a batch of such matrices # Shape must be a square matrix or batch of square matrices def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype): assert shape[-1] == shape[-2] t = make_tensor(shape, device=device, dtype=dtype) u, _, vh = torch.linalg.svd(t, full_matrices=False) # TODO: improve the handling of complex tensors here real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype s = torch.arange(1., shape[-1] + 1, dtype=real_dtype, device=device).mul_(1.0 / (shape[-1] + 1)) return (u * s.to(dtype)) @ vh def random_matrix(rows, columns, *batch_dims, **kwargs): """Return rectangular matrix or batches of rectangular matrices. 
Parameters: dtype - the data type device - the device kind singular - when True, the output will be singular """ dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') silent = kwargs.get("silent", False) singular = kwargs.get("singular", False) if silent and not torch._C.has_lapack: return torch.ones(rows, columns, dtype=dtype, device=device) A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device) u, _, vh = torch.linalg.svd(A, full_matrices=False) k = min(rows, columns) s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device) if singular: # make matrix singular s[k - 1] = 0 if k > 2: # increase the order of singularity so that the pivoting # in LU factorization will be non-trivial s[0] = 0 return (u * s.unsqueeze(-2)) @ vh def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs): """Return rectangular matrix or batches of rectangular matrices with given rank. """ B = random_matrix(rows, rank, *batch_dims, **kwargs) C = random_matrix(rank, columns, *batch_dims, **kwargs) return B.matmul(C) def random_sparse_matrix(rows, columns, density=0.01, **kwargs): """Return rectangular random sparse matrix within given density. The density of the result approaches to given density as the size of the matrix is increased and a relatively small value of density is specified but higher than min(rows, columns)/(rows * columns) for non-singular matrices. 
""" dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') singular = kwargs.get("singular", False) k = min(rows, columns) nonzero_elements = max(min(rows, columns), int(rows * columns * density)) row_indices = [i % rows for i in range(nonzero_elements)] column_indices = [i % columns for i in range(nonzero_elements)] random.shuffle(column_indices) indices = [row_indices, column_indices] values = torch.randn(nonzero_elements, dtype=dtype, device=device) # ensure that the diagonal dominates values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp() indices_tensor = torch.tensor(indices) A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device) return A.coalesce() def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs): """Return random sparse positive-definite matrix with given density. The eigenvalues of the matrix are defined as:: arange(1, matrix_size+1)/matrix_size Algorithm: A = diag(arange(1, matrix_size+1)/matrix_size) while <A density is smaller than required>: <choose random i, j in range(matrix_size), theta in [0, 2*pi]> R = <rotation matrix (i,j,theta)> A = R^T A R """ import math torch = kwargs.get('torch', globals()['torch']) dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') data = dict([((i, i), float(i + 1) / matrix_size) for i in range(matrix_size)]) def multiply(data, N, i, j, cs, sn, left=True): for k in range(N): if left: ik, jk = (k, i), (k, j) else: ik, jk = (i, k), (j, k) aik, ajk = data.get(ik, 0), data.get(jk, 0) aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk if aik: data[ik] = aik else: data.pop(ik, None) if ajk: data[jk] = ajk else: data.pop(jk, None) target_nnz = density * matrix_size * matrix_size while len(data) < target_nnz: i = random.randint(0, matrix_size - 1) j = random.randint(0, matrix_size - 1) if i != j: theta = random.uniform(0, 2 * math.pi) cs = math.cos(theta) sn = math.sin(theta) 
multiply(data, matrix_size, i, j, cs, sn, left=True) multiply(data, matrix_size, i, j, cs, sn, left=False) icoords, jcoords, values = [], [], [] for (i, j), v in sorted(data.items()): icoords.append(i) jcoords.append(j) values.append(v) indices_tensor = torch.tensor([icoords, jcoords]) return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device) def do_test_dtypes(self, dtypes, layout, device): for dtype in dtypes: if dtype != torch.float16: out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device) self.assertIs(dtype, out.dtype) self.assertIs(layout, out.layout) self.assertEqual(device, out.device) def do_test_empty_full(self, dtypes, layout, device): shape = torch.Size([2, 3]) def check_value(tensor, dtype, layout, device, value, requires_grad): self.assertEqual(shape, tensor.shape) self.assertIs(dtype, tensor.dtype) self.assertIs(layout, tensor.layout) self.assertEqual(tensor.requires_grad, requires_grad) if tensor.is_cuda and device is not None: self.assertEqual(device, tensor.device) if value is not None: fill = tensor.new(shape).fill_(value) self.assertEqual(tensor, fill) def get_int64_dtype(dtype): module = '.'.join(str(dtype).split('.')[1:-1]) if not module: return torch.int64 return operator.attrgetter(module)(torch).int64 default_dtype = torch.get_default_dtype() check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False) check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False) for dtype in dtypes: for rg in {dtype.is_floating_point, False}: int64_dtype = get_int64_dtype(dtype) v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg) check_value(v, dtype, layout, device, None, rg) out = v.new() check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg), dtype, layout, device, None, rg) check_value(v.new_empty(shape), dtype, layout, device, None, False) check_value(v.new_empty(shape, 
dtype=int64_dtype, device=device, requires_grad=False), int64_dtype, layout, device, None, False) check_value(torch.empty_like(v), dtype, layout, device, None, False) check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), int64_dtype, layout, device, None, False) if dtype is not torch.float16 and layout != torch.sparse_coo: fv = 3 v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg) check_value(v, dtype, layout, device, fv, rg) check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False) out = v.new() check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg), dtype, layout, device, fv + 2, rg) check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False), int64_dtype, layout, device, fv + 3, False) check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False) check_value(torch.full_like(v, fv + 5, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), int64_dtype, layout, device, fv + 5, False) # this helper method is to recursively # clone the tensor-type input of operators tested by OpInfo def clone_input_helper(input): if isinstance(input, torch.Tensor): return torch.clone(input) if isinstance(input, Sequence): return tuple(map(clone_input_helper, input)) return input THESE_TAKE_WAY_TOO_LONG = { 'test_Conv3d_groups', 'test_conv_double_backward', 'test_conv_double_backward_groups', 'test_Conv3d_dilated', 'test_Conv3d_stride_padding', 'test_Conv3d_dilated_strided', 'test_Conv3d', 'test_Conv2d_dilated', 'test_ConvTranspose3d_dilated', 'test_ConvTranspose2d_dilated', 'test_snli', 'test_Conv2d', 'test_Conv2d_padding', 'test_ConvTranspose2d_no_bias', 'test_ConvTranspose2d', 'test_ConvTranspose3d', 'test_Conv2d_no_bias', 'test_matmul_4d_4d', 'test_multinomial_invalid_probs', } running_script_path = None def set_running_script_path(): global running_script_path try: 
running_file = os.path.abspath(os.path.realpath(sys.argv[0])) if running_file.endswith('.py'): # skip if the running file is not a script running_script_path = running_file except Exception: pass def check_test_defined_in_running_script(test_case): if running_script_path is None: return test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \ "is not defined in the running script \"{}\", but in \"{}\". Did you " \ "accidentally import a unittest.TestCase from another file?".format( test_case.id(), running_script_path, test_case_class_file) def load_tests(loader, tests, pattern): set_running_script_path() test_suite = unittest.TestSuite() for test_group in tests: for test in test_group: check_test_defined_in_running_script(test) test_suite.addTest(test) return test_suite class BytesIOContext(io.BytesIO): def __enter__(self): return self def __exit__(self, *args): pass # Tentative value for nondet_tol for gradcheck when backward implementation # relies on nondeterministic operations, i.e., those listed here: # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html # # For more information see https://github.com/pytorch/pytorch/issues/56202 GRADCHECK_NONDET_TOL = 1e-12 def gradcheck(fn, inputs, **kwargs): # Wrapper around gradcheck that enables certain keys by default. # Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks # to be disabled to default for the public-facing api to avoid breaking user code. # # All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck. 
default_values = { "check_batched_grad": True, "fast_mode": True, } if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0FF") == "ON": default_values["fast_mode"] = False for key, value in default_values.items(): # default value override values explicitly set to None k = kwargs.get(key, None) kwargs[key] = k if k is not None else value return torch.autograd.gradcheck(fn, inputs, **kwargs) def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs): # Wrapper around gradgradcheck that enables certain keys by default # See gradcheck above for an explanation of why we need something like this. # # All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck default_values = { "check_batched_grad": True, "fast_mode": True, } if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "0FF") == "ON": default_values["fast_mode"] = False for key, value in default_values.items(): # default value override values explicitly set to None k = kwargs.get(key, None) kwargs[key] = k if k is not None else value return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs) def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs): # call assert function rather than returning a bool since it's nicer # if we get whether this failed on the gradcheck or the gradgradcheck. test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs)) test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs)) @contextmanager def set_cwd(path: str) -> Iterator[None]: old_cwd = os.getcwd() try: os.chdir(path) yield finally: os.chdir(old_cwd) # Using @precisionOverride specific to your test is the recommended way # of doing this. These are just some values that worked for test_nn. 
dtype2prec_DONTUSE = {torch.float: 1e-5,
                      torch.double: 1e-5,
                      torch.half: 1e-2,
                      torch.bfloat16: 1e-1}


def _wrap_warn_once(regex):
    # Decorator factory for test methods: asserts that running the wrapped
    # test raises a UserWarning matching `regex`, checked through the
    # TestCase's assertWarnsOnceRegex helper.
    def decorator(fn):
        def inner(self, *args, **kwargs):
            with self.assertWarnsOnceRegex(UserWarning, regex):
                fn(self, *args, **kwargs)
        return inner
    return decorator


# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        f(self, *args, **kwargs, coalesced=True)
        f(self, *args, **kwargs, coalesced=False)
    return wrapped


@contextlib.contextmanager
def disable_gc():
    # Context manager that disables the garbage collector for the duration of
    # the block and restores it afterwards. If GC is already disabled, the
    # block runs unchanged (and GC is NOT re-enabled on exit).
    if gc.isenabled():
        try:
            gc.disable()
            yield
        finally:
            gc.enable()
    else:
        yield


def find_library_location(lib_name: str) -> Path:
    # return the shared library file in the installed folder if exist,
    # else the file in the build folder
    torch_root = Path(torch.__file__).resolve().parent
    path = torch_root / 'lib' / lib_name
    if os.path.exists(path):
        return path
    torch_root = Path(__file__).resolve().parent.parent.parent
    return torch_root / 'build' / 'lib' / lib_name


def sandcastle_skip(reason):
    """
    Similar to unittest.skip, however in the sandcastle environment it just
    "passes" the test instead to avoid creating tasks complaining about tests
    skipping continuously.
    """
    def decorator(func):
        # Outside sandcastle: mark the test skipped via unittest's own protocol.
        if not IS_SANDCASTLE:
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = reason
            return func

        # On sandcastle: replace the test body with a no-op that logs the
        # reason, so the test "passes" rather than being reported as skipped.
        @wraps(func)
        def wrapper(*args, **kwargs):
            print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
            return
        return wrapper

    return decorator


def mock_wrapper(method):
    """
    Returns a function that calls the real implementation of a method
    in addition to passing args to a mock object.
    """
    mock = MagicMock()

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # Record the call on the mock (without `self`), then delegate to the
        # real implementation so behavior is unchanged.
        mock(*args, **kwargs)
        return method(self, *args, **kwargs)
    # Expose the mock so callers can assert on recorded calls.
    wrapper.mock = mock  # type: ignore[attr-defined]
    return wrapper


def get_tensors_from(args, kwargs):
    """ Returns a set of all Tensor objects in the given args and kwargs. """
    return set([arg for arg in args if isinstance(arg, Tensor)] +
               [v for v in kwargs.values() if isinstance(v, Tensor)])


# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
    # Map each supported dtype to the ctypes type used to reinterpret the raw
    # bytes. Complex dtypes map to their component (real/imag) float type.
    # NOTE: float16/bfloat16 are not covered by this table (KeyError if passed).
    dtype_to_ctype: Dict[torch.dtype, Any] = {
        torch.int8: ctypes.c_int8,
        torch.uint8: ctypes.c_uint8,
        torch.int16: ctypes.c_int16,
        torch.int32: ctypes.c_int32,
        torch.int64: ctypes.c_int64,
        torch.bool: ctypes.c_bool,
        torch.float32: ctypes.c_float,
        torch.complex64: ctypes.c_float,
        torch.float64: ctypes.c_double,
        torch.complex128: ctypes.c_double,
    }
    ctype = dtype_to_ctype[dtype]
    num_bytes = ctypes.sizeof(ctype)

    def check_bytes(byte_list):
        # Every entry must be a valid unsigned byte value.
        for byte in byte_list:
            assert 0 <= byte <= 255

    if dtype.is_complex:
        # Complex scalars are encoded as real-part bytes followed by
        # imaginary-part bytes.
        assert len(byte_list) == (num_bytes * 2)
        check_bytes(byte_list)
        real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
            *byte_list[:num_bytes])).value
        imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
            *byte_list[num_bytes:])).value
        res = real + 1j * imag
    else:
        assert len(byte_list) == num_bytes
        check_bytes(byte_list)
        res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
            *byte_list)).value

    return torch.tensor(res, device=device, dtype=dtype)


def has_breakpad():
    # We always build with breakpad in CI
    if IS_IN_CI:
        return True

    # If not on a special build, check that the library was actually linked in
    try:
        torch._C._get_minidump_directory()  # type: ignore[attr-defined]
        return True
    except RuntimeError as e:
        # NOTE: "uninintialized" [sic] matches the exact (misspelled) message
        # raised by the native side; do not "fix" the spelling here.
        if "Minidump handler is uninintialized" in str(e):
            return True
        return False


def sandcastle_skip_if(condition, reason):
    """
    Similar to unittest.skipIf, however in the sandcastle environment it just
    "passes" the test instead to avoid creating tasks complaining about tests
    skipping continuously.
    """
    def decorator(func):
        # Outside sandcastle, a true condition becomes a regular unittest skip.
        if not IS_SANDCASTLE and condition:
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = reason
            return func

        @wraps(func)
        def wrapper(*args, **kwargs):
            # On sandcastle, a true condition silently "passes" the test.
            if condition and IS_SANDCASTLE:
                print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
                return
            else:
                return func(*args, **kwargs)
        return wrapper

    return decorator


def dtype_name(dtype):
    """
    Returns the pretty name of the dtype (e.g. torch.int64 -> int64).
    """
    return str(dtype).split('.')[1]


def set_single_threaded_if_parallel_tbb(fn):
    """Set test to be single threaded for parallel tbb.

    See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
    """
    if not IS_TBB:
        return fn

    @wraps(fn)
    def wrap_fn(*args, **kwargs):
        # Force a single thread for the duration of the test, restoring the
        # previous thread count afterwards even if the test raises.
        num_threads = torch.get_num_threads()
        torch.set_num_threads(1)
        try:
            return fn(*args, **kwargs)
        finally:
            torch.set_num_threads(num_threads)
    return wrap_fn


@functools.lru_cache()
def get_cycles_per_ms() -> float:
    """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
    """

    def measure() -> float:
        # Time a fixed-length torch.cuda._sleep with CUDA events and derive
        # the cycles/ms ratio from the measured elapsed time.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        torch.cuda._sleep(1000000)
        end.record()
        end.synchronize()
        cycles_per_ms = 1000000 / start.elapsed_time(end)
        return cycles_per_ms

    # Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbance that skew the results, e.g.
    # the very first cuda call likely does a bunch of init, which takes
    # much longer than subsequent calls.
    #
    # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
    # and seems to return stable values. Therefore, we enable caching
    # using lru_cache decorator above.
    num = 10
    vals = []
    for _ in range(num):
        vals.append(measure())
    vals = sorted(vals)
    return mean(vals[2 : num - 2])


T = TypeVar('T')


def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
    """
    Returns the first sample from an iterable of samples, like those returned by OpInfo.
    The test will be skipped if no samples are available.
    """
    try:
        return next(iter(samples))
    except StopIteration:
        raise unittest.SkipTest('Skipped! Need at least 1 sample input')
import torch
import torch.nn.functional as F
from torch import nn, einsum

from einops import rearrange, repeat
from einops.layers.torch import Rearrange

# constants

# hard upper bound on the number of pondering steps considered when deriving
# `train_max_steps` from the geometric prior in PonderTransformer.__init__
ABS_MAX_STEPS = 100

# helper functions

def exists(val):
    """Return True if `val` is not None."""
    return val is not None

# classes

class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then call `fn`."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, **kwargs):
        x = self.norm(x)
        return self.fn(x, **kwargs)

def FeedForward(dim, mult = 4):
    """Standard transformer feedforward: dim -> dim * mult -> dim with GELU."""
    return nn.Sequential(
        nn.Linear(dim, dim * mult),
        nn.GELU(),
        nn.Linear(dim * mult, dim)
    )

class Attention(nn.Module):
    """Multi-head self-attention with optional padding and causal masking.

    Args:
        dim: model dimension of input and output
        dim_head: dimension per attention head
        heads: number of attention heads
        causal: if True, apply an upper-triangular causal mask
    """

    def __init__(
        self,
        *,
        dim,
        dim_head = 64,
        heads = 8,
        causal = False
    ):
        super().__init__()
        self.heads = heads
        self.causal = causal
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x, mask = None):
        # x: (batch, seq, dim); mask: (batch, seq) bool — True = keep token
        # (assumed from masked_fill usage; confirm against callers)
        n, h, device = x.shape[1], self.heads, x.device

        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        mask_value = -torch.finfo(sim.dtype).max

        if exists(mask):
            # combine row/column padding masks; True where BOTH positions are valid
            mask = rearrange(mask, 'b i -> b () i ()') * rearrange(mask, 'b j -> b () () j')
            # FIX: mask out the INVALID positions (~mask). The previous code
            # filled the valid positions with -inf, inverting the padding mask.
            sim = sim.masked_fill(~mask, mask_value)

        if self.causal:
            i, j = sim.shape[-2:]
            causal_mask = torch.ones((i, j), device = device).triu(j - i + 1).bool()
            sim = sim.masked_fill(causal_mask, mask_value)

        attn = sim.softmax(dim = -1)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)

# pondering classes and helper functions

def pad_to(t, padding, dim = -1, value = 0.):
    """F.pad `t` along `dim` with `(left, right)` amounts given by `padding`."""
    if dim > 0:
        dim = dim - t.ndim
    zeroes = -dim - 1
    return F.pad(t, (*((0, 0) * zeroes), *padding), value = value)

def safe_cumprod(t, eps = 1e-10, dim = -1):
    """Numerically-stable cumulative product via exp(cumsum(log(t)))."""
    t = torch.clip(t, min = eps, max = 1.)
    return torch.exp(torch.cumsum(torch.log(t), dim = dim))

def exclusive_cumprod(t, dim = -1):
    """Cumulative product shifted right by one, with 1 prepended along `dim`."""
    cum_prod = safe_cumprod(t, dim = dim)
    return pad_to(cum_prod, (1, -1), value = 1., dim = dim)

def calc_geometric(l, dim = -1):
    """Geometric halting distribution: p_k = l_k * prod_{i<k}(1 - l_i)."""
    return exclusive_cumprod(1 - l, dim = dim) * l

# main class

class Block(nn.Module):
    """One ponder step: attention + feedforward, plus a halting logit head.

    For causal models the halting logit is produced per position from a
    running (prefix) mean of the hidden states; otherwise a single logit per
    batch element is produced from the mean over the sequence.
    """

    def __init__(
        self,
        *,
        dim,
        dim_head = 64,
        heads = 8,
        causal = False,
        ff_mult = 4
    ):
        super().__init__()
        self.causal = causal
        self.attn = PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal))
        self.ff = PreNorm(dim, FeedForward(dim = dim, mult = ff_mult))

        self.to_halt_logits = nn.Sequential(
            nn.Linear(dim, 1),
            Rearrange('... () -> ...')
        )

    def forward(self, x, mask = None):
        x = self.attn(x, mask = mask) + x
        x = self.ff(x) + x

        if self.causal:
            # running mean over the prefix so position i only sees tokens <= i
            denom = torch.arange(x.shape[-2], device = x.device)
            denom = rearrange(denom, 'n -> () n ()')
            halt_input = x.cumsum(dim = 1) / (denom + 1)
        else:
            halt_input = x.mean(dim = 1)

        halt_logits = self.to_halt_logits(halt_input)
        return x, halt_logits

class PonderTransformer(nn.Module):
    """PonderNet-style transformer that learns how many times to apply one
    shared Block, with a geometric prior on the halting distribution.

    Training returns a scalar loss (halting-weighted cross entropy + KL to the
    geometric prior). Evaluation samples a halting layer per batch element and
    returns (logits, halt_layer_indices).
    """

    def __init__(
        self,
        *,
        num_tokens,
        dim,
        max_seq_len,
        causal = True,
        dim_head = 64,
        heads = 8,
        ponder_kl_div_loss_weight = 0.01,
        ponder_lambda_p = 0.2,
        ponder_epsilon = 0.05,
        eps = 1e-20
    ):
        super().__init__()
        self.eps = eps
        self.causal = causal
        self.seq_len = max_seq_len
        self.token_emb = nn.Embedding(num_tokens, dim)
        self.pos_emb = nn.Embedding(max_seq_len, dim)

        # calculate max steps: smallest number of steps whose cumulative
        # geometric halting mass reaches (1 - ponder_epsilon)
        thres = 1 - ponder_epsilon
        halt_probs = calc_geometric(torch.full((ABS_MAX_STEPS,), ponder_lambda_p))
        cum_halt_probs = halt_probs.cumsum(dim = 0)
        self.train_max_steps = (cum_halt_probs < thres).sum().item()

        self.ponder_lambda_p = ponder_lambda_p
        self.ponder_kl_div_loss_weight = ponder_kl_div_loss_weight

        # pondering block (shared across all ponder steps)
        self.block = Block(
            dim = dim,
            dim_head = dim_head,
            heads = heads,
            causal = causal
        )

        # hidden state to 'y' - output
        self.to_logits = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_tokens)
        )

    def forward(self, x, *, labels = None, mask = None):
        n, device, eps, max_steps = x.shape[1], x.device, self.eps, self.train_max_steps

        x = self.token_emb(x)
        pos_emb = self.pos_emb(torch.arange(n, device = device))
        x = x + rearrange(pos_emb, 'n d -> () n d')

        if self.training:
            assert exists(labels), 'labels must be passed in during training'

            hiddens = []
            halting_logits = []

            # training mode: always unroll the full train_max_steps
            for _ in range(max_steps):
                # FIX: forward the padding mask to the block — it was accepted
                # by this method but silently dropped before
                x, halt_logits = self.block(x, mask = mask)
                hiddens.append(x)
                halting_logits.append(halt_logits)

            # stack halting probs (lambda) and y
            halting_logits = torch.stack(halting_logits, dim = 1)
            halting_probs = calc_geometric(halting_logits.sigmoid(), dim = 1)

            hiddens = torch.stack(hiddens, dim = 1)
            logits = self.to_logits(hiddens)

            # calculate kl div with geometric prior
            geometric_dist = calc_geometric(torch.full((max_steps,), self.ponder_lambda_p, device = device))

            if self.causal:
                geometric_dist = repeat(geometric_dist, 'l -> (l n)', n = n)
                halting_probs = rearrange(halting_probs, '... l n -> ... (l n)')

            kl_div_loss = F.kl_div(
                torch.log(geometric_dist + eps),
                halting_probs,
                None, None,
                'batchmean'
            )

            # calculate cross entropy loss, replicated across ponder steps
            labels = repeat(labels, 'b n -> b (l n)', l = max_steps)
            logits = rearrange(logits, 'b l n d -> b d (l n)')

            # FIX: keep per-position losses (reduction = 'none') so each ponder
            # step's loss is weighted by its halting probability; the default
            # mean reduction collapsed ce_loss to a scalar, which made the
            # weighting degenerate to ce * mean(halting_probs)
            ce_loss = F.cross_entropy(logits, labels, ignore_index = 0, reduction = 'none')

            ce_weights = halting_probs
            if not self.causal:
                # non-causal halting probs are per-step only; broadcast them
                # across sequence positions to match ce_loss's layout
                ce_weights = repeat(ce_weights, 'b l -> b (l n)', n = n)

            weighted_ce_loss = ce_loss * ce_weights

            # sum loss
            loss = weighted_ce_loss.mean() + self.ponder_kl_div_loss_weight * kl_div_loss.mean()
            return loss
        else:
            # evaluation mode: run steps until every batch element has sampled
            # a halt signal (or train_max_steps is reached)
            hiddens = []
            halting_logits = []
            layer_halt = []

            for i in range(self.train_max_steps):
                is_last = i == (self.train_max_steps - 1)

                # FIX: forward the padding mask here as well
                x, halt_logits = self.block(x, mask = mask)
                hiddens.append(x)

                if self.causal:
                    # for causal models, halt based on the last position only
                    halt_logits = halt_logits[..., -1]

                halting_logits.append(halt_logits)

                # calculating halting probs
                halting_probs = torch.stack(halting_logits, dim = 1).sigmoid()
                p = calc_geometric(halting_probs, dim = 1)[:, -1]
                should_halt = torch.rand_like(p) <= p

                # stack the halting signal across layers and determine whether to stop early
                layer_halt.append(should_halt)

                # do not exit early if it is the last one
                if is_last:
                    continue

                # break if halting has been sampled for all layers
                layer_was_halted = torch.any(torch.stack(layer_halt), dim = 0)

                if torch.all(layer_was_halted):
                    break

            # calculate max number of layers
            max_num_layers = len(layer_halt)

            # stack the hiddens and the boolean tensor indicating halting for each layer
            hiddens = torch.stack(hiddens, dim = 1)
            layer_halt = torch.stack(layer_halt, dim = 1)

            # calculate the index of the first halt signal, and make it the last layer if none of them halted
            halt_layer_indices = (layer_halt.cumsum(dim = 1) == 0).sum(dim = 1).clamp(max = max_num_layers - 1)

            # select out the correct hidden layers to logits
            halt_layer_indices_expanded = repeat(halt_layer_indices, 'b -> b () n d', n = hiddens.shape[-2], d = hiddens.shape[-1])
            hiddens = hiddens.gather(1, halt_layer_indices_expanded)
            hiddens = rearrange(hiddens, 'b () n d -> b n d')

            return self.to_logits(hiddens), halt_layer_indices
const service = require("../service"); module.exports = (app) => { app.get("/stats", async (req, res) => { try { const minerCount = await service.client.count(); const requestedCount = await service.pow.requestedCount(); const completedCount = await service.pow.completedCount(); res.json({ minerCount, requestedCount, completedCount }); } catch (err) { res.status(400).json({ success: "not ok", error: err.message, }); } }); };
// Gatsby site configuration. All site-specific values (titles, URLs,
// analytics IDs, colors) come from ./data/SiteConfig so this file stays
// generic across deployments.
const urljoin = require("url-join");
const path = require("path");
const config = require("./data/SiteConfig");

module.exports = {
  // Gatsby requires "/" rather than "" when the site is served from the root.
  pathPrefix: config.pathPrefix === "" ? "/" : config.pathPrefix,
  siteMetadata: {
    siteUrl: urljoin(config.siteUrl, config.pathPrefix),
    // Shared RSS metadata, re-read by the gatsby-plugin-feed setup() below.
    rssMetadata: {
      site_url: urljoin(config.siteUrl, config.pathPrefix),
      feed_url: urljoin(config.siteUrl, config.pathPrefix, config.siteRss),
      title: config.siteTitle,
      description: config.siteDescription,
      image_url: `${urljoin(
        config.siteUrl,
        config.pathPrefix
      )}/logos/logo-512.png`,
      copyright: config.copyright,
    },
  },
  plugins: [
    "gatsby-plugin-react-helmet",
    "gatsby-plugin-lodash",
    "gatsby-plugin-styled-components",
    // Static assets (images, downloads) under /static.
    {
      resolve: "gatsby-source-filesystem",
      options: {
        name: "assets",
        path: `${__dirname}/static/`,
      },
    },
    // Markdown posts under /content.
    {
      resolve: "gatsby-source-filesystem",
      options: {
        name: "posts",
        path: `${__dirname}/content/`,
      },
    },
    // Markdown -> HTML pipeline; sub-plugin order matters
    // (relative-images must run before gatsby-remark-images).
    {
      resolve: "gatsby-transformer-remark",
      options: {
        plugins: [
          {
            resolve: `gatsby-remark-relative-images`,
          },
          {
            resolve: "gatsby-remark-images",
            options: {
              maxWidth: 690,
            },
          },
          {
            resolve: "gatsby-remark-responsive-iframe",
          },
          "gatsby-remark-copy-linked-files",
          "gatsby-remark-autolink-headers",
          "gatsby-remark-prismjs",
        ],
      },
    },
    {
      resolve: "gatsby-plugin-google-analytics",
      options: {
        trackingId: config.googleAnalyticsID,
      },
    },
    // Top-of-page progress bar shown during route changes.
    {
      resolve: "gatsby-plugin-nprogress",
      options: {
        color: config.themeColor,
      },
    },
    "gatsby-plugin-sharp",
    "gatsby-transformer-sharp",
    "gatsby-plugin-catch-links",
    "gatsby-plugin-twitter",
    "gatsby-plugin-sitemap",
    // PWA web-app manifest; icon files must exist under /static/logos.
    {
      resolve: "gatsby-plugin-manifest",
      options: {
        name: config.siteTitle,
        short_name: config.siteTitleShort,
        description: config.siteDescription,
        start_url: config.pathPrefix,
        background_color: config.backgroundColor,
        theme_color: config.themeColor,
        display: "minimal-ui",
        icons: [
          {
            src: "/logos/logo-192.png",
            sizes: "192x192",
            type: "image/png",
          },
          {
            src: "/logos/logo-512.png",
            sizes: "512x512",
            type: "image/png",
          },
        ],
      },
    },
    // NOTE: gatsby-plugin-offline must come after gatsby-plugin-manifest
    // so the manifest is cached by the service worker.
    "gatsby-plugin-offline",
    {
      resolve: "gatsby-plugin-netlify-cms",
      options: {
        modulePath: path.resolve("src/netlifycms/index.js"), // default: undefined
        enableIdentityWidget: true,
        publicPath: "admin",
        htmlTitle: "Content Manager",
        includeRobots: false,
      },
    },
    // RSS feed generation; pulls rssMetadata defined above via GraphQL.
    {
      resolve: "gatsby-plugin-feed",
      options: {
        setup(ref) {
          const ret = ref.query.site.siteMetadata.rssMetadata;
          ret.allMarkdownRemark = ref.query.allMarkdownRemark;
          ret.generator = "ACZ";
          return ret;
        },
        query: `
        {
          site {
            siteMetadata {
              rssMetadata {
                site_url
                feed_url
                title
                description
                image_url
                copyright
              }
            }
          }
        }
      `,
        feeds: [
          {
            // Maps each markdown post to an RSS <item>.
            serialize(ctx) {
              const { rssMetadata } = ctx.query.site.siteMetadata;
              return ctx.query.allMarkdownRemark.edges.map((edge) => ({
                categories: edge.node.frontmatter.tags,
                date: edge.node.fields.date,
                title: edge.node.frontmatter.title,
                description: edge.node.excerpt,
                url: rssMetadata.site_url + edge.node.fields.slug,
                guid: rssMetadata.site_url + edge.node.fields.slug,
                custom_elements: [
                  { "content:encoded": edge.node.html },
                  { author: config.userEmail },
                ],
              }));
            },
            query: `
            {
              allMarkdownRemark(
                limit: 1000,
                sort: { order: DESC, fields: [fields___date] },
              ) {
                edges {
                  node {
                    excerpt
                    html
                    timeToRead
                    fields {
                      slug
                      date
                    }
                    frontmatter {
                      title
                      cover
                      date
                      category
                      tags
                    }
                  }
                }
              }
            }
          `,
            output: config.siteRss,
            title: config.siteRssTitle,
          },
        ],
      },
    },
  ],
};
/*
 * $QNXLicenseC:
 * Copyright 2007, 2008 QNX Software Systems.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You
 * may not reproduce, modify or distribute this software except in
 * compliance with the License. You may obtain a copy of the License
 * at: http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTIES OF ANY KIND, either express or implied.
 *
 * This file may contain contributions from others, either as
 * contributors under the License or as licensors under other terms.
 * Please review this entire file for other proprietary rights or license
 * notices, as well as the QNX Development Suite License Guide at
 * http://licensing.qnx.com/license-guide/ for other information.
 * $
 */

// PS2 controller API specification

#ifndef _PS2CONTR_H
#define _PS2CONTR_H

#include <stdlib.h>
#include <stdio.h>

#define START_PULSE_CODE (100)
#define PROCES_PRTY (15)
#define PROMOTE_PRTY (5)

// Retry/timeout tuning; earlier, larger values kept for reference.
//#define TRY_LIMIT (1000000)
//#define TIME_DELAY (10000)
//#define MAX_RESEND (100)
#define TRY_LIMIT (10000)
#define MOUSE_TRY_LIMIT (100000)
#define TIME_DELAY (10000)
#define MAX_RESEND (10)

#define INTR_ENABLE (1)
#define INTR_DISABLE (0)

// Command targets
#define SEND_DEVICE (1)
#define SEND_CONTR (2)

// Delay after OUT ?
#define DO_DELAY (1)
#define NO_DELAY (0)

// Resend
// #define RESEND_DELAY (5) // Delay if device requests resend

// Typical PS2 device and controller replies
#define ACK (0xfa) /* ack code */
#define RESEND (0xfe) /* resend */
#define SELF_TEST_OK (0x55) /* self test OK */

// Status register bits
#define OUT_BUFF_FULL (0x01) /* output register is full */
#define IN_BUFF_FULL (0x02) /* controller is ready to accept new byte */
#define INOUT_BUFF_FULL (0x03) /* combination of two previous flags */
#define AUX_DATA_COME (0x20) /* data comes from AUX device */
#define PARITY_ERROR (0x80) /* parity error */

// Controller command byte bits
#define KBD_INTR_ENABLED (0x01) /* Enable interrupt for the keyboard */
#define AUX_INTR_ENABLED (0x02) /* Enable interrupt for the mouse */
#define KBD_DISABLED (0x10) /* Keyboard interface disabled */
#define AUX_DISABLED (0x20) /* AUX interface disabled */
#define KBD_TRANS_MODE (0x40) /* Keyboard is in translate mode */

// Controller commands
#define AUX_MAGIC (0xd4) /* tells kbd ctrler next byte for aux */
#define KBD_ENABLE (0xae) /* enable keyboard */
#define KBD_DISABLE (0xad) /* disable keyboard */
#define AUX_ENABLE (0xa8) /* enable the aux device(s) */
#define AUX_DISABLE (0xa7) /* disable the aux device(s) */
#define SELF_TEST (0xaa) /* controller self test */
#define GET_CMD_BYTE (0x20) /* get the controller command byte */
#define SET_CMD_BYTE (0x60) /* write the controller command byte */

#define DATA_DEF_PORT (0x60) /* controller default ioport */
#define STATUS_DEF_PORT (0x64) /* controller default status port */
#define KBD_DEF_IRQ (1) /* default keyboard irq */
#define AUX_DEF_IRQ (12) /* default aux irq */

#define MAX_DEVICES (10)
#define MAX_EVENTS (100)
#define NO_PULSE_ASSIGNED (-1)

// For 8042
#define DEF_KBD_PORT (0)
#define DEF_AUX_PORT (1)

// Driver-wide state shared between the resource manager and the
// interrupt handler. NOTE(review): the callback typedefs used below
// (check_data, get_switch_cmd, validate_data, enable_port_cmd,
// switch_mode, dispatch_t) are expected to be declared in a header
// included before this one — confirm against the including sources.
typedef struct _data {
#define INIT_DONE (0x01)
#define MULTIPLEX_MODE (0x02)
#define DO_SELF_TEST (0x04)
#define HAVE_CMD_BYTE (0x08)
    int flags; /* interrupt handler local flags */
    int irq_base; /* keyboard IRQ */
    int irq_base_id; /* keyboard IRQ id (returned by InterruptAttach) */
    int irq_aux; /* auxiliary IRQ */
    int irq_aux_id; /* auxiliary IRQ id (returned by InterruptAttach) */
    int time_delay; /* atomic timeout for replies */
    int cmd_byte; /* command byte */
    int nPorts; /* number of available ports */
    int nDataPort; /* data i/o port number */
    int nStatPort; /* status i/o port number */
    int nDataPort_id; /* mapped data i/o port number */
    int nStatPort_id; /* mapped status i/o port number */
    check_data *f_chdat; /* callback function to check if data comes */
    get_switch_cmd *f_getsw; /* callback function to get controller switch command */
    validate_data *f_valid; /* callback function that validates data and */
                            /* determines logical port number */
    enable_port_cmd *f_encmd; /* callback function that returns port enable command */
                              /* byte */
    switch_mode *f_swmode; /* callback function that returns port switch command */
                           /* byte */
    dispatch_t *dpp;
    int nConnID; /* connection ID */
    int ncoids[MAX_DEVICES]; /* connection ID */
    int nPulseID[MAX_DEVICES]; /* Pulse IDs (from pulse_attach) */
    int nPriority[MAX_DEVICES]; /* per-device pulse priorities — NOTE(review):
                                   original comment duplicated "Pulse IDs"; verify */
    struct sigevent ev[MAX_EVENTS]; /* Events pool */
    unsigned char inData[MAX_EVENTS]; /* Data exchange buffer (between Interrupt handler */
                                      /* and pulse processing program */
} data, *pData;

// This code makes sense for Solution Engine only:
// the SH Solution Engine board wires the 8-bit registers onto the high
// byte of a 16-bit bus, so 8-bit accesses go through 16-bit I/O.
#ifdef VARIANT_sengine
static inline uint8_t sengine_in8 (uintptr_t addr)
{
    return in16 (addr) >> 8;
}

static inline void sengine_out8 (uintptr_t addr, uint8_t val)
{
    return out16 (addr, val << 8);
}

#define in8 sengine_in8
#define out8 sengine_out8
#endif

// Local service functions
// NOTE(review): drain_input/drain_output are declared twice below;
// harmless in C, but the duplicates could be removed.
int drain_input (pData pd);
int drain_output (pData pd);
void toggle_devices (pData pd, int bEnable);
void toggle_interrupt (pData pd, int bEnable);
int get_command_byte (pData pd, unsigned char *cmd_byte);
int get_device_poll (pData pd, int nPortNum, unsigned char *c);
int read_data_sync (pData pd, int nPortNum, unsigned char *buf, int nLen);
void set_command_byte (pData pd, unsigned char c);
int send_device (pData pd, unsigned char cmd, int dest, int bDelay);
void select_target (pData pd, int nPortNum);
int drain_input (pData pd);
int drain_output (pData pd);
int self_test (pData pd);
void clean_irq (pData pd, int nIrq_id);

// Default callbacks
int f_check_data (int nPortNum, unsigned char nStatus);
int f_get_switch_cmd (int nPortNum);
int f_validate_data (int nStatus, int nIrqId);
int f_enable_port_cmd (int nPortNum, unsigned char cmd);
int f_switch_mode (int nNewMode);

// Interrupt handler prototype
const struct sigevent *contr_intr (void *area, int id);

#endif // _PS2CONTR_H

__SRCVERSION( "$URL: http://svn/product/tags/public/bsp/nto650/ATMEL-AT91SAM9G45-EKES-650/latest/src/hardware/devi/lib/ps2contr.h $ $Rev: 657836 $" )
"use strict"; module.exports = { settings: { "number_of_shards": 2, analysis: { analyzer: { lowercase_only: { type: "custom", char_filter: [], tokenizer: "keyword", filter: ["lowercase"] }, html_snowball: { type: "custom", char_filter: ["html_strip"], tokenizer: "standard", filter: ["lowercase", "stop", "snowball"] } } } }, mappings: { my_thing: { properties: { atom: { type: "long", //include_in_all: false }, atom2: { type: "long", //include_in_all: false }, my_bool: { type: "boolean", //include_in_all: false }, raw_thing: { enabled: false }, some_url: { type: "keyword", //include_in_all: false }, category: { type: "text", analyzer: "snowball", fields: { raw: { type: "keyword" }, lowered: { type: "text", analyzer: "lowercase_only" } } }, condition: { type: "keyword", //include_in_all: false }, created: { type: "date", format: "dateOptionalTime", //include_in_all: false }, name: { type: "text", analyzer: "html_snowball", //include_in_all: true } } } } };
""" Django settings for app project. Generated by 'django-admin startproject' using Django 3.2.2. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-d*$j^+=f@by)ifn@+i=^xu2gf+tlkh!zv5w&29*8bb#q#hmt*2' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 
'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
import math

import tensorflow as tf

from networks.parameter import user_params


class QAOutputLayer(tf.layers.Layer):
    """Output head for extractive QA: produces start- and end-position logits.

    Consumes three context encodings (M0/M1/M2, as in QANet-style models)
    and a context mask, and scores each context position as a potential
    answer start (from [M0;M1]) and answer end (from [M0;M2]).
    """

    def __init__(self, params: user_params, feature_size: int, is_trainning=True,
                 name: str = "QAOutputLayer", dtype=tf.float32):
        # NOTE(review): positional super() args assume the tf.layers.Layer
        # signature (trainable, name, dtype) — confirm against the TF version
        # in use. "is_trainning" spelling is kept for caller compatibility.
        super(QAOutputLayer, self).__init__(is_trainning, name, dtype)
        self.params = params
        self.feature_size = feature_size
        # Large negative value added at masked positions so they get
        # ~zero probability after softmax.
        self.exp_epsilon = -1e25
        # Scale for the dot-product scores; 2*feature_size is the width of
        # the concatenated [M_i; M_j] vectors.
        self.scale = 1 / math.sqrt(2 * self.feature_size)

    def get_output_feature_size(self):
        # This head emits logits, not features, so there is no feature size.
        return None

    def build(self, _):
        # One scoring vector per head (start / end), each over the
        # concatenated 2*feature_size representation; L2-regularized.
        self.w_p1 = self.add_variable("w_p1", [2 * self.feature_size], regularizer=tf.nn.l2_loss)
        self.w_p2 = self.add_variable("w_p2", [2 * self.feature_size], regularizer=tf.nn.l2_loss)
        self.built = True

    def call(self, inputs, **kwargs):
        """Return (start_factor, end_factor) logits, each reshaped to
        [-1, c_seq_len]. `inputs` must provide "M0", "M1", "M2" and
        "context_mask" (1 = real token, 0 = padding)."""
        m0, m1, m2 = inputs["M0"], inputs["M1"], inputs["M2"]
        # [batch_size*sent_number, c_seq_len]
        context_mask = tf.squeeze(inputs["context_mask"], -1)

        # Start logits from [M0; M1].
        start_context = tf.concat([m0, m1], axis=-1)
        # [batch_size, sent_number, c_seq_len]
        start_factor = tf.reduce_sum(start_context * self.w_p1, axis=-1)
        # start_factor = tf.where(tf.cast(start_factor,bool),start_factor,tf.ones_like(start_factor)*self.exp_epsilon)
        start_factor = start_factor * self.scale
        # Masked positions get a huge negative additive penalty.
        padding = (1 - context_mask) * self.exp_epsilon
        start_factor = start_factor + padding
        start_factor = tf.reshape(start_factor, [-1, self.params.c_seq_len])

        # End logits from [M0; M2]; same masking/scaling as above.
        end_context = tf.concat([m0, m2], axis=-1)
        # [batch_size, sent_number, c_seq_len]
        end_factor = tf.reduce_sum(end_context * self.w_p2, axis=-1)
        end_factor = end_factor * self.scale
        # end_factor = tf.where(tf.cast(end_factor,bool),end_factor,tf.ones_like(end_factor)*self.exp_epsilon)
        end_factor = end_factor + padding
        end_factor = tf.reshape(end_factor, [-1, self.params.c_seq_len])
        return start_factor, end_factor
from plugin.sync.core.enums import SyncData, SyncMode, SyncMedia
from plugin.sync.modes.pull.lists.base import Lists

import elapsed
import logging

log = logging.getLogger(__name__)


class LikedLists(Lists):
    """Pull-mode sync of Trakt "liked" lists into Plex playlists."""

    # Registration metadata consumed by the sync framework.
    data = [SyncData.Liked]
    mode = SyncMode.Pull

    @elapsed.clock
    def run(self):
        """Sync every liked Trakt list against the matching Plex playlist."""
        # Retrieve plex sections
        p_sections, p_sections_map = self.sections()

        # Retrieve plex playlists
        p_playlists = dict(self.get_playlists())

        # Retrieve trakt lists; None means the Trakt fetch failed, so bail
        # out rather than sync against an empty view.
        t_lists = self.trakt[(SyncMedia.Lists, SyncData.Liked)]

        if t_lists is None:
            log.warn('Unable to retrieve liked lists')
            return

        # Process trakt lists (keys of t_lists are unused identifiers)
        for _, t_list in t_lists.items():
            self.process(SyncData.Liked, p_playlists, p_sections_map, t_list)

    def create_playlist(self, uri, name):
        """Create a Plex playlist for a liked list, unless the user has
        disabled playlist creation in the sync configuration."""
        # Check if playlist creation is enabled
        if self.configuration['sync.lists.liked.playlists'] is False:
            log.info('No playlist found named %r ("Create playlists in plex" not enabled)', name)
            return None

        # Create playlist
        return super(LikedLists, self).create_playlist(uri, name)
# Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PyTorch SelecSLS Net example for ImageNet Classification
License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode)
Author: Dushyant Mehta (@mehtadushy)

SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D
Human Pose Estimation with a Single RGB Camera, Mehta et al."
https://arxiv.org/abs/1907.00837

Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models
and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch
"""
from typing import List

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import create_classifier
from .registry import register_model

__all__ = ['SelecSLS']  # model_registry will add each entrypoint fn to this


def _cfg(url='', **kwargs):
    """Build a default pretrained-config dict; kwargs override the defaults."""
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0', 'classifier': 'fc',
        **kwargs
    }


# Pretrained weight URLs per variant; empty url means no pretrained weights.
default_cfgs = {
    'selecsls42': _cfg(
        url='',
        interpolation='bicubic'),
    'selecsls42b': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth',
        interpolation='bicubic'),
    'selecsls60': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth',
        interpolation='bicubic'),
    'selecsls60b': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth',
        interpolation='bicubic'),
    'selecsls84': _cfg(
        url='',
        interpolation='bicubic'),
}


class SequentialList(nn.Sequential):
    """nn.Sequential whose modules pass a List[Tensor] between each other.

    The @torch.jit._overload_method stubs let TorchScript accept either a
    plain Tensor or a List[Tensor] as input.
    """

    def __init__(self, *args):
        super(SequentialList, self).__init__(*args)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (List[torch.Tensor]) -> (List[torch.Tensor])
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (torch.Tensor) -> (List[torch.Tensor])
        pass

    def forward(self, x) -> List[torch.Tensor]:
        for module in self:
            x = module(x)
        return x


class SelectSeq(nn.Module):
    """Collapse a List/Tuple[Tensor] back to a single Tensor.

    mode='index' picks element `index`; any other mode concatenates along
    the channel dim. Used between the feature trunk and the head.
    """

    def __init__(self, mode='index', index=0):
        super(SelectSeq, self).__init__()
        self.mode = mode
        self.index = index

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (List[torch.Tensor]) -> (torch.Tensor)
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, x):
        # type: (Tuple[torch.Tensor]) -> (torch.Tensor)
        pass

    def forward(self, x) -> torch.Tensor:
        if self.mode == 'index':
            return x[self.index]
        else:
            return torch.cat(x, dim=1)


def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1):
    """Conv2d (no bias) + BatchNorm + ReLU; padding defaults to 'same'-like
    for the given stride/dilation."""
    if padding is None:
        padding = ((stride - 1) + dilation * (k - 1)) // 2
    return nn.Sequential(
        nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_chs),
        nn.ReLU(inplace=True)
    )


class SelecSLSBlock(nn.Module):
    """Core SelecSLS block: three stacked conv stages whose intermediate
    outputs are concatenated (selective long-range skip). Blocks pass
    [features, skip] pairs; the first block of a stage (is_first=True)
    starts a new skip connection."""

    def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
        super(SelecSLSBlock, self).__init__()
        self.stride = stride
        self.is_first = is_first
        assert stride in [1, 2]

        # Process input with 4 conv blocks with the same number of input and output channels
        self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
        self.conv2 = conv_bn(mid_chs, mid_chs, 1)
        self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
        self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
        self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
        # conv6 fuses d1+d2+d3 (2*mid_chs total) plus the stage skip, if any.
        self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        # Accept a bare tensor (stem output) by wrapping it in a list.
        if not isinstance(x, list):
            x = [x]
        assert len(x) in [1, 2]

        d1 = self.conv1(x[0])
        d2 = self.conv3(self.conv2(d1))
        d3 = self.conv5(self.conv4(d2))
        if self.is_first:
            # Start of a stage: output doubles as the new skip tensor.
            out = self.conv6(torch.cat([d1, d2, d3], 1))
            return [out, out]
        else:
            # Carry the stage skip x[1] through unchanged.
            return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]]


class SelecSLS(nn.Module):
    """SelecSLS42 / SelecSLS60 / SelecSLS84

    Parameters
    ----------
    cfg : network config dictionary specifying block type, feature, and head args
    num_classes : int, default 1000
        Number of classification classes.
    in_chans : int, default 3
        Number of input (color) channels.
    drop_rate : float, default 0.
        Dropout probability before classifier, for training
    global_pool : str, default 'avg'
        Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
    """

    def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        super(SelecSLS, self).__init__()

        self.stem = conv_bn(in_chans, 32, stride=2)
        self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']])
        self.from_seq = SelectSeq()  # from List[tensor] -> Tensor in module compatible way
        self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']])
        self.num_features = cfg['num_features']
        self.feature_info = cfg['feature_info']

        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

        # He init for convs, unit-gamma/zero-beta for batchnorms.
        for n, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.)
                nn.init.constant_(m.bias, 0.)

    def get_classifier(self):
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        # Rebuild pooling + fc for a new class count.
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.stem(x)
        x = self.features(x)
        x = self.head(self.from_seq(x))
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.fc(x)
        return x


def _create_selecsls(variant, pretrained, **kwargs):
    """Assemble the per-variant block/head config and build the model."""
    cfg = {}
    feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
    if variant.startswith('selecsls42'):
        cfg['block'] = SelecSLSBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 128, False, 1),
            (128, 0, 144, 144, True, 2),
            (144, 144, 144, 288, False, 1),
            (288, 0, 304, 304, True, 2),
            (304, 304, 304, 480, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=128, reduction=4, module='features.1'),
            dict(num_chs=288, reduction=8, module='features.3'),
            dict(num_chs=480, reduction=16, module='features.5'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        if variant == 'selecsls42b':
            cfg['head'] = [
                (480, 960, 3, 2),
                (960, 1024, 3, 1),
                (1024, 1280, 3, 2),
                (1280, 1024, 1, 1),
            ]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [
                (480, 960, 3, 2),
                (960, 1024, 3, 1),
                (1024, 1024, 3, 2),
                (1024, 1280, 1, 1),
            ]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif variant.startswith('selecsls60'):
        cfg['block'] = SelecSLSBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 128, False, 1),
            (128, 0, 128, 128, True, 2),
            (128, 128, 128, 128, False, 1),
            (128, 128, 128, 288, False, 1),
            (288, 0, 288, 288, True, 2),
            (288, 288, 288, 288, False, 1),
            (288, 288, 288, 288, False, 1),
            (288, 288, 288, 416, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=128, reduction=4, module='features.1'),
            dict(num_chs=288, reduction=8, module='features.4'),
            dict(num_chs=416, reduction=16, module='features.8'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        if variant == 'selecsls60b':
            cfg['head'] = [
                (416, 756, 3, 2),
                (756, 1024, 3, 1),
                (1024, 1280, 3, 2),
                (1280, 1024, 1, 1),
            ]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [
                (416, 756, 3, 2),
                (756, 1024, 3, 1),
                (1024, 1024, 3, 2),
                (1024, 1280, 1, 1),
            ]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif variant == 'selecsls84':
        cfg['block'] = SelecSLSBlock
        # Define configuration of the network after the initial neck
        cfg['features'] = [
            # in_chs, skip_chs, mid_chs, out_chs, is_first, stride
            (32, 0, 64, 64, True, 2),
            (64, 64, 64, 144, False, 1),
            (144, 0, 144, 144, True, 2),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 144, False, 1),
            (144, 144, 144, 304, False, 1),
            (304, 0, 304, 304, True, 2),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 304, False, 1),
            (304, 304, 304, 512, False, 1),
        ]
        feature_info.extend([
            dict(num_chs=144, reduction=4, module='features.1'),
            dict(num_chs=304, reduction=8, module='features.6'),
            dict(num_chs=512, reduction=16, module='features.12'),
        ])
        # Head can be replaced with alternative configurations depending on the problem
        cfg['head'] = [
            (512, 960, 3, 2),
            (960, 1024, 3, 1),
            (1024, 1024, 3, 2),
            (1024, 1280, 3, 1),
        ]
        cfg['num_features'] = 1280
        feature_info.extend([
            dict(num_chs=1024, reduction=32, module='head.1'),
            dict(num_chs=1280, reduction=64, module='head.3')
        ])
    else:
        raise ValueError('Invalid net configuration ' + variant + ' !!!')
    cfg['feature_info'] = feature_info

    # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises?
    return build_model_with_cfg(
        SelecSLS, variant, pretrained,
        default_cfg=default_cfgs[variant],
        model_cfg=cfg,
        feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True),
        **kwargs)


@register_model
def selecsls42(pretrained=False, **kwargs):
    """Constructs a SelecSLS42 model.
    """
    return _create_selecsls('selecsls42', pretrained, **kwargs)


@register_model
def selecsls42b(pretrained=False, **kwargs):
    """Constructs a SelecSLS42_B model.
    """
    return _create_selecsls('selecsls42b', pretrained, **kwargs)


@register_model
def selecsls60(pretrained=False, **kwargs):
    """Constructs a SelecSLS60 model.
    """
    return _create_selecsls('selecsls60', pretrained, **kwargs)


@register_model
def selecsls60b(pretrained=False, **kwargs):
    """Constructs a SelecSLS60_B model.
    """
    return _create_selecsls('selecsls60b', pretrained, **kwargs)


@register_model
def selecsls84(pretrained=False, **kwargs):
    """Constructs a SelecSLS84 model.
    """
    return _create_selecsls('selecsls84', pretrained, **kwargs)
//filter will reemit the data if cb(err,pass) pass is truthy

// reduce is more tricky
// maybe we want to group the reductions or emit progress updates occasionally
// the most basic reduce just emits one 'data' event after it has recieved 'end'

var Stream = require('stream').Stream

//create an event stream and apply function to each .write
//emitting each response as data
//unless it's an empty callback
//
// The mapper may complete out of order; results are buffered in writeQueue
// and re-emitted strictly in the order the inputs were written.
module.exports = function (mapper, opts) {

  var stream = new Stream()
    , self = this
    , inputs = 0        // count of items written in
    , outputs = 0       // count of mapper results emitted (or dropped)
    , ended = false
    , paused = false
    , destroyed = false
    , lastWritten = 0   // sequence number of the last in-order emit
    , inNext = false    // true while the mapper callback runs synchronously

  this.opts = opts || {};
  // opts.failures routes mapper errors to 'failure' events (and keeps
  // emitting subsequent data) instead of 'error'.
  var errorEventName = this.opts.failures ? 'failure' : 'error';

  // Items that are not ready to be written yet (because they would come out of
  // order) get stuck in a queue for later.
  var writeQueue = {}

  stream.writable = true
  stream.readable = true

  // Emit `data` (sequence number `number`) in input order, buffering
  // out-of-order results and flushing any now-ready queued ones.
  function queueData (data, number) {
    var nextToWrite = lastWritten + 1

    if (number === nextToWrite) {
      // If it's next, and its not undefined write it
      // (undefined means the mapper dropped this item).
      if (data !== undefined) {
        stream.emit.apply(stream, ['data', data])
      }
      lastWritten ++
      nextToWrite ++
    } else {
      // Otherwise queue it for later.
      writeQueue[number] = data
    }

    // If the next value is in the queue, write it
    if (writeQueue.hasOwnProperty(nextToWrite)) {
      var dataToWrite = writeQueue[nextToWrite]
      delete writeQueue[nextToWrite]
      return queueData(dataToWrite, nextToWrite)
    }

    outputs ++
    if(inputs === outputs) {
      // All in-flight work drained: release backpressure and, if end()
      // was already requested, finish the stream.
      if(paused) paused = false, stream.emit('drain') //written all the incoming events
      if(ended) end()
    }
  }

  // Mapper completion callback: route result to the ordered queue and
  // errors to the configured error/failure event.
  function next (err, data, number) {
    if(destroyed) return
    inNext = true

    if (!err || self.opts.failures) {
      queueData(data, number)
    }

    if (err) {
      stream.emit.apply(stream, [ errorEventName, err ]);
    }

    inNext = false;
  }

  // Wrap the mapper function by calling its callback with the order number of
  // the item in the stream.
  function wrappedMapper (input, number, callback) {
    return mapper.call(null, input, function(err, data){
      callback(err, data, number)
    })
  }

  stream.write = function (data) {
    if(ended) throw new Error('map stream is not writable')
    inNext = false
    inputs ++

    try {
      //catch sync errors and handle them like async errors
      // A mapper returning === false signals backpressure.
      var written = wrappedMapper(data, inputs, next)
      paused = (written === false)
      return !paused
    } catch (err) {
      //if the callback has been called syncronously, and the error
      //has occured in an listener, throw it again.
      if(inNext)
        throw err
      next(err)
      return !paused
    }
  }

  function end (data) {
    //if end was called with args, write it,
    ended = true //write will emit 'end' if ended is true
    stream.writable = false
    if(data !== undefined) {
      return queueData(data, inputs)
    } else if (inputs == outputs) { //wait for processing
      stream.readable = false, stream.emit('end'), stream.destroy()
    }
  }

  stream.end = function (data) {
    if(ended) return
    // NOTE(review): `data` is not forwarded to end() here, so a final
    // chunk passed to stream.end(data) is dropped — confirm whether that
    // matches the intended API (the comment inside end() suggests it may
    // once have been forwarded).
    end()
  }

  stream.destroy = function () {
    ended = destroyed = true
    stream.writable = stream.readable = paused = false
    process.nextTick(function () {
      stream.emit('close')
    })
  }

  stream.pause = function () {
    paused = true
  }

  stream.resume = function () {
    paused = false
  }

  return stream
}
// simple data types var myStringVariable = 'Some Name' var myNumberVariable = 200 var myBooleanVariable = true // uncomment one of the following lines and check the console console.log(myStringVariable) console.log(myNumberVariable) // console.log(myBooleanVariable) // the object type, this is an emtpy object var myObject = {} // object with properties // each prop can be string, number, boolean or another object or list var myObjectWithProps = { some_prop: "I'm a String", other_prop: 200 } // uncomment and check the console // console.log(myObjectWithProps) // the array or list data type, empty var myEmptyList = [] // an array can contain multiple variables // each variable must be separated with comma var myListNumber = [200, 40, 50] var myListString = ['some name', 'other name', 'different name'] var myListBoolean = [true, false, true, true, false] // uncomment and check the console // console.log(myListNumber) // console.log(myListString) // console.log(myListBoolean) // an array can contains also objects or other arrays var myListOfObject = [ { prop: 'some prop 1' }, { prop: 'some prop 2' }, { prop: 'some prop 3' } ] // console.log(myListOfObject) // this is an array that contains other arrays var myListOfArray = [ [2, 5, 3], [5, 7, 8], [1, 1, 1] ] // console.log(myListOfArray)
import warning from 'warning'
import useBeforeUnload from './useBeforeUnload'

/**
 * Deprecated alias for `useBeforeUnload`.
 *
 * Emits a one-time deprecation warning in development, then forwards the
 * call (arguments and `this` binding intact) to `useBeforeUnload`.
 */
function enableBeforeUnload(...args) {
  warning(
    false,
    'enableBeforeUnload is deprecated, use useBeforeUnload instead'
  )

  return useBeforeUnload.apply(this, args)
}

export default enableBeforeUnload
import datetime
from unittest import TestCase

from freezegun import freeze_time
from pytz import utc

from redash.query_runner.mongodb import (
    parse_query_json,
    parse_results,
    _get_column_by_name,
)
from redash.utils import json_dumps, parse_human_time


class TestParseQueryJson(TestCase):
    """Tests for parse_query_json: deserializing a MongoDB query string,
    including Mongo extended-JSON types (ISODate, $date, $undefined,
    $humanTime) at the top level and in nested structures."""

    def test_ignores_non_isodate_fields(self):
        # Plain JSON values round-trip unchanged.
        query = {"test": 1, "test_list": ["a", "b", "c"], "test_dict": {"a": 1, "b": 2}}

        query_data = parse_query_json(json_dumps(query))
        self.assertDictEqual(query_data, query)

    def test_parses_isodate_fields(self):
        # An ISODate("...") string value becomes a datetime.datetime.
        query = {
            "test": 1,
            "test_list": ["a", "b", "c"],
            "test_dict": {"a": 1, "b": 2},
            "testIsoDate": 'ISODate("2014-10-03T00:00")',
        }

        query_data = parse_query_json(json_dumps(query))
        self.assertEqual(
            query_data["testIsoDate"], datetime.datetime(2014, 10, 3, 0, 0)
        )

    def test_parses_isodate_in_nested_fields(self):
        # ISODate conversion is applied recursively, not just at the top level.
        query = {
            "test": 1,
            "test_list": ["a", "b", "c"],
            "test_dict": {"a": 1, "b": {"date": 'ISODate("2014-10-04T00:00")'}},
            "testIsoDate": 'ISODate("2014-10-03T00:00")',
        }

        query_data = parse_query_json(json_dumps(query))
        self.assertEqual(
            query_data["testIsoDate"], datetime.datetime(2014, 10, 3, 0, 0)
        )
        self.assertEqual(
            query_data["test_dict"]["b"]["date"], datetime.datetime(2014, 10, 4, 0, 0)
        )

    def test_handles_nested_fields(self):
        # Regression test: deeply nested aggregate pipelines must survive
        # parsing untouched.
        # https://github.com/getredash/redash/issues/597
        query = {
            "collection": "bus",
            "aggregate": [
                {
                    "$geoNear": {
                        "near": {
                            "type": "Point",
                            "coordinates": [-22.910079, -43.205161],
                        },
                        "maxDistance": 100000000,
                        "distanceField": "dist.calculated",
                        "includeLocs": "dist.location",
                        "spherical": True,
                    }
                }
            ],
        }

        query_data = parse_query_json(json_dumps(query))
        self.assertDictEqual(query, query_data)

    def test_supports_extended_json_types(self):
        # Mongo extended-JSON operators: {"$date": ...} becomes a UTC-aware
        # datetime, {"$undefined": null} becomes None.
        query = {
            "test": 1,
            "test_list": ["a", "b", "c"],
            "test_dict": {"a": 1, "b": 2},
            "testIsoDate": 'ISODate("2014-10-03T00:00")',
            "test$date": {"$date": "2014-10-03T00:00:00.0"},
            "test$undefined": {"$undefined": None},
        }

        query_data = parse_query_json(json_dumps(query))
        # Idiom fix: assertIsNone instead of assertEqual(..., None) — it
        # checks identity and gives a clearer failure message.
        self.assertIsNone(query_data["test$undefined"])
        self.assertEqual(
            query_data["test$date"],
            datetime.datetime(2014, 10, 3, 0, 0).replace(tzinfo=utc),
        )

    @freeze_time("2019-01-01 12:00:00")
    def test_supports_relative_timestamps(self):
        # Clock is frozen so the {"$humanTime": ...} result is deterministic
        # and comparable to parse_human_time's answer.
        query = {"ts": {"$humanTime": "1 hour ago"}}

        one_hour_ago = parse_human_time("1 hour ago")
        query_data = parse_query_json(json_dumps(query))

        self.assertEqual(query_data["ts"], one_hour_ago)


class TestMongoResults(TestCase):
    """Tests for parse_results: converting raw MongoDB documents into
    (rows, columns), flattening nested documents into dotted column names."""

    def test_parses_regular_results(self):
        # Flat documents pass through unchanged; every key seen across all
        # documents appears in the column list.
        raw_results = [
            {"column": 1, "column2": "test"},
            {"column": 2, "column2": "test", "column3": "hello"},
        ]

        rows, columns = parse_results(raw_results)

        for i, row in enumerate(rows):
            self.assertDictEqual(row, raw_results[i])

        self.assertIsNotNone(_get_column_by_name(columns, "column"))
        self.assertIsNotNone(_get_column_by_name(columns, "column2"))
        self.assertIsNotNone(_get_column_by_name(columns, "column3"))

    def test_parses_nested_results(self):
        # Nested documents are flattened with dot-separated column names
        # (e.g. {"nested": {"a": 1}} -> "nested.a").
        raw_results = [
            {"column": 1, "column2": "test", "nested": {"a": 1, "b": "str"}},
            {
                "column": 2,
                "column2": "test",
                "column3": "hello",
                "nested": {"a": 2, "b": "str2", "c": "c"},
            },
        ]

        rows, columns = parse_results(raw_results)

        self.assertDictEqual(
            rows[0], {"column": 1, "column2": "test", "nested.a": 1, "nested.b": "str"}
        )
        self.assertDictEqual(
            rows[1],
            {
                "column": 2,
                "column2": "test",
                "column3": "hello",
                "nested.a": 2,
                "nested.b": "str2",
                "nested.c": "c",
            },
        )

        self.assertIsNotNone(_get_column_by_name(columns, "column"))
        self.assertIsNotNone(_get_column_by_name(columns, "column2"))
        self.assertIsNotNone(_get_column_by_name(columns, "column3"))
        self.assertIsNotNone(_get_column_by_name(columns, "nested.a"))
        self.assertIsNotNone(_get_column_by_name(columns, "nested.b"))
        self.assertIsNotNone(_get_column_by_name(columns, "nested.c"))