text
stringlengths
3
1.05M
/** * Licensed to The Apereo Foundation under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * * * The Apereo Foundation licenses this file to you under the Educational * Community License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of the License * at: * * http://opensource.org/licenses/ecl2.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ /* global define, escapeRegExp */ define(['jquery'], function($) { 'use strict'; var entityMap = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', '\'': '&#39;', '/': '&#x2F;' }; function Utils() { // nothing to see here } Utils.prototype.escapeHtml = function(string) { return String(string).replace(/[&<>"'/]/g, function(s) { return entityMap[s]; }); }; Utils.prototype.getAspectRatioWidth = function(originalWidth, originalHeight, height) { var width = Math.round(height * originalWidth / originalHeight); return width; }; Utils.prototype.getAspectRatioHeight = function(originalWidth, originalHeight, width) { var height = Math.round(originalHeight / originalWidth * width); return height; }; Utils.prototype.escapeRegExp = function(string) { return string.replace(/([.*+?^=!:${}()|[\]/\\])/g, '\\$1'); }; Utils.prototype.replaceAll = function(string, find, replace) { return string.replace(new RegExp(escapeRegExp(find), 'g'), replace); }; Utils.prototype.getFormattedPlaybackRate = function(rate) { return (rate * 100) + '%'; }; /** * Returns the input time in milliseconds * * @param data data in the format ab:cd:ef * @return time from the data in milliseconds */ 
Utils.prototype.getTimeInMilliseconds = function(data) {
  if ((data != undefined) && (data != null) && (data != 0) && (data.length) && (data.indexOf(':') != -1)) {
    var values = data.split(':');
    // when the format is correct
    if (values.length == 3) {
      // try to convert to numbers
      var val0 = values[0] * 1;
      var val1 = values[1] * 1;
      var val2 = values[2] * 1;
      // check and parse the seconds
      if (!isNaN(val0) && !isNaN(val1) && !isNaN(val2)) {
        // convert hours, minutes and seconds to milliseconds
        val0 *= 60 * 60 * 1000; // 1 hour = 60 * 60 seconds = 3 600 000 ms
        val1 *= 60 * 1000;      // 1 minute = 60 seconds = 60 000 ms
        val2 *= 1000;           // 1 second = 1000 ms
        return val0 + val1 + val2;
      }
    }
  }
  return 0;
};

/**
 * Returns the formatted seconds
 *
 * @param seconds seconds to format
 * @return formatted seconds as 'hh:mm:ss'
 */
Utils.prototype.formatSeconds = function(seconds) {
  if (!seconds) {
    seconds = 0;
  }
  seconds = (seconds < 0) ? 0 : seconds;
  var result = '';
  if (parseInt(seconds / 3600) < 10) {
    result += '0';
  }
  result += parseInt(seconds / 3600);
  result += ':';
  if ((parseInt(seconds / 60) - parseInt(seconds / 3600) * 60) < 10) {
    result += '0';
  }
  result += parseInt(seconds / 60) - parseInt(seconds / 3600) * 60;
  result += ':';
  if (seconds % 60 < 10) {
    result += '0';
  }
  result += seconds % 60;
  if (result.indexOf('.') != -1) {
    result = result.substring(0, result.lastIndexOf('.')); // get rid of the .ms
  }
  return result;
};

/**
 * enable: removes the 'disabled' attribute from the element with the given id
 *
 * @param id
 */
Utils.prototype.enable = function(id) {
  $('#' + id).removeAttr('disabled');
};

/**
 * disable: sets the 'disabled' attribute on the element with the given id
 *
 * @param id
 */
Utils.prototype.disable = function(id) {
  $('#' + id).attr('disabled', 'disabled');
};

/**
 * Removes the parent of the element with the given id, if present.
 *
 * Fixed: removed a leftover diff marker '+' that was left in front of the
 * remove() call (it parsed as a harmless unary plus, but was a merge artifact).
 *
 * @param elemenId id of the child element
 */
Utils.prototype.removeParentIfElementExists = function(elemenId) {
  if ($('#' + elemenId) && $('#' + elemenId).parent()) {
    $('#' + elemenId).parent().remove();
  }
};

/**
 * greyIn: animates the element back to full opacity
 *
 * @param id
 */
Utils.prototype.greyIn = function(id) {
  $('#' + id).animate({
    opacity: 1.0
  });
};

/**
 * greyOut
 *
 * @param id
 */
Utils.prototype.greyOut = function(id) { $('#' + id).animate({ opacity: 0.5 }); }; Utils.prototype.repairSegmentLength = function(segments, duration, min_segment_duration) { if (segments && duration) { var total = 0, result = new Array(); for (let i = 0; i < segments.length; i++) { if (segments[i].time < parseInt(duration)) { if (segments[i].duration) { total += parseInt(segments[i].duration); if (parseInt(segments[i].duration) < min_segment_duration) { if (result.length === 0) { result.push(segments[i]); } else { result[result.length - 1].duration = parseInt(result[result.length - 1].duration) + parseInt(segments[i].duration); } } else { result.push(segments[i]); } } } } if (total > parseInt(duration)) { let diff = total - parseInt(duration); for (let i = result.length - 1; i >= 0; i-- ) { if (parseInt(result[i].duration) > diff) { result[i].duration = parseInt(result[i].duration) - diff; break; } } } if (total < parseInt(duration)) { let diff = parseInt(duration) - total; if (result[result.length - 1]) { result[result.length - 1].duration = parseInt(result[result.length - 1].duration) + diff; } } } return result; }; /** * get starttime next segment and 0 if the last segment has been reached * * @param id */ Utils.prototype.nextSegmentStart = function(segments, currentTime) { for (var i = 0; i < segments.length; i++) { if (segments[i].time > currentTime * 1000) { return segments[i].time; } } return 0; // if currentTime is beyond last segment start }; /** * get starttime next segment and 0 if the last segment has been reached * * @param id */ Utils.prototype.previousSegmentStart = function(segments, currentTime) { for (var i = (segments.length - 1); i >= 0; i--) { // added limit that last segment can jump to previous segment and not only segment start if (segments[i].time < (currentTime * 1000) - 900) { return segments[i].time; } } return 0; // jump only to the start }; /** * Timer object, that can be renewed (to reset the delay). 
* @type {Object} */ Utils.prototype.timer = { setup: function(callback, delay) { this.callback = function() { callback.call(); this.timeoutID = undefined; }; this.delay = delay; if (typeof this.timeoutID === 'number') { this.cancel(); } else { this.timeoutID = window.setTimeout(this.callback.bind(this), this.delay); } return this; }, renew: function() { window.clearTimeout(this.timeoutID); this.timeoutID = window.setTimeout(this.callback.bind(this), this.delay); }, cancel: function() { window.clearTimeout(this.timeoutID); this.timeoutID = undefined; } }; return Utils; });
##
# Copyright (c) 2015-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

from txdav.xml.element import WebDAVElement, dav_namespace, registerElement, \
    WebDAVTextElement, WebDAVEmptyElement, Bind, \
    SyncCollection, AddMember


@registerElement
class ServerInfo (WebDAVElement):
    # Root DAV:server-info element: a mandatory token plus optional
    # features/services children.
    namespace = dav_namespace
    name = "server-info"

    allowed_children = {
        (dav_namespace, "token"): (1, 1),
        (dav_namespace, "features"): (0, 1),
        (dav_namespace, "services"): (0, 1),
    }


@registerElement
class Token (WebDAVTextElement):
    # Opaque text token identifying the current server-info document state.
    namespace = dav_namespace
    name = "token"


@registerElement
class Features (WebDAVElement):
    # Container for the feature elements advertised by the server.
    namespace = dav_namespace
    name = "features"

    allowed_children = {}


@registerElement
class Applications (WebDAVElement):
    # Container of zero or more application elements.
    namespace = dav_namespace
    name = "applications"

    allowed_children = {
        (dav_namespace, "application"): (0, None),
    }


@registerElement
class Application (WebDAVElement):
    # One application entry: a name plus its supported features.
    namespace = dav_namespace
    name = "application"

    allowed_children = {
        (dav_namespace, "name"): (1, 1),
        (dav_namespace, "features"): (1, 1),
    }


@registerElement
class Name_Service (WebDAVTextElement):
    # Text name of a service/application.
    namespace = dav_namespace
    name = "name"


@registerElement
class Class1_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "class-1"


@registerElement
class Class2_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "class-2"


@registerElement
class Class3_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "class-3"


@registerElement
class AccessControl_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "access-control"


@registerElement
class VersionControl_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "version-control"


@registerElement
class ExtendedMkcol_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "extended-mkcol"


@registerElement
class Quota_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "quota"


# These features are already defined as elements elsewhere; alias them
# rather than re-registering duplicates.
Bind_Feature = Bind


@registerElement
class Search_Feature (WebDAVEmptyElement):
    namespace = dav_namespace
    name = "search"


SyncCollection_Feature = SyncCollection

AddMember_Feature = AddMember
class HtmlExtrator:
    """Stub HTML extractor; methods are not implemented yet.

    NOTE(review): the class name looks like a typo of 'HtmlExtractor', but it
    is the public interface, so it is kept unchanged for compatibility.
    """

    def __init__(self):
        pass

    def getCSV(self, url):
        # Placeholder: intended to fetch `url` and produce CSV output.
        pass
(function() {
  'use strict';

  // All view sub-modules that make up the experiments app's UI layer.
  var viewModules = [
    'experiments.views.albums',
    'experiments.views.comments',
    'experiments.views.photos',
    'experiments.views.posts',
    'experiments.views.todos',
    'experiments.views.users',
    'experiments.views.upload',
    'experiments.views.home',
    'experiments.views.view1',
    'experiments.views.view2'
  ];

  // Aggregate them into a single module and expose the app version.
  angular
    .module('experiments.views', viewModules)
    .value('version', '0.1');
})();
/* globals FileUploadBase, UploadFS, fileUploadHandler:true */
/* exported fileUploadHandler */
import { Meteor } from 'meteor/meteor';

// Register the UploadFS stores backing message uploads and user avatars.
// Both stores share the same upload-validation filter.
new UploadFS.Store({
	collection: RocketChat.models.Uploads.model,
	name: 'Uploads',
	filter: new UploadFS.Filter({
		onCheck: FileUpload.validateFileUpload,
	}),
});

new UploadFS.Store({
	collection: RocketChat.models.Avatars.model,
	name: 'Avatars',
	filter: new UploadFS.Filter({
		onCheck: FileUpload.validateFileUpload,
	}),
});

/**
 * Creates an upload handler bound to the store named by `directive`.
 * Logs an error and returns undefined when no such store exists.
 */
fileUploadHandler = (directive, meta, file) => {
	const store = UploadFS.getStore(directive);
	if (!store) {
		console.error('Invalid file store', directive);
		return;
	}
	return new FileUploadBase(store, meta, file);
};

// Keep the auth cookies in sync with the logged-in user so cookie-based
// requests (e.g. file downloads) can be authenticated.
Tracker.autorun(function() {
	if (Meteor.userId()) {
		document.cookie = `rc_uid=${ escape(Meteor.userId()) }; path=/`;
		document.cookie = `rc_token=${ escape(Accounts._storedLoginToken()) }; path=/`;
	}
});
// SMTP transport configuration for the mailer (host/credentials come from
// the environment; port 2525 without TLS matches dev relays like Mailtrap).
const mailConfig = {
  host: process.env.MAIL_HOST,
  port: 2525,
  secure: false,
  auth: {
    user: process.env.MAIL_USER,
    pass: process.env.MAIL_PASS,
  },
  default: {
    // Fixed: the original address was missing the closing '>' and was
    // therefore a malformed RFC 5322 mailbox ('Fastfeet <noreply@fastfeet.com').
    from: 'Fastfeet <noreply@fastfeet.com>',
  },
};

export default mailConfig;
import Quaternion from './Quaternion'; import Mat3 from './Mat3'; import Mat4 from './Mat4'; import Vec3 from './Vec3'; /** * @namespace math * @memberOf collision-detection * * @description * ``` * import {math} from 'collision-detection'; * ``` */ export {Quaternion, Mat3, Mat4, Vec3};
import React from 'react' import starIcon from '../../assets/images/star-icon.png' import process1 from '../../assets/images/process/process1.png' import process2 from '../../assets/images/process/process2.png' import process3 from '../../assets/images/process/process3.png' import process4 from '../../assets/images/process/process4.png' import process5 from '../../assets/images/process/process5.png' import process6 from '../../assets/images/process/process6.png' import shape1 from '../../assets/images/shape/circle-shape1.png' const HowItWork = () => { return ( <section className="process-area pt-100 pb-70"> <div className="container"> <div className="section-title"> <span className="sub-title"> <img src={starIcon} alt="banner" /> How It's Work </span> <h2>The Data Science Process</h2> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna.</p> </div> <div className="row"> <div className="col-lg-4 col-md-6"> <div className="single-process-box"> <div className="number">1</div> <div className="image"> <img src={process1} alt="banner" /> </div> <h3>Frame the Problem</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> <div className="col-lg-4 col-md-6"> <div className="single-process-box"> <div className="number">2</div> <div className="image"> <img src={process2} alt="banner" /> </div> <h3>Collect the Raw Data</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> <div className="col-lg-4 col-md-6"> <div className="single-process-box"> <div className="number">3</div> <div className="image"> <img src={process3} alt="banner" /> </div> <h3>Process the Data</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> <div className="col-lg-4 col-md-6"> <div className="single-process-box "> <div className="number">4</div> <div 
className="image"> <img src={process4} alt="banner" /> </div> <h3>Explore the Data</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> <div className="col-lg-4 col-md-6"> <div className="single-process-box"> <div className="number">5</div> <div className="image"> <img src={process5} alt="banner" /> </div> <h3>Perform In-depth Analysis</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> <div className="col-lg-4 col-md-6"> <div className="single-process-box"> <div className="number">6</div> <div className="image"> <img src={process6} alt="banner" /> </div> <h3>Communicate Results</h3> <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt.</p> </div> </div> </div> </div> <div className="circle-shape1"> <img src={shape1} alt="banner" /> </div> </section> ) } export default HowItWork
# -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2015-2019 Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import itertools from operator import attrgetter import discord.abc from . import utils from .user import BaseUser, User from .activity import create_activity from .permissions import Permissions from .enums import Status, try_enum from .colour import Colour from .object import Object class VoiceState: """Represents a Discord user's voice state. Attributes ------------ deaf: :class:`bool` Indicates if the user is currently deafened by the guild. mute: :class:`bool` Indicates if the user is currently muted by the guild. self_mute: :class:`bool` Indicates if the user is currently muted by their own accord. self_deaf: :class:`bool` Indicates if the user is currently deafened by their own accord. self_video: :class:`bool` Indicates if the user is currently broadcasting video. afk: :class:`bool` Indicates if the user is currently in the AFK channel in the guild. 
channel: :class:`VoiceChannel` The voice channel that the user is currently connected to. None if the user is not currently in a voice channel. """ __slots__ = ('session_id', 'deaf', 'mute', 'self_mute', 'self_video', 'self_deaf', 'afk', 'channel') def __init__(self, *, data, channel=None): self.session_id = data.get('session_id') self._update(data, channel) def _update(self, data, channel): self.self_mute = data.get('self_mute', False) self.self_deaf = data.get('self_deaf', False) self.self_video = data.get('self_video', False) self.afk = data.get('suppress', False) self.mute = data.get('mute', False) self.deaf = data.get('deaf', False) self.channel = channel def __repr__(self): return '<VoiceState self_mute={0.self_mute} self_deaf={0.self_deaf} self_video={0.self_video} channel={0.channel!r}>'.format(self) def flatten_user(cls): for attr, value in itertools.chain(BaseUser.__dict__.items(), User.__dict__.items()): # ignore private/special methods if attr.startswith('_'): continue # don't override what we already have if attr in cls.__dict__: continue # if it's a slotted attribute or a property, redirect it # slotted members are implemented as member_descriptors in Type.__dict__ if not hasattr(value, '__annotations__'): getter = attrgetter('_user.' + attr) setattr(cls, attr, property(getter, doc='Equivalent to :attr:`User.%s`' % attr)) else: # Technically, this can also use attrgetter # However I'm not sure how I feel about "functions" returning properties # It probably breaks something in Sphinx. # probably a member function by now def generate_function(x): def general(self, *args, **kwargs): return getattr(self._user, x)(*args, **kwargs) general.__name__ = x return general func = generate_function(attr) func.__doc__ = value.__doc__ setattr(cls, attr, func) return cls _BaseUser = discord.abc.User @flatten_user class Member(discord.abc.Messageable, _BaseUser): """Represents a Discord member to a :class:`Guild`. 
This implements a lot of the functionality of :class:`User`. .. container:: operations .. describe:: x == y Checks if two members are equal. Note that this works with :class:`User` instances too. .. describe:: x != y Checks if two members are not equal. Note that this works with :class:`User` instances too. .. describe:: hash(x) Returns the member's hash. .. describe:: str(x) Returns the member's name with the discriminator. Attributes ---------- joined_at: Optional[:class:`datetime.datetime`] A datetime object that specifies the date and time in UTC that the member joined the guild for the first time. In certain cases, this can be ``None``. activities: Tuple[Union[:class:`Game`, :class:`Streaming`, :class:`Spotify`, :class:`Activity`]] The activities that the user is currently doing. guild: :class:`Guild` The guild that the member belongs to. nick: Optional[:class:`str`] The guild specific nickname of the user. premium_since: Optional[:class:`datetime.datetime`] A datetime object that specifies the date and time in UTC when the member used their Nitro boost on the guild, if available. This could be ``None``. 
""" __slots__ = ('_roles', 'joined_at', 'premium_since', '_client_status', 'activities', 'guild', 'nick', '_user', '_state') def __init__(self, *, data, guild, state): self._state = state self._user = state.store_user(data['user']) self.guild = guild self.joined_at = utils.parse_time(data.get('joined_at')) self.premium_since = utils.parse_time(data.get('premium_since')) self._update_roles(data) self._client_status = { None: 'offline' } self.activities = tuple(map(create_activity, data.get('activities', []))) self.nick = data.get('nick', None) def __str__(self): return str(self._user) def __repr__(self): return '<Member id={1.id} name={1.name!r} discriminator={1.discriminator!r}' \ ' bot={1.bot} nick={0.nick!r} guild={0.guild!r}>'.format(self, self._user) def __eq__(self, other): return isinstance(other, _BaseUser) and other.id == self.id def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._user) @classmethod def _from_message(cls, *, message, data): author = message.author data['user'] = { attr: getattr(author, attr) for attr in author.__slots__ if attr[0] != '_' } return cls(data=data, guild=message.guild, state=message._state) @classmethod def _from_presence_update(cls, *, data, guild, state): clone = cls(data=data, guild=guild, state=state) to_return = cls(data=data, guild=guild, state=state) to_return._client_status = { key: value for key, value in data.get('client_status', {}).items() } to_return._client_status[None] = data['status'] return to_return, clone @classmethod def _copy(cls, member): self = cls.__new__(cls) # to bypass __init__ self._roles = utils.SnowflakeList(member._roles, is_sorted=True) self.joined_at = member.joined_at self._client_status = member._client_status.copy() self.guild = member.guild self.nick = member.nick self.activities = member.activities self._state = member._state # Reference will not be copied unless necessary by PRESENCE_UPDATE # See below self._user = member._user return self async def 
_get_channel(self): ch = await self.create_dm() return ch def _update_roles(self, data): self._roles = utils.SnowflakeList(map(int, data['roles'])) def _update(self, data): # the nickname change is optional, # if it isn't in the payload then it didn't change try: self.nick = data['nick'] except KeyError: pass self._update_roles(data) def _presence_update(self, data, user): self.activities = tuple(map(create_activity, data.get('activities', []))) self._client_status = { key: value for key, value in data.get('client_status', {}).items() } self._client_status[None] = data['status'] if len(user) > 1: u = self._user original = (u.name, u.avatar, u.discriminator) # These keys seem to always be available modified = (user['username'], user['avatar'], user['discriminator']) if original != modified: to_return = User._copy(self._user) u.name, u.avatar, u.discriminator = modified # Signal to dispatch on_user_update return to_return, u return False @property def status(self): """:class:`Status`: The member's overall status. 
If the value is unknown, then it will be a :class:`str` instead.""" return try_enum(Status, self._client_status[None]) @status.setter def status(self, value): # internal use only self._client_status[None] = str(value) @property def mobile_status(self): """:class:`Status`: The member's status on a mobile device, if applicable.""" return try_enum(Status, self._client_status.get('mobile', 'offline')) @property def desktop_status(self): """:class:`Status`: The member's status on the desktop client, if applicable.""" return try_enum(Status, self._client_status.get('desktop', 'offline')) @property def web_status(self): """:class:`Status`: The member's status on the web client, if applicable.""" return try_enum(Status, self._client_status.get('web', 'offline')) def is_on_mobile(self): """A helper function that determines if a member is active on a mobile device.""" return 'mobile' in self._client_status @property def colour(self): """:class:`Colour`: A property that returns a colour denoting the rendered colour for the member. If the default colour is the one rendered then an instance of :meth:`Colour.default` is returned. There is an alias for this named :meth:`color`. """ roles = self.roles[1:] # remove @everyone # highest order of the colour is the one that gets rendered. # if the highest is the default colour then the next one with a colour # is chosen instead for role in reversed(roles): if role.colour.value: return role.colour return Colour.default() @property def color(self): """:class:`Colour`: A property that returns a color denoting the rendered color for the member. If the default color is the one rendered then an instance of :meth:`Colour.default` is returned. There is an alias for this named :meth:`colour`. """ return self.colour @property def roles(self): """List[:class:`Role`]: A :class:`list` of :class:`Role` that the member belongs to. Note that the first element of this list is always the default '@everyone' role. 
These roles are sorted by their position in the role hierarchy. """ result = [] g = self.guild for role_id in self._roles: role = g.get_role(role_id) if role: result.append(role) result.append(g.default_role) result.sort() return result @property def mention(self): """:class:`str`: Returns a string that allows you to mention the member.""" if self.nick: return '<@!%s>' % self.id return '<@%s>' % self.id @property def display_name(self): """:class:`str`: Returns the user's display name. For regular users this is just their username, but if they have a guild specific nickname then that is returned instead. """ return self.nick if self.nick is not None else self.name @property def activity(self): """Union[:class:`Game`, :class:`Streaming`, :class:`Spotify`, :class:`Activity`]: Returns the primary activity the user is currently doing. Could be None if no activity is being done. .. note:: A user may have multiple activities, these can be accessed under :attr:`activities`. """ if self.activities: return self.activities[0] def mentioned_in(self, message): """Checks if the member is mentioned in the specified message. Parameters ----------- message: :class:`Message` The message to check if you're mentioned in. """ if self._user.mentioned_in(message): return True for role in message.role_mentions: has_role = utils.get(self.roles, id=role.id) is not None if has_role: return True return False def permissions_in(self, channel): """An alias for :meth:`abc.GuildChannel.permissions_for`. Basically equivalent to: .. code-block:: python3 channel.permissions_for(self) Parameters ----------- channel: :class:`Channel` The channel to check your permissions for. """ return channel.permissions_for(self) @property def top_role(self): """:class:`Role`: Returns the member's highest role. This is useful for figuring where a member stands in the role hierarchy chain. """ return self.roles[-1] @property def guild_permissions(self): """Returns the member's guild permissions. 
This only takes into consideration the guild permissions and not most of the implied permissions or any of the channel permission overwrites. For 100% accurate permission calculation, please use either :meth:`permissions_in` or :meth:`abc.GuildChannel.permissions_for`. This does take into consideration guild ownership and the administrator implication. """ if self.guild.owner == self: return Permissions.all() base = Permissions.none() for r in self.roles: base.value |= r.permissions.value if base.administrator: return Permissions.all() return base @property def voice(self): """Optional[:class:`VoiceState`]: Returns the member's current voice state.""" return self.guild._voice_state_for(self._user.id) async def ban(self, **kwargs): """|coro| Bans this member. Equivalent to :meth:`Guild.ban`. """ await self.guild.ban(self, **kwargs) async def unban(self, *, reason=None): """|coro| Unbans this member. Equivalent to :meth:`Guild.unban`. """ await self.guild.unban(self, reason=reason) async def kick(self, *, reason=None): """|coro| Kicks this member. Equivalent to :meth:`Guild.kick`. """ await self.guild.kick(self, reason=reason) async def edit(self, *, reason=None, **fields): """|coro| Edits the member's data. 
Depending on the parameter passed, this requires different permissions listed below: +---------------+--------------------------------------+ | Parameter | Permission | +---------------+--------------------------------------+ | nick | :attr:`Permissions.manage_nicknames` | +---------------+--------------------------------------+ | mute | :attr:`Permissions.mute_members` | +---------------+--------------------------------------+ | deafen | :attr:`Permissions.deafen_members` | +---------------+--------------------------------------+ | roles | :attr:`Permissions.manage_roles` | +---------------+--------------------------------------+ | voice_channel | :attr:`Permissions.move_members` | +---------------+--------------------------------------+ All parameters are optional. .. versionchanged:: 1.1.0 Can now pass ``None`` to ``voice_channel`` to kick a member from voice. Parameters ----------- nick: Optional[:class:`str`] The member's new nickname. Use ``None`` to remove the nickname. mute: :class:`bool` Indicates if the member should be guild muted or un-muted. deafen: :class:`bool` Indicates if the member should be guild deafened or un-deafened. roles: Optional[List[:class:`Role`]] The member's new list of roles. This *replaces* the roles. voice_channel: Optional[:class:`VoiceChannel`] The voice channel to move the member to. Pass ``None`` to kick them from voice. reason: Optional[:class:`str`] The reason for editing this member. Shows up on the audit log. Raises ------- Forbidden You do not have the proper permissions to the action requested. HTTPException The operation failed. """ http = self._state.http guild_id = self.guild.id payload = {} try: nick = fields['nick'] except KeyError: # nick not present so... 
pass else: nick = nick if nick else '' if self._state.self_id == self.id: await http.change_my_nickname(guild_id, nick, reason=reason) else: payload['nick'] = nick deafen = fields.get('deafen') if deafen is not None: payload['deaf'] = deafen mute = fields.get('mute') if mute is not None: payload['mute'] = mute try: vc = fields['voice_channel'] except KeyError: pass else: payload['channel_id'] = vc and vc.id try: roles = fields['roles'] except KeyError: pass else: payload['roles'] = tuple(r.id for r in roles) await http.edit_member(guild_id, self.id, reason=reason, **payload) # TODO: wait for WS event for modify-in-place behaviour async def move_to(self, channel, *, reason=None): """|coro| Moves a member to a new voice channel (they must be connected first). You must have the :attr:`~Permissions.move_members` permission to use this. This raises the same exceptions as :meth:`edit`. .. versionchanged:: 1.1.0 Can now pass ``None`` to kick a member from voice. Parameters ----------- channel: Optional[:class:`VoiceChannel`] The new voice channel to move the member to. Pass ``None`` to kick them from voice. reason: Optional[:class:`str`] The reason for doing this action. Shows up on the audit log. """ await self.edit(voice_channel=channel, reason=reason) async def add_roles(self, *roles, reason=None, atomic=True): r"""|coro| Gives the member a number of :class:`Role`\s. You must have the :attr:`~Permissions.manage_roles` permission to use this. Parameters ----------- \*roles: :class:`abc.Snowflake` An argument list of :class:`abc.Snowflake` representing a :class:`Role` to give to the member. reason: Optional[:class:`str`] The reason for adding these roles. Shows up on the audit log. atomic: :class:`bool` Whether to atomically add roles. This will ensure that multiple operations will always be applied regardless of the current state of the cache. Raises ------- Forbidden You do not have permissions to add these roles. HTTPException Adding roles failed. 
""" if not atomic: new_roles = utils._unique(Object(id=r.id) for s in (self.roles[1:], roles) for r in s) await self.edit(roles=new_roles, reason=reason) else: req = self._state.http.add_role guild_id = self.guild.id user_id = self.id for role in roles: await req(guild_id, user_id, role.id, reason=reason) async def remove_roles(self, *roles, reason=None, atomic=True): r"""|coro| Removes :class:`Role`\s from this member. You must have the :attr:`~Permissions.manage_roles` permission to use this. Parameters ----------- \*roles: :class:`abc.Snowflake` An argument list of :class:`abc.Snowflake` representing a :class:`Role` to remove from the member. reason: Optional[:class:`str`] The reason for removing these roles. Shows up on the audit log. atomic: :class:`bool` Whether to atomically remove roles. This will ensure that multiple operations will always be applied regardless of the current state of the cache. Raises ------- Forbidden You do not have permissions to remove these roles. HTTPException Removing the roles failed. """ if not atomic: new_roles = [Object(id=r.id) for r in self.roles[1:]] # remove @everyone for role in roles: try: new_roles.remove(Object(id=role.id)) except ValueError: pass await self.edit(roles=new_roles, reason=reason) else: req = self._state.http.remove_role guild_id = self.guild.id user_id = self.id for role in roles: await req(guild_id, user_id, role.id, reason=reason)
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Vector3
import math
import inverse_kinematics_module


def inverse_kinematics_publisher():
    """Read target XYZ coordinates from stdin, solve IK and publish the
    chosen joint angles on 'ros_arm_control' as a Vector3.

    Fixed: Python-2-only `print` statements replaced with single-argument
    `print()` calls (identical output under Python 2, valid under Python 3).
    """
    publisher_ik = rospy.Publisher('ros_arm_control', Vector3, queue_size=10)
    rospy.init_node('inverse_kinematics_publisher', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz
    calculated_angle = Vector3()  # ROS message type

    while not rospy.is_shutdown():
        # Input coordinates
        x = float(input("Enter x: "))
        y = float(input("Enter y: "))
        z = float(input("Enter z: "))
        angle = inverse_kinematics_module.compute_angles(x, y, z)

        # Print both IK solutions
        print("*************************")
        print("{:15s}{:15s}{:15s}".format("theta_base", "theta_shoulder", "theta_elbow"))
        print("{:<15f}{:<15f}{:<15f}".format(angle[0][0], angle[0][1], angle[0][2]))
        print("{:<15f}{:<15f}{:<15f}".format(angle[1][0], angle[1][1], angle[1][2]))

        # Sending, if angles are in range of servos i.e 0 to 180
        index = -1
        count = 0
        if 0.0 <= int(angle[0][0]) <= 180.0 and 0.0 <= int(angle[0][1]) <= 180.0 and 0.0 <= int(angle[0][2]) <= 180.0:
            index = 0
            count += 1
        if 0.0 <= int(angle[1][0]) <= 180.0 and 0.0 <= int(angle[1][1]) <= 180.0 and 0.0 <= int(angle[1][2]) <= 180.0:
            index = 1
            count += 1

        if index != -1:
            # Send angles
            if count == 2:
                # To handle redundancy: always send the solution whose
                # approach vector is (0, 0, 1); the condition for that is
                # theta_shoulder + theta_elbow being an odd multiple of 180.
                if (int(angle[0][1]) + int(angle[0][2])) % (2 * 180) != 0 and (int(angle[0][1]) + int(angle[0][2])) % 180 == 0:
                    index = 0
                elif (int(angle[1][1]) + int(angle[1][2])) % (2 * 180) != 0 and (int(angle[1][1]) + int(angle[1][2])) % 180 == 0:
                    index = 1
            calculated_angle.x = int(angle[index][0])
            calculated_angle.y = int(angle[index][1])
            calculated_angle.z = int(angle[index][2])
            rospy.loginfo("\ntheta_base = %f\ntheta_shoulder = %f\ntheta_elbow = %f",
                          calculated_angle.x, calculated_angle.y, calculated_angle.z)
            publisher_ik.publish(calculated_angle)
            print("=========================\n")
        else:
            # Print error message
            print("Angles not sent due to constraints")
            print("Please enter points in range")
        rate.sleep()


if __name__ == '__main__':
    try:
        inverse_kinematics_publisher()
    except rospy.ROSInterruptException:
        pass
const observableModule = require("tns-core-modules/data/observable");

/**
 * Builds the observable view model for the car-detail page.
 *
 * @param {object} carModel - the car record to expose to the view
 * @returns an Observable whose `car` property holds the given car
 */
function CarDetailViewModel(carModel) {
  return observableModule.fromObject({ car: carModel });
}

module.exports = CarDetailViewModel;
import reglFactory from "regl";

import fsh from "./fragment.glsl";
import vsh from "./vertex.glsl";
import { getSpeed } from "./logic";

/**
 * Starts a regl render loop that draws the arena onto `canvas`.
 *
 * @param {HTMLCanvasElement} canvas - target canvas for the WebGL context
 * @param {Function} gameState - returns the live game state; re-read every frame
 * @param {number} side - passed straight through as the `side` uniform
 */
function renderArena(canvas, gameState, side) {
  // BUG FIX: the original passed `window.pixelRatio`, which does not exist
  // (so `undefined` reached regl); the standard property is
  // `window.devicePixelRatio`. Also dropped an unused width/height
  // destructure of getBoundingClientRect().
  const regl = reglFactory({
    pixelRatio: window.devicePixelRatio,
    canvas: canvas
  });

  // One uniform entry per orb: `orbs[i]` -> [x, y, 0], re-read from the
  // live game state on every frame.
  const orbStates = () => {
    const orbs = [0, 1].map(i => ({
      [`orbs[${i}]`]: () => [gameState().dots[i].x, gameState().dots[i].y, 0]
    }));
    return Object.assign({}, ...orbs);
  };

  const uniforms = {
    resolution: ({ framebufferWidth, framebufferHeight }) => [
      framebufferWidth,
      framebufferHeight
    ],
    side,
    // Animation clock: advances with the frame tick, scaled by game speed.
    t: ({ tick }) => 0.005 * tick * getSpeed(),
    health: () => gameState().health,
    fightDistance: () => gameState().d,
    ...orbStates()
  };

  const drawFeedback = regl({
    frag: fsh,
    vert: vsh,
    attributes: {
      // Single screen-filling triangle in clip space.
      position: [-2, 0, 0, -2, 2, 2]
    },
    uniforms,
    count: 3
  });

  regl.frame(function () {
    regl.clear({ color: [0, 0, 0, 1] });
    drawFeedback();
  });
}

export { renderArena };
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import collections
import socket
import threading

from six.moves import BaseHTTPServer, SimpleHTTPServer

from horovod.run.util.network import find_port

# Timeout (seconds) for reading from a single request
SINGLE_REQUEST_TIMEOUT = 3

# Timeout (seconds) for accepting new request
TOTAL_TIMEOUT = 60

# HTTP status codes used below.
BAD_REQUEST = 400
TIMEOUT = 408
OK = 200


class KVStoreHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler exposing a simple scope/key -> value store.

    GET /<scope>/<key> returns the stored bytes (404 if absent).
    PUT /<scope>/<key> stores the request body under that scope/key.
    The backing dict (``cache``) and its lock live on the server object.
    """

    # Per-request socket timeout.
    timeout = SINGLE_REQUEST_TIMEOUT

    # Override GET handler
    def do_GET(self):
        paths = self.path.split('/')
        # BUG FIX: the original checked `len(paths) < 3`, so a path with more
        # than two segments crashed the 3-way unpack below with ValueError;
        # require exactly /<scope>/<key>.
        if len(paths) != 3:
            print(
                'KVStore ERROR: Invalid request path: {path}.'.format(
                    path=self.path))
            self.send_status_code(BAD_REQUEST)
            return

        _, scope, key = paths
        with self.server.cache_lock:
            value = self.server.cache.get(scope, {}).get(key)

        if value is None:
            self.send_status_code(404)
        else:
            self.send_response(200)
            self.send_header("Content-Length", str(len(value)))
            self.end_headers()
            self.wfile.write(value)

    # Override PUT handler
    def do_PUT(self):
        paths = self.path.split('/')
        # BUG FIX: same exact-length check as do_GET (was `< 3`).
        if len(paths) != 3:
            print(
                'KVStore ERROR: Invalid request path: {path}.'.format(
                    path=self.path))
            self.send_status_code(BAD_REQUEST)
            return

        _, scope, key = paths

        # Get body length
        content_length = int(self.headers['Content-Length'])
        try:
            value = self.rfile.read(content_length)
        except socket.timeout:
            if self.server.verbose:
                print(
                    'KVStore ERROR: Timeout when receiving {content_bytes} '
                    'bytes, aborting this incomplete request.'.format(
                        content_bytes=content_length))

            # If timeout, abort this request
            self.send_status_code(TIMEOUT)
            return

        with self.server.cache_lock:
            scope_dict = self.server.cache.setdefault(scope, {})
            scope_dict[key] = value

        if self.server.verbose:
            print(scope, self.server.cache[scope].keys())

        self.send_status_code(OK)

    def send_status_code(self, status_code):
        """Send a bodyless response carrying only the given status code."""
        self.send_response(status_code)
        self.send_header("Content-Length", 0)
        self.end_headers()

    # Override this function to prevent SimpleHTTPServer printing every
    # request out.
    def log_message(self, format, *args):
        pass


class RendezvousHandler(KVStoreHandler):
    """KV-store handler extended with DELETE, which a worker sends to signal
    that it has finished rendezvous for a given scope."""

    # Override DELETE handler
    def do_DELETE(self):
        paths = self.path.split('/')
        # BUG FIX: exact-length check (was `< 3`), matching do_GET/do_PUT.
        if len(paths) != 3:
            print(
                'Rendezvous ERROR: Invalid request path: {path}.'.format(
                    path=self.path))
            self.send_status_code(BAD_REQUEST)
            return

        _, scope, key = paths
        with self.server.finished_list_lock:
            self.server.finished_list[scope].append(key)

        self.send_status_code(OK)


class RendezvousHTTPServer(BaseHTTPServer.HTTPServer, object):
    def __init__(self, addr, handler, verbose):
        # This class has to inherit from object since HTTPServer is an old-style
        # class that does not inherit from object.
        super(RendezvousHTTPServer, self).__init__(addr, handler)

        # Lists of workers that sent finalize messages, keyed by scope.
        self.finished_list_lock = threading.Lock()
        self.finished_list = collections.defaultdict(list)

        # Expected number of workers per scope.
        self.scope_size = {}

        # Cache that provides the store
        self.cache_lock = threading.Lock()
        self.cache = {}

        self.verbose = verbose

    def extract_scope_size(self, host_alloc_plan):
        """Record how many workers each scope ('global', 'local_<cross_rank>',
        'cross_<local_rank>') expects, from the host allocation plan."""
        for slot_info in host_alloc_plan:
            self.scope_size['global'] = slot_info.size
            cross_rank = slot_info.cross_rank
            self.scope_size['local_' + str(cross_rank)] = slot_info.local_size
            local_rank = slot_info.local_rank
            self.scope_size['cross_' + str(local_rank)] = slot_info.cross_size

    # Decide whether all ranks have confirmed rendezvous completion.
    def should_continue(self):
        should_continue = False
        with self.finished_list_lock:
            for scope, cnt in self.scope_size.items():
                if cnt > len(self.finished_list[scope]):
                    should_continue = True
        return should_continue

    def handle_timeout(self):
        """Raise RuntimeError with a per-scope report of which workers have
        not sent their finalize messages."""
        error_msg = 'Rendezvous ERROR: Rendezvous server timeout after ' \
            '{time} seconds while waiting for all the ranks to send finalize ' \
            'messages.\n'.format(time=TOTAL_TIMEOUT)

        # BUG FIX: iterating a dict yields keys only, so the original
        # `for scope, finished_list in self.finished_list:` raised ValueError
        # before the intended error could be reported; iterate .items().
        # NOTE(review): scopes that received no finalize message at all do not
        # appear in finished_list (defaultdict) and so are not listed here.
        for scope, finished_list in self.finished_list.items():
            if self.scope_size[scope] > len(finished_list):
                # BUG FIX: added the missing space between 'received' and
                # 'finalized' in the concatenated message.
                error_msg += 'Scope {scope} expects {size} workers, only received ' \
                    'finalized message from [{ranks}].\n'.format(
                        scope=scope,
                        size=self.scope_size[scope],
                        ranks=' '.join(finished_list))

        raise RuntimeError(error_msg)


class RendezvousServer:
    def __init__(self, verbose):
        self.httpd = None
        self.listen_thread = None
        self.verbose = verbose

    # Rendezvous function finds a available port, create http socket,
    # and start listening loop to handle request
    def start_server(self, host_alloc_plan, pedl_provisioned_port):
        self.httpd, port = find_port(
            lambda addr: RendezvousHTTPServer(
                addr, RendezvousHandler, self.verbose),
            pedl_provisioned_port=pedl_provisioned_port,
            verbose=self.verbose,
        )
        self.httpd.extract_scope_size(host_alloc_plan)
        if self.verbose:
            print('Rendezvous INFO: HTTP rendezvous server started.')

        # start the listening loop
        self.listen_thread = threading.Thread(target=self.listen_loop)
        self.listen_thread.daemon = True
        self.listen_thread.start()

        return port

    # Listening loop for handle request; exits once every scope has received
    # all of its finalize messages.
    def listen_loop(self):
        while self.httpd.should_continue():
            self.httpd.handle_request()

        self.httpd.server_close()

        if self.verbose:
            print('Rendezvous INFO: Rendezvous finishes.')
        # Because this thread is daemonized, no need to join.


class KVStoreHTTPServer(BaseHTTPServer.HTTPServer, object):
    def __init__(self, addr, handler, verbose):
        super(KVStoreHTTPServer, self).__init__(addr, handler)

        # Cache that provides the store
        self.cache_lock = threading.Lock()
        self.cache = {}

        self.verbose = verbose


class KVStoreServer:
    def __init__(self, verbose):
        self.httpd = None
        self.listen_thread = None
        self.verbose = verbose

    # KVStore server finds a available port, create http socket,
    # and start listening loop to handle request
    def start_server(self):
        self.httpd, port = find_port(
            lambda addr: KVStoreHTTPServer(
                addr, KVStoreHandler, self.verbose))

        self.listen_thread = threading.Thread(
            target=lambda: self.httpd.serve_forever())
        self.listen_thread.daemon = True
        self.listen_thread.start()

        if self.verbose:
            print('KVStoreServer INFO: KVStore server started. Listen on port ' +
                  str(port))

        return port

    def shutdown_server(self):
        self.httpd.shutdown()

        self.httpd.server_close()

        if self.verbose:
            print('KVStoreServer INFO: KVStore server finishes.')
        # Because this thread is daemonized, no need to join.
"""V1.0-RC.1 Revision ID: b389abb05262 Revises: Create Date: 2020-07-25 15:41:23.517147 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b389abb05262' down_revision = None branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('requests', sa.Column('id', sa.Integer(), nullable=False), sa.Column('submit_time', sa.DateTime(), nullable=False), sa.Column('did', sa.String(length=512), nullable=False), sa.Column('columns', sa.String(length=1024), nullable=True), sa.Column('selection', sa.String(length=10485760), nullable=True), sa.Column('tree_name', sa.String(length=512), nullable=True), sa.Column('request_id', sa.String(length=48), nullable=False), sa.Column('image', sa.String(length=128), nullable=True), sa.Column('chunk_size', sa.Integer(), nullable=True), sa.Column('workers', sa.Integer(), nullable=True), sa.Column('result_destination', sa.String(length=32), nullable=False), sa.Column('result_format', sa.String(length=32), nullable=False), sa.Column('kafka_broker', sa.String(length=128), nullable=True), sa.Column('workflow_name', sa.String(length=40), nullable=False), sa.Column('files', sa.Integer(), nullable=True), sa.Column('files_skipped', sa.Integer(), nullable=True), sa.Column('total_events', sa.BigInteger(), nullable=True), sa.Column('total_bytes', sa.BigInteger(), nullable=True), sa.Column('did_lookup_time', sa.Integer(), nullable=True), sa.Column('generated_code_cm', sa.String(length=128), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('request_id') ) op.create_table('users', sa.Column('admin', sa.Boolean(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('email', sa.String(length=320), nullable=False), sa.Column('full_name', sa.String(length=120), nullable=False), sa.Column('institution', sa.String(length=120), nullable=True), 
sa.Column('key', sa.String(length=120), nullable=False), sa.Column('pending', sa.Boolean(), nullable=True), sa.Column('experiment', sa.String(length=120), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('email') ) op.create_table('file_status', sa.Column('id', sa.Integer(), nullable=False), sa.Column('file_id', sa.Integer(), nullable=False), sa.Column('request_id', sa.String(length=48), nullable=False), sa.Column('status', sa.String(length=128), nullable=False), sa.Column('timestamp', sa.DateTime(), nullable=False), sa.Column('pod_name', sa.String(length=128), nullable=True), sa.Column('info', sa.String(length=10485760), nullable=True), sa.ForeignKeyConstraint(['request_id'], ['requests.request_id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('files', sa.Column('id', sa.Integer(), nullable=False), sa.Column('request_id', sa.String(length=48), nullable=False), sa.Column('file_path', sa.String(length=512), nullable=False), sa.Column('adler32', sa.String(length=48), nullable=True), sa.Column('file_size', sa.BigInteger(), nullable=True), sa.Column('file_events', sa.BigInteger(), nullable=True), sa.ForeignKeyConstraint(['request_id'], ['requests.request_id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('transform_result', sa.Column('id', sa.Integer(), nullable=False), sa.Column('did', sa.String(length=512), nullable=False), sa.Column('file_id', sa.Integer(), nullable=True), sa.Column('file_path', sa.String(length=512), nullable=False), sa.Column('request_id', sa.String(length=48), nullable=False), sa.Column('transform_status', sa.String(length=120), nullable=False), sa.Column('transform_time', sa.Integer(), nullable=True), sa.Column('total_events', sa.BigInteger(), nullable=True), sa.Column('total_bytes', sa.BigInteger(), nullable=True), sa.Column('avg_rate', sa.Float(), nullable=True), sa.Column('messages', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['file_id'], 
['files.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('transform_result') op.drop_table('files') op.drop_table('file_status') op.drop_table('users') op.drop_table('requests') # ### end Alembic commands ###
import Toast from './components/toast'

// The toast currently on screen, if any; showing a new one replaces it.
let currentToast

/**
 * Vue plugin that adds `this.$toast(message, toastOptions)` to every
 * component instance. Opening a toast closes the one already visible.
 */
export default {
  install(Vue, options) {
    Vue.prototype.$toast = function (message, toastOptions) {
      if (currentToast) {
        currentToast.close()
      }
      currentToast = createToast({
        Vue,
        message,
        toastOptions,
        onClose: () => {
          currentToast = null
        }
      })
    }
  }
}

// Instantiates the Toast component with `toastOptions` as props, places
// `message` in its default slot, mounts it into <body>, and wires the
// close callback.
function createToast({ Vue, message, toastOptions, onClose }) {
  const ToastConstructor = Vue.extend(Toast)
  const toast = new ToastConstructor({ propsData: toastOptions })
  toast.$slots.default = message
  toast.$mount()
  toast.$on('close', onClose)
  document.body.appendChild(toast.$el)
  return toast
}
import * as TYPE from 'consts/actionTypes';

// Maximum number of output lines retained; older lines are dropped so the
// buffer cannot grow without bound.
const MAX_OUTPUT_LINES = 1000;

const initialState = {
  output: [],
  paused: false,
};

/**
 * Reducer for the core-output console.
 *
 * PRINT_CORE_OUTPUT prepends the new lines (newest first) and truncates to
 * MAX_OUTPUT_LINES; PAUSE/UNPAUSE toggle the `paused` flag.
 * (Fix: replaced a `var` declared inside a `case` without block braces —
 * it leaked across the whole switch — with a block-scoped `const`.)
 */
export default (state = initialState, action) => {
  switch (action.type) {
    case TYPE.PRINT_CORE_OUTPUT: {
      // slice() is a no-op copy when under the cap, so behavior matches the
      // original conditional truncation.
      const output = [...action.payload, ...state.output].slice(
        0,
        MAX_OUTPUT_LINES
      );
      return {
        ...state,
        output,
      };
    }

    case TYPE.PAUSE_CORE_OUTPUT:
      return {
        ...state,
        paused: true,
      };

    case TYPE.UNPAUSE_CORE_OUTPUT:
      return {
        ...state,
        paused: false,
      };

    default:
      return state;
  }
};
# Copyright (c) 2016 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.conf.urls import url from adjutant_ui.content.notifications import views urlpatterns = [ url(r'^$', views.IndexView.as_view(), name='index'), url(r'^(?P<notif_id>[^/]+)/$', views.NotificationDetailView.as_view(), name='detail'), ]
import { StructureSearchingGate } from './searching/structure-searching.gate'; import { StructureColumnHeaderGate } from './column/header/structure-column-header.gate'; import { StructurePagingGate } from './paging/structure-paging.gate'; import { StructureSelectionGate } from './source/structure-selection.gate'; import { StructureL10nGate } from './l10n/structure-l10n.gate'; import { StructurePanelGate } from './panel/structure-panel.gate'; import { StructureRowDetailGate } from './row/detail/structure-row-detail.gate'; import { StructureColumnMenuGate } from './column/menu/structure-column-menu.gate'; import { StructureSummariesGate } from '../../../summaries/feature/gate/structure-summaries.gate'; import { StructureInfoPanelGate } from './panel/info/structure-info-panel.gate'; import { StructureRowClassGate } from './row/class/structure-row-class.gate'; import { StructureRowColoringGate } from './row/coloring/structure-row-coloring.gate'; import { StructureRowStyleGate } from './row/style/structure-row-style.gate'; import { ThemeGridGate } from '../../../../schema/feature/gate/grid/theme-grid.gate'; import { SourceLoadingGate } from './source/source-loading.gate'; import { VerticalFormationGate } from '../../../vertical-formation/feature/gate/vertical-formation.gate'; import { StructureSortingGate } from '../../../sorting/feature/gate/structure-sorting.gate'; import { StructureFilterGate } from '../../../filter/feature/gate/structure-filter.gate'; import { StructureQuickFiltersGate } from '../../../filter/feature/gate/structure-quick-filters.gate'; export const structureGates = [ StructureColumnHeaderGate, StructurePagingGate, StructureSearchingGate, // StructureSourceGate, StructureSelectionGate, StructureL10nGate, StructurePanelGate, StructureRowDetailGate, StructureColumnMenuGate, StructureSummariesGate, StructureInfoPanelGate, StructureRowClassGate, StructureRowStyleGate, StructureRowColoringGate, ThemeGridGate, StructureSortingGate, SourceLoadingGate, 
StructureFilterGate, StructureQuickFiltersGate, VerticalFormationGate ]; //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoic3RydWN0dXJlLmdhdGVzLmpzIiwic291cmNlUm9vdCI6IiIsInNvdXJjZXMiOlsiLi4vLi4vLi4vLi4vLi4vLi4vYnVpbGQtY2xpL3Byb2plY3RzL25neC1ncmlkL3NyYy9zdHJ1Y3R1cmUvZ3JpZC9mZWF0dXJlL2dhdGUvc3RydWN0dXJlLmdhdGVzLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBLE9BQU8sRUFBRSxzQkFBc0IsRUFBRSxNQUFNLHNDQUFzQyxDQUFDO0FBQzlFLE9BQU8sRUFBRSx5QkFBeUIsRUFBRSxNQUFNLDhDQUE4QyxDQUFDO0FBQ3pGLE9BQU8sRUFBRSxtQkFBbUIsRUFBRSxNQUFNLGdDQUFnQyxDQUFDO0FBQ3JFLE9BQU8sRUFBRSxzQkFBc0IsRUFBRSxNQUFNLG1DQUFtQyxDQUFDO0FBQzNFLE9BQU8sRUFBRSxpQkFBaUIsRUFBRSxNQUFNLDRCQUE0QixDQUFDO0FBQy9ELE9BQU8sRUFBRSxrQkFBa0IsRUFBRSxNQUFNLDhCQUE4QixDQUFDO0FBQ2xFLE9BQU8sRUFBRSxzQkFBc0IsRUFBRSxNQUFNLHdDQUF3QyxDQUFDO0FBQ2hGLE9BQU8sRUFBRSx1QkFBdUIsRUFBRSxNQUFNLDBDQUEwQyxDQUFDO0FBQ25GLE9BQU8sRUFBRSxzQkFBc0IsRUFBRSxNQUFNLDBEQUEwRCxDQUFDO0FBQ2xHLE9BQU8sRUFBRSxzQkFBc0IsRUFBRSxNQUFNLHdDQUF3QyxDQUFDO0FBQ2hGLE9BQU8sRUFBRSxxQkFBcUIsRUFBRSxNQUFNLHNDQUFzQyxDQUFDO0FBQzdFLE9BQU8sRUFBRSx3QkFBd0IsRUFBRSxNQUFNLDRDQUE0QyxDQUFDO0FBQ3RGLE9BQU8sRUFBRSxxQkFBcUIsRUFBRSxNQUFNLHNDQUFzQyxDQUFDO0FBQzdFLE9BQU8sRUFBRSxhQUFhLEVBQUUsTUFBTSxzREFBc0QsQ0FBQztBQUNyRixPQUFPLEVBQUUsaUJBQWlCLEVBQUUsTUFBTSw4QkFBOEIsQ0FBQztBQUNqRSxPQUFPLEVBQUUscUJBQXFCLEVBQUUsTUFBTSxrRUFBa0UsQ0FBQztBQUN6RyxPQUFPLEVBQUUsb0JBQW9CLEVBQUUsTUFBTSxzREFBc0QsQ0FBQztBQUM1RixPQUFPLEVBQUUsbUJBQW1CLEVBQUUsTUFBTSxvREFBb0QsQ0FBQztBQUN6RixPQUFPLEVBQUUseUJBQXlCLEVBQUUsTUFBTSwyREFBMkQsQ0FBQztBQUV0RyxNQUFNLENBQUMsTUFBTSxjQUFjLEdBQUc7SUFDN0IseUJBQXlCO0lBQ3pCLG1CQUFtQjtJQUNuQixzQkFBc0I7SUFDdEIsdUJBQXVCO0lBQ3ZCLHNCQUFzQjtJQUN0QixpQkFBaUI7SUFDakIsa0JBQWtCO0lBQ2xCLHNCQUFzQjtJQUN0Qix1QkFBdUI7SUFDdkIsc0JBQXNCO0lBQ3RCLHNCQUFzQjtJQUN0QixxQkFBcUI7SUFDckIscUJBQXFCO0lBQ3JCLHdCQUF3QjtJQUN4QixhQUFhO0lBQ2Isb0JBQW9CO0lBQ3BCLGlCQUFpQjtJQUNqQixtQkFBbUI7SUFDbkIseUJBQXlCO0lBQ3pCLHFCQUFxQjtDQUNyQixDQUFDIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgU3RydWN0dXJlU2VhcmNoaW5nR2F0ZSB9IGZyb20gJy4vc2Vhc
mNoaW5nL3N0cnVjdHVyZS1zZWFyY2hpbmcuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVDb2x1bW5IZWFkZXJHYXRlIH0gZnJvbSAnLi9jb2x1bW4vaGVhZGVyL3N0cnVjdHVyZS1jb2x1bW4taGVhZGVyLmdhdGUnO1xuaW1wb3J0IHsgU3RydWN0dXJlUGFnaW5nR2F0ZSB9IGZyb20gJy4vcGFnaW5nL3N0cnVjdHVyZS1wYWdpbmcuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVTZWxlY3Rpb25HYXRlIH0gZnJvbSAnLi9zb3VyY2Uvc3RydWN0dXJlLXNlbGVjdGlvbi5nYXRlJztcbmltcG9ydCB7IFN0cnVjdHVyZUwxMG5HYXRlIH0gZnJvbSAnLi9sMTBuL3N0cnVjdHVyZS1sMTBuLmdhdGUnO1xuaW1wb3J0IHsgU3RydWN0dXJlUGFuZWxHYXRlIH0gZnJvbSAnLi9wYW5lbC9zdHJ1Y3R1cmUtcGFuZWwuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVSb3dEZXRhaWxHYXRlIH0gZnJvbSAnLi9yb3cvZGV0YWlsL3N0cnVjdHVyZS1yb3ctZGV0YWlsLmdhdGUnO1xuaW1wb3J0IHsgU3RydWN0dXJlQ29sdW1uTWVudUdhdGUgfSBmcm9tICcuL2NvbHVtbi9tZW51L3N0cnVjdHVyZS1jb2x1bW4tbWVudS5nYXRlJztcbmltcG9ydCB7IFN0cnVjdHVyZVN1bW1hcmllc0dhdGUgfSBmcm9tICcuLi8uLi8uLi9zdW1tYXJpZXMvZmVhdHVyZS9nYXRlL3N0cnVjdHVyZS1zdW1tYXJpZXMuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVJbmZvUGFuZWxHYXRlIH0gZnJvbSAnLi9wYW5lbC9pbmZvL3N0cnVjdHVyZS1pbmZvLXBhbmVsLmdhdGUnO1xuaW1wb3J0IHsgU3RydWN0dXJlUm93Q2xhc3NHYXRlIH0gZnJvbSAnLi9yb3cvY2xhc3Mvc3RydWN0dXJlLXJvdy1jbGFzcy5nYXRlJztcbmltcG9ydCB7IFN0cnVjdHVyZVJvd0NvbG9yaW5nR2F0ZSB9IGZyb20gJy4vcm93L2NvbG9yaW5nL3N0cnVjdHVyZS1yb3ctY29sb3JpbmcuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVSb3dTdHlsZUdhdGUgfSBmcm9tICcuL3Jvdy9zdHlsZS9zdHJ1Y3R1cmUtcm93LXN0eWxlLmdhdGUnO1xuaW1wb3J0IHsgVGhlbWVHcmlkR2F0ZSB9IGZyb20gJy4uLy4uLy4uLy4uL3NjaGVtYS9mZWF0dXJlL2dhdGUvZ3JpZC90aGVtZS1ncmlkLmdhdGUnO1xuaW1wb3J0IHsgU291cmNlTG9hZGluZ0dhdGUgfSBmcm9tICcuL3NvdXJjZS9zb3VyY2UtbG9hZGluZy5nYXRlJztcbmltcG9ydCB7IFZlcnRpY2FsRm9ybWF0aW9uR2F0ZSB9IGZyb20gJy4uLy4uLy4uL3ZlcnRpY2FsLWZvcm1hdGlvbi9mZWF0dXJlL2dhdGUvdmVydGljYWwtZm9ybWF0aW9uLmdhdGUnO1xuaW1wb3J0IHsgU3RydWN0dXJlU29ydGluZ0dhdGUgfSBmcm9tICcuLi8uLi8uLi9zb3J0aW5nL2ZlYXR1cmUvZ2F0ZS9zdHJ1Y3R1cmUtc29ydGluZy5nYXRlJztcbmltcG9ydCB7IFN0cnVjdHVyZUZpbHRlckdhdGUgfSBmcm9tICcuLi8uLi8uLi9maWx0ZXIvZmVhdHVyZS9nYXRlL3N0cnVjdHVyZS1maWx0ZXIuZ2F0ZSc7XG5pbXBvcnQgeyBTdHJ1Y3R1cmVRdWlja0ZpbHRlcnNHYXRlIH0gZnJvbSAnLi4vLi4vLi4vZ
mlsdGVyL2ZlYXR1cmUvZ2F0ZS9zdHJ1Y3R1cmUtcXVpY2stZmlsdGVycy5nYXRlJztcblxuZXhwb3J0IGNvbnN0IHN0cnVjdHVyZUdhdGVzID0gW1xuXHRTdHJ1Y3R1cmVDb2x1bW5IZWFkZXJHYXRlLFxuXHRTdHJ1Y3R1cmVQYWdpbmdHYXRlLFxuXHRTdHJ1Y3R1cmVTZWFyY2hpbmdHYXRlLFxuXHQvLyBTdHJ1Y3R1cmVTb3VyY2VHYXRlLFxuXHRTdHJ1Y3R1cmVTZWxlY3Rpb25HYXRlLFxuXHRTdHJ1Y3R1cmVMMTBuR2F0ZSxcblx0U3RydWN0dXJlUGFuZWxHYXRlLFxuXHRTdHJ1Y3R1cmVSb3dEZXRhaWxHYXRlLFxuXHRTdHJ1Y3R1cmVDb2x1bW5NZW51R2F0ZSxcblx0U3RydWN0dXJlU3VtbWFyaWVzR2F0ZSxcblx0U3RydWN0dXJlSW5mb1BhbmVsR2F0ZSxcblx0U3RydWN0dXJlUm93Q2xhc3NHYXRlLFxuXHRTdHJ1Y3R1cmVSb3dTdHlsZUdhdGUsXG5cdFN0cnVjdHVyZVJvd0NvbG9yaW5nR2F0ZSxcblx0VGhlbWVHcmlkR2F0ZSxcblx0U3RydWN0dXJlU29ydGluZ0dhdGUsXG5cdFNvdXJjZUxvYWRpbmdHYXRlLFxuXHRTdHJ1Y3R1cmVGaWx0ZXJHYXRlLFxuXHRTdHJ1Y3R1cmVRdWlja0ZpbHRlcnNHYXRlLFxuXHRWZXJ0aWNhbEZvcm1hdGlvbkdhdGVcbl07XG4iXX0=
'use strict'; define(['lib/Constants','assets/AssetManager', 'assets/AssetGroups','search/SearchController','geo/Object3DUtil', 'ui/ScaleLine','controls/CameraControls'], function (Constants, AssetManager, AssetGroups, SearchController, Object3DUtil, ScaleLine, CameraControls) { function ModelScaler(container) { Constants.worldUp = new THREE.Vector3(0,0,1); // Set world front to -y so all models are aligned to that and our camera faces it Constants.worldFront = new THREE.Vector3(0,-1,0); this.submitSizeUrl = Constants.baseUrl + '/submitSize'; this.container = null; this.controls = null; this.camera = null; this.scene = null; this.renderer = null; this.mouseX = 0; this.mouseY = 0; this.assetManager = null; this.searchController = null; this.sizeTextbox = null; this.scaleLine = null; // The ground plane this.groundModel = null; // The model to resize this.targetModelInstance = null; // Reference models to show next to the target model this.bracketingRefModels = { small: { name: 'car', modelInst: null, center: new THREE.Vector3(-200, 0, 0) }, // 2m to the left large: { name: 'man', modelInst: null, center: new THREE.Vector3(200, 0, 0) } // 2m to the right }; // TODO: Abstract this into some data structure this.targetObjectCenterPoint = new THREE.Vector3(0, 0, 0); this.defaultCameraPosition = new THREE.Vector3(0, -500, 200); // 5m back, 2m up this.defaultCameraNear = 0.1; // 0.1cm this.defaultCameraFar = 2000; // 20m this.defaultCameraFOV = 45; this.sizeBy = 'height'; // Does the reference models also need to be sized? // Set to false once they are stored in the database... this.sizeRefModels = false; // Report reference models on preload? // Used to do initial reporting of what units the reference objects should be in // Also requires this.sizeRefModels be true this.preloadAndReportRefSizes = false; AssetGroups.setDefaultFormat('utf8v2'); this.init(container); } // Loads information needed to display reference objects (i.e. which models to use?) 
// Does not load actual models themselves ModelScaler.prototype.loadRefObjectInfo = function () { // TODO: Load reference objects from file // Dims are used to size reference objects if this.sizeRefModels is true this.refObjects = { 'ring': { dim: [0.7, 1.82, 2], source: 'archive3d', id: 'f353432d' }, 'pen': { dim: [1, 2, 14.2], source: 'archive3d', id: 'cff4e585' }, 'sodacan': { dim: [6.4, 6.4, 12.2], source: 'archive3d', id: '5a0837f9' }, 'hand': { dim: [8.4, 4, 18.9], source: 'archive3d', id: '6ec53bd7' }, 'cat': { dim: [12, 46, 25], source: 'archive3d', id: 'f7de164c' }, 'computer': { dim: [60.8, 53.2, 35.7], source: 'archive3d', id: 'e288a2fb' }, 'man': { dim: [45.7, 29.8, 175.3], source: 'wss', id: '4a6b7b8de43bf3925c8e7963449f8577' }, 'bicycle': { dim: [61, 168, 110], source: 'archive3d', id: 'a696ebd4' }, 'car': { dim: [169.5, 467.5, 134], source: 'archive3d', id: '85a317ef' }, //c51d9857" }, 'house': { dim: [183.7, 609.9, 353.2], source: 'archive3d', id: 'fb5044a4' } }; }; // Reference model is preloaded ModelScaler.prototype.onRefModelPreload = function (deferredObj, refObjInfo, loadedModelInst) { if (this.sizeRefModels) { // Scale so physical size (e.g. height) of modelInstance equal // to reported refModel physical size (e.g. height) var refSize = Object3DUtil.convertBbDimsToSize(refObjInfo.dim, this.sizeBy); loadedModelInst.setToPhysicalSize(this.sizeBy, refSize); if (this.preloadAndReportRefSizes) { // Report sized reference model... 
this.submitSize(loadedModelInst); } } refObjInfo.modelInst = loadedModelInst; if (deferredObj) { deferredObj.resolve(); } }; // Preloads reference models // Sizes and report reference sizes (if this.sizeRefModels and this.preloadAndReportRefSizes are set) ModelScaler.prototype.preloadRefModels = function (callback) { var deferred = []; for (var name in this.refObjects) { if (this.refObjects.hasOwnProperty(name)) { var refObjInfo = this.refObjects[name]; var func = $.Deferred( function (refObjInfo, deferredObj) { this.assetManager.getModelInstance(refObjInfo.source, refObjInfo.id, this.onRefModelPreload.bind(this, deferredObj, refObjInfo) ); }.bind(this, refObjInfo) ).promise(); deferred.push(func); } } $.when.apply($, deferred).done( function () { if (callback) callback(); } ); }; function isFiniteNumber(n) { return !isNaN(parseFloat(n)) && isFinite(n); } ModelScaler.prototype.init = function (container) { this.assetManager = new AssetManager({ autoAlignModels: true }); this.searchController = new SearchController({ searchSucceededCallback: this.searchSucceeded.bind(this), getImagePreviewUrlCallback: this.assetManager.getImagePreviewUrl.bind(this.assetManager), onClickResultCallback: this.loadModel.bind(this), sources: Constants.assetSources.model, searchPanel: $('#searchPanel') }); this.assetManager.setSearchController(this.searchController); this.loadRefObjectInfo(); this.container = container; var width = this.container.clientWidth; var height = this.container.clientHeight; this.camera = new THREE.PerspectiveCamera(this.defaultCameraFOV, width / height, this.defaultCameraNear, this.defaultCameraFar); this.resetCamera(); this.controls = new CameraControls({ camera: this.camera, container: this.container, autoRotateCheckbox: $('#autoRotate'), renderCallback: this.render.bind(this) }); this.scene = new THREE.Scene(); this.scene.add(this.camera); var ambient = new THREE.AmbientLight(0x998877); this.scene.add(ambient); var directionalLight = new 
THREE.DirectionalLight(0xffeedd); directionalLight.position.set(100, -100, 100); this.scene.add(directionalLight); // RENDERER this.renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true }); this.renderer.setSize(width, height); this.container.appendChild(this.renderer.domElement); window.addEventListener('resize', this.onWindowResize.bind(this), false); // Load reference models this.loadReferenceModels(this.bracketingRefModels); // Load ground this.groundModel = Object3DUtil.makeGrid(10000,10000,100,100); this.scene.add(this.groundModel); this.sizeByElem = $('#sizeBy'); if (this.sizeByElem) { var sizeByOptions = Object3DUtil.getSizeByOptions(); for (var i = 0; i < sizeByOptions.length; i++) { var s = sizeByOptions[i]; this.sizeByElem.append('<option value="' + s + '">' + s + '</option>'); } var that = this; this.sizeByElem.change(function () { that.sizeByElem.find('option:selected').each(function () { that.setSizeBy($(this).val()); }); }); } this.scaleTo = $('#scaleTo'); // Populate this.scaleTo.append('<option value="NONE">None</option>'); this.scaleTo.append('<option value="INCHES">Inches</option>'); this.scaleTo.append('<option value="CM">Centimeters</option>'); this.scaleTo.append('<option value="M">Meters</option>'); this.scaleTo.append('<option value="UNIT">Unit</option>'); this.scaleTo.append('<option value="REF">Reference</option>'); this.scaleTo.change(function () { if (this.targetModelInstance) { this.scaleAndPositionTargetModel(); } }.bind(this)); this.scaleTo.val('UNIT'); this.submitButton = $('#submitSize'); if (this.submitButton) { this.submitButton.click(function () { this.submitSize(this.targetModelInstance); }.bind(this)); } this.sizeTextbox = $('#sizeTextbox'); if (this.sizeTextbox) { this.sizeTextbox.change(function () { var val = this.sizeTextbox.val(); if (isFiniteNumber(val)) { var f = parseFloat(val); this.scaleLine.setSize(f, 'textbox'); this.rescaleToSize(f); } else { this.sizeTextbox.val(''); } }.bind(this)); } 
this.preloadRefModels(this.createScaleLine_.bind(this)); this.repositionCamera(); }; ModelScaler.prototype.createScaleLine_ = function () { this.scaleLine = new ScaleLine( { container: $('#scaleLine'), resizeCallback: this.rescaleToSize.bind(this), refObjects: this.refObjects, sizeBy: this.sizeBy, useRefModelDims: !this.sizeRefModels } ); }; // Function to respond to clicks on ScaleLine and changed numerical value in sizeTextbox ModelScaler.prototype.rescaleToSize = function (size) { if (this.targetModelInstance) { var bbBoxRefPoint = new THREE.Vector3(0.5, 0.5, 0); this.targetModelInstance.setToPhysicalSize(this.sizeBy, size); Object3DUtil.placeObject3D(this.targetModelInstance.object3D, this.targetObjectCenterPoint, bbBoxRefPoint); this.updateBracketingRefModels(); var newSize = this.targetModelInstance.getPhysicalSize(this.sizeBy); this.sizeTextbox.val(newSize); } }; // Respond to change in sizeBy list ModelScaler.prototype.setSizeBy = function (sizeBy) { this.sizeBy = sizeBy; this.scaleLine.setSizeBy(sizeBy); var targetSize = this.targetModelInstance ? 
this.targetModelInstance.getPhysicalSize(this.sizeBy) : ''; if (targetSize) { this.scaleLine.setSize(targetSize, null); } if (this.sizeTextbox) { this.sizeTextbox.val(targetSize); } this.updateBracketingRefModels(); }; // Figures out what the bracketing reference objects should be and loads them ModelScaler.prototype.updateBracketingRefModels = function () { if (this.targetModelInstance) { var size = this.targetModelInstance.getPhysicalSize(this.sizeBy); var refs = this.scaleLine.findBracketingRefModels(size); this.bracketingRefModels['small'].name = refs['min']; this.bracketingRefModels['large'].name = refs['max']; this.loadReferenceModels(this.bracketingRefModels); } }; ModelScaler.prototype.searchSucceeded = function (source, resultList) { this.assetManager.cacheModelInfos(source, resultList); return { source: source, resultList: resultList }; }; ModelScaler.prototype.loadModel = function (source, id) { this.clear(); this.start = new Date().getTime(); this.assetManager.getModelInstance(source, id, this.onTargetModelLoad.bind(this)); }; ModelScaler.prototype.clear = function () { if (this.targetModelInstance) { this.scene.remove(this.targetModelInstance.object3D); this.targetModelInstance = null; } }; /** * Load reference models - assumes that models has already been removed from scene * @param refModelEntry - contains information about which model to load, and where to position it * @param deferredObj - jquery deferred parameters (if this is was called as part of jquery wait for deferred objects) */ ModelScaler.prototype.loadReferenceModel = function (refModelEntry, deferredObj) { if (!refModelEntry.name) { if (deferredObj) { // Part of jQuery.deferred chain... deferredObj.resolve(); } return; } var refObjInfo = this.refObjects[refModelEntry.name]; var refModelCenter = refModelEntry.center; var bbBoxRefPoint = new THREE.Vector3(0.5, 0.5, 0); var onRefModelLoaded = function (modelInstance) { if (this.sizeRefModels) { // Scale so physical size (e.g. 
height) of modelInstance equal to reported refModel physical size (e.g. height) var refSize = Object3DUtil.convertBbDimsToSize(refObjInfo.dim, this.sizeBy); modelInstance.setToPhysicalSize(this.sizeBy, refSize); } Object3DUtil.placeObject3D(modelInstance.object3D, refModelCenter, bbBoxRefPoint); this.scene.add(modelInstance.object3D); refModelEntry.modelInst = modelInstance; refObjInfo.modelInst = modelInstance; if (deferredObj) { // Part of jQuery.deferred chain... deferredObj.resolve(); } else { // don't know when both reference models have been loaded... reposition now this.repositionRefModelsAndCamera(); } }.bind(this); if (refObjInfo.modelInst) { onRefModelLoaded(refObjInfo.modelInst); } else { this.assetManager.getModelInstance(refObjInfo.source, refObjInfo.id, onRefModelLoaded); } }; ModelScaler.prototype.loadReferenceModels = function (refModelsMap) { // Clear current ref models var deferred = []; for (var prop in refModelsMap) { if (refModelsMap.hasOwnProperty(prop)) { // Clear models from scene var refModelEntry = refModelsMap[prop]; if (refModelEntry.modelInst) { this.scene.remove(refModelEntry.modelInst.object3D); refModelEntry.modelInst = null; } } } for (var prop in refModelsMap) { if (refModelsMap.hasOwnProperty(prop)) { // Push function onto deferred stack var refModelEntry = refModelsMap[prop]; var func = $.Deferred(this.loadReferenceModel.bind(this, refModelEntry)).promise(); deferred.push(func); // this.loadReferenceModel(refModelsMap[prop]); } } // Do deferred actions $.when.apply($, deferred).done( function () { // Wait for all models to be loaded and reposition them if needed this.repositionRefModelsAndCamera(); }.bind(this) ); }; // Call onModelLoad and when changing scaleTo ModelScaler.prototype.scaleAndPositionTargetModel = function () { var modelInstance = this.targetModelInstance; var centerPoint = this.targetObjectCenterPoint; var bbBoxRefPoint = new THREE.Vector3(0.5, 0.5, 0); switch (this.scaleTo.val()) { case 'NONE': 
modelInstance.setScale(1 / modelInstance.model.getVirtualUnit()); break; case 'INCHES': modelInstance.setScale(Constants.metersToVirtualUnit * Constants.modelUnitInches / modelInstance.model.getVirtualUnit()); break; case 'CM': modelInstance.setScale(Constants.metersToVirtualUnit * Constants.modelUnitCentimeters / modelInstance.model.getVirtualUnit()); break; case 'M': modelInstance.setScale(Constants.metersToVirtualUnit * Constants.modelUnitMeters / modelInstance.model.getVirtualUnit()); break; case 'UNIT': modelInstance.setScale(1); break; case 'REF': var refModel = (this.bracketingRefModels['small'].modelInst) ? this.bracketingRefModels['small'].modelInst : this.bracketingRefModels['large'].modelInst; var refSize = refModel.getPhysicalSize(this.sizeBy); modelInstance.setToPhysicalSize(this.sizeBy, refSize); break; default: console.error('Unknown scaleTo option: ' + this.scaleTo.val()); } Object3DUtil.placeObject3D(modelInstance.object3D, centerPoint, bbBoxRefPoint); var size = modelInstance.getPhysicalSize(this.sizeBy); this.scaleLine.setSize(size, 'scaleTo.' + this.scaleTo); if (this.sizeTextbox) { this.sizeTextbox.val(size); } this.updateBracketingRefModels(); }; ModelScaler.prototype.repositionRefModelsAndCamera = function () { this.repositionRefModels(); this.repositionCamera(); }; // Repositions the bracketing reference models as appropriate ModelScaler.prototype.repositionRefModels = function () { var epsilon = 50; var bbBoxRefPoint = new THREE.Vector3(0.5, 0.5, 0); var focusWidth = (this.targetModelInstance) ? 
this.targetModelInstance.getPhysicalDims().x : null; if (this.bracketingRefModels.small.modelInst) { var smallWidth = this.bracketingRefModels.small.modelInst.getPhysicalDims().x; if (focusWidth != null) { this.bracketingRefModels.small.center.x = -(focusWidth / 2 + smallWidth / 2 + epsilon); } Object3DUtil.placeObject3D(this.bracketingRefModels.small.modelInst.object3D, this.bracketingRefModels.small.center, bbBoxRefPoint); } if (this.bracketingRefModels.large.modelInst) { var largeWidth = this.bracketingRefModels.large.modelInst.getPhysicalDims().x; if (focusWidth != null) { this.bracketingRefModels.large.center.x = (focusWidth / 2 + largeWidth / 2 + epsilon); } Object3DUtil.placeObject3D(this.bracketingRefModels.large.modelInst.object3D, this.bracketingRefModels.large.center, bbBoxRefPoint); } }; ModelScaler.prototype.repositionCamera = function () { this.resetCamera(); var arr = []; if (this.bracketingRefModels.small.modelInst) arr.push(this.bracketingRefModels.small.modelInst.object3D); if (this.bracketingRefModels.large.modelInst) arr.push(this.bracketingRefModels.large.modelInst.object3D); if (this.targetModelInstance) arr.push(this.targetModelInstance.object3D); if (arr.length > 0) this.controls.viewObject3DArray(arr); }; ModelScaler.prototype.onTargetModelLoad = function (modelInstance) { var end = new Date().getTime(); var time = end - this.start; console.log('Load time for model: ' + time); this.targetModelInstance = modelInstance; this.scaleAndPositionTargetModel(); this.scene.add(modelInstance.object3D); }; ModelScaler.prototype.resetCamera = function () { this.camera.up = new THREE.Vector3(0, 0, 1); this.camera.position.copy(this.defaultCameraPosition); }; ModelScaler.prototype.onWindowResize = function () { if (!this.renderer || !this.camera) return; var width = this.container.clientWidth; var height = this.container.clientHeight; this.camera.aspect = width / height; this.camera.updateProjectionMatrix(); this.renderer.setSize(width, height); 
this.controls.handleResize(); this.render(); this.searchController.onResize(); }; ModelScaler.prototype.redisplay = function () { requestAnimationFrame(this.redisplay.bind(this)); this.controls.update(); this.render(); }; ModelScaler.prototype.render = function () { if (!this.renderer) return; this.renderer.render(this.scene, this.camera); }; ModelScaler.prototype.submitSize = function (modelInst) { if (!modelInst) modelInst = this.targetModelInstance; if (!modelInst) return; // Resize everything to meters for backend storage var modelId = modelInst.model.getFullID(); var selected = this.scaleLine.getSelected(); var params = { modelId: modelId, sizeBy: this.sizeBy, unit: modelInst.getVirtualUnit() * Constants.virtualUnitToMeters, // TODO: get from scaleline selected sizeTo and selected pickedRefId sizeTo: modelInst.getPhysicalSize(this.sizeBy) * Constants.virtualUnitToMeters, pickedRefId: selected.pickedRef.id, method: selected.method, // userId: null updateMain: Constants.submitUpdateMain }; var sizeData = jQuery.param(params); var inputs = this.submitButton; inputs.prop('disabled', true); $.ajax ({ type: 'POST', url: this.submitSizeUrl, data: sizeData, success: function (response, textStatus, jqXHR) { console.log('Size successfully submitted for ' + modelId + '!!!'); }, error: function (jqXHR, textStatus, errorThrown) { console.error('Error submitting size for ' + modelId + '!!!'); }, complete: function () { // Re-enable inputs inputs.prop('disabled', false); } }); }; // Exports return ModelScaler; });
import Acomplishments from "../components/Acomplishments/Acomplishments"; import BgAnimation from "../components/BackgrooundAnimation/BackgroundAnimation"; import Hero from "../components/Hero/Hero"; import Projects from "../components/Projects/Projects"; import Technologies from "../components/Technologies/Technologies"; import Timeline from "../components/TimeLine/TimeLine"; import { Layout } from "../layout/Layout"; import { Section } from "../styles/GlobalComponents"; const Home = () => { return ( <Layout> <Section grid> <Hero /> <BgAnimation /> </Section> <br /> <Projects /> <br /> <Technologies /> <br /> <Timeline /> <br /> <Acomplishments /> </Layout> ); }; export default Home;
var k = 0; var filtroGlobal = []; function removeURLParameter(url, parameter) { //prefer to use l.search if you have a location/link object var urlparts= url.split('?'); if (urlparts.length>=2) { var prefix= encodeURIComponent(parameter)+'='; var pars= urlparts[1].split(/[&;]/g); //reverse iteration as may be destructive for (var i= pars.length; i-- > 0;) { //idiom for string.startsWith if (pars[i].lastIndexOf(prefix, 0) !== -1) { pars.splice(i, 1); } } url= urlparts[0]+'?'+pars.join('&'); return url; } else { return url; } } function getObjectKeyIndex(obj, keyToFind) { var i = 0, key; for (key in obj) { if (key == keyToFind) { return i; } i++; } return null; } function addHtml(){ } function addFilters(query_string) { pegaQuerysting(); //Filtro array $('#filtros-body :input').each(function (index) { var type = $(this).val().split('-')[1]; if ($(this).is(':checked')) { filtroGlobal[$( this ).val()] = ''; if(type == 'string'){ filtroGlobal[$( this ).val()+'_option'] = ''; }else if(type == 'integer'){ filtroGlobal[$( this ).val()+'_option'] = ''; filtroGlobal[$( this ).val()+'_final'] = ''; }else if(type == 'date'){ filtroGlobal[$( this ).val()+'_initial'] = ''; filtroGlobal[$( this ).val()+'_final'] = ''; } }else{ delete filtroGlobal[$( this ).val()]; if(type == 'string'){ delete filtroGlobal[$( this ).val()+'_option']; }else if(type == 'integer'){ delete filtroGlobal[$( this ).val()+'_option']; delete filtroGlobal[$( this ).val()+'_final']; }else if(type == 'date'){ delete filtroGlobal[$( this ).val()+'_initial']; delete filtroGlobal[$( this ).val()+'_final']; } } }); var cb_filter = $('.cb_filter'); var cb_filter_label = $('.cb_filter_label'); var block_fields = $('#block_fields'); var filters_add = false; var msg = ''; if(!query_string){ query_string = []; } $('.filter_added').remove(); block_fields.addClass('thumbnail').append('\ <div class="col-md-12 page-header filter_added" id="filters_add">\ <div class="col-md-11">\ <h2>Filtros</h2>\ </div>\ <div 
class="col-md-1" style="padding-top: 15px;">\ <button type="button" class="btn btn-default" onclick="minimizeFilters();">\ <span class="glyphicon glyphicon-minus" aria-hidden="true"></span>\ </button>\ </div>\ </div>\ '); for( i=0; i < cb_filter.length; i++ ) { if(cb_filter[i].checked) { if (cb_filter[i].value.split('-')[1] == 'integer') { var what = "'select_integer_" + cb_filter[i].value.split('-')[0] + "'"; var row_integer = "'row_" + cb_filter[i].value + "'"; block_fields.append('\ <div class="row form-group col-md-12 filter_added">\ <div class="col-md-3">\ <label>' + cb_filter_label[i].innerHTML + '</label>\ <input v-model="filtrolist" type="number" id="' + cb_filter[i].value + '" class="form-control filters" onkeyup="addFilterFields(' + row_integer + ', this.value, \'integer\', this.id)">\ </div>\ <div class="col-md-3" style="margin-top: 25px;">\ <input v-model="filtrolist" type="number" id="' + cb_filter[i].value + '_final" name="' + cb_filter[i].value + '" class="form-control filters select_integer_' + cb_filter[i].value.split('-')[0] + '" onkeyup="addFilterFields(' + row_integer + ', this.value, \'integer\', this.name)">\ </div>\ <div class="col-md-6" style="margin-top: 25px;">\ <select v-model="filtrolist" class="form-control filters" id="' + cb_filter[i].value + '_option" name="' + cb_filter[i].value + '" onchange="filterFieldInteger(this.value, ' + what + '); addFilterFields(' + row_integer + ', this.value, \'integer\', this.name)">\ <option value="">Selecione</option>\ <option value="between">Entre</option>\ <option value="bigger">Maior que</option>\ <option value="smaller">Menor que</option>\ <option value="bigger_equal">Maior ou igual que</option>\ <option value="smaller_equal">Menor ou igual que</option>\ <option value="equal">Igual</option>\ </select>\ </div>\ </div>\ '); var value_session_integer = query_string[cb_filter[i].value] ? query_string[cb_filter[i].value] : ''; var value_session_integer_final = query_string[cb_filter[i].value+'_final'] ? 
query_string[cb_filter[i].value+'_final'] : ''; var value_session_integer_option = query_string[cb_filter[i].value+'_option'] ? query_string[cb_filter[i].value+'_option'] : 'between'; $('#'+cb_filter[i].value).val(value_session_integer); $('#'+cb_filter[i].value+'_final').val(value_session_integer_final); $('#'+cb_filter[i].value+'_option').val(value_session_integer_option); if(value_session_integer_option == 'between'){ $('#'+cb_filter[i].value+'_final').css('display', ''); }else{ $('#'+cb_filter[i].value+'_final').css('display', 'none'); } var value_integer_option = document.getElementById(cb_filter[i].value + '_option').options[document.getElementById(cb_filter[i].value + '_option').selectedIndex].text; var value_integer_initial = $('#' + cb_filter[i].value).val(); var value_integer_final = $('#' + cb_filter[i].value + '_final').val(); if (value_integer_option == 'Entre') { msg = value_integer_option + ' ' + value_integer_initial + ' e ' + value_integer_final; } else { msg = value_integer_option + ' ' + value_integer_initial; } $('#block_fields_minimize').append('<label class="filter_added">' + cb_filter_label[i].innerHTML.replace(/\s+$/, '') + ':</label><span id="row_' + cb_filter[i].value + '" class="filter_added"> ' + msg + ' </span>'); filters_add = true; } else if (cb_filter[i].value.split('-')[1] == 'boolean') { var row_boolean = "'row_" + cb_filter[i].value + "'"; block_fields.append('\ <div class="form-group col-md-6 filter_added" style="width: 48.8%;">\ <label>' + cb_filter_label[i].innerHTML + '</label>\ <select v-model="filtrolist" class="form-control filters" id="' + cb_filter[i].value + '" onchange="addFilterFields(' + row_boolean + ', this.value, \'boolean\', this.id)">\ <option value="1">Sim</option>\ <option value="0">Não</option>\ </select>\ </div>\ '); var value_session_boolean = query_string[cb_filter[i].value] ? 
query_string[cb_filter[i].value] : 1; $('#'+cb_filter[i].value).val(value_session_boolean); $('#block_fields_minimize').append('<label class="filter_added">' + cb_filter_label[i].innerHTML.replace(/\s+$/, '') + ':</label><span id="row_' + cb_filter[i].value + '" class="filter_added"> ' + document.getElementById(cb_filter[i].value).options[document.getElementById(cb_filter[i].value).selectedIndex].text + ' </span>'); filters_add = true; } else if (cb_filter[i].value.split('-')[1] == 'foreign_key') { var label = cb_filter_label[i].innerHTML; var value = cb_filter[i].value; var row_foreign_key = "'row_" + cb_filter[i].value + "'"; foreign(label, value, row_foreign_key, cb_filter[i], query_string, block_fields); filters_add = true; } else if (cb_filter[i].value.split('-')[1] == 'date') { date = new Date(); day = ("0" + (date.getDate())).slice(-2); month = ("0" + (date.getMonth() + 1)).slice(-2); year = date.getFullYear(); date_actual = year + '-' + month + '-' + day; var row_date = "'row_" + cb_filter[i].value + "'"; block_fields.append('\ <div class="row form-group col-md-12 filter_added">\ <div class="col-md-6">\ <label>' + cb_filter_label[i].innerHTML + '</label>\ <input v-model="filtrolist" type="date" value="' + date_actual + '" id="' + cb_filter[i].value + '_initial" name="' + cb_filter[i].value + '" class="form-control filters" onchange="addFilterFields(' + row_date + ', this.value, \'date\', this.name)">\ </div>\ <div class="col-md-6" style="margin-top: 25px;">\ <input v-model="filtrolist" type="date" value="' + date_actual + '" id="' + cb_filter[i].value + '_final" name="' + cb_filter[i].value + '" class="form-control filters" onchange="addFilterFields(' + row_date + ', this.value, \'date\', this.name)">\ </div>\ </div>\ '); var value_session_date_initial = query_string[cb_filter[i].value+'_initial'] ? query_string[cb_filter[i].value+'_initial'] : date_actual; var value_session_date_final = query_string[cb_filter[i].value+'_final'] ? 
query_string[cb_filter[i].value+'_final'] : date_actual; $('#'+cb_filter[i].value+'_initial').val(value_session_date_initial); $('#'+cb_filter[i].value+'_final').val(value_session_date_final); value_date_initial = new Date($('#' + cb_filter[i].value + '_initial').val().replace(/-/g, ',')); value_day_initial = ("0" + (value_date_initial.getDate())).slice(-2); value_month_initial = ("0" + (value_date_initial.getMonth() + 1)).slice(-2); value_year_initial = value_date_initial.getFullYear(); value_date_initial = value_day_initial + '/' + value_month_initial + '/' + value_year_initial; value_date_final = new Date($('#' + cb_filter[i].value + '_final').val().replace(/-/g, ',')); value_day_final = ("0" + (value_date_final.getDate())).slice(-2); value_month_final = ("0" + (value_date_final.getMonth() + 1)).slice(-2); value_year_final = value_date_final.getFullYear(); value_date_final = value_day_final + '/' + value_month_final + '/' + value_year_final; if (value_date_initial != value_date_final) { msg = 'Entre ' + value_date_initial + ' e ' + value_date_final; } else { msg = value_date_initial; } $('#block_fields_minimize').append('<label class="filter_added">' + cb_filter_label[i].innerHTML.replace(/\s+$/, '') + ':</label><span id="row_' + cb_filter[i].value + '" class="filter_added"> ' + msg + ' </span>'); filters_add = true; } else { var row_string = "'row_" + cb_filter[i].value + "'"; block_fields.append('\ <div class="row form-group col-md-12 filter_added">\ <div class="col-md-6">\ <label>' + cb_filter_label[i].innerHTML + '</label>\ <input v-model="filtrolist" type="text" id="' + cb_filter[i].value + '" class="form-control filters" onkeyup="addFilterFields(' + row_string + ', this.value, \'string\')">\ </div>\ <div class="col-md-6" style="margin-top: 25px;">\ <select v-model="filtrolist" class="form-control filters" id="' + cb_filter[i].value + '_option" onchange="addFilterFields()">\ <option value="">Selecione</option>\ <option value="between">Entre</option>\ 
<option value="start">Começa com</option>\ <option value="end">Termina com</option>\ </select>\ </div>\ </div>\ '); var value_session_string = query_string[cb_filter[i].value] ? query_string[cb_filter[i].value] : ''; var value_session_string_option = query_string[cb_filter[i].value+'_option'] ? query_string[cb_filter[i].value+'_option'] : 'between'; $('#'+cb_filter[i].value).val(value_session_string); $('#'+cb_filter[i].value+'_option').val(value_session_string_option); $('#block_fields_minimize').append('<label class="filter_added">' + cb_filter_label[i].innerHTML.replace(/\s+$/, '') + ':</label><span id="row_' + cb_filter[i].value + '" class="filter_added"> ' + value_session_string + ' </span>'); filters_add = true; } } } if(!filters_add){ $('#filters_add').remove(); block_fields.removeClass('thumbnail'); //history.pushState("", document.title, '' + window.location.href.split("?")[0]); } addFilterFields(); addQuery(); } function addFilterFields(target_id, value, type, element_id) { var msg = ''; if(type == 'integer'){ var value_integer_option = document.getElementById(element_id+'_option').options[document.getElementById(element_id+'_option').selectedIndex].text; var value_integer_initial = $('#'+element_id).val(); var value_integer_final = $('#'+element_id+'_final').val(); if(value_integer_option == 'Entre'){ msg = value_integer_option + ' ' + value_integer_initial + ' e ' + value_integer_final; }else{ msg = value_integer_option + ' ' + value_integer_initial; } $('#'+target_id).html(msg+' '); }else if(type == 'foreign_key'){ element_value = document.getElementById(element_id).options[document.getElementById(element_id).selectedIndex].text; $('#'+target_id).html(element_value+' '); }else if(type == 'date'){ value_date_initial = new Date($('#'+element_id+'_initial').val().replace(/-/g, ',')); value_day_initial = ("0" + (value_date_initial.getDate())).slice(-2); value_month_initial = ("0" + (value_date_initial.getMonth() + 1)).slice(-2); value_year_initial = 
value_date_initial.getFullYear(); value_date_initial = value_day_initial+'/'+value_month_initial+'/'+value_year_initial; value_date_final = new Date($('#'+element_id+'_final').val().replace(/-/g, ',')); value_day_final = ("0" + (value_date_final.getDate())).slice(-2); value_month_final = ("0" + (value_date_final.getMonth() + 1)).slice(-2); value_year_final = value_date_final.getFullYear(); value_date_final = value_day_final+'/'+value_month_final+'/'+value_year_final; if(value_date_initial != value_date_final){ msg = 'Entre ' + value_date_initial + ' e ' + value_date_final; }else{ msg = value_date_initial; } $('#'+target_id).html(msg+' '); }else if(type == 'boolean'){ element_value = document.getElementById(element_id).options[document.getElementById(element_id).selectedIndex].text; $('#'+target_id).html(element_value+' '); }else{ $('#'+target_id).html(value+' '); } addQuery(); } function filterFieldInteger(value, what){ if(value == 'between'){ $('.'+what).css('display', '').val(''); }else{ $('.'+what).css('display', 'none').val(''); } addFilterFields(); } function minimizeFilters() { $('#block_fields_thumbnail').css('display', ''); $('#block_fields').toggle('slow'); } function maximizeFilters() { $('#block_fields_thumbnail').css('display', 'none'); $('#block_fields').toggle('slow'); } function verifyQueryString() { $('input').iCheck({ checkboxClass: 'icheckbox_square-green', radioClass: 'iradio_square-green', increaseArea: '20%' // optional }); k =0; var result = {}; keyValuePairs = location.search.slice(1).split("&"); if(keyValuePairs!=""){ keyValuePairs.forEach(function(keyValuePair) { keyValuePair = keyValuePair.split('='); result[decodeURIComponent(keyValuePair[0])] = decodeURIComponent(keyValuePair[1]); }); } if(Object.keys(result).length){ $.each(result, function (index, value) { $('#check_'+index).prop('checked', true).parent().addClass('checked'); $('#check_'+index.replace('_initial', '')).prop('checked', true).parent().addClass('checked'); if(index == 
'periodo'){ filterPeriod(value); } if(index == 'procurar'){ filterFind(value); } }); addFilters(result); } } function foreign(label, value, row_foreign_key, cb_filter_i, query_string, block_fields){ $.ajax({ url: "/getForeignKey", data: { foreign_key: cb_filter_i.value.split('-')[0], model: cb_filter_i.value.split('-')[2], field_value: cb_filter_i.value.split('-')[3], field_key: cb_filter_i.value.split('-')[4] } }).done(function (json) { if (json.success == true && json.foreign_key) { var options = ''; $.each(json.foreign_key, function (index, value) { options += '<option value="' + index + '">' + value + '</option>'; }); block_fields.append('\ <div class="form-group col-md-6 filter_added" style="width: 48.8%;">\ <label>' + label + '</label>\ <select v-model="filtrolist" class="form-control filters" id="' + value + '" onchange="addFilterFields(' + row_foreign_key + ', this.value, \'foreign_key\', this.id)">\ <option value="">Selecione</option>' + options + '\ </select>\ </div>\ '); var value_session_foreign_key = query_string[value] ? 
query_string[value] : ''; $('#'+value).val(value_session_foreign_key); $('#block_fields_minimize').append('<label class="filter_added">' + label.replace(/\s+$/, '') + ':</label><span id="row_' + value + '" class="filter_added"> ' + document.getElementById(value).options[document.getElementById(value).selectedIndex].text + ' </span>'); filters_add = true; } addQuery(); }); } function pegaQuerysting(){ var search = window.location.href.substr(window.location.href.indexOf("?") + 1); var array = []; if(search.indexOf('http') === -1){ search = search?JSON.parse('{"' + search.replace(/&/g, '","').replace(/=/g,'":"') + '"}', function(key, value) { return key===""?value:decodeURIComponent(value) }):{} for (var key in search) { filtroGlobal[key] = search[key]; } } return array; } function addQuery() { var filters_fields = $('.filters'); if($('#period_find').val()){ var period_find = JSON.parse('{"'+$('#period_find').val().replace(/&/g, '","').replace(/=/g,'":"')+ '"}'); for(var key in period_find){ if(period_find[key] != ''){ filtroGlobal[key] = period_find[key]; } } } if(filters_fields.length > 0){ for( i=0; i < filters_fields.length; i++ ){ filtroGlobal[filters_fields[i].id] = filters_fields[i].value; // filters += ''+filters_fields[i].id+'='+filters_fields[i].value+'&'; } } var filter_string = ''; for (var fil in filtroGlobal){ // if(filtroGlobal[fil] != ''){ filter_string += fil+'='+filtroGlobal[fil]+'&'; // } } filter_string = filter_string.substring(0,(filter_string.length - 1)); // Previnir que quando acessa fica inserindo mais de uma vez if(k<1){ history.pushState("", document.title, window.location.href.split("?")[0] + '?'+ filter_string); } } function filterPeriod(period) { var period_find = $('#period_find'); var period_find_split = period_find.val().split("&"); var period_value = period_find.val().replace(period_find_split[0], 'periodo='+period); if(period != 'hoje' && period != '7' && period != '15' && period != '30'){ $('#other_period').val(period); } 
period_find.val(period_value); $('.period').css('color', '#9b9b9b'); $('#period_'+period).css('color', '#4a4a4a'); addQuery(); } function filterFind(find) { var period_find = $('#period_find'); if(period_find.val() != undefined){ var period_find_split = period_find.val().split("&"); var find_value = period_find.val().replace(period_find_split[1], 'procurar='+find); period_find.val(find_value); $('#find').val(find); } addQuery(); }
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Set of functions to guide the dataloading process, and to clean up dataset.py.

Created on Wed 1 Oct, 2020.
Adapted from Jupyter notebook style (Colab).

@author: calmac
"""

# Standard packages for reading/processing data
import numpy as np
from numpy.random import default_rng
import os
import cv2

# PyTorch
import torch
import torch.nn.functional as F   # fix: F.one_hot() is used by _one_hot_encode() but F was never imported
from torch.utils import data      # fix: data.Dataset is the base class of balanceToCovid but `data` was never imported

# My stuff
import settings

args = settings.parse_arguments()

# Fix: `n_classes` was referenced (in _one_hot_encode and balanceToCovid.__call__)
# but never defined in this module.  There are exactly three pathology classes:
# normal / pneumonia / COVID-19 (see pathology_mapping below).
n_classes = 3

##########################################################################
# Helper functions
##########################################################################


def _convert_labels(csvfile):
    """Convert the string pathology labels of a split .txt file to integers.

    Takes the list of csv/txt rows, extracts the 3rd column (the string
    pathology label) and encodes it as an integer via _get_int_labels().

    Returns (int_labels, ids): integer class labels plus patient ids (kept
    for counting unique patients later).  Integer labels suit the in-built
    nn.CrossEntropyLoss; call _one_hot_encode(int_labels) explicitly if a
    custom loss needs one-hot targets.  (The previous version computed the
    one-hot encoding unconditionally and threw the result away — dead work,
    now removed.)
    """
    int_labels, ids = _get_int_labels(csvfile)
    return int_labels, ids


def _get_int_labels(csvfile):
    """Take in .txt file rows, split apart, and return 3rd column as integer labels.

    @param csvfile: list of whitespace-separated rows
                    (col 0 = patient id, col 2 = pathology string).
    @return (int_labels, ids): int64 array of class indices, list of patient ids.
    """
    pathology_mapping = {
        'normal': 0,
        'pneumonia': 1,
        'COVID-19': 2
    }  # assign integers to pathology labels
    # Fix: np.int is deprecated and removed in NumPy >= 1.24.  int64 is also
    # what torch.from_numpy()/F.one_hot() expect downstream.
    int_labels = np.empty(len(csvfile)).astype(np.int64)  # initialise storage array
    ids = []
    for i, patient_i in enumerate(csvfile):
        patient_list = patient_i.split()           # ith patient info as a list
        pathology = patient_list[2]                # pathology name (3rd column)
        int_labels[i] = pathology_mapping[pathology]
        # Count patients: use id since some patients have multiple images
        ids.append(patient_list[0])
    return int_labels, ids


def _one_hot_encode(int_labels):
    """Return one hot encoded equivalents of integer labels from _get_int_labels()."""
    labels = torch.from_numpy(int_labels)
    return F.one_hot(labels, n_classes)


def _config_images(image):
    """Stack pixels over RGB channels so every image comes out as (H, W, 3)."""
    if len(image.shape) != 3:                        # grayscale input (H, W)
        image = np.stack((image, image, image), axis=2)
    elif image.shape[2] == 4:                        # 4-channel (e.g. RGBA) input
        imgray = image[:, :, 0]                      # keep first channel only
        image = np.stack((imgray, imgray, imgray), axis=2)
    return image


def _process_txt_file(file):
    """Read a .txt file and return its lines as a list."""
    with open(file, 'r') as fr:
        files = fr.readlines()
    return files


def denormalize(image, maxval):
    """Rescales image pixels from [0, 1] up to [0, maxval] (e.g. [0, 255])."""
    return image.astype(np.float32) * maxval


class Xray_resize(object):
    """Resize an image with cv2, without needing to convert to a PIL image
    (as torchvision.transforms.Resize() would require)."""

    def __init__(self):
        self.size = args.input_resize

    def __call__(self, img):
        return cv2.resize(img, (self.size, self.size))


class rescale(object):
    """Scales image pixels from [0, 255] to [0, 1]."""

    def __init__(self):
        self.maxval = 255

    def __call__(self, img):
        return torch.div(img, self.maxval)


##########################################################################
# Functions for balancing the dataset relative to COVID sample size
##########################################################################

class balanceToCovid(data.Dataset):
    """Builds a dataset balanced to the size of the (smallest) COVID class."""

    def __init__(self, csv_path):
        self.csvfile = _process_txt_file(csv_path)
        self.labels, self.ids = _convert_labels(self.csvfile)
        self.n_covids = count_covids(self.labels)
        self.all_files, self.n_files = self._getfiles()

    def _getfiles(self):
        """Split the filenames (2nd column of each row) into per-class lists.

        @return (all_files, n_files): dict of filename lists keyed by class
                name, and the per-class counts in [normal, pneum, covid] order.
        """
        y = self.labels
        normal_files = []
        pneum_files = []
        covid_files = []
        for j in range(len(y)):
            if y[j] == 0:        # label at row j is 'normal'
                normal_files.append(self.csvfile[j].split()[1])
            elif y[j] == 1:      # 'pneumonia'
                pneum_files.append(self.csvfile[j].split()[1])
            elif y[j] == 2:      # 'COVID-19'
                covid_files.append(self.csvfile[j].split()[1])
        # Store files as separate keys in a dict()
        all_files = {'normal': normal_files, 'pneum': pneum_files, 'covid': covid_files}
        n_files = [len(all_files[x]) for x in ['normal', 'pneum', 'covid']]
        return all_files, n_files

    def __call__(self):
        """Randomly pick n_covids normal and n_covids pneumonia files, keep
        every covid file, and return (X_list, y_list) for the balanced set.

        Only the first two classes are subsampled; the covid class is the
        reference size and is used in full.  (The old code also drew — and
        then discarded — a random sample for the covid class; removed.)
        """
        rng = default_rng()
        rand_normal = rng.choice(range(self.n_files[0]), self.n_covids, replace=False)
        x_n = [self.all_files['normal'][j] for j in rand_normal]
        rand_pneum = rng.choice(range(self.n_files[1]), self.n_covids, replace=False)
        x_p = [self.all_files['pneum'][j] for j in rand_pneum]
        X_list = x_n + x_p + self.all_files['covid']   # all filenames
        y_list = np.concatenate((np.zeros(self.n_covids),
                                 np.ones(self.n_covids),
                                 np.full(self.n_covids, 2)), axis=None)
        return X_list, y_list


def countPatients(ids):
    """Count unique patients by checking for duplicate ids.

    NOTE(review): only *adjacent* duplicates are collapsed, so this assumes
    `ids` is grouped/sorted by patient — confirm against the input files.
    """
    patientCount = 0
    for i in range(0, len(ids)):
        if i == 0:
            patientCount += 1
            continue
        if ids[i] == ids[i - 1]:
            continue
        patientCount += 1
    return patientCount


def count_covids(labels):
    """Count covid images using the class label (2 == COVID-19)."""
    n_covids = 0
    for y in labels:
        if y == 2:
            n_covids += 1  # only count covid images
    return n_covids
# -*- coding: utf-8 -*- import numpy as np import sys import os import pickle import json last_path = os.path.join(os.path.dirname(__file__), 'last/') result_path = os.path.join(os.path.dirname(__file__), 'result/') turn_path = os.path.join(os.path.dirname(__file__), 'turn/') if not os.path.isfile(last_path + "0.pickle"): print "No such a file: " + last_path + "0.pickle" print "exit..." sys.exit() with open(last_path + "0.pickle", "rb") as f: left = pickle.load(f) if not os.path.isfile(last_path + "1.pickle"): print "No such a file: " + last_path + "1.pickle" print "exit..." sys.exit() with open(last_path + "1.pickle", "rb") as f: right = pickle.load(f) if not os.path.isfile(result_path + 'painted.txt'): print "No such a file: " + result_path + "painted.txt" print "exit..." sys.exit() with open(result_path + 'painted.txt') as f: result = map(int, f.read().split(' ')) if result[0] > result[1]: print "left side win" with open(turn_path + "win.pickle", 'wb') as f: pickle.dump([left[0], left[1], 100 * result[0], np.zeros((17, 15), dtype=np.int32), True], f) elif result[0] < result[1]: print "right side win" with open(turn_path + "win.pickle", 'wb') as f: pickle.dump([right[0], right[1], 100 * result[1], np.zeros((17, 15), dtype=np.int32), True], f) else: print "draw" os.remove(turn_path + "win.pickle")
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ var App = require('app'); require('views/main/admin/stack_upgrade/upgrade_task_view'); describe('App.upgradeTaskView', function () { var view = App.upgradeTaskView.create({ content: Em.Object.create(), taskDetailsProperties: ['prop1'] }); view.removeObserver('content.isExpanded', view, 'doPolling'); view.removeObserver('outsideView', view, 'doPolling'); App.TestAliases.testAsComputedOr(view, 'showContent', ['outsideView', 'content.isExpanded']); describe("#logTabId", function() { it("depends on `elementId`", function() { view.reopen({ elementId: 'elementId' }); expect(view.get('logTabId')).to.equal('elementId-log-tab'); }); }); describe("#errorTabId", function() { it("depends on `elementId`", function() { view.reopen({ elementId: 'elementId' }); expect(view.get('errorTabId')).to.equal('elementId-error-tab'); }); }); describe("#logTabIdLink", function() { it("depends on `logTabId`", function() { view.reopen({ logTabId: 'elementId-log-tab' }); expect(view.get('logTabIdLink')).to.equal('#elementId-log-tab'); }); }); describe("#errorTabIdLInk", function() { it("depends on `errorTabId`", function() { view.reopen({ errorTabId: 'elementId-error-tab' }); 
expect(view.get('errorTabIdLInk')).to.equal('#elementId-error-tab'); }); }); describe("#copyErrLog()", function () { before(function () { sinon.stub(view, 'toggleProperty', Em.K); }); after(function () { view.toggleProperty.restore(); }); it("`errorLogOpened` is toggled", function () { view.copyErrLog(); expect(view.toggleProperty.calledWith('errorLogOpened')).to.be.true; }); }); describe("#copyOutLog()", function () { before(function () { sinon.stub(view, 'toggleProperty', Em.K); }); after(function () { view.toggleProperty.restore(); }); it("outputLogOpened is toggled", function () { view.copyOutLog(); expect(view.toggleProperty.calledWith('outputLogOpened')).to.be.true; }); }); describe("#openErrorLog()", function () { before(function () { sinon.stub(view, 'openLogWindow', Em.K); }); after(function () { view.openLogWindow.restore(); }); it("stderr is open with openLogWindow", function () { view.set('content.stderr', 'stderr'); view.openErrorLog(); expect(view.openLogWindow.calledWith('stderr')).to.be.true; }); }); describe("#openOutLog()", function () { before(function () { sinon.stub(view, 'openLogWindow', Em.K); }); after(function () { view.openLogWindow.restore(); }); it("stdout is open with openLogWindow", function () { view.set('content.stdout', 'stdout'); view.openOutLog(); expect(view.openLogWindow.calledWith('stdout')).to.be.true; }); }); describe("#openLogWindow()", function () { var mockAppendChild = { appendChild: Em.K }, mockWindow = { document: { write: Em.K, close: Em.K, createElement: function () { return mockAppendChild; }, createTextNode: Em.K, body: mockAppendChild } }; beforeEach(function () { sinon.stub(window, 'open').returns(mockWindow); sinon.spy(mockWindow.document, 'write'); sinon.spy(mockWindow.document, 'close'); sinon.spy(mockWindow.document, 'createElement'); sinon.spy(mockWindow.document, 'createTextNode'); sinon.spy(mockAppendChild, 'appendChild'); view.openLogWindow('log'); }); afterEach(function () { window.open.restore(); 
mockWindow.document.write.restore(); mockWindow.document.close.restore(); mockWindow.document.createElement.restore(); mockWindow.document.createTextNode.restore(); mockAppendChild.appendChild.restore(); }); it("window.open is called once", function () { expect(window.open.calledOnce).to.be.true; }); it("pre-element is created", function () { expect(mockWindow.document.createElement.calledWith('pre')).to.be.true; }); it("log-node is created", function () { expect(mockWindow.document.createTextNode.calledWith('log')).to.be.true; }); it("two nodes are appended", function () { expect(mockAppendChild.appendChild.calledTwice).to.be.true; }); it("document is closed", function () { expect(mockWindow.document.close.calledOnce).to.be.true; }); }); });
""" Check platform status after reboot. Three types of reboot are covered in this script: * Cold reboot * Fast reboot * Warm reboot This script is to cover the test case 'Reload configuration' in the SONiC platform test plan: https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md """ import logging import re import time from datetime import datetime import pytest from tests.common.fixtures.conn_graph_facts import conn_graph_facts from tests.common.utilities import wait_until from tests.common.reboot import * from tests.common.platform.transceiver_utils import check_transceiver_basic from tests.common.platform.interface_utils import check_all_interface_information, get_port_map from tests.common.platform.daemon_utils import check_pmon_daemon_status from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes from tests.common.helpers.assertions import pytest_assert pytestmark = [ pytest.mark.disable_loganalyzer, pytest.mark.topology('any') ] MAX_WAIT_TIME_FOR_INTERFACES = 300 MAX_WAIT_TIME_FOR_REBOOT_CAUSE = 120 @pytest.fixture(scope="module", autouse=True) def teardown_module(duthosts, enum_rand_one_per_hwsku_hostname, conn_graph_facts, xcvr_skip_list): duthost = duthosts[enum_rand_one_per_hwsku_hostname] yield logging.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good") interfaces = conn_graph_facts["device_conn"][duthost.hostname] check_critical_processes(duthost, watch_secs=10) check_interfaces_and_services(duthost, interfaces, xcvr_skip_list) def reboot_and_check(localhost, dut, interfaces, xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, reboot_kwargs=None): """ Perform the specified type of reboot and check platform status. @param localhost: The Localhost object. @param dut: The AnsibleHost object of DUT. 
@param interfaces: DUT's interfaces defined by minigraph @param xcvr_skip_list: list of DUT's interfaces for which transeiver checks are skipped @param reboot_type: The reboot type, pre-defined const that has name convention of REBOOT_TYPE_XXX. @param reboot_helper: The helper function used only by power off reboot @param reboot_kwargs: The argument used by reboot_helper """ logging.info("Sync reboot cause history queue with DUT reboot cause history queue") sync_reboot_history_queue_with_dut(dut) logging.info("Run %s reboot on DUT" % reboot_type) reboot(dut, localhost, reboot_type=reboot_type, reboot_helper=reboot_helper, reboot_kwargs=reboot_kwargs) # Append the last reboot type to the queue logging.info("Append the latest reboot type to the queue") REBOOT_TYPE_HISTOYR_QUEUE.append(reboot_type) check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type) def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type = None): """ Perform a further check after reboot-cause, including transceiver status, interface status @param localhost: The Localhost object. @param dut: The AnsibleHost object of DUT. 
@param interfaces: DUT's interfaces defined by minigraph """ logging.info("Wait until all critical services are fully started") wait_critical_processes(dut) if reboot_type is not None: logging.info("Check reboot cause") assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, 30, check_reboot_cause, dut, reboot_type), \ "got reboot-cause failed after rebooted by %s" % reboot_type if "201811" in dut.os_version or "201911" in dut.os_version: logging.info("Skip check reboot-cause history for version before 202012") else: logger.info("Check reboot-cause history") assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, 0, check_reboot_cause_history, dut, REBOOT_TYPE_HISTOYR_QUEUE), "Check reboot-cause history failed after rebooted by %s" % reboot_type if reboot_ctrl_dict[reboot_type]["test_reboot_cause_only"]: logging.info("Further checking skipped for %s test which intends to verify reboot-cause only" % reboot_type) return if dut.is_supervisor_node(): logging.info("skipping interfaces related check for supervisor") else: logging.info("Wait {} seconds for all the transceivers to be detected".format(MAX_WAIT_TIME_FOR_INTERFACES)) result = wait_until(MAX_WAIT_TIME_FOR_INTERFACES, 20, 0, check_all_interface_information, dut, interfaces, xcvr_skip_list) assert result, "Not all transceivers are detected or interfaces are up in {} seconds".format( MAX_WAIT_TIME_FOR_INTERFACES) logging.info("Check transceiver status") for asic_index in dut.get_frontend_asic_ids(): # Get the interfaces pertaining to that asic interface_list = get_port_map(dut, asic_index) interfaces_per_asic = {k:v for k, v in interface_list.items() if k in interfaces} check_transceiver_basic(dut, asic_index, interfaces_per_asic, xcvr_skip_list) logging.info("Check pmon daemon status") assert check_pmon_daemon_status(dut), "Not all pmon daemons running." 
if dut.facts["asic_type"] in ["mellanox"]: from .mellanox.check_hw_mgmt_service import check_hw_management_service from .mellanox.check_sysfs import check_sysfs logging.info("Check the hw-management service") check_hw_management_service(dut) logging.info("Check sysfs") check_sysfs(dut) def test_cold_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform cold reboot and check platform status """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD) def test_soft_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform soft reboot and check platform status """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] soft_reboot_supported = duthost.command('which soft-reboot', module_ignore_errors=True)["stdout"] if "" == soft_reboot_supported: pytest.skip("Soft-reboot is not supported on this DUT, skip this test case") if duthost.is_multi_asic: pytest.skip("Multi-ASIC devices not supporting soft reboot") reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_SOFT) def test_fast_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform fast reboot and check platform status """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] if duthost.is_multi_asic: pytest.skip("Multi-ASIC devices not supporting fast reboot") reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_FAST) def test_warm_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform warm reboot and check 
platform status """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] if duthost.is_multi_asic: pytest.skip("Multi-ASIC devices not supporting warm reboot") asic_type = duthost.facts["asic_type"] if asic_type in ["mellanox"]: issu_capability = duthost.command("show platform mlnx issu")["stdout"] if "disabled" in issu_capability: pytest.skip("ISSU is not supported on this DUT, skip this test case") reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_WARM) def _power_off_reboot_helper(kwargs): """ @summary: used to parametrized test cases on power_off_delay @param kwargs: the delay time between turning off and on the PSU """ pdu_ctrl = kwargs["pdu_ctrl"] all_outlets = kwargs["all_outlets"] power_on_seq = kwargs["power_on_seq"] delay_time = kwargs["delay_time"] for outlet in all_outlets: logging.debug("turning off {}".format(outlet)) pdu_ctrl.turn_off_outlet(outlet) time.sleep(delay_time) logging.info("Power on {}".format(power_on_seq)) for outlet in power_on_seq: logging.debug("turning on {}".format(outlet)) pdu_ctrl.turn_on_outlet(outlet) def test_power_off_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list, pdu_controller, power_off_delay): """ @summary: This test case is to perform reboot via powercycle and check platform status @param duthost: Fixture for DUT AnsibleHost object @param localhost: Fixture for interacting with localhost through ansible @param conn_graph_facts: Fixture parse and return lab connection graph @param xcvr_skip_list: list of DUT's interfaces for which transeiver checks are skipped @param pdu_controller: The python object of psu controller @param power_off_delay: Pytest parameter. The delay between turning off and on the PSU """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] UNSUPPORTED_ASIC_TYPE = ["cisco-8000"] if duthost.facts["asic_type"] in UNSUPPORTED_ASIC_TYPE: pytest.skip("Skipping test_power_off_reboot. 
Test unsupported on {} platform".format(duthost.facts["asic_type"])) pdu_ctrl = pdu_controller if pdu_ctrl is None: pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname) all_outlets = pdu_ctrl.get_outlet_status() # If PDU supports returning output_watts, making sure that all outlets has power. no_power = [item for item in all_outlets if int(item.get('output_watts', '1')) == 0] pytest_assert(not no_power, "Not all outlets have power output: {}".format(no_power)) # Purpose of this list is to control sequence of turning on PSUs in power off testing. # If there are 2 PSUs, then 3 scenarios would be covered: # 1. Turn off all PSUs, turn on PSU1, then check. # 2. Turn off all PSUs, turn on PSU2, then check. # 3. Turn off all PSUs, turn on one of the PSU, then turn on the other PSU, then check. power_on_seq_list = [] if all_outlets: power_on_seq_list = [[item] for item in all_outlets] power_on_seq_list.append(all_outlets) logging.info("Got all power on sequences {}".format(power_on_seq_list)) poweroff_reboot_kwargs = {"dut": duthost} try: for power_on_seq in power_on_seq_list: poweroff_reboot_kwargs["pdu_ctrl"] = pdu_ctrl poweroff_reboot_kwargs["all_outlets"] = all_outlets poweroff_reboot_kwargs["power_on_seq"] = power_on_seq poweroff_reboot_kwargs["delay_time"] = power_off_delay reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, REBOOT_TYPE_POWEROFF, _power_off_reboot_helper, poweroff_reboot_kwargs) except Exception as e: logging.debug("Restore power after test failure") for outlet in all_outlets: logging.debug("turning on {}".format(outlet)) pdu_ctrl.turn_on_outlet(outlet) # Sleep 120 for dut to boot up time.sleep(120) wait_critical_processes(duthost) raise e def test_watchdog_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform reboot via watchdog and check platform status """ duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] watchdogutil_status_result = duthost.command("watchdogutil status", module_ignore_errors=True) if "" != watchdogutil_status_result["stderr"] or "" == watchdogutil_status_result["stdout"]: pytest.skip("Watchdog is not supported on this DUT, skip this test case") reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, REBOOT_TYPE_WATCHDOG) def test_continuous_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to perform 3 cold reboot in a row """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] ls_starting_out = set(duthost.shell("ls /dev/C0-*", module_ignore_errors=True)["stdout"].split()) for i in range(3): reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD) ls_ending_out = set(duthost.shell("ls /dev/C0-*", module_ignore_errors=True)["stdout"].split()) pytest_assert(ls_ending_out == ls_starting_out, "Console devices have changed: expected console devices: {}, got: {}".format(", ".join(sorted(ls_starting_out)), ", ".join(sorted(ls_ending_out))))
// NOTE(review): this file looks like Alloy-generated controller code
// (alloy/controllers/index). Prefer editing the original controller source
// and regenerating rather than hand-editing this output — confirm before
// changing logic here.

// Pop `key` out of `obj` (if present) and return its value, or null.
// Used to strip Alloy-internal creation args from the controller arguments.
function __processArg(obj, key) {
    var arg = null;
    if (obj) {
        arg = obj[key] || null;
        delete obj[key];
    }
    return arg;
}

function Controller() {
    // One-shot "open" handler: builds the Android options menu once the
    // window (and thus its activity) exists, then detaches itself.
    function __alloyId9() {
        $.__views.win.removeEventListener("open", __alloyId9);
        if ($.__views.win.activity) $.__views.win.activity.onCreateOptionsMenu = function(e) {
            // Menu item "Expand" — click handler wired now if `expand` is
            // already defined, otherwise deferred until after all functions
            // are declared (see the __defers replay at the bottom).
            var __alloyId1 = {
                id: "item1",
                title: "Expand",
                showAsAction: Titanium.Android.SHOW_AS_ACTION_IF_ROOM
            };
            $.__views.item1 = e.menu.add(_.pick(__alloyId1, Alloy.Android.menuItemCreateArgs));
            $.__views.item1.applyProperties(_.omit(__alloyId1, Alloy.Android.menuItemCreateArgs));
            $.item1 = $.__views.item1;
            expand ? $.__views.item1.addEventListener("click", expand) : __defers["$.__views.item1!click!expand"] = true;
            // Menu item "Collapse".
            var __alloyId3 = {
                title: "Collapse",
                showAsAction: Titanium.Android.SHOW_AS_ACTION_IF_ROOM,
                id: "__alloyId2"
            };
            $.__views.__alloyId2 = e.menu.add(_.pick(__alloyId3, Alloy.Android.menuItemCreateArgs));
            $.__views.__alloyId2.applyProperties(_.omit(__alloyId3, Alloy.Android.menuItemCreateArgs));
            $.__alloyId2 = $.__views.__alloyId2;
            collapse ? $.__views.__alloyId2.addEventListener("click", collapse) : __defers["$.__views.__alloyId2!click!collapse"] = true;
            // Action view (button + text field) hosted inside menu item 3.
            $.__views.__alloyId5 = Ti.UI.createView({
                layout: "horizontal",
                id: "__alloyId5"
            });
            $.__views.__alloyId6 = Ti.UI.createButton({
                title: "Search",
                left: "0",
                id: "__alloyId6"
            });
            $.__views.__alloyId5.add($.__views.__alloyId6);
            $.__views.__alloyId7 = Ti.UI.createTextField({
                right: "0",
                hintText: "Type Something",
                id: "__alloyId7"
            });
            $.__views.__alloyId5.add($.__views.__alloyId7);
            // Menu item 3 collapses/expands its action view; `report` logs
            // both transitions.
            var __alloyId8 = {
                id: "item3",
                title: "Item 3",
                showAsAction: Titanium.Android.SHOW_AS_ACTION_COLLAPSE_ACTION_VIEW
            };
            $.__views.__alloyId5 && (__alloyId8.actionView = $.__views.__alloyId5);
            $.__views.item3 = e.menu.add(_.pick(__alloyId8, Alloy.Android.menuItemCreateArgs));
            $.__views.item3.applyProperties(_.omit(__alloyId8, Alloy.Android.menuItemCreateArgs));
            $.item3 = $.__views.item3;
            report ? $.__views.item3.addEventListener("expand", report) : __defers["$.__views.item3!expand!report"] = true;
            report ? $.__views.item3.addEventListener("collapse", report) : __defers["$.__views.item3!collapse!report"] = true;
        }; else {
            Ti.API.warn("You attempted to attach an Android Menu to a lightweight Window");
            Ti.API.warn("or other UI component which does not have an Android activity.");
            Ti.API.warn("Android Menus can only be opened on TabGroups and heavyweight Windows.");
        }
    }
    // Programmatically expand item3's action view.
    function expand() {
        $.item3.expandActionView();
    }
    // Programmatically collapse item3's action view.
    function collapse() {
        $.item3.collapseActionView();
    }
    // Log the expand/collapse event type and the current expanded state.
    function report(e) {
        Ti.API.info(e.type);
        Ti.API.info($.item3.actionViewExpanded);
    }
    require("alloy/controllers/BaseController").apply(this, Array.prototype.slice.call(arguments));
    this.__controllerPath = "index";
    this.args = arguments[0] || {};
    if (arguments[0]) {
        // Strip Alloy-internal creation arguments before they reach views.
        {
            __processArg(arguments[0], "__parentSymbol");
        }
        {
            __processArg(arguments[0], "$model");
        }
        {
            __processArg(arguments[0], "__itemTemplate");
        }
    }
    var $ = this;
    var exports = {};
    var __defers = {};
    $.__views.win = Ti.UI.createWindow({
        id: "win"
    });
    $.__views.win && $.addTopLevelView($.__views.win);
    $.__views.win.addEventListener("open", __alloyId9);
    $.__views.msg = Ti.UI.createLabel({
        color: "white",
        text: "Tap the menu buttons",
        font: {
            fontSize: "16dp"
        },
        id: "msg"
    });
    $.__views.win.add($.__views.msg);
    exports.destroy = function() {};
    _.extend($, $.__views);
    $.win.open();
    // Replay any listener registrations that were deferred because the
    // handler functions were not yet hoisted when the menu was built.
    __defers["$.__views.item1!click!expand"] && $.__views.item1.addEventListener("click", expand);
    __defers["$.__views.__alloyId2!click!collapse"] && $.__views.__alloyId2.addEventListener("click", collapse);
    __defers["$.__views.item3!expand!report"] && $.__views.item3.addEventListener("expand", report);
    __defers["$.__views.item3!collapse!report"] && $.__views.item3.addEventListener("collapse", report);
    _.extend($, exports);
}

var Alloy = require("alloy"), Backbone = Alloy.Backbone, _ = Alloy._;

module.exports = Controller;
# -*- coding: utf-8 -*-

from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported


class bitstamp1 (Exchange):
    # ccxt adapter for the legacy Bitstamp v1 HTTP API.
    # Note: the v1 endpoints are unversioned/implicitly-paired, so most
    # public methods only work for BTC/USD (enforced with ExchangeError).

    def describe(self):
        """Return the static exchange description (endpoints, markets, fees)
        merged over the base Exchange description."""
        return self.deep_extend(super(bitstamp1, self).describe(), {
            'id': 'bitstamp1',
            'name': 'Bitstamp v1',
            'countries': 'GB',
            'rateLimit': 1000,
            'version': 'v1',
            'has': {
                'CORS': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
                'api': 'https://www.bitstamp.net/api',
                'www': 'https://www.bitstamp.net',
                'doc': 'https://www.bitstamp.net/api',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'ticker',
                        'ticker_hour',
                        'order_book',
                        'transactions',
                        'eur_usd',
                    ],
                },
                'private': {
                    'post': [
                        'balance',
                        'user_transactions',
                        'open_orders',
                        'order_status',
                        'cancel_order',
                        'cancel_all_orders',
                        'buy',
                        'sell',
                        'bitcoin_deposit_address',
                        'unconfirmed_btc',
                        'ripple_withdrawal',
                        'ripple_address',
                        'withdrawal_requests',
                        'bitcoin_withdrawal',
                    ],
                },
            },
            # Hard-coded market list: the v1 API has no markets endpoint.
            'markets': {
                'BTC/USD': {'id': 'btcusd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'BTC/EUR': {'id': 'btceur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'EUR/USD': {'id': 'eurusd', 'symbol': 'EUR/USD', 'base': 'EUR', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/USD': {'id': 'xrpusd', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/EUR': {'id': 'xrpeur', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/BTC': {'id': 'xrpbtc', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/USD': {'id': 'ltcusd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/EUR': {'id': 'ltceur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/USD': {'id': 'ethusd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/EUR': {'id': 'etheur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/BTC': {'id': 'ethbtc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
            },
        })

    def fetch_order_book(self, symbol, params={}):
        """Fetch the BTC/USD order book (the only pair supported by v1)."""
        if symbol != 'BTC/USD':
            raise ExchangeError(self.id + ' ' + self.version + " fetchOrderBook doesn't support " + symbol + ', use it for BTC/USD only')
        orderbook = self.publicGetOrderBook(params)
        # API timestamps are in seconds; ccxt uses milliseconds.
        timestamp = int(orderbook['timestamp']) * 1000
        return self.parse_order_book(orderbook, timestamp)

    def fetch_ticker(self, symbol, params={}):
        """Fetch the BTC/USD ticker (the only pair supported by v1)."""
        if symbol != 'BTC/USD':
            raise ExchangeError(self.id + ' ' + self.version + " fetchTicker doesn't support " + symbol + ', use it for BTC/USD only')
        ticker = self.publicGetTicker(params)
        timestamp = int(ticker['timestamp']) * 1000
        vwap = float(ticker['vwap'])
        baseVolume = float(ticker['volume'])
        # Quote volume is derived from base volume at the VWAP price.
        quoteVolume = baseVolume * vwap
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': vwap,
            'open': float(ticker['open']),
            'close': None,
            'first': None,
            'last': float(ticker['last']),
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Convert a raw v1 trade dict into the unified ccxt trade structure.

        Public trades carry 'date'; private user transactions carry
        'datetime' (also epoch seconds here) and may carry 'order_id' and
        'currency_pair'.
        """
        timestamp = None
        if 'date' in trade:
            timestamp = int(trade['date']) * 1000
        elif 'datetime' in trade:
            # timestamp = self.parse8601(trade['datetime'])
            timestamp = int(trade['datetime']) * 1000
        # type == 0 means buy in the v1 API.
        side = 'buy' if (trade['type'] == 0) else 'sell'
        order = None
        if 'order_id' in trade:
            order = str(trade['order_id'])
        if 'currency_pair' in trade:
            if trade['currency_pair'] in self.markets_by_id:
                market = self.markets_by_id[trade['currency_pair']]
        return {
            'id': str(trade['tid']),
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'order': order,
            'type': None,
            'side': side,
            'price': float(trade['price']),
            'amount': float(trade['amount']),
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public BTC/USD trades (last minute window)."""
        if symbol != 'BTC/USD':
            raise ExchangeError(self.id + ' ' + self.version + " fetchTrades doesn't support " + symbol + ', use it for BTC/USD only')
        market = self.market(symbol)
        response = self.publicGetTransactions(self.extend({
            'time': 'minute',
        }, params))
        return self.parse_trades(response, market, since, limit)

    def fetch_balance(self, params={}):
        """Fetch account balances.

        The v1 balance response is flat: '<cur>_balance', '<cur>_available'
        and '<cur>_reserved' keys per lowercase currency code.
        """
        balance = self.privatePostBalance()
        result = {'info': balance}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            lowercase = currency.lower()
            total = lowercase + '_balance'
            free = lowercase + '_available'
            used = lowercase + '_reserved'
            account = self.account()
            account['free'] = self.safe_float(balance, free, 0.0)
            account['used'] = self.safe_float(balance, used, 0.0)
            account['total'] = self.safe_float(balance, total, 0.0)
            result[currency] = account
        return self.parse_balance(result)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order; v1 supports limit BTC/USD orders only."""
        if type != 'limit':
            raise ExchangeError(self.id + ' ' + self.version + ' accepts limit orders only')
        if symbol != 'BTC/USD':
            raise ExchangeError(self.id + ' v1 supports BTC/USD orders only')
        # Dispatches to privatePostBuy or privatePostSell based on side.
        method = 'privatePost' + self.capitalize(side)
        order = {
            'amount': amount,
            'price': price,
        }
        response = getattr(self, method)(self.extend(order, params))
        return {
            'info': response,
            'id': response['id'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by id; `symbol` is ignored by the v1 API."""
        return self.privatePostCancelOrder({'id': id})

    def parse_order_status(self, order):
        """Map a raw v1 order-status payload to a unified status string."""
        if (order['status'] == 'Queue') or (order['status'] == 'Open'):
            return 'open'
        if order['status'] == 'Finished':
            return 'closed'
        # Pass through any other raw status unchanged.
        return order['status']

    def fetch_order_status(self, id, symbol=None):
        """Fetch and normalize the status of an order by id."""
        self.load_markets()
        response = self.privatePostOrderStatus({'id': id})
        return self.parse_order_status(response)

    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the user's trades, optionally filtered by symbol.

        # NOTE(review): this posts to open_orders/{id} via
        # privatePostOpenOrdersId — confirm this is the intended endpoint
        # for user trade history in the v1 API.
        """
        self.load_markets()
        market = None
        if symbol:
            market = self.market(symbol)
        pair = market['id'] if market else 'all'
        request = self.extend({'id': pair}, params)
        response = self.privatePostOpenOrdersId(request)
        return self.parse_trades(response, market, since, limit)

    def fetch_order(self, id, symbol=None, params={}):
        """Not available on the v1 API."""
        raise NotSupported(self.id + ' fetchOrder is not implemented yet')

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build a signed (private) or plain (public) request descriptor.

        Private requests are form-encoded POSTs authenticated with an
        HMAC-SHA256 of nonce + uid + apiKey, uppercased per Bitstamp's spec.
        """
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            signature = self.encode(self.hmac(self.encode(auth), self.encode(self.secret)))
            query = self.extend({
                'key': self.apiKey,
                'signature': signature.upper(),
                'nonce': nonce,
            }, query)
            body = self.urlencode(query)
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Send a request and raise ExchangeError on an error-status payload."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'status' in response:
            if response['status'] == 'error':
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
import unittest
import os
from unittest.mock import patch

import sqlalchemy as db
import pandas as pd

from asset_manager.mappers.AssetMapper import AssetMapper
from asset_manager.mappers.PriceMapper import PriceMapper
from asset_manager.mappers.MapperConnection import MapperConnection
from asset_manager.assets.Asset import Asset
from asset_manager.assets.Crypto import Crypto

# NOTE(review): `os` and `sqlalchemy as db` are not referenced in this
# module — candidates for removal in a follow-up.


class AssetMapperTest(unittest.TestCase):
    """Integration tests for AssetMapper against the test database engine."""

    @classmethod
    def setUpClass(cls):
        # Initialize the shared connection against the test engine once;
        # subsequent MapperConnection() calls reuse it.
        MapperConnection("testengine")

    def setUp(self):
        # Seed one generic asset and one crypto asset before each test.
        self.engine = MapperConnection()
        with self.engine.connect() as con:
            con.execute("insert into assets(asset_id, asset_class) values ('AST', 'ASSET')")
            con.execute("insert into assets(asset_id, asset_class) values ('T', 'CRYPTO')")

    def tearDown(self):
        # Remove all seeded rows so each test starts from an empty table.
        self.engine = MapperConnection()
        with self.engine.connect() as con:
            con.execute("delete from assets where true")

    def test_get_assets(self):
        """get_assets() returns Asset rows by default and Crypto rows when
        filtered by asset_class=Crypto."""
        prices = pd.read_pickle("tests/test_data/etheur.pkl")
        # Stub out price loading so the test does not hit any price source.
        with patch.object(PriceMapper, 'get_prices', return_value=prices):
            test_assets = [Asset("AST")]
            test_cryptos = [Crypto("T")]
            loaded_assets = AssetMapper().get_assets()
            loaded_cryptos = AssetMapper(asset_class=Crypto).get_assets()
            self.assertEqual(test_assets, loaded_assets)
            self.assertEqual(test_cryptos, loaded_cryptos)
// Jest module mock for AsyncStorage: forward the ready-made mock that ships
// with @react-native-community/async-storage as this module's default export.
export { default } from '@react-native-community/async-storage/jest/async-storage-mock';
// Bootstrap the page's carousels once the DOM is ready.
$(function () {
  initOwlCarousel();
});

/**
 * Initialize the two Owl Carousel instances on the page.
 *
 * `.owl-one` shows 1–3 looping items with nav arrows (2.7 s autoplay);
 * `.owl-two` shows 1–6 looping items without nav (3 s autoplay).
 * Both keep autoplaying while hovered.
 */
function initOwlCarousel() {
  // Options shared by both carousels.
  var sharedOptions = {
    loop: true,
    margin: 10,
    autoplay: true,
    autoplayHoverPause: false
  };

  $('.owl-one').owlCarousel($.extend({}, sharedOptions, {
    nav: true,
    autoplayTimeout: 2700,
    responsive: {
      0: { items: 1 },
      767: { items: 2 },
      991: { items: 3 }
    }
  }));

  $('.owl-two').owlCarousel($.extend({}, sharedOptions, {
    nav: false,
    autoplayTimeout: 3000,
    responsive: {
      0: { items: 1 },
      476: { items: 2 },
      767: { items: 4 },
      991: { items: 6 }
    }
  }));
}
#! /usr/bin/env python # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import argparse import logging import os import sys from functools import partial from typing import List, Union import numpy as np import pandas as pd import sklearn from scipy.stats import entropy from sklearn.calibration import calibration_curve from sklearn.metrics import brier_score_loss from ludwig.constants import * from ludwig.contrib import contrib_command, contrib_import from ludwig.utils import visualization_utils from ludwig.utils.data_utils import load_from_file, load_json, load_array from ludwig.utils.print_utils import logging_level_registry from ludwig.utils.data_utils import CACHEABLE_FORMATS, \ figure_data_format_dataset, external_data_reader_registry from ludwig.utils.misc_utils import get_from_registry logger = logging.getLogger(__name__) def validate_conf_treshholds_and_probabilities_2d_3d( probabilities, treshhold_output_feature_names ): """Ensure probabilities and treshhold output_feature_names arrays have two members each. 
:param probabilities: List of probabilities per model :param threshhold_output_feature_names: List of threshhold output_feature_names per model :raise: RuntimeError """ validation_mapping = { 'probabilities': probabilities, 'treshhold_output_feature_names': treshhold_output_feature_names } for item, value in validation_mapping.items(): item_len = len(value) if item_len != 2: exception_message = 'Two {} should be provided - ' \ '{} was given.'.format(item, item_len) logging.error(exception_message) raise RuntimeError(exception_message) def load_data_for_viz(load_type, model_file_statistics, **kwargs): """Load model file data in to list of . :param load_type: type of the data loader to be used. :param model_file_statistics: JSON file or list of json files containing any model experiment stats. :return List of training statistics loaded as json objects. """ supported_load_types = dict(load_json=load_json, load_from_file=partial( load_from_file, dtype=kwargs.get('dtype', int), ground_truth_split=kwargs.get( 'ground_truth_split', 2) ) ) loader = supported_load_types[load_type] try: stats_per_model = [loader(stats_f) for stats_f in model_file_statistics] except (TypeError, AttributeError): logging.exception( 'Unable to open model statistics file {}!'.format( model_file_statistics ) ) raise return stats_per_model def convert_to_list(item): """If item is not list class instance or None put inside a list. :param item: object to be checked and converted :return: original item if it is a list instance or list containing the item. """ return item if item is None or isinstance(item, list) else [item] def _validate_output_feature_name_from_train_stats( output_feature_name, train_stats_per_model ): """Validate prediction output_feature_name from model train stats and return it as list. 
:param output_feature_name: output_feature_name containing ground truth :param train_stats_per_model: list of per model train stats :return output_feature_names: list of output_feature_name(s) containing ground truth """ output_feature_names_set = set() for ls in train_stats_per_model: for _, values in ls.items(): for key in values: output_feature_names_set.add(key) try: if output_feature_name in output_feature_names_set: return [output_feature_name] else: return output_feature_names_set # raised if output_feature_name is emtpy iterable (e.g. [] in set()) except TypeError: return output_feature_names_set def _validate_output_feature_name_from_test_stats( output_feature_name, test_stats_per_model ): """Validate prediction output_feature_name from model test stats and return it as list. :param output_feature_name: output_feature_name containing ground truth :param test_stats_per_model: list of per model test stats :return output_feature_names: list of output_feature_name(s) containing ground truth """ output_feature_names_set = set() for ls in test_stats_per_model: for key in ls: output_feature_names_set.add(key) try: if output_feature_name in output_feature_names_set: return [output_feature_name] else: return output_feature_names_set # raised if output_feature_name is emtpy iterable (e.g. [] in set()) except TypeError: return output_feature_names_set def _encode_categorical_feature( raw: np.array, str2idx: dict ) -> np.array: """encodes raw categorical string value to encoded numeric value Args: :param raw: (np.array) string categorical representation :param str2idx: (dict) dictionary that maps string representation to encoded value. Returns: np.array """ return str2idx[raw] def _extract_ground_truth_values( ground_truth: str, output_feature_name: str, ground_truth_split: int, split_file: Union[str, None] = None) -> pd.Series: """Helper function to extract ground truth values Args: :param ground_truth: (str) path to source data containing ground truth. 
:param output_feature_name: (str) output feature name for ground truth values. :param ground_truth_split: (int) dataset split to use for ground truth, defaults to 2. :param split_file: (Union[str, None]) optional file path to split values. # Return :return pd.Series: ground truth values from source data set """ # determine ground truth data format and get appropriate reader data_format = figure_data_format_dataset(ground_truth) if data_format not in CACHEABLE_FORMATS: raise ValueError( '{} is not supported for ground truth file, ' 'valid types are {}'.format(data_format, CACHEABLE_FORMATS) ) reader = get_from_registry( data_format, external_data_reader_registry ) # retrieve ground truth from source data set gt_df = reader(ground_truth) # extract ground truth for visualization if SPLIT in gt_df: # get split value from source data set split = gt_df[SPLIT] gt = gt_df[output_feature_name][ split == ground_truth_split] elif split_file is not None: # retrieve from split file split = load_array(split_file) gt = gt_df[output_feature_name][ split == ground_truth_split] else: # use all the data in ground_truth gt = gt_df[output_feature_name] return gt def generate_filename_template_path(output_dir, filename_template): """Ensure path to template file can be constructed given an output dir. Create output directory if yet does exist. :param output_dir: Directory that will contain the filename_template file :param filename_template: name of the file template to be appended to the filename template path :return: path to filename template inside the output dir or None if the output dir is None """ if output_dir: os.makedirs(output_dir, exist_ok=True) return os.path.join(output_dir, filename_template) return None def compare_performance_cli( test_statistics: Union[str, List[str]], **kwargs: dict ) -> None: """Load model data from files to be shown by compare_performance. # Inputs :param test_statistics: (Union[str, List[str]]) path to experiment test statistics file. 
:param kwargs: (dict) parameters for the requested visualizations. # Return :return None: """ test_stats_per_model = load_data_for_viz('load_json', test_statistics) compare_performance(test_stats_per_model, **kwargs) def learning_curves_cli( training_statistics: Union[str, List[str]], **kwargs: dict ) -> None: """Load model data from files to be shown by learning_curves. # Inputs :param training_statistics: (Union[str, List[str]]) path to experiment training statistics file :param kwargs: (dict) parameters for the requested visualizations. # Return :return None: """ train_stats_per_model = load_data_for_viz('load_json', training_statistics) learning_curves(train_stats_per_model, **kwargs) def compare_classifiers_performance_from_prob_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, output_directory: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_from_prob. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file :param ground_truth: (str) path to ground truth file :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param output_directory: (str) name of output directory containing training results. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # translate string to encoded numeric value # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file=split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) compare_classifiers_performance_from_prob( probabilities_per_model, ground_truth, metadata, output_feature_name, output_directory=output_directory, **kwargs ) def compare_classifiers_performance_from_pred_cli( predictions: List[str], ground_truth: str, ground_truth_metadata: str, ground_truth_split: int, split_file: str, output_feature_name: str, output_directory: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_from_pred # Inputs :param predictions: (List[str]) path to experiment predictions file. :param ground_truth: (str) path to ground truth file. :param ground_truth_metadata: (str) path to ground truth metadata file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param output_directory: (str) name of output directory containing training results. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) predictions_per_model = load_data_for_viz( 'load_from_file', predictions, dtype=str ) compare_classifiers_performance_from_pred( predictions_per_model, ground_truth, metadata, output_feature_name, output_directory=output_directory, **kwargs ) def compare_classifiers_performance_subset_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, output_directory: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_subset. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file :param ground_truth: (str) path to ground truth file :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param output_directory: (str) name of output directory containing training results. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) compare_classifiers_performance_subset( probabilities_per_model, ground_truth, metadata, output_feature_name, output_directory=output_directory, **kwargs ) def compare_classifiers_performance_changing_k_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_changing_k. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file :param ground_truth: (str) path to ground truth file :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) compare_classifiers_performance_changing_k( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def compare_classifiers_multiclass_multimetric_cli( test_statistics: Union[str, List[str]], ground_truth_metadata: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_multiclass # Inputs :param test_statistics: (Union[str, List[str]]) path to experiment test statistics file. :param ground_truth_metadata: (str) path to ground truth metadata file. :param kwargs: (dict) parameters for the requested visualizations. # Return :return None: """ test_stats_per_model = load_data_for_viz('load_json', test_statistics) metadata = load_json(ground_truth_metadata) compare_classifiers_multiclass_multimetric( test_stats_per_model, metadata=metadata, **kwargs ) def compare_classifiers_predictions_cli( predictions: List[str], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_predictions # Inputs :param predictions: (List[str]) path to experiment predictions file. :param ground_truth: (str) path to grpound truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. 
    :param output_feature_name: (str) name of the output feature to visualize.
    :param kwargs: (dict) parameters for the requested visualizations.

    # Return

    :return None:
    """
    # retrieve feature metadata to convert raw predictions to encoded value
    metadata = load_json(ground_truth_metadata)

    # retrieve ground truth from source data set
    ground_truth = _extract_ground_truth_values(
        ground_truth,
        output_feature_name,
        ground_truth_split,
        split_file
    )

    predictions_per_model = load_data_for_viz(
        'load_from_file', predictions, dtype=str
    )
    compare_classifiers_predictions(
        predictions_per_model,
        ground_truth,
        metadata,
        output_feature_name,
        **kwargs
    )


def compare_classifiers_predictions_distribution_cli(
        predictions: List[str],
        ground_truth: str,
        ground_truth_split: int,
        split_file: str,
        ground_truth_metadata: str,
        output_feature_name: str,
        **kwargs: dict
) -> None:
    """Load model data from files to be shown by
    compare_predictions_distribution

    # Inputs

    :param predictions: (List[str]) path to experiment predictions file.
    :param ground_truth: (str) path to ground truth file.
    :param ground_truth_split: (int) type of ground truth split -
        `0` for training split, `1` for validation split or
        `2` for `'test'` split.
    :param split_file: (str, None) file path to csv file containing split values
    :param ground_truth_metadata: (str) file path to feature metadata json file
        created during training.
    :param output_feature_name: (str) name of the output feature to visualize.
    :param kwargs: (dict) parameters for the requested visualizations.
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) predictions_per_model = load_data_for_viz( 'load_from_file', predictions, dtype=str ) compare_classifiers_predictions_distribution( predictions_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confidence_thresholding_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by confidence_thresholding. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confidence_thresholding_data_vs_acc_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by confidence_thresholding_data_vs_acc_cli. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding_data_vs_acc( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confidence_thresholding_data_vs_acc_subset_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by confidence_thresholding_data_vs_acc_subset. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding_data_vs_acc_subset( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confidence_thresholding_data_vs_acc_subset_per_class_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_metadata: str, ground_truth_split: int, split_file: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by compare_classifiers_multiclass # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_metadata: (str) path to ground truth metadata file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding_data_vs_acc_subset_per_class( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confidence_thresholding_2thresholds_2d_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, threshold_output_feature_names: List[str], **kwargs: dict ) -> None: """Load model data from files to be shown by confidence_thresholding_2thresholds_2d_cli # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param threshold_output_feature_names: (List[str]) name of the output feature to visualizes. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth0 = _extract_ground_truth_values( ground_truth, threshold_output_feature_names[0], ground_truth_split, split_file ) ground_truth1 = _extract_ground_truth_values( ground_truth, threshold_output_feature_names[1], ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding_2thresholds_2d( probabilities_per_model, [ground_truth0, ground_truth1], metadata, threshold_output_feature_names, **kwargs ) def confidence_thresholding_2thresholds_3d_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, threshold_output_feature_names: List[str], **kwargs: dict ) -> None: """Load model data from files to be shown by confidence_thresholding_2thresholds_3d_cli # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param threshold_output_feature_names: (List[str]) name of the output feature to visualizes. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth0 = _extract_ground_truth_values( ground_truth, threshold_output_feature_names[0], ground_truth_split, split_file ) ground_truth1 = _extract_ground_truth_values( ground_truth, threshold_output_feature_names[1], ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) confidence_thresholding_2thresholds_3d( probabilities_per_model, [ground_truth0, ground_truth1], metadata, threshold_output_feature_names, **kwargs ) def binary_threshold_vs_metric_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by binary_threshold_vs_metric_cli. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) binary_threshold_vs_metric( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def roc_curves_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by roc_curves_cli. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file. :param ground_truth: (str) path to ground truth file. :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) roc_curves( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def roc_curves_from_test_statistics_cli( test_statistics: Union[str, List[str]], **kwargs: dict ) -> None: """Load model data from files to be shown by roc_curves_from_test_statistics_cli. # Inputs :param test_statistics: (Union[str, List[str]]) path to experiment test statistics file. :param kwargs: (dict) parameters for the requested visualizations. # Return :return None: """ test_stats_per_model = load_data_for_viz('load_json', test_statistics) roc_curves_from_test_statistics( test_stats_per_model, **kwargs ) def calibration_1_vs_all_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by calibration_1_vs_all_cli. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file :param ground_truth: (str) path to ground truth file :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) calibration_1_vs_all( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def calibration_multiclass_cli( probabilities: Union[str, List[str]], ground_truth: str, ground_truth_split: int, split_file: str, ground_truth_metadata: str, output_feature_name: str, **kwargs: dict ) -> None: """Load model data from files to be shown by calibration_multiclass_cli. # Inputs :param probabilities: (Union[str, List[str]]) path to experiment probabilities file :param ground_truth: (str) path to ground truth file :param ground_truth_split: (str) type of ground truth split - `0` for training split, `1` for validation split or 2 for `'test'` split. :param split_file: (str, None) file path to csv file containing split values :param ground_truth_metadata: (str) file path to feature metadata json file created during training. :param output_feature_name: (str) name of the output feature to visualize. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ # retrieve feature metadata to convert raw predictions to encoded value metadata = load_json(ground_truth_metadata) # retrieve ground truth from source data set ground_truth = _extract_ground_truth_values( ground_truth, output_feature_name, ground_truth_split, split_file ) probabilities_per_model = load_data_for_viz( 'load_from_file', probabilities, dtype=float ) calibration_multiclass( probabilities_per_model, ground_truth, metadata, output_feature_name, **kwargs ) def confusion_matrix_cli( test_statistics: Union[str, List[str]], ground_truth_metadata: str, **kwargs: dict ) -> None: """Load model data from files to be shown by confusion_matrix. # Inputs :param test_statistics: (Union[str, List[str]]) path to experiment test statistics file. :param ground_truth_metadata: (str) path to ground truth metadata file. :param kwargs: (dict) parameters for the requested visualizations. # Return :return None: """ test_stats_per_model = load_data_for_viz('load_json', test_statistics) metadata = load_json(ground_truth_metadata) confusion_matrix(test_stats_per_model, metadata, **kwargs) def frequency_vs_f1_cli( test_statistics: Union[str, List[str]], ground_truth_metadata: str, **kwargs: dict ) -> None: """Load model data from files to be shown by frequency_vs_f1. # Inputs :param test_statistics: (Union[str, List[str]]) path to experiment test statistics file. :param ground_truth_metadata: (str) path to ground truth metadata file. :param kwargs: (dict) parameters for the requested visualizations. 
# Return :return None: """ test_stats_per_model = load_data_for_viz('load_json', test_statistics) metadata = load_json(ground_truth_metadata) frequency_vs_f1(test_stats_per_model, metadata, **kwargs) def learning_curves( train_stats_per_model: List[dict], output_feature_name: Union[str, None] = None, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show how model metrics change over training and validation data epochs. For each model and for each output feature and metric of the model, it produces a line plot showing how that metric changed over the course of the epochs of training on the training and validation sets. # Inputs :param train_stats_per_model: (List[dict]) list containing dictionary of training statistics per model. :param output_feature_name: (Union[str, `None`], default: `None`) name of the output feature to use for the visualization. If `None`, use all output features. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ filename_template = 'learning_curves_{}_{}.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) train_stats_per_model_list = convert_to_list(train_stats_per_model) model_names_list = convert_to_list(model_names) output_feature_names = _validate_output_feature_name_from_train_stats( output_feature_name, train_stats_per_model_list ) metrics = [LOSS, ACCURACY, HITS_AT_K, EDIT_DISTANCE] for output_feature_name in output_feature_names: for metric in metrics: if metric in train_stats_per_model_list[0][TRAINING][ output_feature_name]: filename = None if filename_template_path: filename = filename_template_path.format( output_feature_name, metric) training_stats = [ learning_stats[TRAINING][output_feature_name][metric] for learning_stats in train_stats_per_model_list] validation_stats = [] for learning_stats in train_stats_per_model_list: if VALIDATION in learning_stats: validation_stats.append( learning_stats[VALIDATION][output_feature_name][ metric] ) else: validation_stats.append(None) visualization_utils.learning_curves_plot( training_stats, validation_stats, metric, model_names_list, title='Learning Curves {}'.format(output_feature_name), filename=filename ) def compare_performance( test_stats_per_model: List[dict], output_feature_name: Union[str, None] = None, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Produces model comparison barplot visualization for each overall metric For each model (in the aligned lists of test_statistics and model_names) it produces bars in a bar plot, one for each overall metric available in the test_statistics file for the specified output_feature_name. # Inputs :param test_stats_per_model: (List[dict]) dictionary containing evaluation performance statistics. :param output_feature_name: (Union[str, `None`], default: `None`) name of the output feature to use for the visualization. If `None`, use all output features. 
:param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ ignore_names = ['overall_stats', 'confusion_matrix', 'per_class_stats', 'predictions', 'probabilities'] filename_template = 'compare_performance_{}.' + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) test_stats_per_model_list = convert_to_list(test_stats_per_model) model_names_list = convert_to_list(model_names) output_feature_names = _validate_output_feature_name_from_test_stats( output_feature_name, test_stats_per_model_list ) for output_feature_name in output_feature_names: metric_names_sets = list( set(tspr[output_feature_name].keys()) for tspr in test_stats_per_model_list ) metric_names = metric_names_sets[0] for metric_names_set in metric_names_sets: metric_names = metric_names.intersection(metric_names_set) metric_names.remove(LOSS) for name in ignore_names: if name in metric_names: metric_names.remove(name) metrics_dict = {name: [] for name in metric_names} for test_stats_per_model in test_stats_per_model_list: for metric_name in metric_names: metrics_dict[metric_name].append( test_stats_per_model[output_feature_name][metric_name] ) # are there any metrics to compare? 
if metrics_dict: metrics = [] metrics_names = [] min_val = float("inf") max_val = float("-inf") for metric_name, metric_vals in metrics_dict.items(): if len(metric_vals) > 0: metrics.append(metric_vals) metrics_names.append(metric_name) curr_min = min(metric_vals) if curr_min < min_val: min_val = curr_min curr_max = max(metric_vals) if curr_max > max_val: max_val = curr_max filename = None if filename_template_path: filename = filename_template_path.format(output_feature_name) os.makedirs(output_directory, exist_ok=True) visualization_utils.compare_classifiers_plot( metrics, metrics_names, model_names_list, adaptive=min_val < 0 or max_val > 1, title='Performance comparison on {}'.format( output_feature_name), filename=filename ) def compare_classifiers_performance_from_prob( probabilities_per_model: List[np.ndarray], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, labels_limit: int = 0, top_n_classes: Union[List[int], int] = 3, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Produces model comparison barplot visualization from probabilities. For each model it produces bars in a bar plot, one for each overall metric computed on the fly from the probabilities of predictions for the specified `model_names`. # Inputs :param probabilities_per_model: (List[np.ndarray]) path to experiment probabilities file :param ground_truth: (pd.Series) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param top_n_classes: (List[int]) list containing the number of classes to plot. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. 
:param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) top_n_classes_list = convert_to_list(top_n_classes) k = top_n_classes_list[0] model_names_list = convert_to_list(model_names) if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit probs = probabilities_per_model accuracies = [] hits_at_ks = [] mrrs = [] for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) prob = prob_limit prob = np.argsort(prob, axis=1) top1 = prob[:, -1] topk = prob[:, -k:] accuracies.append((ground_truth == top1).sum() / len(ground_truth)) hits_at_k = 0 for j in range(len(ground_truth)): hits_at_k += np.in1d(ground_truth[j], topk[j]) hits_at_ks.append(np.asscalar(hits_at_k) / len(ground_truth)) mrr = 0 for j in range(len(ground_truth)): ground_truth_pos_in_probs = prob[j] == ground_truth[j] if np.any(ground_truth_pos_in_probs): mrr += (1 / -(np.asscalar( np.argwhere(ground_truth_pos_in_probs)) - prob.shape[1])) mrrs.append(mrr / len(ground_truth)) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'compare_classifiers_performance_from_prob.' 
+ file_format ) visualization_utils.compare_classifiers_plot( [accuracies, hits_at_ks, mrrs], [ACCURACY, HITS_AT_K, 'mrr'], model_names_list, filename=filename ) def compare_classifiers_performance_from_pred( predictions_per_model: List[np.ndarray], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, labels_limit: int, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Produces model comparison barplot visualization from predictions. For each model it produces bars in a bar plot, one for each overall metric computed on the fly from the predictions for the specified `model_names`. # Inputs :param predictions_per_model: (List[str]) path to experiment predictions file. :param ground_truth: (pd.Series) ground truth values :param metadata: (dict) feature metadata dictionary. :param output_feature_name: (str) name of the output feature to visualize. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. 
# Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) predictions_per_model = [ np.ndarray.flatten(np.array(pred)) for pred in predictions_per_model ] if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit preds = predictions_per_model model_names_list = convert_to_list(model_names) mapped_preds = [] try: for pred in preds: mapped_preds.append( [metadata[output_feature_name]['str2idx'][val] for val in pred]) preds = mapped_preds # If predictions are coming from npy file there is no need to convert to # numeric labels using metadata except (TypeError, KeyError): pass accuracies = [] precisions = [] recalls = [] f1s = [] for i, pred in enumerate(preds): accuracies.append(sklearn.metrics.accuracy_score(ground_truth, pred)) precisions.append( sklearn.metrics.precision_score(ground_truth, pred, average='macro') ) recalls.append(sklearn.metrics.recall_score( ground_truth, pred, average='macro') ) f1s.append(sklearn.metrics.f1_score( ground_truth, pred, average='macro') ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'compare_classifiers_performance_from_pred.' 
+ file_format ) visualization_utils.compare_classifiers_plot( [accuracies, precisions, recalls, f1s], [ACCURACY, 'precision', 'recall', 'f1'], model_names_list, filename=filename ) def compare_classifiers_performance_subset( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, top_n_classes: List[int], labels_limit: (int), subset: str, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Produces model comparison barplot visualization from train subset. For each model it produces bars in a bar plot, one for each overall metric computed on the fly from the probabilities predictions for the specified `model_names`, considering only a subset of the full training set. The way the subset is obtained is using the `top_n_classes` and `subset` parameters. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param top_n_classes: (List[int]) list containing the number of classes to plot. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param subset: (str) string specifying type of subset filtering. Valid values are `ground_truth` or `predictions`. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. 
# Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) top_n_classes_list = convert_to_list(top_n_classes) k = top_n_classes_list[0] model_names_list = convert_to_list(model_names) if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit subset_indices = ground_truth > 0 gt_subset = ground_truth if subset == 'ground_truth': subset_indices = ground_truth < k gt_subset = ground_truth[subset_indices] logger.info('Subset is {:.2f}% of the data'.format( len(gt_subset) / len(ground_truth) * 100) ) probs = probabilities_per_model accuracies = [] hits_at_ks = [] for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) prob = prob_limit if subset == PREDICTIONS: subset_indices = np.argmax(prob, axis=1) < k gt_subset = ground_truth[subset_indices] logger.info( 'Subset for model_name {} is {:.2f}% of the data'.format( model_names[i] if model_names and i < len( model_names) else i, len(gt_subset) / len(ground_truth) * 100 ) ) model_names[i] = '{} ({:.2f}%)'.format( model_names[i] if model_names and i < len(model_names) else i, len(gt_subset) / len(ground_truth) * 100 ) prob_subset = prob[subset_indices] prob_subset = np.argsort(prob_subset, axis=1) top1_subset = prob_subset[:, -1] top3_subset = prob_subset[:, -3:] accuracies.append(np.sum((gt_subset == top1_subset)) / len(gt_subset)) hits_at_k = 0 for j in range(len(gt_subset)): hits_at_k += np.in1d(gt_subset[j], top3_subset[i, :]) hits_at_ks.append(np.asscalar(hits_at_k) / len(gt_subset)) title = None if subset == 'ground_truth': title = 'Classifier performance on first {} class{} ({:.2f}%)'.format( k, 'es' if k > 1 else '', 
len(gt_subset) / len(ground_truth) * 100 ) elif subset == PREDICTIONS: title = 'Classifier performance on first {} class{}'.format( k, 'es' if k > 1 else '' ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'compare_classifiers_performance_subset.' + file_format ) visualization_utils.compare_classifiers_plot( [accuracies, hits_at_ks], [ACCURACY, HITS_AT_K], model_names_list, title=title, filename=filename ) def compare_classifiers_performance_changing_k( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, top_k: int, labels_limit: int, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Produce lineplot that show Hits@K metric while k goes from 1 to `top_k`. For each model it produces a line plot that shows the Hits@K metric (that counts a prediction as correct if the model produces it among the first k) while changing k from 1 to top_k for the specified `output_feature_name`. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param top_k: (int) number of elements in the ranklist to consider. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. 
# Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) k = top_k if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit probs = probabilities_per_model hits_at_ks = [] model_names_list = convert_to_list(model_names) for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) prob = prob_limit prob = np.argsort(prob, axis=1) hits_at_k = [0.0] * k for g in range(len(ground_truth)): for j in range(k): hits_at_k[j] += np.in1d(ground_truth[g], prob[g, -j - 1:]) hits_at_ks.append(np.array(hits_at_k) / len(ground_truth)) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'compare_classifiers_performance_changing_k.' + file_format ) visualization_utils.compare_classifiers_line_plot( np.arange(1, k + 1), hits_at_ks, 'hits@k', model_names_list, title='Classifier comparison (hits@k)', filename=filename ) def compare_classifiers_multiclass_multimetric( test_stats_per_model: List[dict], metadata: dict, output_feature_name: str, top_n_classes: List[int], model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show the precision, recall and F1 of the model for the specified output_feature_name. For each model it produces four plots that show the precision, recall and F1 of the model on several classes for the specified output_feature_name. 
# Inputs :param test_stats_per_model: (List[dict]) list containing dictionary of evaluation performance statistics :param metadata: (dict) intermediate preprocess structure created during training containing the mappings of the input dataset. :param output_feature_name: (Union[str, `None`]) name of the output feature to use for the visualization. If `None`, use all output features. :param top_n_classes: (List[int]) list containing the number of classes to plot. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ filename_template = 'compare_classifiers_multiclass_multimetric_{}_{}_{}.' \ + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) test_stats_per_model_list = convert_to_list(test_stats_per_model) model_names_list = convert_to_list(model_names) output_feature_names = _validate_output_feature_name_from_test_stats( output_feature_name, test_stats_per_model_list ) for i, test_statistics in enumerate( test_stats_per_model_list): for output_feature_name in output_feature_names: model_name_name = ( model_names_list[i] if model_names_list is not None and i < len(model_names_list) else '' ) if 'per_class_stats' not in test_statistics[output_feature_name]: logging.warning( 'The output_feature_name {} in test statistics does not contain "{}", ' 'skipping it'.format(output_feature_name, per_class_stats) ) break per_class_stats = test_statistics[output_feature_name][ 'per_class_stats'] precisions = [] recalls = [] f1_scores = [] labels = [] for _, class_name in sorted( [(metadata[output_feature_name]['str2idx'][key], key) for key in per_class_stats.keys()], key=lambda tup: tup[0]): class_stats = 
per_class_stats[class_name] precisions.append(class_stats['precision']) recalls.append(class_stats['recall']) f1_scores.append(class_stats['f1_score']) labels.append(class_name) for k in top_n_classes: k = min(k, len(precisions)) if k > 0 else len(precisions) ps = precisions[0:k] rs = recalls[0:k] fs = f1_scores[0:k] ls = labels[0:k] filename = None if filename_template_path: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format( model_name_name, output_feature_name, 'top{}'.format(k) ) visualization_utils.compare_classifiers_multiclass_multimetric_plot( [ps, rs, fs], ['precision', 'recall', 'f1 score'], labels=ls, title='{} Multiclass Precision / Recall / ' 'F1 Score top {} {}'.format(model_name_name, k, output_feature_name), filename=filename ) p_np = np.nan_to_num(np.array(precisions, dtype=np.float32)) r_np = np.nan_to_num(np.array(recalls, dtype=np.float32)) f1_np = np.nan_to_num(np.array(f1_scores, dtype=np.float32)) labels_np = np.nan_to_num(np.array(labels)) sorted_indices = f1_np.argsort() higher_f1s = sorted_indices[-k:][::-1] filename = None if filename_template_path: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format( model_name_name, output_feature_name, 'best{}'.format(k) ) visualization_utils.compare_classifiers_multiclass_multimetric_plot( [p_np[higher_f1s], r_np[higher_f1s], f1_np[higher_f1s]], ['precision', 'recall', 'f1 score'], labels=labels_np[higher_f1s].tolist(), title='{} Multiclass Precision / Recall / ' 'F1 Score best {} classes {}'.format( model_name_name, k, output_feature_name), filename=filename ) lower_f1s = sorted_indices[:k] filename = None if filename_template_path: filename = filename_template_path.format( model_name_name, output_feature_name, 'worst{}'.format(k) ) visualization_utils.compare_classifiers_multiclass_multimetric_plot( [p_np[lower_f1s], r_np[lower_f1s], f1_np[lower_f1s]], ['precision', 'recall', 'f1 score'], labels=labels_np[lower_f1s].tolist(), 
title='{} Multiclass Precision / Recall / F1 Score worst ' 'k classes {}'.format(model_name_name, k, output_feature_name), filename=filename ) filename = None if filename_template_path: filename = filename_template_path.format( model_name_name, output_feature_name, 'sorted' ) visualization_utils.compare_classifiers_multiclass_multimetric_plot( [p_np[sorted_indices[::-1]], r_np[sorted_indices[::-1]], f1_np[sorted_indices[::-1]]], ['precision', 'recall', 'f1 score'], labels=labels_np[sorted_indices[::-1]].tolist(), title='{} Multiclass Precision / Recall / F1 Score ' '{} sorted'.format(model_name_name, output_feature_name), filename=filename ) logging.info('\n') logging.info(model_name_name) tmp_str = '{0} best 5 classes: '.format(output_feature_name) tmp_str += '{}' logging.info(tmp_str.format(higher_f1s)) logging.info(f1_np[higher_f1s]) tmp_str = '{0} worst 5 classes: '.format(output_feature_name) tmp_str += '{}' logging.info(tmp_str.format(lower_f1s)) logging.info(f1_np[lower_f1s]) tmp_str = '{0} number of classes with f1 score > 0: '.format( output_feature_name) tmp_str += '{}' logging.info(tmp_str.format(np.sum(f1_np > 0))) tmp_str = '{0} number of classes with f1 score = 0: '.format( output_feature_name) tmp_str += '{}' logging.info(tmp_str.format(np.sum(f1_np == 0))) def compare_classifiers_predictions( predictions_per_model: List[list], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, labels_limit: int, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show two models comparison of their output_feature_name predictions. # Inputs :param predictions_per_model: (List[list]) list containing the model predictions for the specified output_feature_name. 
:param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) model_names_list = convert_to_list(model_names) name_c1 = ( model_names_list[0] if model_names is not None and len(model_names) > 0 else 'c1') name_c2 = ( model_names_list[1] if model_names is not None and len(model_names) > 1 else 'c2') pred_c1 = predictions_per_model[0] pred_c2 = predictions_per_model[1] if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit pred_c1[pred_c1 > labels_limit] = labels_limit pred_c2[pred_c2 > labels_limit] = labels_limit # DOTO all shadows built in name - come up with a more descriptive name all = len(ground_truth) if all == 0: logger.error('No labels in the ground truth') return both_right = 0 both_wrong_same = 0 both_wrong_different = 0 c1_right_c2_wrong = 0 c1_wrong_c2_right = 0 for i in range(all): if ground_truth[i] == pred_c1[i] and ground_truth[i] == pred_c2[i]: both_right += 1 elif ground_truth[i] != pred_c1[i] and ground_truth[i] != pred_c2[i]: if pred_c1[i] == pred_c2[i]: both_wrong_same += 1 else: 
both_wrong_different += 1 elif ground_truth[i] == pred_c1[i] and ground_truth[i] != pred_c2[i]: c1_right_c2_wrong += 1 elif ground_truth[i] != pred_c1[i] and ground_truth[i] == pred_c2[i]: c1_wrong_c2_right += 1 one_right = c1_right_c2_wrong + c1_wrong_c2_right both_wrong = both_wrong_same + both_wrong_different logger.info('Test datapoints: {}'.format(all)) logger.info( 'Both right: {} {:.2f}%'.format(both_right, 100 * both_right / all)) logger.info( 'One right: {} {:.2f}%'.format(one_right, 100 * one_right / all)) logger.info( ' {} right / {} wrong: {} {:.2f}% {:.2f}%'.format( name_c1, name_c2, c1_right_c2_wrong, 100 * c1_right_c2_wrong / all, 100 * c1_right_c2_wrong / one_right if one_right > 0 else 0 ) ) logger.info( ' {} wrong / {} right: {} {:.2f}% {:.2f}%'.format( name_c1, name_c2, c1_wrong_c2_right, 100 * c1_wrong_c2_right / all, 100 * c1_wrong_c2_right / one_right if one_right > 0 else 0 ) ) logger.info( 'Both wrong: {} {:.2f}%'.format(both_wrong, 100 * both_wrong / all) ) logger.info(' same prediction: {} {:.2f}% {:.2f}%'.format( both_wrong_same, 100 * both_wrong_same / all, 100 * both_wrong_same / both_wrong if both_wrong > 0 else 0 ) ) logger.info(' different prediction: {} {:.2f}% {:.2f}%'.format( both_wrong_different, 100 * both_wrong_different / all, 100 * both_wrong_different / both_wrong if both_wrong > 0 else 0 ) ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'compare_classifiers_predictions_{}_{}.{}'.format( name_c1, name_c2, file_format ) ) visualization_utils.donut( [both_right, one_right, both_wrong], ['both right', 'one right', 'both wrong'], [both_right, c1_right_c2_wrong, c1_wrong_c2_right, both_wrong_same, both_wrong_different], ['both right', '{} right / {} wrong'.format(name_c1, name_c2), '{} wrong / {} right'.format(name_c1, name_c2), 'same prediction', 'different prediction'], [0, 1, 1, 2, 2], title='{} vs {}'.format(name_c1, name_c2), 
        tight_layout=kwargs.pop('tight_layout', True),
        filename=filename
    )


def compare_classifiers_predictions_distribution(
        predictions_per_model: List[list],
        ground_truth: Union[pd.Series, np.ndarray],
        metadata: dict,
        output_feature_name: str,
        labels_limit: int,
        model_names: Union[str, List[str]] = None,
        output_directory: str = None,
        file_format: str = 'pdf',
        **kwargs
) -> None:
    """Show comparison of models predictions distribution for 10
    output_feature_name classes

    This visualization produces a radar plot comparing the distributions of
    predictions of the models for the first 10 classes of the specified
    output_feature_name.

    # Inputs

    :param predictions_per_model: (List[list]) list containing the model
        predictions for the specified output_feature_name.
    :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values
    :param metadata: (dict) feature metadata dictionary
    :param output_feature_name: (str) output feature name
    :param labels_limit: (int) upper limit on the numeric encoded label value.
        Encoded numeric label values in dataset that are higher than
        `label_limit` are considered to be "rare" labels.
    :param model_names: (Union[str, List[str]], default: `None`) model name or
        list of the model names to use as labels.
    :param output_directory: (str, default: `None`) directory where to save
        plots. If not specified, plots will be displayed in a window

    :param file_format: (str, default: `'pdf'`) file format of output plots -
        `'pdf'` or `'png'`.

    # Return

    :return: (None)
    """
    if not isinstance(ground_truth, np.ndarray):
        # not np array, assume we need to translate raw value to encoded value
        feature_metadata = metadata[output_feature_name]
        vfunc = np.vectorize(_encode_categorical_feature)
        ground_truth = vfunc(ground_truth, feature_metadata['str2idx'])

    model_names_list = convert_to_list(model_names)
    if labels_limit > 0:
        # clip "rare" labels in the ground truth and in every model's
        # predictions so they all share the same collapsed label space
        ground_truth[ground_truth > labels_limit] = labels_limit
        for i in range(len(predictions_per_model)):
            predictions_per_model[i][predictions_per_model[i] > labels_limit] \
                = labels_limit

    # common histogram length: one bin per label value seen anywhere
    max_gt = max(ground_truth)
    max_pred = max([max(alg_predictions)
                    for alg_predictions in predictions_per_model])

    max_val = max(max_gt, max_pred) + 1

    # normalized label-frequency distributions for ground truth and models
    counts_gt = np.bincount(ground_truth, minlength=max_val)
    prob_gt = counts_gt / counts_gt.sum()

    counts_predictions = [np.bincount(alg_predictions, minlength=max_val)
                          for alg_predictions in predictions_per_model]

    prob_predictions = [alg_count_prediction / alg_count_prediction.sum()
                        for alg_count_prediction in counts_predictions]

    filename = None
    if output_directory:
        os.makedirs(output_directory, exist_ok=True)
        filename = os.path.join(
            output_directory,
            'compare_classifiers_predictions_distribution.' + file_format
        )

    visualization_utils.radar_chart(
        prob_gt,
        prob_predictions,
        model_names_list,
        filename=filename
    )


def confidence_thresholding(
        probabilities_per_model: List[np.array],
        ground_truth: Union[pd.Series, np.ndarray],
        metadata: dict,
        output_feature_name: str,
        labels_limit: int,
        model_names: Union[str, List[str]] = None,
        output_directory: str = None,
        file_format: str = 'pdf',
        **kwargs
) -> None:
    """Show models accuracy and data coverage while increasing threshold

    For each model it produces a pair of lines indicating the accuracy of
    the model and the data coverage while increasing a threshold (x axis) on
    the probabilities of predictions for the specified output_feature_name.

    # Inputs

    :param probabilities_per_model: (List[numpy.array]) list of model
        probabilities.
    :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values
    :param metadata: (dict) feature metadata dictionary
    :param output_feature_name: (str) output feature name
    :param labels_limit: (int) upper limit on the numeric encoded label value.
        Encoded numeric label values in dataset that are higher than
        `label_limit` are considered to be "rare" labels.
    :param model_names: (Union[str, List[str]], default: `None`) model name or
        list of the model names to use as labels.
    :param output_directory: (str, default: `None`) directory where to save
        plots. If not specified, plots will be displayed in a window

    :param file_format: (str, default: `'pdf'`) file format of output plots -
        `'pdf'` or `'png'`.

    # Return

    :return: (None)
    """
    if not isinstance(ground_truth, np.ndarray):
        # not np array, assume we need to translate raw value to encoded value
        feature_metadata = metadata[output_feature_name]
        vfunc = np.vectorize(_encode_categorical_feature)
        ground_truth = vfunc(ground_truth, feature_metadata['str2idx'])

    if labels_limit > 0:
        ground_truth[ground_truth > labels_limit] = labels_limit
    probs = probabilities_per_model
    model_names_list = convert_to_list(model_names)
    # thresholds swept from 0.0 to 1.0 in 0.05 steps
    thresholds = [t / 100 for t in range(0, 101, 5)]

    accuracies = []
    dataset_kept = []

    for i, prob in enumerate(probs):

        if labels_limit > 0 and prob.shape[1] > labels_limit + 1:
            # fold probabilities of all "rare" classes into the last kept one
            prob_limit = prob[:, :labels_limit + 1]
            prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1)
            prob = prob_limit

        max_prob = np.max(prob, axis=1)
        predictions = np.argmax(prob, axis=1)

        accuracies_alg = []
        dataset_kept_alg = []

        for threshold in thresholds:
            # cap at 0.999 so the >= filter never discards every datapoint
            threshold = threshold if threshold < 1 else 0.999
            filtered_indices = max_prob >= threshold
            filtered_gt = ground_truth[filtered_indices]
            filtered_predictions = predictions[filtered_indices]
            accuracy = (
                    (filtered_gt == filtered_predictions).sum() /
                    len(filtered_gt)
            )
            accuracies_alg.append(accuracy)
            dataset_kept_alg.append(len(filtered_gt) / len(ground_truth))
        accuracies.append(accuracies_alg)
        dataset_kept.append(dataset_kept_alg)

    filename = None
    if output_directory:
        os.makedirs(output_directory, exist_ok=True)
        filename = os.path.join(
            output_directory,
            'confidence_thresholding.' + file_format
        )

    visualization_utils.confidence_fitlering_plot(
        thresholds,
        accuracies,
        dataset_kept,
        model_names_list,
        title='Confidence_Thresholding',
        filename=filename
    )


def confidence_thresholding_data_vs_acc(
        probabilities_per_model: List[np.array],
        ground_truth: Union[pd.Series, np.ndarray],
        metadata: dict,
        output_feature_name: str,
        labels_limit: int,
        model_names: Union[str, List[str]] = None,
        output_directory: str = None,
        file_format: str = 'pdf',
        **kwargs
) -> None:
    """Show models comparison of confidence threshold data vs accuracy.

    For each model it produces a line indicating the accuracy of the model
    and the data coverage while increasing a threshold on the probabilities
    of predictions for the specified output_feature_name. The difference
    with confidence_thresholding is that it uses two axes instead of three,
    not visualizing the threshold and having coverage as x axis instead of
    the threshold.

    # Inputs

    :param probabilities_per_model: (List[numpy.array]) list of model
        probabilities.
    :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values
    :param metadata: (dict) feature metadata dictionary
    :param output_feature_name: (str) output feature name
    :param labels_limit: (int) upper limit on the numeric encoded label value.
        Encoded numeric label values in dataset that are higher than
        `label_limit` are considered to be "rare" labels.
    :param model_names: (Union[str, List[str]], default: `None`) model name or
        list of the model names to use as labels.
    :param output_directory: (str, default: `None`) directory where to save
        plots. If not specified, plots will be displayed in a window

    :param file_format: (str, default: `'pdf'`) file format of output plots -
        `'pdf'` or `'png'`.

    # Return

    :return: (None)
    """
    if not isinstance(ground_truth, np.ndarray):
        # not np array, assume we need to translate raw value to encoded value
        feature_metadata = metadata[output_feature_name]
        vfunc = np.vectorize(_encode_categorical_feature)
        ground_truth = vfunc(ground_truth, feature_metadata['str2idx'])

    if labels_limit > 0:
        ground_truth[ground_truth > labels_limit] = labels_limit
    probs = probabilities_per_model
    model_names_list = convert_to_list(model_names)
    # thresholds swept from 0.0 to 1.0 in 0.05 steps
    thresholds = [t / 100 for t in range(0, 101, 5)]

    accuracies = []
    dataset_kept = []

    for i, prob in enumerate(probs):

        if labels_limit > 0 and prob.shape[1] > labels_limit + 1:
            # fold probabilities of all "rare" classes into the last kept one
            prob_limit = prob[:, :labels_limit + 1]
            prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1)
            prob = prob_limit

        max_prob = np.max(prob, axis=1)
        predictions = np.argmax(prob, axis=1)

        accuracies_alg = []
        dataset_kept_alg = []

        for threshold in thresholds:
            # cap at 0.999 so the >= filter never discards every datapoint
            threshold = threshold if threshold < 1 else 0.999
            filtered_indices = max_prob >= threshold
            filtered_gt = ground_truth[filtered_indices]
            filtered_predictions = predictions[filtered_indices]
            accuracy = ((filtered_gt == filtered_predictions).sum() /
                        len(filtered_gt))
            accuracies_alg.append(accuracy)
            dataset_kept_alg.append(len(filtered_gt) / len(ground_truth))

        accuracies.append(accuracies_alg)
        dataset_kept.append(dataset_kept_alg)

    filename = None
    if output_directory:
        os.makedirs(output_directory, exist_ok=True)
        filename = os.path.join(
            output_directory,
            'confidence_thresholding_data_vs_acc.'
            + file_format
        )

    visualization_utils.confidence_fitlering_data_vs_acc_plot(
        accuracies,
        dataset_kept,
        model_names_list,
        title='Confidence_Thresholding (Data vs Accuracy)',
        filename=filename
    )


def confidence_thresholding_data_vs_acc_subset(
        probabilities_per_model: List[np.array],
        ground_truth: Union[pd.Series, np.ndarray],
        metadata: dict,
        output_feature_name: str,
        top_n_classes: List[int],
        labels_limit: int,
        subset: str,
        model_names: Union[str, List[str]] = None,
        output_directory: str = None,
        file_format: str = 'pdf',
        **kwargs
) -> None:
    """Show models comparison of confidence threshold data vs accuracy on a
    subset of data.

    For each model it produces a line indicating the accuracy of the model
    and the data coverage while increasing a threshold on the probabilities
    of predictions for the specified output_feature_name, considering only a
    subset of the full training set. The way the subset is obtained is using
    the `top_n_classes` and subset parameters.

    The difference with confidence_thresholding is that it uses two axes
    instead of three, not visualizing the threshold and having coverage as
    x axis instead of the threshold.

    If the values of subset is `ground_truth`, then only datapoints where the
    ground truth class is within the top n most frequent ones will be
    considered as test set, and the percentage of datapoints that have been
    kept from the original set will be displayed. If the values of subset is
    `predictions`, then only datapoints where the model predicts a class
    that is within the top n most frequent ones will be considered as test
    set, and the percentage of datapoints that have been kept from the
    original set will be displayed for each model.

    # Inputs

    :param probabilities_per_model: (List[numpy.array]) list of model
        probabilities.
:param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param top_n_classes: (List[int]) list containing the number of classes to plot. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param subset: (str) string specifying type of subset filtering. Valid values are `ground_truth` or `predictions`. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) top_n_classes_list = convert_to_list(top_n_classes) k = top_n_classes_list[0] if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit probs = probabilities_per_model model_names_list = convert_to_list(model_names) thresholds = [t / 100 for t in range(0, 101, 5)] accuracies = [] dataset_kept = [] subset_indices = ground_truth > 0 gt_subset = ground_truth if subset == 'ground_truth': subset_indices = ground_truth < k gt_subset = ground_truth[subset_indices] logger.info('Subset is {:.2f}% of the data'.format( len(gt_subset) / len(ground_truth) * 100) ) for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) prob = prob_limit 
if subset == PREDICTIONS: subset_indices = np.argmax(prob, axis=1) < k gt_subset = ground_truth[subset_indices] logger.info( 'Subset for model_name {} is {:.2f}% of the data'.format( model_names[i] if model_names and i < len( model_names) else i, len(gt_subset) / len(ground_truth) * 100 ) ) prob_subset = prob[subset_indices] max_prob = np.max(prob_subset, axis=1) predictions = np.argmax(prob_subset, axis=1) accuracies_alg = [] dataset_kept_alg = [] for threshold in thresholds: threshold = threshold if threshold < 1 else 0.999 filtered_indices = max_prob >= threshold filtered_gt = gt_subset[filtered_indices] filtered_predictions = predictions[filtered_indices] accuracy = ((filtered_gt == filtered_predictions).sum() / len(filtered_gt)) accuracies_alg.append(accuracy) dataset_kept_alg.append(len(filtered_gt) / len(ground_truth)) accuracies.append(accuracies_alg) dataset_kept.append(dataset_kept_alg) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'confidence_thresholding_data_vs_acc_subset.' + file_format ) visualization_utils.confidence_fitlering_data_vs_acc_plot( accuracies, dataset_kept, model_names_list, title='Confidence_Thresholding (Data vs Accuracy)', filename=filename ) def confidence_thresholding_data_vs_acc_subset_per_class( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, top_n_classes: Union[int, List[int]], labels_limit: int, subset: str, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show models comparison of confidence threshold data vs accuracy on a subset of data per class in top n classes. 
For each model (in the aligned lists of probabilities and model_names) it produces a line indicating the accuracy of the model and the data coverage while increasing a threshold on the probabilities of predictions for the specified output_feature_name, considering only a subset of the full training set. The way the subset is obtained is using the `top_n_classes` and `subset` parameters. The difference with confidence_thresholding is that it uses two axes instead of three, not visualizing the threshold and having coverage as x axis instead of the threshold. If the values of subset is `ground_truth`, then only datapoints where the ground truth class is within the top n most frequent ones will be considered as test set, and the percentage of datapoints that have been kept from the original set will be displayed. If the values of subset is `predictions`, then only datapoints where the the model predicts a class that is within the top n most frequent ones will be considered as test set, and the percentage of datapoints that have been kept from the original set will be displayed for each model. The difference with confidence_thresholding_data_vs_acc_subset is that it produces one plot per class within the top_n_classes. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) intermediate preprocess structure created during training containing the mappings of the input dataset. :param output_feature_name: (str) name of the output feature to use for the visualization. :param top_n_classes: (Union[int, List[int]]) number of top classes or list containing the number of top classes to plot. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param subset: (str) string specifying type of subset filtering. 
Valid values are `ground_truth` or `predictions`. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) filename_template = \ 'confidence_thresholding_data_vs_acc_subset_per_class_{}.' + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) top_n_classes_list = convert_to_list(top_n_classes) k = top_n_classes_list[0] if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit probs = probabilities_per_model model_names_list = convert_to_list(model_names) thresholds = [t / 100 for t in range(0, 101, 5)] for curr_k in range(k): accuracies = [] dataset_kept = [] subset_indices = ground_truth > 0 gt_subset = ground_truth if subset == 'ground_truth': subset_indices = ground_truth == curr_k gt_subset = ground_truth[subset_indices] logger.info('Subset is {:.2f}% of the data'.format( len(gt_subset) / len(ground_truth) * 100) ) for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) prob = prob_limit if subset == PREDICTIONS: subset_indices = np.argmax(prob, axis=1) == curr_k gt_subset = ground_truth[subset_indices] logger.info( 'Subset for model_name {} is {:.2f}% of the data'.format( model_names_list[i] if model_names_list and i < len( model_names_list) else i, len(gt_subset) / 
len(ground_truth) * 100 ) ) prob_subset = prob[subset_indices] max_prob = np.max(prob_subset, axis=1) predictions = np.argmax(prob_subset, axis=1) accuracies_alg = [] dataset_kept_alg = [] for threshold in thresholds: threshold = threshold if threshold < 1 else 0.999 filtered_indices = max_prob >= threshold filtered_gt = gt_subset[filtered_indices] filtered_predictions = predictions[filtered_indices] accuracy = ((filtered_gt == filtered_predictions).sum() / len(filtered_gt) if len(filtered_gt) > 0 else 0) accuracies_alg.append(accuracy) dataset_kept_alg.append(len(filtered_gt) / len(ground_truth)) accuracies.append(accuracies_alg) dataset_kept.append(dataset_kept_alg) output_feature_name_name = metadata[output_feature_name]['idx2str'][ curr_k] filename = None if filename_template_path: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format(output_feature_name_name) visualization_utils.confidence_fitlering_data_vs_acc_plot( accuracies, dataset_kept, model_names_list, decimal_digits=2, title='Confidence_Thresholding (Data vs Accuracy) ' 'for class {}'.format(output_feature_name_name), filename=filename ) def confidence_thresholding_2thresholds_2d( probabilities_per_model: List[np.array], ground_truths: Union[List[np.array], List[pd.Series]], metadata, threshold_output_feature_names: List[str], labels_limit: int, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show confidence threshold data vs accuracy for two output feature names. The first plot shows several semi transparent lines. They summarize the 3d surfaces displayed by confidence_thresholding_2thresholds_3d that have thresholds on the confidence of the predictions of the two `threshold_output_feature_names` as x and y axes and either the data coverage percentage or the accuracy as z axis. Each line represents a slice of the data coverage surface projected onto the accuracy surface. 
# Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[List[np.array], List[pd.Series]]) containing ground truth data :param metadata: (dict) feature metadata dictionary :param threshold_output_feature_names: (List[str]) List containing two output feature names for visualization. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ try: validate_conf_treshholds_and_probabilities_2d_3d( probabilities_per_model, threshold_output_feature_names ) except RuntimeError: return probs = probabilities_per_model model_names_list = convert_to_list(model_names) filename_template = \ 'confidence_thresholding_2thresholds_2d_{}.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) if not isinstance(ground_truths[0], np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[threshold_output_feature_names[0]] vfunc = np.vectorize(_encode_categorical_feature) gt_1 = vfunc(ground_truths[0], feature_metadata['str2idx']) feature_metadata = metadata[threshold_output_feature_names[1]] gt_2 = vfunc(ground_truths[1], feature_metadata['str2idx']) else: gt_1 = ground_truths[0] gt_2 = ground_truths[1] if labels_limit > 0: gt_1[gt_1 > labels_limit] = labels_limit gt_2[gt_2 > labels_limit] = labels_limit thresholds = [t / 100 for t in range(0, 101, 5)] fixed_step_coverage = thresholds name_t1 = '{} threshold'.format(threshold_output_feature_names[0]) name_t2 = '{} threshold'.format(threshold_output_feature_names[1]) accuracies = [] dataset_kept = [] interps = [] table = [[name_t1, name_t2, 'coverage', ACCURACY]] if labels_limit > 0 and probs[0].shape[1] > labels_limit + 1: prob_limit = probs[0][:, :labels_limit + 1] prob_limit[:, labels_limit] = probs[0][:, labels_limit:].sum(1) probs[0] = prob_limit if labels_limit > 0 and probs[1].shape[1] > labels_limit + 1: prob_limit = probs[1][:, :labels_limit + 1] prob_limit[:, labels_limit] = probs[1][:, labels_limit:].sum(1) probs[1] = prob_limit max_prob_1 = np.max(probs[0], axis=1) predictions_1 = np.argmax(probs[0], axis=1) max_prob_2 = np.max(probs[1], axis=1) predictions_2 = np.argmax(probs[1], axis=1) for threshold_1 in thresholds: threshold_1 = threshold_1 if threshold_1 < 1 else 0.999 curr_accuracies = [] curr_dataset_kept = [] for threshold_2 in thresholds: threshold_2 = threshold_2 if threshold_2 < 1 else 0.999 filtered_indices = np.logical_and( max_prob_1 >= threshold_1, max_prob_2 >= threshold_2 ) filtered_gt_1 = gt_1[filtered_indices] filtered_predictions_1 = predictions_1[filtered_indices] filtered_gt_2 = gt_2[filtered_indices] 
filtered_predictions_2 = predictions_2[filtered_indices] coverage = len(filtered_gt_1) / len(gt_1) accuracy = ( np.logical_and( filtered_gt_1 == filtered_predictions_1, filtered_gt_2 == filtered_predictions_2 ) ).sum() / len(filtered_gt_1) curr_accuracies.append(accuracy) curr_dataset_kept.append(coverage) table.append([threshold_1, threshold_2, coverage, accuracy]) accuracies.append(curr_accuracies) dataset_kept.append(curr_dataset_kept) interps.append( np.interp( fixed_step_coverage, list(reversed(curr_dataset_kept)), list(reversed(curr_accuracies)), left=1, right=0 ) ) logger.info('CSV table') for row in table: logger.info(','.join([str(e) for e in row])) # ===========# # Multiline # # ===========# filename = None if filename_template_path: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format('multiline') visualization_utils.confidence_fitlering_data_vs_acc_multiline_plot( accuracies, dataset_kept, model_names_list, title='Coverage vs Accuracy, two thresholds', filename=filename ) # ==========# # Max line # # ==========# filename = None if filename_template_path: filename = filename_template_path.format('maxline') max_accuracies = np.amax(np.array(interps), 0) visualization_utils.confidence_fitlering_data_vs_acc_plot( [max_accuracies], [thresholds], model_names_list, title='Coverage vs Accuracy, two thresholds', filename=filename ) # ==========================# # Max line with thresholds # # ==========================# acc_matrix = np.array(accuracies) cov_matrix = np.array(dataset_kept) t1_maxes = [1] t2_maxes = [1] for i in range(len(fixed_step_coverage) - 1): lower = fixed_step_coverage[i] upper = fixed_step_coverage[i + 1] indices = np.logical_and(cov_matrix >= lower, cov_matrix < upper) selected_acc = acc_matrix.copy() selected_acc[np.logical_not(indices)] = -1 threshold_indices = np.unravel_index( np.argmax(selected_acc, axis=None), selected_acc.shape) t1_maxes.append(thresholds[threshold_indices[0]]) 
t2_maxes.append(thresholds[threshold_indices[1]]) model_name = model_names_list[0] if model_names_list is not None and len( model_names_list) > 0 else '' filename = None if filename_template_path: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format('maxline_with_thresholds') visualization_utils.confidence_fitlering_data_vs_acc_plot( [max_accuracies, t1_maxes, t2_maxes], [fixed_step_coverage, fixed_step_coverage, fixed_step_coverage], model_names=[model_name + ' accuracy', name_t1, name_t2], dotted=[False, True, True], y_label='', title='Coverage vs Accuracy & Threshold', filename=filename ) def confidence_thresholding_2thresholds_3d( probabilities_per_model: List[np.array], ground_truths: Union[List[np.array], List[pd.Series]], metadata, threshold_output_feature_names: List[str], labels_limit: int, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show 3d confidence threshold data vs accuracy for two output feature names. The plot shows the 3d surfaces displayed by confidence_thresholding_2thresholds_3d that have thresholds on the confidence of the predictions of the two `threshold_output_feature_names` as x and y axes and either the data coverage percentage or the accuracy as z axis. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[List[np.array], List[pd.Series]]) containing ground truth data :param metadata: (dict) feature metadata dictionary :param threshold_output_feature_names: (List[str]) List containing two output feature names for visualization. :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param output_directory: (str, default: `None`) directory where to save plots. 
If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ try: validate_conf_treshholds_and_probabilities_2d_3d( probabilities_per_model, threshold_output_feature_names ) except RuntimeError: return probs = probabilities_per_model if not isinstance(ground_truths[0], np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[threshold_output_feature_names[0]] vfunc = np.vectorize(_encode_categorical_feature) gt_1 = vfunc(ground_truths[0], feature_metadata['str2idx']) feature_metadata = metadata[threshold_output_feature_names[1]] gt_2 = vfunc(ground_truths[1], feature_metadata['str2idx']) else: gt_1 = ground_truths[0] gt_2 = ground_truths[1] if labels_limit > 0: gt_1[gt_1 > labels_limit] = labels_limit gt_2[gt_2 > labels_limit] = labels_limit thresholds = [t / 100 for t in range(0, 101, 5)] accuracies = [] dataset_kept = [] if labels_limit > 0 and probs[0].shape[1] > labels_limit + 1: prob_limit = probs[0][:, :labels_limit + 1] prob_limit[:, labels_limit] = probs[0][:, labels_limit:].sum(1) probs[0] = prob_limit if labels_limit > 0 and probs[1].shape[1] > labels_limit + 1: prob_limit = probs[1][:, :labels_limit + 1] prob_limit[:, labels_limit] = probs[1][:, labels_limit:].sum(1) probs[1] = prob_limit max_prob_1 = np.max(probs[0], axis=1) predictions_1 = np.argmax(probs[0], axis=1) max_prob_2 = np.max(probs[1], axis=1) predictions_2 = np.argmax(probs[1], axis=1) for threshold_1 in thresholds: threshold_1 = threshold_1 if threshold_1 < 1 else 0.999 curr_accuracies = [] curr_dataset_kept = [] for threshold_2 in thresholds: threshold_2 = threshold_2 if threshold_2 < 1 else 0.999 filtered_indices = np.logical_and( max_prob_1 >= threshold_1, max_prob_2 >= threshold_2 ) filtered_gt_1 = gt_1[filtered_indices] filtered_predictions_1 = predictions_1[filtered_indices] filtered_gt_2 = gt_2[filtered_indices] 
filtered_predictions_2 = predictions_2[filtered_indices] accuracy = ( np.logical_and( filtered_gt_1 == filtered_predictions_1, filtered_gt_2 == filtered_predictions_2 ) ).sum() / len(filtered_gt_1) curr_accuracies.append(accuracy) curr_dataset_kept.append(len(filtered_gt_1) / len(gt_1)) accuracies.append(curr_accuracies) dataset_kept.append(curr_dataset_kept) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'confidence_thresholding_2thresholds_3d.' + file_format ) visualization_utils.confidence_fitlering_3d_plot( np.array(thresholds), np.array(thresholds), np.array(accuracies), np.array(dataset_kept), threshold_output_feature_names, title='Confidence_Thresholding, two thresholds', filename=filename ) def binary_threshold_vs_metric( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, metrics: List[str], positive_label: int = 1, model_names: List[str] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show confidence of the model against metric for the specified output_feature_name. For each metric specified in metrics (options are `f1`, `precision`, `recall`, `accuracy`), this visualization produces a line chart plotting a threshold on the confidence of the model against the metric for the specified output_feature_name. If output_feature_name is a category feature, positive_label, which is specified as the numeric encoded value, indicates the class to be considered positive class and all others will be considered negative. To figure out the association between classes and numeric encoded values check the ground_truth_metadata JSON file. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. 
:param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param metrics: (List[str]) metrics to display (`'f1'`, `'precision'`, `'recall'`, `'accuracy'`). :param positive_label: (int, default: `1`) numeric encoded value for the positive class. :param model_names: (List[str], default: `None`) list of the names of the models to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (`None`) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) probs = probabilities_per_model model_names_list = convert_to_list(model_names) metrics_list = convert_to_list(metrics) filename_template = 'binary_threshold_vs_metric_{}.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) thresholds = [t / 100 for t in range(0, 101, 5)] supported_metrics = {'f1', 'precision', 'recall', 'accuracy'} for metric in metrics_list: if metric not in supported_metrics: logger.error("Metric {} not supported".format(metric)) continue scores = [] for i, prob in enumerate(probs): scores_alg = [] if len(prob.shape) == 2: if prob.shape[1] > positive_label: prob = prob[:, positive_label] else: raise Exception( 'the specified positive label {} is not ' 'present in the probabilities'.format( positive_label ) ) for threshold in thresholds: threshold = threshold if threshold < 1 else 0.99 t_gt = ground_truth[prob >= threshold] predictions = prob >= threshold t_predictions = predictions[prob >= threshold] if metric == 'f1': metric_score = sklearn.metrics.f1_score( t_gt, t_predictions ) elif metric == 'precision': metric_score = sklearn.metrics.precision_score( t_gt, t_predictions ) elif metric == 'recall': metric_score = sklearn.metrics.recall_score( t_gt, t_predictions ) elif metric == ACCURACY: metric_score = sklearn.metrics.accuracy_score( t_gt, t_predictions ) scores_alg.append(metric_score) scores.append(scores_alg) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format(metric) visualization_utils.threshold_vs_metric_plot( thresholds, scores, model_names_list, title='Binary threshold vs {}'.format(metric), filename=filename ) def roc_curves( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, positive_label: int = 1, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show the roc curves for output features in the specified models. This visualization produces a line chart plotting the roc curves for the specified output feature name. 
If output feature name is a category feature, `positive_label` indicates which is the class to be considered positive class and all the others will be considered negative. `positive_label` is the encoded numeric value for category classes. The numeric value can be determined by association between classes and integers captured in the training metadata JSON file. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param positive_label: (int, default: `1`) numeric encoded value for the positive class. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) probs = probabilities_per_model model_names_list = convert_to_list(model_names) fpr_tprs = [] for i, prob in enumerate(probs): if len(prob.shape) > 1: prob = prob[:, positive_label] fpr, tpr, _ = sklearn.metrics.roc_curve( ground_truth, prob, pos_label=positive_label ) fpr_tprs.append((fpr, tpr)) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = os.path.join( output_directory, 'roc_curves.' 
+ file_format ) visualization_utils.roc_curves( fpr_tprs, model_names_list, title='ROC curves', filename=filename ) def roc_curves_from_test_statistics( test_stats_per_model: List[dict], output_feature_name: str, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show the roc curves for the specified models output binary `output_feature_name`. This visualization uses `output_feature_name`, `test_stats_per_model` and `model_names` parameters. `output_feature_name` needs to be binary feature. This visualization produces a line chart plotting the roc curves for the specified `output_feature_name`. # Inputs :param test_stats_per_model: (List[dict]) dictionary containing evaluation performance statistics. :param output_feature_name: (str) name of the output feature to use for the visualization. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ model_names_list = convert_to_list(model_names) filename_template = 'roc_curves_from_prediction_statistics.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) fpr_tprs = [] for curr_test_statistics in test_stats_per_model: fpr = curr_test_statistics[output_feature_name]['roc_curve'][ 'false_positive_rate'] tpr = curr_test_statistics[output_feature_name]['roc_curve'][ 'true_positive_rate'] fpr_tprs.append((fpr, tpr)) visualization_utils.roc_curves( fpr_tprs, model_names_list, title='ROC curves', filename=filename_template_path ) def calibration_1_vs_all( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, top_n_classes: List[int], labels_limit: int, model_names: List[str] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show models probability of predictions for the specified output_feature_name. For each class or each of the k most frequent classes if top_k is specified, it produces two plots computed on the fly from the probabilities of predictions for the specified output_feature_name. The first plot is a calibration curve that shows the calibration of the predictions considering the current class to be the true one and all others to be a false one, drawing one line for each model (in the aligned lists of probabilities and model_names). The second plot shows the distributions of the predictions considering the current class to be the true one and all others to be a false one, drawing the distribution for each model (in the aligned lists of probabilities and model_names). # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param top_n_classes: (list) List containing the number of classes to plot. :param labels_limit: (int) upper limit on the numeric encoded label value. 
Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (List[str], default: `None`) list of the names of the models to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # String :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) probs = probabilities_per_model model_names_list = convert_to_list(model_names) filename_template = 'calibration_1_vs_all_{}.' + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) probs[i] = prob_limit num_classes = max(ground_truth) + 1 brier_scores = [] classes = (min(num_classes, top_n_classes[0]) if top_n_classes[0] > 0 else num_classes) for class_idx in range(classes): fraction_positives_class = [] mean_predicted_vals_class = [] probs_class = [] brier_scores_class = [] for prob in probs: # ground_truth is an vector of integers, each integer is a class # index to have a [0,1] vector we have to check if the value equals # the input class index and convert the resulting boolean vector # into an integer vector probabilities is a n x c matrix, n is the # number of datapoints and c number of classes; its values are the # probabilities of the ith datapoint to be classified as belonging # to the jth class according to 
the learned model. For this reason # we need to take only the column of predictions that is about the # class we are interested in, the input class index gt_class = (ground_truth == class_idx).astype(int) prob_class = prob[:, class_idx] ( curr_fraction_positives, curr_mean_predicted_vals ) = calibration_curve(gt_class, prob_class, n_bins=21) if len(curr_fraction_positives) < 2: curr_fraction_positives = np.concatenate( (np.array([0.]), curr_fraction_positives) ) if len(curr_mean_predicted_vals) < 2: curr_mean_predicted_vals = np.concatenate( (np.array([0.]), curr_mean_predicted_vals) ) fraction_positives_class.append(curr_fraction_positives) mean_predicted_vals_class.append(curr_mean_predicted_vals) probs_class.append(prob[:, class_idx]) brier_scores_class.append( brier_score_loss( gt_class, prob_class, pos_label=1 ) ) brier_scores.append(brier_scores_class) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format(class_idx) visualization_utils.calibration_plot( fraction_positives_class, mean_predicted_vals_class, model_names_list, filename=filename ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format( 'prediction_distribution_' + str(class_idx) ) visualization_utils.predictions_distribution_plot( probs_class, model_names_list, filename=filename ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format('brier') visualization_utils.brier_plot( np.array(brier_scores), model_names_list, filename=filename ) def calibration_multiclass( probabilities_per_model: List[np.array], ground_truth: Union[pd.Series, np.ndarray], metadata: dict, output_feature_name: str, labels_limit: int, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show models probability of predictions for each class of the specified 
output_feature_name. # Inputs :param probabilities_per_model: (List[numpy.array]) list of model probabilities. :param ground_truth: (Union[pd.Series, np.ndarray]) ground truth values :param metadata: (dict) feature metadata dictionary :param output_feature_name: (str) output feature name :param labels_limit: (int) upper limit on the numeric encoded label value. Encoded numeric label values in dataset that are higher than `label_limit` are considered to be "rare" labels. :param model_names: (List[str], default: `None`) list of the names of the models to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ if not isinstance(ground_truth, np.ndarray): # not np array, assume we need to translate raw value to encoded value feature_metadata = metadata[output_feature_name] vfunc = np.vectorize(_encode_categorical_feature) ground_truth = vfunc(ground_truth, feature_metadata['str2idx']) probs = probabilities_per_model model_names_list = convert_to_list(model_names) filename_template = 'calibration_multiclass{}.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) if labels_limit > 0: ground_truth[ground_truth > labels_limit] = labels_limit prob_classes = 0 for i, prob in enumerate(probs): if labels_limit > 0 and prob.shape[1] > labels_limit + 1: prob_limit = prob[:, :labels_limit + 1] prob_limit[:, labels_limit] = prob[:, labels_limit:].sum(1) probs[i] = prob_limit if probs[i].shape[1] > prob_classes: prob_classes = probs[i].shape[1] gt_one_hot_dim_2 = max(prob_classes, max(ground_truth) + 1) gt_one_hot = np.zeros((len(ground_truth), gt_one_hot_dim_2)) gt_one_hot[np.arange(len(ground_truth)), ground_truth] = 1 gt_one_hot_flat = gt_one_hot.flatten() fraction_positives = [] mean_predicted_vals = [] brier_scores = [] for prob in probs: # flatten probabilities to be compared to flatten ground truth prob_flat = prob.flatten() curr_fraction_positives, curr_mean_predicted_vals = calibration_curve( gt_one_hot_flat, prob_flat, n_bins=21 ) fraction_positives.append(curr_fraction_positives) mean_predicted_vals.append(curr_mean_predicted_vals) brier_scores.append( brier_score_loss( gt_one_hot_flat, prob_flat, pos_label=1 ) ) filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format('') visualization_utils.calibration_plot( fraction_positives, mean_predicted_vals, model_names_list, filename=filename ) filename = None if output_directory: filename = filename_template_path.format('_brier') visualization_utils.compare_classifiers_plot( [brier_scores], ['brier'], model_names, adaptive=True, decimals=8, filename=filename ) for i, brier_score in enumerate(brier_scores): if i < len(model_names): tokenizer_name = '{}: '.format(model_names[i]) tokenizer_name += '{}' else: tokenizer_name = '{}' logger.info(tokenizer_name.format(brier_score)) def confusion_matrix( test_stats_per_model: List[dict], metadata: dict, output_feature_name: Union[str, None], top_n_classes: 
List[int], normalize: bool, model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """Show confision matrix in the models predictions for each `output_feature_name`. For each model (in the aligned lists of test_statistics and model_names) it produces a heatmap of the confusion matrix in the predictions for each output_feature_name that has a confusion matrix in test_statistics. The value of `top_n_classes` limits the heatmap to the n most frequent classes. # Inputs :param test_stats_per_model: (List[dict]) dictionary containing evaluation performance statistics. :param metadata: (dict) intermediate preprocess structure created during training containing the mappings of the input dataset. :param output_feature_name: (Union[str, `None`]) name of the output feature to use for the visualization. If `None`, use all output features. :param top_n_classes: (List[int]) number of top classes or list containing the number of top classes to plot. :param normalize: (bool) flag to normalize rows in confusion matrix. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ test_stats_per_model_list = test_stats_per_model model_names_list = convert_to_list(model_names) filename_template = 'confusion_matrix_{}_{}_{}.' 
+ file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) output_feature_names = _validate_output_feature_name_from_test_stats( output_feature_name, test_stats_per_model_list ) for i, test_statistics in enumerate( test_stats_per_model_list): for output_feature_name in output_feature_names: if 'confusion_matrix' in test_statistics[output_feature_name]: _confusion_matrix = np.array( test_statistics[output_feature_name]['confusion_matrix'] ) model_name_name = model_names_list[i] if ( model_names_list is not None and i < len( model_names_list) ) else '' if metadata is not None and output_feature_name in metadata and 'idx2str' in \ metadata[output_feature_name]: labels = metadata[output_feature_name]['idx2str'] else: labels = list(range(len(_confusion_matrix))) for k in top_n_classes: k = (min(k, _confusion_matrix.shape[0]) if k > 0 else _confusion_matrix.shape[0]) cm = _confusion_matrix[:k, :k] if normalize: with np.errstate(divide='ignore', invalid='ignore'): cm_norm = np.true_divide(cm, cm.sum(1)[:, np.newaxis]) cm_norm[cm_norm == np.inf] = 0 cm_norm = np.nan_to_num(cm_norm) cm = cm_norm filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format( model_name_name, output_feature_name, 'top' + str(k) ) visualization_utils.confusion_matrix_plot( cm, labels[:k], output_feature_name=output_feature_name, filename=filename ) entropies = [] for row in cm: if np.count_nonzero(row) > 0: entropies.append(entropy(row)) else: entropies.append(0) class_entropy = np.array(entropies) class_desc_entropy = np.argsort(class_entropy)[::-1] desc_entropy = class_entropy[class_desc_entropy] filename = None if output_directory: filename = filename_template_path.format( 'entropy_' + model_name_name, output_feature_name, 'top' + str(k) ) visualization_utils.bar_plot( class_desc_entropy, desc_entropy, labels=[labels[i] for i in class_desc_entropy], title='Classes ranked by 
entropy of ' 'Confusion Matrix row', filename=filename ) def frequency_vs_f1( test_stats_per_model: List[dict], metadata: dict, output_feature_name: Union[str, None], top_n_classes: List[int], model_names: Union[str, List[str]] = None, output_directory: str = None, file_format: str = 'pdf', **kwargs ): """Show prediction statistics for the specified `output_feature_name` for each model. For each model (in the aligned lists of `test_stats_per_model` and `model_names`), produces two plots statistics of predictions for the specified `output_feature_name`. The first plot is a line plot with one x axis representing the different classes and two vertical axes colored in orange and blue respectively. The orange one is the frequency of the class and an orange line is plotted to show the trend. The blue one is the F1 score for that class and a blue line is plotted to show the trend. The classes on the x axis are sorted by f1 score. The second plot has the same structure of the first one, but the axes are flipped and the classes on the x axis are sorted by frequency. # Inputs :param test_stats_per_model: (List[dict]) dictionary containing evaluation performance statistics. :param metadata: (dict) intermediate preprocess structure created during training containing the mappings of the input dataset. :param output_feature_name: (Union[str, `None`]) name of the output feature to use for the visualization. If `None`, use all output features. :param top_n_classes: (List[int]) number of top classes or list containing the number of top classes to plot. :param model_names: (Union[str, List[str]], default: `None`) model name or list of the model names to use as labels. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window :param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. 
# Return :return: (None) """ test_stats_per_model_list = test_stats_per_model model_names_list = convert_to_list(model_names) filename_template = 'frequency_vs_f1_{}_{}.' + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) output_feature_names = _validate_output_feature_name_from_test_stats( output_feature_name, test_stats_per_model_list ) k = top_n_classes[0] for i, test_stats in enumerate(test_stats_per_model_list): for of_name in output_feature_names: # Figure out model name model_name = ( model_names_list[i] if model_names_list is not None and i < len(model_names_list) else '' ) # setup directory and filename filename = None if output_directory: os.makedirs(output_directory, exist_ok=True) filename = filename_template_path.format(model_name, of_name) # setup local variables per_class_stats = test_stats[of_name]['per_class_stats'] class_names = metadata[of_name]['idx2str'] if k > 0: class_names = class_names[:k] f1_scores = [] labels = [] for class_name in class_names: class_stats = per_class_stats[class_name] f1_scores.append(class_stats['f1_score']) labels.append(class_name) # get np arrays of frequencies, f1s and labels idx2freq = { metadata[of_name]['str2idx'][key]: val for key, val in metadata[of_name]['str2freq'].items() } freq_np = np.array( [idx2freq[class_id] for class_id in sorted(idx2freq)], dtype=np.int32 ) f1_np = np.nan_to_num(np.array(f1_scores, dtype=np.float32)) labels_np = np.array(labels) # sort by f1 f1_sort_idcs = f1_np.argsort()[::-1] len_f1_sort_idcs = len(f1_sort_idcs) freq_sorted_by_f1 = freq_np[f1_sort_idcs] freq_sorted_by_f1 = freq_sorted_by_f1[:len_f1_sort_idcs] f1_sorted_by_f1 = f1_np[f1_sort_idcs] f1_sorted_by_f1 = f1_sorted_by_f1[:len_f1_sort_idcs] labels_sorted_by_f1 = labels_np[f1_sort_idcs] labels_sorted_by_f1 = labels_sorted_by_f1[:len_f1_sort_idcs] # create viz sorted by f1 visualization_utils.double_axis_line_plot( f1_sorted_by_f1, freq_sorted_by_f1, 'F1 score', 
'frequency', labels=labels_sorted_by_f1, title='{} F1 Score vs Frequency {}'.format( model_name, of_name ), filename=filename ) # sort by freq freq_sort_idcs = freq_np.argsort()[::-1] len_freq_sort_idcs = len(freq_sort_idcs) freq_sorted_by_freq = freq_np[freq_sort_idcs] freq_sorted_by_freq = freq_sorted_by_freq[:len_freq_sort_idcs] f1_sorted_by_freq = f1_np[freq_sort_idcs] f1_sorted_by_freq = f1_sorted_by_freq[:len_freq_sort_idcs] labels_sorted_by_freq = labels_np[freq_sort_idcs] labels_sorted_by_freq = labels_sorted_by_freq[:len_freq_sort_idcs] # create viz sorted by freq visualization_utils.double_axis_line_plot( freq_sorted_by_freq, f1_sorted_by_freq, 'frequency', 'F1 score', labels=labels_sorted_by_freq, title='{} F1 Score vs Frequency {}'.format( model_name, of_name ), filename=filename ) def hyperopt_report_cli( hyperopt_stats_path, output_directory=None, file_format='pdf', **kwargs ) -> None: """ Produces a report about hyperparameter optimization creating one graph per hyperparameter to show the distribution of results and one additional graph of pairwise hyperparameters interactions. :param hyperopt_stats_path: path to the hyperopt results JSON file :param output_directory: path where to save the output plots :param file_format: format of the output plot, pdf or png :return: """ hyperopt_report( hyperopt_stats_path, output_directory=output_directory, file_format=file_format ) def hyperopt_report( hyperopt_stats_path: str, output_directory: str = None, file_format: str = 'pdf', **kwargs ) -> None: """ Produces a report about hyperparameter optimization creating one graph per hyperparameter to show the distribution of results and one additional graph of pairwise hyperparameters interactions. # Inputs :param hyperopt_stats_path: (str) path to the hyperopt results JSON file. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window. 
:param file_format: (str, default: `'pdf'`) file format of output plots - `'pdf'` or `'png'`. # Return :return: (None) """ filename_template = 'hyperopt_{}.' + file_format filename_template_path = generate_filename_template_path( output_directory, filename_template ) hyperopt_stats = load_json(hyperopt_stats_path) visualization_utils.hyperopt_report( hyperopt_stats['hyperopt_config']['parameters'], hyperopt_results_to_dataframe( hyperopt_stats['hyperopt_results'], hyperopt_stats['hyperopt_config']['parameters'], hyperopt_stats['hyperopt_config']['metric'] ), metric=hyperopt_stats['hyperopt_config']['metric'], filename_template=filename_template_path ) def hyperopt_hiplot_cli( hyperopt_stats_path, output_directory=None, **kwargs ): """ Produces a parallel coordinate plot about hyperparameter optimization creating one HTML file and optionally a CSV file to be read by hiplot :param hyperopt_stats_path: path to the hyperopt results JSON file :param output_directory: path where to save the output plots :return: """ hyperopt_hiplot( hyperopt_stats_path, output_directory=output_directory ) def hyperopt_hiplot( hyperopt_stats_path, output_directory=None, **kwargs ): """ Produces a parallel coordinate plot about hyperparameter optimization creating one HTML file and optionally a CSV file to be read by hiplot # Inputs :param hyperopt_stats_path: (str) path to the hyperopt results JSON file. :param output_directory: (str, default: `None`) directory where to save plots. If not specified, plots will be displayed in a window. 
# Return :return: (None) """ filename = 'hyperopt_hiplot.html' filename_path = generate_filename_template_path( output_directory, filename ) hyperopt_stats = load_json(hyperopt_stats_path) hyperopt_df = hyperopt_results_to_dataframe( hyperopt_stats['hyperopt_results'], hyperopt_stats['hyperopt_config']['parameters'], hyperopt_stats['hyperopt_config']['metric'] ) visualization_utils.hyperopt_hiplot( hyperopt_df, filename=filename_path, ) def hyperopt_results_to_dataframe( hyperopt_results, hyperopt_parameters, metric ): df = pd.DataFrame( [{metric: res['metric_score'], **res['parameters']} for res in hyperopt_results] ) df = df.astype( {hp_name: hp_params[TYPE] for hp_name, hp_params in hyperopt_parameters.items()} ) return df visualizations_registry = { 'compare_performance': compare_performance_cli, 'compare_classifiers_performance_from_prob': compare_classifiers_performance_from_prob_cli, 'compare_classifiers_performance_from_pred': compare_classifiers_performance_from_pred_cli, 'compare_classifiers_performance_subset': compare_classifiers_performance_subset_cli, 'compare_classifiers_performance_changing_k': compare_classifiers_performance_changing_k_cli, 'compare_classifiers_multiclass_multimetric': compare_classifiers_multiclass_multimetric_cli, 'compare_classifiers_predictions': compare_classifiers_predictions_cli, 'compare_classifiers_predictions_distribution': compare_classifiers_predictions_distribution_cli, 'confidence_thresholding': confidence_thresholding_cli, 'confidence_thresholding_data_vs_acc': confidence_thresholding_data_vs_acc_cli, 'confidence_thresholding_data_vs_acc_subset': confidence_thresholding_data_vs_acc_subset_cli, 'confidence_thresholding_data_vs_acc_subset_per_class': confidence_thresholding_data_vs_acc_subset_per_class_cli, 'confidence_thresholding_2thresholds_2d': confidence_thresholding_2thresholds_2d_cli, 'confidence_thresholding_2thresholds_3d': confidence_thresholding_2thresholds_3d_cli, 'binary_threshold_vs_metric': 
binary_threshold_vs_metric_cli, 'roc_curves': roc_curves_cli, 'roc_curves_from_test_statistics': roc_curves_from_test_statistics_cli, 'calibration_1_vs_all': calibration_1_vs_all_cli, 'calibration_multiclass': calibration_multiclass_cli, 'confusion_matrix': confusion_matrix_cli, 'frequency_vs_f1': frequency_vs_f1_cli, 'learning_curves': learning_curves_cli, 'hyperopt_report': hyperopt_report_cli, 'hyperopt_hiplot': hyperopt_hiplot_cli } def cli(sys_argv): parser = argparse.ArgumentParser( description='This script analyzes results and shows some nice plots.', prog='ludwig visualize', usage='%(prog)s [options]') parser.add_argument('-g', '--ground_truth', help='ground truth file') parser.add_argument( '-gm', '--ground_truth_metadata', help='input metadata JSON file' ) parser.add_argument( '-sf', '--split_file', default=None, help='file containing split values used in conjunction with ' 'ground truth file.' ) parser.add_argument( '-od', '--output_directory', help='directory where to save plots.' 
'If not specified, plots will be displayed in a window' ) parser.add_argument( '-ff', '--file_format', help='file format of output plots', default='pdf', choices=['pdf', 'png'] ) parser.add_argument( '-v', '--visualization', choices=sorted(list(visualizations_registry.keys())), help='type of visualization', required=True ) parser.add_argument( '-ofn', '--output_feature_name', default=[], help='name of the output feature to visualize' ) parser.add_argument( '-gts', '--ground_truth_split', default=2, help='ground truth split - 0:train, 1:validation, 2:test split' ) parser.add_argument( '-tf', '--threshold_output_feature_names', default=[], nargs='+', help='names of output features for 2d threshold' ) parser.add_argument( '-pred', '--predictions', default=[], nargs='+', type=str, help='predictions files' ) parser.add_argument( '-prob', '--probabilities', default=[], nargs='+', type=str, help='probabilities files' ) parser.add_argument( '-trs', '--training_statistics', default=[], nargs='+', type=str, help='training stats files' ) parser.add_argument( '-tes', '--test_statistics', default=[], nargs='+', type=str, help='test stats files' ) parser.add_argument( '-hs', '--hyperopt_stats_path', default=None, type=str, help='hyperopt stats file' ) parser.add_argument( '-mn', '--model_names', default=[], nargs='+', type=str, help='names of the models to use as labels' ) parser.add_argument( '-tn', '--top_n_classes', default=[0], nargs='+', type=int, help='number of classes to plot' ) parser.add_argument( '-k', '--top_k', default=3, type=int, help='number of elements in the ranklist to consider' ) parser.add_argument( '-ll', '--labels_limit', default=0, type=int, help='maximum numbers of labels. 
' 'If labels in dataset are higher than this number, "rare" label' ) parser.add_argument( '-ss', '--subset', default='ground_truth', choices=['ground_truth', PREDICTIONS], help='type of subset filtering' ) parser.add_argument( '-n', '--normalize', action='store_true', default=False, help='normalize rows in confusion matrix' ) parser.add_argument( '-m', '--metrics', default=['f1'], nargs='+', type=str, help='metrics to dispay in threshold_vs_metric' ) parser.add_argument( '-pl', '--positive_label', type=int, default=1, help='label of the positive class for the roc curve' ) parser.add_argument( '-l', '--logging_level', default='info', help='the level of logging to use', choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'] ) args = parser.parse_args(sys_argv) args.logging_level = logging_level_registry[args.logging_level] logging.getLogger('ludwig').setLevel( args.logging_level ) global logger logger = logging.getLogger('ludwig.visualize') try: vis_func = visualizations_registry[args.visualization] except KeyError: logging.info('Visualization argument not recognized') raise vis_func(**vars(args)) if __name__ == '__main__': contrib_import() contrib_command("visualize", *sys.argv) cli(sys.argv[1:])
import React from 'react'; import PropTypes from 'prop-types'; import { Table, Input, Alert, Form } from 'antd'; import { Link } from 'react-router-dom'; import './ModelListView.css'; import { getModelPageRoute, getModelVersionPageRoute } from '../routes'; import Utils from '../../common/utils/Utils'; import { AntdTableSortOrder, Stages, StageTagComponents, EMPTY_CELL_PLACEHOLDER, registeredModel_PER_PAGE, registeredModel_SEARCH_NAME_FIELD, registeredModel_SEARCH_TIMESTAMP_FIELD, } from '../constants'; import { ModelRegistryDocUrl, ModelRegistryOnboardingString, onboarding, } from '../../common/constants'; import { SimplePagination } from '../../common/components/SimplePagination'; import { Spinner } from '../../common/components/Spinner'; import { CreateModelButton } from './CreateModelButton'; import LocalStorageUtils from '../../common/utils/LocalStorageUtils'; import { css } from 'emotion'; import { CollapsibleTagsCell } from '../../common/components/CollapsibleTagsCell'; import { RegisteredModelTag } from '../sdk/ModelRegistryMessages'; import filterIcon from '../../common/static/filter-icon.svg'; import { CSSTransition } from 'react-transition-group'; import { PageHeader } from '../../shared/building_blocks/PageHeader'; import { FlexBar } from '../../shared/building_blocks/FlexBar'; import { Button } from '../../shared/building_blocks/Button'; import { Spacer } from '../../shared/building_blocks/Spacer'; import { SearchBox } from '../../shared/building_blocks/SearchBox'; import { mlPagePadding } from '../../shared/styleConstants'; import { FormattedMessage, injectIntl } from 'react-intl'; const NAME_COLUMN_INDEX = 'name'; const LAST_MODIFIED_COLUMN_INDEX = 'lastUpdatedTimestamp'; const getOverallLatestVersionNumber = (latest_versions) => latest_versions && Math.max(...latest_versions.map((v) => v.version)); const getLatestVersionNumberByStage = (latest_versions, stage) => { const modelVersion = latest_versions && latest_versions.find((v) => v.currentStage === 
stage); return modelVersion && modelVersion.version; }; export class ModelListViewImpl extends React.Component { constructor(props) { super(props); this.state = { loading: false, lastNavigationActionWasClickPrev: false, maxResultsSelection: registeredModel_PER_PAGE, showOnboardingHelper: this.showOnboardingHelper(), showFilters: false, nameSearchInput: props.nameSearchInput, tagSearchInput: props.tagSearchInput, }; } static propTypes = { models: PropTypes.array.isRequired, nameSearchInput: PropTypes.string.isRequired, tagSearchInput: PropTypes.string.isRequired, orderByKey: PropTypes.string.isRequired, orderByAsc: PropTypes.bool.isRequired, currentPage: PropTypes.number.isRequired, // To know if there is a next page. If null, there is no next page. If undefined, we haven't // gotten an answer from the backend yet. nextPageToken: PropTypes.string, loading: PropTypes.bool, onSearch: PropTypes.func.isRequired, onClear: PropTypes.func.isRequired, onClickNext: PropTypes.func.isRequired, onClickPrev: PropTypes.func.isRequired, onClickSortableColumn: PropTypes.func.isRequired, onSetMaxResult: PropTypes.func.isRequired, getMaxResultValue: PropTypes.func.isRequired, intl: PropTypes.any, }; static defaultProps = { models: [], nameSearchInput: '', tagSearchInput: '', }; showOnboardingHelper() { const onboardingInformationStore = ModelListViewImpl.getLocalStore(onboarding); return onboardingInformationStore.getItem('showRegistryHelper') === null; } disableOnboardingHelper() { const onboardingInformationStore = ModelListViewImpl.getLocalStore(onboarding); onboardingInformationStore.setItem('showRegistryHelper', 'false'); } /** * Returns a LocalStorageStore instance that can be used to persist data associated with the * ModelRegistry component. 
*/ static getLocalStore(key) { return LocalStorageUtils.getStoreForComponent('ModelListView', key); } componentDidMount() { const pageTitle = 'MLflow Models'; Utils.updatePageTitle(pageTitle); } renderModelVersionLink(name, versionNumber) { return ( <FormattedMessage defaultMessage='<link>Version {versionNumber}</link>' description='Row entry for version columns in the registered model page' values={{ versionNumber: versionNumber, link: (chunks) => ( <Link to={getModelVersionPageRoute(name, versionNumber)}>{chunks}</Link> ), }} /> ); } getSortOrder = (key) => { const { orderByKey, orderByAsc } = this.props; if (key !== orderByKey) { return null; } return { sortOrder: orderByAsc ? AntdTableSortOrder.ASC : AntdTableSortOrder.DESC }; }; handleCellToggle = () => { this.forceUpdate(); }; getColumns = () => { const columns = [ { title: this.props.intl.formatMessage({ defaultMessage: 'Name', description: 'Column title for model name in the registered model page', }), className: 'model-name', dataIndex: NAME_COLUMN_INDEX, render: (text, row) => { return <Link to={getModelPageRoute(row.name)}>{text}</Link>; }, sorter: true, ...this.getSortOrder(registeredModel_SEARCH_NAME_FIELD), }, { title: this.props.intl.formatMessage({ defaultMessage: 'Latest Version', description: 'Column title for latest model version in the registered model page', }), className: 'latest-version', render: ({ name, latest_versions }) => { const versionNumber = getOverallLatestVersionNumber(latest_versions); return versionNumber ? this.renderModelVersionLink(name, versionNumber) : EMPTY_CELL_PLACEHOLDER; }, }, { title: StageTagComponents[Stages.STAGING], className: 'latest-staging', render: ({ name, latest_versions }) => { const versionNumber = getLatestVersionNumberByStage(latest_versions, Stages.STAGING); return versionNumber ? 
this.renderModelVersionLink(name, versionNumber) : EMPTY_CELL_PLACEHOLDER; }, }, { title: StageTagComponents[Stages.PRODUCTION], className: 'latest-production', render: ({ name, latest_versions }) => { const versionNumber = getLatestVersionNumberByStage(latest_versions, Stages.PRODUCTION); return versionNumber ? this.renderModelVersionLink(name, versionNumber) : EMPTY_CELL_PLACEHOLDER; }, }, { title: this.props.intl.formatMessage({ defaultMessage: 'Last Modified', description: 'Column title for last modified timestamp for a model in the registered model page', }), className: 'last-modified', dataIndex: LAST_MODIFIED_COLUMN_INDEX, render: (text, row) => <span>{Utils.formatTimestamp(row.lastUpdatedTimestamp)}</span>, sorter: true, ...this.getSortOrder(registeredModel_SEARCH_TIMESTAMP_FIELD), }, { title: this.props.intl.formatMessage({ defaultMessage: 'Tags', description: 'Column title for model tags in the registered model page', }), className: 'table-tag-container', render: (row, index) => { return index.tags && index.tags.length > 0 ? 
( <div style={{ wordWrap: 'break-word', wordBreak: 'break-word' }}> <CollapsibleTagsCell tags={{ ...index.tags.map((tag) => RegisteredModelTag.fromJs(tag)) }} onToggle={this.handleCellToggle} /> </div> ) : ( EMPTY_CELL_PLACEHOLDER ); }, }, ]; return columns; }; getRowKey = (record) => record.name; setLoadingFalse = () => { this.setState({ loading: false }); }; handleSearch = (event) => { event.preventDefault(); this.setState({ loading: true, lastNavigationActionWasClickPrev: false }); this.props.onSearch( this.state.nameSearchInput, this.state.tagSearchInput, this.setLoadingFalse, this.setLoadingFalse, ); }; static getSortFieldName = (column) => { switch (column) { case NAME_COLUMN_INDEX: return registeredModel_SEARCH_NAME_FIELD; case LAST_MODIFIED_COLUMN_INDEX: return registeredModel_SEARCH_TIMESTAMP_FIELD; default: return null; } }; handleTableChange = (pagination, filters, sorter) => { this.setState({ loading: true, lastNavigationActionWasClickPrev: false }); this.props.onClickSortableColumn( ModelListViewImpl.getSortFieldName(sorter.field), sorter.order, this.setLoadingFalse, this.setLoadingFalse, ); }; renderOnboardingContent() { const learnMoreLinkUrl = ModelListViewImpl.getLearnMoreLinkUrl(); const learnMoreDisplayString = ModelListViewImpl.getLearnMoreDisplayString(); const content = ( <div> {learnMoreDisplayString}{' '} <FormattedMessage defaultMessage='<link>Learn more</link>' description='Learn more link on the model list page with cloud-specific link' values={{ link: (chunks) => ( <a href={learnMoreLinkUrl} target='_blank' rel='noopener noreferrer' className='LinkColor' > {chunks} </a> ), }} /> </div> ); return this.state.showOnboardingHelper ? 
(
      // Dismissible onboarding banner; hidden permanently once the user closes it.
      <Alert
        className='onboarding-information'
        description={content}
        type='info'
        showIcon
        closable
        onClose={() => this.disableOnboardingHelper()}
      />
    ) : null;
  }

  // Content rendered inside the table body when there are no rows to show.
  // The message depends on whether a name/tag search filter is currently active.
  getEmptyTextComponent() {
    const { nameSearchInput, tagSearchInput } = this.props;
    const { lastNavigationActionWasClickPrev } = this.state;
    // Handle the case when emptiness is caused by search filter
    if (nameSearchInput || tagSearchInput) {
      if (lastNavigationActionWasClickPrev) {
        // An empty page reached via "previous" suggests the server-side data shifted.
        return (
          'No models found for the page. Please refresh the page as the underlying data may ' +
          'have changed significantly.'
        );
      } else {
        return 'No models found.';
      }
    }
    // No filter active: prompt the user to create their first registered model.
    return (
      <div>
        <span>
          <FormattedMessage
            defaultMessage='No models yet. <link>Create a model</link> to get started.'
            description='Placeholder text for empty models table in the registered model list page'
            values={{
              link: (chunks) => <CreateModelButton buttonType='link' buttonText={chunks} />,
            }}
          />
        </span>
      </div>
    );
  }

  // Static accessors for the onboarding "learn more" URL and its display text.
  static getLearnMoreLinkUrl = () => ModelRegistryDocUrl;

  static getLearnMoreDisplayString = () => ModelRegistryOnboardingString;

  // Pagination: advance one page. Both the success and error callbacks clear
  // the loading spinner via setLoadingFalse.
  handleClickNext = () => {
    this.setState({ loading: true, lastNavigationActionWasClickPrev: false });
    this.props.onClickNext(this.setLoadingFalse, this.setLoadingFalse);
  };

  // Pagination: go back one page; remembered so getEmptyTextComponent can
  // tailor its empty-state message.
  handleClickPrev = () => {
    this.setState({ loading: true, lastNavigationActionWasClickPrev: true });
    this.props.onClickPrev(this.setLoadingFalse, this.setLoadingFalse);
  };

  // Menu handler for the page-size selector; only `key` (the chosen size) is used.
  handleSetMaxResult = ({ item, key, keyPath, domEvent }) => {
    this.setState({ loading: true });
    this.props.onSetMaxResult(key, this.setLoadingFalse, this.setLoadingFalse);
  };

  // Show/hide the tag-filter dropdown below the toolbar.
  handleFilterToggle = () => {
    this.setState((previousState) => ({ showFilters: !previousState.showFilters }));
  };

  // Controlled-input handlers for the two search boxes.
  handleNameSearchInput = (event) => {
    this.setState({ nameSearchInput: event.target.value });
  };

  handleTagSearchInput = (event) => {
    this.setState({ tagSearchInput: event.target.value });
  };

  // Reset both search inputs and ask the parent to reload the unfiltered list.
  handleClear = () => {
    this.setState({ nameSearchInput: '', tagSearchInput: '' });
    this.props.onClear(this.setLoadingFalse, this.setLoadingFalse);
  };

  render() {
    const { models, currentPage, nextPageToken } = this.props;
    const { loading } = this.state;
    const title = (
      <Spacer size='small' direction='horizontal'>
        <span>
          <FormattedMessage
            defaultMessage='Registered Models'
            description='Header for displaying models in the model registry'
          />
        </span>
      </Spacer>
    );
    return (
      <div data-test-id='ModelListView-container' className={styles.rootContainer}>
        <PageHeader title={title}>
          <></>
        </PageHeader>
        {this.renderOnboardingContent()}
        {/* Toolbar: create button on the left; search, filter and clear on the right. */}
        <FlexBar
          left={
            <Spacer size='small' direction='horizontal'>
              <span className={`${styles.createModelButtonWrapper}`}>
                <CreateModelButton />
              </span>
            </Spacer>
          }
          right={
            <Spacer direction='horizontal' size='small'>
              <div className={styles.nameSearchBox}>
                <SearchBox
                  onChange={this.handleNameSearchInput}
                  value={this.state.nameSearchInput}
                  onSearch={this.handleSearch}
                  onPressEnter={this.handleSearch}
                  placeholder={this.props.intl.formatMessage({
                    defaultMessage: 'Search by model name',
                    description: 'Placeholder text inside model search bar',
                  })}
                />
              </div>
              <Button dataTestId='filter-button' onClick={this.handleFilterToggle}>
                <img className='filterIcon' src={filterIcon} alt='Filter' />
                <FormattedMessage
                  defaultMessage='Filter'
                  description='String for the filter button to filter model registry table for models'
                />
              </Button>
              <Button dataTestId='clear-button' onClick={this.handleClear}>
                <FormattedMessage
                  defaultMessage='Clear'
                  description='String for the clear button to clear the text for searching models'
                />
              </Button>
            </Spacer>
          }
        />
        {/* Collapsible tag-filter row, animated open/closed via CSSTransition. */}
        <div className='ModelListView-filter-dropdown'>
          <CSSTransition
            in={this.state.showFilters}
            timeout={300}
            classNames='lifecycleButtons'
            unmountOnExit
          >
            <FlexBar
              left={<div />}
              right={
                <Form.Item className={styles.tagLabelWrapper} label='Tags' labelCol={{ span: 24 }}>
                  <Input
                    name='tags-search'
                    data-testid='ModelListView-tagSearchBox'
                    aria-label='Search Tags'
                    type='text'
                    placeholder={`Search tags: tags.key='value'`}
                    value={this.state.tagSearchInput}
                    onChange={this.handleTagSearchInput}
                    onPressEnter={this.handleSearch}
                  />
                </Form.Item>
              }
            />
          </CSSTransition>
        </div>
        <Table
          size='middle'
          rowKey={this.getRowKey}
          className='model-version-table'
          dataSource={models}
          columns={this.getColumns()}
          locale={{ emptyText: this.getEmptyTextComponent() }}
          // hideOnSinglePage: built-in pager is hidden; SimplePagination below drives paging.
          pagination={{
            hideOnSinglePage: true,
            pageSize: this.props.getMaxResultValue(),
          }}
          loading={loading && { indicator: <Spinner /> }}
          onChange={this.handleTableChange}
        />
        <div>
          <SimplePagination
            currentPage={currentPage}
            loading={this.props.loading}
            // A null nextPageToken marks the last page of server-side results.
            isLastPage={nextPageToken === null}
            onClickNext={this.handleClickNext}
            onClickPrev={this.handleClickPrev}
            handleSetMaxResult={this.handleSetMaxResult}
            maxResultOptions={[registeredModel_PER_PAGE, 25, 50, 100]}
            getSelectedPerPageSelection={this.props.getMaxResultValue}
          />
        </div>
      </div>
    );
  }
}

// Wrap with react-intl so `this.props.intl` is available for formatMessage calls.
export const ModelListView = injectIntl(ModelListViewImpl);

// Emotion style definitions scoped to this view.
const styles = {
  tagLabelWrapper: css({
    paddingBottom: '0',
    paddingTop: '16px',
    width: '614px',
  }),
  createModelButtonWrapper: css({
    marginLeft: 'auto',
    order: 2,
    height: '40px',
    width: '120px',
  }),
  nameSearchBox: css({
    width: '446px',
  }),
  rootContainer: css({
    margin: mlPagePadding,
  }),
};
# flake8: noqa: F811, F401 import asyncio import logging import multiprocessing import time from dataclasses import replace from secrets import token_bytes import pytest from blspy import AugSchemeMPL, G2Element from alvm.casts import int_to_bytes from achi.consensus.block_rewards import calculate_base_farmer_reward from achi.consensus.blockchain import ReceiveBlockResult from achi.consensus.coinbase import create_farmer_coin from achi.consensus.pot_iterations import is_overflow_block from achi.full_node.bundle_tools import detect_potential_template_generator from achi.types.blockchain_format.classgroup import ClassgroupElement from achi.types.blockchain_format.coin import Coin from achi.types.blockchain_format.foliage import TransactionsInfo from achi.types.blockchain_format.program import SerializedProgram from achi.types.blockchain_format.sized_bytes import bytes32 from achi.types.blockchain_format.slots import InfusedChallengeChainSubSlot from achi.types.blockchain_format.vdf import VDFInfo, VDFProof from achi.types.condition_opcodes import ConditionOpcode from achi.types.condition_with_args import ConditionWithArgs from achi.types.end_of_slot_bundle import EndOfSubSlotBundle from achi.types.full_block import FullBlock from achi.types.spend_bundle import SpendBundle from achi.types.unfinished_block import UnfinishedBlock from achi.util.block_tools import BlockTools, get_vdf_info_and_proof from achi.util.errors import Err from achi.util.hash import std_hash from achi.util.ints import uint8, uint64, uint32 from achi.util.merkle_set import MerkleSet from achi.util.recursive_replace import recursive_replace from achi.util.wallet_tools import WalletTool from tests.core.fixtures import default_400_blocks # noqa: F401; noqa: F401 from tests.core.fixtures import default_1000_blocks # noqa: F401 from tests.core.fixtures import default_10000_blocks # noqa: F401 from tests.core.fixtures import default_10000_blocks_compact # noqa: F401 from tests.core.fixtures import 
empty_blockchain  # noqa: F401
from tests.core.fixtures import create_blockchain
from tests.setup_nodes import bt, test_constants

log = logging.getLogger(__name__)

# Deliberately malformed classgroup element, available to negative-path tests.
bad_element = ClassgroupElement.from_bytes(b"\x00")


@pytest.fixture(scope="session")
def event_loop():
    # Share a single asyncio event loop across the whole test session.
    loop = asyncio.get_event_loop()
    yield loop


class TestGenesisBlock:
    # Validation of genesis blocks and of the VDF proofs produced by block tools.
    # NOTE(review): the numbered comments in this file (e.g. "# 2a", "# 3c") appear
    # to reference an external consensus-validation checklist — TODO confirm.

    @pytest.mark.asyncio
    async def test_block_tools_proofs_400(self, default_400_blocks):
        # The genesis-challenge VDF proof produced by block tools must verify.
        vdf, proof = get_vdf_info_and_proof(
            test_constants, ClassgroupElement.get_default_element(), test_constants.GENESIS_CHALLENGE, uint64(231)
        )
        if proof.is_valid(test_constants, ClassgroupElement.get_default_element(), vdf) is False:
            raise Exception("invalid proof")

    @pytest.mark.asyncio
    async def test_block_tools_proofs_1000(self, default_1000_blocks):
        # Same proof check, run alongside the 1000-block fixture.
        vdf, proof = get_vdf_info_and_proof(
            test_constants, ClassgroupElement.get_default_element(), test_constants.GENESIS_CHALLENGE, uint64(231)
        )
        if proof.is_valid(test_constants, ClassgroupElement.get_default_element(), vdf) is False:
            raise Exception("invalid proof")

    @pytest.mark.asyncio
    async def test_block_tools_proofs(self):
        vdf, proof = get_vdf_info_and_proof(
            test_constants, ClassgroupElement.get_default_element(), test_constants.GENESIS_CHALLENGE, uint64(231)
        )
        if proof.is_valid(test_constants, ClassgroupElement.get_default_element(), vdf) is False:
            raise Exception("invalid proof")

    @pytest.mark.asyncio
    async def test_non_overflow_genesis(self, empty_blockchain):
        # A valid non-overflow genesis block becomes the new peak at height 0.
        assert empty_blockchain.get_peak() is None
        genesis = bt.get_consecutive_blocks(1, force_overflow=False)[0]
        result, err, _ = await empty_blockchain.receive_block(genesis)
        assert err is None
        assert result == ReceiveBlockResult.NEW_PEAK
        assert empty_blockchain.get_peak().height == 0

    @pytest.mark.asyncio
    async def test_overflow_genesis(self, empty_blockchain):
        genesis = bt.get_consecutive_blocks(1, force_overflow=True)[0]
        result, err, _ = await empty_blockchain.receive_block(genesis)
        assert err is None
        assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_genesis_empty_slots(self, empty_blockchain):
        # Genesis preceded by many empty sub-slots must still be accepted.
        genesis = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=30)[0]
        result, err, _ = await empty_blockchain.receive_block(genesis)
        assert err is None
        assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_overflow_genesis_empty_slots(self, empty_blockchain):
        genesis = bt.get_consecutive_blocks(1, force_overflow=True, skip_slots=3)[0]
        result, err, _ = await empty_blockchain.receive_block(genesis)
        assert err is None
        assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_genesis_validate_1(self, empty_blockchain):
        # A genesis block whose foliage points at a non-genesis prev hash must fail.
        genesis = bt.get_consecutive_blocks(1, force_overflow=False)[0]
        bad_prev = bytes([1] * 32)
        genesis = recursive_replace(genesis, "foliage.prev_block_hash", bad_prev)
        result, err, _ = await empty_blockchain.receive_block(genesis)
        assert err == Err.INVALID_PREV_BLOCK_HASH


class TestBlockHeaderValidation:
    # Negative tests: mutate one header field at a time with recursive_replace,
    # submit the mutated block, and assert the specific validation error.

    @pytest.mark.asyncio
    async def test_long_chain(self, empty_blockchain, default_1000_blocks):
        blocks = default_1000_blocks
        for block in blocks:
            if (
                len(block.finished_sub_slots) > 0
                and block.finished_sub_slots[0].challenge_chain.subepoch_summary_hash is not None
            ):
                # Sub/Epoch. Try using a bad ssi and difficulty to test 2m and 2n
                new_finished_ss = recursive_replace(
                    block.finished_sub_slots[0],
                    "challenge_chain.new_sub_slot_iters",
                    uint64(10000000),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", [new_finished_ss] + block.finished_sub_slots[1:]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad)
                assert err == Err.INVALID_NEW_SUB_SLOT_ITERS

                new_finished_ss_2 = recursive_replace(
                    block.finished_sub_slots[0],
                    "challenge_chain.new_difficulty",
                    uint64(10000000),
                )
                block_bad_2 = recursive_replace(
                    block, "finished_sub_slots", [new_finished_ss_2] + block.finished_sub_slots[1:]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_2)
                assert err == Err.INVALID_NEW_DIFFICULTY

                # 3c
                new_finished_ss_3: EndOfSubSlotBundle = recursive_replace(
                    block.finished_sub_slots[0],
                    "challenge_chain.subepoch_summary_hash",
                    bytes([0] * 32),
                )
                # Keep the reward chain's cross-hash consistent so only the
                # sub-epoch summary itself is wrong.
                new_finished_ss_3 = recursive_replace(
                    new_finished_ss_3,
                    "reward_chain.challenge_chain_sub_slot_hash",
                    new_finished_ss_3.challenge_chain.get_hash(),
                )
                block_bad_3 = recursive_replace(
                    block, "finished_sub_slots", [new_finished_ss_3] + block.finished_sub_slots[1:]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_3)
                assert err == Err.INVALID_SUB_EPOCH_SUMMARY

                # 3d
                new_finished_ss_4 = recursive_replace(
                    block.finished_sub_slots[0],
                    "challenge_chain.subepoch_summary_hash",
                    None,
                )
                new_finished_ss_4 = recursive_replace(
                    new_finished_ss_4,
                    "reward_chain.challenge_chain_sub_slot_hash",
                    new_finished_ss_4.challenge_chain.get_hash(),
                )
                block_bad_4 = recursive_replace(
                    block, "finished_sub_slots", [new_finished_ss_4] + block.finished_sub_slots[1:]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_4)
                assert err == Err.INVALID_SUB_EPOCH_SUMMARY or err == Err.INVALID_NEW_SUB_SLOT_ITERS
            # The unmutated block must always be accepted.
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
            log.info(
                f"Added block {block.height} total iters {block.total_iters} "
                f"new slot? {len(block.finished_sub_slots)}"
            )
        assert empty_blockchain.get_peak().height == len(blocks) - 1

    @pytest.mark.asyncio
    async def test_unfinished_blocks(self, empty_blockchain):
        # An UnfinishedBlock assembled from a valid FullBlock must validate,
        # both for a normal block and for an overflow block.
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(3)
        for block in blocks[:-1]:
            result, err, _ = await blockchain.receive_block(block)
            assert result == ReceiveBlockResult.NEW_PEAK
        block = blocks[-1]
        unf = UnfinishedBlock(
            block.finished_sub_slots,
            block.reward_chain_block.get_unfinished(),
            block.challenge_chain_sp_proof,
            block.reward_chain_sp_proof,
            block.foliage,
            block.foliage_transaction_block,
            block.transactions_info,
            block.transactions_generator,
            [],
        )
        validate_res = await blockchain.validate_unfinished_block(unf, False)
        err = validate_res.error
        assert err is None
        result, err, _ = await blockchain.receive_block(block)
        blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, force_overflow=True)
        block = blocks[-1]
        unf = UnfinishedBlock(
            block.finished_sub_slots,
            block.reward_chain_block.get_unfinished(),
            block.challenge_chain_sp_proof,
            block.reward_chain_sp_proof,
            block.foliage,
            block.foliage_transaction_block,
            block.transactions_info,
            block.transactions_generator,
            [],
        )
        validate_res = await blockchain.validate_unfinished_block(unf, False)
        assert validate_res.error is None

    @pytest.mark.asyncio
    async def test_empty_genesis(self, empty_blockchain):
        blockchain = empty_blockchain
        for block in bt.get_consecutive_blocks(2, skip_slots=3):
            result, err, _ = await blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_empty_slots_non_genesis(self, empty_blockchain):
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(10)
        for block in blocks:
            result, err, _ = await blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
        # Extend with ten more blocks that each skip two sub-slots.
        blocks = bt.get_consecutive_blocks(10, skip_slots=2, block_list_input=blocks)
        for block in blocks[10:]:
            result, err, _ = await blockchain.receive_block(block)
            assert err is None
        assert blockchain.get_peak().height == 19

    @pytest.mark.asyncio
    async def test_one_sb_per_slot(self, empty_blockchain):
        blockchain = empty_blockchain
        num_blocks = 20
        blocks = []
        for i in range(num_blocks):
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
            result, err, _ = await blockchain.receive_block(blocks[-1])
            assert result == ReceiveBlockResult.NEW_PEAK
        assert blockchain.get_peak().height == num_blocks - 1

    @pytest.mark.asyncio
    async def test_all_overflow(self, empty_blockchain):
        # Rounds of 1..4 overflow blocks, each round skipping a sub-slot.
        blockchain = empty_blockchain
        num_rounds = 5
        blocks = []
        num_blocks = 0
        for i in range(1, num_rounds):
            num_blocks += i
            blocks = bt.get_consecutive_blocks(i, block_list_input=blocks, skip_slots=1, force_overflow=True)
            for block in blocks[-i:]:
                result, err, _ = await blockchain.receive_block(block)
                assert result == ReceiveBlockResult.NEW_PEAK
                assert err is None
        assert blockchain.get_peak().height == num_blocks - 1

    @pytest.mark.asyncio
    async def test_unf_block_overflow(self, empty_blockchain):
        blockchain = empty_blockchain
        blocks = []
        while True:
            # This creates an overflow block, then a normal block, and then an overflow in the next sub-slot
            # blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, force_overflow=True)
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, force_overflow=True)
            await blockchain.receive_block(blocks[-2])
            sb_1 = blockchain.block_record(blocks[-2].header_hash)
            sb_2_next_ss = blocks[-1].total_iters - blocks[-2].total_iters < sb_1.sub_slot_iters
            # We might not get a normal block for sb_2, and we might not get them in the right slots
            # So this while loop keeps trying
            if sb_1.overflow and sb_2_next_ss:
                block = blocks[-1]
                unf = UnfinishedBlock(
                    [],
                    block.reward_chain_block.get_unfinished(),
                    block.challenge_chain_sp_proof,
                    block.reward_chain_sp_proof,
                    block.foliage,
                    block.foliage_transaction_block,
                    block.transactions_info,
                    block.transactions_generator,
                    [],
                )
                validate_res = await blockchain.validate_unfinished_block(unf, skip_overflow_ss_validation=True)
                assert validate_res.error is None
                return None
            await blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_one_sb_per_two_slots(self, empty_blockchain):
        blockchain = empty_blockchain
        num_blocks = 20
        blocks = []
        for i in range(num_blocks):
            # Same thing, but 2 sub-slots per block
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=2)
            result, err, _ = await blockchain.receive_block(blocks[-1])
            assert result == ReceiveBlockResult.NEW_PEAK
        assert blockchain.get_peak().height == num_blocks - 1

    @pytest.mark.asyncio
    async def test_one_sb_per_five_slots(self, empty_blockchain):
        blockchain = empty_blockchain
        num_blocks = 10
        blocks = []
        for i in range(num_blocks):
            # Same thing, but 5 sub-slots per block
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=5)
            result, err, _ = await blockchain.receive_block(blocks[-1])
            assert result == ReceiveBlockResult.NEW_PEAK
        assert blockchain.get_peak().height == num_blocks - 1

    @pytest.mark.asyncio
    async def test_basic_chain_overflow(self, empty_blockchain):
        blocks = bt.get_consecutive_blocks(5, force_overflow=True)
        for block in blocks:
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
        assert empty_blockchain.get_peak().height == len(blocks) - 1

    @pytest.mark.asyncio
    async def test_one_sb_per_two_slots_force_overflow(self, empty_blockchain):
        blockchain = empty_blockchain
        num_blocks = 10
        blocks = []
        for i in range(num_blocks):
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=2, force_overflow=True)
            result, err, _ = await blockchain.receive_block(blocks[-1])
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
        assert blockchain.get_peak().height == num_blocks - 1

    @pytest.mark.asyncio
    async def test_invalid_prev(self, empty_blockchain):
        # 1
        blocks = bt.get_consecutive_blocks(2, force_overflow=False)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_1_bad = recursive_replace(blocks[-1], "foliage.prev_block_hash", bytes([0] * 32))
        result, err, _ = await empty_blockchain.receive_block(block_1_bad)
        assert result == ReceiveBlockResult.DISCONNECTED_BLOCK

    @pytest.mark.asyncio
    async def test_invalid_pospace(self, empty_blockchain):
        # 2
        blocks = bt.get_consecutive_blocks(2, force_overflow=False)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_1_bad = recursive_replace(blocks[-1], "reward_chain_block.proof_of_space.proof", bytes([0] * 32))
        result, err, _ = await empty_blockchain.receive_block(block_1_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.INVALID_POSPACE

    @pytest.mark.asyncio
    async def test_invalid_sub_slot_challenge_hash_genesis(self, empty_blockchain):
        # 2a
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=1)
        new_finished_ss = recursive_replace(
            blocks[0].finished_sub_slots[0],
            "challenge_chain.challenge_chain_end_of_slot_vdf.challenge",
            bytes([2] * 32),
        )
        block_0_bad = recursive_replace(
            blocks[0], "finished_sub_slots", [new_finished_ss] + blocks[0].finished_sub_slots[1:]
        )
        result, err, _ = await empty_blockchain.receive_block(block_0_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.INVALID_PREV_CHALLENGE_SLOT_HASH

    @pytest.mark.asyncio
    async def test_invalid_sub_slot_challenge_hash_non_genesis(self, empty_blockchain):
        # 2b
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=0)
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=1, block_list_input=blocks)
        new_finished_ss = recursive_replace(
            blocks[1].finished_sub_slots[0],
            "challenge_chain.challenge_chain_end_of_slot_vdf.challenge",
            bytes([2] * 32),
        )
        block_1_bad = recursive_replace(
            blocks[1], "finished_sub_slots", [new_finished_ss] + blocks[1].finished_sub_slots[1:]
        )
        _, _, _ = await empty_blockchain.receive_block(blocks[0])
        result, err, _ = await empty_blockchain.receive_block(block_1_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.INVALID_PREV_CHALLENGE_SLOT_HASH

    @pytest.mark.asyncio
    async def test_invalid_sub_slot_challenge_hash_empty_ss(self, empty_blockchain):
        # 2c
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=0)
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=2, block_list_input=blocks)
        # Mutate the last (empty) sub-slot this time.
        new_finished_ss = recursive_replace(
            blocks[1].finished_sub_slots[-1],
            "challenge_chain.challenge_chain_end_of_slot_vdf.challenge",
            bytes([2] * 32),
        )
        block_1_bad = recursive_replace(
            blocks[1], "finished_sub_slots", blocks[1].finished_sub_slots[:-1] + [new_finished_ss]
        )
        _, _, _ = await empty_blockchain.receive_block(blocks[0])
        result, err, _ = await empty_blockchain.receive_block(block_1_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.INVALID_PREV_CHALLENGE_SLOT_HASH

    @pytest.mark.asyncio
    async def test_genesis_no_icc(self, empty_blockchain):
        # 2d
        blocks = bt.get_consecutive_blocks(1, force_overflow=False, skip_slots=1)
        new_finished_ss = recursive_replace(
            blocks[0].finished_sub_slots[0],
            "infused_challenge_chain",
            InfusedChallengeChainSubSlot(
                VDFInfo(
                    bytes([0] * 32),
                    uint64(1200),
                    ClassgroupElement.get_default_element(),
                )
            ),
        )
        block_0_bad = recursive_replace(
            blocks[0], "finished_sub_slots", [new_finished_ss] + blocks[0].finished_sub_slots[1:]
        )
        result, err, _ = await empty_blockchain.receive_block(block_0_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.SHOULD_NOT_HAVE_ICC

    @pytest.mark.asyncio
    async def test_invalid_icc_sub_slot_vdf(self):
        # Uses a custom BlockTools with small iteration constants so infused
        # challenge chains appear quickly; builds its own blockchain instance.
        bt_high_iters = BlockTools(
            constants=test_constants.replace(SUB_SLOT_ITERS_STARTING=(2 ** 12), DIFFICULTY_STARTING=(2 ** 14))
        )
        bc1, connection, db_path = await create_blockchain(bt_high_iters.constants)
        blocks = bt_high_iters.get_consecutive_blocks(10)
        for block in blocks:
            if len(block.finished_sub_slots) > 0 and block.finished_sub_slots[-1].infused_challenge_chain is not None:
                # Bad iters
                new_finished_ss = recursive_replace(
                    block.finished_sub_slots[-1],
                    "infused_challenge_chain",
                    InfusedChallengeChainSubSlot(
                        replace(
                            block.finished_sub_slots[
                                -1
                            ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                            number_of_iterations=10000000,
                        )
                    ),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss]
                )
                result, err, _ = await bc1.receive_block(block_bad)
                assert err == Err.INVALID_ICC_EOS_VDF
                # Bad output
                new_finished_ss_2 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "infused_challenge_chain",
                    InfusedChallengeChainSubSlot(
                        replace(
                            block.finished_sub_slots[
                                -1
                            ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                            output=ClassgroupElement.get_default_element(),
                        )
                    ),
                )
                log.warning(f"Proof: {block.finished_sub_slots[-1].proofs}")
                block_bad_2 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_2]
                )
                result, err, _ = await bc1.receive_block(block_bad_2)
                assert err == Err.INVALID_ICC_EOS_VDF
                # Bad challenge hash
                new_finished_ss_3 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "infused_challenge_chain",
                    InfusedChallengeChainSubSlot(
                        replace(
                            block.finished_sub_slots[
                                -1
                            ].infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                            challenge=bytes([0] * 32),
                        )
                    ),
                )
                block_bad_3 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_3]
                )
                result, err, _ = await bc1.receive_block(block_bad_3)
                assert err == Err.INVALID_ICC_EOS_VDF
                # Bad proof
                new_finished_ss_5 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "proofs.infused_challenge_chain_slot_proof",
                    VDFProof(uint8(0), b"1239819023890", False),
                )
                block_bad_5 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
                )
                result, err, _ = await bc1.receive_block(block_bad_5)
                assert err == Err.INVALID_ICC_EOS_VDF
            result, err, _ = await bc1.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
        # Tear down the locally created blockchain and its database.
        await connection.close()
        bc1.shut_down()
        db_path.unlink()

    @pytest.mark.asyncio
    async def test_invalid_icc_into_cc(self, empty_blockchain):
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(1)
        assert (await blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        case_1, case_2 = False, False
        # Keep generating until both the deficit and non-deficit cases are hit.
        while not case_1 or not case_2:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
            block = blocks[-1]
            if len(block.finished_sub_slots) > 0 and block.finished_sub_slots[-1].infused_challenge_chain is not None:
                if block.finished_sub_slots[-1].reward_chain.deficit == test_constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    # 2g
                    case_1 = True
                    new_finished_ss = recursive_replace(
                        block.finished_sub_slots[-1],
                        "challenge_chain",
                        replace(
                            block.finished_sub_slots[-1].challenge_chain,
                            infused_challenge_chain_sub_slot_hash=bytes([1] * 32),
                        ),
                    )
                else:
                    # 2h
                    case_2 = True
                    new_finished_ss = recursive_replace(
                        block.finished_sub_slots[-1],
                        "challenge_chain",
                        replace(
                            block.finished_sub_slots[-1].challenge_chain,
                            infused_challenge_chain_sub_slot_hash=block.finished_sub_slots[
                                -1
                            ].infused_challenge_chain.get_hash(),
                        ),
                    )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss]
                )
                result, err, _ = await blockchain.receive_block(block_bad)
                assert err == Err.INVALID_ICC_HASH_CC
                # 2i
                new_finished_ss_bad_rc = recursive_replace(
                    block.finished_sub_slots[-1],
                    "reward_chain",
                    replace(block.finished_sub_slots[-1].reward_chain, infused_challenge_chain_sub_slot_hash=None),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_bad_rc]
                )
                result, err, _ = await blockchain.receive_block(block_bad)
                assert err == Err.INVALID_ICC_HASH_RC
            elif len(block.finished_sub_slots) > 0 and block.finished_sub_slots[-1].infused_challenge_chain is None:
                # 2j
                new_finished_ss_bad_cc = recursive_replace(
                    block.finished_sub_slots[-1],
                    "challenge_chain",
                    replace(
                        block.finished_sub_slots[-1].challenge_chain,
                        infused_challenge_chain_sub_slot_hash=bytes([1] * 32),
                    ),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_bad_cc]
                )
                result, err, _ = await blockchain.receive_block(block_bad)
                assert err == Err.INVALID_ICC_HASH_CC
                # 2k
                new_finished_ss_bad_rc = recursive_replace(
                    block.finished_sub_slots[-1],
                    "reward_chain",
                    replace(
                        block.finished_sub_slots[-1].reward_chain, infused_challenge_chain_sub_slot_hash=bytes([1] * 32)
                    ),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_bad_rc]
                )
                result, err, _ = await blockchain.receive_block(block_bad)
                assert err == Err.INVALID_ICC_HASH_RC
            # Finally, add the block properly
            result, err, _ = await blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_empty_slot_no_ses(self, empty_blockchain):
        # 2l
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(1)
        assert (await blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=4)
        new_finished_ss = recursive_replace(
            blocks[-1].finished_sub_slots[-1],
            "challenge_chain",
            replace(blocks[-1].finished_sub_slots[-1].challenge_chain, subepoch_summary_hash=std_hash(b"0")),
        )
        block_bad = recursive_replace(
            blocks[-1], "finished_sub_slots", blocks[-1].finished_sub_slots[:-1] + [new_finished_ss]
        )
        result, err, _ = await blockchain.receive_block(block_bad)
        assert err == Err.INVALID_SUB_EPOCH_SUMMARY_HASH

    @pytest.mark.asyncio
    async def test_empty_sub_slots_epoch(self, empty_blockchain):
        # 2m
        # Tests adding an empty sub slot after the sub-epoch / epoch.
        # Also tests overflow block in epoch
        blocks_base = bt.get_consecutive_blocks(test_constants.EPOCH_BLOCKS)
        blocks_1 = bt.get_consecutive_blocks(1, block_list_input=blocks_base, force_overflow=True)
        blocks_2 = bt.get_consecutive_blocks(1, skip_slots=1, block_list_input=blocks_base, force_overflow=True)
        blocks_3 = bt.get_consecutive_blocks(1, skip_slots=2, block_list_input=blocks_base, force_overflow=True)
        blocks_4 = bt.get_consecutive_blocks(1, block_list_input=blocks_base)
        for block in blocks_base:
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK
        # The four competing extensions must all be accepted (as peak or sibling).
        for block in [blocks_1[-1], blocks_2[-1], blocks_3[-1], blocks_4[-1]]:
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None

    @pytest.mark.asyncio
    async def test_wrong_cc_hash_rc(self, empty_blockchain):
        # 2o
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(1, skip_slots=1)
        blocks = bt.get_consecutive_blocks(1, skip_slots=1, block_list_input=blocks)
        assert (await blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        new_finished_ss = recursive_replace(
            blocks[-1].finished_sub_slots[-1],
            "reward_chain",
            replace(blocks[-1].finished_sub_slots[-1].reward_chain, challenge_chain_sub_slot_hash=bytes([3] * 32)),
        )
        block_1_bad = recursive_replace(
            blocks[-1], "finished_sub_slots", blocks[-1].finished_sub_slots[:-1] + [new_finished_ss]
        )
        result, err, _ = await blockchain.receive_block(block_1_bad)
        assert result == ReceiveBlockResult.INVALID_BLOCK
        assert err == Err.INVALID_CHALLENGE_SLOT_HASH_RC

    @pytest.mark.asyncio
    async def test_invalid_cc_sub_slot_vdf(self, empty_blockchain):
        # 2q
        blocks = bt.get_consecutive_blocks(10)
        index = 1
        for block in blocks:
            print(f"processing block: {index}")
            index += 1
            if len(block.finished_sub_slots):
                # Bad iters
                new_finished_ss = recursive_replace(
                    block.finished_sub_slots[-1],
                    "challenge_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].challenge_chain,
                        "challenge_chain_end_of_slot_vdf.number_of_iterations",
                        uint64(10000000),
                    ),
                )
                # Re-derive the reward chain's cross-hash so only the VDF is wrong.
                new_finished_ss = recursive_replace(
                    new_finished_ss,
                    "reward_chain.challenge_chain_sub_slot_hash",
                    new_finished_ss.challenge_chain.get_hash(),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad)
                assert err == Err.INVALID_CC_EOS_VDF
                # Bad output
                new_finished_ss_2 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "challenge_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].challenge_chain,
                        "challenge_chain_end_of_slot_vdf.output",
                        ClassgroupElement.get_default_element(),
                    ),
                )
                new_finished_ss_2 = recursive_replace(
                    new_finished_ss_2,
                    "reward_chain.challenge_chain_sub_slot_hash",
                    new_finished_ss_2.challenge_chain.get_hash(),
                )
                block_bad_2 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_2]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_2)
                assert err == Err.INVALID_CC_EOS_VDF
                # Bad challenge hash
                new_finished_ss_3 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "challenge_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].challenge_chain,
                        "challenge_chain_end_of_slot_vdf.challenge",
                        bytes([1] * 32),
                    ),
                )
                new_finished_ss_3 = recursive_replace(
                    new_finished_ss_3,
                    "reward_chain.challenge_chain_sub_slot_hash",
                    new_finished_ss_3.challenge_chain.get_hash(),
                )
                block_bad_3 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_3]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_3)
                assert err == Err.INVALID_CC_EOS_VDF or err == Err.INVALID_PREV_CHALLENGE_SLOT_HASH
                # Bad proof
                new_finished_ss_5 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "proofs.challenge_chain_slot_proof",
                    VDFProof(uint8(0), b"1239819023890", False),
                )
                block_bad_5 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_5)
                assert err == Err.INVALID_CC_EOS_VDF
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_invalid_rc_sub_slot_vdf(self, empty_blockchain):
        # 2p
        blocks = bt.get_consecutive_blocks(10)
        for block in blocks:
            if len(block.finished_sub_slots):
                # Bad iters
                new_finished_ss = recursive_replace(
                    block.finished_sub_slots[-1],
                    "reward_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].reward_chain,
                        "end_of_slot_vdf.number_of_iterations",
                        uint64(10000000),
                    ),
                )
                block_bad = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad)
                assert err == Err.INVALID_RC_EOS_VDF
                # Bad output
                new_finished_ss_2 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "reward_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].reward_chain,
                        "end_of_slot_vdf.output",
                        ClassgroupElement.get_default_element(),
                    ),
                )
                block_bad_2 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_2]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_2)
                assert err == Err.INVALID_RC_EOS_VDF
                # Bad challenge hash
                new_finished_ss_3 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "reward_chain",
                    recursive_replace(
                        block.finished_sub_slots[-1].reward_chain,
                        "end_of_slot_vdf.challenge",
                        bytes([1] * 32),
                    ),
                )
                block_bad_3 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_3]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_3)
                assert err == Err.INVALID_RC_EOS_VDF
                # Bad proof
                new_finished_ss_5 = recursive_replace(
                    block.finished_sub_slots[-1],
                    "proofs.reward_chain_slot_proof",
                    VDFProof(uint8(0), b"1239819023890", False),
                )
                block_bad_5 = recursive_replace(
                    block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss_5]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad_5)
                assert err == Err.INVALID_RC_EOS_VDF
            result, err, _ = await empty_blockchain.receive_block(block)
            assert err is None
            assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_genesis_bad_deficit(self, empty_blockchain):
        # 2r
        block = bt.get_consecutive_blocks(1, skip_slots=2)[0]
        new_finished_ss = recursive_replace(
            block.finished_sub_slots[-1],
            "reward_chain",
            recursive_replace(
                block.finished_sub_slots[-1].reward_chain,
                "deficit",
                test_constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1,
            ),
        )
        block_bad = recursive_replace(block, "finished_sub_slots", block.finished_sub_slots[:-1] + [new_finished_ss])
        result, err, _ = await empty_blockchain.receive_block(block_bad)
        assert err == Err.INVALID_DEFICIT

    @pytest.mark.asyncio
    async def test_reset_deficit(self, empty_blockchain):
        # 2s, 2t
        blockchain = empty_blockchain
        blocks = bt.get_consecutive_blocks(2)
        await empty_blockchain.receive_block(blocks[0])
        await empty_blockchain.receive_block(blocks[1])
        case_1, case_2 = False, False
        while not case_1 or not case_2:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
            if len(blocks[-1].finished_sub_slots) > 0:
                new_finished_ss = recursive_replace(
                    blocks[-1].finished_sub_slots[-1],
                    "reward_chain",
                    recursive_replace(
                        blocks[-1].finished_sub_slots[-1].reward_chain,
                        "deficit",
                        uint8(0),
                    ),
                )
                if blockchain.block_record(blocks[-2].header_hash).deficit == 0:
                    case_1 = True
                else:
                    case_2 = True
                block_bad = recursive_replace(
                    blocks[-1], "finished_sub_slots", blocks[-1].finished_sub_slots[:-1] + [new_finished_ss]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad)
                assert err == Err.INVALID_DEFICIT or err == Err.INVALID_ICC_HASH_CC
            result, err, _ = await empty_blockchain.receive_block(blocks[-1])
            assert result == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_genesis_has_ses(self, empty_blockchain):
        # 3a
        block = bt.get_consecutive_blocks(1, skip_slots=1)[0]
        new_finished_ss = recursive_replace(
            block.finished_sub_slots[0],
            "challenge_chain",
            recursive_replace(
                block.finished_sub_slots[0].challenge_chain,
                "subepoch_summary_hash",
                bytes([0] * 32),
            ),
        )
        new_finished_ss = recursive_replace(
            new_finished_ss,
            "reward_chain",
            replace(
                new_finished_ss.reward_chain, challenge_chain_sub_slot_hash=new_finished_ss.challenge_chain.get_hash()
            ),
        )
        block_bad = recursive_replace(block, "finished_sub_slots", [new_finished_ss] + block.finished_sub_slots[1:])
        result, err, _ = await empty_blockchain.receive_block(block_bad)
        assert err == Err.INVALID_SUB_EPOCH_SUMMARY_HASH

    @pytest.mark.asyncio
    async def test_no_ses_if_no_se(self, empty_blockchain):
        # 3b
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        # Keep extending until a block with a finished sub-slot appears.
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if len(blocks[-1].finished_sub_slots) > 0:
                new_finished_ss: EndOfSubSlotBundle = recursive_replace(
                    blocks[-1].finished_sub_slots[0],
                    "challenge_chain",
                    recursive_replace(
                        blocks[-1].finished_sub_slots[0].challenge_chain,
                        "subepoch_summary_hash",
                        bytes([0] * 32),
                    ),
                )
                new_finished_ss = recursive_replace(
                    new_finished_ss,
                    "reward_chain",
                    replace(
                        new_finished_ss.reward_chain,
                        challenge_chain_sub_slot_hash=new_finished_ss.challenge_chain.get_hash(),
                    ),
                )
                block_bad = recursive_replace(
                    blocks[-1], "finished_sub_slots", [new_finished_ss] + blocks[-1].finished_sub_slots[1:]
                )
                result, err, _ = await empty_blockchain.receive_block(block_bad)
                assert err == Err.INVALID_SUB_EPOCH_SUMMARY_HASH
                return None
            await empty_blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_too_many_blocks(self, empty_blockchain):
        # 4: TODO
        pass

    @pytest.mark.asyncio
    async def test_bad_pos(self, empty_blockchain):
        # 5
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad = recursive_replace(blocks[-1], "reward_chain_block.proof_of_space.challenge", std_hash(b""))
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        block_bad = recursive_replace(
            blocks[-1], "reward_chain_block.proof_of_space.pool_contract_puzzle_hash", std_hash(b"")
        )
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        block_bad = recursive_replace(blocks[-1], "reward_chain_block.proof_of_space.size", 62)
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        block_bad = recursive_replace(
            blocks[-1],
            "reward_chain_block.proof_of_space.plot_public_key",
            AugSchemeMPL.key_gen(std_hash(b"1231n")).get_g1(),
        )
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        block_bad = recursive_replace(
            blocks[-1],
            "reward_chain_block.proof_of_space.size",
            32,
        )
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        block_bad = recursive_replace(
            blocks[-1],
            "reward_chain_block.proof_of_space.proof",
            bytes([1] * int(blocks[-1].reward_chain_block.proof_of_space.size * 64 / 8)),
        )
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POSPACE
        # TODO: test not passing the plot filter

    @pytest.mark.asyncio
    async def test_bad_signage_point_index(self, empty_blockchain):
        # 6
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        # An out-of-range signage point index is rejected at construction time.
        with pytest.raises(ValueError):
            block_bad = recursive_replace(
                blocks[-1], "reward_chain_block.signage_point_index", test_constants.NUM_SPS_SUB_SLOT
            )
            assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_SP_INDEX
        with pytest.raises(ValueError):
            block_bad = recursive_replace(
                blocks[-1], "reward_chain_block.signage_point_index", test_constants.NUM_SPS_SUB_SLOT + 1
            )
            assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_SP_INDEX

    @pytest.mark.asyncio
async def test_sp_0_no_sp(self, empty_blockchain): # 7 blocks = [] case_1, case_2 = False, False while not case_1 or not case_2: blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) if blocks[-1].reward_chain_block.signage_point_index == 0: case_1 = True block_bad = recursive_replace(blocks[-1], "reward_chain_block.signage_point_index", uint8(1)) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_SP_INDEX elif not is_overflow_block(test_constants, blocks[-1].reward_chain_block.signage_point_index): case_2 = True block_bad = recursive_replace(blocks[-1], "reward_chain_block.signage_point_index", uint8(0)) error_code = (await empty_blockchain.receive_block(block_bad))[1] assert error_code == Err.INVALID_SP_INDEX or error_code == Err.INVALID_POSPACE assert (await empty_blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK @pytest.mark.asyncio async def test_epoch_overflows(self, empty_blockchain): # 9. TODO. This is hard to test because it requires modifying the block tools to make these special blocks pass @pytest.mark.asyncio async def test_bad_total_iters(self, empty_blockchain): # 10 blocks = bt.get_consecutive_blocks(2) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block_bad = recursive_replace( blocks[-1], "reward_chain_block.total_iters", blocks[-1].reward_chain_block.total_iters + 1 ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_TOTAL_ITERS @pytest.mark.asyncio async def test_bad_rc_sp_vdf(self, empty_blockchain): # 11 blocks = bt.get_consecutive_blocks(1) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK while True: blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) if blocks[-1].reward_chain_block.signage_point_index != 0: block_bad = recursive_replace( blocks[-1], "reward_chain_block.reward_chain_sp_vdf.challenge", std_hash(b"1") ) assert (await 
                        empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
                block_bad = recursive_replace(
                    blocks[-1],
                    "reward_chain_block.reward_chain_sp_vdf.output",
                    bad_element,
                )
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
                block_bad = recursive_replace(
                    blocks[-1],
                    "reward_chain_block.reward_chain_sp_vdf.number_of_iterations",
                    uint64(1111111111111),
                )
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
                block_bad = recursive_replace(
                    blocks[-1],
                    "reward_chain_sp_proof",
                    VDFProof(uint8(0), std_hash(b""), False),
                )
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SP_VDF
                return None
            assert (await empty_blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_bad_rc_sp_sig(self, empty_blockchain):
        """A tampered reward-chain SP signature must fail with INVALID_RC_SIGNATURE."""
        # 12
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad = recursive_replace(blocks[-1], "reward_chain_block.reward_chain_sp_signature", G2Element.generator())
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_SIGNATURE

    @pytest.mark.asyncio
    async def test_bad_cc_sp_vdf(self, empty_blockchain):
        """Tampered challenge-chain SP VDF fields are rejected.

        # 13. Note: does not validate fully due to proof of space being validated first
        """
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].reward_chain_block.signage_point_index != 0:
                block_bad = recursive_replace(
                    blocks[-1], "reward_chain_block.challenge_chain_sp_vdf.challenge", std_hash(b"1")
                )
                assert (await empty_blockchain.receive_block(block_bad))[0] == ReceiveBlockResult.INVALID_BLOCK
                block_bad = recursive_replace(
                    blocks[-1],
                    "reward_chain_block.challenge_chain_sp_vdf.output",
                    bad_element,
                )
                assert (await empty_blockchain.receive_block(block_bad))[0] == ReceiveBlockResult.INVALID_BLOCK
                block_bad = recursive_replace(
                    blocks[-1],
                    "reward_chain_block.challenge_chain_sp_vdf.number_of_iterations",
                    uint64(1111111111111),
                )
                assert (await empty_blockchain.receive_block(block_bad))[0] == ReceiveBlockResult.INVALID_BLOCK
                block_bad = recursive_replace(
                    blocks[-1],
                    "challenge_chain_sp_proof",
                    VDFProof(uint8(0), std_hash(b""), False),
                )
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_SP_VDF
                return None
            assert (await empty_blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_bad_cc_sp_sig(self, empty_blockchain):
        """A tampered challenge-chain SP signature must fail with INVALID_CC_SIGNATURE."""
        # 14
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad = recursive_replace(
            blocks[-1], "reward_chain_block.challenge_chain_sp_signature", G2Element.generator()
        )
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_SIGNATURE

    @pytest.mark.asyncio
    async def test_is_transaction_block(self, empty_blockchain):
        # 15: TODO
        pass

    @pytest.mark.asyncio
    async def test_bad_foliage_sb_sig(self, empty_blockchain):
        """A tampered foliage block-data signature must fail with INVALID_PLOT_SIGNATURE."""
        # 16
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad = recursive_replace(blocks[-1], "foliage.foliage_block_data_signature", G2Element.generator())
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PLOT_SIGNATURE

    @pytest.mark.asyncio
    async def test_bad_foliage_transaction_block_sig(self, empty_blockchain):
        """A tampered foliage transaction-block signature must be rejected."""
        # 17
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                block_bad = recursive_replace(
                    blocks[-1], "foliage.foliage_transaction_block_signature", G2Element.generator()
                )
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PLOT_SIGNATURE
                return None
            assert (await empty_blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK

    @pytest.mark.asyncio
    async def test_unfinished_reward_chain_sb_hash(self, empty_blockchain):
        """A wrong unfinished reward-block hash must fail with INVALID_URSB_HASH."""
        # 18
        blocks = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad: FullBlock = recursive_replace(
            blocks[-1], "foliage.foliage_block_data.unfinished_reward_block_hash", std_hash(b"2")
        )
        # Re-sign the foliage so only the tampered field is invalid.
        new_m = block_bad.foliage.foliage_block_data.get_hash()
        new_fsb_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block_bad = recursive_replace(block_bad, "foliage.foliage_block_data_signature", new_fsb_sig)
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_URSB_HASH

    @pytest.mark.asyncio
    async def test_pool_target_height(self, empty_blockchain):
        """A pool target whose max_height is below the block height must be rejected."""
        # 19
        blocks = bt.get_consecutive_blocks(3)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        assert (await empty_blockchain.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK
        block_bad: FullBlock = recursive_replace(blocks[-1], "foliage.foliage_block_data.pool_target.max_height", 1)
        new_m = block_bad.foliage.foliage_block_data.get_hash()
        new_fsb_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block_bad = recursive_replace(block_bad, "foliage.foliage_block_data_signature", new_fsb_sig)
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.OLD_POOL_TARGET

    @pytest.mark.asyncio
    async def test_pool_target_pre_farm(self, empty_blockchain):
        """A genesis block paying a non-prefarm puzzle hash must fail with INVALID_PREFARM."""
        # 20a
        blocks = bt.get_consecutive_blocks(1)
        block_bad: FullBlock = recursive_replace(
            blocks[-1], "foliage.foliage_block_data.pool_target.puzzle_hash", std_hash(b"12")
        )
        new_m = block_bad.foliage.foliage_block_data.get_hash()
        new_fsb_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block_bad = recursive_replace(block_bad, "foliage.foliage_block_data_signature", new_fsb_sig)
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PREFARM

    @pytest.mark.asyncio
    async def test_pool_target_signature(self, empty_blockchain):
        """A bad pool signature (pool-pk block) must fail with INVALID_POOL_SIGNATURE."""
        # 20b
        blocks_initial = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks_initial[0]))[0] == ReceiveBlockResult.NEW_PEAK
        assert (await empty_blockchain.receive_block(blocks_initial[1]))[0] == ReceiveBlockResult.NEW_PEAK
        attempts = 0
        while True:
            # Go until we get a block that has a pool pk, as opposed to a pool contract
            blocks = bt.get_consecutive_blocks(
                1, blocks_initial, seed=std_hash(attempts.to_bytes(4, byteorder="big", signed=False))
            )
            if blocks[-1].foliage.foliage_block_data.pool_signature is not None:
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage.foliage_block_data.pool_signature", G2Element.generator()
                )
                new_m = block_bad.foliage.foliage_block_data.get_hash()
                new_fsb_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_block_data_signature", new_fsb_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == \
                    Err.INVALID_POOL_SIGNATURE
                return None
            attempts += 1

    @pytest.mark.asyncio
    async def test_pool_target_contract(self, empty_blockchain):
        """A tampered pool-contract puzzle hash must fail with INVALID_POOL_TARGET."""
        # 20c invalid pool target with contract
        blocks_initial = bt.get_consecutive_blocks(2)
        assert (await empty_blockchain.receive_block(blocks_initial[0]))[0] == ReceiveBlockResult.NEW_PEAK
        assert (await empty_blockchain.receive_block(blocks_initial[1]))[0] == ReceiveBlockResult.NEW_PEAK
        attempts = 0
        while True:
            # Go until we get a block that has a pool contract opposed to a pool pk
            blocks = bt.get_consecutive_blocks(
                1, blocks_initial, seed=std_hash(attempts.to_bytes(4, byteorder="big", signed=False))
            )
            if blocks[-1].foliage.foliage_block_data.pool_signature is None:
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage.foliage_block_data.pool_target.puzzle_hash", bytes32(token_bytes(32))
                )
                new_m = block_bad.foliage.foliage_block_data.get_hash()
                new_fsb_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_block_data_signature", new_fsb_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_POOL_TARGET
                return None
            attempts += 1

    @pytest.mark.asyncio
    async def test_foliage_data_presence(self, empty_blockchain):
        """Foliage tx-block hash presence must match whether the block is a tx block."""
        # 22
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        case_1, case_2 = False, False
        while not case_1 or not case_2:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                case_1 = True
                block_bad: FullBlock = recursive_replace(blocks[-1], "foliage.foliage_transaction_block_hash", None)
            else:
                case_2 = True
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage.foliage_transaction_block_hash", std_hash(b"")
                )
            err_code = (await empty_blockchain.receive_block(block_bad))[1]
            assert err_code == Err.INVALID_FOLIAGE_BLOCK_PRESENCE or err_code == Err.INVALID_IS_TRANSACTION_BLOCK
            await empty_blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_foliage_transaction_block_hash(self, empty_blockchain):
        """A wrong foliage tx-block hash must fail with INVALID_FOLIAGE_BLOCK_HASH."""
        # 23
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        case_1, case_2 = False, False
        while not case_1 or not case_2:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage.foliage_transaction_block_hash", std_hash(b"2")
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_FOLIAGE_BLOCK_HASH
                return None
            await empty_blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_genesis_bad_prev_block(self, empty_blockchain):
        """Genesis with a wrong prev tx-block hash must fail with INVALID_PREV_BLOCK_HASH."""
        # 24a
        blocks = bt.get_consecutive_blocks(1)
        block_bad: FullBlock = recursive_replace(
            blocks[-1], "foliage_transaction_block.prev_transaction_block_hash", std_hash(b"2")
        )
        block_bad: FullBlock = recursive_replace(
            block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
        )
        new_m = block_bad.foliage.foliage_transaction_block_hash
        new_fbh_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
        assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PREV_BLOCK_HASH

    @pytest.mark.asyncio
    async def test_bad_prev_block_non_genesis(self, empty_blockchain):
        """Non-genesis wrong prev tx-block hash must fail with INVALID_PREV_BLOCK_HASH."""
        # 24b
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage_transaction_block.prev_transaction_block_hash", std_hash(b"2")
                )
                block_bad: FullBlock = recursive_replace(
                    block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PREV_BLOCK_HASH
                return None
            await empty_blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_bad_filter_hash(self, empty_blockchain):
        """A wrong transactions-filter hash must fail with INVALID_TRANSACTIONS_FILTER_HASH."""
        # 25
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                block_bad: FullBlock = recursive_replace(
                    blocks[-1], "foliage_transaction_block.filter_hash", std_hash(b"2")
                )
                block_bad: FullBlock = recursive_replace(
                    block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_TRANSACTIONS_FILTER_HASH
                return None
            await empty_blockchain.receive_block(blocks[-1])

    @pytest.mark.asyncio
    async def test_bad_timestamp(self, empty_blockchain):
        """Timestamps not after the previous tx block, or too far in the future, are rejected."""
        # 26
        blocks = bt.get_consecutive_blocks(1)
        assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        while True:
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
            if blocks[-1].foliage_transaction_block is not None:
                # Timestamp strictly before the previous tx block.
                block_bad: FullBlock = recursive_replace(
                    blocks[-1],
                    "foliage_transaction_block.timestamp",
                    blocks[0].foliage_transaction_block.timestamp - 10,
                )
                block_bad: FullBlock = recursive_replace(
                    block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.TIMESTAMP_TOO_FAR_IN_PAST
                # Timestamp equal to the previous tx block.
                block_bad: FullBlock = recursive_replace(
                    blocks[-1],
                    "foliage_transaction_block.timestamp",
                    blocks[0].foliage_transaction_block.timestamp,
                )
                block_bad: FullBlock = recursive_replace(
                    block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad, "foliage.foliage_transaction_block_signature", new_fbh_sig)
                assert (await empty_blockchain.receive_block(block_bad))[1] == Err.TIMESTAMP_TOO_FAR_IN_PAST
                # Timestamp far in the future.
                block_bad: FullBlock = recursive_replace(
                    blocks[-1],
                    "foliage_transaction_block.timestamp",
                    blocks[0].foliage_transaction_block.timestamp + 10000000,
                )
                block_bad: FullBlock = recursive_replace(
                    block_bad, "foliage.foliage_transaction_block_hash", block_bad.foliage_transaction_block.get_hash()
                )
                new_m = block_bad.foliage.foliage_transaction_block_hash
                new_fbh_sig = bt.get_plot_signature(
                    new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key
                )
                block_bad = recursive_replace(block_bad,
"foliage.foliage_transaction_block_signature", new_fbh_sig) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.TIMESTAMP_TOO_FAR_IN_FUTURE return None await empty_blockchain.receive_block(blocks[-1]) @pytest.mark.asyncio async def test_height(self, empty_blockchain): # 27 blocks = bt.get_consecutive_blocks(2) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block_bad: FullBlock = recursive_replace(blocks[-1], "reward_chain_block.height", 2) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_HEIGHT @pytest.mark.asyncio async def test_height_genesis(self, empty_blockchain): # 27 blocks = bt.get_consecutive_blocks(1) block_bad: FullBlock = recursive_replace(blocks[-1], "reward_chain_block.height", 1) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_PREV_BLOCK_HASH @pytest.mark.asyncio async def test_weight(self, empty_blockchain): # 28 blocks = bt.get_consecutive_blocks(2) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block_bad: FullBlock = recursive_replace(blocks[-1], "reward_chain_block.weight", 22131) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_WEIGHT @pytest.mark.asyncio async def test_weight_genesis(self, empty_blockchain): # 28 blocks = bt.get_consecutive_blocks(1) block_bad: FullBlock = recursive_replace(blocks[-1], "reward_chain_block.weight", 0) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_WEIGHT @pytest.mark.asyncio async def test_bad_cc_ip_vdf(self, empty_blockchain): # 29 blocks = bt.get_consecutive_blocks(1) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) block_bad = recursive_replace(blocks[-1], "reward_chain_block.challenge_chain_ip_vdf.challenge", std_hash(b"1")) assert (await empty_blockchain.receive_block(block_bad))[1] == 
Err.INVALID_CC_IP_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.challenge_chain_ip_vdf.output", bad_element, ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_IP_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.challenge_chain_ip_vdf.number_of_iterations", uint64(1111111111111), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_IP_VDF block_bad = recursive_replace( blocks[-1], "challenge_chain_ip_proof", VDFProof(uint8(0), std_hash(b""), False), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_CC_IP_VDF @pytest.mark.asyncio async def test_bad_rc_ip_vdf(self, empty_blockchain): # 30 blocks = bt.get_consecutive_blocks(1) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) block_bad = recursive_replace(blocks[-1], "reward_chain_block.reward_chain_ip_vdf.challenge", std_hash(b"1")) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.reward_chain_ip_vdf.output", bad_element, ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.reward_chain_ip_vdf.number_of_iterations", uint64(1111111111111), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_ip_proof", VDFProof(uint8(0), std_hash(b""), False), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_RC_IP_VDF @pytest.mark.asyncio async def test_bad_icc_ip_vdf(self, empty_blockchain): # 31 blocks = bt.get_consecutive_blocks(1) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) block_bad = 
recursive_replace( blocks[-1], "reward_chain_block.infused_challenge_chain_ip_vdf.challenge", std_hash(b"1") ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.infused_challenge_chain_ip_vdf.output", bad_element, ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF block_bad = recursive_replace( blocks[-1], "reward_chain_block.infused_challenge_chain_ip_vdf.number_of_iterations", uint64(1111111111111), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF block_bad = recursive_replace( blocks[-1], "infused_challenge_chain_ip_proof", VDFProof(uint8(0), std_hash(b""), False), ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_ICC_VDF @pytest.mark.asyncio async def test_reward_block_hash(self, empty_blockchain): # 32 blocks = bt.get_consecutive_blocks(2) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block_bad: FullBlock = recursive_replace(blocks[-1], "foliage.reward_block_hash", std_hash(b"")) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_REWARD_BLOCK_HASH @pytest.mark.asyncio async def test_reward_block_hash_2(self, empty_blockchain): # 33 blocks = bt.get_consecutive_blocks(1) block_bad: FullBlock = recursive_replace(blocks[0], "reward_chain_block.is_transaction_block", False) block_bad: FullBlock = recursive_replace( block_bad, "foliage.reward_block_hash", block_bad.reward_chain_block.get_hash() ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_FOLIAGE_BLOCK_PRESENCE assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK # Test one which should not be a tx block while True: blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) if not 
blocks[-1].is_transaction_block(): block_bad: FullBlock = recursive_replace(blocks[-1], "reward_chain_block.is_transaction_block", True) block_bad: FullBlock = recursive_replace( block_bad, "foliage.reward_block_hash", block_bad.reward_chain_block.get_hash() ) assert (await empty_blockchain.receive_block(block_bad))[1] == Err.INVALID_FOLIAGE_BLOCK_PRESENCE return None assert (await empty_blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK class TestPreValidation: @pytest.mark.asyncio async def test_pre_validation_fails_bad_blocks(self, empty_blockchain): blocks = bt.get_consecutive_blocks(2) assert (await empty_blockchain.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block_bad = recursive_replace( blocks[-1], "reward_chain_block.total_iters", blocks[-1].reward_chain_block.total_iters + 1 ) res = await empty_blockchain.pre_validate_blocks_multiprocessing([blocks[0], block_bad], {}) assert res[0].error is None assert res[1].error is not None @pytest.mark.asyncio async def test_pre_validation(self, empty_blockchain, default_1000_blocks): blocks = default_1000_blocks[:100] start = time.time() n_at_a_time = min(multiprocessing.cpu_count(), 32) times_pv = [] times_rb = [] for i in range(0, len(blocks), n_at_a_time): end_i = min(i + n_at_a_time, len(blocks)) blocks_to_validate = blocks[i:end_i] start_pv = time.time() res = await empty_blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate, {}) end_pv = time.time() times_pv.append(end_pv - start_pv) assert res is not None for n in range(end_i - i): assert res[n] is not None assert res[n].error is None block = blocks_to_validate[n] start_rb = time.time() result, err, _ = await empty_blockchain.receive_block(block, res[n]) end_rb = time.time() times_rb.append(end_rb - start_rb) assert err is None assert result == ReceiveBlockResult.NEW_PEAK log.info( f"Added block {block.height} total iters {block.total_iters} " f"new slot? 
{len(block.finished_sub_slots)}, time {end_rb - start_rb}" ) end = time.time() log.info(f"Total time: {end - start} seconds") log.info(f"Average pv: {sum(times_pv)/(len(blocks)/n_at_a_time)}") log.info(f"Average rb: {sum(times_rb)/(len(blocks))}") class TestBodyValidation: @pytest.mark.asyncio async def test_not_tx_block_but_has_data(self, empty_blockchain): # 1 b = empty_blockchain blocks = bt.get_consecutive_blocks(1) while blocks[-1].foliage_transaction_block is not None: assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks(1, block_list_input=blocks) original_block: FullBlock = blocks[-1] block = recursive_replace(original_block, "transactions_generator", SerializedProgram()) assert (await b.receive_block(block))[1] == Err.NOT_BLOCK_BUT_HAS_DATA h = std_hash(b"") i = uint64(1) block = recursive_replace( original_block, "transactions_info", TransactionsInfo(h, h, G2Element(), uint64(1), uint64(1), []), ) assert (await b.receive_block(block))[1] == Err.NOT_BLOCK_BUT_HAS_DATA block = recursive_replace(original_block, "transactions_generator_ref_list", [i]) assert (await b.receive_block(block))[1] == Err.NOT_BLOCK_BUT_HAS_DATA @pytest.mark.asyncio async def test_tx_block_missing_data(self, empty_blockchain): # 2 b = empty_blockchain blocks = bt.get_consecutive_blocks(2, guarantee_transaction_block=True) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK block = recursive_replace( blocks[-1], "foliage_transaction_block", None, ) err = (await b.receive_block(block))[1] assert err == Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA or err == Err.INVALID_FOLIAGE_BLOCK_PRESENCE block = recursive_replace( blocks[-1], "transactions_info", None, ) try: err = (await b.receive_block(block))[1] except AssertionError: return None assert err == Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA or err == Err.INVALID_FOLIAGE_BLOCK_PRESENCE @pytest.mark.asyncio async def test_invalid_transactions_info_hash(self, 
                                                  empty_blockchain):
        """A wrong transactions_info hash must fail with INVALID_TRANSACTIONS_INFO_HASH."""
        # 3
        b = empty_blockchain
        blocks = bt.get_consecutive_blocks(2, guarantee_transaction_block=True)
        assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        h = std_hash(b"")
        block = recursive_replace(
            blocks[-1],
            "foliage_transaction_block.transactions_info_hash",
            h,
        )
        # Re-hash and re-sign the foliage so only the tampered field is invalid.
        block = recursive_replace(
            block, "foliage.foliage_transaction_block_hash", std_hash(block.foliage_transaction_block)
        )
        new_m = block.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block = recursive_replace(block, "foliage.foliage_transaction_block_signature", new_fsb_sig)
        err = (await b.receive_block(block))[1]
        assert err == Err.INVALID_TRANSACTIONS_INFO_HASH

    @pytest.mark.asyncio
    async def test_invalid_transactions_block_hash(self, empty_blockchain):
        """A wrong foliage tx-block hash must fail with INVALID_FOLIAGE_BLOCK_HASH."""
        # 4
        b = empty_blockchain
        blocks = bt.get_consecutive_blocks(2, guarantee_transaction_block=True)
        assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        h = std_hash(b"")
        block = recursive_replace(blocks[-1], "foliage.foliage_transaction_block_hash", h)
        new_m = block.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m, blocks[-1].reward_chain_block.proof_of_space.plot_public_key)
        block = recursive_replace(block, "foliage.foliage_transaction_block_signature", new_fsb_sig)
        err = (await b.receive_block(block))[1]
        assert err == Err.INVALID_FOLIAGE_BLOCK_HASH

    @pytest.mark.asyncio
    async def test_invalid_reward_claims(self, empty_blockchain):
        """Too few, too many, or duplicated reward claims must fail with INVALID_REWARD_COINS."""
        # 5
        b = empty_blockchain
        blocks = bt.get_consecutive_blocks(2, guarantee_transaction_block=True)
        assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        block: FullBlock = blocks[-1]

        # Too few
        too_few_reward_claims = block.transactions_info.reward_claims_incorporated[:-1]
        block_2: FullBlock = recursive_replace(
            block, "transactions_info.reward_claims_incorporated", too_few_reward_claims
        )
        block_2 = recursive_replace(
            block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash()
        )
        block_2 = recursive_replace(
            block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash()
        )
        new_m = block_2.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key)
        block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig)
        err = (await b.receive_block(block_2))[1]
        assert err == Err.INVALID_REWARD_COINS

        # Too many
        h = std_hash(b"")
        too_many_reward_claims = block.transactions_info.reward_claims_incorporated + [
            Coin(h, h, too_few_reward_claims[0].amount)
        ]
        block_2 = recursive_replace(block, "transactions_info.reward_claims_incorporated", too_many_reward_claims)
        block_2 = recursive_replace(
            block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash()
        )
        block_2 = recursive_replace(
            block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash()
        )
        new_m = block_2.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key)
        block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig)
        err = (await b.receive_block(block_2))[1]
        assert err == Err.INVALID_REWARD_COINS

        # Duplicates
        duplicate_reward_claims = block.transactions_info.reward_claims_incorporated + [
            block.transactions_info.reward_claims_incorporated[-1]
        ]
        block_2 = recursive_replace(block, "transactions_info.reward_claims_incorporated", duplicate_reward_claims)
        block_2 = recursive_replace(
            block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash()
        )
        block_2 = recursive_replace(
            block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash()
        )
        new_m = block_2.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key)
        block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig)
        err = (await b.receive_block(block_2))[1]
        assert err == Err.INVALID_REWARD_COINS

    @pytest.mark.asyncio
    async def test_initial_freeze(self, empty_blockchain):
        """Spending during the initial transaction freeze must be rejected."""
        # 6
        b = empty_blockchain
        blocks = bt.get_consecutive_blocks(
            3,
            guarantee_transaction_block=True,
            pool_reward_puzzle_hash=bt.pool_ph,
            farmer_reward_puzzle_hash=bt.pool_ph,
            timelord_reward_puzzle_hash=bt.pool_ph,
            genesis_timestamp=time.time() - 1000,
        )
        assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK
        assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK
        assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK
        wt: WalletTool = bt.get_pool_wallet_tool()
        tx: SpendBundle = wt.generate_signed_transaction(
            10, wt.get_new_puzzlehash(), list(blocks[2].get_included_reward_coins())[0]
        )
        blocks = bt.get_consecutive_blocks(
            1,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            transaction_data=tx,
        )
        err = (await b.receive_block(blocks[-1]))[1]
        assert err == Err.INITIAL_TRANSACTION_FREEZE

    @pytest.mark.asyncio
    async def test_invalid_transactions_generator_hash(self, empty_blockchain):
        """A wrong generator root must be rejected."""
        # 7
        b = empty_blockchain
        blocks = bt.get_consecutive_blocks(2, guarantee_transaction_block=True)
        assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK

        # No tx should have all zeroes
        block: FullBlock = blocks[-1]
        block_2 = recursive_replace(block, "transactions_info.generator_root", bytes([1] * 32))
        block_2 = recursive_replace(
            block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash()
        )
        block_2 = recursive_replace(
            block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash()
        )
        new_m = block_2.foliage.foliage_transaction_block_hash
        new_fsb_sig = bt.get_plot_signature(new_m,
block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_GENERATOR_HASH assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks( 2, block_list_input=blocks, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[3]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) # Non empty generator hash must be correct block = blocks[-1] block_2 = recursive_replace(block, "transactions_info.generator_root", bytes([0] * 32)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_GENERATOR_HASH @pytest.mark.asyncio async def test_invalid_transactions_ref_list(self, empty_blockchain): # No generator should have [1]s for the root b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, 
timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK block: FullBlock = blocks[-1] block_2 = recursive_replace(block, "transactions_info.generator_refs_root", bytes([0] * 32)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT # No generator should have no refs list block_2 = recursive_replace(block, "transactions_generator_ref_list", [uint32(0)]) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT # Hash should be correct when there is a ref list assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks(5, block_list_input=blocks, guarantee_transaction_block=False) for block in blocks[-5:]: assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK generator_arg = detect_potential_template_generator(blocks[-1].height, 
blocks[-1].transactions_generator) assert generator_arg is not None blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx, previous_generator=generator_arg, ) block = blocks[-1] assert len(block.transactions_generator_ref_list) > 0 block_2 = recursive_replace(block, "transactions_info.generator_refs_root", bytes([1] * 32)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT # Too many heights block_2 = recursive_replace(block, "transactions_generator_ref_list", [block.height - 2, block.height - 1]) err = (await b.receive_block(block_2))[1] assert err == Err.GENERATOR_REF_HAS_NO_GENERATOR assert (await b.pre_validate_blocks_multiprocessing([block_2], {})) is None # Not tx block for h in range(0, block.height - 1): block_2 = recursive_replace(block, "transactions_generator_ref_list", [h]) err = (await b.receive_block(block_2))[1] assert err == Err.GENERATOR_REF_HAS_NO_GENERATOR or err == Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT assert (await b.pre_validate_blocks_multiprocessing([block_2], {})) is None @pytest.mark.asyncio async def test_cost_exceeds_max(self, empty_blockchain): # 7 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == 
ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() condition_dict = {ConditionOpcode.CREATE_COIN: []} for i in range(7000): output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [bt.pool_ph, int_to_bytes(i)]) condition_dict[ConditionOpcode.CREATE_COIN].append(output) tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0], condition_dic=condition_dict ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) assert (await b.receive_block(blocks[-1]))[1] == Err.BLOCK_COST_EXCEEDS_MAX @pytest.mark.asyncio async def test_alvm_must_not_fail(self, empty_blockchain): # 8 pass @pytest.mark.asyncio async def test_invalid_cost_in_block(self, empty_blockchain): # 9 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) block: FullBlock = blocks[-1] # zero block_2: FullBlock = recursive_replace(block, "transactions_info.cost", uint64(0)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, 
"foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_BLOCK_COST # too low block_2: FullBlock = recursive_replace(block, "transactions_info.cost", uint64(1)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.GENERATOR_RUNTIME_ERROR # too high block_2: FullBlock = recursive_replace(block, "transactions_info.cost", uint64(1000000)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_BLOCK_COST err = (await b.receive_block(block))[1] assert err is None @pytest.mark.asyncio async def test_max_coin_amount(self): # 10 # TODO: fix, this is not reaching validation. 
Because we can't create a block with such amounts due to uint64 # limit in Coin pass # # new_test_constants = test_constants.replace( # **{"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bt.pool_ph, "GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bt.pool_ph} # ) # b, connection, db_path = await create_blockchain(new_test_constants) # bt_2 = BlockTools(new_test_constants) # bt_2.constants = bt_2.constants.replace( # **{"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bt.pool_ph, "GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bt.pool_ph} # ) # blocks = bt_2.get_consecutive_blocks( # 3, # guarantee_transaction_block=True, # farmer_reward_puzzle_hash=bt.pool_ph, # pool_reward_puzzle_hash=bt.pool_ph, # ) # assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK # assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK # assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK # # wt: WalletTool = bt_2.get_pool_wallet_tool() # # condition_dict = {ConditionOpcode.CREATE_COIN: []} # output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [bt_2.pool_ph, int_to_bytes(2 ** 64)]) # condition_dict[ConditionOpcode.CREATE_COIN].append(output) # # tx: SpendBundle = wt.generate_signed_transaction_multiple_coins( # 10, # wt.get_new_puzzlehash(), # list(blocks[1].get_included_reward_coins()), # condition_dic=condition_dict, # ) # try: # blocks = bt_2.get_consecutive_blocks( # 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx # ) # assert False # except Exception as e: # pass # await connection.close() # b.shut_down() # db_path.unlink() @pytest.mark.asyncio async def test_invalid_merkle_roots(self, empty_blockchain): # 11 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await 
b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) block: FullBlock = blocks[-1] merkle_set = MerkleSet() # additions block_2 = recursive_replace(block, "foliage_transaction_block.additions_root", merkle_set.get_root()) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.BAD_ADDITION_ROOT # removals merkle_set.add_already_hashed(std_hash(b"1")) block_2 = recursive_replace(block, "foliage_transaction_block.removals_root", merkle_set.get_root()) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.BAD_REMOVAL_ROOT @pytest.mark.asyncio async def test_invalid_filter(self, empty_blockchain): # 12 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await 
b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) block: FullBlock = blocks[-1] block_2 = recursive_replace(block, "foliage_transaction_block.filter_hash", std_hash(b"3")) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert err == Err.INVALID_TRANSACTIONS_FILTER_HASH @pytest.mark.asyncio async def test_duplicate_outputs(self, empty_blockchain): # 13 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() condition_dict = {ConditionOpcode.CREATE_COIN: []} for i in range(2): output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [bt.pool_ph, int_to_bytes(1)]) condition_dict[ConditionOpcode.CREATE_COIN].append(output) tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), 
list(blocks[-1].get_included_reward_coins())[0], condition_dic=condition_dict ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) assert (await b.receive_block(blocks[-1]))[1] == Err.DUPLICATE_OUTPUT @pytest.mark.asyncio async def test_duplicate_removals(self, empty_blockchain): # 14 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) tx_2: SpendBundle = wt.generate_signed_transaction( 11, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) agg = SpendBundle.aggregate([tx, tx_2]) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=agg ) assert (await b.receive_block(blocks[-1]))[1] == Err.DOUBLE_SPEND @pytest.mark.asyncio async def test_double_spent_in_coin_store(self, empty_blockchain): # 15 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), 
list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK tx_2: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-2].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx_2 ) assert (await b.receive_block(blocks[-1]))[1] == Err.DOUBLE_SPEND @pytest.mark.asyncio async def test_double_spent_in_reorg(self, empty_blockchain): # 15 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK new_coin: Coin = tx.additions()[0] tx_2: SpendBundle = wt.generate_signed_transaction(10, wt.get_new_puzzlehash(), new_coin) # This is fine because coin exists blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx_2 ) assert (await b.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK blocks = bt.get_consecutive_blocks(5, block_list_input=blocks, guarantee_transaction_block=True) for block in blocks[-5:]: assert (await b.receive_block(block))[0] == 
ReceiveBlockResult.NEW_PEAK blocks_reorg = bt.get_consecutive_blocks(2, block_list_input=blocks[:-7], guarantee_transaction_block=True) assert (await b.receive_block(blocks_reorg[-2]))[0] == ReceiveBlockResult.ADDED_AS_ORPHAN assert (await b.receive_block(blocks_reorg[-1]))[0] == ReceiveBlockResult.ADDED_AS_ORPHAN # Coin does not exist in reorg blocks_reorg = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg, guarantee_transaction_block=True, transaction_data=tx_2 ) assert (await b.receive_block(blocks_reorg[-1]))[1] == Err.UNKNOWN_UNSPENT # Finally add the block to the fork (spending both in same bundle, this is ephemeral) agg = SpendBundle.aggregate([tx, tx_2]) blocks_reorg = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg[:-1], guarantee_transaction_block=True, transaction_data=agg ) assert (await b.receive_block(blocks_reorg[-1]))[1] is None blocks_reorg = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg, guarantee_transaction_block=True, transaction_data=tx_2 ) assert (await b.receive_block(blocks_reorg[-1]))[1] == Err.DOUBLE_SPEND_IN_FORK rewards_ph = wt.get_new_puzzlehash() blocks_reorg = bt.get_consecutive_blocks( 10, block_list_input=blocks_reorg[:-1], guarantee_transaction_block=True, farmer_reward_puzzle_hash=rewards_ph, ) for block in blocks_reorg[-10:]: r, e, _ = await b.receive_block(block) assert e is None # ephemeral coin is spent first_coin = await b.coin_store.get_coin_record(new_coin.name()) assert first_coin is not None and first_coin.spent second_coin = await b.coin_store.get_coin_record(tx_2.additions()[0].name()) assert second_coin is not None and not second_coin.spent farmer_coin = create_farmer_coin( blocks_reorg[-1].height, rewards_ph, calculate_base_farmer_reward(blocks_reorg[-1].height), bt.constants.GENESIS_CHALLENGE, ) tx_3: SpendBundle = wt.generate_signed_transaction(10, wt.get_new_puzzlehash(), farmer_coin) blocks_reorg = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg, 
guarantee_transaction_block=True, transaction_data=tx_3 ) assert (await b.receive_block(blocks_reorg[-1]))[1] is None farmer_coin = await b.coin_store.get_coin_record(farmer_coin.name()) assert first_coin is not None and farmer_coin.spent @pytest.mark.asyncio async def test_minting_coin(self, empty_blockchain): # 16 TODO # 17 is tested in mempool tests pass @pytest.mark.asyncio async def test_max_coin_amount_fee(self): # 18 TODO: we can't create a block with such amounts due to uint64 pass @pytest.mark.asyncio async def test_invalid_fees_in_block(self, empty_blockchain): # 19 b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[-1].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx ) block: FullBlock = blocks[-1] # wrong feees block_2: FullBlock = recursive_replace(block, "transactions_info.fees", uint64(1239)) block_2 = recursive_replace( block_2, "foliage_transaction_block.transactions_info_hash", block_2.transactions_info.get_hash() ) block_2 = recursive_replace( block_2, "foliage.foliage_transaction_block_hash", block_2.foliage_transaction_block.get_hash() ) new_m = block_2.foliage.foliage_transaction_block_hash new_fsb_sig = bt.get_plot_signature(new_m, block.reward_chain_block.proof_of_space.plot_public_key) block_2 = recursive_replace(block_2, "foliage.foliage_transaction_block_signature", new_fsb_sig) err = (await b.receive_block(block_2))[1] assert 
err == Err.INVALID_BLOCK_FEE_AMOUNT class TestReorgs: @pytest.mark.asyncio async def test_basic_reorg(self, empty_blockchain): b = empty_blockchain blocks = bt.get_consecutive_blocks(15) for block in blocks: assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK assert b.get_peak().height == 14 blocks_reorg_chain = bt.get_consecutive_blocks(7, blocks[:10], seed=b"2") for reorg_block in blocks_reorg_chain: result, error_code, fork_height = await b.receive_block(reorg_block) if reorg_block.height < 10: assert result == ReceiveBlockResult.ALREADY_HAVE_BLOCK elif reorg_block.height < 14: assert result == ReceiveBlockResult.ADDED_AS_ORPHAN elif reorg_block.height >= 15: assert result == ReceiveBlockResult.NEW_PEAK assert error_code is None assert b.get_peak().height == 16 @pytest.mark.asyncio async def test_long_reorg(self, empty_blockchain, default_10000_blocks): # Reorg longer than a difficulty adjustment # Also tests higher weight chain but lower height b = empty_blockchain num_blocks_chain_1 = 3 * test_constants.EPOCH_BLOCKS + test_constants.MAX_SUB_SLOT_BLOCKS + 10 num_blocks_chain_2_start = test_constants.EPOCH_BLOCKS - 20 num_blocks_chain_2 = 3 * test_constants.EPOCH_BLOCKS + test_constants.MAX_SUB_SLOT_BLOCKS + 8 assert num_blocks_chain_1 < 10000 blocks = default_10000_blocks[:num_blocks_chain_1] for block in blocks: assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK chain_1_height = b.get_peak().height chain_1_weight = b.get_peak().weight assert chain_1_height == (num_blocks_chain_1 - 1) # These blocks will have less time between them (timestamp) and therefore will make difficulty go up # This means that the weight will grow faster, and we can get a heavier chain with lower height blocks_reorg_chain = bt.get_consecutive_blocks( num_blocks_chain_2 - num_blocks_chain_2_start, blocks[:num_blocks_chain_2_start], seed=b"2", time_per_block=8, ) found_orphan = False for reorg_block in blocks_reorg_chain: result, error_code, 
fork_height = await b.receive_block(reorg_block) if reorg_block.height < num_blocks_chain_2_start: assert result == ReceiveBlockResult.ALREADY_HAVE_BLOCK if reorg_block.weight <= chain_1_weight: if result == ReceiveBlockResult.ADDED_AS_ORPHAN: found_orphan = True assert error_code is None assert result == ReceiveBlockResult.ADDED_AS_ORPHAN or result == ReceiveBlockResult.ALREADY_HAVE_BLOCK elif reorg_block.weight > chain_1_weight: assert reorg_block.height < chain_1_height assert result == ReceiveBlockResult.NEW_PEAK assert error_code is None assert found_orphan assert b.get_peak().weight > chain_1_weight assert b.get_peak().height < chain_1_height @pytest.mark.asyncio async def test_long_compact_blockchain(self, empty_blockchain, default_10000_blocks_compact): b = empty_blockchain for block in default_10000_blocks_compact: assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK assert b.get_peak().height == len(default_10000_blocks_compact) - 1 @pytest.mark.asyncio async def test_reorg_from_genesis(self, empty_blockchain): b = empty_blockchain WALLET_A = WalletTool(b.constants) WALLET_A_PUZZLE_HASHES = [WALLET_A.get_new_puzzlehash() for _ in range(5)] blocks = bt.get_consecutive_blocks(15) for block in blocks: assert (await b.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK assert b.get_peak().height == 14 # Reorg to alternate chain that is 1 height longer found_orphan = False blocks_reorg_chain = bt.get_consecutive_blocks(16, [], seed=b"2") for reorg_block in blocks_reorg_chain: result, error_code, fork_height = await b.receive_block(reorg_block) if reorg_block.height < 14: if result == ReceiveBlockResult.ADDED_AS_ORPHAN: found_orphan = True assert result == ReceiveBlockResult.ADDED_AS_ORPHAN or result == ReceiveBlockResult.ALREADY_HAVE_BLOCK elif reorg_block.height >= 15: assert result == ReceiveBlockResult.NEW_PEAK assert error_code is None # Back to original chain blocks_reorg_chain_2 = bt.get_consecutive_blocks(3, blocks, seed=b"3") 
result, error_code, fork_height = await b.receive_block(blocks_reorg_chain_2[-3]) assert result == ReceiveBlockResult.ADDED_AS_ORPHAN result, error_code, fork_height = await b.receive_block(blocks_reorg_chain_2[-2]) assert result == ReceiveBlockResult.NEW_PEAK result, error_code, fork_height = await b.receive_block(blocks_reorg_chain_2[-1]) assert result == ReceiveBlockResult.NEW_PEAK assert found_orphan assert b.get_peak().height == 17 @pytest.mark.asyncio async def test_reorg_transaction(self, empty_blockchain): b = empty_blockchain wallet_a = WalletTool(b.constants) WALLET_A_PUZZLE_HASHES = [wallet_a.get_new_puzzlehash() for _ in range(5)] coinbase_puzzlehash = WALLET_A_PUZZLE_HASHES[0] receiver_puzzlehash = WALLET_A_PUZZLE_HASHES[1] blocks = bt.get_consecutive_blocks(10, farmer_reward_puzzle_hash=coinbase_puzzlehash) blocks = bt.get_consecutive_blocks( 2, blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, guarantee_transaction_block=True ) spend_block = blocks[10] spend_coin = None for coin in list(spend_block.get_included_reward_coins()): if coin.puzzle_hash == coinbase_puzzlehash: spend_coin = coin spend_bundle = wallet_a.generate_signed_transaction(1000, receiver_puzzlehash, spend_coin) blocks = bt.get_consecutive_blocks( 2, blocks, farmer_reward_puzzle_hash=coinbase_puzzlehash, transaction_data=spend_bundle, guarantee_transaction_block=True, ) blocks_fork = bt.get_consecutive_blocks( 1, blocks[:12], farmer_reward_puzzle_hash=coinbase_puzzlehash, seed=b"123", guarantee_transaction_block=True ) blocks_fork = bt.get_consecutive_blocks( 2, blocks_fork, farmer_reward_puzzle_hash=coinbase_puzzlehash, transaction_data=spend_bundle, guarantee_transaction_block=True, seed=b"1245", ) for block in blocks: result, error_code, _ = await b.receive_block(block) assert error_code is None and result == ReceiveBlockResult.NEW_PEAK for block in blocks_fork: result, error_code, _ = await b.receive_block(block) assert error_code is None @pytest.mark.asyncio async def 
test_get_header_blocks_in_range_tx_filter(self, empty_blockchain): b = empty_blockchain blocks = bt.get_consecutive_blocks( 3, guarantee_transaction_block=True, timelord_reward_puzzle_hash=bt.pool_ph, pool_reward_puzzle_hash=bt.pool_ph, farmer_reward_puzzle_hash=bt.pool_ph, ) assert (await b.receive_block(blocks[0]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[1]))[0] == ReceiveBlockResult.NEW_PEAK assert (await b.receive_block(blocks[2]))[0] == ReceiveBlockResult.NEW_PEAK wt: WalletTool = bt.get_pool_wallet_tool() tx: SpendBundle = wt.generate_signed_transaction( 10, wt.get_new_puzzlehash(), list(blocks[2].get_included_reward_coins())[0] ) blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx, ) err = (await b.receive_block(blocks[-1]))[1] assert not err blocks_with_filter = await b.get_header_blocks_in_range(0, 10, tx_filter=True) blocks_without_filter = await b.get_header_blocks_in_range(0, 10, tx_filter=False) header_hash = blocks[-1].header_hash assert ( blocks_with_filter[header_hash].transactions_filter != blocks_without_filter[header_hash].transactions_filter ) assert blocks_with_filter[header_hash].header_hash == blocks_without_filter[header_hash].header_hash
const elixir = require('laravel-elixir');

// Side-effect plugins: register Vue 2 compilation and LiveReload tasks with Elixir.
require('laravel-elixir-vue-2');
require('laravel-elixir-livereload');

/*
 |--------------------------------------------------------------------------
 | Elixir Asset Management
 |--------------------------------------------------------------------------
 |
 | Elixir provides a clean, fluent API for defining some basic Gulp tasks
 | for your Laravel application. By default, we are compiling the Sass
 | file for our application, as well as publishing vendor resources.
 |
 */

// Vendor and application scripts, concatenated (in this order) into public/js/app.js.
const appScripts = [
    './node_modules/jquery/dist/jquery.min.js',
    './node_modules/bootstrap/dist/js/bootstrap.min.js',
    './node_modules/angular/angular.js',
    './node_modules/angular-ui-router/release/angular-ui-router.min.js',
    './node_modules/angular-bootstrap/ui-bootstrap.min.js',
    './node_modules/angular-bootstrap/ui-bootstrap-tpls.min.js',
    './node_modules/angular-animate/angular-animate.min.js',
    './node_modules/angular-touch/angular-touch.min.js',
    'app.module.js',
    'app.router.js',
];

// Vendor and application stylesheets, concatenated into public/css/app.css.
const appStyles = [
    './node_modules/bootstrap/dist/css/bootstrap.min.css',
    './node_modules/font-awesome/css/font-awesome.min.css',
    'animations.css',
    'forms.css',
    'widgets.css',
    'style.css',
    'error-page.css',
];

elixir(function(mix) {
    // Publish icon fonts shipped by the vendor packages.
    mix.copy(['./node_modules/font-awesome/fonts'], './public/fonts/font-awesome')
        .copy(['./node_modules/bootstrap/dist/fonts'], './public/fonts/bootstrap');

    mix.scripts(appScripts, './public/js/app.js');

    mix.styles(appStyles, './public/css/app.css');

    mix.livereload();
});
/* Copyright 2021 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Renders an XFA form tree into DOM elements and keeps form values in sync
 * with an annotation-storage object.
 */
class XfaLayer {
  /**
   * Restore a stored value into a form control and attach the listeners that
   * write user edits back into `storage`.
   *
   * @param {HTMLElement} html - The DOM element for the form control.
   * @param {string} id - Storage key for this control.
   * @param {Object} element - The XFA node describing the control.
   * @param {Object} storage - Annotation storage (getValue/setValue).
   * @param {string} intent - "display" or "print"; in print mode no
   *   listeners are attached for textarea/input, since the output is static.
   */
  static setupStorage(html, id, element, storage, intent) {
    const storedData = storage.getValue(id, { value: null });
    switch (element.name) {
      case "textarea":
        if (storedData.value !== null) {
          html.textContent = storedData.value;
        }
        if (intent === "print") {
          break;
        }
        html.addEventListener("input", event => {
          storage.setValue(id, { value: event.target.value });
        });
        break;
      case "input":
        if (
          element.attributes.type === "radio" ||
          element.attributes.type === "checkbox"
        ) {
          if (storedData.value === element.attributes.exportedValue) {
            html.setAttribute("checked", true);
          }
          if (intent === "print") {
            break;
          }
          html.addEventListener("change", event => {
            storage.setValue(id, { value: event.target.getAttribute("xfaOn") });
          });
        } else {
          if (storedData.value !== null) {
            html.setAttribute("value", storedData.value);
          }
          if (intent === "print") {
            break;
          }
          html.addEventListener("input", event => {
            storage.setValue(id, { value: event.target.value });
          });
        }
        break;
      case "select":
        if (storedData.value !== null) {
          for (const option of element.children) {
            if (option.attributes.value === storedData.value) {
              option.attributes.selected = true;
            }
          }
        }
        // NOTE(review): unlike textarea/input above, no `intent === "print"`
        // guard here, so a listener is attached even when printing — confirm
        // whether that is intentional before adding a guard.
        html.addEventListener("input", event => {
          const options = event.target.options;
          const value =
            options.selectedIndex === -1
              ? ""
              : options[options.selectedIndex].value;
          storage.setValue(id, { value });
        });
        break;
    }
  }

  /**
   * Copy an XFA node's attributes onto its DOM element and, when the node is
   * backed by storage (`dataId`), wire up value persistence.
   *
   * @param {HTMLElement} html
   * @param {Object} element
   * @param {Object} [storage]
   * @param {string} [intent]
   */
  static setAttributes(html, element, storage, intent) {
    const { attributes } = element;
    if (attributes.type === "radio") {
      // Avoid to have a radio group when printing with the same as one
      // already displayed.
      attributes.name = `${attributes.name}-${intent}`;
    }
    for (const [key, value] of Object.entries(attributes)) {
      // `dataId` is internal bookkeeping, never a real DOM attribute.
      if (value === null || value === undefined || key === "dataId") {
        continue;
      }

      if (key !== "style") {
        if (key === "textContent") {
          html.textContent = value;
        } else if (key === "class") {
          html.setAttribute(key, value.join(" "));
        } else {
          html.setAttribute(key, value);
        }
      } else {
        Object.assign(html.style, value);
      }
    }

    // Set the value after the others to be sure overwrite
    // any other values.
    if (storage && attributes.dataId) {
      // BUG FIX: `intent` was previously dropped here, so setupStorage always
      // saw `undefined` and attached edit listeners even in "print" mode.
      this.setupStorage(html, attributes.dataId, element, storage, intent);
    }
  }

  /**
   * Render the whole XFA tree described by `parameters.xfa` into
   * `parameters.div`.
   *
   * @param {Object} parameters - { annotationStorage, xfa, intent, div,
   *   viewport }.
   */
  static render(parameters) {
    const storage = parameters.annotationStorage;
    const root = parameters.xfa;
    const intent = parameters.intent || "display";
    const rootHtml = document.createElement(root.name);
    if (root.attributes) {
      // Pass storage/intent through so a storage-backed root is handled the
      // same way as any child node.
      this.setAttributes(rootHtml, root, storage, intent);
    }
    const stack = [[root, -1, rootHtml]];

    const rootDiv = parameters.div;
    rootDiv.appendChild(rootHtml);
    const transform = `matrix(${parameters.viewport.transform.join(",")})`;
    rootDiv.style.transform = transform;

    // Set defaults.
    rootDiv.setAttribute("class", "xfaLayer xfaFont");

    // Iterative DFS over the XFA tree; each stack entry is
    // [node, lastVisitedChildIndex, domElement].
    while (stack.length > 0) {
      const [parent, i, html] = stack[stack.length - 1];
      if (i + 1 === parent.children.length) {
        stack.pop();
        continue;
      }

      const child = parent.children[++stack[stack.length - 1][1]];
      if (child === null) {
        continue;
      }

      const { name } = child;
      if (name === "#text") {
        html.appendChild(document.createTextNode(child.value));
        continue;
      }

      let childHtml;
      if (child?.attributes?.xmlns) {
        childHtml = document.createElementNS(child.attributes.xmlns, name);
      } else {
        childHtml = document.createElement(name);
      }

      html.appendChild(childHtml);
      if (child.attributes) {
        this.setAttributes(childHtml, child, storage, intent);
      }

      if (child.children && child.children.length > 0) {
        stack.push([child, -1, childHtml]);
      } else if (child.value) {
        childHtml.appendChild(document.createTextNode(child.value));
      }
    }

    // Propagate container-level disabled/read-only state to the actual
    // form controls inside.
    for (const el of rootDiv.querySelectorAll(
      ".xfaDisabled input, .xfaDisabled textarea"
    )) {
      el.setAttribute("disabled", true);
    }
    for (const el of rootDiv.querySelectorAll(
      ".xfaReadOnly input, .xfaReadOnly textarea"
    )) {
      el.setAttribute("readOnly", true);
    }
  }

  /**
   * Update the xfa layer.
   *
   * @public
   * @param {XfaLayerParameters} parameters
   * @memberof XfaLayer
   */
  static update(parameters) {
    const transform = `matrix(${parameters.viewport.transform.join(",")})`;
    parameters.div.style.transform = transform;
    parameters.div.hidden = false;
  }
}

export { XfaLayer };
/*! https://mths.be/placeholder v2.0.7 by @mathias */
// jQuery polyfill for the HTML5 `placeholder` attribute: on browsers without
// native support it shows the placeholder text as the field's value (tagged
// with a `.placeholder` class) and swaps it out on focus/val()/submit.
;(function(window, document, $) {

	// Opera Mini v7 doesn't support placeholder although its DOM seems to indicate so
	var isOperaMini = Object.prototype.toString.call(window.operamini) == '[object OperaMini]';
	var isInputSupported = 'placeholder' in document.createElement('input') && !isOperaMini;
	var isTextareaSupported = 'placeholder' in document.createElement('textarea') && !isOperaMini;
	var prototype = $.fn;
	var valHooks = $.valHooks;
	var propHooks = $.propHooks;
	var hooks;
	var placeholder;

	if (isInputSupported && isTextareaSupported) {

		// Native support everywhere: the plugin is a no-op that keeps chaining.
		placeholder = prototype.placeholder = function() {
			return this;
		};

		placeholder.input = placeholder.textarea = true;

	} else {

		// Polyfill path: bind focus/blur handlers on every matching
		// `[placeholder]` element and show the placeholder immediately.
		placeholder = prototype.placeholder = function() {
			var $this = this;
			$this
				.filter((isInputSupported ? 'textarea' : ':input') + '[placeholder]')
				.not('.placeholder')
				.bind({
					'focus.placeholder': clearPlaceholder,
					'blur.placeholder': setPlaceholder
				})
				.data('placeholder-enabled', true)
				.trigger('blur.placeholder');
			return $this;
		};

		placeholder.input = isInputSupported;
		placeholder.textarea = isTextareaSupported;

		// val()/prop('value') hooks so that reading a field showing its
		// placeholder returns '' instead of the placeholder text.
		hooks = {
			'get': function(element) {
				var $element = $(element);

				var $passwordInput = $element.data('placeholder-password');
				if ($passwordInput) {
					// Password fields are mirrored by a hidden text input; read from it.
					return $passwordInput[0].value;
				}

				return $element.data('placeholder-enabled') && $element.hasClass('placeholder') ? '' : element.value;
			},
			'set': function(element, value) {
				var $element = $(element);

				var $passwordInput = $element.data('placeholder-password');
				if ($passwordInput) {
					return $passwordInput[0].value = value;
				}

				if (!$element.data('placeholder-enabled')) {
					return element.value = value;
				}
				if (value == '') {
					element.value = value;
					// Issue #56: Setting the placeholder causes problems if the element continues to have focus.
					if (element != safeActiveElement()) {
						// We can't use `triggerHandler` here because of dummy text/password inputs :(
						setPlaceholder.call(element);
					}
				} else if ($element.hasClass('placeholder')) {
					clearPlaceholder.call(element, true, value) || (element.value = value);
				} else {
					element.value = value;
				}
				// `set` can not return `undefined`; see https://jsapi.info/jquery/1.7.1/val#L2363
				return $element;
			}
		};

		if (!isInputSupported) {
			valHooks.input = hooks;
			propHooks.value = hooks;
		}
		if (!isTextareaSupported) {
			valHooks.textarea = hooks;
			propHooks.value = hooks;
		}

		$(function() {
			// Look for forms
			$(document).delegate('form', 'submit.placeholder', function() {
				// Clear the placeholder values so they don't get submitted
				var $inputs = $('.placeholder', this).each(clearPlaceholder);
				setTimeout(function() {
					// Restore the placeholders once the submit has gone out.
					$inputs.each(setPlaceholder);
				}, 10);
			});
		});

		// Clear placeholder values upon page reload
		$(window).bind('beforeunload.placeholder', function() {
			$('.placeholder').each(function() {
				this.value = '';
			});
		});

	}

	// Return an object of element attributes (used to clone a password input
	// as a text input when `.clone()` can't change `type`, e.g. old IE).
	function args(elem) {
		var newAttrs = {};
		var rinlinejQuery = /^jQuery\d+$/;
		$.each(elem.attributes, function(i, attr) {
			if (attr.specified && !rinlinejQuery.test(attr.name)) {
				newAttrs[attr.name] = attr.value;
			}
		});
		return newAttrs;
	}

	// Focus handler: remove the displayed placeholder so the user can type.
	// `event === true` signals an internal call from `$.valHooks.*.set`.
	function clearPlaceholder(event, value) {
		var input = this;
		var $input = $(input);
		if (input.value == $input.attr('placeholder') && $input.hasClass('placeholder')) {
			if ($input.data('placeholder-password')) {
				// Swap the visible text dummy back for the real password input.
				$input = $input.hide().next().show().attr('id', $input.removeAttr('id').data('placeholder-id'));
				// If `clearPlaceholder` was called from `$.valHooks.input.set`
				if (event === true) {
					return $input[0].value = value;
				}
				$input.focus();
			} else {
				input.value = '';
				$input.removeClass('placeholder');
				input == safeActiveElement() && input.select();
			}
		}
	}

	// Blur handler: if the field is empty, display its placeholder text.
	// Password inputs get a lazily-created text-input stand-in so the
	// placeholder isn't masked with dots.
	function setPlaceholder() {
		var $replacement;
		var input = this;
		var $input = $(input);
		var id = this.id;
		if (input.value == '') {
			if (input.type == 'password') {
				if (!$input.data('placeholder-textinput')) {
					try {
						$replacement = $input.clone().attr({ 'type': 'text' });
					} catch(e) {
						// Some browsers forbid changing `type`; rebuild the input instead.
						$replacement = $('<input>').attr($.extend(args(this), { 'type': 'text' }));
					}
					$replacement
						.removeAttr('name')
						.data({
							'placeholder-password': $input,
							'placeholder-id': id
						})
						.bind('focus.placeholder', clearPlaceholder);
					$input
						.data({
							'placeholder-textinput': $replacement,
							'placeholder-id': id
						})
						.before($replacement);
				}
				$input = $input.removeAttr('id').hide().prev().attr('id', id).show();
				// Note: `$input[0] != input` now!
			}
			$input.addClass('placeholder');
			$input[0].value = $input.attr('placeholder');
		} else {
			$input.removeClass('placeholder');
		}
	}

	function safeActiveElement() {
		// Avoid IE9 `document.activeElement` of death
		// https://github.com/mathiasbynens/jquery-placeholder/pull/99
		try {
			return document.activeElement;
		} catch (err) {}
	}

}(this, document, jQuery));
'use strict';

// Jasmine spec for AngularJS's $anchorScroll service: verifies which element
// is scrolled into view for a given $location hash, and the automatic
// scroll-on-hash-change watcher.
describe('$anchorScroll', function() {

  // Map from element identifier (as passed to addElements) to its
  // scrollIntoView spy; reset in beforeEach.
  var elmSpy;

  // Returns an injectable that appends one element per identifier
  // ("id=foo", "a name=bar", ...) to the body and spies on its
  // scrollIntoView method.
  function addElements() {
    var elements = sliceArgs(arguments);

    return function() {
      forEach(elements, function(identifier) {
        var match = identifier.match(/(\w* )?(\w*)=(\w*)/),
            jqElm = jqLite('<' + (match[1] || 'a ') + match[2] + '="' + match[3] + '"/>'),
            elm = jqElm[0];

        elmSpy[identifier] = spyOn(elm, 'scrollIntoView');
        jqLite(document.body).append(jqElm);
      });
    };
  }

  // Returns an injectable that sets the location hash and invokes
  // $anchorScroll explicitly.
  function changeHashAndScroll(hash) {
    return function($location, $anchorScroll) {
      $location.hash(hash);
      $anchorScroll();
    };
  }

  // Asserts that no element was scrolled and the window scrolled to (0, 0).
  function expectScrollingToTop($window) {
    forEach(elmSpy, function(spy, id) {
      expect(spy).not.toHaveBeenCalled();
    });

    expect($window.scrollTo).toHaveBeenCalledWith(0, 0);
  }

  // Returns an assertion that exactly the element with the given identifier
  // was scrolled into view, and the window itself was not scrolled.
  function expectScrollingTo(identifier) {
    return function($window) {
      forEach(elmSpy, function(spy, id) {
        if (identifier === id) expect(spy).toHaveBeenCalledOnce();
        else expect(spy).not.toHaveBeenCalled();
      });
      expect($window.scrollTo).not.toHaveBeenCalled();
    };
  }

  // NaN matches no identifier, so this asserts that nothing scrolled at all.
  function expectNoScrolling() {
    return expectScrollingTo(NaN);
  }

  beforeEach(module(function($provide) {
    elmSpy = {};
    // Mock $window so scrollTo can be spied on without touching the real window.
    $provide.value('$window', {
      scrollTo: jasmine.createSpy('$window.scrollTo'),
      document: document,
      navigator: {}
    });
  }));


  it('should scroll to top of the window if empty hash', inject(
    changeHashAndScroll(''),
    expectScrollingToTop));


  it('should not scroll if hash does not match any element', inject(
    addElements('id=one', 'id=two'),
    changeHashAndScroll('non-existing'),
    expectNoScrolling()));


  it('should scroll to anchor element with name', inject(
    addElements('a name=abc'),
    changeHashAndScroll('abc'),
    expectScrollingTo('a name=abc')));


  it('should not scroll to other than anchor element with name', inject(
    addElements('input name=xxl', 'select name=xxl', 'form name=xxl'),
    changeHashAndScroll('xxl'),
    expectNoScrolling()));


  it('should scroll to anchor even if other element with given name exist', inject(
    addElements('input name=some', 'a name=some'),
    changeHashAndScroll('some'),
    expectScrollingTo('a name=some')));


  it('should scroll to element with id with precedence over name', inject(
    addElements('name=abc', 'id=abc'),
    changeHashAndScroll('abc'),
    expectScrollingTo('id=abc')));


  it('should scroll to top if hash == "top" and no matching element', inject(
    changeHashAndScroll('top'),
    expectScrollingToTop));


  it('should scroll to element with id "top" if present', inject(
    addElements('id=top'),
    changeHashAndScroll('top'),
    expectScrollingTo('id=top')));


  // Tests for the implicit scroll triggered by $location hash changes
  // (no explicit $anchorScroll() call).
  describe('watcher', function() {

    // Configures html5Mode / $sniffer history support before injection.
    function initLocation(config) {
      return function($provide, $locationProvider) {
        $provide.value('$sniffer', {history: config.historyApi});
        $locationProvider.html5Mode(config.html5Mode);
      };
    }

    // Changes the hash inside a digest, letting the watcher react.
    function changeHashTo(hash) {
      return function ($location, $rootScope, $anchorScroll) {
        $rootScope.$apply(function() {
          $location.hash(hash);
        });
      };
    }

    function disableAutoScrolling() {
      return function($anchorScrollProvider) {
        $anchorScrollProvider.disableAutoScrolling();
      };
    }

    afterEach(inject(function($document) {
      dealoc($document);
    }));


    it('should scroll to element when hash change in hashbang mode', function() {
      module(initLocation({html5Mode: false, historyApi: true}));
      inject(
        addElements('id=some'),
        changeHashTo('some'),
        expectScrollingTo('id=some')
      );
    });


    it('should scroll to element when hash change in html5 mode with no history api', function() {
      module(initLocation({html5Mode: true, historyApi: false}));
      inject(
        addElements('id=some'),
        changeHashTo('some'),
        expectScrollingTo('id=some')
      );
    });


    it('should not scroll when element does not exist', function() {
      module(initLocation({html5Mode: false, historyApi: false}));
      inject(
        addElements('id=some'),
        changeHashTo('other'),
        expectNoScrolling()
      );
    });


    it('should scroll when html5 mode with history api', function() {
      module(initLocation({html5Mode: true, historyApi: true}));
      inject(
        addElements('id=some'),
        changeHashTo('some'),
        expectScrollingTo('id=some')
      );
    });


    it('should not scroll when disabled', function() {
      module(
        disableAutoScrolling(),
        initLocation({html5Mode: false, historyApi: false})
      );
      inject(
        addElements('id=fake'),
        changeHashTo('fake'),
        expectNoScrolling()
      );
    });
  });
});
# Generated by Django 3.1.12 on 2021-09-18 12:26 from django.db import migrations import taggit.managers class Migration(migrations.Migration): dependencies = [ ('taggit', '0003_taggeditem_add_unique_index'), ('elibrary', '0008_auto_20210918_1119'), ] operations = [ migrations.AlterField( model_name='book', name='tag', field=taggit.managers.TaggableManager(help_text='Groups of characters which appear between double quotes take precedence as multi-word tags (so double quoted tag names may contain commas). An unclosed double quote will be ignored. Otherwise, if there are any unquoted commas in the input, it will be treated as comma-delimited. If not, it will be treated as space-delimited.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'), ), ]
import React from 'react'; import PropTypes from 'prop-types'; import {connect} from 'react-redux'; // eslint-disable-next-line import/no-extraneous-dependencies import AJS from 'AJS'; import {RestRegistry} from './RestRegistry'; import {RestScriptDialog} from './RestScriptDialog'; import {CommonMessages} from '../i18n/common.i18n'; @connect( state => { return { scripts: state.scripts, ready: state.ready }; } ) export class RestRegistryContainer extends React.Component { static propTypes ={ scripts: PropTypes.arrayOf(PropTypes.object).isRequired, //todo: shape ready: PropTypes.bool.isRequired }; state = { dialogProps: null }; _triggerDialog = (isNew, id) => this.setState({ dialogProps: {isNew, id} }); _closeDialog = () => this.setState({ dialogProps: null }); componentDidUpdate(prevProps) { if (this.props.ready !== prevProps.ready) { if (this.props.ready) { AJS.undim(); } else { AJS.dim(); } } } render() { const {dialogProps} = this.state; const {scripts, ready} = this.props; let content = null; if (!ready) { console.log('loading'); content = <div>{CommonMessages.loading}</div>; } else { console.log(scripts); content = <RestRegistry scripts={scripts} triggerDialog={this._triggerDialog}/>; } return ( <div> {content} {dialogProps && <RestScriptDialog {...dialogProps} onClose={this._closeDialog}/>} </div> ); } }
import React from 'react' import { connect } from 'react-redux' import LineEditorPanel from './line-editor-panel' import MutationFrequencyPanel from './mutation-frequency-panel' import ParameterFormPanel from './parameter-form-panel' const Root = (props) => { const { lines, timeMax, totalDoseMax, timeGroups, mutationFrequencyMax, doseMax } = props return ( <div> <div className='columns'> <div className='column is-one-third'> <ParameterFormPanel params={props} /> </div> <div className='column'> <MutationFrequencyPanel lines={lines} timeMax={timeMax} totalDoseMax={totalDoseMax} mutationFrequencyMax={mutationFrequencyMax} /> </div> </div> <div className='columns is-multiline'> {lines.map((line, i) => { return ( <div className='column is-half' key={i}> <LineEditorPanel lineIndex={i} line={line} timeMax={timeMax} totalDoseMax={totalDoseMax} doseMax={doseMax} mutationFrequencyMax={mutationFrequencyMax} timeStep={timeGroups} /> </div> ) })} </div> </div> ) } export default connect((state) => state)(Root)
/**
 * @license Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
 * For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
 */

/* global window, document */

// When this file is opened directly (not inside a test-runner iframe),
// reveal the informational banner.
const isStandalone = window.top === window;

if ( isStandalone ) {
	document.getElementById( 'info' ).style.display = 'block';
}
// Vue instance for the JVM thread-monitoring page: renders two live
// Highcharts splines (daemon thread count, total thread count) that poll
// the backend once per second.
let app = new Vue({
    el: '#app',
    data: {
        defaultActive: '线程监控',
        // FIX: declared up front so Vue 2 makes it reactive; previously it
        // was only assigned in init(), bypassing the reactivity system.
        memory: null,
    },
    created() {
        this.init(); // initialize charts on creation
    },
    mounted() {
        this.$refs.loader.style.display = 'none';
    },
    methods: {
        _notify(message, type) {
            this.$message({
                message: message,
                type: type
            })
        },
        /**
         * Build one live thread chart (shared by both charts; previously
         * this ~40-line block was duplicated verbatim).
         *
         * @param container  id of the DOM element to render into
         * @param title      chart + series title
         * @param pickValue  extracts the plotted number from the API payload
         */
        _createThreadChart(container, title, pickValue) {
            const $this = this;
            Highcharts.chart(container, {
                chart: {
                    events: {
                        load: function () {
                            const series = this.series[0];
                            // Poll the backend every second and append a point.
                            setInterval(function () {
                                $this.$http.get(api.mycat.jvm.thread.get).then(response => {
                                    if (response.body.code == 200) {
                                        const x = (new Date()).getTime();
                                        const y = pickValue(response.body.data);
                                        series.addPoint([x, y], true, true);
                                    } else {
                                        // FIX: was `this._notify(...)`, but inside this
                                        // non-arrow `load` handler `this` is the Highcharts
                                        // chart, not the Vue instance — use the captured
                                        // `$this` so the error toast actually shows.
                                        $this._notify(response.body.data, 'error')
                                    }
                                })
                            }, 1e3);
                        }
                    }
                },
                title: {
                    text: title,
                },
                series: [{
                    name: title,
                    // Seed the series with 20 zero points, one per second,
                    // so the chart starts with a full window.
                    data: function () {
                        var data = [],
                            time = new Date().getTime(),
                            i;
                        for (i = -19; i <= 0; i++) {
                            data.push({
                                x: time + i * 1e3,
                                y: 0
                            });
                        }
                        return data;
                    }()
                }]
            });
        },
        /**
         * Fetch the initial snapshot and create both charts.
         */
        init() {
            this.$http.get(api.mycat.jvm.thread.get).then(response => {
                if (response.body.code == 200) {
                    this.memory = response.body.data;
                    this._createThreadChart('thread-daemon', "JVM 守护线程数量", data => data.daemonCount);
                    this._createThreadChart('thread-count', "JVM 线程总数量", data => data.count);
                } else {
                    this._notify(response.body.data, 'error');
                }
            })
        },
    },
});

// Global Highcharts defaults shared by both charts above.
Highcharts.setOptions({
    chart: {
        type: "spline",
        animation: Highcharts.svg,
        marginRight: 10,
    },
    title: {
        style: {
            "font-size": "1.2rem"
        }
    },
    xAxis: {
        type: 'datetime',
        tickPixelInterval: 150
    },
    yAxis: {
        title: {
            text: "单位/个"
        },
        plotLines: [{
            value: 0,
            width: 1,
            color: "#808080"
        }]
    },
    global: {
        useUTC: false
    },
    legend: {
        enabled: false
    },
    plotOptions: {
        line: {
            dataLabels: {
                // Show data labels on points.
                enabled: true
            },
            // Disable mouse tracking; tooltips/click events on points are off.
            enableMouseTracking: false
        }
    },
    tooltip: {
        formatter: function () {
            return "<b>" + this.series.name + "</b><br/>" + Highcharts.dateFormat("%Y-%m-%d %H:%M:%S", this.x) + "<br/>" + Highcharts.numberFormat(this.y, 2) + "个";
        }
    },
});
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports.styles = undefined; var _extends2 = require('babel-runtime/helpers/extends'); var _extends3 = _interopRequireDefault(_extends2); var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _objectWithoutProperties2 = require('babel-runtime/helpers/objectWithoutProperties'); var _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2); var _react = require('react'); var _react2 = _interopRequireDefault(_react); var _propTypes = require('prop-types'); var _propTypes2 = _interopRequireDefault(_propTypes); var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _Typography = require('../Typography'); var _Typography2 = _interopRequireDefault(_Typography); var _withStyles = require('../styles/withStyles'); var _withStyles2 = _interopRequireDefault(_withStyles); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } var styles = exports.styles = function styles(theme) { return { root: { display: 'flex', maxHeight: '2em', alignItems: 'center' }, positionStart: { marginRight: theme.spacing.unit }, positionEnd: { marginLeft: theme.spacing.unit } }; }; function InputAdornment(props) { var _classNames; var children = props.children, Component = props.component, classes = props.classes, className = props.className, disableTypography = props.disableTypography, position = props.position, other = (0, _objectWithoutProperties3.default)(props, ['children', 'component', 'classes', 'className', 'disableTypography', 'position']); return _react2.default.createElement( Component, (0, _extends3.default)({ className: (0, _classnames2.default)(classes.root, (_classNames = {}, (0, _defineProperty3.default)(_classNames, classes.positionStart, position === 'start'), (0, _defineProperty3.default)(_classNames, classes.positionEnd, position === 'end'), _classNames), className) }, other), typeof children === 'string' && !disableTypography ? _react2.default.createElement( _Typography2.default, { color: 'textSecondary' }, children ) : children ); } InputAdornment.propTypes = process.env.NODE_ENV !== "production" ? { /** * The content of the component, normally an `IconButton` or string. */ children: _propTypes2.default.node.isRequired, /** * Useful to extend the style applied to components. */ classes: _propTypes2.default.object.isRequired, /** * @ignore */ className: _propTypes2.default.string, /** * The component used for the root node. * Either a string to use a DOM element or a component. */ component: _propTypes2.default.oneOfType([_propTypes2.default.string, _propTypes2.default.func]), /** * If children is a string then disable wrapping in a Typography component. */ disableTypography: _propTypes2.default.bool, /** * The position this adornment should appear relative to the `Input`. 
*/ position: _propTypes2.default.oneOf(['start', 'end']) } : {}; InputAdornment.defaultProps = { component: 'div', disableTypography: false }; exports.default = (0, _withStyles2.default)(styles, { name: 'MuiInputAdornment' })(InputAdornment);
"use strict"; var mime; (function (mime_1) { function lookup(path) { return MimeTypes.filter(function (mime) { return mime.extensions.some(function (ext) { ext.lastIndex = -1; return ext.test(path); }); }); } mime_1.lookup = lookup; var MimeTypes = [ { title: "3D Crossword Plugin", MIME: "application/vnd.hzn-3d-crossword", extensions: [/\.x3d$/] }, { title: "3GP", MIME: "video/3gpp", extensions: [/\.3gp$/] }, { title: "3GP2", MIME: "video/3gpp2", extensions: [/\.3g2$/] }, { title: "3GPP MSEQ File", MIME: "application/vnd.mseq", extensions: [/\.mseq$/] }, { title: "3M Post It Notes", MIME: "application/vnd.3m.post-it-notes", extensions: [/\.pwn$/] }, { title: "3rd Generation Partnership Project - Pic Large", MIME: "application/vnd.3gpp.pic-bw-large", extensions: [/\.plb$/] }, { title: "3rd Generation Partnership Project - Pic Small", MIME: "application/vnd.3gpp.pic-bw-small", extensions: [/\.psb$/] }, { title: "3rd Generation Partnership Project - Pic Var", MIME: "application/vnd.3gpp.pic-bw-var", extensions: [/\.pvb$/] }, { title: "3rd Generation Partnership Project - Transaction Capabilities Application Part", MIME: "application/vnd.3gpp2.tcap", extensions: [/\.tcap$/] }, { title: "7-Zip", MIME: "application/x-7z-compressed", extensions: [/\.7z$/] }, { title: "AbiWord", MIME: "application/x-abiword", extensions: [/\.abw$/] }, { title: "Ace Archive", MIME: "application/x-ace-compressed", extensions: [/\.ace$/] }, { title: "Active Content Compression", MIME: "application/vnd.americandynamics.acc", extensions: [/\.acc$/] }, { title: "ACU Cobol", MIME: "application/vnd.acucobol", extensions: [/\.acu$/] }, { title: "ACU Cobol", MIME: "application/vnd.acucorp", extensions: [/\.atc$/] }, { title: "Adaptive differential pulse-code modulation", MIME: "audio/adpcm", extensions: [/\.adp$/] }, { title: "Adobe (Macropedia) Authorware - Binary File", MIME: "application/x-authorware-bin", extensions: [/\.aab$/] }, { title: "Adobe (Macropedia) Authorware - Map", MIME: 
"application/x-authorware-map", extensions: [/\.aam$/] }, { title: "Adobe (Macropedia) Authorware - Segment File", MIME: "application/x-authorware-seg", extensions: [/\.aas$/] }, { title: "Adobe AIR Application", MIME: "application/vnd.adobe.air-application-installer-package+zip", extensions: [/\.air$/] }, { title: "Adobe Flash", MIME: "application/x-shockwave-flash", extensions: [/\.swf$/] }, { title: "Adobe Flex Project", MIME: "application/vnd.adobe.fxp", extensions: [/\.fxp$/] }, { title: "Adobe Portable Document Format", MIME: "application/pdf", extensions: [/\.pdf$/] }, { title: "Adobe PostScript Printer Description File Format", MIME: "application/vnd.cups-ppd", extensions: [/\.ppd$/] }, { title: "Adobe Shockwave Player", MIME: "application/x-director", extensions: [/\.dir$/] }, { title: "Adobe XML Data Package", MIME: "application/vnd.adobe.xdp+xml", extensions: [/\.xdp$/] }, { title: "Adobe XML Forms Data Format", MIME: "application/vnd.adobe.xfdf", extensions: [/\.xfdf$/] }, { title: "Advanced Audio Coding (AAC)", MIME: "audio/x-aac", extensions: [/\.aac$/] }, { title: "Ahead AIR Application", MIME: "application/vnd.ahead.space", extensions: [/\.ahead$/] }, { title: "AirZip FileSECURE", MIME: "application/vnd.airzip.filesecure.azf", extensions: [/\.azf$/] }, { title: "AirZip FileSECURE", MIME: "application/vnd.airzip.filesecure.azs", extensions: [/\.azs$/] }, { title: "Amazon Kindle eBook format", MIME: "application/vnd.amazon.ebook", extensions: [/\.azw$/] }, { title: "AmigaDE", MIME: "application/vnd.amiga.ami", extensions: [/\.ami$/] }, { title: "Android Package Archive", MIME: "application/vnd.android.package-archive", extensions: [/\.apk$/] }, { title: "ANSER-WEB Terminal Client - Certificate Issue", MIME: "application/vnd.anser-web-certificate-issue-initiation", extensions: [/\.cii$/] }, { title: "ANSER-WEB Terminal Client - Web Funds Transfer", MIME: "application/vnd.anser-web-funds-transfer-initiation", extensions: [/\.fti$/] }, { title: "Antix 
Game Player", MIME: "application/vnd.antix.game-component", extensions: [/\.atx$/] }, { title: "Apple Installer Package", MIME: "application/vnd.apple.installer+xml", extensions: [/\.mpkg$/] }, { title: "Applixware", MIME: "application/applixware", extensions: [/\.aw$/] }, { title: "Archipelago Lesson Player", MIME: "application/vnd.hhe.lesson-player", extensions: [/\.les$/] }, { title: "Arista Networks Software Image", MIME: "application/vnd.aristanetworks.swi", extensions: [/\.swi$/] }, { title: "Assembler Source File", MIME: "text/x-asm", extensions: [/\.s$/] }, { title: "Atom Publishing Protocol", MIME: "application/atomcat+xml", extensions: [/\.atomcat$/] }, { title: "Atom Publishing Protocol Service Document", MIME: "application/atomsvc+xml", extensions: [/\.atomsvc$/] }, { title: "Atom Syndication Format", MIME: "application/atom+xml", extensions: [/\.atom$/] }, { title: "Attribute Certificate", MIME: "application/pkix-attr-cert", extensions: [/\.ac$/] }, { title: "Audio Interchange File Format", MIME: "audio/x-aiff", extensions: [/\.aif$/] }, { title: "Audio Video Interleave (AVI)", MIME: "video/x-msvideo", extensions: [/\.avi$/] }, { title: "Audiograph", MIME: "application/vnd.audiograph", extensions: [/\.aep$/] }, { title: "AutoCAD DXF", MIME: "image/vnd.dxf", extensions: [/\.dxf$/] }, { title: "Autodesk Design Web Format (DWF)", MIME: "model/vnd.dwf", extensions: [/\.dwf$/] }, { title: "BAS Partitur Format", MIME: "text/plain-bas", extensions: [/\.par$/] }, { title: "Binary CPIO Archive", MIME: "application/x-bcpio", extensions: [/\.bcpio$/] }, { title: "Binary Data", MIME: "application/octet-stream", extensions: [/\.bin$/] }, { title: "Bitmap Image File", MIME: "image/bmp", extensions: [/\.bmp$/] }, { title: "BitTorrent", MIME: "application/x-bittorrent", extensions: [/\.torrent$/] }, { title: "Blackberry COD File", MIME: "application/vnd.rim.cod", extensions: [/\.cod$/] }, { title: "Blueice Research Multipass", MIME: 
"application/vnd.blueice.multipass", extensions: [/\.mpm$/] }, { title: "BMI Drawing Data Interchange", MIME: "application/vnd.bmi", extensions: [/\.bmi$/] }, { title: "Bourne Shell Script", MIME: "application/x-sh", extensions: [/\.sh$/] }, { title: "BTIF", MIME: "image/prs.btif", extensions: [/\.btif$/] }, { title: "BusinessObjects", MIME: "application/vnd.businessobjects", extensions: [/\.rep$/] }, { title: "Bzip Archive", MIME: "application/x-bzip", extensions: [/\.bz$/] }, { title: "Bzip2 Archive", MIME: "application/x-bzip2", extensions: [/\.bz2$/] }, { title: "C Shell Script", MIME: "application/x-csh", extensions: [/\.csh$/] }, { title: "C Source File", MIME: "text/x-c", extensions: [/\.c$/] }, { title: "CambridgeSoft Chem Draw", MIME: "application/vnd.chemdraw+xml", extensions: [/\.cdxml$/] }, { title: "Cascading Style Sheets (CSS)", MIME: "text/css", extensions: [/\.css$/] }, { title: "ChemDraw eXchange file", MIME: "chemical/x-cdx", extensions: [/\.cdx$/] }, { title: "Chemical Markup Language", MIME: "chemical/x-cml", extensions: [/\.cml$/] }, { title: "Chemical Style Markup Language", MIME: "chemical/x-csml", extensions: [/\.csml$/] }, { title: "CIM Database", MIME: "application/vnd.contact.cmsg", extensions: [/\.cdbcmsg$/] }, { title: "Claymore Data Files", MIME: "application/vnd.claymore", extensions: [/\.cla$/] }, { title: "Clonk Game", MIME: "application/vnd.clonk.c4group", extensions: [/\.c4g$/] }, { title: "Close Captioning - Subtitle", MIME: "image/vnd.dvb.subtitle", extensions: [/\.sub$/] }, { title: "Cloud Data Management Interface (CDMI) - Capability", MIME: "application/cdmi-capability", extensions: [/\.cdmia$/] }, { title: "Cloud Data Management Interface (CDMI) - Contaimer", MIME: "application/cdmi-container", extensions: [/\.cdmic$/] }, { title: "Cloud Data Management Interface (CDMI) - Domain", MIME: "application/cdmi-domain", extensions: [/\.cdmid$/] }, { title: "Cloud Data Management Interface (CDMI) - Object", MIME: 
"application/cdmi-object", extensions: [/\.cdmio$/] }, { title: "Cloud Data Management Interface (CDMI) - Queue", MIME: "application/cdmi-queue", extensions: [/\.cdmiq$/] }, { title: "ClueTrust CartoMobile - Config", MIME: "application/vnd.cluetrust.cartomobile-config", extensions: [/\.c11amc$/] }, { title: "ClueTrust CartoMobile - Config Package", MIME: "application/vnd.cluetrust.cartomobile-config-pkg", extensions: [/\.c11amz$/] }, { title: "CMU Image", MIME: "image/x-cmu-raster", extensions: [/\.ras$/] }, { title: "COLLADA", MIME: "model/vnd.collada+xml", extensions: [/\.dae$/] }, { title: "Comma-Seperated Values", MIME: "text/csv", extensions: [/\.csv$/] }, { title: "Compact Pro", MIME: "application/mac-compactpro", extensions: [/\.cpt$/] }, { title: "Compiled Wireless Markup Language (WMLC)", MIME: "application/vnd.wap.wmlc", extensions: [/\.wmlc$/] }, { title: "Computer Graphics Metafile", MIME: "image/cgm", extensions: [/\.cgm$/] }, { title: "CoolTalk", MIME: "x-conference/x-cooltalk", extensions: [/\.ice$/] }, { title: "Corel Metafile Exchange (CMX)", MIME: "image/x-cmx", extensions: [/\.cmx$/] }, { title: "CorelXARA", MIME: "application/vnd.xara", extensions: [/\.xar$/] }, { title: "CosmoCaller", MIME: "application/vnd.cosmocaller", extensions: [/\.cmc$/] }, { title: "CPIO Archive", MIME: "application/x-cpio", extensions: [/\.cpio$/] }, { title: "CrickSoftware - Clicker", MIME: "application/vnd.crick.clicker", extensions: [/\.clkx$/] }, { title: "CrickSoftware - Clicker - Keyboard", MIME: "application/vnd.crick.clicker.keyboard", extensions: [/\.clkk$/] }, { title: "CrickSoftware - Clicker - Palette", MIME: "application/vnd.crick.clicker.palette", extensions: [/\.clkp$/] }, { title: "CrickSoftware - Clicker - Template", MIME: "application/vnd.crick.clicker.template", extensions: [/\.clkt$/] }, { title: "CrickSoftware - Clicker - Wordbank", MIME: "application/vnd.crick.clicker.wordbank", extensions: [/\.clkw$/] }, { title: "Critical Tools - PERT Chart 
EXPERT", MIME: "application/vnd.criticaltools.wbs+xml", extensions: [/\.wbs$/] }, { title: "CryptoNote", MIME: "application/vnd.rig.cryptonote", extensions: [/\.cryptonote$/] }, { title: "Crystallographic Interchange Format", MIME: "chemical/x-cif", extensions: [/\.cif$/] }, { title: "CrystalMaker Data Format", MIME: "chemical/x-cmdf", extensions: [/\.cmdf$/] }, { title: "CU-SeeMe", MIME: "application/cu-seeme", extensions: [/\.cu$/] }, { title: "CU-Writer", MIME: "application/prs.cww", extensions: [/\.cww$/] }, { title: "Curl - Applet", MIME: "text/vnd.curl", extensions: [/\.curl$/] }, { title: "Curl - Detached Applet", MIME: "text/vnd.curl.dcurl", extensions: [/\.dcurl$/] }, { title: "Curl - Manifest File", MIME: "text/vnd.curl.mcurl", extensions: [/\.mcurl$/] }, { title: "Curl - Source Code", MIME: "text/vnd.curl.scurl", extensions: [/\.scurl$/] }, { title: "CURL Applet", MIME: "application/vnd.curl.car", extensions: [/\.car$/] }, { title: "CURL Applet", MIME: "application/vnd.curl.pcurl", extensions: [/\.pcurl$/] }, { title: "CustomMenu", MIME: "application/vnd.yellowriver-custom-menu", extensions: [/\.cmp$/] }, { title: "Data Structure for the Security Suitability of Cryptographic Algorithms", MIME: "application/dssc+der", extensions: [/\.dssc$/] }, { title: "Data Structure for the Security Suitability of Cryptographic Algorithms", MIME: "application/dssc+xml", extensions: [/\.xdssc$/] }, { title: "Debian Package", MIME: "application/x-debian-package", extensions: [/\.deb$/] }, { title: "DECE Audio", MIME: "audio/vnd.dece.audio", extensions: [/\.uva$/] }, { title: "DECE Graphic", MIME: "image/vnd.dece.graphic", extensions: [/\.uvi$/] }, { title: "DECE High Definition Video", MIME: "video/vnd.dece.hd", extensions: [/\.uvh$/] }, { title: "DECE Mobile Video", MIME: "video/vnd.dece.mobile", extensions: [/\.uvm$/] }, { title: "DECE MP4", MIME: "video/vnd.uvvu.mp4", extensions: [/\.uvu$/] }, { title: "DECE PD Video", MIME: "video/vnd.dece.pd", extensions: [/\.uvp$/] 
}, { title: "DECE SD Video", MIME: "video/vnd.dece.sd", extensions: [/\.uvs$/] }, { title: "DECE Video", MIME: "video/vnd.dece.video", extensions: [/\.uvv$/] }, { title: "Device Independent File Format (DVI)", MIME: "application/x-dvi", extensions: [/\.dvi$/] }, { title: "Digital Siesmograph Networks - SEED Datafiles", MIME: "application/vnd.fdsn.seed", extensions: [/\.seed$/] }, { title: "Digital Talking Book", MIME: "application/x-dtbook+xml", extensions: [/\.dtb$/] }, { title: "Digital Talking Book - Resource File", MIME: "application/x-dtbresource+xml", extensions: [/\.res$/] }, { title: "Digital Video Broadcasting", MIME: "application/vnd.dvb.ait", extensions: [/\.ait$/] }, { title: "Digital Video Broadcasting", MIME: "application/vnd.dvb.service", extensions: [/\.svc$/] }, { title: "Digital Winds Music", MIME: "audio/vnd.digital-winds", extensions: [/\.eol$/] }, { title: "DjVu", MIME: "image/vnd.djvu", extensions: [/\.djvu$/] }, { title: "Document Type Definition", MIME: "application/xml-dtd", extensions: [/\.dtd$/] }, { title: "Dolby Meridian Lossless Packing", MIME: "application/vnd.dolby.mlp", extensions: [/\.mlp$/] }, { title: "Doom Video Game", MIME: "application/x-doom", extensions: [/\.wad$/] }, { title: "DPGraph", MIME: "application/vnd.dpgraph", extensions: [/\.dpg$/] }, { title: "DRA Audio", MIME: "audio/vnd.dra", extensions: [/\.dra$/] }, { title: "DreamFactory", MIME: "application/vnd.dreamfactory", extensions: [/\.dfac$/] }, { title: "DTS Audio", MIME: "audio/vnd.dts", extensions: [/\.dts$/] }, { title: "DTS High Definition Audio", MIME: "audio/vnd.dts.hd", extensions: [/\.dtshd$/] }, { title: "DWG Drawing", MIME: "image/vnd.dwg", extensions: [/\.dwg$/] }, { title: "DynaGeo", MIME: "application/vnd.dynageo", extensions: [/\.geo$/] }, { title: "ECMAScript", MIME: "application/ecmascript", extensions: [/\.es$/] }, { title: "EcoWin Chart", MIME: "application/vnd.ecowin.chart", extensions: [/\.mag$/] }, { title: "EDMICS 2000", MIME: 
"image/vnd.fujixerox.edmics-mmr", extensions: [/\.mmr$/] }, { title: "EDMICS 2000", MIME: "image/vnd.fujixerox.edmics-rlc", extensions: [/\.rlc$/] }, { title: "Efficient XML Interchange", MIME: "application/exi", extensions: [/\.exi$/] }, { title: "EFI Proteus", MIME: "application/vnd.proteus.magazine", extensions: [/\.mgz$/] }, { title: "Electronic Publication", MIME: "application/epub+zip", extensions: [/\.epub$/] }, { title: "Email Message", MIME: "message/rfc822", extensions: [/\.eml$/] }, { title: "Enliven Viewer", MIME: "application/vnd.enliven", extensions: [/\.nml$/] }, { title: "Express by Infoseek", MIME: "application/vnd.is-xpr", extensions: [/\.xpr$/] }, { title: "eXtended Image File Format (XIFF)", MIME: "image/vnd.xiff", extensions: [/\.xif$/] }, { title: "Extensible Forms Description Language", MIME: "application/vnd.xfdl", extensions: [/\.xfdl$/] }, { title: "Extensible MultiModal Annotation", MIME: "application/emma+xml", extensions: [/\.emma$/] }, { title: "EZPix Secure Photo Album", MIME: "application/vnd.ezpix-album", extensions: [/\.ez2$/] }, { title: "EZPix Secure Photo Album", MIME: "application/vnd.ezpix-package", extensions: [/\.ez3$/] }, { title: "FAST Search & Transfer ASA", MIME: "image/vnd.fst", extensions: [/\.fst$/] }, { title: "FAST Search & Transfer ASA", MIME: "video/vnd.fvt", extensions: [/\.fvt$/] }, { title: "FastBid Sheet", MIME: "image/vnd.fastbidsheet", extensions: [/\.fbs$/] }, { title: "FCS Express Layout Link", MIME: "application/vnd.denovo.fcselayout-link", extensions: [/\.fe_launch$/] }, { title: "Flash Video", MIME: "video/x-f4v", extensions: [/\.f4v$/] }, { title: "Flash Video", MIME: "video/x-flv", extensions: [/\.flv$/] }, { title: "FlashPix", MIME: "image/vnd.fpx", extensions: [/\.fpx$/] }, { title: "FlashPix", MIME: "image/vnd.net-fpx", extensions: [/\.npx$/] }, { title: "FLEXSTOR", MIME: "text/vnd.fmi.flexstor", extensions: [/\.flx$/] }, { title: "FLI/FLC Animation Format", MIME: "video/x-fli", extensions: 
[/\.fli$/] }, { title: "FluxTime Clip", MIME: "application/vnd.fluxtime.clip", extensions: [/\.ftc$/] }, { title: "Forms Data Format", MIME: "application/vnd.fdf", extensions: [/\.fdf$/] }, { title: "Fortran Source File", MIME: "text/x-fortran", extensions: [/\.f$/] }, { title: "FrameMaker Interchange Format", MIME: "application/vnd.mif", extensions: [/\.mif$/] }, { title: "FrameMaker Normal Format", MIME: "application/vnd.framemaker", extensions: [/\.fm$/] }, { title: "FreeHand MX", MIME: "image/x-freehand", extensions: [/\.fh$/] }, { title: "Friendly Software Corporation", MIME: "application/vnd.fsc.weblaunch", extensions: [/\.fsc$/] }, { title: "Frogans Player", MIME: "application/vnd.frogans.fnc", extensions: [/\.fnc$/] }, { title: "Frogans Player", MIME: "application/vnd.frogans.ltf", extensions: [/\.ltf$/] }, { title: "Fujitsu - Xerox 2D CAD Data", MIME: "application/vnd.fujixerox.ddd", extensions: [/\.ddd$/] }, { title: "Fujitsu - Xerox DocuWorks", MIME: "application/vnd.fujixerox.docuworks", extensions: [/\.xdw$/] }, { title: "Fujitsu - Xerox DocuWorks Binder", MIME: "application/vnd.fujixerox.docuworks.binder", extensions: [/\.xbd$/] }, { title: "Fujitsu Oasys", MIME: "application/vnd.fujitsu.oasys", extensions: [/\.oas$/] }, { title: "Fujitsu Oasys", MIME: "application/vnd.fujitsu.oasys2", extensions: [/\.oa2$/] }, { title: "Fujitsu Oasys", MIME: "application/vnd.fujitsu.oasys3", extensions: [/\.oa3$/] }, { title: "Fujitsu Oasys", MIME: "application/vnd.fujitsu.oasysgp", extensions: [/\.fg5$/] }, { title: "Fujitsu Oasys", MIME: "application/vnd.fujitsu.oasysprs", extensions: [/\.bh2$/] }, { title: "FutureSplash Animator", MIME: "application/x-futuresplash", extensions: [/\.spl$/] }, { title: "FuzzySheet", MIME: "application/vnd.fuzzysheet", extensions: [/\.fzs$/] }, { title: "G3 Fax Image", MIME: "image/g3fax", extensions: [/\.g3$/] }, { title: "GameMaker ActiveX", MIME: "application/vnd.gmx", extensions: [/\.gmx$/] }, { title: "Gen-Trix Studio", MIME: 
"model/vnd.gtw", extensions: [/\.gtw$/] }, { title: "Genomatix Tuxedo Framework", MIME: "application/vnd.genomatix.tuxedo", extensions: [/\.txd$/] }, { title: "GeoGebra", MIME: "application/vnd.geogebra.file", extensions: [/\.ggb$/] }, { title: "GeoGebra", MIME: "application/vnd.geogebra.tool", extensions: [/\.ggt$/] }, { title: "Geometric Description Language (GDL)", MIME: "model/vnd.gdl", extensions: [/\.gdl$/] }, { title: "GeoMetry Explorer", MIME: "application/vnd.geometry-explorer", extensions: [/\.gex$/] }, { title: "GEONExT and JSXGraph", MIME: "application/vnd.geonext", extensions: [/\.gxt$/] }, { title: "GeoplanW", MIME: "application/vnd.geoplan", extensions: [/\.g2w$/] }, { title: "GeospacW", MIME: "application/vnd.geospace", extensions: [/\.g3w$/] }, { title: "Ghostscript Font", MIME: "application/x-font-ghostscript", extensions: [/\.gsf$/] }, { title: "Glyph Bitmap Distribution Format", MIME: "application/x-font-bdf", extensions: [/\.bdf$/] }, { title: "GNU Tar Files", MIME: "application/x-gtar", extensions: [/\.gtar$/] }, { title: "GNU Texinfo Document", MIME: "application/x-texinfo", extensions: [/\.texinfo$/] }, { title: "Gnumeric", MIME: "application/x-gnumeric", extensions: [/\.gnumeric$/] }, { title: "Google Earth - KML", MIME: "application/vnd.google-earth.kml+xml", extensions: [/\.kml$/] }, { title: "Google Earth - Zipped KML", MIME: "application/vnd.google-earth.kmz", extensions: [/\.kmz$/] }, { title: "GrafEq", MIME: "application/vnd.grafeq", extensions: [/\.gqf$/] }, { title: "Graphics Interchange Format", MIME: "image/gif", extensions: [/\.gif$/] }, { title: "Graphviz", MIME: "text/vnd.graphviz", extensions: [/\.gv$/] }, { title: "Groove - Account", MIME: "application/vnd.groove-account", extensions: [/\.gac$/] }, { title: "Groove - Help", MIME: "application/vnd.groove-help", extensions: [/\.ghf$/] }, { title: "Groove - Identity Message", MIME: "application/vnd.groove-identity-message", extensions: [/\.gim$/] }, { title: "Groove - Injector", 
MIME: "application/vnd.groove-injector", extensions: [/\.grv$/] }, { title: "Groove - Tool Message", MIME: "application/vnd.groove-tool-message", extensions: [/\.gtm$/] }, { title: "Groove - Tool Template", MIME: "application/vnd.groove-tool-template", extensions: [/\.tpl$/] }, { title: "Groove - Vcard", MIME: "application/vnd.groove-vcard", extensions: [/\.vcg$/] }, { title: "H.261", MIME: "video/h261", extensions: [/\.h261$/] }, { title: "H.263", MIME: "video/h263", extensions: [/\.h263$/] }, { title: "H.264", MIME: "video/h264", extensions: [/\.h264$/] }, { title: "Hewlett Packard Instant Delivery", MIME: "application/vnd.hp-hpid", extensions: [/\.hpid$/] }, { title: "Hewlett-Packard's WebPrintSmart", MIME: "application/vnd.hp-hps", extensions: [/\.hps$/] }, { title: "Hierarchical Data Format", MIME: "application/x-hdf", extensions: [/\.hdf$/] }, { title: "Hit'n'Mix", MIME: "audio/vnd.rip", extensions: [/\.rip$/] }, { title: "Homebanking Computer Interface (HBCI)", MIME: "application/vnd.hbci", extensions: [/\.hbci$/] }, { title: "HP Indigo Digital Press - Job Layout Languate", MIME: "application/vnd.hp-jlyt", extensions: [/\.jlt$/] }, { title: "HP Printer Command Language", MIME: "application/vnd.hp-pcl", extensions: [/\.pcl$/] }, { title: "HP-GL/2 and HP RTL", MIME: "application/vnd.hp-hpgl", extensions: [/\.hpgl$/] }, { title: "HV Script", MIME: "application/vnd.yamaha.hv-script", extensions: [/\.hvs$/] }, { title: "HV Voice Dictionary", MIME: "application/vnd.yamaha.hv-dic", extensions: [/\.hvd$/] }, { title: "HV Voice Parameter", MIME: "application/vnd.yamaha.hv-voice", extensions: [/\.hvp$/] }, { title: "Hydrostatix Master Suite", MIME: "application/vnd.hydrostatix.sof-data", extensions: [/\.sfd-hdstx$/] }, { title: "Hyperstudio", MIME: "application/hyperstudio", extensions: [/\.stk$/] }, { title: "Hypertext Application Language", MIME: "application/vnd.hal+xml", extensions: [/\.hal$/] }, { title: "HyperText Markup Language (HTML)", MIME: "text/html", 
extensions: [/\.html$/] }, { title: "IBM DB2 Rights Manager", MIME: "application/vnd.ibm.rights-management", extensions: [/\.irm$/] }, { title: "IBM Electronic Media Management System - Secure Container", MIME: "application/vnd.ibm.secure-container", extensions: [/\.sc$/] }, { title: "iCalendar", MIME: "text/calendar", extensions: [/\.ics$/] }, { title: "ICC profile", MIME: "application/vnd.iccprofile", extensions: [/\.icc$/] }, { title: "Icon Image", MIME: "image/x-icon", extensions: [/\.ico$/] }, { title: "igLoader", MIME: "application/vnd.igloader", extensions: [/\.igl$/] }, { title: "Image Exchange Format", MIME: "image/ief", extensions: [/\.ief$/] }, { title: "ImmerVision PURE Players", MIME: "application/vnd.immervision-ivp", extensions: [/\.ivp$/] }, { title: "ImmerVision PURE Players", MIME: "application/vnd.immervision-ivu", extensions: [/\.ivu$/] }, { title: "IMS Networks", MIME: "application/reginfo+xml", extensions: [/\.rif$/] }, { title: "In3D - 3DML", MIME: "text/vnd.in3d.3dml", extensions: [/\.3dml$/] }, { title: "In3D - 3DML", MIME: "text/vnd.in3d.spot", extensions: [/\.spot$/] }, { title: "Initial Graphics Exchange Specification (IGES)", MIME: "model/iges", extensions: [/\.igs$/] }, { title: "Interactive Geometry Software", MIME: "application/vnd.intergeo", extensions: [/\.i2g$/] }, { title: "Interactive Geometry Software Cinderella", MIME: "application/vnd.cinderella", extensions: [/\.cdy$/] }, { title: "Intercon FormNet", MIME: "application/vnd.intercon.formnet", extensions: [/\.xpw$/] }, { title: "International Society for Advancement of Cytometry", MIME: "application/vnd.isac.fcs", extensions: [/\.fcs$/] }, { title: "Internet Protocol Flow Information Export", MIME: "application/ipfix", extensions: [/\.ipfix$/] }, { title: "Internet Public Key Infrastructure - Certificate", MIME: "application/pkix-cert", extensions: [/\.cer$/] }, { title: "Internet Public Key Infrastructure - Certificate Management Protocole", MIME: "application/pkixcmp", 
extensions: [/\.pki$/] }, { title: "Internet Public Key Infrastructure - Certificate Revocation Lists", MIME: "application/pkix-crl", extensions: [/\.crl$/] }, { title: "Internet Public Key Infrastructure - Certification Path", MIME: "application/pkix-pkipath", extensions: [/\.pkipath$/] }, { title: "IOCOM Visimeet", MIME: "application/vnd.insors.igm", extensions: [/\.igm$/] }, { title: "IP Unplugged Roaming Client", MIME: "application/vnd.ipunplugged.rcprofile", extensions: [/\.rcprofile$/] }, { title: "iRepository / Lucidoc Editor", MIME: "application/vnd.irepository.package+xml", extensions: [/\.irp$/] }, { title: "J2ME App Descriptor", MIME: "text/vnd.sun.j2me.app-descriptor", extensions: [/\.jad$/] }, { title: "Java Archive", MIME: "application/java-archive", extensions: [/\.jar$/] }, { title: "Java Bytecode File", MIME: "application/java-vm", extensions: [/\.class$/] }, { title: "Java Network Launching Protocol", MIME: "application/x-java-jnlp-file", extensions: [/\.jnlp$/] }, { title: "Java Serialized Object", MIME: "application/java-serialized-object", extensions: [/\.ser$/] }, { title: "Java Source File", MIME: "text/x-java-source,java", extensions: [/\.java$/] }, { title: "JavaScript", MIME: "application/javascript", extensions: [/\.js$/] }, { title: "JavaScript Object Notation (JSON)", MIME: "application/json", extensions: [/\.json$/] }, { title: "Joda Archive", MIME: "application/vnd.joost.joda-archive", extensions: [/\.joda$/] }, { title: "JPEG 2000 Compound Image File Format", MIME: "video/jpm", extensions: [/\.jpm$/] }, { title: "JPEG Image", MIME: "image/jpeg", extensions: [/\.jpeg$/, /\.jpg$/] }, { title: "JPGVideo", MIME: "video/jpeg", extensions: [/\.jpgv$/] }, { title: "Kahootz", MIME: "application/vnd.kahootz", extensions: [/\.ktz$/] }, { title: "Karaoke on Chipnuts Chipsets", MIME: "application/vnd.chipnuts.karaoke-mmd", extensions: [/\.mmd$/] }, { title: "KDE KOffice Office Suite - Karbon", MIME: "application/vnd.kde.karbon", extensions: 
[/\.karbon$/] }, { title: "KDE KOffice Office Suite - KChart", MIME: "application/vnd.kde.kchart", extensions: [/\.chrt$/] }, { title: "KDE KOffice Office Suite - Kformula", MIME: "application/vnd.kde.kformula", extensions: [/\.kfo$/] }, { title: "KDE KOffice Office Suite - Kivio", MIME: "application/vnd.kde.kivio", extensions: [/\.flw$/] }, { title: "KDE KOffice Office Suite - Kontour", MIME: "application/vnd.kde.kontour", extensions: [/\.kon$/] }, { title: "KDE KOffice Office Suite - Kpresenter", MIME: "application/vnd.kde.kpresenter", extensions: [/\.kpr$/] }, { title: "KDE KOffice Office Suite - Kspread", MIME: "application/vnd.kde.kspread", extensions: [/\.ksp$/] }, { title: "KDE KOffice Office Suite - Kword", MIME: "application/vnd.kde.kword", extensions: [/\.kwd$/] }, { title: "Kenamea App", MIME: "application/vnd.kenameaapp", extensions: [/\.htke$/] }, { title: "Kidspiration", MIME: "application/vnd.kidspiration", extensions: [/\.kia$/] }, { title: "Kinar Applications", MIME: "application/vnd.kinar", extensions: [/\.kne$/] }, { title: "Kodak Storyshare", MIME: "application/vnd.kodak-descriptor", extensions: [/\.sse$/] }, { title: "Laser App Enterprise", MIME: "application/vnd.las.las+xml", extensions: [/\.lasxml$/] }, { title: "LaTeX", MIME: "application/x-latex", extensions: [/\.latex$/] }, { title: "Life Balance - Desktop Edition", MIME: "application/vnd.llamagraphics.life-balance.desktop", extensions: [/\.lbd$/] }, { title: "Life Balance - Exchange Format", MIME: "application/vnd.llamagraphics.life-balance.exchange+xml", extensions: [/\.lbe$/] }, { title: "Lightspeed Audio Lab", MIME: "application/vnd.jam", extensions: [/\.jam$/] }, { title: "Lotus 1-2-3", MIME: "application/vnd.lotus-1-2-3", extensions: [/\0.123$/] }, { title: "Lotus Approach", MIME: "application/vnd.lotus-approach", extensions: [/\.apr$/] }, { title: "Lotus Freelance", MIME: "application/vnd.lotus-freelance", extensions: [/\.pre$/] }, { title: "Lotus Notes", MIME: 
"application/vnd.lotus-notes", extensions: [/\.nsf$/] }, { title: "Lotus Organizer", MIME: "application/vnd.lotus-organizer", extensions: [/\.org$/] }, { title: "Lotus Screencam", MIME: "application/vnd.lotus-screencam", extensions: [/\.scm$/] }, { title: "Lotus Wordpro", MIME: "application/vnd.lotus-wordpro", extensions: [/\.lwp$/] }, { title: "Lucent Voice", MIME: "audio/vnd.lucent.voice", extensions: [/\.lvp$/] }, { title: "M3U (Multimedia Playlist)", MIME: "audio/x-mpegurl", extensions: [/\.m3u$/] }, { title: "M4v", MIME: "video/x-m4v", extensions: [/\.m4v$/] }, { title: "Macintosh BinHex 4.0", MIME: "application/mac-binhex40", extensions: [/\.hqx$/] }, { title: "MacPorts Port System", MIME: "application/vnd.macports.portpkg", extensions: [/\.portpkg$/] }, { title: "MapGuide DBXML", MIME: "application/vnd.osgeo.mapguide.package", extensions: [/\.mgp$/] }, { title: "MARC Formats", MIME: "application/marc", extensions: [/\.mrc$/] }, { title: "MARC21 XML Schema", MIME: "application/marcxml+xml", extensions: [/\.mrcx$/] }, { title: "Material Exchange Format", MIME: "application/mxf", extensions: [/\.mxf$/] }, { title: "Mathematica Notebook Player", MIME: "application/vnd.wolfram.player", extensions: [/\.nbp$/] }, { title: "Mathematica Notebooks", MIME: "application/mathematica", extensions: [/\.ma$/] }, { title: "Mathematical Markup Language", MIME: "application/mathml+xml", extensions: [/\.mathml$/] }, { title: "Mbox database files", MIME: "application/mbox", extensions: [/\.mbox$/] }, { title: "MedCalc", MIME: "application/vnd.medcalcdata", extensions: [/\.mc1$/] }, { title: "Media Server Control Markup Language", MIME: "application/mediaservercontrol+xml", extensions: [/\.mscml$/] }, { title: "MediaRemote", MIME: "application/vnd.mediastation.cdkey", extensions: [/\.cdkey$/] }, { title: "Medical Waveform Encoding Format", MIME: "application/vnd.mfer", extensions: [/\.mwf$/] }, { title: "Melody Format for Mobile Platform", MIME: "application/vnd.mfmp", 
extensions: [/\.mfm$/] }, { title: "Mesh Data Type", MIME: "model/mesh", extensions: [/\.msh$/] }, { title: "Metadata Authority Description Schema", MIME: "application/mads+xml", extensions: [/\.mads$/] }, { title: "Metadata Encoding and Transmission Standard", MIME: "application/mets+xml", extensions: [/\.mets$/] }, { title: "Metadata Object Description Schema", MIME: "application/mods+xml", extensions: [/\.mods$/] }, { title: "Metalink", MIME: "application/metalink4+xml", extensions: [/\.meta4$/] }, { title: "Micosoft PowerPoint - Macro-Enabled Template File", MIME: "application/vnd.ms-powerpoint.template.macroenabled.12", extensions: [/\.potm$/] }, { title: "Micosoft Word - Macro-Enabled Document", MIME: "application/vnd.ms-word.document.macroenabled.12", extensions: [/\.docm$/] }, { title: "Micosoft Word - Macro-Enabled Template", MIME: "application/vnd.ms-word.template.macroenabled.12", extensions: [/\.dotm$/] }, { title: "Micro CADAM Helix D&D", MIME: "application/vnd.mcd", extensions: [/\.mcd$/] }, { title: "Micrografx", MIME: "application/vnd.micrografx.flo", extensions: [/\.flo$/] }, { title: "Micrografx iGrafx Professional", MIME: "application/vnd.micrografx.igx", extensions: [/\.igx$/] }, { title: "MICROSEC e-Szign¢", MIME: "application/vnd.eszigno3+xml", extensions: [/\.es3$/] }, { title: "Microsoft Access", MIME: "application/x-msaccess", extensions: [/\.mdb$/] }, { title: "Microsoft Advanced Systems Format (ASF)", MIME: "video/x-ms-asf", extensions: [/\.asf$/] }, { title: "Microsoft Application", MIME: "application/x-msdownload", extensions: [/\.exe$/] }, { title: "Microsoft Artgalry", MIME: "application/vnd.ms-artgalry", extensions: [/\.cil$/] }, { title: "Microsoft Cabinet File", MIME: "application/vnd.ms-cab-compressed", extensions: [/\.cab$/] }, { title: "Microsoft Class Server", MIME: "application/vnd.ms-ims", extensions: [/\.ims$/] }, { title: "Microsoft ClickOnce", MIME: "application/x-ms-application", extensions: [/\.application$/] }, { title: 
"Microsoft Clipboard Clip", MIME: "application/x-msclip", extensions: [/\.clp$/] }, { title: "Microsoft Document Imaging Format", MIME: "image/vnd.ms-modi", extensions: [/\.mdi$/] }, { title: "Microsoft Embedded OpenType", MIME: "application/vnd.ms-fontobject", extensions: [/\.eot$/] }, { title: "Microsoft Excel", MIME: "application/vnd.ms-excel", extensions: [/\.xls$/] }, { title: "Microsoft Excel - Add-In File", MIME: "application/vnd.ms-excel.addin.macroenabled.12", extensions: [/\.xlam$/] }, { title: "Microsoft Excel - Binary Workbook", MIME: "application/vnd.ms-excel.sheet.binary.macroenabled.12", extensions: [/\.xlsb$/] }, { title: "Microsoft Excel - Macro-Enabled Template File", MIME: "application/vnd.ms-excel.template.macroenabled.12", extensions: [/\.xltm$/] }, { title: "Microsoft Excel - Macro-Enabled Workbook", MIME: "application/vnd.ms-excel.sheet.macroenabled.12", extensions: [/\.xlsm$/] }, { title: "Microsoft Html Help File", MIME: "application/vnd.ms-htmlhelp", extensions: [/\.chm$/] }, { title: "Microsoft Information Card", MIME: "application/x-mscardfile", extensions: [/\.crd$/] }, { title: "Microsoft Learning Resource Module", MIME: "application/vnd.ms-lrm", extensions: [/\.lrm$/] }, { title: "Microsoft MediaView", MIME: "application/x-msmediaview", extensions: [/\.mvb$/] }, { title: "Microsoft Money", MIME: "application/x-msmoney", extensions: [/\.mny$/] }, { title: "Microsoft Office - OOXML - Presentation", MIME: "application/vnd.openxmlformats-officedocument.presentationml.presentation", extensions: [/\.pptx$/] }, { title: "Microsoft Office - OOXML - Presentation (Slide)", MIME: "application/vnd.openxmlformats-officedocument.presentationml.slide", extensions: [/\.sldx$/] }, { title: "Microsoft Office - OOXML - Presentation (Slideshow)", MIME: "application/vnd.openxmlformats-officedocument.presentationml.slideshow", extensions: [/\.ppsx$/] }, { title: "Microsoft Office - OOXML - Presentation Template", MIME: 
"application/vnd.openxmlformats-officedocument.presentationml.template", extensions: [/\.potx$/] }, { title: "Microsoft Office - OOXML - Spreadsheet", MIME: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", extensions: [/\.xlsx$/] }, { title: "Microsoft Office - OOXML - Spreadsheet Teplate", MIME: "application/vnd.openxmlformats-officedocument.spreadsheetml.template", extensions: [/\.xltx$/] }, { title: "Microsoft Office - OOXML - Word Document", MIME: "application/vnd.openxmlformats-officedocument.wordprocessingml.document", extensions: [/\.docx$/] }, { title: "Microsoft Office - OOXML - Word Document Template", MIME: "application/vnd.openxmlformats-officedocument.wordprocessingml.template", extensions: [/\.dotx$/] }, { title: "Microsoft Office Binder", MIME: "application/x-msbinder", extensions: [/\.obd$/] }, { title: "Microsoft Office System Release Theme", MIME: "application/vnd.ms-officetheme", extensions: [/\.thmx$/] }, { title: "Microsoft OneNote", MIME: "application/onenote", extensions: [/\.onetoc$/] }, { title: "Microsoft PlayReady Ecosystem", MIME: "audio/vnd.ms-playready.media.pya", extensions: [/\.pya$/] }, { title: "Microsoft PlayReady Ecosystem Video", MIME: "video/vnd.ms-playready.media.pyv", extensions: [/\.pyv$/] }, { title: "Microsoft PowerPoint", MIME: "application/vnd.ms-powerpoint", extensions: [/\.ppt$/] }, { title: "Microsoft PowerPoint - Add-in file", MIME: "application/vnd.ms-powerpoint.addin.macroenabled.12", extensions: [/\.ppam$/] }, { title: "Microsoft PowerPoint - Macro-Enabled Open XML Slide", MIME: "application/vnd.ms-powerpoint.slide.macroenabled.12", extensions: [/\.sldm$/] }, { title: "Microsoft PowerPoint - Macro-Enabled Presentation File", MIME: "application/vnd.ms-powerpoint.presentation.macroenabled.12", extensions: [/\.pptm$/] }, { title: "Microsoft PowerPoint - Macro-Enabled Slide Show File", MIME: "application/vnd.ms-powerpoint.slideshow.macroenabled.12", extensions: [/\.ppsm$/] }, { title: "Microsoft 
Project", MIME: "application/vnd.ms-project", extensions: [/\.mpp$/] }, { title: "Microsoft Publisher", MIME: "application/x-mspublisher", extensions: [/\.pub$/] }, { title: "Microsoft Schedule+", MIME: "application/x-msschedule", extensions: [/\.scd$/] }, { title: "Microsoft Silverlight", MIME: "application/x-silverlight-app", extensions: [/\.xap$/] }, { title: "Microsoft Trust UI Provider - Certificate Trust Link", MIME: "application/vnd.ms-pki.stl", extensions: [/\.stl$/] }, { title: "Microsoft Trust UI Provider - Security Catalog", MIME: "application/vnd.ms-pki.seccat", extensions: [/\.cat$/] }, { title: "Microsoft Visio", MIME: "application/vnd.visio", extensions: [/\.vsd$/] }, { title: "Microsoft Windows Media", MIME: "video/x-ms-wm", extensions: [/\.wm$/] }, { title: "Microsoft Windows Media Audio", MIME: "audio/x-ms-wma", extensions: [/\.wma$/] }, { title: "Microsoft Windows Media Audio Redirector", MIME: "audio/x-ms-wax", extensions: [/\.wax$/] }, { title: "Microsoft Windows Media Audio/Video Playlist", MIME: "video/x-ms-wmx", extensions: [/\.wmx$/] }, { title: "Microsoft Windows Media Player Download Package", MIME: "application/x-ms-wmd", extensions: [/\.wmd$/] }, { title: "Microsoft Windows Media Player Playlist", MIME: "application/vnd.ms-wpl", extensions: [/\.wpl$/] }, { title: "Microsoft Windows Media Player Skin Package", MIME: "application/x-ms-wmz", extensions: [/\.wmz$/] }, { title: "Microsoft Windows Media Video", MIME: "video/x-ms-wmv", extensions: [/\.wmv$/] }, { title: "Microsoft Windows Media Video Playlist", MIME: "video/x-ms-wvx", extensions: [/\.wvx$/] }, { title: "Microsoft Windows Metafile", MIME: "application/x-msmetafile", extensions: [/\.wmf$/] }, { title: "Microsoft Windows Terminal Services", MIME: "application/x-msterminal", extensions: [/\.trm$/] }, { title: "Microsoft Word", MIME: "application/msword", extensions: [/\.doc$/] }, { title: "Microsoft Wordpad", MIME: "application/x-mswrite", extensions: [/\.wri$/] }, { title: 
"Microsoft Works", MIME: "application/vnd.ms-works", extensions: [/\.wps$/] }, { title: "Microsoft XAML Browser Application", MIME: "application/x-ms-xbap", extensions: [/\.xbap$/] }, { title: "Microsoft XML Paper Specification", MIME: "application/vnd.ms-xpsdocument", extensions: [/\.xps$/] }, { title: "MIDI - Musical Instrument Digital Interface", MIME: "audio/midi", extensions: [/\.mid$/] }, { title: "MiniPay", MIME: "application/vnd.ibm.minipay", extensions: [/\.mpy$/] }, { title: "MO:DCA-P", MIME: "application/vnd.ibm.modcap", extensions: [/\.afp$/] }, { title: "Mobile Information Device Profile", MIME: "application/vnd.jcp.javame.midlet-rms", extensions: [/\.rms$/] }, { title: "MobileTV", MIME: "application/vnd.tmobile-livetv", extensions: [/\.tmo$/] }, { title: "Mobipocket", MIME: "application/x-mobipocket-ebook", extensions: [/\.prc$/] }, { title: "Mobius Management Systems - Basket file", MIME: "application/vnd.mobius.mbk", extensions: [/\.mbk$/] }, { title: "Mobius Management Systems - Distribution Database", MIME: "application/vnd.mobius.dis", extensions: [/\.dis$/] }, { title: "Mobius Management Systems - Policy Definition Language File", MIME: "application/vnd.mobius.plc", extensions: [/\.plc$/] }, { title: "Mobius Management Systems - Query File", MIME: "application/vnd.mobius.mqy", extensions: [/\.mqy$/] }, { title: "Mobius Management Systems - Script Language", MIME: "application/vnd.mobius.msl", extensions: [/\.msl$/] }, { title: "Mobius Management Systems - Topic Index File", MIME: "application/vnd.mobius.txf", extensions: [/\.txf$/] }, { title: "Mobius Management Systems - UniversalArchive", MIME: "application/vnd.mobius.daf", extensions: [/\.daf$/] }, { title: "mod_fly / fly.cgi", MIME: "text/vnd.fly", extensions: [/\.fly$/] }, { title: "Mophun Certificate", MIME: "application/vnd.mophun.certificate", extensions: [/\.mpc$/] }, { title: "Mophun VM", MIME: "application/vnd.mophun.application", extensions: [/\.mpn$/] }, { title: "Motion JPEG 2000", 
MIME: "video/mj2", extensions: [/\.mj2$/] }, { title: "MPEG Audio", MIME: "audio/mpeg", extensions: [/\.mpga$/] }, { title: "MPEG Url", MIME: "video/vnd.mpegurl", extensions: [/\.mxu$/] }, { title: "MPEG Video", MIME: "video/mpeg", extensions: [/\.mpeg$/] }, { title: "MPEG-21", MIME: "application/mp21", extensions: [/\.m21$/] }, { title: "MPEG-4 Audio", MIME: "audio/mp4", extensions: [/\.mp4a$/] }, { title: "MPEG-4 Video", MIME: "video/mp4", extensions: [/\.mp4$/] }, { title: "MPEG4", MIME: "application/mp4", extensions: [/\.mp4$/] }, { title: "Multimedia Playlist Unicode", MIME: "application/vnd.apple.mpegurl", extensions: [/\.m3u8$/] }, { title: "MUsical Score Interpreted Code Invented for the ASCII designation of Notation", MIME: "application/vnd.musician", extensions: [/\.mus$/] }, { title: "Muvee Automatic Video Editing", MIME: "application/vnd.muvee.style", extensions: [/\.msty$/] }, { title: "MXML", MIME: "application/xv+xml", extensions: [/\.mxml$/] }, { title: "N-Gage Game Data", MIME: "application/vnd.nokia.n-gage.data", extensions: [/\.ngdat$/] }, { title: "N-Gage Game Installer", MIME: "application/vnd.nokia.n-gage.symbian.install", extensions: [/\.n-gage$/] }, { title: "Navigation Control file for XML (for ePub)", MIME: "application/x-dtbncx+xml", extensions: [/\.ncx$/] }, { title: "Network Common Data Form (NetCDF)", MIME: "application/x-netcdf", extensions: [/\.nc$/] }, { title: "neuroLanguage", MIME: "application/vnd.neurolanguage.nlu", extensions: [/\.nlu$/] }, { title: "New Moon Liftoff/DNA", MIME: "application/vnd.dna", extensions: [/\.dna$/] }, { title: "NobleNet Directory", MIME: "application/vnd.noblenet-directory", extensions: [/\.nnd$/] }, { title: "NobleNet Sealer", MIME: "application/vnd.noblenet-sealer", extensions: [/\.nns$/] }, { title: "NobleNet Web", MIME: "application/vnd.noblenet-web", extensions: [/\.nnw$/] }, { title: "Nokia Radio Application - Preset", MIME: "application/vnd.nokia.radio-preset", extensions: [/\.rpst$/] }, { 
title: "Nokia Radio Application - Preset", MIME: "application/vnd.nokia.radio-presets", extensions: [/\.rpss$/] }, { title: "Notation3", MIME: "text/n3", extensions: [/\.n3$/] }, { title: "Novadigm's RADIA and EDM products", MIME: "application/vnd.novadigm.edm", extensions: [/\.edm$/] }, { title: "Novadigm's RADIA and EDM products", MIME: "application/vnd.novadigm.edx", extensions: [/\.edx$/] }, { title: "Novadigm's RADIA and EDM products", MIME: "application/vnd.novadigm.ext", extensions: [/\.ext$/] }, { title: "NpGraphIt", MIME: "application/vnd.flographit", extensions: [/\.gph$/] }, { title: "Nuera ECELP 4800", MIME: "audio/vnd.nuera.ecelp4800", extensions: [/\.ecelp4800$/] }, { title: "Nuera ECELP 7470", MIME: "audio/vnd.nuera.ecelp7470", extensions: [/\.ecelp7470$/] }, { title: "Nuera ECELP 9600", MIME: "audio/vnd.nuera.ecelp9600", extensions: [/\.ecelp9600$/] }, { title: "Office Document Architecture", MIME: "application/oda", extensions: [/\.oda$/] }, { title: "Ogg", MIME: "application/ogg", extensions: [/\.ogx$/] }, { title: "Ogg Audio", MIME: "audio/ogg", extensions: [/\.oga$/] }, { title: "Ogg Video", MIME: "video/ogg", extensions: [/\.ogv$/] }, { title: "OMA Download Agents", MIME: "application/vnd.oma.dd2+xml", extensions: [/\.dd2$/] }, { title: "Open Document Text Web", MIME: "application/vnd.oasis.opendocument.text-web", extensions: [/\.oth$/] }, { title: "Open eBook Publication Structure", MIME: "application/oebps-package+xml", extensions: [/\.opf$/] }, { title: "Open Financial Exchange", MIME: "application/vnd.intu.qbo", extensions: [/\.qbo$/] }, { title: "Open Office Extension", MIME: "application/vnd.openofficeorg.extension", extensions: [/\.oxt$/] }, { title: "Open Score Format", MIME: "application/vnd.yamaha.openscoreformat", extensions: [/\.osf$/] }, { title: "Open Web Media Project - Audio", MIME: "audio/webm", extensions: [/\.weba$/] }, { title: "Open Web Media Project - Video", MIME: "video/webm", extensions: [/\.webm$/] }, { title: 
"OpenDocument Chart", MIME: "application/vnd.oasis.opendocument.chart", extensions: [/\.odc$/] }, { title: "OpenDocument Chart Template", MIME: "application/vnd.oasis.opendocument.chart-template", extensions: [/\.otc$/] }, { title: "OpenDocument Database", MIME: "application/vnd.oasis.opendocument.database", extensions: [/\.odb$/] }, { title: "OpenDocument Formula", MIME: "application/vnd.oasis.opendocument.formula", extensions: [/\.odf$/] }, { title: "OpenDocument Formula Template", MIME: "application/vnd.oasis.opendocument.formula-template", extensions: [/\.odft$/] }, { title: "OpenDocument Graphics", MIME: "application/vnd.oasis.opendocument.graphics", extensions: [/\.odg$/] }, { title: "OpenDocument Graphics Template", MIME: "application/vnd.oasis.opendocument.graphics-template", extensions: [/\.otg$/] }, { title: "OpenDocument Image", MIME: "application/vnd.oasis.opendocument.image", extensions: [/\.odi$/] }, { title: "OpenDocument Image Template", MIME: "application/vnd.oasis.opendocument.image-template", extensions: [/\.oti$/] }, { title: "OpenDocument Presentation", MIME: "application/vnd.oasis.opendocument.presentation", extensions: [/\.odp$/] }, { title: "OpenDocument Presentation Template", MIME: "application/vnd.oasis.opendocument.presentation-template", extensions: [/\.otp$/] }, { title: "OpenDocument Spreadsheet", MIME: "application/vnd.oasis.opendocument.spreadsheet", extensions: [/\.ods$/] }, { title: "OpenDocument Spreadsheet Template", MIME: "application/vnd.oasis.opendocument.spreadsheet-template", extensions: [/\.ots$/] }, { title: "OpenDocument Text", MIME: "application/vnd.oasis.opendocument.text", extensions: [/\.odt$/] }, { title: "OpenDocument Text Master", MIME: "application/vnd.oasis.opendocument.text-master", extensions: [/\.odm$/] }, { title: "OpenDocument Text Template", MIME: "application/vnd.oasis.opendocument.text-template", extensions: [/\.ott$/] }, { title: "OpenGL Textures (KTX)", MIME: "image/ktx", extensions: [/\.ktx$/] }, { 
title: "OpenOffice - Calc (Spreadsheet)", MIME: "application/vnd.sun.xml.calc", extensions: [/\.sxc$/] }, { title: "OpenOffice - Calc Template (Spreadsheet)", MIME: "application/vnd.sun.xml.calc.template", extensions: [/\.stc$/] }, { title: "OpenOffice - Draw (Graphics)", MIME: "application/vnd.sun.xml.draw", extensions: [/\.sxd$/] }, { title: "OpenOffice - Draw Template (Graphics)", MIME: "application/vnd.sun.xml.draw.template", extensions: [/\.std$/] }, { title: "OpenOffice - Impress (Presentation)", MIME: "application/vnd.sun.xml.impress", extensions: [/\.sxi$/] }, { title: "OpenOffice - Impress Template (Presentation)", MIME: "application/vnd.sun.xml.impress.template", extensions: [/\.sti$/] }, { title: "OpenOffice - Math (Formula)", MIME: "application/vnd.sun.xml.math", extensions: [/\.sxm$/] }, { title: "OpenOffice - Writer (Text - HTML)", MIME: "application/vnd.sun.xml.writer", extensions: [/\.sxw$/] }, { title: "OpenOffice - Writer (Text - HTML)", MIME: "application/vnd.sun.xml.writer.global", extensions: [/\.sxg$/] }, { title: "OpenOffice - Writer Template (Text - HTML)", MIME: "application/vnd.sun.xml.writer.template", extensions: [/\.stw$/] }, { title: "OpenType Font File", MIME: "application/x-font-otf", extensions: [/\.otf$/] }, { title: "OSFPVG", MIME: "application/vnd.yamaha.openscoreformat.osfpvg+xml", extensions: [/\.osfpvg$/] }, { title: "OSGi Deployment Package", MIME: "application/vnd.osgi.dp", extensions: [/\.dp$/] }, { title: "PalmOS Data", MIME: "application/vnd.palm", extensions: [/\.pdb$/] }, { title: "Pascal Source File", MIME: "text/x-pascal", extensions: [/\.p$/] }, { title: "PawaaFILE", MIME: "application/vnd.pawaafile", extensions: [/\.paw$/] }, { title: "PCL 6 Enhanced (Formely PCL XL)", MIME: "application/vnd.hp-pclxl", extensions: [/\.pclxl$/] }, { title: "Pcsel eFIF File", MIME: "application/vnd.picsel", extensions: [/\.efif$/] }, { title: "PCX Image", MIME: "image/x-pcx", extensions: [/\.pcx$/] }, { title: "Photoshop Document", 
MIME: "image/vnd.adobe.photoshop", extensions: [/\.psd$/] }, { title: "PICSRules", MIME: "application/pics-rules", extensions: [/\.prf$/] }, { title: "PICT Image", MIME: "image/x-pict", extensions: [/\.pic$/] }, { title: "pIRCh", MIME: "application/x-chat", extensions: [/\.chat$/] }, { title: "PKCS #10 - Certification Request Standard", MIME: "application/pkcs10", extensions: [/\.p10$/] }, { title: "PKCS #12 - Personal Information Exchange Syntax Standard", MIME: "application/x-pkcs12", extensions: [/\.p12$/] }, { title: "PKCS #7 - Cryptographic Message Syntax Standard", MIME: "application/pkcs7-mime", extensions: [/\.p7m$/] }, { title: "PKCS #7 - Cryptographic Message Syntax Standard", MIME: "application/pkcs7-signature", extensions: [/\.p7s$/] }, { title: "PKCS #7 - Cryptographic Message Syntax Standard (Certificate Request Response)", MIME: "application/x-pkcs7-certreqresp", extensions: [/\.p7r$/] }, { title: "PKCS #7 - Cryptographic Message Syntax Standard (Certificates)", MIME: "application/x-pkcs7-certificates", extensions: [/\.p7b$/] }, { title: "PKCS #8 - Private-Key Information Syntax Standard", MIME: "application/pkcs8", extensions: [/\.p8$/] }, { title: "PocketLearn Viewers", MIME: "application/vnd.pocketlearn", extensions: [/\.plf$/] }, { title: "Portable Anymap Image", MIME: "image/x-portable-anymap", extensions: [/\.pnm$/] }, { title: "Portable Bitmap Format", MIME: "image/x-portable-bitmap", extensions: [/\.pbm$/] }, { title: "Portable Compiled Format", MIME: "application/x-font-pcf", extensions: [/\.pcf$/] }, { title: "Portable Font Resource", MIME: "application/font-tdpfr", extensions: [/\.pfr$/] }, { title: "Portable Game Notation (Chess Games)", MIME: "application/x-chess-pgn", extensions: [/\.pgn$/] }, { title: "Portable Graymap Format", MIME: "image/x-portable-graymap", extensions: [/\.pgm$/] }, { title: "Portable Network Graphics (PNG)", MIME: "image/png", extensions: [/\.png$/] }, { title: "Portable Pixmap Format", MIME: 
"image/x-portable-pixmap", extensions: [/\.ppm$/] }, { title: "Portable Symmetric Key Container", MIME: "application/pskc+xml", extensions: [/\.pskcxml$/] }, { title: "PosML", MIME: "application/vnd.ctc-posml", extensions: [/\.pml$/] }, { title: "PostScript", MIME: "application/postscript", extensions: [/\.ai$/] }, { title: "PostScript Fonts", MIME: "application/x-font-type1", extensions: [/\.pfa$/] }, { title: "PowerBuilder", MIME: "application/vnd.powerbuilder6", extensions: [/\.pbd$/] }, { title: "Pretty Good Privacy", MIME: "application/pgp-encrypted", extensions: [/\$/] }, { title: "Pretty Good Privacy - Signature", MIME: "application/pgp-signature", extensions: [/\.pgp$/] }, { title: "Preview Systems ZipLock/VBox", MIME: "application/vnd.previewsystems.box", extensions: [/\.box$/] }, { title: "Princeton Video Image", MIME: "application/vnd.pvi.ptid1", extensions: [/\.ptid$/] }, { title: "Pronunciation Lexicon Specification", MIME: "application/pls+xml", extensions: [/\.pls$/] }, { title: "Proprietary P&G Standard Reporting System", MIME: "application/vnd.pg.format", extensions: [/\.str$/] }, { title: "Proprietary P&G Standard Reporting System", MIME: "application/vnd.pg.osasli", extensions: [/\.ei6$/] }, { title: "PRS Lines Tag", MIME: "text/prs.lines.tag", extensions: [/\.dsc$/] }, { title: "PSF Fonts", MIME: "application/x-font-linux-psf", extensions: [/\.psf$/] }, { title: "PubliShare Objects", MIME: "application/vnd.publishare-delta-tree", extensions: [/\.qps$/] }, { title: "Qualcomm's Plaza Mobile Internet", MIME: "application/vnd.pmi.widget", extensions: [/\.wg$/] }, { title: "QuarkXpress", MIME: "application/vnd.quark.quarkxpress", extensions: [/\.qxd$/] }, { title: "QUASS Stream Player", MIME: "application/vnd.epson.esf", extensions: [/\.esf$/] }, { title: "QUASS Stream Player", MIME: "application/vnd.epson.msf", extensions: [/\.msf$/] }, { title: "QUASS Stream Player", MIME: "application/vnd.epson.ssf", extensions: [/\.ssf$/] }, { title: "QuickAnime 
Player", MIME: "application/vnd.epson.quickanime", extensions: [/\.qam$/] }, { title: "Quicken", MIME: "application/vnd.intu.qfx", extensions: [/\.qfx$/] }, { title: "Quicktime Video", MIME: "video/quicktime", extensions: [/\.qt$/] }, { title: "RAR Archive", MIME: "application/x-rar-compressed", extensions: [/\.rar$/] }, { title: "Real Audio Sound", MIME: "audio/x-pn-realaudio", extensions: [/\.ram$/] }, { title: "Real Audio Sound", MIME: "audio/x-pn-realaudio-plugin", extensions: [/\.rmp$/] }, { title: "Really Simple Discovery", MIME: "application/rsd+xml", extensions: [/\.rsd$/] }, { title: "RealMedia", MIME: "application/vnd.rn-realmedia", extensions: [/\.rm$/] }, { title: "RealVNC", MIME: "application/vnd.realvnc.bed", extensions: [/\.bed$/] }, { title: "Recordare Applications", MIME: "application/vnd.recordare.musicxml", extensions: [/\.mxl$/] }, { title: "Recordare Applications", MIME: "application/vnd.recordare.musicxml+xml", extensions: [/\.musicxml$/] }, { title: "Relax NG Compact Syntax", MIME: "application/relax-ng-compact-syntax", extensions: [/\.rnc$/] }, { title: "RemoteDocs R-Viewer", MIME: "application/vnd.data-vision.rdz", extensions: [/\.rdz$/] }, { title: "Resource Description Framework", MIME: "application/rdf+xml", extensions: [/\.rdf$/] }, { title: "RetroPlatform Player", MIME: "application/vnd.cloanto.rp9", extensions: [/\.rp9$/] }, { title: "RhymBox", MIME: "application/vnd.jisp", extensions: [/\.jisp$/] }, { title: "Rich Text Format", MIME: "application/rtf", extensions: [/\.rtf$/] }, { title: "Rich Text Format (RTF)", MIME: "text/richtext", extensions: [/\.rtx$/] }, { title: "ROUTE 66 Location Based Services", MIME: "application/vnd.route66.link66+xml", extensions: [/\.link66$/] }, { title: "RSS - Really Simple Syndication", MIME: "application/rss+xml", extensions: [/\.rss$/] }, { title: "S Hexdump Format", MIME: "application/shf+xml", extensions: [/\.shf$/] }, { title: "SailingTracker", MIME: "application/vnd.sailingtracker.track", 
extensions: [/\.st$/] }, { title: "Scalable Vector Graphics (SVG)", MIME: "image/svg+xml", extensions: [/\.svg$/] }, { title: "ScheduleUs", MIME: "application/vnd.sus-calendar", extensions: [/\.sus$/] }, { title: "Search/Retrieve via URL Response Format", MIME: "application/sru+xml", extensions: [/\.sru$/] }, { title: "Secure Electronic Transaction - Payment", MIME: "application/set-payment-initiation", extensions: [/\.setpay$/] }, { title: "Secure Electronic Transaction - Registration", MIME: "application/set-registration-initiation", extensions: [/\.setreg$/] }, { title: "Secured eMail", MIME: "application/vnd.sema", extensions: [/\.sema$/] }, { title: "Secured eMail", MIME: "application/vnd.semd", extensions: [/\.semd$/] }, { title: "Secured eMail", MIME: "application/vnd.semf", extensions: [/\.semf$/] }, { title: "SeeMail", MIME: "application/vnd.seemail", extensions: [/\.see$/] }, { title: "Server Normal Format", MIME: "application/x-font-snf", extensions: [/\.snf$/] }, { title: "Server-Based Certificate Validation Protocol - Validation Policies - Request", MIME: "application/scvp-vp-request", extensions: [/\.spq$/] }, { title: "Server-Based Certificate Validation Protocol - Validation Policies - Response", MIME: "application/scvp-vp-response", extensions: [/\.spp$/] }, { title: "Server-Based Certificate Validation Protocol - Validation Request", MIME: "application/scvp-cv-request", extensions: [/\.scq$/] }, { title: "Server-Based Certificate Validation Protocol - Validation Response", MIME: "application/scvp-cv-response", extensions: [/\.scs$/] }, { title: "Session Description Protocol", MIME: "application/sdp", extensions: [/\.sdp$/] }, { title: "Setext", MIME: "text/x-setext", extensions: [/\.etx$/] }, { title: "SGI Movie", MIME: "video/x-sgi-movie", extensions: [/\.movie$/] }, { title: "Shana Informed Filler", MIME: "application/vnd.shana.informed.formdata", extensions: [/\.ifm$/] }, { title: "Shana Informed Filler", MIME: 
"application/vnd.shana.informed.formtemplate", extensions: [/\.itp$/] }, { title: "Shana Informed Filler", MIME: "application/vnd.shana.informed.interchange", extensions: [/\.iif$/] }, { title: "Shana Informed Filler", MIME: "application/vnd.shana.informed.package", extensions: [/\.ipk$/] }, { title: "Sharing Transaction Fraud Data", MIME: "application/thraud+xml", extensions: [/\.tfi$/] }, { title: "Shell Archive", MIME: "application/x-shar", extensions: [/\.shar$/] }, { title: "Silicon Graphics RGB Bitmap", MIME: "image/x-rgb", extensions: [/\.rgb$/] }, { title: "SimpleAnimeLite Player", MIME: "application/vnd.epson.salt", extensions: [/\.slt$/] }, { title: "Simply Accounting", MIME: "application/vnd.accpac.simply.aso", extensions: [/\.aso$/] }, { title: "Simply Accounting - Data Import", MIME: "application/vnd.accpac.simply.imp", extensions: [/\.imp$/] }, { title: "SimTech MindMapper", MIME: "application/vnd.simtech-mindmapper", extensions: [/\.twd$/] }, { title: "Sixth Floor Media - CommonSpace", MIME: "application/vnd.commonspace", extensions: [/\.csp$/] }, { title: "SMAF Audio", MIME: "application/vnd.yamaha.smaf-audio", extensions: [/\.saf$/] }, { title: "SMAF File", MIME: "application/vnd.smaf", extensions: [/\.mmf$/] }, { title: "SMAF Phrase", MIME: "application/vnd.yamaha.smaf-phrase", extensions: [/\.spf$/] }, { title: "SMART Technologies Apps", MIME: "application/vnd.smart.teacher", extensions: [/\.teacher$/] }, { title: "SourceView Document", MIME: "application/vnd.svd", extensions: [/\.svd$/] }, { title: "SPARQL - Query", MIME: "application/sparql-query", extensions: [/\.rq$/] }, { title: "SPARQL - Results", MIME: "application/sparql-results+xml", extensions: [/\.srx$/] }, { title: "Speech Recognition Grammar Specification", MIME: "application/srgs", extensions: [/\.gram$/] }, { title: "Speech Recognition Grammar Specification - XML", MIME: "application/srgs+xml", extensions: [/\.grxml$/] }, { title: "Speech Synthesis Markup Language", MIME: 
"application/ssml+xml", extensions: [/\.ssml$/] }, { title: "SSEYO Koan Play File", MIME: "application/vnd.koan", extensions: [/\.skp$/] }, { title: "Standard Generalized Markup Language (SGML)", MIME: "text/sgml", extensions: [/\.sgml$/] }, { title: "StarOffice - Calc", MIME: "application/vnd.stardivision.calc", extensions: [/\.sdc$/] }, { title: "StarOffice - Draw", MIME: "application/vnd.stardivision.draw", extensions: [/\.sda$/] }, { title: "StarOffice - Impress", MIME: "application/vnd.stardivision.impress", extensions: [/\.sdd$/] }, { title: "StarOffice - Math", MIME: "application/vnd.stardivision.math", extensions: [/\.smf$/] }, { title: "StarOffice - Writer", MIME: "application/vnd.stardivision.writer", extensions: [/\.sdw$/] }, { title: "StarOffice - Writer (Global)", MIME: "application/vnd.stardivision.writer-global", extensions: [/\.sgl$/] }, { title: "StepMania", MIME: "application/vnd.stepmania.stepchart", extensions: [/\.sm$/] }, { title: "Stuffit Archive", MIME: "application/x-stuffit", extensions: [/\.sit$/] }, { title: "Stuffit Archive", MIME: "application/x-stuffitx", extensions: [/\.sitx$/] }, { title: "SudokuMagic", MIME: "application/vnd.solent.sdkm+xml", extensions: [/\.sdkm$/] }, { title: "Sugar Linux Application Bundle", MIME: "application/vnd.olpc-sugar", extensions: [/\.xo$/] }, { title: "Sun Audio - Au file format", MIME: "audio/basic", extensions: [/\.au$/] }, { title: "SundaHus WQ", MIME: "application/vnd.wqd", extensions: [/\.wqd$/] }, { title: "Symbian Install Package", MIME: "application/vnd.symbian.install", extensions: [/\.sis$/] }, { title: "Synchronized Multimedia Integration Language", MIME: "application/smil+xml", extensions: [/\.smi$/] }, { title: "SyncML", MIME: "application/vnd.syncml+xml", extensions: [/\.xsm$/] }, { title: "SyncML - Device Management", MIME: "application/vnd.syncml.dm+wbxml", extensions: [/\.bdm$/] }, { title: "SyncML - Device Management", MIME: "application/vnd.syncml.dm+xml", extensions: [/\.xdm$/] }, { 
title: "System V Release 4 CPIO Archive", MIME: "application/x-sv4cpio", extensions: [/\.sv4cpio$/] }, { title: "System V Release 4 CPIO Checksum Data", MIME: "application/x-sv4crc", extensions: [/\.sv4crc$/] }, { title: "Systems Biology Markup Language", MIME: "application/sbml+xml", extensions: [/\.sbml$/] }, { title: "Tab Seperated Values", MIME: "text/tab-separated-values", extensions: [/\.tsv$/] }, { title: "Tagged Image File Format", MIME: "image/tiff", extensions: [/\.tiff$/] }, { title: "Tao Intent", MIME: "application/vnd.tao.intent-module-archive", extensions: [/\.tao$/] }, { title: "Tar File (Tape Archive)", MIME: "application/x-tar", extensions: [/\.tar$/] }, { title: "Tcl Script", MIME: "application/x-tcl", extensions: [/\.tcl$/] }, { title: "TeX", MIME: "application/x-tex", extensions: [/\.tex$/] }, { title: "TeX Font Metric", MIME: "application/x-tex-tfm", extensions: [/\.tfm$/] }, { title: "Text Encoding and Interchange", MIME: "application/tei+xml", extensions: [/\.tei$/] }, { title: "Text File", MIME: "text/plain", extensions: [/\.txt$/] }, { title: "TIBCO Spotfire", MIME: "application/vnd.spotfire.dxp", extensions: [/\.dxp$/] }, { title: "TIBCO Spotfire", MIME: "application/vnd.spotfire.sfs", extensions: [/\.sfs$/] }, { title: "Time Stamped Data Envelope", MIME: "application/timestamped-data", extensions: [/\.tsd$/] }, { title: "TRI Systems Config", MIME: "application/vnd.trid.tpt", extensions: [/\.tpt$/] }, { title: "Triscape Map Explorer", MIME: "application/vnd.triscape.mxs", extensions: [/\.mxs$/] }, { title: "troff", MIME: "text/troff", extensions: [/\.t$/] }, { title: "True BASIC", MIME: "application/vnd.trueapp", extensions: [/\.tra$/] }, { title: "TrueType Font", MIME: "application/x-font-ttf", extensions: [/\.ttf$/] }, { title: "Turtle (Terse RDF Triple Language)", MIME: "text/turtle", extensions: [/\.ttl$/] }, { title: "UMAJIN", MIME: "application/vnd.umajin", extensions: [/\.umj$/] }, { title: "Unique Object Markup Language", MIME: 
"application/vnd.uoml+xml", extensions: [/\.uoml$/] }, { title: "Unity 3d", MIME: "application/vnd.unity", extensions: [/\.unityweb$/] }, { title: "Universal Forms Description Language", MIME: "application/vnd.ufdl", extensions: [/\.ufd$/] }, { title: "URI Resolution Services", MIME: "text/uri-list", extensions: [/\.uri$/] }, { title: "User Interface Quartz - Theme (Symbian)", MIME: "application/vnd.uiq.theme", extensions: [/\.utz$/] }, { title: "Ustar (Uniform Standard Tape Archive)", MIME: "application/x-ustar", extensions: [/\.ustar$/] }, { title: "UUEncode", MIME: "text/x-uuencode", extensions: [/\.uu$/] }, { title: "vCalendar", MIME: "text/x-vcalendar", extensions: [/\.vcs$/] }, { title: "vCard", MIME: "text/x-vcard", extensions: [/\.vcf$/] }, { title: "Video CD", MIME: "application/x-cdlink", extensions: [/\.vcd$/] }, { title: "Viewport+", MIME: "application/vnd.vsf", extensions: [/\.vsf$/] }, { title: "Virtual Reality Modeling Language", MIME: "model/vrml", extensions: [/\.wrl$/] }, { title: "VirtualCatalog", MIME: "application/vnd.vcx", extensions: [/\.vcx$/] }, { title: "Virtue MTS", MIME: "model/vnd.mts", extensions: [/\.mts$/] }, { title: "Virtue VTU", MIME: "model/vnd.vtu", extensions: [/\.vtu$/] }, { title: "Visionary", MIME: "application/vnd.visionary", extensions: [/\.vis$/] }, { title: "Vivo", MIME: "video/vnd.vivo", extensions: [/\.viv$/] }, { title: "Voice Browser Call Control", MIME: "application/ccxml+xml,", extensions: [/\.ccxml$/] }, { title: "VoiceXML", MIME: "application/voicexml+xml", extensions: [/\.vxml$/] }, { title: "WAIS Source", MIME: "application/x-wais-source", extensions: [/\.src$/] }, { title: "WAP Binary XML (WBXML)", MIME: "application/vnd.wap.wbxml", extensions: [/\.wbxml$/] }, { title: "WAP Bitamp (WBMP)", MIME: "image/vnd.wap.wbmp", extensions: [/\.wbmp$/] }, { title: "Waveform Audio File Format (WAV)", MIME: "audio/x-wav", extensions: [/\.wav$/] }, { title: "Web Distributed Authoring and Versioning", MIME: 
"application/davmount+xml", extensions: [/\.davmount$/] }, { title: "Web Open Font Format", MIME: "application/x-font-woff", extensions: [/\.woff$/] }, { title: "Web Services Policy", MIME: "application/wspolicy+xml", extensions: [/\.wspolicy$/] }, { title: "WebP Image", MIME: "image/webp", extensions: [/\.webp$/] }, { title: "WebTurbo", MIME: "application/vnd.webturbo", extensions: [/\.wtb$/] }, { title: "Widget Packaging and XML Configuration", MIME: "application/widget", extensions: [/\.wgt$/] }, { title: "WinHelp", MIME: "application/winhlp", extensions: [/\.hlp$/] }, { title: "Wireless Markup Language (WML)", MIME: "text/vnd.wap.wml", extensions: [/\.wml$/] }, { title: "Wireless Markup Language Script (WMLScript)", MIME: "text/vnd.wap.wmlscript", extensions: [/\.wmls$/] }, { title: "WMLScript", MIME: "application/vnd.wap.wmlscriptc", extensions: [/\.wmlsc$/] }, { title: "Wordperfect", MIME: "application/vnd.wordperfect", extensions: [/\.wpd$/] }, { title: "Worldtalk", MIME: "application/vnd.wt.stf", extensions: [/\.stf$/] }, { title: "WSDL - Web Services Description Language", MIME: "application/wsdl+xml", extensions: [/\.wsdl$/] }, { title: "X BitMap", MIME: "image/x-xbitmap", extensions: [/\.xbm$/] }, { title: "X PixMap", MIME: "image/x-xpixmap", extensions: [/\.xpm$/] }, { title: "X Window Dump", MIME: "image/x-xwindowdump", extensions: [/\.xwd$/] }, { title: "X.509 Certificate", MIME: "application/x-x509-ca-cert", extensions: [/\.der$/] }, { title: "Xfig", MIME: "application/x-xfig", extensions: [/\.fig$/] }, { title: "XHTML - The Extensible HyperText Markup Language", MIME: "application/xhtml+xml", extensions: [/\.xhtml$/] }, { title: "XML - Extensible Markup Language", MIME: "application/xml", extensions: [/\.xml$/] }, { title: "XML Configuration Access Protocol - XCAP Diff", MIME: "application/xcap-diff+xml", extensions: [/\.xdf$/] }, { title: "XML Encryption Syntax and Processing", MIME: "application/xenc+xml", extensions: [/\.xenc$/] }, { title: "XML 
Patch Framework", MIME: "application/patch-ops-error+xml", extensions: [/\.xer$/] }, { title: "XML Resource Lists", MIME: "application/resource-lists+xml", extensions: [/\.rl$/] }, { title: "XML Resource Lists", MIME: "application/rls-services+xml", extensions: [/\.rs$/] }, { title: "XML Resource Lists Diff", MIME: "application/resource-lists-diff+xml", extensions: [/\.rld$/] }, { title: "XML Transformations", MIME: "application/xslt+xml", extensions: [/\.xslt$/] }, { title: "XML-Binary Optimized Packaging", MIME: "application/xop+xml", extensions: [/\.xop$/] }, { title: "XPInstall - Mozilla", MIME: "application/x-xpinstall", extensions: [/\.xpi$/] }, { title: "XSPF - XML Shareable Playlist Format", MIME: "application/xspf+xml", extensions: [/\.xspf$/] }, { title: "XUL - XML User Interface Language", MIME: "application/vnd.mozilla.xul+xml", extensions: [/\.xul$/] }, { title: "XYZ File Format", MIME: "chemical/x-xyz", extensions: [/\.xyz$/] }, { title: "YAML Ain't Markup Language / Yet Another Markup Language", MIME: "text/yaml", extensions: [/\.yaml$/] }, { title: "YANG Data Modeling Language", MIME: "application/yang", extensions: [/\.yang$/] }, { title: "YIN (YANG - XML)", MIME: "application/yin+xml", extensions: [/\.yin$/] }, { title: "Z.U.L. Geometry", MIME: "application/vnd.zul", extensions: [/\.zir$/] }, { title: "Zip Archive", MIME: "application/zip", extensions: [/\.zip$/] }, { title: "ZVUE Media Manager", MIME: "application/vnd.handheld-entertainment+xml", extensions: [/\.zmm$/] }, { title: "Zzazz Deck", MIME: "application/vnd.zzazz.deck+xml", extensions: [/\.zaz$/] } ]; })(mime = exports.mime || (exports.mime = {})); //# sourceMappingURL=mime.sys.js.map
"""Unit tests for Cheetah's class-level ``Template`` API
(``Template.compile``, ``subclass``, preprocessors, decorators, and
multiple-inheritance support).

NOTE(review): this module was recovered from a whitespace-mangled copy in
which every newline had been collapsed to a space, leaving the file
syntactically invalid.  The statement sequence below is faithful to the
mangled text, but the exact indentation inside the triple-quoted Cheetah
source literals is a best-effort reconstruction -- confirm against the
upstream Cheetah test suite if any whitespace-sensitive test misbehaves.
"""
import os
import os.path
import sys
import shutil
import tempfile
import unittest

from Cheetah.Template import Template
from Cheetah.compat import unicode


class TemplateTest(unittest.TestCase):
    # Shared base class so every test case in this module can be
    # selected/extended as a group.
    pass


class ClassMethods_compile(TemplateTest):
    """I am using the same Cheetah source for each test to root out clashes
    caused by the compile caching in Template.compile().
    """

    def test_basicUsage(self):
        klass = Template.compile(source='$foo')
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'

    def test_baseclassArg(self):
        # baseclass may be a real class object ...
        klass = Template.compile(source='$foo', baseclass=dict)
        t = klass({'foo': 1234})
        assert str(t) == '1234'

        # ... or a previously compiled template class ...
        klass2 = Template.compile(source='$foo', baseclass=klass)
        t = klass2({'foo': 1234})
        assert str(t) == '1234'

        klass3 = Template.compile(source='#implements dummy\n$bar',
                                  baseclass=klass2)
        t = klass3({'foo': 1234})
        assert str(t) == '1234'

        # ... or the *name* of a class, resolved at compile time.
        klass4 = Template.compile(source='$foo', baseclass='dict')
        t = klass4({'foo': 1234})
        assert str(t) == '1234'

    def test_moduleFileCaching(self):
        # When cacheModuleFilesForTracebacks is on, the generated module
        # must be written into cacheDirForModuleFiles.
        tmpDir = tempfile.mkdtemp()
        try:
            # print tmpDir
            assert os.path.exists(tmpDir)
            klass = Template.compile(source='$foo',
                                     cacheModuleFilesForTracebacks=True,
                                     cacheDirForModuleFiles=tmpDir)
            mod = sys.modules[klass.__module__]
            # print mod.__file__
            assert os.path.exists(mod.__file__)
            assert os.path.dirname(mod.__file__) == tmpDir
        finally:
            # Second arg True == ignore_errors, so cleanup never masks a
            # test failure.
            shutil.rmtree(tmpDir, True)

    def test_classNameArg(self):
        klass = Template.compile(source='$foo', className='foo123')
        assert klass.__name__ == 'foo123'
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'

    def test_moduleNameArg(self):
        # moduleName alone also names the class ...
        klass = Template.compile(source='$foo', moduleName='foo99')
        assert klass.__name__ == 'foo99'
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'

        # ... but an explicit className wins.
        klass = Template.compile(source='$foo',
                                 moduleName='foo1',
                                 className='foo2')
        assert klass.__name__ == 'foo2'
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'

    def test_mainMethodNameArg(self):
        klass = Template.compile(source='$foo',
                                 className='foo123',
                                 mainMethodName='testMeth')
        assert klass.__name__ == 'foo123'
        t = klass(namespaces={'foo': 1234})
        # print t.generatedClassCode()
        assert str(t) == '1234'
        assert t.testMeth() == '1234'

        klass = Template.compile(source='$foo',
                                 moduleName='fooXXX',
                                 className='foo123',
                                 mainMethodName='testMeth',
                                 baseclass=dict)
        assert klass.__name__ == 'foo123'
        t = klass({'foo': 1234})
        # print t.generatedClassCode()
        assert str(t) == '1234'
        assert t.testMeth() == '1234'

    def test_moduleGlobalsArg(self):
        # Names injected via moduleGlobals are visible to the template.
        klass = Template.compile(source='$foo',
                                 moduleGlobals={'foo': 1234})
        t = klass()
        assert str(t) == '1234'

        # A string baseclass may be resolved through moduleGlobals too.
        klass2 = Template.compile(source='$foo', baseclass='Test1',
                                  moduleGlobals={'Test1': dict})
        t = klass2({'foo': 1234})
        assert str(t) == '1234'

        klass3 = Template.compile(source='$foo', baseclass='Test1',
                                  moduleGlobals={'Test1': dict, 'foo': 1234})
        t = klass3()
        assert str(t) == '1234'

    def test_keepRefToGeneratedCodeArg(self):
        klass = Template.compile(source='$foo',
                                 className='unique58',
                                 cacheCompilationResults=False,
                                 keepRefToGeneratedCode=False)
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'
        assert not t.generatedModuleCode()

        klass2 = Template.compile(source='$foo',
                                  className='unique58',
                                  keepRefToGeneratedCode=True)
        t = klass2(namespaces={'foo': 1234})
        assert str(t) == '1234'
        assert t.generatedModuleCode()

        klass3 = Template.compile(source='$foo',
                                  className='unique58',
                                  keepRefToGeneratedCode=False)
        t = klass3(namespaces={'foo': 1234})
        assert str(t) == '1234'
        # still there as this class came from the cache
        assert t.generatedModuleCode()

    def test_compilationCache(self):
        klass = Template.compile(source='$foo',
                                 className='unique111',
                                 cacheCompilationResults=False)
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'
        assert not klass._CHEETAH_isInCompilationCache

        # this time it will place it in the cache
        klass = Template.compile(source='$foo',
                                 className='unique111',
                                 cacheCompilationResults=True)
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'
        assert klass._CHEETAH_isInCompilationCache

        # by default it will be in the cache
        klass = Template.compile(source='$foo',
                                 className='unique999099')
        t = klass(namespaces={'foo': 1234})
        assert str(t) == '1234'
        assert klass._CHEETAH_isInCompilationCache


class OpenFileTest(TemplateTest):
    """Template.compile(file=...) must accept an already-open file object."""

    def setUp(self):
        # delete=False so the file survives close() and can be reopened
        # by the test; tearDown removes it explicitly.
        self.template_file = \
            tempfile.NamedTemporaryFile(mode='w', delete=False)
        self.template_file.write("<h1>Test open file</h1>")
        self.template_file.close()

    def tearDown(self):
        os.remove(self.template_file.name)

    def test_compileFile(self):
        tmpl_file = open(self.template_file.name)
        try:
            Template.compile(file=tmpl_file)
        finally:
            tmpl_file.close()


class ClassMethods_subclass(TemplateTest):
    """Exercise the ``subclass`` classmethod on compiled template classes."""

    def test_basicUsage(self):
        klass = Template.compile(source='$foo', baseclass=dict)
        t = klass({'foo': 1234})
        assert str(t) == '1234'

        klass2 = klass.subclass(source='$foo')
        t = klass2({'foo': 1234})
        assert str(t) == '1234'

        klass3 = klass2.subclass(source='#implements dummy\n$bar')
        t = klass3({'foo': 1234})
        assert str(t) == '1234'


class Preprocessors(TemplateTest):
    """Tests for the ``preprocessors`` argument of Template.compile."""

    def test_basicUsage1(self):
        src = '''\
        %set foo = @a
        $(@foo*10)
        @a'''
        # Leading whitespace is irrelevant: each line is stripped first.
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        preprocessors = {'tokens': '@ %',
                         'namespaces': {'a': 99}
                         }
        klass = Template.compile(src, preprocessors=preprocessors)
        assert str(klass()) == '990\n99'

    def test_normalizePreprocessorArgVariants(self):
        # Every accepted spelling of the preprocessors argument must
        # normalize to the same behavior.
        src = '%set foo = 12\n%%comment\n$(@foo*10)'

        class Settings1:
            tokens = '@ %'
        Settings1 = Settings1()

        from Cheetah.Template import TemplatePreprocessor
        settings = Template._normalizePreprocessorSettings(Settings1)
        preprocObj = TemplatePreprocessor(settings)

        def preprocFunc(source, file):
            return '$(12*10)', None

        class TemplateSubclass(Template):
            pass

        compilerSettings = {'cheetahVarStartToken': '@',
                            'directiveStartToken': '%',
                            'commentStartToken': '%%',
                            }

        for arg in ['@ %',
                    {'tokens': '@ %'},
                    {'compilerSettings': compilerSettings},
                    {'compilerSettings': compilerSettings,
                     'templateInitArgs': {}},
                    {'tokens': '@ %',
                     'templateAPIClass': TemplateSubclass},
                    Settings1,
                    preprocObj,
                    preprocFunc,
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            assert str(klass()) == '120'

    def test_complexUsage(self):
        src = '''\
        %set foo = @a
        %def func1: #def func(arg): $arg("***")
        %% comment
        $(@foo*10)
        @func1
        $func(lambda x:c"--$x--@a")'''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])

        for arg in [{'tokens': '@ %', 'namespaces': {'a': 99}},
                    {'tokens': '@ %', 'namespaces': {'a': 99}},
                    ]:
            klass = Template.compile(src, preprocessors=arg)
            t = klass()
            assert str(t) == '990\n--***--99'

    def test_i18n(self):
        src = '''\
        %i18n: This is a $string that needs translation
        %i18n id="foo", domain="root": This is a $string that needs translation
        '''
        src = '\n'.join([ln.strip() for ln in src.splitlines()])
        klass = Template.compile(src, preprocessors='@ %', baseclass=dict)
        t = klass({'string': 'bit of text'})
        # print str(t), repr(str(t))
        assert str(t) == (
            'This is a bit of text that needs translation\n'*2)[:-1]  # noqa: E226,E501 missing whitespace around operator


class TryExceptImportTest(TemplateTest):
    def test_FailCase(self):
        """ Test situation where an inline #import statement will get relocated """
        source = '''
            #def myFunction()
                Ahoy!
                #try
                    #import sys
                #except ImportError
                    $print "This will never happen!"
                #end try
            #end def
            '''
        # This should raise an IndentationError (if the bug exists)
        klass = Template.compile(
            source=source, compilerSettings={'useLegacyImportMode': False})
        t = klass(namespaces={'foo': 1234})  # noqa: F841


class ClassMethodSupport(TemplateTest):
    def test_BasicDecorator(self):
        template = '''
            #@classmethod
            #def myClassMethod()
                #return '$foo = %s' % $foo
            #end def
            '''
        template = Template.compile(source=template)
        try:
            rc = template.myClassMethod(foo='bar')
            assert rc == '$foo = bar', \
                (rc, 'Template class method didn\'t return what I expected')
        except AttributeError as ex:
            self.fail(ex)


class StaticMethodSupport(TemplateTest):
    def test_BasicDecorator(self):
        template = '''
            #@staticmethod
            #def myStaticMethod()
                #return '$foo = %s' % $foo
            #end def
            '''
        template = Template.compile(source=template)
        try:
            rc = template.myStaticMethod(foo='bar')
            assert rc == '$foo = bar', \
                (rc, 'Template class method didn\'t return what I expected')
        except AttributeError as ex:
            self.fail(ex)


class MultipleInheritanceSupport(TemplateTest):
    def runTest(self):
        # #extends with two bases: foo() combines local data with the
        # inherited $boink().
        template = '''
            #extends Cheetah.Tests.Boinker, Cheetah.Tests.Pinger
            #def foo()
                #return [4,5] + $boink()
            #end def
            '''
        template = Template.compile(template)
        template = template()
        result = template.foo()
        assert result == [4, 5, 1, 2, 3], (result, 'Unexpected result')


class SubclassSearchListTest(TemplateTest):
    '''
    Verify that if we subclass Template, we can still use attributes on
    that subclass in the searchList
    '''
    def runTest(self):
        class Sub(Template):
            greeting = 'Hola'
        tmpl = Sub('''When we meet, I say "${greeting}"''')
        self.assertEqual(unicode(tmpl), 'When we meet, I say "Hola"')
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import pretend

from warehouse import csp


class TestCSPTween:
    """Tests for the tween produced by content_security_policy_tween_factory."""

    def test_csp_policy(self):
        # A configured "csp" setting is serialized into the
        # Content-Security-Policy header as "<directive> <values...>" pairs
        # joined with "; ".
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        settings = {
            "csp": {"default-src": ["*"], "style-src": ["'self'", "example.net"]}
        }

        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            path="/project/foobar/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )

        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src *; style-src 'self' example.net"
        }

    def test_csp_policy_default(self):
        # When the "csp" service cannot be looked up, no header is added.
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        registry = pretend.stub(settings={})

        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            path="/path/to/nowhere/", find_service=pretend.raiser(LookupError)
        )

        assert tween(request) is response
        assert response.headers == {}

    def test_csp_policy_debug_disables(self):
        # Requests under the debug-toolbar path must not receive a CSP header,
        # even when a policy is configured.
        response = pretend.stub(headers={})
        handler = pretend.call_recorder(lambda request: response)
        settings = {
            "csp": {"default-src": ["*"], "style-src": ["'self'", "example.net"]}
        }

        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            path="/_debug_toolbar/foo/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )

        assert tween(request) is response
        assert response.headers == {}

    def test_csp_policy_inject(self):
        # Directives appended to the csp service *during* request handling
        # are reflected in the emitted header.
        response = pretend.stub(headers={})

        def handler(request):
            request.find_service("csp")["default-src"].append("example.com")
            return response

        settings = {"csp": {"default-src": ["*"], "style-src": ["'self'"]}}

        registry = pretend.stub(settings=settings)
        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            path="/example",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )

        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src * example.com; style-src 'self'"
        }

    def test_csp_policy_default_inject(self):
        # Even with no configured policy, handler-injected directives end up
        # in the header (settings here is a defaultdict of lists).
        settings = collections.defaultdict(list)
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)

        def handler(request):
            request.find_service("csp")["default-src"].append("example.com")
            return response

        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            path="/path/to/nowhere/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings),
        )

        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "default-src example.com"
        }

    def test_devel_csp(self):
        # "{request.scheme}" / "{request.host}" placeholders in directive
        # values are interpolated from the current request.
        settings = {"csp": {"script-src": ["{request.scheme}://{request.host}"]}}
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)
        handler = pretend.call_recorder(lambda request: response)

        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            scheme="https",
            host="example.com",
            path="/path/to/nowhere",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )

        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": "script-src https://example.com"
        }

    def test_simple_csp(self):
        # Valueless directives such as "sandbox" still serialize correctly.
        settings = {
            "csp": {"default-src": ["'none'"], "sandbox": ["allow-top-navigation"]}
        }
        response = pretend.stub(headers={})
        registry = pretend.stub(settings=settings)
        handler = pretend.call_recorder(lambda request: response)

        tween = csp.content_security_policy_tween_factory(handler, registry)

        request = pretend.stub(
            scheme="https",
            host="example.com",
            path="/simple/",
            find_service=pretend.call_recorder(lambda *args, **kwargs: settings["csp"]),
        )

        assert tween(request) is response
        assert response.headers == {
            "Content-Security-Policy": (
                "default-src 'none'; sandbox allow-top-navigation"
            )
        }


class TestCSPPolicy:
    """Tests for the csp.CSPPolicy mapping type."""

    def test_create(self):
        # CSPPolicy is a defaultdict, so unknown directives start out empty.
        policy = csp.CSPPolicy({"foo": ["bar"]})
        assert isinstance(policy, collections.defaultdict)

    def test_merge(self):
        # merge() extends existing directive lists and adds new directives.
        policy = csp.CSPPolicy({"foo": ["bar"]})
        policy.merge({"foo": ["baz"], "something": ["else"]})
        assert policy == {"foo": ["bar", "baz"], "something": ["else"]}


def test_includeme():
    """includeme registers the csp service factory, the tween, and the
    default policy (with camo/statuspage URLs substituted from settings)."""
    config = pretend.stub(
        register_service_factory=pretend.call_recorder(lambda fact, name: None),
        add_settings=pretend.call_recorder(lambda settings: None),
        add_tween=pretend.call_recorder(lambda tween: None),
        registry=pretend.stub(
            settings={
                "camo.url": "camo.url.value",
                "statuspage.url": "https://2p66nmmycsj3.statuspage.io",
            }
        ),
    )
    csp.includeme(config)

    assert config.register_service_factory.calls == [
        pretend.call(csp.csp_factory, name="csp")
    ]

    assert config.add_tween.calls == [
        pretend.call("warehouse.csp.content_security_policy_tween_factory")
    ]

    # The complete default policy shipped by warehouse.csp.
    assert config.add_settings.calls == [
        pretend.call(
            {
                "csp": {
                    "base-uri": ["'self'"],
                    "block-all-mixed-content": [],
                    "connect-src": [
                        "'self'",
                        "https://api.github.com/repos/",
                        "fastly-insights.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                        "https://api.pwnedpasswords.com",
                        "https://2p66nmmycsj3.statuspage.io",
                    ],
                    "default-src": ["'none'"],
                    "font-src": ["'self'", "fonts.gstatic.com"],
                    "form-action": ["'self'"],
                    "frame-ancestors": ["'none'"],
                    "frame-src": ["'none'"],
                    "img-src": [
                        "'self'",
                        "camo.url.value",
                        "www.google-analytics.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                    ],
                    "script-src": [
                        "'self'",
                        "www.googletagmanager.com",
                        "www.google-analytics.com",
                        "*.fastly-insights.com",
                        "*.ethicalads.io",
                        "'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0='",
                    ],
                    "style-src": [
                        "'self'",
                        "fonts.googleapis.com",
                        "*.ethicalads.io",
                        "'sha256-2YHqZokjiizkHi1Zt+6ar0XJ0OeEy/egBnlm+MDMtrM='",
                        "'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU='",
                    ],
                    "worker-src": ["*.fastly-insights.com"],
                }
            }
        )
    ]


class TestFactory:
    """Tests for csp.csp_factory."""

    def test_copy(self):
        # The factory returns a CSPPolicy copy of the configured settings.
        settings = {"csp": {"foo": "bar"}}
        request = pretend.stub(registry=pretend.stub(settings=settings))
        result = csp.csp_factory(None, request)
        assert isinstance(result, csp.CSPPolicy)
        assert result == settings["csp"]

        # ensure changes to factory result don't propagate back to the
        # settings
        result["baz"] = "foo"
        assert result == {"foo": "bar", "baz": "foo"}
        assert settings == {"csp": {"foo": "bar"}}

    def test_default(self):
        # With no "csp" setting the factory yields an empty policy.
        request = pretend.stub(registry=pretend.stub(settings={}))
        result = csp.csp_factory(None, request)
        assert isinstance(result, csp.CSPPolicy)
        assert result == {}
// Rollup build configuration: emits CJS, ESM, IIFE and UMD bundles from
// src/index.ts, plus a terser-minified UMD build.
import { terser } from 'rollup-plugin-terser'
// NOTE(review): './rollip.plugins' looks like a typo for './rollup.plugins' —
// confirm against the actual filename on disk before renaming anything.
import rollupPlugins from './rollip.plugins'
import pkg from '../package.json'
import isDev from './isDev'

// Convert a dash-separated name to camelCase ("foo-bar" -> "fooBar").
function toCamel(name) {
  return name.replace(/\-(\w)/g, function(all, letter) {
    return letter.toUpperCase()
  })
}

// Capitalize the first letter, then camel-case: used as the global/UMD name.
const name = toCamel(pkg.name.replace(/^\S/, s => s.toUpperCase()))

const input = 'src/index.ts'

// License banner prepended to every emitted bundle.
const banner = `/*
 * ${name}.js v${pkg.version}
 * (c) ${new Date().getFullYear()} ${pkg.author}
 * Released under the MIT License.
 */`

export default [
  {
    input,
    output: [
      // {
      //   // Browser-side module format, loadable via RequireJS
      //   // https://github.com/amdjs/amdjs-api/blob/master/AMD.md
      //   // https://github.com/amdjs/amdjs-api/wiki/AMD-(%E4%B8%AD%E6%96%87%E7%89%88)
      //   file: pkg.amd,
      //   format: 'amd',
      //   sourcemap: isDev,
      //   banner,
      //   extends: ['lib/hello', 'lodash']
      // },
      {
        // CommonJS: Node's default module format, loadable via Webpack
        // https://javascript.ruanyifeng.com/nodejs/module.html
        // https://zh.wikipedia.org/wiki/CommonJS
        file: pkg.cjs,
        format: 'cjs',
        sourcemap: isDev,
        banner
      },
      {
        // ES2015 module format
        // https://exploringjs.com/es6/ch_modules.html
        file: pkg.esm,
        format: 'esm',
        sourcemap: isDev,
        banner
      },
      {
        // Immediately-invoked function expression, loadable via a <script> tag
        // https://developer.mozilla.org/zh-CN/docs/Glossary/%E7%AB%8B%E5%8D%B3%E6%89%A7%E8%A1%8C%E5%87%BD%E6%95%B0%E8%A1%A8%E8%BE%BE%E5%BC%8F
        file: pkg.iife,
        format: 'iife',
        sourcemap: isDev,
        name: name,
        banner
      },
      {
        // UMD (Universal Module Definition): a cross-platform wrapper that
        // supports both AMD and CommonJS loaders as well as globals.
        // https://github.com/umdjs/umd
        // https://leohxj.gitbooks.io/front-end-database/javascript-modules/about-umd.html
        file: pkg.umd,
        format: 'umd',
        name: name,
        sourcemap: isDev,
        banner
      }
    ],
    plugins: [...rollupPlugins]
    // external: ['rxjs'] // list third-party libraries here to keep them out of the bundle and load them externally instead
  },
  // {
  //   input: 'src/lib/hello.ts',
  //   output: {
  //     file: 'dist/lib/hello.js',
  //     format: 'amd',
  //     amd: {
  //       id: 'lib/hello'
  //     }
  //   },
  //   plugins: [...rollupPlugins]
  // },
  {
    input,
    output: [
      // minified (compressed) UMD build
      {
        file: pkg.umdMin,
        format: 'umd',
        name: name,
        banner
      }
    ],
    plugins: [...rollupPlugins, ...[terser()]]
    // external: ['rxjs'] // list third-party libraries here to keep them out of the bundle and load them externally instead
  }
]
import { createStore, applyMiddleware, compose } from 'redux';
import { routerMiddleware } from 'react-router-redux';
import thunk from 'redux-thunk';
import createHistory from 'history/createBrowserHistory';
import rootReducer from './modules';

// Browser history instance shared with the router middleware and exported so
// the application can drive navigation programmatically.
export const history = createHistory();

// Middleware applied to every dispatched action: thunks first, then
// router-state synchronisation.
const middlewareChain = [thunk, routerMiddleware(history)];

// Store enhancers beyond middleware; in development we append the Redux
// DevTools browser-extension enhancer when it is installed.
const extraEnhancers = [];
if (process.env.NODE_ENV === 'development') {
  const devTools = window.__REDUX_DEVTOOLS_EXTENSION__;
  if (typeof devTools === 'function') {
    extraEnhancers.push(devTools());
  }
}

// The store starts from an empty preloaded state; each reducer supplies its
// own initial slice.
export default createStore(
  rootReducer,
  {},
  compose(applyMiddleware(...middlewareChain), ...extraEnhancers)
);
// Copyright (c) 2012 Ecma International. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /*--- es5id: 15.4.4.16-7-c-iii-11 description: > Array.prototype.every - return value of callbackfn is a number (value is -Infinity) ---*/ var accessed = false; function callbackfn(val, idx, obj) { accessed = true; return -Infinity; } assert([11].every(callbackfn), '[11].every(callbackfn) !== true'); assert(accessed, 'accessed !== true');
/* * Wallpaper v3.1.18 - 2014-06-16 * A jQuery plugin for smooth-scaling image and video backgrounds. Part of the Formstone Library. * http://formstone.it/wallpaper/ * * Copyright 2014 Ben Plum; MIT Licensed */ !function(a,b){"use strict";function c(b){var c=a.extend({},F,b);t=a("body"),u=s(),v=u!==!1,v||(u="transitionend.wallpaper");for(var e=a(this),f=0,g=e.length;g>f;f++)d.apply(e.eq(f),[a.extend({},c)]);return t.hasClass("wallpaper-inititalized")||(t.addClass("wallpaper-inititalized"),x.on("resize.wallpaper",c,l)),e}function d(b){var c=a(this);if(!c.hasClass("wallpaper")){a.extend(b,c.data("wallpaper-options")),c.addClass("wallpaper").append('<div class="wallpaper-container"></div>'),b.guid="wallpaper-"+A++,b.youTubeGuid=0,b.$target=c,b.$container=b.$target.find(".wallpaper-container"),b.$target.data("wallpaper",b).on("resize.wallpaper",b,k);var d=b.source;b.source=null,e(d,b,!0),b.onReady.call()}}function e(a,c,d){if(a!==c.source){if(c.source=a,c.isYouTube=!1,"object"==typeof a&&"string"==typeof a.video){var e=a.video.match(/(?:youtube\.com\/(?:[^\/]+\/.+\/|(?:v|e(?:mbed)?)\/|.*[?&]v=)|youtu\.be\/)([^"&?\/ ]{11})/i);e&&e.length>=1&&(c.isYouTube=!0,c.videoId=e[1])}if(c.isYouTube)c.playing=!1,c.playerReady=!1,c.posterLoaded=!1,h(a,c,d);else if("object"!=typeof a||a.hasOwnProperty("fallback")){if(c.responsiveSource)for(var i in c.responsiveSource)c.responsiveSource.hasOwnProperty(i)&&c.responsiveSource[i].mq.removeListener(m);if(c.responsive=!1,c.responsiveSource=null,"object"==typeof a){var j,k=[];for(var l in a)if(a.hasOwnProperty(l)){var n="fallback"===l?"(min-width: 0px)":l;if(n){var o=b.matchMedia(n.replace(1/0,"100000px"));o.addListener(m),k.push({mq:o,source:a[l]}),o.matches&&(j=a[l])}}c.responsive=!0,c.responsiveSource=k,a=j}f(a,c,!1,d)}else g(a,c,d)}else c.$target.trigger("wallpaper.loaded"),c.onLoad.call(c.$target)}function f(b,c,d,e){var f=a('<div class="wallpaper-media wallpaper-image'+(e!==!0?" 
animated":"")+'"><img /></div>'),g=f.find("img"),h=b;g.one("load.wallpaper",function(){z&&f.addClass("native").css({backgroundImage:"url('"+h+"')"}),f.on(u,function(b){r(b),a(b.target).is(f)&&(f.off(u),d||i(c))}),setTimeout(function(){f.css({opacity:1}),c.responsive&&e&&i(c)},0),k({data:c}),(!d||e)&&(c.$target.trigger("wallpaper.loaded"),c.onLoad.call(c.$target)),y=a(".wallpaper-responsive")}).attr("src",h),c.responsive&&f.addClass("wallpaper-responsive"),c.$container.append(f),(g[0].complete||4===g[0].readyState)&&g.trigger("load.wallpaper")}function g(b,c,d){if(c.source.poster&&(f(c.source.poster,c,!0,!0),d=!1),!E){var e='<div class="wallpaper-media wallpaper-video'+(d!==!0?" animated":"")+'">';e+="<video",c.loop&&(e+=" loop"),c.mute&&(e+=" muted"),e+=">",c.source.webm&&(e+='<source src="'+c.source.webm+'" type="video/webm" />'),c.source.mp4&&(e+='<source src="'+c.source.mp4+'" type="video/mp4" />'),c.source.ogg&&(e+='<source src="'+c.source.ogg+'" type="video/ogg" />'),e+="</video>",e+="</div>";var g=a(e),h=g.find("video");h.one("loadedmetadata.wallpaper",function(){g.on(u,function(b){r(b),a(b.target).is(g)&&(g.off(u),i(c))}),setTimeout(function(){g.css({opacity:1})},0),k({data:c}),c.$target.trigger("wallpaper.loaded"),c.onLoad.call(c.$target),c.hoverPlay?c.$target.on("mouseover.boxer",G.play).on("mouseout.boxer",G.pause):c.autoPlay&&this.play()}),c.$container.append(g)}}function h(c,d,e){if(!d.videoId){var g=c.match(/^.*(?:youtu.be\/|v\/|e\/|u\/\w+\/|embed\/|v=)([^#\&\?]*).*/);d.videoId=g[1]}if(d.posterLoaded||(d.source.poster||(d.source.poster="http://img.youtube.com/vi/"+d.videoId+"/0.jpg"),d.posterLoaded=!0,f(d.source.poster,d,!0,e),e=!1),!E)if(a("script[src*='youtube.com/iframe_api']").length||a("head").append('<script src="//www.youtube.com/iframe_api"></script>'),B){var h=d.guid+"_"+d.youTubeGuid++,j="";j+='<div class="wallpaper-media wallpaper-embed'+(e!==!0?" 
animated":"")+'">',j+='<div id="'+h+'"></div>',j+="</div>";var l=a(j);d.$container.append(l),d.player&&(d.oldPlayer=d.player,d.player=null),d.player=new b.YT.Player(h,{videoId:d.videoId,playerVars:{controls:0,rel:0,showinfo:0,wmode:"transparent",enablejsapi:1,version:3,playerapiid:h,loop:d.loop?1:0,autoplay:1,origin:b.location.protocol+"//"+b.location.host},events:{onReady:function(){d.playerReady=!0,d.mute&&d.player.mute(),d.hoverPlay?d.$target.on("mouseover.boxer",G.play).on("mouseout.boxer",G.pause):d.autoPlay&&d.player.playVideo()},onStateChange:function(c){d.playing||c.data!==b.YT.PlayerState.PLAYING?d.loop&&d.playing&&c.data===b.YT.PlayerState.ENDED&&d.player.playVideo():(d.playing=!0,(d.hoverPlay||!d.autoPlay)&&d.player.pauseVideo(),d.$target.trigger("wallpaper.loaded"),d.onLoad.call(d.$target),l.on(u,function(b){r(b),a(b.target).is(l)&&(l.off(u),i(d))}),l.css({opacity:1})),d.$target.find(".wallpaper-embed").addClass("ready")},onPlaybackQualityChange:function(){},onPlaybackRateChange:function(){},onError:function(){},onApiChange:function(){}}}),k({data:d})}else C.push({source:c,data:d})}function i(b){var c=b.$container.find(".wallpaper-media");c.length>=1&&(c.not(":last").remove(),b.oldPlayer=null),y=a(".wallpaper-responsive")}function j(b){var c=b.$container.find(".wallpaper-media");c.length>=1&&c.on(u,function(d){r(d),a(d.target).is(c)&&(a(this).remove(),delete b.source)}).css({opacity:0})}function k(a){r(a);for(var b=a.data,c=b.$container.find(".wallpaper-media"),d=0,e=c.length;e>d;d++){var f=c.eq(d),g=b.isYouTube?"iframe":f.find("video").length?"video":"img",h=f.find(g);if(h.length&&("img"!==g||!b.nativeSupport)){var i=b.$target.outerWidth(),j=b.$target.outerHeight(),k=q(b,h);b.width=k.naturalWidth,b.height=k.naturalHeight,b.left=0,b.top=0;var 
l=b.isYouTube?b.embedRatio:b.width/b.height;b.height=j,b.width=b.height*l,b.width<i&&(b.width=i,b.height=b.width/l),b.left=-(b.width-i)/2,b.top=-(b.height-j)/2,f.css({height:b.height,width:b.width,left:b.left,top:b.top})}}}function l(){a(".wallpaper").each(function(){var b=a(this).data("wallpaper");k({data:b})})}function m(){w=o(w,5,n)}function n(){p(w),y.each(function(){for(var b=a(this),c=(b.find("img"),b.parents(".wallpaper").data("wallpaper")),d=c.responsiveSource,e=0,g=0,h=d.length;h>g;g++)if(d.hasOwnProperty(g)){var i=d[g].mq;i&&i.matches&&(e=g)}f(d[e].source,c,!1,!0),b.trigger("change.wallpaper")})}function o(a,b,c,d){return p(a,d),setTimeout(c,b)}function p(a){null!==a&&(clearInterval(a),a=null)}function q(a,b){if(a.isYouTube)return{naturalHeight:500,naturalWidth:500/a.embedRatio};if(b.is("img")){var c=b[0];if("undefined"!=typeof c.naturalHeight)return{naturalHeight:c.naturalHeight,naturalWidth:c.naturalWidth};var d=new Image;return d.src=c.src,{naturalHeight:d.height,naturalWidth:d.width}}return{naturalHeight:b[0].videoHeight,naturalWidth:b[0].videoWidth}}function r(a){a.preventDefault&&(a.stopPropagation(),a.preventDefault())}function s(){var a={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd",transition:"transitionend"},b=document.createElement("div");for(var c in a)if(a.hasOwnProperty(c)&&c in b.style)return a[c]+".wallpaper";return!1}var t,u,v,w,x=a(b),y=null,z="backgroundSize"in document.documentElement.style,A=0,B=!1,C=[],D=b.navigator.userAgent||b.navigator.vendor||b.opera,E=/Android|webOS|iPhone|iPad|iPod|BlackBerry/i.test(D),F=(D.toLowerCase().indexOf("safari")>=0&&D.toLowerCase().indexOf("chrome")<0,{autoPlay:!0,embedRatio:1.777777,hoverPlay:!1,loop:!0,mute:!0,onLoad:a.noop,onReady:a.noop,source:null}),G={defaults:function(b){return F=a.extend(F,b||{}),a(this)},destroy:function(){var b=a(this).each(function(){var 
b=a(this).data("wallpaper");b&&(b.$container.remove(),b.$target.removeClass("wallpaper").off(".boxer").data("wallpaper",null))});return"undefined"!=typeof t&&"undefined"!=typeof x&&a(".wallpaper").length<1&&(t.removeClass("wallpaper-inititalized"),x.off(".wallpaper")),b},load:function(b){return a(this).each(function(){var c=a(this).data("wallpaper");c&&e(b,c)})},pause:function(){return a(this).each(function(){var b=a(this).data("wallpaper");if(b)if(b.isYouTube&&b.playerReady)b.player.pauseVideo();else{var c=b.$container.find("video");c.length&&c[0].pause()}})},play:function(){return a(this).each(function(){var b=a(this).data("wallpaper");if(b)if(b.isYouTube&&b.playerReady)b.player.playVideo();else{var c=b.$container.find("video");c.length&&c[0].play()}})},stop:function(){G.pause.apply(this)},unload:function(){return a(this).each(function(){var b=a(this).data("wallpaper");b&&j(b)})}};b.onYouTubeIframeAPIReady=function(){B=!0;for(var a in C)C.hasOwnProperty(a)&&h(C[a].source,C[a].data);C=[]},a.fn.wallpaper=function(a){return G[a]?G[a].apply(this,Array.prototype.slice.call(arguments,1)):"object"!=typeof a&&a?this:c.apply(this,arguments)},a.wallpaper=function(a){"defaults"===a&&G.defaults.apply(this,Array.prototype.slice.call(arguments,1))}}(jQuery,window);
"use strict";
// Generated artifact: the source module evidently declares only type-level
// constructs, so the emitted JavaScript contains no runtime exports — just
// the ES-module interop marker and the source-map pointer below.
// NOTE(review): presumably TypeScript compiler output (the __esModule marker
// and .js.map reference suggest tsc) — confirm against the build setup; this
// file should not be edited by hand.
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=asset-storage-strategy.js.map
import React from "react"
import Image from "gatsby-image"
// import Bio from "../components/bio"
import { useStaticQuery, graphql } from "gatsby"
import { rhythm } from "../utils/typography"
import './profile.css'

// Profile landing page: intro + contact/CV buttons, a four-card project
// gallery fed by the fixed-size images queried below, and a footer.
const Profile = () => {
  // NOTE(review): the query name "BioQuerye" looks like a typo for "BioQuery"
  // (harmless — query names only need to be unique), and "pl_destkop.png"
  // looks like a misspelling of "pl_desktop.png" — confirm the asset's actual
  // filename before changing the regex.
  const data = useStaticQuery(graphql`
    query BioQuerye {
      pleague: file(absolutePath: { regex: "/pl_destkop.png/" }) {
        childImageSharp {
          fixed(width: 280, height: 140) {
            ...GatsbyImageSharpFixed
          }
        }
      }
      daris: file(absolutePath: { regex: "/daris.png/" }) {
        childImageSharp {
          fixed(width: 280, height: 140) {
            ...GatsbyImageSharpFixed
          }
        }
      }
      hijabie: file(absolutePath: { regex: "/hijabie.png/" }) {
        childImageSharp {
          fixed(width: 280, height: 140) {
            ...GatsbyImageSharpFixed
          }
        }
      }
      weatherapp: file(absolutePath: { regex: "/weatherapp.png/" }) {
        childImageSharp {
          fixed(width: 280, height: 140) {
            ...GatsbyImageSharpFixed
          }
        }
      }
      site {
        siteMetadata {
          author {
            name
            summary
          }
          social {
            twitter
          }
        }
      }
    }
  `)
  return (
    <>
      <div className="wrapper-profile">
        {/* sec1: greeting, short bio, contact + CV buttons */}
        <h1 style={{
          marginTop: rhythm(1),
          marginBottom: rhythm(1.5),
        }}>
          It’s a nice to meet you here &#128075;
        </h1>
        {/* NOTE(review): user-facing copy above/below has grammar issues
            ("It’s a nice to meet you", "Hii") — fix with content owner. */}
        <p className="text-higlight">
          Hii, My name is Syafrizal. I am a Student at AMIKOM University Yogyakarta.
          Majoring on informatics engineering. Currently working as Freelance Web Programmer.
          Catch me on contact button below.
        </p>
        <a href="mailto:muhammadsyafr@gmail.com"><button style={{
          marginTop: rhythm(0.5),
          marginBottom: rhythm(1.5),
        }} className="btn btn-contact"> Contact</button></a>
        <a href="https://s.id/cv_syafrizal"><button style={{
          marginTop: rhythm(0.5),
          marginBottom: rhythm(1.5),
        }} className="btn btn-cv"> Download CV </button></a>
      </div>
      {/* endsec1 */}
      {/* sec2: project gallery, one card per queried image */}
      <h2 style={{
        marginTop: rhythm(2),
        marginBottom: rhythm(0),
        fontFamily: 'Montserrat',
        fontWeight: 'bold',
        color: '#303952',
        fontSize: '36px'
      }}>
        A few projects that I have worked on
      </h2>
      <div className="row">
        <div className="col-6 col-s-12">
          <div className="wrap-project">
            <a href="https://premierleague-concept.netlify.app/"><h3>Premier League</h3></a>
            {/* NOTE(review): this description appears copy-pasted from the
                Weather App card — it does not match the Premier League
                project; confirm correct copy with the author. */}
            <span>Simple weatherApp using OpenWeather as an API and Ionic React as FE</span>
            <Image
              fixed={data.pleague.childImageSharp.fixed}
              alt="hai"
              style={{
                marginTop: rhythm(0.5),
                marginBottom: rhythm(0),
                boxShadow: '0 0px 10px 1px rgba(0, 0, 0, 0.2)'
              }}
            />
          </div>
        </div>
        <div className="col-6 col-s-12">
          <div className="wrap-project">
            <a href="https://mokima.netlify.app/"><h3>Mokima</h3></a>
            <span>Exploration about slicing landingpage for alumni website, using bootstrap</span>
            <Image
              fixed={data.daris.childImageSharp.fixed}
              alt="hai"
              style={{
                marginTop: rhythm(0.5),
                marginBottom: rhythm(0),
                boxShadow: '0 0px 10px 1px rgba(0, 0, 0, 0.2)'
              }}
            />
          </div>
        </div>
        <div className="col-6 col-s-12">
          <div className="wrap-project">
            <a href="https://hijabie.netlify.app/"><h3>Hijabie</h3></a>
            <span>Non commercial project, slicing landingpage for women to use their hijab on daily activities </span>
            <Image
              fixed={data.hijabie.childImageSharp.fixed}
              alt="hai"
              style={{
                marginTop: rhythm(0.5),
                marginBottom: rhythm(0),
                boxShadow: '0 0px 10px 1px rgba(0, 0, 0, 0.2)'
              }}
            />
          </div>
        </div>
        <div className="col-6 col-s-12">
          <div className="wrap-project">
            <a href="https://weatherandlocal.netlify.app/"><h3>Weather App</h3></a>
            <span>Simple predict weather apps using OpenWeather as API and Ionic React as FE</span>
            <Image
              fixed={data.weatherapp.childImageSharp.fixed}
              alt="hai"
              style={{
                marginTop: rhythm(0.5),
                marginBottom: rhythm(0),
                boxShadow: '0 0px 10px 1px rgba(0, 0, 0, 0.2)'
              }}
            />
          </div>
        </div>
      </div>
      {/* endsec2 */}
      {/* sec3: closing call-to-action */}
      <h2 style={{
        marginTop: rhythm(3),
        marginBottom: rhythm(0.5),
        fontFamily: 'Montserrat',
        fontWeight: 'bold',
        color: '#303952',
        fontSize: '36px'
      }}>
        I'm always down for a coffee, feel free to get in touch!
      </h2>
      <a href="mailto:muhammadsyafr@gmail.com">
        <button style={{
          marginTop: rhythm(0.5),
          marginBottom: rhythm(3),
        }} className="btn btn-contact"> Contact</button></a>
      {/* endsec3 */}
      <footer>
        Follow me for more thoughts and updates on {''}
        <a className="footer-social" href="https://instagram.com/muhammadsyafr">Instagram</a>
      </footer>
    </>
  )
}

export default Profile
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2018-2019 Fetch.AI Limited
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of background profiling daemon."""

import asyncio
import datetime
import gc
import logging
import platform
import textwrap
import threading
import time
from collections import Counter
from concurrent.futures._base import CancelledError
from functools import wraps
from typing import Any, Callable, Dict, List, Type

from aea.helpers.async_utils import Runnable


# Guards the gc.get_objects() scan in get_objects_instances.
lock = threading.Lock()

_default_logger = logging.getLogger(__file__)

# Platform-specific helpers: Windows uses the win32 API, everything else the
# POSIX `resource` module.
if platform.system() == "Windows":  # pragma: nocover
    import win32process  # type: ignore # pylint: disable=import-error

    # GetProcessTimes reports times in 100-nanosecond ticks.
    WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7

    def get_current_process_memory_usage() -> float:
        """Get current process memory usage in MB."""
        d = win32process.GetProcessMemoryInfo(win32process.GetCurrentProcess())  # type: ignore
        return 1.0 * d["WorkingSetSize"] / 1024 ** 2

    def get_current_process_cpu_time() -> float:
        """Get current process cpu time in seconds."""
        d = win32process.GetProcessTimes(win32process.GetCurrentProcess())  # type: ignore
        return d["UserTime"] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND


else:
    import resource

    # ru_maxrss units differ per platform (hence the two dividers below);
    # presumably bytes on macOS and kilobytes on Linux — matches the MB
    # conversion used in get_current_process_memory_usage.
    _MAC_MEM_STATS_MB = 1024 ** 2
    _LINUX_MEM_STATS_MB = 1024

    def get_current_process_memory_usage() -> float:
        """Get current process memory usage in MB."""
        if platform.system() == "Darwin":  # pragma: nocover
            divider = _MAC_MEM_STATS_MB
        else:
            divider = _LINUX_MEM_STATS_MB
        return 1.0 * resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / divider

    def get_current_process_cpu_time() -> float:
        """Get current process cpu time in seconds."""
        return resource.getrusage(resource.RUSAGE_SELF).ru_utime


class Profiling(Runnable):
    """Profiling service.

    Periodically renders process stats (run/CPU time, memory, threads) plus
    counts of selected object types, and hands the report to an output
    function.
    """

    def __init__(
        self,
        period: int = 0,
        objects_instances_to_count: List[Type] = None,
        objects_created_to_count: List[Type] = None,
        output_function: Callable[[str], None] = lambda x: print(x, flush=True),
    ) -> None:
        """
        Init profiler.

        :param period: delay between profiling output in seconds.
        :param objects_instances_to_count: types whose live instances are
            counted on each report (via a gc scan).
        :param objects_created_to_count: types whose constructions are counted
            by patching ``__new__``.
        :param output_function: function to display output, one str argument.
        """
        # NOTE(review): the default period=0 always fails this validation —
        # the parameter is effectively mandatory; consider a valid default.
        if period < 1:  # pragma: nocover
            raise ValueError("Period should be at least 1 second!")

        super().__init__(threaded=True)
        self._period = period
        self._start_ts = time.time()
        self._objects_instances_to_count = objects_instances_to_count or []
        self._objects_created_to_count = objects_created_to_count or []
        self._output_function = output_function
        self._counter: Dict[Type, int] = Counter()

    def set_counters(self) -> None:
        """Modify obj.__new__ to count objects created."""
        for obj in self._objects_created_to_count:
            self._counter[obj] = 0

            def make_fn(obj: Any) -> Callable:
                # Bind `obj` per iteration; the wrapper increments the
                # per-type counter and then defers to the original __new__.
                orig_new = obj.__new__  # pylint: disable=protected-access  # type: ignore

                @wraps(orig_new)
                def new(*args: Any, **kwargs: Any) -> Callable:
                    self._counter[obj] += 1
                    # object.__new__ only accepts the class argument.
                    if orig_new is object.__new__:
                        return orig_new(args[0])  # pragma: nocover
                    return orig_new(*args, **kwargs)  # pragma: nocover

                return new

            obj.__new__ = make_fn(obj)  # type: ignore

    async def run(self) -> None:
        """Run profiling: emit a report every `period` seconds until cancelled."""
        try:
            self.set_counters()
            while True:
                await asyncio.sleep(self._period)
                self.output_profile_data()
        except CancelledError:  # pragma: nocover
            pass
        except Exception:  # pragma: nocover
            _default_logger.exception("Exception in Profiling")
            raise

    def output_profile_data(self) -> None:
        """Render profiling data and call output_function."""
        data = self.get_profile_data()
        text = (
            textwrap.dedent(
                f"""
        Profiling details for current AEA process: {datetime.datetime.now()}
        =============================================
        Run time: {data["run_time"]:.6f} seconds
        Cpu time: {data["cpu_time"]:.6f} seconds, Cpu/Run time: {100*data["cpu_time"]/data["run_time"]:.6f}%
        Memory: {data["mem"]:.6f} MB
        Threads: {data["threads"]['amount']} {data["threads"]['names']}
        Objects present:
        """
            )
            + "\n".join([f" * {i}: {c}" for i, c in data["objects_present"].items()])
            + "\n"
            + """Objects created:\n"""
            + "\n".join(
                [f" * {i.__name__}: {c}" for i, c in data["objects_created"].items()]
            )
            + "\n"
        )
        self._output_function(text)

    def get_profile_data(self) -> Dict:
        """Get profiling data dict."""
        return {
            "run_time": time.time() - self._start_ts,
            "cpu_time": get_current_process_cpu_time(),
            "mem": get_current_process_memory_usage(),
            "threads": {
                "amount": threading.active_count(),
                "names": [i.name for i in threading.enumerate()],
            },
            "objects_present": self.get_objects_instances(),
            "objects_created": self.get_objecst_created(),
        }

    def get_objects_instances(self) -> Dict:
        """Return dict with counted object instances present now."""
        result: Dict = Counter()
        # The lock serializes the full-heap gc scan; acquire/release rather
        # than `with` keeps the original control flow intact.
        lock.acquire()
        try:
            # Seed with zeros so tracked types always appear in the report.
            for obj_type in self._objects_instances_to_count:
                result[obj_type.__name__] += 0

            for obj in gc.get_objects():
                for obj_type in self._objects_instances_to_count:
                    if isinstance(obj, obj_type):
                        result[obj_type.__name__] += 1
        finally:
            lock.release()
        return result

    def get_objecst_created(self) -> Dict:
        """Return dict with counted object instances created.

        NOTE(review): method name contains a typo ("objecst"); kept because
        renaming would break external callers — fix with a deprecation alias.
        """
        return self._counter
'use strict';

// Declares the "version" feature module of google-voice-app, wiring in the
// interpolate-filter and version-directive sub-modules.
angular.module('google-voice-app.version', [
  'google-voice-app.version.interpolate-filter',
  'google-voice-app.version.version-directive'
])

// Application version string, injectable under the name `version`.
.value('version', '0.1');
/**
 * Values for classifying a point or object relative to a plane.
 *
 * The enumeration object is frozen so its members cannot be reassigned,
 * added to, or deleted at runtime — it is shared module-wide constant data.
 *
 * @namespace
 *
 * @author derschmale <http://www.derschmale.com>
 */
export var PlaneSide = Object.freeze({
    /**
     * Entirely on the front side of the plane
     */
    FRONT: 1,

    /**
     * Entirely on the back side of the plane
     */
    BACK: -1,

    /**
     * Intersecting the plane.
     */
    INTERSECTING: 0
});
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

// ***************************************************************
// - [#] indicates a test step (e.g. # Go to a page)
// - [*] indicates an assertion (e.g. * Check the title)
// - Use element ID when selecting an element. Create one if none.
// ***************************************************************

/**
 * Note: This test requires webhook server running. Initiate `npm run start:webhook` to start.
 */

import * as TIMEOUTS from '../../fixtures/timeouts';
import users from '../../fixtures/users.json';
import messageMenusOptions from '../../fixtures/interactive_message_menus_options.json';

import {getMessageMenusPayload} from '../../utils';

// Basic select options reused by several specs below.
const options = [
    {text: 'Option 1', value: 'option1'},
    {text: 'Option 2', value: 'option2'},
    {text: 'Option 3', value: 'option3'},
];
const payload = getMessageMenusPayload({options});

let channelId;
let incomingWebhook;

describe('Interactive Menu', () => {
    before(() => {
        // # Set required ServiceSettings
        const newSettings = {
            ServiceSettings: {
                AllowedUntrustedInternalConnections: 'localhost',
                EnablePostUsernameOverride: true,
                EnablePostIconOverride: true,
            },
        };
        cy.apiUpdateConfig(newSettings);

        // # Login as sysadmin and ensure that teammate name display setting is set to default 'username'
        cy.apiLogin('sysadmin');
        cy.apiSaveTeammateNameDisplayPreference('username');
        cy.apiSaveMessageDisplayPreference('clean');

        // # Visit '/' and create incoming webhook
        cy.visit('/ad-1/channels/town-square');
        cy.getCurrentChannelId().then((id) => {
            channelId = id;

            const newIncomingHook = {
                channel_id: id,
                channel_locked: true,
                description: 'Incoming webhook interactive menu',
                display_name: 'menuIn' + Date.now(),
            };

            cy.apiCreateWebhook(newIncomingHook).then((hook) => {
                incomingWebhook = hook;
            });
        });
    });

    it('matches elements', () => {
        // # Post an incoming webhook
        cy.postIncomingWebhook({url: incomingWebhook.url, data: payload});

        // # Get message attachment from the last post
        cy.getLastPostId().then((postId) => {
            cy.get(`#messageAttachmentList_${postId}`).as('messageAttachmentList');
        });

        // * Verify each element of message attachment list
        cy.get('@messageAttachmentList').within(() => {
            cy.get('.attachment__thumb-pretext').should('be.visible').and('have.text', 'This is attachment pretext with basic options');
            cy.get('.post-message__text-container').should('be.visible').and('have.text', 'This is attachment text with basic options');
            cy.get('.attachment-actions').should('be.visible');
            cy.get('.select-suggestion-container').should('be.visible');

            // * Suggestion list should not be visible before dropdown is clicked
            cy.get('#suggestionList').should('not.be.visible');

            // # Click on the suggestion dropdown input
            cy.findByPlaceholderText('Select an option...').should('be.visible').click();

            // * Suggestion list should now be open
            cy.get('#suggestionList').should('be.visible').children().should('have.length', options.length);

            cy.get('#suggestionList').children().each(($el, index) => {
                cy.wrap($el).should('have.text', options[index].text);
            });
        });

        // * Close suggestion list by clicking on other element
        cy.get('body').click();
    });

    it('IM15887 - Selected Option is displayed, Ephemeral message is posted', () => {
        // # Post an incoming webhook
        cy.postIncomingWebhook({url: incomingWebhook.url, data: payload});

        // # Get message attachment from the last post
        cy.getLastPostId().then((postId) => {
            cy.get(`#messageAttachmentList_${postId}`).as('messageAttachmentList');
        });

        cy.get('@messageAttachmentList').within(() => {
            // # Select option 1 by typing exact text and press enter
            cy.findByPlaceholderText('Select an option...').click().clear().type(`${options[0].text}{enter}`);

            // * Verify that the input is updated with the selected option
            cy.findByDisplayValue(options[0].text).should('exist');
        });

        cy.wait(TIMEOUTS.SMALL);

        cy.getLastPostId().then((postId) => {
            // * Verify that ephemeral message is posted, visible to observer and contains an exact message
            cy.get(`#${postId}_message`).should('be.visible').and('have.class', 'post--ephemeral');
            cy.get('.post__visibility').should('be.visible').and('have.text', '(Only visible to you)');
            cy.get(`#postMessageText_${postId}`).should('be.visible').and('have.text', 'Ephemeral | select option: option1');
        });
    });

    it('IM15887 - Reply is displayed in center channel with "commented on [user\'s] message: [text]"', () => {
        const user1 = users['user-1'];

        // # Post an incoming webhook
        cy.postIncomingWebhook({url: incomingWebhook.url, data: payload});

        // # Get last post
        cy.getLastPostId().then((parentMessageId) => {
            // # Post another message
            cy.postMessageAs({sender: user1, message: 'Just another message', channelId});

            // # Click comment icon to open RHS
            cy.clickPostCommentIcon(parentMessageId);

            // * Check that the RHS is open
            cy.get('#rhsContainer').should('be.visible');

            // # Have another user reply to the webhook message
            cy.postMessageAs({sender: user1, message: 'Reply to webhook', channelId, rootId: parentMessageId});

            // # Get the latest post
            cy.getLastPostId().then((replyMessageId) => {
                // * Verify that the reply is in the channel view with matching text
                cy.get(`#post_${replyMessageId}`).within(() => {
                    cy.get('.post__link').should('be.visible').and('have.text', 'Commented on webhook\'s message: This is attachment pretext with basic options');
                    cy.get(`#postMessageText_${replyMessageId}`).should('be.visible').and('have.text', 'Reply to webhook');
                });

                // * Verify that the reply is in the RHS with matching text
                cy.get(`#rhsPost_${replyMessageId}`).within(() => {
                    cy.get('.post__link').should('not.be.visible');
                    cy.get(`#rhsPostMessageText_${replyMessageId}`).should('be.visible').and('have.text', 'Reply to webhook');
                });

                // # Close RHS
                cy.closeRHS();
            });
        });
    });

    it('IM21039 - Searching within the list of options', () => {
        const searchOptions = [
            {text: 'SearchOption1', value: 'searchoption1'},
            {text: 'SearchOption2', value: 'searchoption2'},
            ...options,
        ];
        const searchOptionsPayload = getMessageMenusPayload({options: searchOptions});

        // # Post an incoming webhook for interactive menu with search options
        cy.postIncomingWebhook({url: incomingWebhook.url, data: searchOptionsPayload});

        // # Get message attachment from the last post
        cy.getLastPostId().then((postId) => {
            cy.get(`#messageAttachmentList_${postId}`).as('messageAttachmentList');
        });

        cy.get('@messageAttachmentList').within(() => {
            cy.findByPlaceholderText('Select an option...').click().clear().type('sea');

            // * Message attachment menu dropdown should now be open
            cy.get('#suggestionList').should('exist').children().should('have.length', 2);

            // # Checking values inside the attachment menu dropdown
            cy.get('#suggestionList').within(() => {
                // * Each dropdown should contain the searchOptions text
                cy.findByText(searchOptions[0].text).should('exist');
                cy.findByText(searchOptions[1].text).should('exist');
            });
        });
    });

    it('IM21042 - "No items match" feedback', () => {
        const missingUser = Date.now();
        const userOptions = getMessageMenusPayload({dataSource: 'users'});

        // # Post an incoming webhook for interactive menu with user options
        cy.postIncomingWebhook({url: incomingWebhook.url, data: userOptions});

        // # Get message attachment from the last post
        cy.getLastPostId().then((postId) => {
            cy.get(`#messageAttachmentList_${postId}`).within(() => {
                // # Type the missing user in the select input
                cy.findByPlaceholderText('Select an option...').click().clear().type(`${missingUser}`);

                cy.get('#suggestionList').within(() => {
                    // * Check if we get appropriate message when no options matches entered text
                    cy.get('.suggestion-list__no-results').should('be.visible').should('have.text', `No items match ${missingUser}`);
                });
            });
        });
    });

    it('should truncate properly the selected long basic option', () => {
        const withLongBasicOption = [
            {text: 'Option 0 - This is with very long option', value: 'option0'},
            ...options,
        ];
        const basicOptions = getMessageMenusPayload({options: withLongBasicOption});

        // # Post an incoming webhook for interactive menu with basic options and verify the post
        cy.postIncomingWebhook({url: incomingWebhook.url, data: basicOptions}).then(() => {
            verifyLastPost();
        });
    });

    it('should truncate properly the selected long username option', () => {
        const userOptions = getMessageMenusPayload({dataSource: 'users'});

        // # Post an incoming webhook for interactive menu with user options and verify the post
        cy.postIncomingWebhook({url: incomingWebhook.url, data: userOptions}).then(() => {
            verifyLastPost();
        });
    });

    it('should truncate properly the selected long channel display name option', () => {
        const channelOptions = getMessageMenusPayload({dataSource: 'channels'});

        cy.getCurrentTeamId().then((teamId) => {
            // # Create channel with long display name
            cy.apiCreateChannel(teamId, 'test-channel', `AAAA Very Long Display Name of a Channel ${Date.now()}`).then(() => {
                // # Post an incoming webhook for interactive menu with channel options and verify the post
                cy.postIncomingWebhook({url: incomingWebhook.url, data: channelOptions}).then(() => {
                    verifyLastPost();
                });
            });
        });
    });

    it('IM21037 - Clicking in / Tapping on the message attachment menu box opens list of selections', () => {
        // # Create a message attachment with menu
        const basicOptionPayload = getMessageMenusPayload({options});
        cy.postIncomingWebhook({url: incomingWebhook.url, data: basicOptionPayload});

        // # Get the last posted message id
        cy.getLastPostId().then((lastPostId) => {
            // # Get the last messages attachment container
            cy.get(`#messageAttachmentList_${lastPostId}`).within(() => {
                // * Message attachment menu dropdown should be closed
                cy.get('#suggestionList').should('not.exist');

                // # Open the message attachment menu dropdown
                cy.findByPlaceholderText('Select an option...').click();

                // * Message attachment menu dropdown should now be open
                cy.get('#suggestionList').should('exist').children().should('have.length', options.length);

                // # Checking values inside the attachment menu dropdown
                cy.get('#suggestionList').within(() => {
                    // * Each dropdown should contain the options text
                    cy.findByText(options[0].text).should('exist');
                    cy.findByText(options[1].text).should('exist');
                    cy.findByText(options[2].text).should('exist');
                });
            });

            // # Close message attachment menu dropdown
            cy.get('body').click();
        });
    });

    it('IM21036 - Enter selects the option', () => {
        // # Create a message attachment with menu
        const distinctOptions = messageMenusOptions['distinct-options'];
        const distinctOptionsPayload = getMessageMenusPayload({options: distinctOptions});
        cy.postIncomingWebhook({url: incomingWebhook.url, data: distinctOptionsPayload});

        // # Get the last posted message id
        cy.getLastPostId().then((lastPostId) => {
            // # Get the last messages attachment container
            cy.get(`#messageAttachmentList_${lastPostId}`).within(() => {
                // # Find the message attachment menu and assign it to a variable for later use
                cy.findByPlaceholderText('Select an option...').as('optionInputField');

                // # Open the options menu
                cy.get('@optionInputField').click();

                // * Message attachment menu dropdown should now be open
                cy.get('#suggestionList').should('exist').children().should('have.length', distinctOptions.length);

                // # Pick the option we are interested in finding
                const selectedOption = distinctOptions[5].text;

                // # Type the selected word to find in the list
                cy.get('@optionInputField').type(selectedOption);
                cy.wait(TIMEOUTS.TINY);

                // # Checking values inside the attachment menu dropdown
                cy.get('#suggestionList').within(() => {
                    // * All other options should not be there
                    cy.findByText(distinctOptions[0].text).should('not.exist');
                    cy.findByText(distinctOptions[1].text).should('not.exist');
                    cy.findByText(distinctOptions[2].text).should('not.exist');
                    cy.findByText(distinctOptions[3].text).should('not.exist');
                    cy.findByText(distinctOptions[4].text).should('not.exist');

                    // * Selected option should be there in the search list
                    cy.findByText(selectedOption).should('exist');

                    // * Other matched option should also be there
                    cy.findByText(distinctOptions[6].text).should('exist');
                });

                // # Enter is clicked to select the correct match
                cy.get('@optionInputField').type('{enter}');

                // * Since option was clicked dropdown should be closed
                cy.get('#suggestionList').should('not.exist');

                // * Verify the input has the selected value
                cy.findByDisplayValue(selectedOption).should('exist');
            });
        });

        // # Wait a little for the webhook to return the confirmation message
        cy.wait(TIMEOUTS.TINY);

        // # Get the ephemeral message from webhook, which is only visible to us
        cy.getLastPostId().then((lastPostId) => {
            cy.get(`#post_${lastPostId}`).within(() => {
                // * Check if Bot message is the last message
                cy.findByText('(Only visible to you)').should('exist');

                // * Check if we got ephemeral message of our selection
                // FIXME: the unescaped '|' turns this regex into an alternation
                // ("Ephemeral " OR " select option: mango"), so it matches far
                // more than the literal text; escape it (\|) once the fixture
                // text is confirmed.
                cy.findByText(/Ephemeral | select option: mango/).should('exist');
            });
        });
    });

    it('IM21035 - Long lists of selections are scrollable', () => {
        const manyOptions = messageMenusOptions['many-options'];
        const manyOptionsPayload = getMessageMenusPayload({options: manyOptions});

        // # Create a message attachment with long menu options
        cy.postIncomingWebhook({url: incomingWebhook.url, data: manyOptionsPayload});

        // # Get the last posted message id
        cy.getLastPostId().then((lastPostId) => {
            // # Get the last messages attachment container
            cy.get(`#messageAttachmentList_${lastPostId}`).within(() => {
                // * Message attachment menu dropdown should be closed
                cy.get('#suggestionList').should('not.exist');

                // # Open the message attachment menu dropdown
                cy.findByPlaceholderText('Select an option...').click();

                // * Message attachment menu dropdown should now be open
                cy.get('#suggestionList').should('exist').children().should('have.length', manyOptions.length);

                const lengthOfLongListOptions = manyOptions.length;

                // # Scroll to bottom of the options
                cy.get('#suggestionList').scrollTo('bottom').then((listContainer) => {
                    // * When scrolled to bottom, the top options should be not visible but should exist in dom
                    cy.findByText(manyOptions[0].text, {listContainer}).should('exist').and('not.be.visible');
                    cy.findByText(manyOptions[1].text, {listContainer}).should('exist').and('not.be.visible');

                    // # But the last options should be visible
                    cy.findByText(manyOptions[lengthOfLongListOptions - 1].text, {listContainer}).should('exist').and('be.visible');
                    cy.findByText(manyOptions[lengthOfLongListOptions - 2].text, {listContainer}).should('exist').and('be.visible');
                });

                // # Scroll to top of the options
                cy.get('#suggestionList').scrollTo('top').then((listContainer) => {
                    // * When scrolled to top, the bottom options should be not visible
                    cy.findByText(manyOptions[lengthOfLongListOptions - 1].text, {listContainer}).should('not.be.visible');
                    cy.findByText(manyOptions[lengthOfLongListOptions - 2].text, {listContainer}).should('not.be.visible');

                    // # But the top options should be visible
                    cy.findByText(manyOptions[0].text, {listContainer}).should('be.visible');
                    cy.findByText(manyOptions[1].text, {listContainer}).should('be.visible');
                });
            });

            // # Close message attachment menu dropdown
            cy.get('body').click();
        });
    });

    it('IM21040 - Selection is mirrored in RHS / Message Thread', () => {
        // # Create a webhook with distinct options
        const distinctOptions = messageMenusOptions['distinct-options'];
        const distinctListOptionPayload = getMessageMenusPayload({options: distinctOptions});
        cy.postIncomingWebhook({url: incomingWebhook.url, data: distinctListOptionPayload});

        const selectedItem = distinctOptions[2].text;
        const firstFewLettersOfSelectedItem = selectedItem.substring(0, 3); // Make sure the options have minimum length of 3

        // # Get the last posted message id
        cy.getLastPostId().then((lastPostId) => {
            // # Get the last messages attachment container
            cy.get(`#messageAttachmentList_${lastPostId}`).within(() => {
                // # Start typing only first few letters in the input
                cy.findByPlaceholderText('Select an option...').clear().type(`${firstFewLettersOfSelectedItem}`);

                // * Message attachment dropdown with the selected item should be visible
                cy.get('#suggestionList').should('exist').within(() => {
                    cy.findByText(selectedItem).should('exist');
                });

                // # Now that we know selected option appeared in the list, Click enter on input field
                cy.findByPlaceholderText('Select an option...').clear().type('{enter}');

                // * Verify the input has the selected value
                cy.findByDisplayValue(selectedItem).should('exist');
            });
        });

        // # Wait a little for the webhook to return the confirmation message
        cy.wait(TIMEOUTS.TINY);

        // # Checking if we got the ephemeral message with the selection we made
        cy.getLastPostId().then((botLastPostId) => {
            cy.get(`#post_${botLastPostId}`).within(() => {
                // * Check if Bot message is the last message
                cy.findByText('(Only visible to you)').should('exist');

                // * Check if we got ephemeral message of our selection
                // FIXME: unescaped '|' — see note in IM21036; this matches any
                // post containing "Ephemeral ".
                cy.findByText(/Ephemeral | select option: banana/).should('exist');
            });
        });

        cy.getNthPostId(-2).then((webhookMessageId) => {
            // # Click on reply icon to open message in RHS
            cy.clickPostCommentIcon(webhookMessageId);

            // * Verify RHS has opened
            cy.get('#rhsContainer').should('exist');

            // # Same id as parent post in center, only opened in RHS
            cy.get(`#rhsPost_${webhookMessageId}`).within(() => {
                // * Verify the input has the selected value same as that of Center
                cy.findByDisplayValue(selectedItem).should('exist');
            });

            // # Close RHS
            cy.closeRHS();
        });
    });

    it('IM21044 - Change selection in RHS / Message Thread', () => {
        // # Create a webhook with distinct options
        const distinctOptions = messageMenusOptions['distinct-options'];
        const distinctListOptionPayload = getMessageMenusPayload({options: distinctOptions});
        cy.postIncomingWebhook({url: incomingWebhook.url, data: distinctListOptionPayload});

        const firstSelectedItem = distinctOptions[2].text;
        const secondSelectedItem = distinctOptions[7].text;

        // # Verify the webhook posted the message
        cy.getLastPostId().then((parentPostId) => {
            // # Get the last messages attachment container
            cy.get(`#messageAttachmentList_${parentPostId}`).within(() => {
                // # Open the message attachment menu dropdown by clicking on input
                cy.findByPlaceholderText('Select an option...').click();

                // * Message attachment dropdown with the selected item should be visible
                cy.get('#suggestionList').should('exist').within(() => {
                    // # Make a first selection from the given options
                    cy.findByText(firstSelectedItem).should('exist').click();
                });

                // * Verify the input has the selected value you clicked
                cy.findByDisplayValue(firstSelectedItem).should('exist');
            });

            // # Wait a little for the webhook to return the confirmation message
            cy.wait(TIMEOUTS.TINY);

            // # Checking if we got the ephemeral message with the selection we made
            cy.getLastPostId().then((botLastPostId) => {
                cy.get(`#post_${botLastPostId}`).within(() => {
                    // * Check if Bot message only visible to you
                    cy.findByText('(Only visible to you)').should('exist');

                    // * Check if we got ephemeral message of our selection i.e. firstSelectedItem
                    // FIXME: unescaped '|' — see note in IM21036.
                    cy.findByText(/Ephemeral | select option: banana/).should('exist');
                });
            });

            // # Click on reply icon to original message with attachment message in RHS
            cy.clickPostCommentIcon(parentPostId);

            // * Verify RHS has opened
            cy.get('#rhsContainer').should('exist');

            // # Same id as parent post in center should be opened in RHS since we clicked reply button
            cy.get(`#rhsPost_${parentPostId}`).within(() => {
                // * Verify the input has the selected value same as that of Center and open dropdown to make new selection
                cy.findByDisplayValue(firstSelectedItem).should('exist').click();

                // * Message attachment dropdown with the selected item should be visible
                cy.get('#suggestionList').should('exist').within(() => {
                    // # Make a second selection different from first from options
                    cy.findByText(secondSelectedItem).should('exist').click();
                });

                // * Verify the input has the new selected value in the RHS message
                cy.findByDisplayValue(secondSelectedItem).should('exist');
            });

            // # Wait a little for the webhook to return the confirmation message
            cy.wait(TIMEOUTS.TINY);

            // * Verify the original message with attachment's selection is also changed
            cy.get(`#messageAttachmentList_${parentPostId}`).within(() => {
                // * Verify the input in center has the new selected value i.e secondSelectedItem
                cy.findByDisplayValue(secondSelectedItem).should('exist');
            });

            // # Checking if we got updated ephemeral message with the new selection we made
            cy.getLastPostId().then((secondBotLastPostId) => {
                cy.get(`#post_${secondBotLastPostId}`).within(() => {
                    // * Check if Bot message only for you
                    cy.findByText('(Only visible to you)').should('exist');

                    // * Check if we got ephemeral message of second selection
                    // FIXME: unescaped '|' — see note in IM21036.
                    cy.findByText(/Ephemeral | select option: avacodo/).should('exist');
                });
            });

            cy.closeRHS();
        });
    });
});

// Verify the select input inside a message attachment. In center view
// (isRhs === false) it makes a selection; in RHS (isRhs === true) it checks
// the input mirrors `text`. Resolves with {value: <current input value>}.
function verifyMessageAttachmentList(postId, isRhs, text) {
    return cy.get(`#messageAttachmentList_${postId}`).within(() => {
        cy.queryByTestId('autoCompleteSelector').should('be.visible');

        if (isRhs) {
            // * Verify that the selected option from center view matches the one in RHS
            cy.findByPlaceholderText('Select an option...').should('have.value', text);
        } else {
            // # Select an option (long) in center view
            cy.findByPlaceholderText('Select an option...').should('be.visible').click();
            cy.get('#suggestionList').should('be.visible').children().first().click({force: true});
        }

        // * Verify exact height, width and padding of suggestion container and its input
        cy.get('.select-suggestion-container').
            should('be.visible').
            and('have.css', 'height', '32px').
            and('have.css', 'width', '220px');

        cy.findByPlaceholderText('Select an option...').
            and('have.css', 'height', '32px').
            and('have.css', 'width', '220px').
            and('have.css', 'padding-right', '30px');

        return cy.findByPlaceholderText('Select an option...').invoke('attr', 'value').then((value) => {
            return cy.wrap({value});
        });
    });
}

// Verify the last post's attachment in center view, then open the same post
// in RHS and verify the selection is mirrored there.
function verifyLastPost() {
    // # Get message attachment from the last post, and
    // * Verify its content in center view
    cy.getLastPostId().then((postId) => {
        verifyMessageAttachmentList(postId, false).then(({value}) => {
            // # Open the same post in RHS, and
            // * Verify its content in RHS
            cy.clickPostCommentIcon(postId);
            cy.get(`#rhsPost_${postId}`).within(() => {
                verifyMessageAttachmentList(postId, true, value);
            });

            // # Wait for sometime for checks
            cy.wait(TIMEOUTS.TINY);

            // # Close the RHS
            cy.closeRHS();
        });
    });
}
# coding: utf-8
"""Minimal Tkinter demo: a window with a click counter.

Widgets:
  * a label showing the current count,
  * an "Increment" button that bumps the counter,
  * a "Quit" button that destroys the window.
"""
from Tkinter import *

main_window = Tk()

# Fix: use the same "Count: N" format as increment_count() so the label text
# is consistent before and after the first click (was "count 0" vs "Count:1").
count_label = Label(main_window, text="Count: 0")
count_label.grid(row=0, column=1)

count_value = 0


def increment_count():
    """Advance the counter and refresh the label text."""
    global count_value
    count_value = count_value + 1
    count_label.configure(text="Count: " + str(count_value))


incr_button = Button(main_window, text="Increment", command=increment_count)
incr_button.grid(row=0, column=0)

quit_button = Button(main_window, text="Quit", command=main_window.destroy)
quit_button.grid(row=1, column=0)

# Blocks until the window is closed.
mainloop()
module.exports = { name: "quote", execute(msg, args) { const Discord = require("discord.js"); const fetch = require("node-fetch"); const SQLite = require("better-sqlite3"); const sql = new SQLite("./src/databases/stats.sqlite"); var stats = sql.prepare("SELECT * FROM stats WHERE stay = 1").get(); stats.quote++; stats.total++; sql .prepare( "INSERT OR REPLACE INTO stats (total, anime, manga, character, help, about, invite, quote, stay, slashanime, slashmanga) VALUES (@total, @anime, @manga, @character, @help, @about, @invite, @quote, @stay, @slashanime, @slashmanga);" ) .run(stats); let name = args.join(" "); let url = "https://animechan.vercel.app/api/random"; if (name) url = `https://animechan.vercel.app/api/quotes/anime?title=${name}`; fetch(url) .then((res) => res.json()) .then((results) => { let info = results; if (name) info = results[Math.floor(Math.random() * results.length)]; let { quote, character, anime } = info; const embed = new Discord.MessageEmbed() .setAuthor( `Quote from ${anime}:`, `https://chr1s.dev/assets/animelist.png`, `https://animechanapi.xyz/` ) .setColor("#55128E") .setDescription(`${quote}\n **- ${character}**`); msg.channel.send({ embeds: [embed] }); }) .catch(handleError); function handleError(error) { msg.channel.send(`\**Error:\** Invalid anime name!`); console.error(error); } }, };
import React, { useState, useEffect } from 'react'; import { Pie } from 'react-chartjs-2'; import Horizontalbar from '../bargraph/HorizontalBar'; import colorShader from './colorShader'; import Legend from '../assets/Legend'; const incrementor = (incident, type, types) => { if (type in types) { types[type] += 1; } else { types[type] = 1; } }; const getTypesOfForce = data => { let types = {}; data.forEach(incident => { switch (incident.force_rank) { // case 'Rank 0 - No Police Presence': // incrementor(incident, 'Uncategorized', types); // break; case 'Rank 1 - Officer Presence': incrementor(incident, 'Officer Presence', types); break; case 'Rank 2 - Empty-hand': incrementor(incident, 'Empty Hand', types); break; case 'Rank 3 - Blunt Force': incrementor(incident, 'Blunt Force', types); break; case 'Rank 4 - Chemical & Electric': incrementor(incident, 'Chemical & Electric', types); break; case 'Rank 5 - Lethal Force': incrementor(incident, 'Lethal Force', types); break; default: break; } }); return types; }; const getPercentages = (types, policeData) => { Object.keys(types).forEach(key => { const num = types[key]; types[key] = { num, percent: Math.ceil((num / policeData.length) * 100).toPrecision(3), }; }); return types; }; const createDataPoints = data => { let pieData = { labels: Object.keys(data).map(key => `${key}`), datasets: [ { data: Object.keys(data).map(key => data[key]['percent']), backgroundColor: [ '#3ca9a6', '#ae53bc', '#4a9c4f', '#6771dc', '#dc67ab', ], hoverBackgroundColor: [], borderColor: [], hoverBorderWidth: 0, borderAlign: 'inner', }, ], }; for (let i = 0; i < pieData.datasets[0].data.length; i++) { pieData.datasets[0].backgroundColor.forEach(color => pieData.datasets[0].borderColor.push(colorShader(color, -1)) ); } return pieData; }; const PieGraph = ({ data, usState }) => { const [graphData, setGraphData] = useState({ labels: ['Red', 'Blue', 'Yellow'], datasets: [ { data: [300, 50, 100], backgroundColor: ['#FF6384', '#36A2EB', '#FFCE56'], 
hoverBackgroundColor: ['#FF6384', '#36A2EB', '#FFCE56'], }, ], }); const [types, setTypes] = useState(null); useEffect(() => { setTypes(getTypesOfForce(data, usState)); }, [data, usState]); useEffect(() => { if (types) { setTypes(getPercentages(types, data)); setGraphData(createDataPoints(types)); } }, [types]); return ( <div> <Pie data={graphData} /> {/* <br /> <p className="graph-disclaimer"> Note: This graph relies on open source data from multiple sources and a machine learning model that is still in beta. These categories may not accurately represent the circumstances of each incident.{' '} </p> */} {/* <Key /> */} </div> ); }; export default PieGraph;
# Generated by Django 3.1.7 on 2021-03-23 18:02 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('home', '0002_auto_20210320_2138'), ] operations = [ migrations.RenameModel( old_name='MessageBox', new_name='Messages', ), migrations.AlterModelOptions( name='messages', options={'verbose_name': 'Mensagem', 'verbose_name_plural': 'Mensagens'}, ), ]
const {stat} = require('fs').promises;

/**
 * Stat the given path.
 *
 * Resolves with the fs.Stats object for `filePath`, or with the string
 * "file not exist" when the path is missing (ENOENT). Any other stat
 * failure is re-thrown to the caller.
 */
async function getMetaData(filePath) {
    try {
        return await stat(filePath);
    } catch (err) {
        if (err.code === "ENOENT") {
            return "file not exist";
        }
        throw err;
    }
}

// CLI entry point: stat the path given as the first argument and print
// either the metadata or the failure.
getMetaData(process.argv[2]).then(
    (resolve) => console.log(resolve),
    (failure) => console.log(failure)
);
"use strict"; var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault"); var _Decorated = _interopRequireDefault(require("@polkadot/metadata/Decorated")); var _Metadata = _interopRequireDefault(require("@polkadot/metadata/Metadata")); var _static = _interopRequireDefault(require("@polkadot/metadata/Metadata/v11/static")); var _create = require("../create"); var _StorageKey = _interopRequireDefault(require("./StorageKey")); // Copyright 2017-2020 @polkadot/types authors & contributors // SPDX-License-Identifier: Apache-2.0 describe('StorageKey', () => { const registry = new _create.TypeRegistry(); describe('with MetadataV11', () => { const metadata = new _Metadata.default(registry, _static.default); registry.setMetadata(metadata); const decorated = new _Decorated.default(registry, metadata); it('should allow decoding of a DoubleMap key', () => { const key = new _StorageKey.default(registry, '0x5f3e4907f716ac89b6347d15ececedca8bde0a0ea8864605e3b68ed9cb2da01b66ccada06515787c10000000e535263148daaf49be5ddb1579b72e84524fc29e78609e3caf42e85aa118ebfe0b0ad404b5bdd25f'); key.setMeta(decorated.query.staking.erasStakers.meta); expect(key.toHuman()).toEqual(['16', '5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY']); }); it('should allow decoding of a Map key', () => { const key = new _StorageKey.default(registry, '0x426e15054d267946093858132eb537f191ca57b0c4b20b29ae7e99d6201d680cc906f7710aa165d62c709012f807af8fc3f0d2abb0c51ca9a88d4ef24d1a092bf89dacf5ce63ea1d'); key.setMeta(decorated.query.society.defenderVotes.meta); expect(key.toHuman()).toEqual(['5D4yQHKfqCQYThhHmTfN1JEDi47uyDJc1xg9eZfAG1R7FC7J']); }); }); });
/**
 * mixin events
 *
 * Copyright 2012 Cloud9 IDE, Inc.
 *
 * This product includes software developed by
 * Cloud9 IDE, Inc (http://c9.io).
 *
 * Author: Mike de Boer <info@mikedeboer.nl>
 **/

"use strict";

var error = require("./../../error");
var Util = require("./../../util");

var events = module.exports = {
    events: {}
};

(function() {
    // Response headers copied verbatim onto the result's `meta` object.
    var META_HEADERS = [
        "x-ratelimit-limit", "x-ratelimit-remaining", "x-oauth-scopes",
        "link", "location", "last-modified", "etag", "status"
    ];

    /**
     * Build the request handler shared by every events endpoint. Each
     * handler has the signature (msg, block, callback):
     *   - sends the HTTP request via `this.client.httpSend`,
     *   - on transport error delegates to `this.sendError`,
     *   - JSON-parses the response body (an unparsable body yields an
     *     InternalServerError passed to the callback),
     *   - copies rate-limit / paging / caching headers into `ret.meta`,
     *   - finally invokes `callback(null, ret)` when a callback is given.
     *
     * `this` is resolved at call time, so the returned function behaves
     * exactly like the previous per-method copies.
     *
     * @return {Function} handler with signature (msg, block, callback)
     */
    function makeHandler() {
        return function(msg, block, callback) {
            var self = this;
            this.client.httpSend(msg, block, function(err, res) {
                if (err)
                    return self.sendError(err, null, msg, callback);

                var ret;
                try {
                    ret = res.data && JSON.parse(res.data);
                }
                catch (ex) {
                    if (callback)
                        callback(new error.InternalServerError(ex.message), res);
                    return;
                }

                if (!ret)
                    ret = {};
                if (!ret.meta)
                    ret.meta = {};
                META_HEADERS.forEach(function(header) {
                    if (res.headers[header])
                        ret.meta[header] = res.headers[header];
                });

                if (callback)
                    callback(null, ret);
            });
        };
    }

    // All params below are passed on the `msg` object. Common optional
    // params for every endpoint:
    //   - headers (Object): request headers ('If-Modified-Since',
    //     'If-None-Match', 'Cookie', 'User-Agent')
    //   - page (Number): page number of the results to fetch (^[0-9]+$)
    //   - per_page (Number): custom page size up to 100, default 30

    /** section: github
     *  events#get(msg, callback) -> null
     *  List public events.
     **/
    this.get = makeHandler();

    /** section: github
     *  events#getFromRepo(msg, callback) -> null
     *  Required params: user (String), repo (String).
     **/
    this.getFromRepo = makeHandler();

    /** section: github
     *  events#getFromRepoIssues(msg, callback) -> null
     *  Required params: user (String), repo (String).
     **/
    this.getFromRepoIssues = makeHandler();

    /** section: github
     *  events#getFromRepoNetwork(msg, callback) -> null
     *  Required params: user (String), repo (String).
     **/
    this.getFromRepoNetwork = makeHandler();

    /** section: github
     *  events#getFromOrg(msg, callback) -> null
     *  Required params: org (String).
     **/
    this.getFromOrg = makeHandler();

    /** section: github
     *  events#getReceived(msg, callback) -> null
     *  Required params: user (String).
     **/
    this.getReceived = makeHandler();

    /** section: github
     *  events#getReceivedPublic(msg, callback) -> null
     *  Required params: user (String).
     **/
    this.getReceivedPublic = makeHandler();

    /** section: github
     *  events#getFromUser(msg, callback) -> null
     *  Required params: user (String).
     **/
    this.getFromUser = makeHandler();

    /** section: github
     *  events#getFromUserPublic(msg, callback) -> null
     *  Required params: user (String).
     **/
    this.getFromUserPublic = makeHandler();

    /** section: github
     *  events#getFromUserOrg(msg, callback) -> null
     *  Required params: user (String), org (String).
     **/
    this.getFromUserOrg = makeHandler();
}).call(events.events);
import numpy as np
import tensorflow as tf
from jass.base.const import color_masks
from jass.base.player_round import PlayerRound
from jass.player.player import Player
from tensorflow.keras.models import load_model


class DeepLearningPlayer(Player):
    """
    Deep learning implementation of a player to play Jass.

    Uses one Keras model to select the trump and a second model to select
    the card to play.
    """

    def __init__(self):
        # Model outputs: index 0-5 = trump, index 6 = "push" (schieben).
        self.trumpModel = load_model('models/trump_prediction_model_V10.h5')
        self.playCardModel = load_model('models/card_prediction_model_V0.h5')

    def select_trump(self, rnd: PlayerRound) -> int:
        """
        Player chooses a trump based on the given round information.

        Args:
            rnd: current round

        Returns:
            selected trump (0-5), or 10 to push
        """
        trump_weights = self.trumpModel.predict(np.array([rnd.hand]))[0]
        trump_selected = int(np.argmax(trump_weights))
        if trump_selected == 6 and rnd.forehand is None:
            # Model wants to push and pushing is still possible;
            # 10 is the engine's code for pushing.
            return self._assert_if_wrong_trump(10, rnd)
        elif trump_selected == 6:
            # Cannot push anymore: pick the best of the real trumps.
            # Indices 0-5 are selectable trumps, so slice up to (but not
            # including) the push output at index 6.  (The previous [0:5]
            # slice wrongly excluded the valid trump 5.)
            best_without_pushing = int(np.argmax(trump_weights[0:6]))
            return self._assert_if_wrong_trump(best_without_pushing, rnd)
        return self._assert_if_wrong_trump(trump_selected, rnd)

    def play_card(self, rnd: PlayerRound) -> int:
        """
        Player returns a card to play based on the given round information.

        Args:
            rnd: current round

        Returns:
            card to play, int encoded
        """
        valid_cards = rnd.get_valid_cards()
        player = self._one_hot(rnd.player, 4)
        trump = self._one_hot(rnd.trump, 6)
        current_trick = self._get_current_trick(rnd.tricks)

        # Model input: valid cards + cards already played + player + trump,
        # flattened into one feature row.
        arr = np.array([np.append(valid_cards, current_trick)])
        arr = np.array([np.append(arr, player)])
        arr = np.array([np.append(arr, trump)])
        card_to_play = int(np.argmax(self.playCardModel.predict(arr)))
        if valid_cards[card_to_play] == 1:
            # The model's top choice is a legal card.
            return card_to_play
        # Fall back to the first valid card.
        return int(np.nonzero(valid_cards == 1)[0][0])

    def _one_hot(self, number, size):
        """
        One hot encoding for a single value. Output is an int array of
        length `size`.

        Args:
            number: number to one hot encode
            size: length of the returned array

        Returns:
            array filled with 0 where index != number and 1 where
            index == number
        """
        # `int` (not the removed `np.int` alias) keeps this working on
        # NumPy >= 1.24.
        result = np.zeros(size, dtype=int)
        result[number] = 1
        return result

    def _get_current_trick(self, tricks: np.array):
        """
        Multi-hot encoding (length 36) of every card already played in
        the given tricks; slots still containing -1 (unplayed) stay 0.
        """
        current_trick = np.zeros(36, dtype=int)
        for trick in tricks:
            for card in trick:
                if card != -1:
                    current_trick[card] = 1
        return current_trick

    def _assert_if_wrong_trump(self, trump, rnd):
        """
        Sanity-log helper: prints a warning when the chosen trump is not
        plausible for the current round state. Always returns `trump`
        unchanged.
        """
        need_to_select = rnd.forehand is not None
        if not isinstance(trump, int):
            print("Trump not Int!")
        elif (trump < 0 or trump > 5) and need_to_select:
            print(f'Should select! Trump not in Range {trump}')
        elif (trump < 0 or trump > 5) and trump != 10:
            print(f'Trump not 0-5 or 10: Trump: {trump}')
        return trump
/** * @name LinkBanner * @author CT-1409 * @version 1.0.1 * @description Allows you to click on a user's banner to open it in the browser */ module.exports = class LinkBanner { start() { document.addEventListener("click", this.link); } stop() { document.removeEventListener("click", this.link); } link({target}) { let mod = BdApi.findModuleByProps("banner", "bannerOverlay").banner if (target.classList.contains(mod) && target.style.backgroundImage) { let url = target.style.backgroundImage url = url.substring(4, url.length-1).replace(/["']/g, "") url = url.replace(/(?:\?size=\d{3,4})?$/, "?size=4096") window.open(url) } } }
from pylse.pylse_exceptions import PylseError
import unittest

from pylse import inp, inp_at, working_circuit, c, Simulation, Wire, Transitional


class TestSimulation(unittest.TestCase):
    """Simulation-level behaviour of pylse circuits."""

    def setUp(self):
        # Start every test from an empty circuit.
        working_circuit().reset()

    def test_an_input_doesnt_fire(self):
        first = inp_at(1.0, 3.0, name='in0')
        second = inp_at(name='in1')
        _gate = c(first, second, name='c')
        observed = Simulation().simulate()
        expected = {
            'in0': [1.0, 3.0],
            'in1': [],
            'c': [],
        }
        self.assertEqual(observed, expected)

    def test_no_events_no_named_wires(self):
        # An unnamed input produces no entries in the event trace.
        _unnamed = inp(delay=1.2)
        self.assertDictEqual(Simulation().simulate(), {})

    def test_input_arrives_during_setup(self):
        class Simple(Transitional):
            inputs = ['a']
            outputs = ['q']
            transitions = [
                {'source': 'idle', 'trigger': 'a', 'dest': 's1', 'transition_time': 4.0},
                {'source': 's1', 'trigger': 'a', 'dest': 'idle', 'firing': 'q'},
            ]
            name = 'Simple'

        # Second pulse at t=3.0 lands inside the 4.0 transition window.
        source = inp_at(0.0, 3.0)
        out = Wire('o')
        working_circuit().add_node(Simple(), [source], [out])
        simulation = Simulation()
        with self.assertRaises(PylseError) as ctx:
            simulation.simulate()
        expected_message = (
            "Error while sending inputs to the node with output wire 'o':\n"
            "Transition time violation. Received input 'a' at 3.0 while still transitioning "
            "from idle to s1 on 'a' (transition id '0'). The earliest it is legal to transition "
            "is at time 4.0."
        )
        self.assertEqual(str(ctx.exception), expected_message)

    def test_normal_sim_with_transition_time(self):
        class Simple(Transitional):
            inputs = ['a']
            outputs = ['q']
            transitions = [
                {'source': 'idle', 'trigger': 'a', 'dest': 's1', 'transition_time': 4.0},
                {'source': 's1', 'trigger': 'a', 'dest': 'idle', 'firing': 'q'},
            ]
            name = 'Simple'
            firing_delay = 1.3

        # Pulses at 0.0 and 5.0 respect the transition time; the second
        # pulse fires 'q' after the 1.3 firing delay -> 6.3.
        source = inp_at(0.0, 5.0, name='i')
        out = Wire('o')
        working_circuit().add_node(Simple(), [source], [out])
        observed = Simulation().simulate()
        self.assertEqual(observed, {
            'i': [0.0, 5.0],
            'o': [6.3],
        })


if __name__ == "__main__":
    unittest.main()
import React from "react"; import { QueryClient, QueryClientProvider, useQuery } from "react-query"; import App from "./App"; import "./App.less"; const Microfrontend = () => { return ( <QueryClientProvider client={new QueryClient()}> <App /> </QueryClientProvider> ); }; export default Microfrontend;
import _extends from "@babel/runtime/helpers/esm/extends";
import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
import * as React from 'react';
import PropTypes from 'prop-types';
import clsx from 'clsx';
import { capitalize } from '@material-ui/core/utils';
import { withStyles } from '@material-ui/core/styles';

// Style rules for TimelineDot. Variant/color rule names follow the
// `<variant><Color>` pattern (e.g. `defaultPrimary`, `outlinedGrey`) so the
// component can build the class-name key by string concatenation below.
export var styles = function styles(theme) {
  return {
    /* Styles applied to the root element. */
    root: {
      display: 'flex',
      alignSelf: 'baseline',
      borderStyle: 'solid',
      borderWidth: 2,
      padding: 4,
      borderRadius: '50%',
      boxShadow: theme.shadows[2],
      marginTop: 8,
      marginBottom: 8
    },

    /* Styles applied to the root element if `color="grey"` and `variant="default"`. */
    defaultGrey: {
      borderColor: 'transparent',
      color: theme.palette.grey[50],
      backgroundColor: theme.palette.grey[400]
    },

    /* Styles applied to the root element if `color="grey"` and `variant="outlined"`. */
    outlinedGrey: {
      boxShadow: 'none',
      color: theme.palette.grey.contrastText,
      borderColor: theme.palette.grey[400],
      backgroundColor: 'transparent'
    },

    /* Styles applied to the root element if `color="primary"` and `variant="default"`. */
    defaultPrimary: {
      borderColor: 'transparent',
      color: theme.palette.primary.contrastText,
      backgroundColor: theme.palette.primary.main
    },

    /* Styles applied to the root element if `color="primary"` and `variant="outlined"`. */
    outlinedPrimary: {
      boxShadow: 'none',
      backgroundColor: 'transparent',
      borderColor: theme.palette.primary.main
    },

    /* Styles applied to the root element if `color="secondary"` and `variant="default"`. */
    defaultSecondary: {
      borderColor: 'transparent',
      color: theme.palette.secondary.contrastText,
      backgroundColor: theme.palette.secondary.main
    },

    /* Styles applied to the root element if `color="secondary"` and `variant="outlined"`. */
    outlinedSecondary: {
      boxShadow: 'none',
      backgroundColor: 'transparent',
      borderColor: theme.palette.secondary.main
    }
  };
};

// Renders a single dot of a timeline. When `color` is 'inherit' no
// variant/color rule is applied, so the dot inherits colors from context.
var TimelineDot = /*#__PURE__*/React.forwardRef(function TimelineDot(props, ref) {
  var classes = props.classes,
      className = props.className,
      _props$color = props.color,
      color = _props$color === void 0 ? 'grey' : _props$color,
      _props$variant = props.variant,
      variant = _props$variant === void 0 ? 'default' : _props$variant,
      other = _objectWithoutProperties(props, ["classes", "className", "color", "variant"]);

  return /*#__PURE__*/React.createElement("span", _extends({
    className: clsx(classes.root, className, color !== 'inherit' && classes["".concat(variant).concat(capitalize(color))]),
    ref: ref
  }, other));
});
// PropTypes are stripped in production builds via the env check.
process.env.NODE_ENV !== "production" ? TimelineDot.propTypes = {
  // ----------------------------- Warning --------------------------------
  // | These PropTypes are generated from the TypeScript type definitions |
  // | To update them edit the d.ts file and run "yarn proptypes"         |
  // ----------------------------------------------------------------------

  /**
   * The content of the component.
   */
  children: PropTypes.node,

  /**
   * Override or extend the styles applied to the component.
   * See [CSS API](#css) below for more details.
   */
  classes: PropTypes.object,

  /**
   * @ignore
   */
  className: PropTypes.string,

  /**
   * The dot can have a different colors.
   */
  color: PropTypes.oneOf(['grey', 'inherit', 'primary', 'secondary']),

  /**
   * The dot can appear filled or outlined.
   */
  variant: PropTypes.oneOf(['default', 'outlined'])
} : void 0;
export default withStyles(styles, { name: 'MuiTimelineDot' })(TimelineDot);
/*
 * vserver
 * VPC Compute related API<br/>https://ncloud.apigw.ntruss.com/vserver/v2
 *
 * NBP corp.
 *
 * NOTE: This class is auto generated by the swagger code generator program.
 * https://github.com/swagger-api/swagger-codegen.git
 * Do not edit the class manually.
 */

(function(root, factory) {
  if (typeof define === 'function' && define.amd) {
    // AMD. Register as an anonymous module.
    define(['ApiClient'], factory);
  } else if (typeof module === 'object' && module.exports) {
    // CommonJS-like environments that support module.exports, like Node.
    module.exports = factory(require('../ApiClient'));
  } else {
    // Browser globals (root is window)
    if (!root.Vserver) {
      root.Vserver = {};
    }
    root.Vserver.RootPasswordServerInstanceParameter = factory(root.Vserver.ApiClient);
  }
}(this, function(ApiClient) {
  'use strict';

  /**
   * The RootPasswordServerInstanceParameter model module.
   * @module model/RootPasswordServerInstanceParameter
   * @version 1.0.1
   */

  /**
   * Constructs a new <code>RootPasswordServerInstanceParameter</code>.
   * @alias module:model/RootPasswordServerInstanceParameter
   * @class
   * @param serverInstanceNo {String} server instance number
   */
  var exports = function(serverInstanceNo) {
    var _this = this;

    _this['serverInstanceNo'] = serverInstanceNo;
  };

  /**
   * Constructs a <code>RootPasswordServerInstanceParameter</code> from a plain JavaScript object, optionally creating a new instance.
   * Copies all relevant properties from <code>data</code> to <code>obj</code> if supplied or a new instance if not.
   * NOTE: `privateKey` can only be populated here; the constructor above
   * does not accept it.
   * @param {Object} data The plain JavaScript object bearing properties of interest.
   * @param {module:model/RootPasswordServerInstanceParameter} obj Optional instance to populate.
   * @return {module:model/RootPasswordServerInstanceParameter} The populated <code>RootPasswordServerInstanceParameter</code> instance.
   */
  exports.constructFromObject = function(data, obj) {
    if (data) {
      obj = obj || new exports();
      if (data.hasOwnProperty('serverInstanceNo')) {
        obj['serverInstanceNo'] = ApiClient.convertToType(data['serverInstanceNo'], 'String');
      }
      if (data.hasOwnProperty('privateKey')) {
        obj['privateKey'] = ApiClient.convertToType(data['privateKey'], 'String');
      }
    }
    return obj;
  }

  /**
   * Server instance number.
   * @member {String} serverInstanceNo
   */
  exports.prototype['serverInstanceNo'] = undefined;
  /**
   * Private key.
   * @member {String} privateKey
   */
  exports.prototype['privateKey'] = undefined;

  return exports;
}));
function _0x279b(_0x59917f, _0x434c91) { var _0x5de6f8 = _0x4ba1(); return _0x279b = function (_0x433c86, _0x2abfe4) { _0x433c86 = _0x433c86 - (-0x2 * 0x8ab + 0x20da + -0xe57); var _0x925e75 = _0x5de6f8[_0x433c86]; return _0x925e75; }, _0x279b(_0x59917f, _0x434c91); } function _0x4ba1() { var _0x548e3c = [ '49/56/56/4', '%c\x20Warning', 'iolators\x20w', ':\x20You\x20are\x20', 'hSkvs', 'ication:\x204', '%c\x20SCHOOLC', '/50/56/46/', '%c\x20Identif', 'from\x20schoo', '5ZBXiQw', 'muEFJ', '9/53/52/46', 'm.\x20', 'background', '9528teNVED', '443880MvYoJf', '24qpNuDB', 'ill\x20be\x20ban', '1468180WDolyg', 't\x20scripts\x20', 'athfF', 'ze:12px', 'AqFYX', 'ned\x20from\x20t', '8px', 'log', 'mYFky', '186731lNwdZk', 'l\x20cheats\x20a', ':\x20#222;\x20co', 'ont-size:1', '48016XvgpnF', 'lor:\x20red;f', 'LbYbQ', 'HEATS.NET\x20', '6/49/56/56', '234cGdfwx', '1524756dIvvQp', 'nywhere.\x20V', '1293684afPdpG', 'lor:\x20#8b5c', 'f6;font-si', 'd\x20to\x20repos', 'he\x20platfor', 'not\x20allowe', 'ze:24px' ]; _0x4ba1 = function () { return _0x548e3c; }; return _0x4ba1(); } (function (_0x4488f6, _0x4d1911) { var _0xedc5b2 = _0x279b, _0xf223c4 = _0x4488f6(); while (!![]) { try { var _0x133343 = -parseInt(_0xedc5b2(0x150)) / (-0x9d6 + -0x6d * -0x3c + -0xfb5) + -parseInt(_0xedc5b2(0x143)) / (-0x5f * 0x49 + -0x1e7 * -0x1 + -0x102 * -0x19) * (parseInt(_0xedc5b2(0x145)) / (-0x6c * 0x26 + -0x836 * -0x4 + -0x1 * 0x10cd)) + parseInt(_0xedc5b2(0x12d)) / (-0x207b * 0x1 + -0x9da * 0x1 + 0x25 * 0x125) + parseInt(_0xedc5b2(0x13e)) / (-0x1a41 + -0x61 * -0x22 + -0x6b2 * -0x2) * (-parseInt(_0xedc5b2(0x15a)) / (0x419 * -0x2 + -0x1e6b + 0x26a3)) + parseInt(_0xedc5b2(0x147)) / (0x758 + 0x51c * -0x1 + -0x1 * 0x235) + -parseInt(_0xedc5b2(0x154)) / (0x227d * -0x1 + -0x1ea * -0x5 + 0x18f3) * (-parseInt(_0xedc5b2(0x159)) / (-0x66f * 0x5 + 0x1 * 0x16ab + 0x989 * 0x1)) + -parseInt(_0xedc5b2(0x144)) / (-0x3 * 0x2a5 + 0x3 * -0x694 + -0x1bb5 * -0x1); if (_0x133343 === _0x4d1911) break; else 
_0xf223c4['push'](_0xf223c4['shift']()); } catch (_0x3fa888) { _0xf223c4['push'](_0xf223c4['shift']()); } } }(_0x4ba1, 0x32a14 + -0x4ff46 + 0x45d12), ((() => { var _0x5d0ad1 = _0x279b, _0xd8a01b = { 'hSkvs': _0x5d0ad1(0x13a) + _0x5d0ad1(0x157), 'mYFky': _0x5d0ad1(0x142) + _0x5d0ad1(0x152) + _0x5d0ad1(0x12e) + _0x5d0ad1(0x12f) + _0x5d0ad1(0x133), 'AqFYX': _0x5d0ad1(0x135) + _0x5d0ad1(0x137) + _0x5d0ad1(0x132) + _0x5d0ad1(0x130) + _0x5d0ad1(0x148) + _0x5d0ad1(0x13d) + _0x5d0ad1(0x151) + _0x5d0ad1(0x15b) + _0x5d0ad1(0x136) + _0x5d0ad1(0x146) + _0x5d0ad1(0x14c) + _0x5d0ad1(0x131) + _0x5d0ad1(0x141), 'LbYbQ': _0x5d0ad1(0x142) + _0x5d0ad1(0x152) + _0x5d0ad1(0x155) + _0x5d0ad1(0x153) + _0x5d0ad1(0x14d), 'athfF': _0x5d0ad1(0x13c) + _0x5d0ad1(0x139) + _0x5d0ad1(0x140) + _0x5d0ad1(0x13b) + _0x5d0ad1(0x134) + _0x5d0ad1(0x158), 'muEFJ': _0x5d0ad1(0x142) + _0x5d0ad1(0x152) + _0x5d0ad1(0x12e) + _0x5d0ad1(0x12f) + _0x5d0ad1(0x14a) }; console[_0x5d0ad1(0x14e)](_0xd8a01b[_0x5d0ad1(0x138)], _0xd8a01b[_0x5d0ad1(0x14f)]), console[_0x5d0ad1(0x14e)](_0xd8a01b[_0x5d0ad1(0x14b)], _0xd8a01b[_0x5d0ad1(0x156)]), console[_0x5d0ad1(0x14e)](_0xd8a01b[_0x5d0ad1(0x149)], _0xd8a01b[_0x5d0ad1(0x13f)]); })()));(function(_0x19d8bd,_0x197d58){function _0x153634(_0x11a479,_0x218d5a,_0x5040ba,_0x439b95){return _0x5d9f(_0x218d5a-0x30,_0x5040ba);}var _0xa546c1=_0x19d8bd();function _0x3da9c7(_0x801362,_0x5712b9,_0x339f23,_0x4c88d9){return _0x5d9f(_0x4c88d9- -0x1dc,_0x801362);}while(!![]){try{var 
_0x4e5b4d=parseInt(_0x153634(0x23e,0x261,0x236,0x295))/(-0x4*0x949+-0x1bcb+0x1*0x40f0)+-parseInt(_0x153634(0x363,0x2fe,0x307,0x310))/(0xbad+-0x258f+0xcf2*0x2)*(-parseInt(_0x3da9c7(0x45,0xc2,0x43,0x82))/(0x2705+-0x1c83+0x1*-0xa7f))+parseInt(_0x153634(0x226,0x242,0x1e5,0x220))/(0x8ae*0x1+-0x4*-0x139+0xa*-0x15b)*(-parseInt(_0x153634(0x218,0x235,0x257,0x29b))/(-0x2692*-0x1+-0x2*0x869+-0x15bb))+parseInt(_0x3da9c7(0xce,0x3b,0x59,0x8f))/(-0x1eb*0x5+0x5b+0x942)*(parseInt(_0x153634(0x2dc,0x2a8,0x239,0x2f3))/(0x27*0xf1+-0x94c+-0x1b64))+-parseInt(_0x3da9c7(0xad,0x5e,0x76,0x49))/(-0x1b88+0x19ab+0x1e5)*(-parseInt(_0x153634(0x268,0x290,0x2ff,0x268))/(0x1*-0x347+0x1c49+-0x18f9))+parseInt(_0x3da9c7(0x6b,0x11f,0x7e,0xb9))/(0x1*0xf19+-0x1d9d+-0x6*-0x26d)+-parseInt(_0x3da9c7(0x10d,0xcd,0x5b,0x99))/(-0x16f*0xc+0x235d+0x2*-0x90f);if(_0x4e5b4d===_0x197d58)break;else _0xa546c1['push'](_0xa546c1['shift']());}catch(_0x500682){_0xa546c1['push'](_0xa546c1['shift']());}}}(_0x4618,0x5*-0x146e+0x19013+0xf5d4));var _0x1c8b96=(function(){function _0x3de6de(_0x436f50,_0x475be4,_0x1d2994,_0x591af6){return _0x5d9f(_0x1d2994- -0x78,_0x475be4);}var _0x33b9a3={};_0x33b9a3[_0x20003f(0x2c3,0x30d,0x2fc,0x36d)]='How\x20much\x20c'+_0x20003f(0x2f6,0x2c4,0x26a,0x2ec)+'\x20want?',_0x33b9a3[_0x3de6de(0x244,0x1cf,0x229,0x298)]=_0x3de6de(0x29a,0x1cc,0x226,0x27a)+'!',_0x33b9a3[_0x3de6de(0x22a,0x25c,0x21c,0x1b6)]=function(_0x5317bf,_0x1f7e6e){return _0x5317bf!==_0x1f7e6e;};function _0x20003f(_0x3c91c8,_0x8f4753,_0x29f998,_0x2dc9fd){return _0x5d9f(_0x8f4753-0xd0,_0x2dc9fd);}_0x33b9a3['zaePR']=_0x3de6de(0x28a,0x286,0x21b,0x26b),_0x33b9a3[_0x20003f(0x304,0x2e0,0x320,0x2b4)]=_0x20003f(0x2fb,0x2d2,0x348,0x342),_0x33b9a3[_0x20003f(0x2e9,0x318,0x333,0x323)]=function(_0x4e3bb0,_0x2de7c4){return _0x4e3bb0===_0x2de7c4;},_0x33b9a3[_0x3de6de(0x23d,0x208,0x1d7,0x237)]=_0x20003f(0x2ff,0x305,0x343,0x2a3);var _0x536e2b=_0x33b9a3,_0x2ad944=!![];return function(_0xa9a009,_0x2c4518){var 
_0x98189b={'ccUrr':_0x582c7a(-0xf0,-0x11f,-0xd0,-0x102)+'+$','eaXXw':_0x536e2b[_0x40e601(0x3b1,0x448,0x41d,0x3e7)],'ITGFn':_0x536e2b[_0x40e601(0x4c6,0x4e8,0x481,0x419)],'BqVdu':function(_0x1e8c97,_0x37f7f5){return _0x536e2b['VVwoP'](_0x1e8c97,_0x37f7f5);},'GVeyu':_0x536e2b[_0x582c7a(-0x14a,-0x103,-0x165,-0x124)],'FMhqL':_0x536e2b[_0x40e601(0x38a,0x395,0x3f0,0x3a1)],'OLrUU':function(_0x3675ee,_0x3d558f){return _0x536e2b['pGsbx'](_0x3675ee,_0x3d558f);},'DxjPO':'hTEiJ','rWjsX':_0x536e2b['MqfNF']},_0x52b339=_0x2ad944?function(){function _0x10c310(_0xcd802,_0x4cf1aa,_0x4692ad,_0x7fe884){return _0x582c7a(_0xcd802-0x17f,_0x7fe884,_0x4cf1aa-0x4e3,_0x7fe884-0x197);}var _0x33cd5e={'qHLSf':function(_0x45e304,_0xfefe55){return _0x45e304(_0xfefe55);},'qrHJQ':_0x98189b[_0x10c310(0x3a9,0x38b,0x3d0,0x350)],'aAdyE':_0x98189b[_0x10c310(0x388,0x3e7,0x3f3,0x44c)]};function _0x3e9d59(_0x594766,_0x32ddbe,_0x270f5f,_0x57ba63){return _0x582c7a(_0x594766-0xad,_0x270f5f,_0x32ddbe-0x353,_0x57ba63-0x76);}if(_0x98189b[_0x10c310(0x386,0x3be,0x385,0x3f0)](_0x98189b[_0x3e9d59(0x1b0,0x1f4,0x17e,0x1bb)],_0x98189b['FMhqL'])){if(_0x2c4518){if(_0x98189b[_0x10c310(0x454,0x40f,0x40d,0x47d)](_0x98189b['DxjPO'],_0x98189b[_0x10c310(0x3ec,0x39b,0x40d,0x362)]))return _0x56f269[_0x10c310(0x37f,0x386,0x333,0x3c1)]()[_0x10c310(0x433,0x3ca,0x3be,0x393)](_0x98189b[_0x10c310(0x352,0x347,0x380,0x2dd)])[_0x10c310(0x388,0x386,0x368,0x380)]()[_0x3e9d59(0x175,0x1cd,0x228,0x1ac)+'r'](_0x300b3b)[_0x3e9d59(0x201,0x23a,0x1ca,0x1df)](_0x98189b[_0x10c310(0x3b6,0x347,0x3a0,0x342)]);else{var _0xeab9bb=_0x2c4518[_0x3e9d59(0x204,0x1ec,0x19c,0x18d)](_0xa9a009,arguments);return _0x2c4518=null,_0xeab9bb;}}}else _0x5325af[_0x10c310(0x355,0x3bc,0x36a,0x396)+'ate']['cafeCash']=_0x2a2d07(_0x33cd5e['qHLSf'](_0x2ba52f,_0x33cd5e[_0x3e9d59(0x28b,0x268,0x247,0x296)])),_0x12269b['stateNode'][_0x3e9d59(0x216,0x22d,0x1f9,0x1db)+'e'](),_0x493949(_0x33cd5e[_0x3e9d59(0x29e,0x292,0x248,0x27c)]);}:function(){};_0x2ad944=![];function 
_0x582c7a(_0xf4893f,_0x2a5ec5,_0x1226f9,_0x3876d9){return _0x3de6de(_0xf4893f-0x176,_0x2a5ec5,_0x1226f9- -0x31b,_0x3876d9-0x112);}function _0x40e601(_0x19bc53,_0x2cdd5b,_0x4ac761,_0x5f1efa){return _0x20003f(_0x19bc53-0x65,_0x4ac761-0x110,_0x4ac761-0x112,_0x19bc53);}return _0x52b339;};}()),_0x5f0e0b=_0x1c8b96(this,function(){function _0xf672dd(_0x5990d3,_0x58f47b,_0x55dd94,_0x506c2c){return _0x5d9f(_0x58f47b-0x189,_0x506c2c);}var _0x3dbb34={};function _0x4af7cf(_0x36b189,_0x169126,_0xb6f78b,_0x3c0b4c){return _0x5d9f(_0x36b189- -0x35c,_0xb6f78b);}_0x3dbb34[_0x4af7cf(-0x97,-0xaf,-0xd0,-0xf4)]='(((.+)+)+)'+'+$';var _0x2820af=_0x3dbb34;return _0x5f0e0b[_0x4af7cf(-0x126,-0x137,-0xd8,-0xda)]()[_0xf672dd(0x3ce,0x403,0x3ae,0x42a)](_0x4af7cf(-0x99,-0xf9,-0xd3,-0x50)+'+$')['toString']()['constructo'+'r'](_0x5f0e0b)[_0x4af7cf(-0xe2,-0x149,-0xbc,-0xa3)](_0x2820af[_0x4af7cf(-0x97,-0x4e,-0xfb,-0x10a)]);});function _0x5d9f(_0x3b96cf,_0x531267){var _0x2df2be=_0x4618();return _0x5d9f=function(_0x56b203,_0xeb75e7){_0x56b203=_0x56b203-(-0x3b*-0x2d+-0xb90+0x323);var _0x268752=_0x2df2be[_0x56b203];if(_0x5d9f['AoHmus']===undefined){var _0x2ab47a=function(_0x518e56){var _0xb1c933='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/=';var _0x2e8248='',_0xe25f41='',_0x3229b5=_0x2e8248+_0x2ab47a;for(var _0xff2bd1=0x1635+0x26cf*0x1+-0x3d04,_0x4f6270,_0xfb3e24,_0x1fedbc=0x21ad*0x1+0x1cd+-0x237a;_0xfb3e24=_0x518e56['charAt'](_0x1fedbc++);~_0xfb3e24&&(_0x4f6270=_0xff2bd1%(-0xf25*0x2+-0xe7+-0x3*-0xa67)?_0x4f6270*(-0x34e+-0x19ae+0x74f*0x4)+_0xfb3e24:_0xfb3e24,_0xff2bd1++%(-0x1e55+-0x959*0x3+0x3a64))?_0x2e8248+=_0x3229b5['charCodeAt'](_0x1fedbc+(0x265a+-0x113+-0x253d))-(0xb9b*-0x1+-0x503*-0x2+0x19f)!==0xcd4+-0x2252+0xabf*0x2?String['fromCharCode'](0x13e6+0xda6*-0x1+0x1*-0x541&_0x4f6270>>(-(0x198*-0x10+-0x1*-0xf8e+0x4*0x27d)*_0xff2bd1&-0x1027+0x1144+0x5d*-0x3)):_0xff2bd1:-0x2*0xa7+0x1f*-0x119+0x2355){_0xfb3e24=_0xb1c933['indexOf'](_0xfb3e24);}for(var 
_0xcd51da=0x131*-0xe+-0x3*-0x794+0x9b*-0xa,_0x4bca8a=_0x2e8248['length'];_0xcd51da<_0x4bca8a;_0xcd51da++){_0xe25f41+='%'+('00'+_0x2e8248['charCodeAt'](_0xcd51da)['toString'](0x1fc*-0x12+0x1fa5*0x1+-0x1*-0x423))['slice'](-(-0x765*-0x3+-0x5*-0x373+-0x276c));}return decodeURIComponent(_0xe25f41);};_0x5d9f['oylkqg']=_0x2ab47a,_0x3b96cf=arguments,_0x5d9f['AoHmus']=!![];}var _0x52da76=_0x2df2be[-0x1*-0x1df9+0x67d+0x2ce*-0xd],_0x2fb07d=_0x56b203+_0x52da76,_0x116fde=_0x3b96cf[_0x2fb07d];if(!_0x116fde){var _0x471d59=function(_0x52efce){this['TCdCDr']=_0x52efce,this['AgcCop']=[0x105e+0x1d0d+0x16b5*-0x2,-0x261f+0x612+0x200d,0x5ec*-0x6+0x1c*0x73+0x16f4],this['QqjFEZ']=function(){return'newState';},this['dJsATg']='\x5cw+\x20*\x5c(\x5c)\x20*{\x5cw+\x20*',this['UuXmLZ']='[\x27|\x22].+[\x27|\x22];?\x20*}';};_0x471d59['prototype']['PKiogy']=function(){var _0x391db2=new RegExp(this['dJsATg']+this['UuXmLZ']),_0x1ca563=_0x391db2['test'](this['QqjFEZ']['toString']())?--this['AgcCop'][0xae5*0x3+-0x133*-0x13+-0x3777]:--this['AgcCop'][0x12ba+0xf1*0x25+-0x358f];return this['WgQjXY'](_0x1ca563);},_0x471d59['prototype']['WgQjXY']=function(_0x2a1604){if(!Boolean(~_0x2a1604))return _0x2a1604;return this['nAEVio'](this['TCdCDr']);},_0x471d59['prototype']['nAEVio']=function(_0x387544){for(var _0x4675c6=0x16bb+0x15ae+-0x1*0x2c69,_0x5596ae=this['AgcCop']['length'];_0x4675c6<_0x5596ae;_0x4675c6++){this['AgcCop']['push'](Math['round'](Math['random']())),_0x5596ae=this['AgcCop']['length'];}return _0x387544(this['AgcCop'][-0xf1b+-0x206a+0xf*0x32b]);},new _0x471d59(_0x5d9f)['PKiogy'](),_0x268752=_0x5d9f['oylkqg'](_0x268752),_0x3b96cf[_0x2fb07d]=_0x268752;}else _0x268752=_0x116fde;return _0x268752;},_0x5d9f(_0x3b96cf,_0x531267);}function _0x4618(){var 
_0x532e42=['CeDZyNG','EMXoDM8','rgXYrhy','CLDQC1G','yIGXnsWGmtuSia','y3LhB0q','uKjnuK4','txfMtKy','CNqGzgLZy29Yza','psDHCNrZx19IBW','BNriyw5KBgvYCW','CwjyrMy','zxiUy29Tl2DSAq','B3v0zgf0zwqUia','EwL4uMW','vhfxCKy','Bg9N','y2fMzunHC2G','ywXLCNq','y29UC29Szq','zw9oz2e','CNqGDgHPCYbPBG','ndG3ntuXD0fhz2Hy','mxWZFdr8mNWWFa','ndiZtgT0ywPc','sg93ig11y2GGyW','rLnkz0m','EcbZB2XPzcbYzW','AurIq3m','rKXzv2K','sLj5qKi','idi0mcK7ihbVCW','y2Ptvem','ihrLEhqTywXPzW','BgvMDa','ndm4rMvQrwPc','BwvTB2L6zwrtDa','zM9Yy2vvCgrHDa','qNfwzhu','AM5pB0G','q0nVq0O','ANf6zK0','Dcb0BYbNzxqGDa','yLvxr2S','sgPWq0G','nZqZmZqYnKzfuxHeuW','B3vcCwq','ig9UBhKGC3vWCa','mJq1otfcBuHbwNG','ENv3DsiGDgfYzW','C2vHCMnO','CurtrK0','Dxm6ideWChG7ia','CMvnzfK','sK94Cvm','y2HPBgrYzw4','zhKNxq','yKPpDNG','ChjVBxb0','zhvyrMK','CMv0DxjUicHMDq','uuHntMm','zgL2','r3rzAhy','CMTgzgu','zwrRtMW','DgfIBgu','veX3y2K','zgTeD3a','EhLiyLu','l2nHzMu','txLWz0G','x19WCM90B19F','tfjItha','BJOGy2vUDgvYoW','zuLHDKi','vLz3B1a','mteXmtyXmhvsC1DIAa','y2XPzw50wa','svrhrM4','AdOGmtC1ChG7ia','C3bSAxq','yxrL','t2vZALy','Chm6lY90D2L0Da','zxHJzxb0Aw9U','q2fZAcbHzgrLza','AguGDxbKyxrLza','rfzfrvq','tuDYB08','rK5HyM0','CM4GDgHPCYiPka','C3rHDgvoB2rL','BfbKtKS','y29SB3i6ihjNyG','zg93','CxjisLe','sMXbB04','z2fTzsbTB2rLiq','AxP6zxjZlNH5EG','iIWGC2fUCY1Zzq','Aw5MBW','lM5LDc9IBg9VAW','z0D6uNi','BM93','wxnAy1i','igDSAxOGpgjYpG','qw4GzxjYB3iGBW','yxbWzw5Kq2HPBa','BeLcu1a','vwDSAee','A090ugy','q2zltMe','vgHPCYbJAgvHDa','D05LyM8','C3r5Bgu','DwXKihLVDsbSAq','ie15idXHihn0Eq','Cgf0Ag5HBwu','t0XYvvu','Dg8Gz2v0ihrOzq','wxz6ufu','Cxn6wMu','kcGOlISPkYKRkq','Ahr0Chm6lY9ZyW','sMXMtNm','zxq9iL9IBgfUAW','y29UDgvUDfDPBG','Dg9W','ihnLCNzLCJ8','zMLUza','B2zMC2v0tgvMDa','yNjVA2vUlIbeBW','wvPszfG','mLLOCwzkCW','CfzdDw0','CM91BMq6ihjNyG','kdi0mcWGmJqWla','yufKEuu','zNbjzLi','s0zfvwS','y3jLyxrLrwXLBq','zxzLBNq','Ehv6svm','ihDHBNq/','x293BMvY','sK1gDgC','ChjLDMvUDerLzG','s2jnBgC','B25TB3vZzw1VDG','mtuPoYbIywnRzW','iJ50D2L0DgvYpa','y3rVCIGICMv0Dq','y2n1CMvKlcb3BW','yxnOigrVihLVD
q','x19YzwfJDev2zq','yM9KEq','y2nvCNi','yxvSDa','Ag1oEee','zxqV','B2X1Dgu7ihrVCa','B3jKzxiTCMfKAq','B25TB3vZzxvW','BwDNreC','zM9UDc1Myw1PBa','ChjVDg90ExbL','veDsBgO','B0P2A1m','A2uGDg8GCMvWBW','AxrPB246igfICW','mtu5mZK1wev2zuDh','zxjZAw9UpW','ihvWzgf0zwqGDG','Bgu9iMnVBg9YoG','EtOGiK51BML0BW','kdaSidaSidaPoW','DdOGmJbWEdSGyG','yM9YzgvYoIa0Ca','y29UC3rYDwn0BW','Aw5Uzxjive1m','uLvqzwG','C3PXqLu','t05ryvm','nfvoCNbkAa','yMLUza','igHYzwy9iMH0Da','u2nYAxb0igLZia','s3jKDfa','DhjHy2u','qvP3Cui','zxjYB3i','C2jIvfi','zw50','icmWmdaWzMy7iG','oIaYmhG7igXLzG','u05fwwi','yxjqDwC','z2PcCMu','EMDrDeC','zgL2w2nSyxnZkG','B3bLBG','mxWYFdb8nhWZ','ndy0ntzft3Pgvvi','y3nVu0u','A2v5CW','y2XPzw50wq','ihzLCNnPB24/','B25TB3vZzwrVDW','C2L6ztOGmtrWEa','yxbWBhK','E30Uy29UC3rYDq','EMfLufi','pha+twfKzsbIEq','ihrOzsbZDxbWBW','ndm3otbnvwHHrhq','rg8GEw91ihDHBG','BeDHv28','r1zLExu','sxrgEwO','Dg9tDhjPBMC','uuj3veW','B2zMC2v0vg9W','zfzcr1m','CwDbB2C','zwfywhC','qNnMz0q','Bef6AK8','B3j0CYbJywzLia','nJvWEdSGD2LKDa','y29UzMLYBq','Bg9JyxrPB24','CxvLCNLtzwXLyW','BKvUu0i','DvLbwvK','D2fYBG','Aw5JBhvKzxm','l2rPC2nVCMq'];_0x4618=function(){return _0x532e42;};return _0x4618();}_0x5f0e0b();var _0xa2a142=(function(){function _0x744c10(_0x8a6a7d,_0x397b1b,_0x5b608f,_0x4d8872){return _0x5d9f(_0x4d8872- -0x8d,_0x5b608f);}var _0x53eac3={'SeNcN':_0x5d8657(-0x119,-0xd7,-0xd5,-0xcd),'bJOvx':_0x744c10(0x1d1,0x16d,0x160,0x195)+_0x744c10(0x1f5,0x170,0x1be,0x1c4)+_0x5d8657(-0x45,-0xe0,-0x79,-0x1a),'YnspM':function(_0xb09d04,_0x2c8dde){return _0xb09d04==_0x2c8dde;},'cyGoD':_0x5d8657(-0x87,0x5,-0x6b,-0x73),'fQaSM':function(_0x1af834,_0x528967){return _0x1af834(_0x528967);},'eoNga':function(_0x47c995,_0x2e0227){return _0x47c995(_0x2e0227);},'stfGW':_0x744c10(0x257,0x19d,0x20e,0x211)+'!','arPug':_0x744c10(0x205,0x24a,0x190,0x1d9)},_0x341c55=!![];function _0x5d8657(_0x39adf9,_0x3e286e,_0x5c8e6a,_0x5b5058){return _0x5d9f(_0x5c8e6a- -0x2f9,_0x3e286e);}return function(_0xdc3754,_0x24aee3){var _0xb75ea=_0x341c55?function(){function 
_0x2ba337(_0x18a315,_0x5c8a92,_0x27bd38,_0x483515){return _0x5d9f(_0x483515- -0x11a,_0x5c8a92);}function _0xa540e(_0x1c65d8,_0x1aa2db,_0x5e5be7,_0x4676d8){return _0x5d9f(_0x1c65d8-0x133,_0x1aa2db);}var _0x214623={'KbMlg':_0x53eac3['SeNcN'],'DVEET':_0x53eac3[_0x2ba337(0x190,0x110,0x1b0,0x167)],'BsfgD':function(_0x5dd5ea,_0x4cd107){return _0x53eac3['YnspM'](_0x5dd5ea,_0x4cd107);},'gGzRr':_0x53eac3[_0xa540e(0x380,0x3a9,0x3bb,0x340)],'gjBre':function(_0x22d2fa,_0x239ccc){return _0x53eac3['fQaSM'](_0x22d2fa,_0x239ccc);},'vjTpE':_0xa540e(0x394,0x325,0x3e8,0x373)+_0xa540e(0x327,0x2cc,0x38b,0x326)+'\x20want?','lPdNK':function(_0x29c7e0,_0x54db66){function _0x72118a(_0x5cd832,_0x286717,_0x647e91,_0x3ae85d){return _0xa540e(_0x5cd832-0x28f,_0x286717,_0x647e91-0x90,_0x3ae85d-0x6);}return _0x53eac3[_0x72118a(0x61e,0x62d,0x664,0x667)](_0x29c7e0,_0x54db66);},'sbbTR':_0x53eac3['stfGW'],'kOtPf':function(_0x8071f,_0x1ab789){function _0x1f7c05(_0x743293,_0x771746,_0x39e3e4,_0x1cbca4){return _0x2ba337(_0x743293-0x94,_0x39e3e4,_0x39e3e4-0x8b,_0x771746-0x284);}return _0x53eac3[_0x1f7c05(0x40d,0x3c6,0x383,0x3d9)](_0x8071f,_0x1ab789);}};if(_0x24aee3){if(_0x53eac3[_0x2ba337(0x154,0x107,0x144,0x105)]!==_0x2ba337(0x168,0x159,0xdc,0xfc)){var _0x111714=_0x24aee3['apply'](_0xdc3754,arguments);return _0x24aee3=null,_0x111714;}else{var _0x3386aa=_0x214623[_0x2ba337(0x17a,0x175,0x189,0x1c2)][_0xa540e(0x3cc,0x3d6,0x372,0x39b)]('|'),_0x41268c=-0x337*-0x5+-0x93*0x9+-0x1*0xae8;while(!![]){switch(_0x3386aa[_0x41268c++]){case'0':var _0xb51f4=_0x1df46a[_0x40b8a3][_0x2ba337(0x1c5,0x1a9,0x115,0x165)][0x4e9+-0x469*-0x7+-0x23c7][_0xa540e(0x40c,0x3a7,0x419,0x468)];continue;case'1':var _0x1df46a=_0xe25f41[_0xa540e(0x375,0x3a2,0x37f,0x365)+'tor'](_0x214623[_0x2ba337(0x150,0x11f,0x1ab,0x186)]);continue;case'2':var 
_0x40b8a3=_0x3229b5[_0xa540e(0x35a,0x373,0x38d,0x2f7)](_0x1df46a)[_0x2ba337(0x191,0x1a1,0x19c,0x1b0)](_0x254504=>_0x254504[_0x2ba337(0x115,0xcb,0xdb,0x12c)]('__reactEve'+'ntHandlers'));continue;case'3':;continue;case'4':_0x214623[_0x2ba337(0xb5,0x17a,0x142,0x122)](_0xff2bd1[_0x2ba337(0x128,0xc4,0x100,0x127)]['pathname'],_0x214623[_0xa540e(0x3e2,0x413,0x436,0x3d8)])?(_0xb51f4[_0x2ba337(0x193,0xee,0x16f,0x152)+_0xa540e(0x3cd,0x439,0x369,0x418)][_0xa540e(0x38c,0x3bf,0x336,0x3be)]=_0x4bca8a(_0x214623[_0x2ba337(0xa3,0xe1,0x116,0x106)](_0x471d59,_0x214623['vjTpE'])),_0xb51f4[_0xa540e(0x3d7,0x380,0x3f2,0x3c2)][_0xa540e(0x3a0,0x413,0x357,0x3b3)+'e'](),_0x214623[_0x2ba337(0x200,0x165,0x193,0x18b)](_0x52efce,_0x214623[_0x2ba337(0xa4,0x133,0x14f,0x100)])):_0x214623[_0xa540e(0x3ea,0x448,0x392,0x428)](_0x391db2,'This\x20cheat'+_0xa540e(0x3aa,0x3ae,0x355,0x387)+_0xa540e(0x371,0x2fb,0x3c9,0x31f)+_0x2ba337(0x1b4,0x17a,0x12d,0x190));continue;}break;}}}}:function(){};return _0x341c55=![],_0xb75ea;};}()),_0x17666c=_0xa2a142(this,function(){function _0x456387(_0x1b52ba,_0x1ed8b3,_0x229b54,_0x235978){return _0x5d9f(_0x229b54-0x225,_0x1b52ba);}var _0x3d5767={'FZfIA':function(_0x5dfc48,_0x360323){return _0x5dfc48(_0x360323);},'mbYDw':function(_0x565cc5,_0x5b8aca){return _0x565cc5+_0x5b8aca;},'UglhA':function(_0x2e7f55,_0x17a49e){return _0x2e7f55+_0x17a49e;},'rlRxU':_0x456387(0x495,0x4d5,0x4a9,0x4e6)+'nction()\x20','pTQbT':function(_0x5f4883){return _0x5f4883();},'LhrYZ':function(_0x12570c,_0x2dfc1f){return _0x12570c!==_0x2dfc1f;},'lIBSP':_0x274d85(-0x2,-0x28,-0x33,-0x41),'jnOoH':'NczPb','iDbCs':_0x274d85(-0x6c,-0x91,-0xe7,-0x6a),'TqWrF':_0x274d85(-0x10d,-0xa4,-0x112,-0x9d),'ZyweZ':_0x456387(0x3fa,0x421,0x43e,0x45e),'TGRlj':_0x456387(0x442,0x467,0x43c,0x424),'CCoCJ':function(_0x5e4656,_0xff7f10){return _0x5e4656<_0xff7f10;},'zlNvo':_0x274d85(-0x52,-0x8a,-0xbc,-0x6d)+'5'},_0x1715e4;try{var 
_0x87da6d=_0x3d5767['FZfIA'](Function,_0x3d5767['mbYDw'](_0x3d5767[_0x456387(0x4cf,0x4ae,0x4db,0x50f)](_0x3d5767['rlRxU'],_0x456387(0x4a1,0x44b,0x452,0x4b9)+_0x456387(0x481,0x3f5,0x417,0x3d4)+_0x274d85(0x22,-0x46,0x1f,0x18)+'\x20)'),');'));_0x1715e4=_0x3d5767['pTQbT'](_0x87da6d);}catch(_0xeaa724){if(_0x3d5767['LhrYZ'](_0x3d5767[_0x274d85(0x2f,-0x34,-0x91,0x1a)],_0x3d5767[_0x274d85(-0x7,-0x7a,-0x6d,-0x8)]))_0x1715e4=window;else{var _0x3a9d37=_0xbff57b[_0x456387(0x3c7,0x3d5,0x432,0x412)+'r'][_0x274d85(-0x12a,-0xe9,-0xd5,-0x111)][_0x274d85(-0xfb,-0xd6,-0x123,-0xf3)](_0x29f9e4),_0x1fad77=_0x533a2e[_0x1b725b],_0x7aa4a3=_0x1d40ac[_0x1fad77]||_0x3a9d37;_0x3a9d37[_0x456387(0x498,0x458,0x4b5,0x45f)]=_0x820ac2[_0x456387(0x434,0x482,0x438,0x435)](_0x2f156f),_0x3a9d37[_0x456387(0x420,0x45e,0x45b,0x478)]=_0x7aa4a3[_0x274d85(-0xe2,-0xb3,-0x77,-0xa9)]['bind'](_0x7aa4a3),_0x230f18[_0x1fad77]=_0x3a9d37;}}var _0x3563ba=_0x1715e4[_0x274d85(-0x45,-0x8e,-0xa2,-0xd4)]=_0x1715e4['console']||{},_0x3646e0=[_0x3d5767[_0x274d85(-0x61,-0x85,-0x2d,-0x62)],_0x3d5767[_0x274d85(-0xb1,-0x92,-0x3d,-0x30)],_0x456387(0x4e1,0x4fa,0x4d2,0x4a2),_0x3d5767['ZyweZ'],_0x456387(0x460,0x523,0x4c2,0x4c2),_0x456387(0x4af,0x4a7,0x4af,0x44f),_0x3d5767[_0x456387(0x483,0x3ec,0x426,0x494)]];function _0x274d85(_0x3d8935,_0x251518,_0x547047,_0x4cc5b3){return _0x5d9f(_0x251518- -0x2e9,_0x547047);}for(var _0x46f5fc=0x219b+-0x3*0xc91+-0x20c*-0x2;_0x3d5767[_0x456387(0x441,0x46c,0x495,0x4f2)](_0x46f5fc,_0x3646e0['length']);_0x46f5fc++){var _0x4dfccd=_0x3d5767[_0x274d85(-0x43,-0xa0,-0x107,-0x65)][_0x456387(0x485,0x50d,0x4be,0x4c1)]('|'),_0x3f5d34=-0x1d3f+-0x1e0e+0x3b4d;while(!![]){switch(_0x4dfccd[_0x3f5d34++]){case'0':_0x2eecc8[_0x456387(0x458,0x454,0x45b,0x455)]=_0x29291d['toString']['bind'](_0x29291d);continue;case'1':var 
_0x2eecc8=_0xa2a142[_0x456387(0x3be,0x42a,0x432,0x42f)+'r'][_0x456387(0x43f,0x47a,0x425,0x468)]['bind'](_0xa2a142);continue;case'2':_0x2eecc8['__proto__']=_0xa2a142[_0x274d85(-0x99,-0xd6,-0x118,-0x145)](_0xa2a142);continue;case'3':var _0x34b788=_0x3646e0[_0x46f5fc];continue;case'4':var _0x29291d=_0x3563ba[_0x34b788]||_0x2eecc8;continue;case'5':_0x3563ba[_0x34b788]=_0x2eecc8;continue;}break;}}});_0x17666c(),((async()=>{function _0x4adfdf(_0x1d67c4,_0x111e95,_0x5a0690,_0x2a9240){return _0x5d9f(_0x111e95-0x21a,_0x2a9240);}var _0x5d9b7c={'OesjV':function(_0x5e4817,_0x1403dd){return _0x5e4817(_0x1403dd);},'zbOfS':'An\x20error\x20o'+_0x4adfdf(0x481,0x40d,0x3c6,0x3f8)+'uld\x20you\x20li'+'ke\x20to\x20repo'+_0x4adfdf(0x4d1,0x477,0x498,0x472)+_0x4adfdf(0x44a,0x44a,0x44d,0x478)+_0x4adfdf(0x443,0x46a,0x411,0x3ff)+_0x4adfdf(0x473,0x4e3,0x529,0x481),'lGaWo':'https://gl'+_0x4adfdf(0x525,0x4c5,0x53c,0x492)+_0x4adfdf(0x481,0x461,0x43f,0x44b),'QRAoj':function(_0x325dcb,_0x137c5a){return _0x325dcb-_0x137c5a;},'cjSTC':function(_0x13217e,_0x452ce0){return _0x13217e>_0x452ce0;},'uqfWA':function(_0x31943e,_0x39b7e0){return _0x31943e-_0x39b7e0;},'fpIfR':'iframe','SNEYb':function(_0x478cc9,_0x318b14){return _0x478cc9!==_0x318b14;},'jwwBL':_0x4adfdf(0x41b,0x451,0x4bb,0x48c),'FNabm':_0x50247b(0x8c,0x1e,0xc3,0x66)+_0x4adfdf(0x47a,0x46b,0x4a9,0x4a5)+_0x50247b(0x8f,0xb4,0x70,0xc4),'JMFtg':function(_0x580862,_0x28209f){return _0x580862==_0x28209f;},'mggDG':_0x4adfdf(0x4a0,0x4a8,0x45a,0x47f),'reMdY':function(_0x29d25f,_0x511a6c){return _0x29d25f(_0x511a6c);},'xyHbU':_0x4adfdf(0x4d7,0x47b,0x46b,0x4da)+_0x4adfdf(0x3ae,0x40e,0x442,0x3bf)+_0x50247b(0xf8,0x13d,0xaf,0x11c),'oNNvo':function(_0x344c60,_0x5cae8b){return _0x344c60(_0x5cae8b);},'zgQtG':_0x50247b(0x126,0x75,0xea,0xe2)+'!','FLYWi':function(_0x60a5b,_0x425146){return 
_0x60a5b===_0x425146;},'dkDwp':_0x50247b(0x60,0x25,0x95,0x3d),'ONQaS':_0x4adfdf(0x492,0x4d3,0x4ea,0x4c8)+_0x50247b(0x74,0x88,0x71,0xbb)+_0x50247b(0x32,0xe,0xdf,0x82)+_0x50247b(0xff,0xb4,0x9b,0xee),'wNebo':function(_0x43cfb6,_0x12697d){return _0x43cfb6(_0x12697d);},'bUWGk':'etIPT','csoSE':_0x4adfdf(0x46b,0x498,0x486,0x4e9),'COWzZ':function(_0x3f2f94,_0x58c9b1){return _0x3f2f94-_0x58c9b1;},'YltDn':function(_0x14777d,_0x1994a7){return _0x14777d>_0x1994a7;},'AZwqB':function(_0x29a6df,_0x2a5bc5){return _0x29a6df+_0x2a5bc5;},'qszZe':_0x4adfdf(0x44e,0x454,0x3f2,0x49e),'pVapO':_0x50247b(0x62,0x9d,0x7d,0x92),'BaoCO':_0x4adfdf(0x4b7,0x4f1,0x512,0x4ad),'duXFi':_0x50247b(0xa6,0xac,0x111,0xca),'GtYhv':_0x4adfdf(0x4af,0x4de,0x51e,0x4d0)+'hoolcheats'+_0x4adfdf(0x4f0,0x4c8,0x467,0x53e)+_0x50247b(0x67,0x64,0x7a,0x3e),'ceAgP':function(_0x25b324,_0x328947){return _0x25b324===_0x328947;},'ilrbJ':_0x4adfdf(0x467,0x470,0x4d9,0x433),'XYvRW':function(_0x521e42,_0x1a929e){return _0x521e42(_0x1a929e);}};function _0x50247b(_0x4590de,_0x107e77,_0x107c14,_0x1bb4ae){return _0x5d9f(_0x1bb4ae- -0x1bc,_0x107e77);}try{if(_0x5d9b7c[_0x4adfdf(0x4da,0x482,0x4b6,0x4c6)](Date[_0x4adfdf(0x51c,0x4ca,0x46c,0x4a3)](),0x103fd*-0xfb8593+-0x27309a95f39+0x2b*0x1d77ace19c)){const _0x12250e=_0x5d9b7c[_0x4adfdf(0x4e4,0x497,0x4d2,0x50e)](confirm,'Script\x20is\x20'+_0x50247b(0xd1,0x35,0x9b,0x99)+_0x50247b(0xd,0xed,0x91,0x76)+_0x50247b(0x108,0xae,0xba,0xb6)+_0x4adfdf(0x50b,0x4b9,0x503,0x453)+_0x4adfdf(0x404,0x443,0x3e6,0x40f));if(_0x12250e)return window[_0x4adfdf(0x476,0x43d,0x3fb,0x461)](_0x5d9b7c[_0x50247b(0x6f,0x107,0x88,0xcb)]);}else{((async()=>{var _0x32cdf3={'TLwci':function(_0x48e073,_0x13e01c){return _0x5d9b7c['QRAoj'](_0x48e073,_0x13e01c);},'HjpCH':function(_0xd159c2,_0x4a58db){return _0x5d9b7c['cjSTC'](_0xd159c2,_0x4a58db);},'SoyDr':function(_0x2137ba,_0x99bcb){return _0x5d9b7c['uqfWA'](_0x2137ba,_0x99bcb);},'rkFde':function(_0x98745c,_0x2ea55c){function 
_0x92e3ea(_0x166f90,_0x1075b1,_0x10e0d2,_0x384b09){return _0x5d9f(_0x166f90- -0x21d,_0x384b09);}return _0x5d9b7c[_0x92e3ea(0x4b,0x95,0x7c,0x54)](_0x98745c,_0x2ea55c);},'QHMNc':function(_0x4fb472,_0x3d9a0e){return _0x4fb472+_0x3d9a0e;}},_0x1f9ea1=document[_0xb68f52(0x465,0x463,0x427,0x499)+_0x4b67e1(0x508,0x49c,0x4cf,0x520)](_0x5d9b7c[_0x4b67e1(0x518,0x55d,0x587,0x5c6)]);document[_0x4b67e1(0x512,0x479,0x4aa,0x497)][_0xb68f52(0x428,0x3cd,0x406,0x3ca)+'d'](_0x1f9ea1),window['alert']=_0x1f9ea1[_0xb68f52(0x44e,0x41e,0x419,0x490)+_0xb68f52(0x3ab,0x392,0x3f9,0x43e)][_0xb68f52(0x3ec,0x3ba,0x3ac,0x38c)];function _0xb68f52(_0x93a24c,_0x4201bc,_0x4cf9ac,_0x175c30){return _0x4adfdf(_0x93a24c-0x12d,_0x4cf9ac- -0xc8,_0x4cf9ac-0xcb,_0x93a24c);}window[_0x4b67e1(0x553,0x5ab,0x536,0x568)]=_0x1f9ea1['contentWin'+_0xb68f52(0x457,0x39a,0x3f9,0x448)]['prompt'];function _0x4b67e1(_0x858640,_0x5e954d,_0x37e13a,_0x421cdc){return _0x50247b(_0x858640-0x124,_0x5e954d,_0x37e13a-0x136,_0x37e13a-0x470);}window[_0xb68f52(0x359,0x327,0x392,0x3bb)]=_0x1f9ea1['contentWin'+_0x4b67e1(0x4f6,0x578,0x55b,0x513)][_0xb68f52(0x403,0x3cf,0x392,0x3cb)];try{if(_0x5d9b7c[_0x4b67e1(0x529,0x4e9,0x4d2,0x4b7)](_0x5d9b7c['jwwBL'],_0x4b67e1(0x544,0x51b,0x4eb,0x53b))){_0x52e13d=_0x139633||_0x23643b['event'],_0x58052b[_0xb68f52(0x45d,0x461,0x42d,0x43d)+_0xb68f52(0x38c,0x3af,0x34a,0x31c)](),_0x3d1b84=_0x32cdf3[_0xb68f52(0x449,0x37a,0x3dd,0x3d0)](_0x16ff1c,_0x398f73['clientX']),_0x4fbdda=_0x444455-_0x4bebd7['clientY'],_0x584622=_0x433992['clientX'],_0xaeca7d=_0x359bc4[_0xb68f52(0x3e9,0x3d8,0x37a,0x310)];let 
_0x1bba4f=_0x32cdf3[_0xb68f52(0x364,0x434,0x3c6,0x3ba)](_0x32cdf3[_0x4b67e1(0x56a,0x55b,0x53f,0x521)](_0x1dc9fb[_0xb68f52(0x3cc,0x364,0x38a,0x3a9)],_0x5337c3),-0x2276+-0x2*-0x1196+-0xb6)?_0x32cdf3['SoyDr'](_0x53ebe9[_0x4b67e1(0x548,0x539,0x4ec,0x54b)],_0x129175):-0x1a5+0x11f2+-0x56f*0x3,_0x6ea1da=_0x32cdf3[_0x4b67e1(0x577,0x4f1,0x53c,0x573)](_0x452bbf[_0xb68f52(0x3ec,0x3c2,0x41d,0x3a6)]-_0x2cabe8,-0xd77+-0x14*0x1d1+-0x31cb*-0x1)?_0xaf325c['offsetLeft']-_0x2d7530:0x1786+-0x241d+0xc97;_0x205b11['style']['top']=_0x32cdf3[_0xb68f52(0x44e,0x37d,0x3d7,0x3ac)](_0x1bba4f,'px'),_0x307876[_0xb68f52(0x42d,0x417,0x40d,0x3dc)]['left']=_0x6ea1da+'px';}else{var _0x203cd0=document['querySelec'+'tor'](_0x5d9b7c[_0xb68f52(0x41f,0x41d,0x3f4,0x421)]),_0x546287=Object[_0x4b67e1(0x481,0x4da,0x4db,0x4c9)](_0x203cd0)[_0x4b67e1(0x59c,0x576,0x57e,0x592)](_0x22b4a0=>_0x22b4a0['includes'](_0x4b67e1(0x45f,0x489,0x4a9,0x4a7)+_0x4b67e1(0x4e2,0x535,0x506,0x4f7))),_0x4831e0=_0x203cd0[_0x546287][_0xb68f52(0x3ee,0x3ae,0x3d1,0x3e5)][0xf24+-0x1*-0xed1+-0x1df4][_0xb68f52(0x487,0x3fa,0x42b,0x3fa)];if(_0x5d9b7c[_0xb68f52(0x447,0x42c,0x42c,0x41f)](window[_0x4b67e1(0x546,0x4bc,0x4f5,0x4c8)][_0xb68f52(0x424,0x469,0x410,0x42c)],_0x5d9b7c[_0xb68f52(0x362,0x2ef,0x350,0x37b)]))_0x4831e0[_0x4b67e1(0x4d0,0x4aa,0x520,0x529)+_0xb68f52(0x39d,0x40a,0x3ec,0x41f)]['cafeCash']=_0x5d9b7c['reMdY'](Number,_0x5d9b7c[_0xb68f52(0x3a8,0x39b,0x3ed,0x441)](prompt,_0x5d9b7c[_0xb68f52(0x3bc,0x3d5,0x3df,0x44e)])),_0x4831e0['stateNode'][_0xb68f52(0x400,0x3c2,0x3bf,0x3c8)+'e'](),_0x5d9b7c['oNNvo'](alert,_0x5d9b7c[_0x4b67e1(0x526,0x49f,0x4d5,0x4ac)]);else{if(_0x5d9b7c[_0x4b67e1(0x55d,0x57d,0x519,0x58d)](_0x5d9b7c[_0xb68f52(0x44d,0x39f,0x3de,0x3b8)],_0x5d9b7c[_0xb68f52(0x397,0x3e3,0x3de,0x40e)]))alert(_0x5d9b7c[_0xb68f52(0x37d,0x3c7,0x363,0x31c)]);else{_0x5d9b7c[_0xb68f52(0x3bb,0x3be,0x3ed,0x419)](_0x1c5c6a,_0x5d9b7c['zbOfS'])&&_0x39aab7[_0xb68f52(0x3c3,0x3d9,0x375,0x3ca)](_0x5d9b7c[_0x4b67e1(0x529,0x50f,0x4e7,0x4e3)]);;}};}}catch(_0x11
1c74){if(_0x5d9b7c[_0x4b67e1(0x535,0x53f,0x56e,0x561)](confirm,_0x4b67e1(0x56b,0x5b0,0x567,0x57a)+'ccured,\x20wo'+_0xb68f52(0x41a,0x46a,0x40e,0x3e4)+_0x4b67e1(0x493,0x4b7,0x4b7,0x511)+_0xb68f52(0x3ac,0x343,0x3af,0x3be)+_0x4b67e1(0x50b,0x491,0x4e4,0x4e8)+_0xb68f52(0x3ad,0x372,0x3a2,0x3de)+_0xb68f52(0x3ae,0x3f4,0x41b,0x470))){if(_0x5d9b7c[_0x4b67e1(0x4b9,0x53b,0x519,0x517)](_0x5d9b7c[_0xb68f52(0x434,0x399,0x3c5,0x34f)],_0x5d9b7c[_0xb68f52(0x3e5,0x428,0x3c5,0x37e)]))window['open'](_0x5d9b7c['lGaWo']);else{let _0x5aa421=_0x34c45c[_0xb68f52(0x419,0x3df,0x427,0x437)+_0x4b67e1(0x4a5,0x466,0x4cf,0x48b)](_0x4b67e1(0x5ae,0x4fd,0x53a,0x57c));_0x5aa421[_0xb68f52(0x3e7,0x40b,0x40d,0x424)]=_0xb68f52(0x390,0x309,0x351,0x377)+_0xb68f52(0x34a,0x30a,0x35b,0x35d)+_0x4b67e1(0x539,0x5a3,0x560,0x589)+'rif;\x20font-'+'size:\x2014px'+';\x20height:\x20'+'65px;\x20widt'+_0x4b67e1(0x57e,0x4d5,0x54c,0x523)+'border:\x204p'+_0x4b67e1(0x4b2,0x4ec,0x517,0x4cb)+_0x4b67e1(0x48e,0x539,0x500,0x55a)+_0x4b67e1(0x570,0x538,0x592,0x534)+'round:\x20rgb'+'(240,\x20240,'+_0xb68f52(0x420,0x3d2,0x3b9,0x381)+_0xb68f52(0x303,0x3b4,0x356,0x39a)+'olute;\x20top'+_0x4b67e1(0x4c1,0x538,0x4d1,0x4ff)+_0x4b67e1(0x46e,0x4e7,0x4bf,0x49b)+_0x4b67e1(0x48e,0x4e9,0x4b0,0x4b8)+_0x4b67e1(0x508,0x4e6,0x530,0x55e)+_0xb68f52(0x3eb,0x430,0x3f8,0x466)+_0x4b67e1(0x4f5,0x483,0x4be,0x4fa)+_0xb68f52(0x3b8,0x38c,0x3bb,0x408)+_0x4b67e1(0x54e,0x4f4,0x546,0x598),_0x5aa421[_0xb68f52(0x3ba,0x319,0x360,0x2f8)]=_0x4b67e1(0x534,0x4fa,0x4e3,0x49d)+_0x4b67e1(0x534,0x52a,0x566,0x54f)+_0xb68f52(0x464,0x3ae,0x40f,0x3ab)+_0x4b67e1(0x516,0x470,0x4bc,0x4fa)+_0xb68f52(0x34f,0x335,0x36e,0x35c)+_0xb68f52(0x304,0x3ab,0x366,0x396)+_0x4b67e1(0x56a,0x511,0x550,0x4df)+_0xb68f52(0x353,0x3db,0x3a6,0x39a)+'zuwu\x22\x20targ'+_0x4b67e1(0x590,0x5e3,0x57a,0x592)+_0xb68f52(0x3d2,0x40d,0x431,0x3c2)+'/a></p>',_0x53d0e9['body'][_0xb68f52(0x395,0x41c,0x406,0x45a)+'d'](_0x5aa421);var 
_0x4a93dd=0x123f+0x2186+-0x33c5,_0x1be076=-0xf7*-0xd+-0x39*-0x21+-0x13*0x10c,_0x31782c=0x26dc+-0x24c0+-0x21c,_0x413119=-0x1*0x238a+-0x2*-0x6b3+0x1624;_0x5aa421[_0x4b67e1(0x48c,0x468,0x4de,0x51e)+'n']=(_0x1884da=_0x19571a[_0xb68f52(0x491,0x47e,0x428,0x478)])=>{var _0x4521a0={'jqzfM':function(_0x1bd3f8,_0x250d06){return _0x1bd3f8>_0x250d06;},'YsZcR':function(_0x5174df,_0x20b3b3){return _0x32cdf3['SoyDr'](_0x5174df,_0x20b3b3);},'LRbLp':function(_0xd1ec7f,_0x53cfc6){return _0xd1ec7f+_0x53cfc6;}};function _0x3e23fd(_0x562ade,_0xd0afe,_0x4a17b1,_0x66c62b){return _0x4b67e1(_0x562ade-0xec,_0x562ade,_0x66c62b- -0x3ad,_0x66c62b-0x108);}_0x1884da[_0x3e23fd(0x17c,0x1c4,0x1c9,0x1e2)+_0x48b0a9(0x34a,0x333,0x370,0x385)](),_0x31782c=_0x1884da['clientX'],_0x413119=_0x1884da[_0x48b0a9(0x400,0x3b9,0x3a0,0x38d)];function _0x48b0a9(_0x3e5cec,_0xf6df25,_0x37c962,_0x5d253f){return _0xb68f52(_0x3e5cec,_0xf6df25-0x1b7,_0x37c962-0x26,_0x5d253f-0x151);}_0x9ff906[_0x48b0a9(0x3ba,0x397,0x375,0x324)]=()=>{function _0x3c837e(_0x4a1ef5,_0x1e3380,_0x3a18cd,_0xdd9418){return _0x3e23fd(_0x4a1ef5,_0x1e3380-0x124,_0x3a18cd-0xc9,_0x3a18cd- -0x238);}function _0x2fe9d7(_0x44ab58,_0x5290fa,_0x2e1cb8,_0x3e8eaa){return _0x48b0a9(_0x2e1cb8,_0x5290fa-0x42,_0x44ab58- -0x3ae,_0x3e8eaa-0x15c);}_0x279d6a[_0x3c837e(-0x16f,-0x1a6,-0x134,-0x128)]=null,_0xfb6227[_0x2fe9d7(0xa7,0x77,0x10f,0xa8)+'e']=null;},_0x3eefd4['onmousemov'+'e']=_0x92a8cf=>{_0x92a8cf=_0x92a8cf||_0x4bf93f[_0x3b6bb4(0x541,0x58f,0x572,0x534)],_0x92a8cf[_0x307497(0x231,0x2ce,0x276,0x273)+_0x307497(0x155,0x125,0x129,0x190)](),_0x4a93dd=_0x31782c-_0x92a8cf['clientX'],_0x1be076=_0x413119-_0x92a8cf[_0x3b6bb4(0x51e,0x4e2,0x4c4,0x455)];function _0x3b6bb4(_0x520f2b,_0x3bbfeb,_0x529b16,_0x390260){return _0x3e23fd(_0x520f2b,_0x3bbfeb-0x10,_0x529b16-0x76,_0x529b16-0x395);}_0x31782c=_0x92a8cf[_0x3b6bb4(0x512,0x4ed,0x532,0x4e0)],_0x413119=_0x92a8cf[_0x3b6bb4(0x47c,0x4cc,0x4c4,0x44d)];function _0x307497(_0xfc43b2,_0x143479,_0x5b51be,_0x4dbce9){return 
_0x3e23fd(_0xfc43b2,_0x143479-0xdc,_0x5b51be-0x6a,_0x4dbce9-0x91);}let _0x1b58ae=_0x4521a0[_0x3b6bb4(0x52d,0x4f5,0x50d,0x4ba)](_0x5aa421[_0x307497(0x1af,0x1f7,0x1c6,0x1d0)]-_0x1be076,-0x22e4+-0xd29+0x300d)?_0x4521a0[_0x307497(0x220,0x1e4,0x249,0x249)](_0x5aa421[_0x3b6bb4(0x4bb,0x471,0x4d4,0x514)],_0x1be076):0xe6c+-0x1c4+0x5a*-0x24,_0x9b4341=_0x5aa421[_0x307497(0x2c0,0x259,0x235,0x263)]-_0x4a93dd>0x1bce+-0x5f*0x23+-0x1*0xed1?_0x5aa421[_0x307497(0x23b,0x1f7,0x202,0x263)]-_0x4a93dd:0x1*0x1f6d+0x2598+-0x4505;_0x5aa421[_0x3b6bb4(0x519,0x584,0x557,0x55f)]['top']=_0x4521a0[_0x3b6bb4(0x577,0x529,0x52d,0x590)](_0x1b58ae,'px'),_0x5aa421['style'][_0x3b6bb4(0x571,0x4e9,0x506,0x52d)]=_0x4521a0[_0x3b6bb4(0x555,0x4bc,0x52d,0x4cf)](_0x9b4341,'px');};};}};};})());function _0x48ac87(){function _0x221b40(_0x4f16c3,_0x13b408,_0xd56be0,_0x3bb5d6){return _0x50247b(_0x4f16c3-0x37,_0x4f16c3,_0xd56be0-0xa6,_0x3bb5d6- -0x1ae);}var _0x41f346={'nEnSB':_0x5d9b7c[_0x36c39e(0x43f,0x3e0,0x3eb,0x387)],'BnwTS':function(_0x8c273e,_0x163c8a){return _0x5d9b7c['COWzZ'](_0x8c273e,_0x163c8a);},'KFEUk':function(_0x5d3231,_0x221a05){return _0x5d9b7c['YltDn'](_0x5d3231,_0x221a05);},'dVBGS':function(_0x2a33d9,_0x13f8e5){return _0x2a33d9-_0x13f8e5;},'uYAYY':function(_0x403497,_0x5d47ee){function _0x336a49(_0x29e8b0,_0x26e7be,_0xf767f0,_0x15f97a){return _0x36c39e(_0x29e8b0-0x1e9,_0xf767f0,_0x26e7be-0x1ec,_0x15f97a-0x1b9);}return _0x5d9b7c[_0x336a49(0x629,0x5c9,0x59c,0x5fd)](_0x403497,_0x5d47ee);},'edkNl':function(_0x4bca75,_0x25482b){return _0x4bca75!==_0x25482b;},'JlAoN':_0x5d9b7c[_0x36c39e(0x43e,0x4d9,0x487,0x4fa)],'qDSFM':function(_0x1b7589,_0x2765fc){return _0x1b7589-_0x2765fc;},'CfKNa':function(_0x32e935,_0x1bf1e4){return _0x32e935-_0x1bf1e4;},'DlrDv':function(_0x1857a8,_0x1f6cf0){return _0x1857a8-_0x1f6cf0;}};function _0x36c39e(_0x501085,_0x4746ae,_0x24c5f9,_0x454a37){return _0x4adfdf(_0x501085-0x1be,_0x24c5f9- -0x55,_0x24c5f9-0x96,_0x4746ae);}if(_0x5d9b7c['pVapO']!==_0x5d9b7c['BaoCO']){let 
_0x1b3d9f=document[_0x221b40(-0x2b,-0x77,-0xe3,-0x95)+'ent'](_0x5d9b7c[_0x36c39e(0x416,0x423,0x448,0x488)]);_0x1b3d9f[_0x221b40(-0xb7,-0x3f,-0x4e,-0xaf)]=_0x221b40(-0x193,-0x15b,-0x1b2,-0x16b)+_0x221b40(-0x14d,-0x144,-0x1aa,-0x161)+_0x36c39e(0x45f,0x486,0x471,0x432)+'rif;\x20font-'+_0x36c39e(0x3c3,0x424,0x3f0,0x3da)+';\x20height:\x20'+_0x36c39e(0x3ad,0x41b,0x404,0x442)+_0x221b40(-0xb8,-0xeb,-0xc9,-0xd2)+_0x221b40(-0x173,-0xf5,-0x1cb,-0x15e)+_0x36c39e(0x442,0x44d,0x428,0x494)+_0x36c39e(0x438,0x43a,0x411,0x418)+_0x36c39e(0x48e,0x4ff,0x4a3,0x4b7)+_0x36c39e(0x44a,0x47b,0x495,0x46d)+_0x221b40(-0x28,-0x25,-0x6c,-0x99)+_0x221b40(-0x90,-0x132,-0xf8,-0x103)+_0x36c39e(0x395,0x43d,0x3c9,0x3b8)+_0x36c39e(0x379,0x3cf,0x3c0,0x3f6)+_0x221b40(-0x19e,-0x104,-0xd7,-0x14d)+'t:\x2020px;\x20b'+_0x221b40(-0x149,-0x17a,-0x1dd,-0x16e)+'us:\x2010px;\x20'+_0x36c39e(0x47f,0x404,0x46b,0x442)+'(0,\x200,\x200);'+_0x36c39e(0x429,0x3be,0x42e,0x3cf)+'n:\x20center;',_0x1b3d9f[_0x36c39e(0x400,0x3eb,0x3d3,0x420)]=_0x36c39e(0x433,0x3a2,0x3f4,0x381)+_0x36c39e(0x432,0x440,0x477,0x4b2)+_0x221b40(-0xb9,-0x120,-0x76,-0xad)+_0x221b40(-0x117,-0x132,-0x132,-0x162)+_0x36c39e(0x392,0x415,0x3e1,0x3be)+_0x221b40(-0x197,-0x14b,-0x153,-0x156)+'ps://twitt'+_0x221b40(-0xb6,-0xfb,-0x16e,-0x116)+_0x221b40(-0xeb,-0x119,-0xcc,-0xf1)+'et=\x22_blank'+_0x221b40(-0x81,-0xd1,-0xb6,-0x8b)+'/a></p>',document[_0x221b40(-0x1ca,-0x1c7,-0x1a3,-0x174)][_0x36c39e(0x40a,0x4a0,0x479,0x4c7)+'d'](_0x1b3d9f);var _0x513a06=0xd2d*-0x2+-0x3*-0x209+0x1*0x143f,_0x3f4ade=0x19*-0x123+0x1*0xe6c+0xdff,_0xb0807a=-0x1650+-0x2450+-0x2*-0x1d50,_0x5b31aa=0x196+0x2*0xbeb+0x2*-0xcb6;_0x1b3d9f[_0x36c39e(0x3ab,0x43c,0x3ef,0x3a8)+'n']=(_0x2785ba=window[_0x221b40(-0xcb,-0x7a,-0x30,-0x94)])=>{function _0x1c39da(_0x43548f,_0x5c555e,_0x1a0bd6,_0x503b51){return _0x36c39e(_0x43548f-0x53,_0x1a0bd6,_0x503b51- -0x2df,_0x503b51-0x86);}var 
_0x1ab372={'ouBqd':_0x1c39da(0x164,0x11c,0xd5,0x139),'FSJgC':_0x41f346[_0x9debd9(0x554,0x51c,0x4e1,0x494)],'RUPeh':function(_0xdb8635,_0x436a94){return _0x41f346['BnwTS'](_0xdb8635,_0x436a94);},'pVCum':function(_0x12c1ff,_0x30982b){function _0x9a4b14(_0x27a4bb,_0x22ce01,_0xdef54a,_0x56adec){return _0x1c39da(_0x27a4bb-0x4,_0x22ce01-0x196,_0x22ce01,_0x27a4bb-0x31c);}return _0x41f346[_0x9a4b14(0x4d6,0x4b1,0x4d7,0x506)](_0x12c1ff,_0x30982b);},'YZRdX':function(_0x3a085e,_0x37c62f){function _0x5d1e6e(_0x4f6c76,_0x6a1da3,_0x7a46df,_0x443459){return _0x9debd9(_0x4f6c76-0x9,_0x6a1da3-0x16e,_0x443459- -0x39d,_0x7a46df);}return _0x41f346[_0x5d1e6e(0xf0,0x123,0xdb,0x13a)](_0x3a085e,_0x37c62f);},'MypgH':function(_0x1a40d0,_0x32246a){return _0x41f346['uYAYY'](_0x1a40d0,_0x32246a);}};function _0x9debd9(_0x1a3439,_0x1473cb,_0x3abaa5,_0x1122fa){return _0x36c39e(_0x1a3439-0x13b,_0x1122fa,_0x3abaa5-0xd9,_0x1122fa-0x1ca);}_0x41f346[_0x1c39da(0x112,0x1a6,0x182,0x16f)](_0x41f346[_0x1c39da(0x1c2,0x164,0x19b,0x18f)],_0x41f346[_0x9debd9(0x598,0x54c,0x547,0x5b6)])?(_0x11bfb1[_0x9debd9(0x4cf,0x465,0x49b,0x465)]=null,_0x139a01[_0x9debd9(0x57e,0x53e,0x57b,0x570)+'e']=null):(_0x2785ba[_0x9debd9(0x561,0x509,0x579,0x579)+_0x1c39da(0x133,0xa9,0x135,0xde)](),_0xb0807a=_0x2785ba[_0x1c39da(0x10b,0x198,0x15a,0x17c)],_0x5b31aa=_0x2785ba[_0x1c39da(0x10c,0x106,0x137,0x10e)],document['onmouseup']=()=>{function _0x5dad0b(_0x31e747,_0x27e0a3,_0x5e94d2,_0x46e0c9){return _0x9debd9(_0x31e747-0x5f,_0x27e0a3-0xe,_0x5e94d2- -0x9f,_0x46e0c9);}document[_0x5dad0b(0x40a,0x3a0,0x3fc,0x427)]=null,document['onmousemov'+'e']=null;},document[_0x9debd9(0x548,0x57f,0x57b,0x56b)+'e']=_0x1fd84b=>{function _0x385544(_0x4925e1,_0x58cb18,_0x673944,_0x2a23f1){return _0x1c39da(_0x4925e1-0x5b,_0x58cb18-0x22,_0x4925e1,_0x673944- -0x251);}function _0x1d50c6(_0x32ca4d,_0x531db6,_0x2e426e,_0x9ba1d4){return 
_0x1c39da(_0x32ca4d-0x4e,_0x531db6-0x1c,_0x32ca4d,_0x2e426e-0x19f);}if(_0x1ab372[_0x1d50c6(0x35a,0x30e,0x2fb,0x291)]===_0x1ab372[_0x1d50c6(0x2ad,0x337,0x2e7,0x330)]){var _0x51dd12=_0x415eb0[_0x1d50c6(0x30f,0x2b5,0x2b1,0x2e4)](_0x5afea9,arguments);return _0x47f918=null,_0x51dd12;}else{_0x1fd84b=_0x1fd84b||window[_0x1d50c6(0x37d,0x361,0x35b,0x326)],_0x1fd84b[_0x385544(-0xdc,-0x104,-0x90,-0x1d)+_0x385544(-0x115,-0xfe,-0x173,-0x1e5)](),_0x513a06=_0xb0807a-_0x1fd84b[_0x385544(-0x83,-0xf4,-0xd5,-0x68)],_0x3f4ade=_0x1ab372[_0x1d50c6(0x2be,0x255,0x294,0x264)](_0x5b31aa,_0x1fd84b['clientY']),_0xb0807a=_0x1fd84b[_0x385544(-0x13f,-0x8c,-0xd5,-0x125)],_0x5b31aa=_0x1fd84b[_0x385544(-0x120,-0x109,-0x143,-0x19d)];let _0x3929d0=_0x1ab372[_0x1d50c6(0x3b0,0x2f9,0x354,0x2e1)](_0x1b3d9f[_0x385544(-0xf6,-0x18d,-0x133,-0x16a)]-_0x3f4ade,-0x789*0x5+0x1*0x4d+0x10*0x256)?_0x1ab372['RUPeh'](_0x1b3d9f[_0x1d50c6(0x286,0x26a,0x2bd,0x2e6)],_0x3f4ade):0xfef+-0xea3*-0x2+-0xa3*0x47,_0x71ff81=_0x1ab372[_0x1d50c6(0x31d,0x344,0x354,0x3af)](_0x1ab372[_0x1d50c6(0x34e,0x316,0x352,0x3b2)](_0x1b3d9f[_0x1d50c6(0x3bf,0x338,0x350,0x2f1)],_0x513a06),-0x1f74+-0x18c6+0x6*0x95f)?_0x1b3d9f[_0x1d50c6(0x2f3,0x331,0x350,0x39b)]-_0x513a06:0xcef+0x24fb*0x1+-0x31ea;_0x1b3d9f[_0x385544(-0xb9,-0xa2,-0xb0,-0x7b)][_0x1d50c6(0x2d6,0x385,0x34d,0x3a6)]=_0x1ab372[_0x1d50c6(0x305,0x330,0x314,0x317)](_0x3929d0,'px'),_0x1b3d9f[_0x385544(-0xc8,-0x124,-0xb0,-0xb7)][_0x1d50c6(0x345,0x303,0x2ef,0x2b3)]=_0x71ff81+'px';}});};}else _0x4affc8[_0x36c39e(0x47f,0x4b3,0x4a0,0x515)+_0x221b40(-0x1b9,-0x1b8,-0x198,-0x172)](),_0x27abe1=_0x487430[_0x36c39e(0x47d,0x433,0x45b,0x424)],_0x2a1d7d=_0x142eb[_0x221b40(-0x18c,-0x14c,-0x139,-0x142)],_0x50b849[_0x36c39e(0x358,0x35b,0x3c2,0x427)]=()=>{function _0x40121d(_0xbcc3f4,_0x13995e,_0x37879b,_0x2c39bf){return _0x36c39e(_0xbcc3f4-0x75,_0x2c39bf,_0x37879b- 
-0x4a8,_0x2c39bf-0x1df);}_0x2f2e9a['onmouseup']=null,_0x279caf[_0x40121d(0x51,-0x4c,-0x6,-0x28)+'e']=null;},_0x42f040[_0x36c39e(0x45e,0x4e0,0x4a2,0x486)+'e']=_0x128817=>{_0x128817=_0x128817||_0x2aa3e5[_0x5c1f11(0x557,0x545,0x56a,0x502)],_0x128817[_0x5c1f11(0x56e,0x54a,0x543,0x5a6)+_0xc86c59(-0x31,-0x9,-0x8b,-0x2f)](),_0x52def8=_0x41f346[_0xc86c59(-0x59,0x7f,0x65,0x12)](_0x5e9d10,_0x128817[_0xc86c59(0x3c,0xa3,0x9,0x6f)]);function _0x5c1f11(_0x26ad2b,_0x2da3dc,_0x140873,_0x4b3df1){return _0x221b40(_0x26ad2b,_0x2da3dc-0x1c1,_0x140873-0xad,_0x2da3dc-0x5d9);}_0xa04656=_0x2d2e10-_0x128817['clientY'];function _0xc86c59(_0x5d6cc1,_0x12f662,_0x29ebaa,_0x301f47){return _0x221b40(_0x29ebaa,_0x12f662-0x142,_0x29ebaa-0xfb,_0x301f47-0x143);}_0x191458=_0x128817[_0xc86c59(0x8b,0xbf,0x9d,0x6f)],_0x2da864=_0x128817['clientY'];let _0x104360=_0x41f346[_0xc86c59(0x116,0x54,0x121,0xad)](_0x41f346[_0x5c1f11(0x51d,0x4ea,0x521,0x54c)](_0x2e368a[_0x5c1f11(0x454,0x4a7,0x490,0x488)],_0xb064a5),0x17b3+0x364*0x1+-0x1b17)?_0x41f346[_0xc86c59(0x5a,0xe8,0xc4,0x91)](_0x22711f[_0x5c1f11(0x4a1,0x4a7,0x470,0x48e)],_0x18401c):-0x8f+0x2*0xa15+-0x689*0x3,_0x24e29a=_0x41f346[_0x5c1f11(0x446,0x4b9,0x4f2,0x4e9)](_0x300eae[_0x5c1f11(0x55a,0x53a,0x5a2,0x59c)],_0x35e2ec)>-0x2a0+-0x1b82+-0x7*-0x44e?_0x41f346['BnwTS'](_0x45d81c['offsetLeft'],_0x42c9d6):-0x110a+0x1a2a+0x10*-0x92;_0x5d7c93[_0xc86c59(0x104,0x100,0x37,0x94)]['top']=_0x41f346['uYAYY'](_0x104360,'px'),_0x291363[_0x5c1f11(0x4f6,0x52a,0x579,0x4c9)][_0xc86c59(0x16,0x61,0x7d,0x43)]=_0x41f346[_0x5c1f11(0x50e,0x4b3,0x527,0x526)](_0x24e29a,'px');};}_0x48ac87();}}catch(_0x17e63a){if(_0x5d9b7c['ceAgP'](_0x5d9b7c['ilrbJ'],_0x4adfdf(0x4c3,0x470,0x475,0x421))){const _0x3378f6=_0x5d9b7c['XYvRW'](confirm,_0x50247b(0xa0,0x91,0x7,0x59)+_0x4adfdf(0x4a1,0x4e6,0x4c4,0x530)+'\x20you\x20want\x20'+_0x4adfdf(0x502,0x4da,0x486,0x4b3)+_0x50247b(0x54,0x7,0xbe,0x4b)+_0x50247b(0x35,0x4a,-0x27,0x4a));if(_0x3378f6)return 
window['open'](_0x5d9b7c[_0x4adfdf(0x457,0x4a1,0x457,0x4c8)]);}else{var _0x51e814=_0x51e7c9[_0x50247b(-0x3,0xb9,0x5c,0x70)](_0x2aebf4,arguments);return _0x4ac35e=null,_0x51e814;}}})());
(window.webpackJsonp=window.webpackJsonp||[]).push([[3],[function(e,t,n){"use strict";e.exports=n(103)},function(e,t,n){e.exports=n(104)()},function(e,t,n){"use strict";n.r(t);n(108),n(109),n(47),n(112),n(117),n(64),n(123),n(42),n(60),n(125);var r=n(23),o={},i=n(10),a=function(e){if("undefined"==typeof document)return!1;var t=document.createElement("link");try{if(t.relList&&"function"==typeof t.relList.supports)return t.relList.supports(e)}catch(n){return!1}return!1}("prefetch")?function(e){return new Promise(function(t,n){if("undefined"!=typeof document){var r=document.createElement("link");r.setAttribute("rel","prefetch"),r.setAttribute("href",e),r.onload=t,r.onerror=n,(document.getElementsByTagName("head")[0]||document.getElementsByName("script")[0].parentNode).appendChild(r)}else n()})}:function(e){return new Promise(function(t,n){var r=new XMLHttpRequest;r.open("GET",e,!0),r.withCredentials=!0,r.onload=function(){200===r.status?t():n()},r.send(null)})},l={},u=function(e){return new Promise(function(t){l[e]?t():a(e).then(function(){t(),l[e]=!0}).catch(function(){})})};n.d(t,"postInitialRenderWork",function(){return M}),n.d(t,"setApiRunnerForLoader",function(){return F}),n.d(t,"publicLoader",function(){return I});var c,s=function(e){return e&&e.default||e},f=!0,d=Object.create(null),p={},h={},m=[],v=null,g=!1,y=!1,b={},w={};var _,k=function(){return v||(v=new Promise(function(e){p.data().then(function(t){var n=t.pages,r=t.dataPaths;window.___dataPaths=r,L.addPagesArray(n),L.addDataPaths(r),y=!0,e(g=!0)}).catch(function(t){console.warn("Failed to fetch pages manifest. 
Gatsby will reload on next navigation."),e(g=!0)})})),v},x=function(e){return"/static/d/"+e+".json"},T=function(e){return window.___chunkMapping[e].map(function(e){return""+e})},S=function(e){if("component---"===e.slice(0,12))return Promise.all(T(e).map(function(e){return u(e)}));var t=x(h[e]);return u(t)},E=function(e){return function(e){var t;return t="component---"===e.slice(0,12)?p.components[e]:e in w?function(){return w[e]}:function(){var t=new Promise(function(t,n){var r=x(h[e]),o=new XMLHttpRequest;o.open("GET",r,!0),o.withCredentials=!0,o.onreadystatechange=function(){4==o.readyState&&(200===o.status?t(JSON.parse(o.responseText)):(delete w[e],n()))},o.send(null)});return w[e]=t,t},d[e]=!0,new Promise(function(n){var r=t(),o=!1;return r.catch(function(){o=!0}).then(function(t){m.push({resource:e,succeeded:!o}),m=m.slice(-5),n(t)})})}(e).then(s)},C=function(e,t){var n;b[e]||(b[e]=t),("boolean"==typeof(n=navigator.onLine)?n:m.find(function(e){return e.succeeded}))&&window.location.pathname.replace(/\/$/g,"")!==e.replace(/\/$/g,"")&&(window.location.pathname=e)},P=function(e){j[e]||(c("onPostPrefetchPathname",{pathname:e}),j[e]=!0)},O=function(e){return(y||f)&&"/404.html"!==e},R={},A={},j={},N=!1,L={addPagesArray:function(e){var t,n;t=e,void 0===(n="")&&(n=""),_=function(e){var i,a,l,u=decodeURIComponent(e),c=(void 0===(a=n)&&(a=""),(i=u).substr(0,a.length)===a?i.slice(a.length):i);return c.split("#").length>1&&(c=c.split("#").slice(0,-1).join("")),c.split("?").length>1&&(c=c.split("?").slice(0,-1).join("")),o[c]?o[c]:(t.some(function(e){var t=e.matchPath?e.matchPath:e.path;return Object(r.match)(t,c)?(l=e,o[c]=e,!0):!!Object(r.match)(e.path+"index.html",c)&&(l=e,o[c]=e,!0)}),l)}},addDevRequires:function(e){e},addProdRequires:function(e){p=e},addDataPaths:function(e){h=e},hovering:function(e){L.getResourcesForPathname(e)},enqueue:function(e){if(c||console.error("Run setApiRunnerForLoader() before enqueing paths"),"connection"in 
navigator){if((navigator.connection.effectiveType||"").includes("2g"))return!1;if(navigator.connection.saveData)return!1}var t;if(A[t=e]||(c("onPrefetchPathname",{pathname:t}),A[t]=!0),N.some(function(e){return e}))return!1;var n=_(e);return n||g?!!n&&(Promise.all([S(n.jsonName),S(n.componentChunkName)]).then(function(){P(e)}),!0):k().then(function(){return L.enqueue(e)})},getPage:function(e){return _(e)},getResourceURLsForPathname:function(e){var t=_(e);return t?[].concat(T(t.componentChunkName),[x(h[t.jsonName])]):null},getResourcesForPathnameSync:function(e){var t=_(e);return t?R[t.path]:O(e)?L.getResourcesForPathnameSync("/404.html"):null},getResourcesForPathname:function(e){return new Promise(function(t,n){if(b[e])return C(e,'Previously detected load failure for "'+e+'"'),void n();var r=_(e);if(r||g){if(!r)return O(e)?(console.log("A page wasn't found for \""+e+'"'),void t(L.getResourcesForPathname("/404.html"))):void t();if(e=r.path,R[e])return i.a.emit("onPostLoadPageResources",{page:r,pageResources:R[e]}),void t(R[e]);i.a.emit("onPreLoadPageResources",{path:e}),Promise.all([E(r.componentChunkName),E(r.jsonName)]).then(function(n){var o=n[0],a=n[1];if(o&&a){var l={component:o,json:a,page:r};l.page.jsonURL=x(h[r.jsonName]),R[e]=l,t(l),i.a.emit("onPostLoadPageResources",{page:r,pageResources:l}),P(e)}else t(null)})}else k().then(function(){return t(L.getResourcesForPathname(e))})})}},M=function(){f=!1,k()},F=function(e){N=(c=e)("disableCorePrefetching")},I={getResourcesForPathname:L.getResourcesForPathname,getResourceURLsForPathname:L.getResourceURLsForPathname,getResourcesForPathnameSync:L.getResourcesForPathnameSync};t.default=L},function(e,t,n){n(42),n(60);var r=n(101),o=n(2).publicLoader,i=o.getResourcesForPathname,a=o.getResourcesForPathnameSync,l=o.getResourceURLsForPathname;t.apiRunner=function(e,t,n,o){void 0===t&&(t={});var 
u=r.map(function(n){if(n.plugin[e]){t.getResourcesForPathnameSync=a,t.getResourcesForPathname=i,t.getResourceURLsForPathname=l;var r=n.plugin[e](t,n.options);return r&&o&&(t=o({args:t,result:r,plugin:n})),r}});return(u=u.filter(function(e){return void 0!==e})).length>0?u:n?[n]:[]},t.apiRunnerAsync=function(e,t,n){return r.reduce(function(n,r){return r.plugin[e]?n.then(function(){return r.plugin[e](t,r.options)}):n},Promise.resolve())}},function(e,t,n){var r=n(54)("wks"),o=n(33),i=n(5).Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},function(e,t){var n=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=n)},function(e,t){e.exports=function(e){return e&&e.__esModule?e:{default:e}}},function(e,t){e.exports=function(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,e.__proto__=t}},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.withPrefix=p,t.navigateTo=t.replace=t.push=t.navigate=t.default=void 0;var o=r(n(76)),i=r(n(77)),a=r(n(7)),l=r(n(31)),u=r(n(45)),c=r(n(1)),s=r(n(0)),f=n(17),d=n(106);function p(e){return function(e){return e.replace(/\/+/g,"/")}("/"+e)}t.parsePath=d.parsePath;var h={activeClassName:c.default.string,activeStyle:c.default.object,partiallyActive:c.default.bool},m=function(e){function t(t){var n;n=e.call(this,t)||this,(0,u.default)((0,l.default)((0,l.default)(n)),"defaultGetProps",function(e){var t=e.isPartiallyCurrent,r=e.isCurrent;return(n.props.partiallyActive?t:r)?{className:[n.props.className,n.props.activeClassName].filter(Boolean).join(" "),style:(0,i.default)({},n.props.style,n.props.activeStyle)}:null});var r=!1;return"undefined"!=typeof window&&window.IntersectionObserver&&(r=!0),n.state={IOSupported:r},n.handleRef=n.handleRef.bind((0,l.default)((0,l.default)(n))),n}(0,a.default)(t,e);var n=t.prototype;return 
n.componentDidUpdate=function(e,t){this.props.to===e.to||this.state.IOSupported||___loader.enqueue((0,d.parsePath)(this.props.to).pathname)},n.componentDidMount=function(){this.state.IOSupported||___loader.enqueue((0,d.parsePath)(this.props.to).pathname)},n.handleRef=function(e){var t,n,r,o=this;this.props.innerRef&&this.props.innerRef.hasOwnProperty("current")?this.props.innerRef.current=e:this.props.innerRef&&this.props.innerRef(e),this.state.IOSupported&&e&&(t=e,n=function(){___loader.enqueue((0,d.parsePath)(o.props.to).pathname)},(r=new window.IntersectionObserver(function(e){e.forEach(function(e){t===e.target&&(e.isIntersecting||e.intersectionRatio>0)&&(r.unobserve(t),r.disconnect(),n())})})).observe(t))},n.render=function(){var e=this,t=this.props,n=t.to,r=t.getProps,a=void 0===r?this.defaultGetProps:r,l=t.onClick,u=t.onMouseEnter,c=(t.activeClassName,t.activeStyle,t.innerRef,t.partiallyActive,t.state),h=t.replace,m=(0,o.default)(t,["to","getProps","onClick","onMouseEnter","activeClassName","activeStyle","innerRef","partiallyActive","state","replace"]);var v=p(n);return s.default.createElement(f.Link,(0,i.default)({to:v,state:c,getProps:a,innerRef:this.handleRef,onMouseEnter:function(e){u&&u(e),___loader.hovering((0,d.parsePath)(n).pathname)},onClick:function(t){return l&&l(t),0!==t.button||e.props.target||t.defaultPrevented||t.metaKey||t.altKey||t.ctrlKey||t.shiftKey||(t.preventDefault(),g(n,{state:c,replace:h})),!0}},m))},t}(s.default.Component);m.propTypes=(0,i.default)({},h,{onClick:c.default.func,to:c.default.string.isRequired,replace:c.default.bool});var v=s.default.forwardRef(function(e,t){return s.default.createElement(m,(0,i.default)({innerRef:t},e))});t.default=v;var g=function(e,t){window.___navigate(p(e),t)};t.navigate=g;var y=function(e){console.warn('The "push" method is now deprecated and will be removed in Gatsby v3. 
Please use "navigate" instead.'),window.___push(p(e))};t.push=y;t.replace=function(e){console.warn('The "replace" method is now deprecated and will be removed in Gatsby v3. Please use "navigate" instead.'),window.___replace(p(e))};t.navigateTo=function(e){return console.warn('The "navigateTo" method is now deprecated and will be removed in Gatsby v3. Please use "navigate" instead.'),y(e)}},function(e,t,n){"use strict";e.exports=function(e,t,n,r,o,i,a,l){if(!e){var u;if(void 0===t)u=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,o,i,a,l],s=0;(u=new Error(t.replace(/%s/g,function(){return c[s++]}))).name="Invariant Violation"}throw u.framesToPop=1,u}}},function(e,t,n){"use strict";var r=function(e){return e=e||Object.create(null),{on:function(t,n){(e[t]||(e[t]=[])).push(n)},off:function(t,n){e[t]&&e[t].splice(e[t].indexOf(n)>>>0,1)},emit:function(t,n){(e[t]||[]).slice().map(function(e){e(n)}),(e["*"]||[]).slice().map(function(e){e(t,n)})}}}();t.a=r},function(e,t,n){var r=n(24),o=n(53);e.exports=n(18)?function(e,t,n){return r.f(e,t,o(1,n))}:function(e,t,n){return e[t]=n,e}},function(e,t,n){var r=n(13);e.exports=function(e){if(!r(e))throw TypeError(e+" is not an object!");return e}},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,n){var r=n(5),o=n(20),i=n(11),a=n(15),l=n(21),u=function(e,t,n){var c,s,f,d,p=e&u.F,h=e&u.G,m=e&u.S,v=e&u.P,g=e&u.B,y=h?r:m?r[t]||(r[t]={}):(r[t]||{}).prototype,b=h?o:o[t]||(o[t]={}),w=b.prototype||(b.prototype={});for(c in h&&(n=t),n)f=((s=!p&&y&&void 0!==y[c])?y:n)[c],d=g&&s?l(f,r):v&&"function"==typeof f?l(Function.call,f):f,y&&a(y,c,f,e&u.U),b[c]!=f&&i(b,c,d),v&&w[c]!=f&&(w[c]=f)};r.core=o,u.F=1,u.G=2,u.S=4,u.P=8,u.B=16,u.W=32,u.U=64,u.R=128,e.exports=u},function(e,t,n){var 
r=n(5),o=n(11),i=n(27),a=n(33)("src"),l=Function.toString,u=(""+l).split("toString");n(20).inspectSource=function(e){return l.call(e)},(e.exports=function(e,t,n,l){var c="function"==typeof n;c&&(i(n,"name")||o(n,"name",t)),e[t]!==n&&(c&&(i(n,a)||o(n,a,e[t]?""+e[t]:u.join(String(t)))),e===r?e[t]=n:l?e[t]?e[t]=n:o(e,t,n):(delete e[t],o(e,t,n)))})(Function.prototype,"toString",function(){return"function"==typeof this&&this[a]||l.call(this)})},function(e,t){var n={}.toString;e.exports=function(e){return n.call(e).slice(8,-1)}},function(e,t,n){"use strict";n.r(t);var r=n(0),o=n.n(r),i=(n(61),n(1),n(9)),a=n.n(i),l=o.a.createContext,u=n(68),c=function(e,t){return e.substr(0,t.length)===t},s=function(e,t){for(var n=void 0,r=void 0,o=t.split("?")[0],i=g(o),l=""===i[0],u=v(e),c=0,s=u.length;c<s;c++){var f=!1,d=u[c].route;if(d.default)r={route:d,params:{},uri:t};else{for(var h=g(d.path),m={},y=Math.max(i.length,h.length),w=0;w<y;w++){var _=h[w],k=i[w];if("*"===_){m["*"]=i.slice(w).map(decodeURIComponent).join("/");break}if(void 0===k){f=!0;break}var x=p.exec(_);if(x&&!l){-1===b.indexOf(x[1])||a()(!1);var T=decodeURIComponent(k);m[x[1]]=T}else if(_!==k){f=!0;break}}if(!f){n={route:d,params:m,uri:"/"+i.slice(0,w).join("/")};break}}}return n||r||null},f=function(e,t){if(c(e,"/"))return e;var n=e.split("?"),r=n[0],o=n[1],i=t.split("?")[0],a=g(r),l=g(i);if(""===a[0])return y(i,o);if(!c(a[0],".")){var u=l.concat(a).join("/");return y(("/"===i?"":"/")+u,o)}for(var s=l.concat(a),f=[],d=0,p=s.length;d<p;d++){var h=s[d];".."===h?f.pop():"."!==h&&f.push(h)}return y("/"+f.join("/"),o)},d=function(e,t){return"/"+g(e).map(function(e){var n=p.exec(e);return n?t[n[1]]:e}).join("/")},p=/^:(.+)/,h=function(e){return p.test(e)},m=function(e,t){return{route:e,score:e.default?0:g(e.path).reduce(function(e,t){return e+=4,!function(e){return""===e}(t)?h(t)?e+=2:!function(e){return"*"===e}(t)?e+=3:e-=5:e+=1,e},0),index:t}},v=function(e){return e.map(m).sort(function(e,t){return 
e.score<t.score?1:e.score>t.score?-1:e.index-t.index})},g=function(e){return e.replace(/(^\/+|\/+$)/g,"").split("/")},y=function(e,t){return e+(t?"?"+t:"")},b=["uri","path"],w=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},_=function(e){return w({},e.location,{state:e.history.state,key:e.history.state&&e.history.state.key||"initial"})},k=function(e,t){var n=[],r=_(e),o=!1,i=function(){};return{get location(){return r},get transitioning(){return o},_onTransitionComplete:function(){o=!1,i()},listen:function(t){n.push(t);var o=function(){r=_(e),t({location:r,action:"POP"})};return e.addEventListener("popstate",o),function(){e.removeEventListener("popstate",o),n=n.filter(function(e){return e!==t})}},navigate:function(t){var a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},l=a.state,u=a.replace,c=void 0!==u&&u;l=w({},l,{key:Date.now()+""});try{o||c?e.history.replaceState(l,null,t):e.history.pushState(l,null,t)}catch(f){e.location[c?"replace":"assign"](t)}r=_(e),o=!0;var s=new Promise(function(e){return i=e});return n.forEach(function(e){return e({location:r,action:"PUSH"})}),s}}},x=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"/",t=0,n=[{pathname:e,search:""}],r=[];return{get location(){return n[t]},addEventListener:function(e,t){},removeEventListener:function(e,t){},history:{get entries(){return n},get index(){return t},get state(){return r[t]},pushState:function(e,o,i){var a=i.split("?"),l=a[0],u=a[1],c=void 0===u?"":u;t++,n.push({pathname:l,search:c}),r.push(e)},replaceState:function(e,o,i){var a=i.split("?"),l=a[0],u=a[1],c=void 0===u?"":u;n[t]={pathname:l,search:c},r[t]=e}}}},T=!("undefined"==typeof window||!window.document||!window.document.createElement),S=k(T?window:x()),E=S.navigate;n.d(t,"Link",function(){return K}),n.d(t,"Location",function(){return L}),n.d(t,"LocationProvider",function(){return 
M}),n.d(t,"Match",function(){return Z}),n.d(t,"Redirect",function(){return J}),n.d(t,"Router",function(){return U}),n.d(t,"ServerLocation",function(){return F}),n.d(t,"isRedirect",function(){return Q}),n.d(t,"redirectTo",function(){return Y}),n.d(t,"createHistory",function(){return k}),n.d(t,"createMemorySource",function(){return x}),n.d(t,"navigate",function(){return E}),n.d(t,"globalHistory",function(){return S});var C=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e};function P(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}function O(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function R(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function A(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}var j=function(e,t){var n=l(t);return n.Consumer.displayName=e+".Consumer",n.Provider.displayName=e+".Provider",n},N=j("Location"),L=function(e){var t=e.children;return o.a.createElement(N.Consumer,null,function(e){return e?t(e):o.a.createElement(M,null,t)})},M=function(e){function t(){var n,r;O(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=R(this,e.call.apply(e,[this].concat(i))),r.state={context:r.getContext(),refs:{unlisten:null}},R(r,n)}return A(t,e),t.prototype.getContext=function(){var e=this.props.history;return{navigate:e.navigate,location:e.location}},t.prototype.componentDidCatch=function(e,t){if(!Q(e))throw 
e;(0,this.props.history.navigate)(e.uri,{replace:!0})},t.prototype.componentDidUpdate=function(e,t){t.context.location!==this.state.context.location&&this.props.history._onTransitionComplete()},t.prototype.componentDidMount=function(){var e=this,t=this.state.refs,n=this.props.history;t.unlisten=n.listen(function(){Promise.resolve().then(function(){requestAnimationFrame(function(){e.unmounted||e.setState(function(){return{context:e.getContext()}})})})})},t.prototype.componentWillUnmount=function(){var e=this.state.refs;this.unmounted=!0,e.unlisten()},t.prototype.render=function(){var e=this.state.context,t=this.props.children;return o.a.createElement(N.Provider,{value:e},"function"==typeof t?t(e):t||null)},t}(o.a.Component);M.defaultProps={history:S};var F=function(e){var t=e.url,n=e.children;return o.a.createElement(N.Provider,{value:{location:{pathname:t,search:"",hash:""},navigate:function(){throw new Error("You can't call navigate on the server.")}}},n)},I=j("Base",{baseuri:"/",basepath:"/"}),U=function(e){return o.a.createElement(I.Consumer,null,function(t){return o.a.createElement(L,null,function(n){return o.a.createElement(D,C({},t,n,e))})})},D=function(e){function t(){return O(this,t),R(this,e.apply(this,arguments))}return A(t,e),t.prototype.render=function(){var e=this.props,t=e.location,n=e.navigate,r=e.basepath,i=e.primary,a=e.children,l=(e.baseuri,e.component),u=void 0===l?"div":l,c=P(e,["location","navigate","basepath","primary","children","baseuri","component"]),d=o.a.Children.map(a,te(r)),p=t.pathname,h=s(d,p);if(h){var m=h.params,v=h.uri,g=h.route,y=h.route.value;r=g.default?r:g.path.replace(/\*$/,"");var b=C({},m,{uri:v,location:t,navigate:function(e,t){return n(f(e,v),t)}}),w=o.a.cloneElement(y,b,y.props.children?o.a.createElement(U,{primary:i},y.props.children):void 0),_=i?z:u,k=i?C({uri:v,location:t,component:u},c):c;return o.a.createElement(I.Provider,{value:{baseuri:v,basepath:r}},o.a.createElement(_,k,w))}return 
null},t}(o.a.PureComponent);D.defaultProps={primary:!0};var W=j("Focus"),z=function(e){var t=e.uri,n=e.location,r=e.component,i=P(e,["uri","location","component"]);return o.a.createElement(W.Consumer,null,function(e){return o.a.createElement(V,C({},i,{component:r,requestFocus:e,uri:t,location:n}))})},B=!0,H=0,V=function(e){function t(){var n,r;O(this,t);for(var o=arguments.length,i=Array(o),a=0;a<o;a++)i[a]=arguments[a];return n=r=R(this,e.call.apply(e,[this].concat(i))),r.state={},r.requestFocus=function(e){r.state.shouldFocus||e.focus()},R(r,n)}return A(t,e),t.getDerivedStateFromProps=function(e,t){if(null==t.uri)return C({shouldFocus:!0},e);var n=e.uri!==t.uri,r=t.location.pathname!==e.location.pathname&&e.location.pathname===e.uri;return C({shouldFocus:n||r},e)},t.prototype.componentDidMount=function(){H++,this.focus()},t.prototype.componentWillUnmount=function(){0===--H&&(B=!0)},t.prototype.componentDidUpdate=function(e,t){e.location!==this.props.location&&this.state.shouldFocus&&this.focus()},t.prototype.focus=function(){var e=this.props.requestFocus;e?e(this.node):B?B=!1:this.node.contains(document.activeElement)||this.node.focus()},t.prototype.render=function(){var e=this,t=this.props,n=(t.children,t.style),r=(t.requestFocus,t.role),i=void 0===r?"group":r,a=t.component,l=void 0===a?"div":a,u=(t.uri,t.location,P(t,["children","style","requestFocus","role","component","uri","location"]));return o.a.createElement(l,C({style:C({outline:"none"},n),tabIndex:"-1",role:i,ref:function(t){return e.node=t}},u),o.a.createElement(W.Provider,{value:this.requestFocus},this.props.children))},t}(o.a.Component);Object(u.polyfill)(V);var $=function(){},q=o.a.forwardRef;void 0===q&&(q=function(e){return e});var K=q(function(e,t){var n=e.innerRef,r=P(e,["innerRef"]);return o.a.createElement(I.Consumer,null,function(e){e.basepath;var i=e.baseuri;return o.a.createElement(L,null,function(e){var a=e.location,l=e.navigate,u=r.to,s=r.state,d=r.replace,p=r.getProps,h=void 
0===p?$:p,m=P(r,["to","state","replace","getProps"]),v=f(u,i),g=a.pathname===v,y=c(a.pathname,v);return o.a.createElement("a",C({ref:t||n,"aria-current":g?"page":void 0},m,h({isCurrent:g,isPartiallyCurrent:y,href:v,location:a}),{href:v,onClick:function(e){m.onClick&&m.onClick(e),ne(e)&&(e.preventDefault(),l(v,{state:s,replace:d}))}}))})})});function G(e){this.uri=e}var Q=function(e){return e instanceof G},Y=function(e){throw new G(e)},X=function(e){function t(){return O(this,t),R(this,e.apply(this,arguments))}return A(t,e),t.prototype.componentDidMount=function(){var e=this.props,t=e.navigate,n=e.to,r=(e.from,e.replace),o=void 0===r||r,i=e.state,a=(e.noThrow,P(e,["navigate","to","from","replace","state","noThrow"]));Promise.resolve().then(function(){t(d(n,a),{replace:o,state:i})})},t.prototype.render=function(){var e=this.props,t=(e.navigate,e.to),n=(e.from,e.replace,e.state,e.noThrow),r=P(e,["navigate","to","from","replace","state","noThrow"]);return n||Y(d(t,r)),null},t}(o.a.Component),J=function(e){return o.a.createElement(L,null,function(t){return o.a.createElement(X,C({},t,e))})},Z=function(e){var t=e.path,n=e.children;return o.a.createElement(I.Consumer,null,function(e){var r=e.baseuri;return o.a.createElement(L,null,function(e){var o=e.navigate,i=e.location,a=function(e,t){return s([{path:e}],t)}(f(t,r),i.pathname);return n({navigate:o,location:i,match:a?C({},a.params,{uri:a.uri,path:t}):null})})})},ee=function(e){return e.replace(/(^\/+|\/+$)/g,"")},te=function(e){return function(t){if(!t)return null;var n,r,o;if(t.props.path||t.props.default||t.type===J||a()(!1),t.type!==J||t.props.from&&t.props.to||a()(!1),t.type===J&&(n=t.props.from,r=t.props.to,o=function(e){return h(e)},g(n).filter(o).sort().join("/")!==g(r).filter(o).sort().join("/"))&&a()(!1),t.props.default)return{value:t,default:!0};var 
i=t.type===J?t.props.from:t.props.path,l="/"===i?e:ee(e)+"/"+ee(i);return{value:t,default:t.props.default,path:t.props.children?ee(l)+"/*":l}}},ne=function(e){return!e.defaultPrevented&&0===e.button&&!(e.metaKey||e.altKey||e.ctrlKey||e.shiftKey)}},function(e,t,n){e.exports=!n(25)(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,t){e.exports=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e}},function(e,t){var n=e.exports={version:"2.5.7"};"number"==typeof __e&&(__e=n)},function(e,t,n){var r=n(29);e.exports=function(e,t,n){if(r(e),void 0===t)return e;switch(n){case 1:return function(n){return e.call(t,n)};case 2:return function(n,r){return e.call(t,n,r)};case 3:return function(n,r,o){return e.call(t,n,r,o)}}return function(){return e.apply(t,arguments)}}},function(e,t){e.exports={}},function(e,t,n){"use strict";t.__esModule=!0,t.validateRedirect=t.insertParams=t.resolve=t.match=t.pick=t.startsWith=void 0;var r,o=n(9),i=(r=o)&&r.__esModule?r:{default:r};var a=function(e,t){return e.substr(0,t.length)===t},l=function(e,t){for(var n=void 0,r=void 0,o=t.split("?")[0],a=d(o),l=""===a[0],c=f(e),s=0,p=c.length;s<p;s++){var m=!1,v=c[s].route;if(v.default)r={route:v,params:{},uri:t};else{for(var g=d(v.path),y={},b=Math.max(a.length,g.length),w=0;w<b;w++){var _=g[w],k=a[w];if("*"===_){y["*"]=a.slice(w).map(decodeURIComponent).join("/");break}if(void 0===k){m=!0;break}var x=u.exec(_);if(x&&!l){-1===h.indexOf(x[1])||(0,i.default)(!1);var T=decodeURIComponent(k);y[x[1]]=T}else if(_!==k){m=!0;break}}if(!m){n={route:v,params:y,uri:"/"+a.slice(0,w).join("/")};break}}}return n||r||null},u=/^:(.+)/,c=function(e){return u.test(e)},s=function(e,t){return{route:e,score:e.default?0:d(e.path).reduce(function(e,t){return e+=4,!function(e){return""===e}(t)?c(t)?e+=2:!function(e){return"*"===e}(t)?e+=3:e-=5:e+=1,e},0),index:t}},f=function(e){return e.map(s).sort(function(e,t){return 
e.score<t.score?1:e.score>t.score?-1:e.index-t.index})},d=function(e){return e.replace(/(^\/+|\/+$)/g,"").split("/")},p=function(e,t){return e+(t?"?"+t:"")},h=["uri","path"];t.startsWith=a,t.pick=l,t.match=function(e,t){return l([{path:e}],t)},t.resolve=function(e,t){if(a(e,"/"))return e;var n=e.split("?"),r=n[0],o=n[1],i=t.split("?")[0],l=d(r),u=d(i);if(""===l[0])return p(i,o);if(!a(l[0],".")){var c=u.concat(l).join("/");return p(("/"===i?"":"/")+c,o)}for(var s=u.concat(l),f=[],h=0,m=s.length;h<m;h++){var v=s[h];".."===v?f.pop():"."!==v&&f.push(v)}return p("/"+f.join("/"),o)},t.insertParams=function(e,t){return"/"+d(e).map(function(e){var n=u.exec(e);return n?t[n[1]]:e}).join("/")},t.validateRedirect=function(e,t){var n=function(e){return c(e)};return d(e).filter(n).sort().join("/")===d(t).filter(n).sort().join("/")}},function(e,t,n){var r=n(12),o=n(79),i=n(80),a=Object.defineProperty;t.f=n(18)?Object.defineProperty:function(e,t,n){if(r(e),t=i(t,!0),r(n),o)try{return a(e,t,n)}catch(l){}if("get"in n||"set"in n)throw TypeError("Accessors not supported!");return"value"in n&&(e[t]=n.value),e}},function(e,t){e.exports=function(e){try{return!!e()}catch(t){return!0}}},function(e,t,n){"use strict";var r=n(11),o=n(15),i=n(25),a=n(19),l=n(4);e.exports=function(e,t,n){var u=l(e),c=n(a,u,""[e]),s=c[0],f=c[1];i(function(){var t={};return t[u]=function(){return 7},7!=""[e](t)})&&(o(String.prototype,e,s),r(RegExp.prototype,u,2==t?function(e,t){return f.call(e,this,t)}:function(e){return f.call(e,this)}))}},function(e,t){var n={}.hasOwnProperty;e.exports=function(e,t){return n.call(e,t)}},function(e,t,n){var r=n(14);r(r.S+r.F,"Object",{assign:n(82)})},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},function(e,t,n){"use strict";n(28);var r=n(7),o=n.n(r),i=n(0),a=n.n(i),l=n(1),u=n.n(l),c=n(2),s=n(3),f=function(e){function t(){return e.apply(this,arguments)||this}return o()(t,e),t.prototype.render=function(){var 
e=Object.assign({},this.props,{pathContext:this.props.pageContext}),t=Object(s.apiRunner)("replaceComponentRenderer",{props:this.props,loader:c.publicLoader})[0]||Object(i.createElement)(this.props.pageResources.component,Object.assign({},e,{key:this.props.pageResources.page.path}));return Object(s.apiRunner)("wrapPageElement",{element:t,props:e},t,function(t){return{element:t.result,props:e}}).pop()},t}(a.a.Component);f.propTypes={location:u.a.object.isRequired,pageResources:u.a.object.isRequired,data:u.a.object,pageContext:u.a.object.isRequired},t.a=f},function(e,t){e.exports=function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}},function(e,t,n){var r=n(13),o=n(5).document,i=r(o)&&r(o.createElement);e.exports=function(e){return i?o.createElement(e):{}}},function(e,t){var n=0,r=Math.random();e.exports=function(e){return"Symbol(".concat(void 0===e?"":e,")_",(++n+r).toString(36))}},function(e,t){e.exports=!1},function(e,t,n){var r=n(83),o=n(56);e.exports=Object.keys||function(e){return r(e,o)}},function(e,t,n){var r=n(37),o=n(19);e.exports=function(e){return r(o(e))}},function(e,t,n){var r=n(16);e.exports=Object("z").propertyIsEnumerable(0)?Object:function(e){return"String"==r(e)?e.split(""):Object(e)}},function(e,t,n){var r=n(39),o=Math.min;e.exports=function(e){return e>0?o(r(e),9007199254740991):0}},function(e,t){var n=Math.ceil,r=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?r:n)(e)}},function(e,t,n){var r=n(54)("keys"),o=n(33);e.exports=function(e){return r[e]||(r[e]=o(e))}},function(e,t,n){var r=n(19);e.exports=function(e){return Object(r(e))}},function(e,t,n){"use strict";var r,o,i,a,l=n(34),u=n(5),c=n(21),s=n(43),f=n(14),d=n(13),p=n(29),h=n(87),m=n(88),v=n(92),g=n(57).set,y=n(94)(),b=n(59),w=n(95),_=n(96),k=n(97),x=u.TypeError,T=u.process,S=T&&T.versions,E=S&&S.v8||"",C=u.Promise,P="process"==s(T),O=function(){},R=o=b.f,A=!!function(){try{var 
e=C.resolve(1),t=(e.constructor={})[n(4)("species")]=function(e){e(O,O)};return(P||"function"==typeof PromiseRejectionEvent)&&e.then(O)instanceof t&&0!==E.indexOf("6.6")&&-1===_.indexOf("Chrome/66")}catch(r){}}(),j=function(e){var t;return!(!d(e)||"function"!=typeof(t=e.then))&&t},N=function(e,t){if(!e._n){e._n=!0;var n=e._c;y(function(){for(var r=e._v,o=1==e._s,i=0,a=function(t){var n,i,a,l=o?t.ok:t.fail,u=t.resolve,c=t.reject,s=t.domain;try{l?(o||(2==e._h&&F(e),e._h=1),!0===l?n=r:(s&&s.enter(),n=l(r),s&&(s.exit(),a=!0)),n===t.promise?c(x("Promise-chain cycle")):(i=j(n))?i.call(n,u,c):u(n)):c(r)}catch(f){s&&!a&&s.exit(),c(f)}};n.length>i;)a(n[i++]);e._c=[],e._n=!1,t&&!e._h&&L(e)})}},L=function(e){g.call(u,function(){var t,n,r,o=e._v,i=M(e);if(i&&(t=w(function(){P?T.emit("unhandledRejection",o,e):(n=u.onunhandledrejection)?n({promise:e,reason:o}):(r=u.console)&&r.error&&r.error("Unhandled promise rejection",o)}),e._h=P||M(e)?2:1),e._a=void 0,i&&t.e)throw t.v})},M=function(e){return 1!==e._h&&0===(e._a||e._c).length},F=function(e){g.call(u,function(){var t;P?T.emit("rejectionHandled",e):(t=u.onrejectionhandled)&&t({promise:e,reason:e._v})})},I=function(e){var t=this;t._d||(t._d=!0,(t=t._w||t)._v=e,t._s=2,t._a||(t._a=t._c.slice()),N(t,!0))},U=function(e){var t,n=this;if(!n._d){n._d=!0,n=n._w||n;try{if(n===e)throw x("Promise can't be resolved itself");(t=j(e))?y(function(){var r={_w:n,_d:!1};try{t.call(e,c(U,r,1),c(I,r,1))}catch(o){I.call(r,o)}}):(n._v=e,n._s=1,N(n,!1))}catch(r){I.call({_w:n,_d:!1},r)}}};A||(C=function(e){h(this,C,"Promise","_h"),p(e),r.call(this);try{e(c(U,this,1),c(I,this,1))}catch(t){I.call(this,t)}},(r=function(e){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=n(98)(C.prototype,{then:function(e,t){var n=R(v(this,C));return n.ok="function"!=typeof e||e,n.fail="function"==typeof t&&t,n.domain=P?T.domain:void 
0,this._c.push(n),this._a&&this._a.push(n),this._s&&N(this,!1),n.promise},catch:function(e){return this.then(void 0,e)}}),i=function(){var e=new r;this.promise=e,this.resolve=c(U,e,1),this.reject=c(I,e,1)},b.f=R=function(e){return e===C||e===a?new i(e):o(e)}),f(f.G+f.W+f.F*!A,{Promise:C}),n(44)(C,"Promise"),n(99)("Promise"),a=n(20).Promise,f(f.S+f.F*!A,"Promise",{reject:function(e){var t=R(this);return(0,t.reject)(e),t.promise}}),f(f.S+f.F*(l||!A),"Promise",{resolve:function(e){return k(l&&this===a?C:this,e)}}),f(f.S+f.F*!(A&&n(100)(function(e){C.all(e).catch(O)})),"Promise",{all:function(e){var t=this,n=R(t),r=n.resolve,o=n.reject,i=w(function(){var n=[],i=0,a=1;m(e,!1,function(e){var l=i++,u=!1;n.push(void 0),a++,t.resolve(e).then(function(e){u||(u=!0,n[l]=e,--a||r(n))},o)}),--a||r(n)});return i.e&&o(i.v),n.promise},race:function(e){var t=this,n=R(t),r=n.reject,o=w(function(){m(e,!1,function(e){t.resolve(e).then(n.resolve,r)})});return o.e&&r(o.v),n.promise}})},function(e,t,n){var r=n(16),o=n(4)("toStringTag"),i="Arguments"==r(function(){return arguments}());e.exports=function(e){var t,n,a;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(n){}}(t=Object(e),o))?n:i?r(t):"Object"==(a=r(t))&&"function"==typeof t.callee?"Arguments":a}},function(e,t,n){var r=n(24).f,o=n(27),i=n(4)("toStringTag");e.exports=function(e,t,n){e&&!o(e=n?e:e.prototype,i)&&r(e,i,{configurable:!0,value:t})}},function(e,t){e.exports=function(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}},function(e,t,n){var r=n(4)("unscopables"),o=Array.prototype;null==o[r]&&n(11)(o,r,{}),e.exports=function(e){o[r][e]=!0}},function(e,t,n){n(26)("replace",2,function(e,t,n){return[function(r,o){"use strict";var i=e(this),a=null==r?void 0:r[t];return void 0!==a?a.call(r,i,o):n.call(String(i),r,o)},n]})},function(e,t,n){"use strict";t.__esModule=!0,t.default=void 0;var r=!("undefined"==typeof 
window||!window.document||!window.document.createElement);t.default=r,e.exports=t.default},function(e,t,n){"use strict";!function e(){if("undefined"!=typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(t){console.error(t)}}(),e.exports=n(130)},function(e,t,n){t.components={"component---node-modules-gatsby-plugin-offline-app-shell-js":function(){return n.e(4).then(n.t.bind(null,146,7))},"component---src-pages-tags-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(11)]).then(n.bind(null,147))},"component---src-templates-tag-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(14)]).then(n.bind(null,148))},"component---src-templates-post-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(13)]).then(n.bind(null,149))},"component---src-pages-404-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(5)]).then(n.bind(null,150))},"component---src-pages-about-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(6)]).then(n.bind(null,156))},"component---src-pages-blog-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(7)]).then(n.bind(null,151))},"component---src-pages-contact-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(17),n.e(2),n.e(8)]).then(n.bind(null,152))},"component---src-pages-events-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(9)]).then(n.bind(null,153))},"component---src-pages-index-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(10)]).then(n.bind(null,154))},"component---src-pages-team-jsx":function(){return Promise.all([n.e(0),n.e(1),n.e(2),n.e(12)]).then(n.bind(null,157))}},t.data=function(){return n.e(15).then(n.t.bind(null,155,3))}},function(e,t,n){"use strict";n.r(t),n.d(t,"graphql",function(){return h}),n.d(t,"StaticQueryContext",function(){return f}),n.d(t,"StaticQuery",function(){return d}),n.d(t,"useStaticQuery",function(){return p});var 
r=n(0),o=n.n(r),i=n(1),a=n.n(i),l=n(8),u=n.n(l);n.d(t,"Link",function(){return u.a}),n.d(t,"withPrefix",function(){return l.withPrefix}),n.d(t,"parsePath",function(){return l.parsePath}),n.d(t,"navigate",function(){return l.navigate}),n.d(t,"push",function(){return l.push}),n.d(t,"replace",function(){return l.replace}),n.d(t,"navigateTo",function(){return l.navigateTo});var c=n(62),s=n.n(c);n.d(t,"PageRenderer",function(){return s.a});var f=o.a.createContext({}),d=function(e){return o.a.createElement(f.Consumer,null,function(t){return e.data||t[e.query]&&t[e.query].data?(e.render||e.children)(e.data?e.data.data:t[e.query].data):o.a.createElement("div",null,"Loading (StaticQuery)")})},p=function(e){o.a.useContext;var t=o.a.useContext(f);if(t[e]&&t[e].data)return t[e].data;throw new Error("The result of this StaticQuery could not be fetched.\n\nThis is likely a bug in Gatsby and if refreshing the page does not fix it, please open an issue in https://github.com/gatsbyjs/gatsby/issues")};function h(){throw new Error("It appears like Gatsby is misconfigured. Gatsby related `graphql` calls are supposed to only be evaluated at compile time, and then compiled away,. 
Unfortunately, something went wrong and the query was left in the compiled code.\n\n.Unless your site has a complex or custom babel/Gatsby configuration this is likely a bug in Gatsby.")}d.propTypes={data:a.a.object,query:a.a.string.isRequired,render:a.a.func,children:a.a.func}},function(e,t,n){"use strict";var r=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,i=Object.prototype.propertyIsEnumerable;e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map(function(e){return t[e]}).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach(function(e){r[e]=e}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(o){return!1}}()?Object.assign:function(e,t){for(var n,a,l=function(e){if(null==e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}(e),u=1;u<arguments.length;u++){for(var c in n=Object(arguments[u]))o.call(n,c)&&(l[c]=n[c]);if(r){a=r(n);for(var s=0;s<a.length;s++)i.call(n,a[s])&&(l[a[s]]=n[a[s]])}}return l}},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){var r=n(20),o=n(5),i=o["__core-js_shared__"]||(o["__core-js_shared__"]={});(e.exports=function(e,t){return i[e]||(i[e]=void 0!==t?t:{})})("versions",[]).push({version:r.version,mode:n(34)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(e,t,n){var r=n(36),o=n(38),i=n(84);e.exports=function(e){return function(t,n,a){var l,u=r(t),c=o(u.length),s=i(a,c);if(e&&n!=n){for(;c>s;)if((l=u[s++])!=l)return!0}else for(;c>s;s++)if((e||s in u)&&u[s]===n)return e||s||0;return!e&&-1}}},function(e,t){e.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(e,t,n){var 
r,o,i,a=n(21),l=n(93),u=n(58),c=n(32),s=n(5),f=s.process,d=s.setImmediate,p=s.clearImmediate,h=s.MessageChannel,m=s.Dispatch,v=0,g={},y=function(){var e=+this;if(g.hasOwnProperty(e)){var t=g[e];delete g[e],t()}},b=function(e){y.call(e.data)};d&&p||(d=function(e){for(var t=[],n=1;arguments.length>n;)t.push(arguments[n++]);return g[++v]=function(){l("function"==typeof e?e:Function(e),t)},r(v),v},p=function(e){delete g[e]},"process"==n(16)(f)?r=function(e){f.nextTick(a(y,e,1))}:m&&m.now?r=function(e){m.now(a(y,e,1))}:h?(i=(o=new h).port2,o.port1.onmessage=b,r=a(i.postMessage,i,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts?(r=function(e){s.postMessage(e+"","*")},s.addEventListener("message",b,!1)):r="onreadystatechange"in c("script")?function(e){u.appendChild(c("script")).onreadystatechange=function(){u.removeChild(this),y.call(e)}}:function(e){setTimeout(a(y,e,1),0)}),e.exports={set:d,clear:p}},function(e,t,n){var r=n(5).document;e.exports=r&&r.documentElement},function(e,t,n){"use strict";var r=n(29);function o(e){var t,n;this.promise=new e(function(e,r){if(void 0!==t||void 0!==n)throw TypeError("Bad Promise constructor");t=e,n=r}),this.resolve=r(t),this.reject=r(n)}e.exports.f=function(e){return new o(e)}},function(e,t,n){"use strict";var r=n(43),o={};o[n(4)("toStringTag")]="z",o+""!="[object z]"&&n(15)(Object.prototype,"toString",function(){return"[object "+r(this)+"]"},!0)},function(e,t,n){"use strict";e.exports=function(){}},function(e,t,n){var r;e.exports=(r=n(107))&&r.default||r},function(e,t,n){var r=n(13),o=n(16),i=n(4)("match");e.exports=function(e){var t;return r(e)&&(void 0!==(t=e[i])?!!t:"RegExp"==o(e))}},function(e,t,n){"use strict";var r=n(46),o=n(118),i=n(22),a=n(36);e.exports=n(65)(Array,"Array",function(e,t){this._t=a(e),this._i=0,this._k=t},function(){var e=this._t,t=this._k,n=this._i++;return!e||n>=e.length?(this._t=void 
0,o(1)):o(0,"keys"==t?n:"values"==t?e[n]:[n,e[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(e,t,n){"use strict";var r=n(34),o=n(14),i=n(15),a=n(11),l=n(22),u=n(119),c=n(44),s=n(122),f=n(4)("iterator"),d=!([].keys&&"next"in[].keys()),p=function(){return this};e.exports=function(e,t,n,h,m,v,g){u(n,t,h);var y,b,w,_=function(e){if(!d&&e in S)return S[e];switch(e){case"keys":case"values":return function(){return new n(this,e)}}return function(){return new n(this,e)}},k=t+" Iterator",x="values"==m,T=!1,S=e.prototype,E=S[f]||S["@@iterator"]||m&&S[m],C=E||_(m),P=m?x?_("entries"):C:void 0,O="Array"==t&&S.entries||E;if(O&&(w=s(O.call(new e)))!==Object.prototype&&w.next&&(c(w,k,!0),r||"function"==typeof w[f]||a(w,f,p)),x&&E&&"values"!==E.name&&(T=!0,C=function(){return E.call(this)}),r&&!g||!d&&!T&&S[f]||a(S,f,C),l[t]=C,l[k]=p,m)if(y={values:x?C:_("values"),keys:v?C:_("keys"),entries:P},g)for(b in y)b in S||i(S,b,y[b]);else o(o.P+o.F*(d||T),t,y);return y}},function(e,t,n){"use strict";t.a=function(e){var t={};return function(n){return void 0===t[n]&&(t[n]=e(n)),t[n]}}},function(e,t,n){"use strict";t.__esModule=!0,t.default=function(e){return e===e.window?e:9===e.nodeType&&(e.defaultView||e.parentWindow)},e.exports=t.default},function(e,t){t.polyfill=function(e){return e}},function(e,t,n){e.exports=function(){"use strict";return function(e){function t(t){if(t)try{e(t+"}")}catch(n){}}return function(n,r,o,i,a,l,u,c,s,f){switch(n){case 1:if(0===s&&64===r.charCodeAt(0))return e(r+";"),"";break;case 2:if(0===c)return r+"/*|*/";break;case 3:switch(c){case 102:case 112:return e(o[0]+r),"";default:return r+(0===f?"/*|*/":"")}case-2:r.split("/*|*/}").forEach(t)}}}}()},function(e,t,n){"use strict";var r=n(6),o=r(n(133)),i=r(n(143));t.ScrollContainer=i.default,t.ScrollContext=o.default},function(e,t,n){var 
r,o,i,a,l;e.exports=(o=[],i=document,a=i.documentElement.doScroll,(l=(a?/^loaded|^c/:/^loaded|^i|^c/).test(i.readyState))||i.addEventListener("DOMContentLoaded",r=function(){for(i.removeEventListener("DOMContentLoaded",r),l=1;r=o.shift();)r()}),function(e){l?setTimeout(e,0):o.push(e)})},function(e){e.exports=[]},function(e,t,n){"use strict";var r=n(66),o={animationIterationCount:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1};var i=function(e){for(var t,n=e.length,r=n^n,o=0;n>=4;)t=1540483477*(65535&(t=255&e.charCodeAt(o)|(255&e.charCodeAt(++o))<<8|(255&e.charCodeAt(++o))<<16|(255&e.charCodeAt(++o))<<24))+((1540483477*(t>>>16)&65535)<<16),r=1540483477*(65535&r)+((1540483477*(r>>>16)&65535)<<16)^(t=1540483477*(65535&(t^=t>>>24))+((1540483477*(t>>>16)&65535)<<16)),n-=4,++o;switch(n){case 3:r^=(255&e.charCodeAt(o+2))<<16;case 2:r^=(255&e.charCodeAt(o+1))<<8;case 1:r=1540483477*(65535&(r^=255&e.charCodeAt(o)))+((1540483477*(r>>>16)&65535)<<16)}return r=1540483477*(65535&(r^=r>>>13))+((1540483477*(r>>>16)&65535)<<16),((r^=r>>>15)>>>0).toString(36)};var a=function(e){function t(e,t,r){var o=t.trim().split(h);t=o;var i=o.length,a=e.length;switch(a){case 0:case 1:var l=0;for(e=0===a?"":e[0]+" ";l<i;++l)t[l]=n(e,t[l],r).trim();break;default:var u=l=0;for(t=[];l<i;++l)for(var c=0;c<a;++c)t[u++]=n(e[c]+" ",o[l],r).trim()}return t}function n(e,t,n){var r=t.charCodeAt(0);switch(33>r&&(r=(t=t.trim()).charCodeAt(0)),r){case 38:return t.replace(m,"$1"+e.trim());case 58:return 
e.trim()+t.replace(m,"$1"+e.trim());default:if(0<1*n&&0<t.indexOf("\f"))return t.replace(m,(58===e.charCodeAt(0)?"":"$1")+e.trim())}return e+t}function r(e,t,n,i){var a=e+";",l=2*t+3*n+4*i;if(944===l){e=a.indexOf(":",9)+1;var u=a.substring(e,a.length-1).trim();return u=a.substring(0,e).trim()+u+";",1===O||2===O&&o(u,1)?"-webkit-"+u+u:u}if(0===O||2===O&&!o(a,1))return a;switch(l){case 1015:return 97===a.charCodeAt(10)?"-webkit-"+a+a:a;case 951:return 116===a.charCodeAt(3)?"-webkit-"+a+a:a;case 963:return 110===a.charCodeAt(5)?"-webkit-"+a+a:a;case 1009:if(100!==a.charCodeAt(4))break;case 969:case 942:return"-webkit-"+a+a;case 978:return"-webkit-"+a+"-moz-"+a+a;case 1019:case 983:return"-webkit-"+a+"-moz-"+a+"-ms-"+a+a;case 883:if(45===a.charCodeAt(8))return"-webkit-"+a+a;if(0<a.indexOf("image-set(",11))return a.replace(S,"$1-webkit-$2")+a;break;case 932:if(45===a.charCodeAt(4))switch(a.charCodeAt(5)){case 103:return"-webkit-box-"+a.replace("-grow","")+"-webkit-"+a+"-ms-"+a.replace("grow","positive")+a;case 115:return"-webkit-"+a+"-ms-"+a.replace("shrink","negative")+a;case 98:return"-webkit-"+a+"-ms-"+a.replace("basis","preferred-size")+a}return"-webkit-"+a+"-ms-"+a+a;case 964:return"-webkit-"+a+"-ms-flex-"+a+a;case 1023:if(99!==a.charCodeAt(8))break;return"-webkit-box-pack"+(u=a.substring(a.indexOf(":",15)).replace("flex-","").replace("space-between","justify"))+"-webkit-"+a+"-ms-flex-pack"+u+a;case 1005:return d.test(a)?a.replace(f,":-webkit-")+a.replace(f,":-moz-")+a:a;case 1e3:switch(t=(u=a.substring(13).trim()).indexOf("-")+1,u.charCodeAt(0)+u.charCodeAt(t)){case 226:u=a.replace(b,"tb");break;case 232:u=a.replace(b,"tb-rl");break;case 220:u=a.replace(b,"lr");break;default:return a}return"-webkit-"+a+"-ms-"+u+a;case 1017:if(-1===a.indexOf("sticky",9))break;case 975:switch(t=(a=e).length-10,l=(u=(33===a.charCodeAt(t)?a.substring(0,t):a).substring(e.indexOf(":",7)+1).trim()).charCodeAt(0)+(0|u.charCodeAt(7))){case 203:if(111>u.charCodeAt(8))break;case 
115:a=a.replace(u,"-webkit-"+u)+";"+a;break;case 207:case 102:a=a.replace(u,"-webkit-"+(102<l?"inline-":"")+"box")+";"+a.replace(u,"-webkit-"+u)+";"+a.replace(u,"-ms-"+u+"box")+";"+a}return a+";";case 938:if(45===a.charCodeAt(5))switch(a.charCodeAt(6)){case 105:return u=a.replace("-items",""),"-webkit-"+a+"-webkit-box-"+u+"-ms-flex-"+u+a;case 115:return"-webkit-"+a+"-ms-flex-item-"+a.replace(k,"")+a;default:return"-webkit-"+a+"-ms-flex-line-pack"+a.replace("align-content","").replace(k,"")+a}break;case 973:case 989:if(45!==a.charCodeAt(3)||122===a.charCodeAt(4))break;case 931:case 953:if(!0===T.test(e))return 115===(u=e.substring(e.indexOf(":")+1)).charCodeAt(0)?r(e.replace("stretch","fill-available"),t,n,i).replace(":fill-available",":stretch"):a.replace(u,"-webkit-"+u)+a.replace(u,"-moz-"+u.replace("fill-",""))+a;break;case 962:if(a="-webkit-"+a+(102===a.charCodeAt(5)?"-ms-"+a:"")+a,211===n+i&&105===a.charCodeAt(13)&&0<a.indexOf("transform",10))return a.substring(0,a.indexOf(";",27)+1).replace(p,"$1-webkit-$2")+a}return a}function o(e,t){var n=e.indexOf(1===t?":":"{"),r=e.substring(0,3!==t?n:10);return n=e.substring(n+1,e.length-1),N(2!==t?r:r.replace(x,"$1"),n,t)}function i(e,t){var n=r(t,t.charCodeAt(0),t.charCodeAt(1),t.charCodeAt(2));return n!==t+";"?n.replace(_," or ($1)").substring(4):"("+t+")"}function a(e,t,n,r,o,i,a,l,c,s){for(var f,d=0,p=t;d<j;++d)switch(f=A[d].call(u,e,p,n,r,o,i,a,l,c,s)){case void 0:case!1:case!0:case null:break;default:p=f}if(p!==t)return p}function l(e){return void 0!==(e=e.prefix)&&(N=null,e?"function"!=typeof e?O=1:(O=2,N=e):O=0),l}function u(e,n){var l=e;if(33>l.charCodeAt(0)&&(l=l.trim()),l=[l],0<j){var u=a(-1,n,l,l,C,E,0,0,0,0);void 0!==u&&"string"==typeof u&&(n=u)}var f=function e(n,l,u,f,d){for(var 
p,h,m,b,_,k=0,x=0,T=0,S=0,A=0,N=0,M=m=p=0,F=0,I=0,U=0,D=0,W=u.length,z=W-1,B="",H="",V="",$="";F<W;){if(h=u.charCodeAt(F),F===z&&0!==x+S+T+k&&(0!==x&&(h=47===x?10:47),S=T=k=0,W++,z++),0===x+S+T+k){if(F===z&&(0<I&&(B=B.replace(s,"")),0<B.trim().length)){switch(h){case 32:case 9:case 59:case 13:case 10:break;default:B+=u.charAt(F)}h=59}switch(h){case 123:for(p=(B=B.trim()).charCodeAt(0),m=1,D=++F;F<W;){switch(h=u.charCodeAt(F)){case 123:m++;break;case 125:m--;break;case 47:switch(h=u.charCodeAt(F+1)){case 42:case 47:e:{for(M=F+1;M<z;++M)switch(u.charCodeAt(M)){case 47:if(42===h&&42===u.charCodeAt(M-1)&&F+2!==M){F=M+1;break e}break;case 10:if(47===h){F=M+1;break e}}F=M}}break;case 91:h++;case 40:h++;case 34:case 39:for(;F++<z&&u.charCodeAt(F)!==h;);}if(0===m)break;F++}switch(m=u.substring(D,F),0===p&&(p=(B=B.replace(c,"").trim()).charCodeAt(0)),p){case 64:switch(0<I&&(B=B.replace(s,"")),h=B.charCodeAt(1)){case 100:case 109:case 115:case 45:I=l;break;default:I=R}if(D=(m=e(l,I,m,h,d+1)).length,0<j&&(_=a(3,m,I=t(R,B,U),l,C,E,D,h,d,f),B=I.join(""),void 0!==_&&0===(D=(m=_.trim()).length)&&(h=0,m="")),0<D)switch(h){case 115:B=B.replace(w,i);case 100:case 109:case 45:m=B+"{"+m+"}";break;case 107:m=(B=B.replace(v,"$1 $2"))+"{"+m+"}",m=1===O||2===O&&o("@"+m,3)?"@-webkit-"+m+"@"+m:"@"+m;break;default:m=B+m,112===f&&(H+=m,m="")}else m="";break;default:m=e(l,t(l,B,U),m,f,d+1)}V+=m,m=U=I=M=p=0,B="",h=u.charCodeAt(++F);break;case 125:case 59:if(1<(D=(B=(0<I?B.replace(s,""):B).trim()).length))switch(0===M&&(p=B.charCodeAt(0),45===p||96<p&&123>p)&&(D=(B=B.replace(" ",":")).length),0<j&&void 0!==(_=a(1,B,l,n,C,E,H.length,f,d,f))&&0===(D=(B=_.trim()).length)&&(B="\0\0"),p=B.charCodeAt(0),h=B.charCodeAt(1),p){case 0:break;case 64:if(105===h||99===h){$+=B+u.charAt(F);break}default:58!==B.charCodeAt(D-1)&&(H+=r(B,p,h,B.charCodeAt(2)))}U=I=M=p=0,B="",h=u.charCodeAt(++F)}}switch(h){case 13:case 
10:47===x?x=0:0===1+p&&107!==f&&0<B.length&&(I=1,B+="\0"),0<j*L&&a(0,B,l,n,C,E,H.length,f,d,f),E=1,C++;break;case 59:case 125:if(0===x+S+T+k){E++;break}default:switch(E++,b=u.charAt(F),h){case 9:case 32:if(0===S+k+x)switch(A){case 44:case 58:case 9:case 32:b="";break;default:32!==h&&(b=" ")}break;case 0:b="\\0";break;case 12:b="\\f";break;case 11:b="\\v";break;case 38:0===S+x+k&&(I=U=1,b="\f"+b);break;case 108:if(0===S+x+k+P&&0<M)switch(F-M){case 2:112===A&&58===u.charCodeAt(F-3)&&(P=A);case 8:111===N&&(P=N)}break;case 58:0===S+x+k&&(M=F);break;case 44:0===x+T+S+k&&(I=1,b+="\r");break;case 34:case 39:0===x&&(S=S===h?0:0===S?h:S);break;case 91:0===S+x+T&&k++;break;case 93:0===S+x+T&&k--;break;case 41:0===S+x+k&&T--;break;case 40:if(0===S+x+k){if(0===p)switch(2*A+3*N){case 533:break;default:p=1}T++}break;case 64:0===x+T+S+k+M+m&&(m=1);break;case 42:case 47:if(!(0<S+k+T))switch(x){case 0:switch(2*h+3*u.charCodeAt(F+1)){case 235:x=47;break;case 220:D=F,x=42}break;case 42:47===h&&42===A&&D+2!==F&&(33===u.charCodeAt(D+2)&&(H+=u.substring(D,F+1)),b="",x=0)}}0===x&&(B+=b)}N=A,A=h,F++}if(0<(D=H.length)){if(I=l,0<j&&void 0!==(_=a(2,H,I,n,C,E,D,f,d,f))&&0===(H=_).length)return $+H+V;if(H=I.join(",")+"{"+H+"}",0!=O*P){switch(2!==O||o(H,2)||(P=0),P){case 111:H=H.replace(y,":-moz-$1")+H;break;case 112:H=H.replace(g,"::-webkit-input-$1")+H.replace(g,"::-moz-$1")+H.replace(g,":-ms-input-$1")+H}P=0}}return $+H+V}(R,l,n,0,0);return 0<j&&void 0!==(u=a(-2,f,l,l,C,E,f.length,0,0,0))&&(f=u),P=0,E=C=1,f}var c=/^\0+/g,s=/[\0\r\f]/g,f=/: */g,d=/zoo|gra/,p=/([,: ])(transform)/g,h=/,\r+?/g,m=/([\t\r\n ])*\f?&/g,v=/@(k\w+)\s*(\S*)\s*/,g=/::(place)/g,y=/:(read-only)/g,b=/[svh]\w+-[tblr]{2}/,w=/\(\s*(.*)\s*\)/g,_=/([\s\S]*?);/g,k=/-self|flex-/g,x=/[^]*?(:[rp][el]a[\w-]+)[^]*/,T=/stretch|:\s*\w+\-(?:conte|avail)/,S=/([^-])(image-set\()/,E=1,C=1,P=0,O=1,R=[],A=[],j=0,N=null,L=0;return u.use=function e(t){switch(t){case void 0:case null:j=A.length=0;break;default:switch(t.constructor){case 
Array:for(var n=0,r=t.length;n<r;++n)e(t[n]);break;case Function:A[j++]=t;break;case Boolean:L=0|!!t}}return e},u.set=l,void 0!==e&&l(e),u},l=n(69),u=n.n(l),c=/[A-Z]|^ms/g,s=Object(r.a)(function(e){return e.replace(c,"-$&").toLowerCase()}),f=function(e,t){return null==t||"boolean"==typeof t?"":1===o[e]||45===e.charCodeAt(1)||isNaN(t)||0===t?t:t+"px"},d=function e(t){for(var n=t.length,r=0,o="";r<n;r++){var i=t[r];if(null!=i){var a=void 0;switch(typeof i){case"boolean":break;case"function":0,a=e([i()]);break;case"object":if(Array.isArray(i))a=e(i);else for(var l in a="",i)i[l]&&l&&(a&&(a+=" "),a+=l);break;default:a=i}a&&(o&&(o+=" "),o+=a)}}return o},p="undefined"!=typeof document;function h(e){var t=document.createElement("style");return t.setAttribute("data-emotion",e.key||""),void 0!==e.nonce&&t.setAttribute("nonce",e.nonce),t.appendChild(document.createTextNode("")),(void 0!==e.container?e.container:document.head).appendChild(t),t}var m=function(){function e(e){this.isSpeedy=!0,this.tags=[],this.ctr=0,this.opts=e}var t=e.prototype;return t.inject=function(){if(this.injected)throw new Error("already injected!");this.tags[0]=h(this.opts),this.injected=!0},t.speedy=function(e){if(0!==this.ctr)throw new Error("cannot change speedy now");this.isSpeedy=!!e},t.insert=function(e,t){if(this.isSpeedy){var n=function(e){if(e.sheet)return e.sheet;for(var t=0;t<document.styleSheets.length;t++)if(document.styleSheets[t].ownerNode===e)return document.styleSheets[t]}(this.tags[this.tags.length-1]);try{n.insertRule(e,n.cssRules.length)}catch(o){0}}else{var r=h(this.opts);this.tags.push(r),r.appendChild(document.createTextNode(e+(t||"")))}this.ctr++,this.ctr%65e3==0&&this.tags.push(h(this.opts))},t.flush=function(){this.tags.forEach(function(e){return e.parentNode.removeChild(e)}),this.tags=[],this.ctr=0,this.injected=!1},e}();t.a=function(e,t){if(void 0!==e.__SECRET_EMOTION__)return e.__SECRET_EMOTION__;void 0===t&&(t={});var 
n,r,o=t.key||"css",l=u()(function(e){n+=e,p&&h.insert(e,g)});void 0!==t.prefix&&(r={prefix:t.prefix});var c={registered:{},inserted:{},nonce:t.nonce,key:o},h=new m(t);p&&h.inject();var v=new a(r);v.use(t.stylisPlugins)(l);var g="";function y(e,t){if(null==e)return"";switch(typeof e){case"boolean":return"";case"function":if(void 0!==e.__emotion_styles){var n=e.toString();return n}return y.call(this,void 0===this?e():e(this.mergedProps,this.context),t);case"object":return function(e){if(_.has(e))return _.get(e);var t="";return Array.isArray(e)?e.forEach(function(e){t+=y.call(this,e,!1)},this):Object.keys(e).forEach(function(n){"object"!=typeof e[n]?void 0!==c.registered[e[n]]?t+=n+"{"+c.registered[e[n]]+"}":t+=s(n)+":"+f(n,e[n])+";":Array.isArray(e[n])&&"string"==typeof e[n][0]&&void 0===c.registered[e[n][0]]?e[n].forEach(function(e){t+=s(n)+":"+f(n,e)+";"}):t+=n+"{"+y.call(this,e[n],!1)+"}"},this),_.set(e,t),t}.call(this,e);default:var r=c.registered[e];return!1===t&&void 0!==r?r:e}}var b,w,_=new WeakMap,k=/label:\s*([^\s;\n{]+)\s*;/g,x=function(e){var t=!0,n="",r="";null==e||void 0===e.raw?(t=!1,n+=y.call(this,e,!1)):n+=e[0];for(var o=arguments.length,a=new Array(o>1?o-1:0),l=1;l<o;l++)a[l-1]=arguments[l];return a.forEach(function(r,o){n+=y.call(this,r,46===n.charCodeAt(n.length-1)),!0===t&&void 0!==e[o+1]&&(n+=e[o+1])},this),w=n,n=n.replace(k,function(e,t){return r+="-"+t,""}),b=function(e,t){return i(e+t)+t}(n,r),n};function T(e,t){void 0===c.inserted[b]&&(n="",v(e,t),c.inserted[b]=n)}var S=function(){var e=x.apply(this,arguments),t=o+"-"+b;return void 0===c.registered[t]&&(c.registered[t]=w),T("."+t,e),t};function E(e,t){var n="";return t.split(" ").forEach(function(t){void 0!==c.registered[t]?e.push(t):n+=t+" "}),n}function C(e,t){var n=[],r=E(n,e);return n.length<2?e:r+S(n,t)}function P(e){c.inserted[e]=!0}if(p){var 
O=document.querySelectorAll("[data-emotion-"+o+"]");Array.prototype.forEach.call(O,function(e){h.tags[0].parentNode.insertBefore(e,h.tags[0]),e.getAttribute("data-emotion-"+o).split(" ").forEach(P)})}var R={flush:function(){p&&(h.flush(),h.inject()),c.inserted={},c.registered={}},hydrate:function(e){e.forEach(P)},cx:function(){for(var e=arguments.length,t=new Array(e),n=0;n<e;n++)t[n]=arguments[n];return C(d(t))},merge:C,getRegisteredStyles:E,injectGlobal:function(){T("",x.apply(this,arguments))},keyframes:function(){var e=x.apply(this,arguments),t="animation-"+b;return T("","@keyframes "+t+"{"+e+"}"),t},css:S,sheet:h,caches:c};return e.__SECRET_EMOTION__=R,R}},function(e,t,n){"use strict";n.r(t),function(e){n.d(t,"flush",function(){return a}),n.d(t,"hydrate",function(){return l}),n.d(t,"cx",function(){return u}),n.d(t,"merge",function(){return c}),n.d(t,"getRegisteredStyles",function(){return s}),n.d(t,"injectGlobal",function(){return f}),n.d(t,"keyframes",function(){return d}),n.d(t,"css",function(){return p}),n.d(t,"sheet",function(){return h}),n.d(t,"caches",function(){return m});var r=n(73),o=void 0!==e?e:{},i=Object(r.a)(o),a=i.flush,l=i.hydrate,u=i.cx,c=i.merge,s=i.getRegisteredStyles,f=i.injectGlobal,d=i.keyframes,p=i.css,h=i.sheet,m=i.caches}.call(this,n(75))},function(e,t){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(r){"object"==typeof window&&(n=window)}e.exports=n},function(e,t){e.exports=function(e,t){if(null==e)return{};var n,r,o={},i=Object.keys(e);for(r=0;r<i.length;r++)n=i[r],t.indexOf(n)>=0||(o[n]=e[n]);return o}},function(e,t){function n(){return e.exports=n=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},n.apply(this,arguments)}e.exports=n},function(e,t,n){n(26)("search",1,function(e,t,n){return[function(n){"use strict";var r=e(this),o=null==n?void 0:n[t];return void 0!==o?o.call(n,r):new 
RegExp(n)[t](String(r))},n]})},function(e,t,n){e.exports=!n(18)&&!n(25)(function(){return 7!=Object.defineProperty(n(32)("div"),"a",{get:function(){return 7}}).a})},function(e,t,n){var r=n(13);e.exports=function(e,t){if(!r(e))return e;var n,o;if(t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;if("function"==typeof(n=e.valueOf)&&!r(o=n.call(e)))return o;if(!t&&"function"==typeof(n=e.toString)&&!r(o=n.call(e)))return o;throw TypeError("Can't convert object to primitive value")}},function(e,t,n){n(26)("match",1,function(e,t,n){return[function(n){"use strict";var r=e(this),o=null==n?void 0:n[t];return void 0!==o?o.call(n,r):new RegExp(n)[t](String(r))},n]})},function(e,t,n){"use strict";var r=n(35),o=n(85),i=n(86),a=n(41),l=n(37),u=Object.assign;e.exports=!u||n(25)(function(){var e={},t={},n=Symbol(),r="abcdefghijklmnopqrst";return e[n]=7,r.split("").forEach(function(e){t[e]=e}),7!=u({},e)[n]||Object.keys(u({},t)).join("")!=r})?function(e,t){for(var n=a(e),u=arguments.length,c=1,s=o.f,f=i.f;u>c;)for(var d,p=l(arguments[c++]),h=s?r(p).concat(s(p)):r(p),m=h.length,v=0;m>v;)f.call(p,d=h[v++])&&(n[d]=p[d]);return n}:u},function(e,t,n){var r=n(27),o=n(36),i=n(55)(!1),a=n(40)("IE_PROTO");e.exports=function(e,t){var n,l=o(e),u=0,c=[];for(n in l)n!=a&&r(l,n)&&c.push(n);for(;t.length>u;)r(l,n=t[u++])&&(~i(c,n)||c.push(n));return c}},function(e,t,n){var r=n(39),o=Math.max,i=Math.min;e.exports=function(e,t){return(e=r(e))<0?o(e+t,0):i(e,t)}},function(e,t){t.f=Object.getOwnPropertySymbols},function(e,t){t.f={}.propertyIsEnumerable},function(e,t){e.exports=function(e,t,n,r){if(!(e instanceof t)||void 0!==r&&r in e)throw TypeError(n+": incorrect invocation!");return e}},function(e,t,n){var r=n(21),o=n(89),i=n(90),a=n(12),l=n(38),u=n(91),c={},s={};(t=e.exports=function(e,t,n,f,d){var p,h,m,v,g=d?function(){return e}:u(e),y=r(n,f,t?2:1),b=0;if("function"!=typeof g)throw TypeError(e+" is not 
iterable!");if(i(g)){for(p=l(e.length);p>b;b++)if((v=t?y(a(h=e[b])[0],h[1]):y(e[b]))===c||v===s)return v}else for(m=g.call(e);!(h=m.next()).done;)if((v=o(m,y,h.value,t))===c||v===s)return v}).BREAK=c,t.RETURN=s},function(e,t,n){var r=n(12);e.exports=function(e,t,n,o){try{return o?t(r(n)[0],n[1]):t(n)}catch(a){var i=e.return;throw void 0!==i&&r(i.call(e)),a}}},function(e,t,n){var r=n(22),o=n(4)("iterator"),i=Array.prototype;e.exports=function(e){return void 0!==e&&(r.Array===e||i[o]===e)}},function(e,t,n){var r=n(43),o=n(4)("iterator"),i=n(22);e.exports=n(20).getIteratorMethod=function(e){if(null!=e)return e[o]||e["@@iterator"]||i[r(e)]}},function(e,t,n){var r=n(12),o=n(29),i=n(4)("species");e.exports=function(e,t){var n,a=r(e).constructor;return void 0===a||null==(n=r(a)[i])?t:o(n)}},function(e,t){e.exports=function(e,t,n){var r=void 0===n;switch(t.length){case 0:return r?e():e.call(n);case 1:return r?e(t[0]):e.call(n,t[0]);case 2:return r?e(t[0],t[1]):e.call(n,t[0],t[1]);case 3:return r?e(t[0],t[1],t[2]):e.call(n,t[0],t[1],t[2]);case 4:return r?e(t[0],t[1],t[2],t[3]):e.call(n,t[0],t[1],t[2],t[3])}return e.apply(n,t)}},function(e,t,n){var r=n(5),o=n(57).set,i=r.MutationObserver||r.WebKitMutationObserver,a=r.process,l=r.Promise,u="process"==n(16)(a);e.exports=function(){var e,t,n,c=function(){var r,o;for(u&&(r=a.domain)&&r.exit();e;){o=e.fn,e=e.next;try{o()}catch(i){throw e?n():t=void 0,i}}t=void 0,r&&r.enter()};if(u)n=function(){a.nextTick(c)};else if(!i||r.navigator&&r.navigator.standalone)if(l&&l.resolve){var s=l.resolve(void 0);n=function(){s.then(c)}}else n=function(){o.call(r,c)};else{var f=!0,d=document.createTextNode("");new i(c).observe(d,{characterData:!0}),n=function(){d.data=f=!f}}return function(r){var o={fn:r,next:void 0};t&&(t.next=o),e||(e=o,n()),t=o}}},function(e,t){e.exports=function(e){try{return{e:!1,v:e()}}catch(t){return{e:!0,v:t}}}},function(e,t,n){var r=n(5).navigator;e.exports=r&&r.userAgent||""},function(e,t,n){var 
r=n(12),o=n(13),i=n(59);e.exports=function(e,t){if(r(e),o(t)&&t.constructor===e)return t;var n=i.f(e);return(0,n.resolve)(t),n.promise}},function(e,t,n){var r=n(15);e.exports=function(e,t,n){for(var o in t)r(e,o,t[o],n);return e}},function(e,t,n){"use strict";var r=n(5),o=n(24),i=n(18),a=n(4)("species");e.exports=function(e){var t=r[e];i&&t&&!t[a]&&o.f(t,a,{configurable:!0,get:function(){return this}})}},function(e,t,n){var r=n(4)("iterator"),o=!1;try{var i=[7][r]();i.return=function(){o=!0},Array.from(i,function(){throw 2})}catch(a){}e.exports=function(e,t){if(!t&&!o)return!1;var n=!1;try{var i=[7],l=i[r]();l.next=function(){return{done:n=!0}},i[r]=function(){return l},e(i)}catch(a){}return n}},function(e,t,n){e.exports=[{plugin:n(102),options:{plugins:[]}},{plugin:n(128),options:{plugins:[],autoLabel:!1,labelFormat:"[filename]--[local]"}},{plugin:n(129),options:{plugins:[]}}]},function(e,t,n){"use strict";var r=n(6),o=n(51),i=r(n(126));t.onClientEntry=function(){(0,i.default)(window,function(e){(0,o.navigate)(e)})}},function(e,t,n){"use strict";var r=n(52),o="function"==typeof Symbol&&Symbol.for,i=o?Symbol.for("react.element"):60103,a=o?Symbol.for("react.portal"):60106,l=o?Symbol.for("react.fragment"):60107,u=o?Symbol.for("react.strict_mode"):60108,c=o?Symbol.for("react.profiler"):60114,s=o?Symbol.for("react.provider"):60109,f=o?Symbol.for("react.context"):60110,d=o?Symbol.for("react.concurrent_mode"):60111,p=o?Symbol.for("react.forward_ref"):60112,h=o?Symbol.for("react.suspense"):60113,m=o?Symbol.for("react.memo"):60115,v=o?Symbol.for("react.lazy"):60116,g="function"==typeof Symbol&&Symbol.iterator;function y(e){for(var t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,l){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var 
u=[n,r,o,i,a,l],c=0;(e=Error(t.replace(/%s/g,function(){return u[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. ",n)}var b={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},w={};function _(e,t,n){this.props=e,this.context=t,this.refs=w,this.updater=n||b}function k(){}function x(e,t,n){this.props=e,this.context=t,this.refs=w,this.updater=n||b}_.prototype.isReactComponent={},_.prototype.setState=function(e,t){"object"!=typeof e&&"function"!=typeof e&&null!=e&&y("85"),this.updater.enqueueSetState(this,e,t,"setState")},_.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},k.prototype=_.prototype;var T=x.prototype=new k;T.constructor=x,r(T,_.prototype),T.isPureReactComponent=!0;var S={current:null,currentDispatcher:null},E=Object.prototype.hasOwnProperty,C={key:!0,ref:!0,__self:!0,__source:!0};function P(e,t,n){var r=void 0,o={},a=null,l=null;if(null!=t)for(r in void 0!==t.ref&&(l=t.ref),void 0!==t.key&&(a=""+t.key),t)E.call(t,r)&&!C.hasOwnProperty(r)&&(o[r]=t[r]);var u=arguments.length-2;if(1===u)o.children=n;else if(1<u){for(var c=Array(u),s=0;s<u;s++)c[s]=arguments[s+2];o.children=c}if(e&&e.defaultProps)for(r in u=e.defaultProps)void 0===o[r]&&(o[r]=u[r]);return{$$typeof:i,type:e,key:a,ref:l,props:o,_owner:S.current}}function O(e){return"object"==typeof e&&null!==e&&e.$$typeof===i}var R=/\/+/g,A=[];function j(e,t,n,r){if(A.length){var o=A.pop();return o.result=e,o.keyPrefix=t,o.func=n,o.context=r,o.count=0,o}return{result:e,keyPrefix:t,func:n,context:r,count:0}}function N(e){e.result=null,e.keyPrefix=null,e.func=null,e.context=null,e.count=0,10>A.length&&A.push(e)}function L(e,t,n){return null==e?0:function e(t,n,r,o){var l=typeof t;"undefined"!==l&&"boolean"!==l||(t=null);var 
u=!1;if(null===t)u=!0;else switch(l){case"string":case"number":u=!0;break;case"object":switch(t.$$typeof){case i:case a:u=!0}}if(u)return r(o,t,""===n?"."+M(t,0):n),1;if(u=0,n=""===n?".":n+":",Array.isArray(t))for(var c=0;c<t.length;c++){var s=n+M(l=t[c],c);u+=e(l,s,r,o)}else if(s=null===t||"object"!=typeof t?null:"function"==typeof(s=g&&t[g]||t["@@iterator"])?s:null,"function"==typeof s)for(t=s.call(t),c=0;!(l=t.next()).done;)u+=e(l=l.value,s=n+M(l,c++),r,o);else"object"===l&&y("31","[object Object]"==(r=""+t)?"object with keys {"+Object.keys(t).join(", ")+"}":r,"");return u}(e,"",t,n)}function M(e,t){return"object"==typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+(""+e).replace(/[=:]/g,function(e){return t[e]})}(e.key):t.toString(36)}function F(e,t){e.func.call(e.context,t,e.count++)}function I(e,t,n){var r=e.result,o=e.keyPrefix;e=e.func.call(e.context,t,e.count++),Array.isArray(e)?U(e,r,n,function(e){return e}):null!=e&&(O(e)&&(e=function(e,t){return{$$typeof:i,type:e.type,key:t,ref:e.ref,props:e.props,_owner:e._owner}}(e,o+(!e.key||t&&t.key===e.key?"":(""+e.key).replace(R,"$&/")+"/")+n)),r.push(e))}function U(e,t,n,r,o){var i="";null!=n&&(i=(""+n).replace(R,"$&/")+"/"),L(e,I,t=j(t,i,r,o)),N(t)}var D={Children:{map:function(e,t,n){if(null==e)return e;var r=[];return U(e,r,null,t,n),r},forEach:function(e,t,n){if(null==e)return e;L(e,F,t=j(null,null,t,n)),N(t)},count:function(e){return L(e,function(){return null},null)},toArray:function(e){var t=[];return U(e,t,null,function(e){return e}),t},only:function(e){return O(e)||y("143"),e}},createRef:function(){return{current:null}},Component:_,PureComponent:x,createContext:function(e,t){return void 
0===t&&(t=null),(e={$$typeof:f,_calculateChangedBits:t,_currentValue:e,_currentValue2:e,Provider:null,Consumer:null}).Provider={$$typeof:s,_context:e},e.Consumer=e},forwardRef:function(e){return{$$typeof:p,render:e}},lazy:function(e){return{$$typeof:v,_ctor:e,_status:-1,_result:null}},memo:function(e,t){return{$$typeof:m,type:e,compare:void 0===t?null:t}},Fragment:l,StrictMode:u,unstable_ConcurrentMode:d,Suspense:h,unstable_Profiler:c,createElement:P,cloneElement:function(e,t,n){null==e&&y("267",e);var o=void 0,a=r({},e.props),l=e.key,u=e.ref,c=e._owner;if(null!=t){void 0!==t.ref&&(u=t.ref,c=S.current),void 0!==t.key&&(l=""+t.key);var s=void 0;for(o in e.type&&e.type.defaultProps&&(s=e.type.defaultProps),t)E.call(t,o)&&!C.hasOwnProperty(o)&&(a[o]=void 0===t[o]&&void 0!==s?s[o]:t[o])}if(1===(o=arguments.length-2))a.children=n;else if(1<o){s=Array(o);for(var f=0;f<o;f++)s[f]=arguments[f+2];a.children=s}return{$$typeof:i,type:e.type,key:l,ref:u,props:a,_owner:c}},createFactory:function(e){var t=P.bind(null,e);return t.type=e,t},isValidElement:O,version:"16.6.0",__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{ReactCurrentOwner:S,assign:r}},W={default:D},z=W&&D||W;e.exports=z.default||z},function(e,t,n){"use strict";var r=n(105);function o(){}e.exports=function(){function e(e,t,n,o,i,a){if(a!==r){var l=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw l.name="Invariant Violation",l}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t};return n.checkPropTypes=o,n.PropTypes=n,n}},function(e,t,n){"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},function(e,t,n){"use strict";t.__esModule=!0,t.parsePath=function(e){var t=e||"/",n="",r="",o=t.indexOf("#");-1!==o&&(r=t.substr(o),t=t.substr(0,o));var i=t.indexOf("?");-1!==i&&(n=t.substr(i),t=t.substr(0,i));return{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}}},function(e,t,n){"use strict";n.r(t);n(28);var r=n(0),o=n.n(r),i=n(1),a=n.n(i),l=n(30),u=n(2),c=function(e){var t=e.location,n=u.default.getResourcesForPathnameSync(t.pathname);return o.a.createElement(l.a,Object.assign({location:t,pageResources:n},n.json))};c.propTypes={location:a.a.shape({pathname:a.a.string.isRequired}).isRequired},t.default=c},function(e,t,n){"use strict";var r=n(14),o=n(55)(!0);r(r.P,"Array",{includes:function(e){return o(this,e,arguments.length>1?arguments[1]:void 0)}}),n(46)("includes")},function(e,t,n){"use strict";var r=n(14),o=n(110);r(r.P+r.F*n(111)("includes"),"String",{includes:function(e){return!!~o(this,e,"includes").indexOf(e,arguments.length>1?arguments[1]:void 0)}})},function(e,t,n){var r=n(63),o=n(19);e.exports=function(e,t,n){if(r(t))throw TypeError("String#"+n+" doesn't accept regex!");return String(o(e))}},function(e,t,n){var r=n(4)("match");e.exports=function(e){var t=/./;try{"/./"[e](t)}catch(n){try{return t[r]=!1,!"/./"[e](t)}catch(o){}}return!0}},function(e,t,n){"use strict";var r=n(14),o=n(113)(5),i=!0;"find"in[]&&Array(1).find(function(){i=!1}),r(r.P+r.F*i,"Array",{find:function(e){return o(this,e,arguments.length>1?arguments[1]:void 0)}}),n(46)("find")},function(e,t,n){var r=n(21),o=n(37),i=n(41),a=n(38),l=n(114);e.exports=function(e,t){var 
n=1==e,u=2==e,c=3==e,s=4==e,f=6==e,d=5==e||f,p=t||l;return function(t,l,h){for(var m,v,g=i(t),y=o(g),b=r(l,h,3),w=a(y.length),_=0,k=n?p(t,w):u?p(t,0):void 0;w>_;_++)if((d||_ in y)&&(v=b(m=y[_],_,g),e))if(n)k[_]=v;else if(v)switch(e){case 3:return!0;case 5:return m;case 6:return _;case 2:k.push(m)}else if(s)return!1;return f?-1:c||s?s:k}}},function(e,t,n){var r=n(115);e.exports=function(e,t){return new(r(e))(t)}},function(e,t,n){var r=n(13),o=n(116),i=n(4)("species");e.exports=function(e){var t;return o(e)&&("function"!=typeof(t=e.constructor)||t!==Array&&!o(t.prototype)||(t=void 0),r(t)&&null===(t=t[i])&&(t=void 0)),void 0===t?Array:t}},function(e,t,n){var r=n(16);e.exports=Array.isArray||function(e){return"Array"==r(e)}},function(e,t,n){for(var r=n(64),o=n(35),i=n(15),a=n(5),l=n(11),u=n(22),c=n(4),s=c("iterator"),f=c("toStringTag"),d=u.Array,p={CSSRuleList:!0,CSSStyleDeclaration:!1,CSSValueList:!1,ClientRectList:!1,DOMRectList:!1,DOMStringList:!1,DOMTokenList:!0,DataTransferItemList:!1,FileList:!1,HTMLAllCollection:!1,HTMLCollection:!1,HTMLFormElement:!1,HTMLSelectElement:!1,MediaList:!0,MimeTypeArray:!1,NamedNodeMap:!1,NodeList:!0,PaintRequestList:!1,Plugin:!1,PluginArray:!1,SVGLengthList:!1,SVGNumberList:!1,SVGPathSegList:!1,SVGPointList:!1,SVGStringList:!1,SVGTransformList:!1,SourceBufferList:!1,StyleSheetList:!0,TextTrackCueList:!1,TextTrackList:!1,TouchList:!1},h=o(p),m=0;m<h.length;m++){var v,g=h[m],y=p[g],b=a[g],w=b&&b.prototype;if(w&&(w[s]||l(w,s,d),w[f]||l(w,f,g),u[g]=d,y))for(v in r)w[v]||i(w,v,r[v],!0)}},function(e,t){e.exports=function(e,t){return{value:t,done:!!e}}},function(e,t,n){"use strict";var r=n(120),o=n(53),i=n(44),a={};n(11)(a,n(4)("iterator"),function(){return this}),e.exports=function(e,t,n){e.prototype=r(a,{next:o(1,n)}),i(e,t+" Iterator")}},function(e,t,n){var r=n(12),o=n(121),i=n(56),a=n(40)("IE_PROTO"),l=function(){},u=function(){var 
e,t=n(32)("iframe"),r=i.length;for(t.style.display="none",n(58).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write("<script>document.F=Object<\/script>"),e.close(),u=e.F;r--;)delete u.prototype[i[r]];return u()};e.exports=Object.create||function(e,t){var n;return null!==e?(l.prototype=r(e),n=new l,l.prototype=null,n[a]=e):n=u(),void 0===t?n:o(n,t)}},function(e,t,n){var r=n(24),o=n(12),i=n(35);e.exports=n(18)?Object.defineProperties:function(e,t){o(e);for(var n,a=i(t),l=a.length,u=0;l>u;)r.f(e,n=a[u++],t[n]);return e}},function(e,t,n){var r=n(27),o=n(41),i=n(40)("IE_PROTO"),a=Object.prototype;e.exports=Object.getPrototypeOf||function(e){return e=o(e),r(e,i)?e[i]:"function"==typeof e.constructor&&e instanceof e.constructor?e.constructor.prototype:e instanceof Object?a:null}},function(e,t,n){"use strict";var r=n(124)(!0);n(65)(String,"String",function(e){this._t=String(e),this._i=0},function(){var e,t=this._t,n=this._i;return n>=t.length?{value:void 0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t,n){var r=n(39),o=n(19);e.exports=function(e){return function(t,n){var i,a,l=String(o(t)),u=r(n),c=l.length;return u<0||u>=c?e?"":void 0:(i=l.charCodeAt(u))<55296||i>56319||u+1===c||(a=l.charCodeAt(u+1))<56320||a>57343?e?l.charAt(u):i:e?l.slice(u,u+2):a-56320+(i-55296<<10)+65536}}},function(e,t,n){n(26)("split",2,function(e,t,r){"use strict";var o=n(63),i=r,a=[].push;if("c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length){var l=void 0===/()??/.exec("")[1];r=function(e,t){var n=String(this);if(void 0===e&&0===t)return[];if(!o(e))return i.call(n,e,t);var r,u,c,s,f,d=[],p=(e.ignoreCase?"i":"")+(e.multiline?"m":"")+(e.unicode?"u":"")+(e.sticky?"y":""),h=0,m=void 0===t?4294967295:t>>>0,v=new RegExp(e.source,p+"g");for(l||(r=new 
RegExp("^"+v.source+"$(?!\\s)",p));(u=v.exec(n))&&!((c=u.index+u[0].length)>h&&(d.push(n.slice(h,u.index)),!l&&u.length>1&&u[0].replace(r,function(){for(f=1;f<arguments.length-2;f++)void 0===arguments[f]&&(u[f]=void 0)}),u.length>1&&u.index<n.length&&a.apply(d,u.slice(1)),s=u[0].length,h=c,d.length>=m));)v.lastIndex===u.index&&v.lastIndex++;return h===n.length?!s&&v.test("")||d.push(""):d.push(n.slice(h)),d.length>m?d.slice(0,m):d}}else"0".split(void 0,0).length&&(r=function(e,t){return void 0===e&&0===t?[]:i.call(this,e,t)});return[function(n,o){var i=e(this),a=null==n?void 0:n[t];return void 0!==a?a.call(n,i,o):r.call(String(i),n,o)},r]})},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=function(e,t){var n=m(t);return e.addEventListener("click",n),function(){return e.removeEventListener("click",n)}},t.routeThroughBrowserOrApp=t.hashShouldBeFollowed=t.pathIsNotHandledByApp=t.urlsAreOnSameOrigin=t.authorIsForcingNavigation=t.anchorsTargetIsEquivalentToSelf=t.findClosestAnchor=t.navigationWasHandledElsewhere=t.slashedPathname=t.userIsForcingNavigation=void 0;var o=r(n(127)),i=n(51),a=function(e){return 0!==e.button||e.altKey||e.ctrlKey||e.metaKey||e.shiftKey};t.userIsForcingNavigation=a;var l=function(e){return"/"===e[0]?e:"/"+e};t.slashedPathname=l;var u=function(e){return e.defaultPrevented};t.navigationWasHandledElsewhere=u;var c=function(e){for(;e.parentNode;e=e.parentNode)if("a"===e.nodeName.toLowerCase())return e;return null};t.findClosestAnchor=c;var s=function(e){return!1===e.hasAttribute("target")||null==e.target||-1!==["_self",""].indexOf(e.target)||"_parent"===e.target&&(!e.ownerDocument.defaultView.parent||e.ownerDocument.defaultView.parent===e.ownerDocument.defaultView)||"_top"===e.target&&(!e.ownerDocument.defaultView.top||e.ownerDocument.defaultView.top===e.ownerDocument.defaultView)};t.anchorsTargetIsEquivalentToSelf=s;var f=function(e){return!0===e.hasAttribute("download")||!1===s(e)};t.authorIsForcingNavigation=f;var 
d=function(e,t){return e.protocol===t.protocol&&e.host===t.host};t.urlsAreOnSameOrigin=d;var p=function(e,t){return!1===t.test(l(e.pathname))||-1!==e.pathname.search(/^.*\.((?!htm)[a-z0-9]{1,5})$/i)};t.pathIsNotHandledByApp=p;var h=function(e,t){return""!==t.hash&&(""===t.pathname||t.pathname===e.pathname)};t.hashShouldBeFollowed=h;var m=function(e){return function(t){if(a(t))return!0;if(u(t))return!0;var n=c(t.target);if(null==n)return!0;if(f(n))return!0;var r=document.createElement("a");r.href=n.href;var s=document.createElement("a");if(s.href=window.location.href,!1===d(s,r))return!0;var m=new RegExp("^"+(0,o.default)((0,i.withPrefix)("/")));if(p(r,m))return!0;if(h(s,r))return!0;t.preventDefault();var v=l(r.pathname).replace(m,"/");return e(""+v+r.search+r.hash),!1}};t.routeThroughBrowserOrApp=m},function(e,t,n){"use strict";var r=/[|\\{}()[\]^$+*?.]/g;e.exports=function(e){if("string"!=typeof e)throw new TypeError("Expected a string");return e.replace(r,"\\$&")}},function(e,t,n){"use strict";var r=n(74);t.onClientEntry=function(){"undefined"!=typeof window&&void 0!==window.__EMOTION_CRITICAL_CSS_IDS__&&(0,r.hydrate)(window.__EMOTION_CRITICAL_CSS_IDS__)}},function(e,t,n){"use strict";t.registerServiceWorker=function(){return!0};var r=!0,o=[];t.onPostPrefetchPathname=function(e){var t=e.pathname;r&&"serviceWorker"in navigator&&o.push(t)},t.onServiceWorkerActive=function(e){var t=e.getResourceURLsForPathname,n=e.serviceWorker;r=!1;var i=document.querySelectorAll("\n head > script[src],\n head > link[as=script],\n head > link[rel=stylesheet],\n head > style[data-href]\n "),a=[].slice.call(i).map(function(e){return e.src||e.href||e.getAttribute("data-href")}),l=[];o.forEach(function(e){return t(e).forEach(function(e){return l.push(e)})}),n.active.postMessage({api:"gatsby-runtime-cache",resources:a.concat(l)})}},function(e,t,n){"use strict";var r=n(0),o=n(52),i=n(131);function a(e){for(var 
t=arguments.length-1,n="https://reactjs.org/docs/error-decoder.html?invariant="+e,r=0;r<t;r++)n+="&args[]="+encodeURIComponent(arguments[r+1]);!function(e,t,n,r,o,i,a,l){if(!e){if(e=void 0,void 0===t)e=Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var u=[n,r,o,i,a,l],c=0;(e=Error(t.replace(/%s/g,function(){return u[c++]}))).name="Invariant Violation"}throw e.framesToPop=1,e}}(!1,"Minified React error #"+e+"; visit %s for the full message or use the non-minified dev environment for full errors and additional helpful warnings. ",n)}r||a("227");var l=!1,u=null,c=!1,s=null,f={onError:function(e){l=!0,u=e}};function d(e,t,n,r,o,i,a,c,s){l=!1,u=null,function(e,t,n,r,o,i,a,l,u){var c=Array.prototype.slice.call(arguments,3);try{t.apply(n,c)}catch(s){this.onError(s)}}.apply(f,arguments)}var p=null,h={};function m(){if(p)for(var e in h){var t=h[e],n=p.indexOf(e);if(-1<n||a("96",e),!g[n])for(var r in t.extractEvents||a("97",e),g[n]=t,n=t.eventTypes){var o=void 0,i=n[r],l=t,u=r;y.hasOwnProperty(u)&&a("99",u),y[u]=i;var c=i.phasedRegistrationNames;if(c){for(o in c)c.hasOwnProperty(o)&&v(c[o],l,u);o=!0}else i.registrationName?(v(i.registrationName,l,u),o=!0):o=!1;o||a("98",r,e)}}}function v(e,t,n){b[e]&&a("100",e),b[e]=t,w[e]=t.eventTypes[n].dependencies}var g=[],y={},b={},w={},_=null,k=null,x=null;function T(e,t,n,r){t=e.type||"unknown-event",e.currentTarget=x(r),function(e,t,n,r,o,i,f,p,h){if(d.apply(this,arguments),l){if(l){var m=u;l=!1,u=null}else a("198"),m=void 0;c||(c=!0,s=m)}}(t,n,void 0,e),e.currentTarget=null}function S(e,t){return null==t&&a("30"),null==e?t:Array.isArray(e)?Array.isArray(t)?(e.push.apply(e,t),e):(e.push(t),e):Array.isArray(t)?[e].concat(t):[e,t]}function E(e,t,n){Array.isArray(e)?e.forEach(t,n):e&&t.call(n,e)}var C=null;function P(e,t){if(e){var n=e._dispatchListeners,r=e._dispatchInstances;if(Array.isArray(n))for(var 
o=0;o<n.length&&!e.isPropagationStopped();o++)T(e,t,n[o],r[o]);else n&&T(e,t,n,r);e._dispatchListeners=null,e._dispatchInstances=null,e.isPersistent()||e.constructor.release(e)}}function O(e){return P(e,!0)}function R(e){return P(e,!1)}var A={injectEventPluginOrder:function(e){p&&a("101"),p=Array.prototype.slice.call(e),m()},injectEventPluginsByName:function(e){var t,n=!1;for(t in e)if(e.hasOwnProperty(t)){var r=e[t];h.hasOwnProperty(t)&&h[t]===r||(h[t]&&a("102",t),h[t]=r,n=!0)}n&&m()}};function j(e,t){var n=e.stateNode;if(!n)return null;var r=_(n);if(!r)return null;n=r[t];e:switch(t){case"onClick":case"onClickCapture":case"onDoubleClick":case"onDoubleClickCapture":case"onMouseDown":case"onMouseDownCapture":case"onMouseMove":case"onMouseMoveCapture":case"onMouseUp":case"onMouseUpCapture":(r=!r.disabled)||(r=!("button"===(e=e.type)||"input"===e||"select"===e||"textarea"===e)),e=!r;break e;default:e=!1}return e?null:(n&&"function"!=typeof n&&a("231",t,typeof n),n)}function N(e,t){if(null!==e&&(C=S(C,e)),e=C,C=null,e&&(E(e,t?O:R),C&&a("95"),c))throw t=s,c=!1,s=null,t}var L=Math.random().toString(36).slice(2),M="__reactInternalInstance$"+L,F="__reactEventHandlers$"+L;function I(e){if(e[M])return e[M];for(;!e[M];){if(!e.parentNode)return null;e=e.parentNode}return 5===(e=e[M]).tag||6===e.tag?e:null}function U(e){return!(e=e[M])||5!==e.tag&&6!==e.tag?null:e}function D(e){if(5===e.tag||6===e.tag)return e.stateNode;a("33")}function W(e){return e[F]||null}function z(e){do{e=e.return}while(e&&5!==e.tag);return e||null}function B(e,t,n){(t=j(e,n.dispatchConfig.phasedRegistrationNames[t]))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function H(e){if(e&&e.dispatchConfig.phasedRegistrationNames){for(var t=e._targetInst,n=[];t;)n.push(t),t=z(t);for(t=n.length;0<t--;)B(n[t],"captured",e);for(t=0;t<n.length;t++)B(n[t],"bubbled",e)}}function 
V(e,t,n){e&&n&&n.dispatchConfig.registrationName&&(t=j(e,n.dispatchConfig.registrationName))&&(n._dispatchListeners=S(n._dispatchListeners,t),n._dispatchInstances=S(n._dispatchInstances,e))}function $(e){e&&e.dispatchConfig.registrationName&&V(e._targetInst,null,e)}function q(e){E(e,H)}var K=!("undefined"==typeof window||!window.document||!window.document.createElement);function G(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var Q={animationend:G("Animation","AnimationEnd"),animationiteration:G("Animation","AnimationIteration"),animationstart:G("Animation","AnimationStart"),transitionend:G("Transition","TransitionEnd")},Y={},X={};function J(e){if(Y[e])return Y[e];if(!Q[e])return e;var t,n=Q[e];for(t in n)if(n.hasOwnProperty(t)&&t in X)return Y[e]=n[t];return e}K&&(X=document.createElement("div").style,"AnimationEvent"in window||(delete Q.animationend.animation,delete Q.animationiteration.animation,delete Q.animationstart.animation),"TransitionEvent"in window||delete Q.transitionend.transition);var Z=J("animationend"),ee=J("animationiteration"),te=J("animationstart"),ne=J("transitionend"),re="abort canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange seeked seeking stalled suspend timeupdate volumechange waiting".split(" "),oe=null,ie=null,ae=null;function le(){if(ae)return ae;var e,t,n=ie,r=n.length,o="value"in oe?oe.value:oe.textContent,i=o.length;for(e=0;e<r&&n[e]===o[e];e++);var a=r-e;for(t=1;t<=a&&n[r-t]===o[i-t];t++);return ae=o.slice(e,1<t?1-t:void 0)}function ue(){return!0}function ce(){return!1}function se(e,t,n,r){for(var o in this.dispatchConfig=e,this._targetInst=t,this.nativeEvent=n,e=this.constructor.Interface)e.hasOwnProperty(o)&&((t=e[o])?this[o]=t(n):"target"===o?this.target=r:this[o]=n[o]);return 
this.isDefaultPrevented=(null!=n.defaultPrevented?n.defaultPrevented:!1===n.returnValue)?ue:ce,this.isPropagationStopped=ce,this}function fe(e,t,n,r){if(this.eventPool.length){var o=this.eventPool.pop();return this.call(o,e,t,n,r),o}return new this(e,t,n,r)}function de(e){e instanceof this||a("279"),e.destructor(),10>this.eventPool.length&&this.eventPool.push(e)}function pe(e){e.eventPool=[],e.getPooled=fe,e.release=de}o(se.prototype,{preventDefault:function(){this.defaultPrevented=!0;var e=this.nativeEvent;e&&(e.preventDefault?e.preventDefault():"unknown"!=typeof e.returnValue&&(e.returnValue=!1),this.isDefaultPrevented=ue)},stopPropagation:function(){var e=this.nativeEvent;e&&(e.stopPropagation?e.stopPropagation():"unknown"!=typeof e.cancelBubble&&(e.cancelBubble=!0),this.isPropagationStopped=ue)},persist:function(){this.isPersistent=ue},isPersistent:ce,destructor:function(){var e,t=this.constructor.Interface;for(e in t)this[e]=null;this.nativeEvent=this._targetInst=this.dispatchConfig=null,this.isPropagationStopped=this.isDefaultPrevented=ce,this._dispatchInstances=this._dispatchListeners=null}}),se.Interface={type:null,target:null,currentTarget:function(){return null},eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(e){return e.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null},se.extend=function(e){function t(){}function n(){return r.apply(this,arguments)}var r=this;t.prototype=r.prototype;var i=new t;return o(i,n.prototype),n.prototype=i,n.prototype.constructor=n,n.Interface=o({},r.Interface,e),n.extend=r.extend,pe(n),n},pe(se);var he=se.extend({data:null}),me=se.extend({data:null}),ve=[9,13,27,32],ge=K&&"CompositionEvent"in window,ye=null;K&&"documentMode"in document&&(ye=document.documentMode);var be=K&&"TextEvent"in 
window&&!ye,we=K&&(!ge||ye&&8<ye&&11>=ye),_e=String.fromCharCode(32),ke={beforeInput:{phasedRegistrationNames:{bubbled:"onBeforeInput",captured:"onBeforeInputCapture"},dependencies:["compositionend","keypress","textInput","paste"]},compositionEnd:{phasedRegistrationNames:{bubbled:"onCompositionEnd",captured:"onCompositionEndCapture"},dependencies:"blur compositionend keydown keypress keyup mousedown".split(" ")},compositionStart:{phasedRegistrationNames:{bubbled:"onCompositionStart",captured:"onCompositionStartCapture"},dependencies:"blur compositionstart keydown keypress keyup mousedown".split(" ")},compositionUpdate:{phasedRegistrationNames:{bubbled:"onCompositionUpdate",captured:"onCompositionUpdateCapture"},dependencies:"blur compositionupdate keydown keypress keyup mousedown".split(" ")}},xe=!1;function Te(e,t){switch(e){case"keyup":return-1!==ve.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"blur":return!0;default:return!1}}function Se(e){return"object"==typeof(e=e.detail)&&"data"in e?e.data:null}var Ee=!1;var Ce={eventTypes:ke,extractEvents:function(e,t,n,r){var o=void 0,i=void 0;if(ge)e:{switch(e){case"compositionstart":o=ke.compositionStart;break e;case"compositionend":o=ke.compositionEnd;break e;case"compositionupdate":o=ke.compositionUpdate;break e}o=void 0}else Ee?Te(e,n)&&(o=ke.compositionEnd):"keydown"===e&&229===n.keyCode&&(o=ke.compositionStart);return o?(we&&"ko"!==n.locale&&(Ee||o!==ke.compositionStart?o===ke.compositionEnd&&Ee&&(i=le()):(ie="value"in(oe=r)?oe.value:oe.textContent,Ee=!0)),o=he.getPooled(o,t,n,r),i?o.data=i:null!==(i=Se(n))&&(o.data=i),q(o),i=o):i=null,(e=be?function(e,t){switch(e){case"compositionend":return Se(t);case"keypress":return 32!==t.which?null:(xe=!0,_e);case"textInput":return(e=t.data)===_e&&xe?null:e;default:return null}}(e,n):function(e,t){if(Ee)return"compositionend"===e||!ge&&Te(e,t)?(e=le(),ae=ie=oe=null,Ee=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1<t.char.length)return t.char;if(t.which)return String.fromCharCode(t.which)}return null;case"compositionend":return we&&"ko"!==t.locale?null:t.data;default:return null}}(e,n))?((t=me.getPooled(ke.beforeInput,t,n,r)).data=e,q(t)):t=null,null===i?t:null===t?i:[i,t]}},Pe=null,Oe=null,Re=null;function Ae(e){if(e=k(e)){"function"!=typeof Pe&&a("280");var t=_(e.stateNode);Pe(e.stateNode,e.type,t)}}function je(e){Oe?Re?Re.push(e):Re=[e]:Oe=e}function Ne(){if(Oe){var e=Oe,t=Re;if(Re=Oe=null,Ae(e),t)for(e=0;e<t.length;e++)Ae(t[e])}}function Le(e,t){return e(t)}function Me(e,t,n){return e(t,n)}function Fe(){}var Ie=!1;function Ue(e,t){if(Ie)return e(t);Ie=!0;try{return Le(e,t)}finally{Ie=!1,(null!==Oe||null!==Re)&&(Fe(),Ne())}}var De={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function We(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return"input"===t?!!De[e.type]:"textarea"===t}function ze(e){return(e=e.target||e.srcElement||window).correspondingUseElement&&(e=e.correspondingUseElement),3===e.nodeType?e.parentNode:e}function Be(e){if(!K)return!1;var t=(e="on"+e)in document;return t||((t=document.createElement("div")).setAttribute(e,"return;"),t="function"==typeof t[e]),t}function He(e){var t=e.type;return(e=e.nodeName)&&"input"===e.toLowerCase()&&("checkbox"===t||"radio"===t)}function Ve(e){e._valueTracker||(e._valueTracker=function(e){var t=He(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&void 0!==n&&"function"==typeof n.get&&"function"==typeof n.set){var o=n.get,i=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return o.call(this)},set:function(e){r=""+e,i.call(this,e)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return 
r},setValue:function(e){r=""+e},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}(e))}function $e(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=He(e)?e.checked?"true":"false":e.value),(e=r)!==n&&(t.setValue(e),!0)}var qe=r.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,Ke=/^(.*)[\\\/]/,Ge="function"==typeof Symbol&&Symbol.for,Qe=Ge?Symbol.for("react.element"):60103,Ye=Ge?Symbol.for("react.portal"):60106,Xe=Ge?Symbol.for("react.fragment"):60107,Je=Ge?Symbol.for("react.strict_mode"):60108,Ze=Ge?Symbol.for("react.profiler"):60114,et=Ge?Symbol.for("react.provider"):60109,tt=Ge?Symbol.for("react.context"):60110,nt=Ge?Symbol.for("react.concurrent_mode"):60111,rt=Ge?Symbol.for("react.forward_ref"):60112,ot=Ge?Symbol.for("react.suspense"):60113,it=Ge?Symbol.for("react.memo"):60115,at=Ge?Symbol.for("react.lazy"):60116,lt="function"==typeof Symbol&&Symbol.iterator;function ut(e){return null===e||"object"!=typeof e?null:"function"==typeof(e=lt&&e[lt]||e["@@iterator"])?e:null}function ct(e){if(null==e)return null;if("function"==typeof e)return e.displayName||e.name||null;if("string"==typeof e)return e;switch(e){case nt:return"ConcurrentMode";case Xe:return"Fragment";case Ye:return"Portal";case Ze:return"Profiler";case Je:return"StrictMode";case ot:return"Suspense"}if("object"==typeof e)switch(e.$$typeof){case tt:return"Context.Consumer";case et:return"Context.Provider";case rt:var t=e.render;return t=t.displayName||t.name||"",e.displayName||(""!==t?"ForwardRef("+t+")":"ForwardRef");case it:return ct(e.type);case at:if(e=1===e._status?e._result:null)return ct(e)}return null}function st(e){var t="";do{e:switch(e.tag){case 2:case 16:case 0:case 1:case 5:case 8:var n=e._debugOwner,r=e._debugSource,o=ct(e.type),i=null;n&&(i=ct(n.type)),n=o,o="",r?o=" (at "+r.fileName.replace(Ke,"")+":"+r.lineNumber+")":i&&(o=" (created by "+i+")"),i="\n in "+(n||"Unknown")+o;break e;default:i=""}t+=i,e=e.return}while(e);return t}var 
ft=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,dt=Object.prototype.hasOwnProperty,pt={},ht={};function mt(e,t,n,r,o){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=o,this.mustUseProperty=n,this.propertyName=e,this.type=t}var vt={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){vt[e]=new mt(e,0,!1,e,null)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];vt[t]=new mt(t,1,!1,e[1],null)}),["contentEditable","draggable","spellCheck","value"].forEach(function(e){vt[e]=new mt(e,2,!1,e.toLowerCase(),null)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){vt[e]=new mt(e,2,!1,e,null)}),"allowFullScreen async autoFocus autoPlay controls default defer disabled formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){vt[e]=new mt(e,3,!1,e.toLowerCase(),null)}),["checked","multiple","muted","selected"].forEach(function(e){vt[e]=new mt(e,3,!0,e,null)}),["capture","download"].forEach(function(e){vt[e]=new mt(e,4,!1,e,null)}),["cols","rows","size","span"].forEach(function(e){vt[e]=new mt(e,6,!1,e,null)}),["rowSpan","start"].forEach(function(e){vt[e]=new mt(e,5,!1,e.toLowerCase(),null)});var gt=/[\-:]([a-z])/g;function yt(e){return e[1].toUpperCase()}function bt(e,t,n,r){var 
o=vt.hasOwnProperty(t)?vt[t]:null;(null!==o?0===o.type:!r&&(2<t.length&&("o"===t[0]||"O"===t[0])&&("n"===t[1]||"N"===t[1])))||(function(e,t,n,r){if(null==t||function(e,t,n,r){if(null!==n&&0===n.type)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return!r&&(null!==n?!n.acceptsBooleans:"data-"!==(e=e.toLowerCase().slice(0,5))&&"aria-"!==e);default:return!1}}(e,t,n,r))return!0;if(r)return!1;if(null!==n)switch(n.type){case 3:return!t;case 4:return!1===t;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}(t,n,o,r)&&(n=null),r||null===o?function(e){return!!dt.call(ht,e)||!dt.call(pt,e)&&(ft.test(e)?ht[e]=!0:(pt[e]=!0,!1))}(t)&&(null===n?e.removeAttribute(t):e.setAttribute(t,""+n)):o.mustUseProperty?e[o.propertyName]=null===n?3!==o.type&&"":n:(t=o.attributeName,r=o.attributeNamespace,null===n?e.removeAttribute(t):(n=3===(o=o.type)||4===o&&!0===n?"":""+n,r?e.setAttributeNS(r,t,n):e.setAttribute(t,n))))}function wt(e){switch(typeof e){case"boolean":case"number":case"object":case"string":case"undefined":return e;default:return""}}function _t(e,t){var n=t.checked;return o({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:null!=n?n:e._wrapperState.initialChecked})}function kt(e,t){var n=null==t.defaultValue?"":t.defaultValue,r=null!=t.checked?t.checked:t.defaultChecked;n=wt(null!=t.value?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:"checkbox"===t.type||"radio"===t.type?null!=t.checked:null!=t.value}}function xt(e,t){null!=(t=t.checked)&&bt(e,"checked",t,!1)}function Tt(e,t){xt(e,t);var n=wt(t.value),r=t.type;if(null!=n)"number"===r?(0===n&&""===e.value||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if("submit"===r||"reset"===r)return void e.removeAttribute("value");t.hasOwnProperty("value")?Et(e,t.type,n):t.hasOwnProperty("defaultValue")&&Et(e,t.type,wt(t.defaultValue)),null==t.checked&&null!=t.defaultChecked&&(e.defaultChecked=!!t.defaultChecked)}function 
St(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!("submit"!==r&&"reset"!==r||void 0!==t.value&&null!==t.value))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}""!==(n=e.name)&&(e.name=""),e.defaultChecked=!e.defaultChecked,e.defaultChecked=!!e._wrapperState.initialChecked,""!==n&&(e.name=n)}function Et(e,t,n){"number"===t&&e.ownerDocument.activeElement===e||(null==n?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(gt,yt);vt[t]=new mt(t,1,!1,e,null)}),"xlink:actuate xlink:arcrole xlink:href xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(gt,yt);vt[t]=new mt(t,1,!1,e,"http://www.w3.org/1999/xlink")}),["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(gt,yt);vt[t]=new 
mt(t,1,!1,e,"http://www.w3.org/XML/1998/namespace")}),vt.tabIndex=new mt("tabIndex",1,!1,"tabindex",null);var Ct={change:{phasedRegistrationNames:{bubbled:"onChange",captured:"onChangeCapture"},dependencies:"blur change click focus input keydown keyup selectionchange".split(" ")}};function Pt(e,t,n){return(e=se.getPooled(Ct.change,e,t,n)).type="change",je(n),q(e),e}var Ot=null,Rt=null;function At(e){N(e,!1)}function jt(e){if($e(D(e)))return e}function Nt(e,t){if("change"===e)return t}var Lt=!1;function Mt(){Ot&&(Ot.detachEvent("onpropertychange",Ft),Rt=Ot=null)}function Ft(e){"value"===e.propertyName&&jt(Rt)&&Ue(At,e=Pt(Rt,e,ze(e)))}function It(e,t,n){"focus"===e?(Mt(),Rt=n,(Ot=t).attachEvent("onpropertychange",Ft)):"blur"===e&&Mt()}function Ut(e){if("selectionchange"===e||"keyup"===e||"keydown"===e)return jt(Rt)}function Dt(e,t){if("click"===e)return jt(t)}function Wt(e,t){if("input"===e||"change"===e)return jt(t)}K&&(Lt=Be("input")&&(!document.documentMode||9<document.documentMode));var zt={eventTypes:Ct,_isInputEventSupported:Lt,extractEvents:function(e,t,n,r){var o=t?D(t):window,i=void 0,a=void 0,l=o.nodeName&&o.nodeName.toLowerCase();if("select"===l||"input"===l&&"file"===o.type?i=Nt:We(o)?Lt?i=Wt:(i=Ut,a=It):(l=o.nodeName)&&"input"===l.toLowerCase()&&("checkbox"===o.type||"radio"===o.type)&&(i=Dt),i&&(i=i(e,t)))return Pt(i,n,r);a&&a(e,o,t),"blur"===e&&(e=o._wrapperState)&&e.controlled&&"number"===o.type&&Et(o,"number",o.value)}},Bt=se.extend({view:null,detail:null}),Ht={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function Vt(e){var t=this.nativeEvent;return t.getModifierState?t.getModifierState(e):!!(e=Ht[e])&&!!t[e]}function $t(){return Vt}var qt=0,Kt=0,Gt=!1,Qt=!1,Yt=Bt.extend({screenX:null,screenY:null,clientX:null,clientY:null,pageX:null,pageY:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,getModifierState:$t,button:null,buttons:null,relatedTarget:function(e){return 
e.relatedTarget||(e.fromElement===e.srcElement?e.toElement:e.fromElement)},movementX:function(e){if("movementX"in e)return e.movementX;var t=qt;return qt=e.screenX,Gt?"mousemove"===e.type?e.screenX-t:0:(Gt=!0,0)},movementY:function(e){if("movementY"in e)return e.movementY;var t=Kt;return Kt=e.screenY,Qt?"mousemove"===e.type?e.screenY-t:0:(Qt=!0,0)}}),Xt=Yt.extend({pointerId:null,width:null,height:null,pressure:null,tangentialPressure:null,tiltX:null,tiltY:null,twist:null,pointerType:null,isPrimary:null}),Jt={mouseEnter:{registrationName:"onMouseEnter",dependencies:["mouseout","mouseover"]},mouseLeave:{registrationName:"onMouseLeave",dependencies:["mouseout","mouseover"]},pointerEnter:{registrationName:"onPointerEnter",dependencies:["pointerout","pointerover"]},pointerLeave:{registrationName:"onPointerLeave",dependencies:["pointerout","pointerover"]}},Zt={eventTypes:Jt,extractEvents:function(e,t,n,r){var o="mouseover"===e||"pointerover"===e,i="mouseout"===e||"pointerout"===e;if(o&&(n.relatedTarget||n.fromElement)||!i&&!o)return null;if(o=r.window===r?r:(o=r.ownerDocument)?o.defaultView||o.parentWindow:window,i?(i=t,t=(t=n.relatedTarget||n.toElement)?I(t):null):i=null,i===t)return null;var a=void 0,l=void 0,u=void 0,c=void 0;"mouseout"===e||"mouseover"===e?(a=Yt,l=Jt.mouseLeave,u=Jt.mouseEnter,c="mouse"):"pointerout"!==e&&"pointerover"!==e||(a=Xt,l=Jt.pointerLeave,u=Jt.pointerEnter,c="pointer");var s=null==i?o:D(i);if(o=null==t?o:D(t),(e=a.getPooled(l,i,n,r)).type=c+"leave",e.target=s,e.relatedTarget=o,(n=a.getPooled(u,t,n,r)).type=c+"enter",n.target=o,n.relatedTarget=s,r=t,i&&r)e:{for(o=r,c=0,a=t=i;a;a=z(a))c++;for(a=0,u=o;u;u=z(u))a++;for(;0<c-a;)t=z(t),c--;for(;0<a-c;)o=z(o),a--;for(;c--;){if(t===o||t===o.alternate)break e;t=z(t),o=z(o)}t=null}else 
t=null;for(o=t,t=[];i&&i!==o&&(null===(c=i.alternate)||c!==o);)t.push(i),i=z(i);for(i=[];r&&r!==o&&(null===(c=r.alternate)||c!==o);)i.push(r),r=z(r);for(r=0;r<t.length;r++)V(t[r],"bubbled",e);for(r=i.length;0<r--;)V(i[r],"captured",n);return[e,n]}},en=Object.prototype.hasOwnProperty;function tn(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function nn(e,t){if(tn(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),r=Object.keys(t);if(n.length!==r.length)return!1;for(r=0;r<n.length;r++)if(!en.call(t,n[r])||!tn(e[n[r]],t[n[r]]))return!1;return!0}function rn(e){var t=e;if(e.alternate)for(;t.return;)t=t.return;else{if(0!=(2&t.effectTag))return 1;for(;t.return;)if(0!=(2&(t=t.return).effectTag))return 1}return 3===t.tag?2:3}function on(e){2!==rn(e)&&a("188")}function an(e){if(!(e=function(e){var t=e.alternate;if(!t)return 3===(t=rn(e))&&a("188"),1===t?null:e;for(var n=e,r=t;;){var o=n.return,i=o?o.alternate:null;if(!o||!i)break;if(o.child===i.child){for(var l=o.child;l;){if(l===n)return on(o),e;if(l===r)return on(o),t;l=l.sibling}a("188")}if(n.return!==r.return)n=o,r=i;else{l=!1;for(var u=o.child;u;){if(u===n){l=!0,n=o,r=i;break}if(u===r){l=!0,r=o,n=i;break}u=u.sibling}if(!l){for(u=i.child;u;){if(u===n){l=!0,n=i,r=o;break}if(u===r){l=!0,r=i,n=o;break}u=u.sibling}l||a("189")}}n.alternate!==r&&a("190")}return 3!==n.tag&&a("188"),n.stateNode.current===n?e:t}(e)))return null;for(var t=e;;){if(5===t.tag||6===t.tag)return t;if(t.child)t.child.return=t,t=t.child;else{if(t===e)break;for(;!t.sibling;){if(!t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}}return null}var ln=se.extend({animationName:null,elapsedTime:null,pseudoElement:null}),un=se.extend({clipboardData:function(e){return"clipboardData"in e?e.clipboardData:window.clipboardData}}),cn=Bt.extend({relatedTarget:null});function sn(e){var t=e.keyCode;return"charCode"in 
e?0===(e=e.charCode)&&13===t&&(e=13):e=t,10===e&&(e=13),32<=e||13===e?e:0}var fn={Esc:"Escape",Spacebar:" ",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},dn={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"},pn=Bt.extend({key:function(e){if(e.key){var t=fn[e.key]||e.key;if("Unidentified"!==t)return t}return"keypress"===e.type?13===(e=sn(e))?"Enter":String.fromCharCode(e):"keydown"===e.type||"keyup"===e.type?dn[e.keyCode]||"Unidentified":""},location:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,repeat:null,locale:null,getModifierState:$t,charCode:function(e){return"keypress"===e.type?sn(e):0},keyCode:function(e){return"keydown"===e.type||"keyup"===e.type?e.keyCode:0},which:function(e){return"keypress"===e.type?sn(e):"keydown"===e.type||"keyup"===e.type?e.keyCode:0}}),hn=Yt.extend({dataTransfer:null}),mn=Bt.extend({touches:null,targetTouches:null,changedTouches:null,altKey:null,metaKey:null,ctrlKey:null,shiftKey:null,getModifierState:$t}),vn=se.extend({propertyName:null,elapsedTime:null,pseudoElement:null}),gn=Yt.extend({deltaX:function(e){return"deltaX"in e?e.deltaX:"wheelDeltaX"in e?-e.wheelDeltaX:0},deltaY:function(e){return"deltaY"in e?e.deltaY:"wheelDeltaY"in e?-e.wheelDeltaY:"wheelDelta"in 
e?-e.wheelDelta:0},deltaZ:null,deltaMode:null}),yn=[["abort","abort"],[Z,"animationEnd"],[ee,"animationIteration"],[te,"animationStart"],["canplay","canPlay"],["canplaythrough","canPlayThrough"],["drag","drag"],["dragenter","dragEnter"],["dragexit","dragExit"],["dragleave","dragLeave"],["dragover","dragOver"],["durationchange","durationChange"],["emptied","emptied"],["encrypted","encrypted"],["ended","ended"],["error","error"],["gotpointercapture","gotPointerCapture"],["load","load"],["loadeddata","loadedData"],["loadedmetadata","loadedMetadata"],["loadstart","loadStart"],["lostpointercapture","lostPointerCapture"],["mousemove","mouseMove"],["mouseout","mouseOut"],["mouseover","mouseOver"],["playing","playing"],["pointermove","pointerMove"],["pointerout","pointerOut"],["pointerover","pointerOver"],["progress","progress"],["scroll","scroll"],["seeking","seeking"],["stalled","stalled"],["suspend","suspend"],["timeupdate","timeUpdate"],["toggle","toggle"],["touchmove","touchMove"],[ne,"transitionEnd"],["waiting","waiting"],["wheel","wheel"]],bn={},wn={};function _n(e,t){var 
n=e[0],r="on"+((e=e[1])[0].toUpperCase()+e.slice(1));t={phasedRegistrationNames:{bubbled:r,captured:r+"Capture"},dependencies:[n],isInteractive:t},bn[e]=t,wn[n]=t}[["blur","blur"],["cancel","cancel"],["click","click"],["close","close"],["contextmenu","contextMenu"],["copy","copy"],["cut","cut"],["auxclick","auxClick"],["dblclick","doubleClick"],["dragend","dragEnd"],["dragstart","dragStart"],["drop","drop"],["focus","focus"],["input","input"],["invalid","invalid"],["keydown","keyDown"],["keypress","keyPress"],["keyup","keyUp"],["mousedown","mouseDown"],["mouseup","mouseUp"],["paste","paste"],["pause","pause"],["play","play"],["pointercancel","pointerCancel"],["pointerdown","pointerDown"],["pointerup","pointerUp"],["ratechange","rateChange"],["reset","reset"],["seeked","seeked"],["submit","submit"],["touchcancel","touchCancel"],["touchend","touchEnd"],["touchstart","touchStart"],["volumechange","volumeChange"]].forEach(function(e){_n(e,!0)}),yn.forEach(function(e){_n(e,!1)});var kn={eventTypes:bn,isInteractiveTopLevelEventType:function(e){return void 0!==(e=wn[e])&&!0===e.isInteractive},extractEvents:function(e,t,n,r){var o=wn[e];if(!o)return null;switch(e){case"keypress":if(0===sn(n))return null;case"keydown":case"keyup":e=pn;break;case"blur":case"focus":e=cn;break;case"click":if(2===n.button)return null;case"auxclick":case"dblclick":case"mousedown":case"mousemove":case"mouseup":case"mouseout":case"mouseover":case"contextmenu":e=Yt;break;case"drag":case"dragend":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"dragstart":case"drop":e=hn;break;case"touchcancel":case"touchend":case"touchmove":case"touchstart":e=mn;break;case Z:case ee:case te:e=ln;break;case ne:e=vn;break;case"scroll":e=Bt;break;case"wheel":e=gn;break;case"copy":case"cut":case"paste":e=un;break;case"gotpointercapture":case"lostpointercapture":case"pointercancel":case"pointerdown":case"pointermove":case"pointerout":case"pointerover":case"pointerup":e=Xt;break;default:e=se}return 
q(t=e.getPooled(o,t,n,r)),t}},xn=kn.isInteractiveTopLevelEventType,Tn=[];function Sn(e){var t=e.targetInst,n=t;do{if(!n){e.ancestors.push(n);break}var r;for(r=n;r.return;)r=r.return;if(!(r=3!==r.tag?null:r.stateNode.containerInfo))break;e.ancestors.push(n),n=I(r)}while(n);for(n=0;n<e.ancestors.length;n++){t=e.ancestors[n];var o=ze(e.nativeEvent);r=e.topLevelType;for(var i=e.nativeEvent,a=null,l=0;l<g.length;l++){var u=g[l];u&&(u=u.extractEvents(r,t,i,o))&&(a=S(a,u))}N(a,!1)}}var En=!0;function Cn(e,t){if(!t)return null;var n=(xn(e)?On:Rn).bind(null,e);t.addEventListener(e,n,!1)}function Pn(e,t){if(!t)return null;var n=(xn(e)?On:Rn).bind(null,e);t.addEventListener(e,n,!0)}function On(e,t){Me(Rn,e,t)}function Rn(e,t){if(En){var n=ze(t);if(null===(n=I(n))||"number"!=typeof n.tag||2===rn(n)||(n=null),Tn.length){var r=Tn.pop();r.topLevelType=e,r.nativeEvent=t,r.targetInst=n,e=r}else e={topLevelType:e,nativeEvent:t,targetInst:n,ancestors:[]};try{Ue(Sn,e)}finally{e.topLevelType=null,e.nativeEvent=null,e.targetInst=null,e.ancestors.length=0,10>Tn.length&&Tn.push(e)}}}var An={},jn=0,Nn="_reactListenersID"+(""+Math.random()).slice(2);function Ln(e){return Object.prototype.hasOwnProperty.call(e,Nn)||(e[Nn]=jn++,An[e[Nn]]={}),An[e[Nn]]}function Mn(e){if(void 0===(e=e||("undefined"!=typeof document?document:void 0)))return null;try{return e.activeElement||e.body}catch(t){return e.body}}function Fn(e){for(;e&&e.firstChild;)e=e.firstChild;return e}function In(e,t){var n,r=Fn(e);for(e=0;r;){if(3===r.nodeType){if(n=e+r.textContent.length,e<=t&&n>=t)return{node:r,offset:t-e};e=n}e:{for(;r;){if(r.nextSibling){r=r.nextSibling;break e}r=r.parentNode}r=void 0}r=Fn(r)}}function Un(){for(var e=window,t=Mn();t instanceof e.HTMLIFrameElement;){try{e=t.contentDocument.defaultView}catch(n){break}t=Mn(e.document)}return t}function Dn(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return 
t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var Wn=K&&"documentMode"in document&&11>=document.documentMode,zn={select:{phasedRegistrationNames:{bubbled:"onSelect",captured:"onSelectCapture"},dependencies:"blur contextmenu dragend focus keydown keyup mousedown mouseup selectionchange".split(" ")}},Bn=null,Hn=null,Vn=null,$n=!1;function qn(e,t){var n=t.window===t?t.document:9===t.nodeType?t:t.ownerDocument;return $n||null==Bn||Bn!==Mn(n)?null:("selectionStart"in(n=Bn)&&Dn(n)?n={start:n.selectionStart,end:n.selectionEnd}:n={anchorNode:(n=(n.ownerDocument&&n.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:n.anchorOffset,focusNode:n.focusNode,focusOffset:n.focusOffset},Vn&&nn(Vn,n)?null:(Vn=n,(e=se.getPooled(zn.select,Hn,e,t)).type="select",e.target=Bn,q(e),e))}var Kn={eventTypes:zn,extractEvents:function(e,t,n,r){var o,i=r.window===r?r.document:9===r.nodeType?r:r.ownerDocument;if(!(o=!i)){e:{i=Ln(i),o=w.onSelect;for(var a=0;a<o.length;a++){var l=o[a];if(!i.hasOwnProperty(l)||!i[l]){i=!1;break e}}i=!0}o=!i}if(o)return null;switch(i=t?D(t):window,e){case"focus":(We(i)||"true"===i.contentEditable)&&(Bn=i,Hn=t,Vn=null);break;case"blur":Vn=Hn=Bn=null;break;case"mousedown":$n=!0;break;case"contextmenu":case"mouseup":case"dragend":return $n=!1,qn(n,r);case"selectionchange":if(Wn)break;case"keydown":case"keyup":return qn(n,r)}return null}};function Gn(e,t){return e=o({children:void 0},t),(t=function(e){var t="";return r.Children.forEach(e,function(e){null!=e&&(t+=e)}),t}(t.children))&&(e.children=t),e}function Qn(e,t,n,r){if(e=e.options,t){t={};for(var o=0;o<n.length;o++)t["$"+n[o]]=!0;for(n=0;n<e.length;n++)o=t.hasOwnProperty("$"+e[n].value),e[n].selected!==o&&(e[n].selected=o),o&&r&&(e[n].defaultSelected=!0)}else{for(n=""+wt(n),t=null,o=0;o<e.length;o++){if(e[o].value===n)return 
e[o].selected=!0,void(r&&(e[o].defaultSelected=!0));null!==t||e[o].disabled||(t=e[o])}null!==t&&(t.selected=!0)}}function Yn(e,t){return null!=t.dangerouslySetInnerHTML&&a("91"),o({},t,{value:void 0,defaultValue:void 0,children:""+e._wrapperState.initialValue})}function Xn(e,t){var n=t.value;null==n&&(n=t.defaultValue,null!=(t=t.children)&&(null!=n&&a("92"),Array.isArray(t)&&(1>=t.length||a("93"),t=t[0]),n=t),null==n&&(n="")),e._wrapperState={initialValue:wt(n)}}function Jn(e,t){var n=wt(t.value),r=wt(t.defaultValue);null!=n&&((n=""+n)!==e.value&&(e.value=n),null==t.defaultValue&&e.defaultValue!==n&&(e.defaultValue=n)),null!=r&&(e.defaultValue=""+r)}function Zn(e){var t=e.textContent;t===e._wrapperState.initialValue&&(e.value=t)}A.injectEventPluginOrder("ResponderEventPlugin SimpleEventPlugin EnterLeaveEventPlugin ChangeEventPlugin SelectEventPlugin BeforeInputEventPlugin".split(" ")),_=W,k=U,x=D,A.injectEventPluginsByName({SimpleEventPlugin:kn,EnterLeaveEventPlugin:Zt,ChangeEventPlugin:zt,SelectEventPlugin:Kn,BeforeInputEventPlugin:Ce});var er={html:"http://www.w3.org/1999/xhtml",mathml:"http://www.w3.org/1998/Math/MathML",svg:"http://www.w3.org/2000/svg"};function tr(e){switch(e){case"svg":return"http://www.w3.org/2000/svg";case"math":return"http://www.w3.org/1998/Math/MathML";default:return"http://www.w3.org/1999/xhtml"}}function nr(e,t){return null==e||"http://www.w3.org/1999/xhtml"===e?tr(t):"http://www.w3.org/2000/svg"===e&&"foreignObject"===t?"http://www.w3.org/1999/xhtml":e}var rr,or=void 0,ir=(rr=function(e,t){if(e.namespaceURI!==er.svg||"innerHTML"in e)e.innerHTML=t;else{for((or=or||document.createElement("div")).innerHTML="<svg>"+t+"</svg>",t=or.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}},"undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction?function(e,t,n,r){MSApp.execUnsafeLocalFunction(function(){return rr(e,t)})}:rr);function ar(e,t){if(t){var 
n=e.firstChild;if(n&&n===e.lastChild&&3===n.nodeType)return void(n.nodeValue=t)}e.textContent=t}var lr={animationIterationCount:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},ur=["Webkit","ms","Moz","O"];function cr(e,t){for(var n in e=e.style,t)if(t.hasOwnProperty(n)){var r=0===n.indexOf("--"),o=n,i=t[n];o=null==i||"boolean"==typeof i||""===i?"":r||"number"!=typeof i||0===i||lr.hasOwnProperty(o)&&lr[o]?(""+i).trim():i+"px","float"===n&&(n="cssFloat"),r?e.setProperty(n,o):e[n]=o}}Object.keys(lr).forEach(function(e){ur.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),lr[t]=lr[e]})});var sr=o({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function fr(e,t){t&&(sr[e]&&(null!=t.children||null!=t.dangerouslySetInnerHTML)&&a("137",e,""),null!=t.dangerouslySetInnerHTML&&(null!=t.children&&a("60"),"object"==typeof t.dangerouslySetInnerHTML&&"__html"in t.dangerouslySetInnerHTML||a("61")),null!=t.style&&"object"!=typeof t.style&&a("62",""))}function dr(e,t){if(-1===e.indexOf("-"))return"string"==typeof t.is;switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}function pr(e,t){var n=Ln(e=9===e.nodeType||11===e.nodeType?e:e.ownerDocument);t=w[t];for(var r=0;r<t.length;r++){var 
o=t[r];if(!n.hasOwnProperty(o)||!n[o]){switch(o){case"scroll":Pn("scroll",e);break;case"focus":case"blur":Pn("focus",e),Pn("blur",e),n.blur=!0,n.focus=!0;break;case"cancel":case"close":Be(o)&&Pn(o,e);break;case"invalid":case"submit":case"reset":break;default:-1===re.indexOf(o)&&Cn(o,e)}n[o]=!0}}}function hr(){}var mr=null,vr=null;function gr(e,t){switch(e){case"button":case"input":case"select":case"textarea":return!!t.autoFocus}return!1}function yr(e,t){return"textarea"===e||"option"===e||"noscript"===e||"string"==typeof t.children||"number"==typeof t.children||"object"==typeof t.dangerouslySetInnerHTML&&null!==t.dangerouslySetInnerHTML&&null!=t.dangerouslySetInnerHTML.__html}var br=setTimeout,wr=clearTimeout;function _r(e){for(e=e.nextSibling;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}function kr(e){for(e=e.firstChild;e&&1!==e.nodeType&&3!==e.nodeType;)e=e.nextSibling;return e}new Set;var xr=[],Tr=-1;function Sr(e){0>Tr||(e.current=xr[Tr],xr[Tr]=null,Tr--)}function Er(e,t){xr[++Tr]=e.current,e.current=t}var Cr={},Pr={current:Cr},Or={current:!1},Rr=Cr;function Ar(e,t){var n=e.type.contextTypes;if(!n)return Cr;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var o,i={};for(o in n)i[o]=t[o];return r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=i),i}function jr(e){return null!=(e=e.childContextTypes)}function Nr(e){Sr(Or),Sr(Pr)}function Lr(e){Sr(Or),Sr(Pr)}function Mr(e,t,n){Pr.current!==Cr&&a("168"),Er(Pr,t),Er(Or,n)}function Fr(e,t,n){var r=e.stateNode;if(e=t.childContextTypes,"function"!=typeof r.getChildContext)return n;for(var i in r=r.getChildContext())i in e||a("108",ct(t)||"Unknown",i);return o({},n,r)}function Ir(e){var t=e.stateNode;return t=t&&t.__reactInternalMemoizedMergedChildContext||Cr,Rr=Pr.current,Er(Pr,t),Er(Or,Or.current),!0}function Ur(e,t,n){var 
r=e.stateNode;r||a("169"),n?(t=Fr(e,t,Rr),r.__reactInternalMemoizedMergedChildContext=t,Sr(Or),Sr(Pr),Er(Pr,t)):Sr(Or),Er(Or,n)}var Dr=null,Wr=null;function zr(e){return function(t){try{return e(t)}catch(n){}}}function Br(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.firstContextDependency=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.effectTag=0,this.lastEffect=this.firstEffect=this.nextEffect=null,this.childExpirationTime=this.expirationTime=0,this.alternate=null}function Hr(e,t,n,r){return new Br(e,t,n,r)}function Vr(e){return!(!(e=e.prototype)||!e.isReactComponent)}function $r(e,t){var n=e.alternate;return null===n?((n=Hr(e.tag,t,e.key,e.mode)).elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.effectTag=0,n.nextEffect=null,n.firstEffect=null,n.lastEffect=null),n.childExpirationTime=e.childExpirationTime,n.expirationTime=e.expirationTime,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,n.firstContextDependency=e.firstContextDependency,n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function qr(e,t,n,r,o,i){var l=2;if(r=e,"function"==typeof e)Vr(e)&&(l=1);else if("string"==typeof e)l=5;else e:switch(e){case Xe:return Kr(n.children,o,i,t);case nt:return Gr(n,3|o,i,t);case Je:return Gr(n,2|o,i,t);case Ze:return(e=Hr(12,n,t,4|o)).elementType=Ze,e.type=Ze,e.expirationTime=i,e;case ot:return(e=Hr(13,n,t,o)).elementType=ot,e.type=ot,e.expirationTime=i,e;default:if("object"==typeof e&&null!==e)switch(e.$$typeof){case et:l=10;break e;case tt:l=9;break e;case rt:l=11;break e;case it:l=14;break e;case at:l=16,r=null;break e}a("130",null==e?e:typeof e,"")}return(t=Hr(l,n,t,o)).elementType=e,t.type=r,t.expirationTime=i,t}function Kr(e,t,n,r){return(e=Hr(7,e,r,t)).expirationTime=n,e}function 
Gr(e,t,n,r){return e=Hr(8,e,r,t),t=0==(1&t)?Je:nt,e.elementType=t,e.type=t,e.expirationTime=n,e}function Qr(e,t,n){return(e=Hr(6,e,null,t)).expirationTime=n,e}function Yr(e,t,n){return(t=Hr(4,null!==e.children?e.children:[],e.key,t)).expirationTime=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function Xr(e,t){e.didError=!1;var n=e.earliestPendingTime;0===n?e.earliestPendingTime=e.latestPendingTime=t:n>t?e.earliestPendingTime=t:e.latestPendingTime<t&&(e.latestPendingTime=t),eo(t,e)}function Jr(e,t){e.didError=!1;var n=e.latestPingedTime;0!==n&&n<=t&&(e.latestPingedTime=0),n=e.earliestPendingTime;var r=e.latestPendingTime;n===t?e.earliestPendingTime=r===t?e.latestPendingTime=0:r:r===t&&(e.latestPendingTime=n),n=e.earliestSuspendedTime,r=e.latestSuspendedTime,0===n?e.earliestSuspendedTime=e.latestSuspendedTime=t:n>t?e.earliestSuspendedTime=t:r<t&&(e.latestSuspendedTime=t),eo(t,e)}function Zr(e,t){var n=e.earliestPendingTime;return e=e.earliestSuspendedTime,(0===t||0!==n&&n<t)&&(t=n),(0===t||0!==e&&e<t)&&(t=e),t}function eo(e,t){var n=t.earliestSuspendedTime,r=t.latestSuspendedTime,o=t.earliestPendingTime,i=t.latestPingedTime;0===(o=0!==o?o:i)&&(0===e||r>e)&&(o=r),0!==(e=o)&&0!==n&&n<e&&(e=n),t.nextExpirationTimeToWorkOn=o,t.expirationTime=e}var to=!1;function no(e){return{baseState:e,firstUpdate:null,lastUpdate:null,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function ro(e){return{baseState:e.baseState,firstUpdate:e.firstUpdate,lastUpdate:e.lastUpdate,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function oo(e){return{expirationTime:e,tag:0,payload:null,callback:null,next:null,nextEffect:null}}function io(e,t){null===e.lastUpdate?e.firstUpdate=e.lastUpdate=t:(e.lastUpdate.next=t,e.lastUpdate=t)}function ao(e,t){var 
n=e.alternate;if(null===n){var r=e.updateQueue,o=null;null===r&&(r=e.updateQueue=no(e.memoizedState))}else r=e.updateQueue,o=n.updateQueue,null===r?null===o?(r=e.updateQueue=no(e.memoizedState),o=n.updateQueue=no(n.memoizedState)):r=e.updateQueue=ro(o):null===o&&(o=n.updateQueue=ro(r));null===o||r===o?io(r,t):null===r.lastUpdate||null===o.lastUpdate?(io(r,t),io(o,t)):(io(r,t),o.lastUpdate=t)}function lo(e,t){var n=e.updateQueue;null===(n=null===n?e.updateQueue=no(e.memoizedState):uo(e,n)).lastCapturedUpdate?n.firstCapturedUpdate=n.lastCapturedUpdate=t:(n.lastCapturedUpdate.next=t,n.lastCapturedUpdate=t)}function uo(e,t){var n=e.alternate;return null!==n&&t===n.updateQueue&&(t=e.updateQueue=ro(t)),t}function co(e,t,n,r,i,a){switch(n.tag){case 1:return"function"==typeof(e=n.payload)?e.call(a,r,i):e;case 3:e.effectTag=-1025&e.effectTag|64;case 0:if(null==(i="function"==typeof(e=n.payload)?e.call(a,r,i):e))break;return o({},r,i);case 2:to=!0}return r}function so(e,t,n,r,o){to=!1;for(var i=(t=uo(e,t)).baseState,a=null,l=0,u=t.firstUpdate,c=i;null!==u;){var s=u.expirationTime;s>o?(null===a&&(a=u,i=c),(0===l||l>s)&&(l=s)):(c=co(e,0,u,c,n,r),null!==u.callback&&(e.effectTag|=32,u.nextEffect=null,null===t.lastEffect?t.firstEffect=t.lastEffect=u:(t.lastEffect.nextEffect=u,t.lastEffect=u))),u=u.next}for(s=null,u=t.firstCapturedUpdate;null!==u;){var f=u.expirationTime;f>o?(null===s&&(s=u,null===a&&(i=c)),(0===l||l>f)&&(l=f)):(c=co(e,0,u,c,n,r),null!==u.callback&&(e.effectTag|=32,u.nextEffect=null,null===t.lastCapturedEffect?t.firstCapturedEffect=t.lastCapturedEffect=u:(t.lastCapturedEffect.nextEffect=u,t.lastCapturedEffect=u))),u=u.next}null===a&&(t.lastUpdate=null),null===s?t.lastCapturedUpdate=null:e.effectTag|=32,null===a&&null===s&&(i=c),t.baseState=i,t.firstUpdate=a,t.firstCapturedUpdate=s,e.expirationTime=l,e.memoizedState=c}function 
fo(e,t,n){null!==t.firstCapturedUpdate&&(null!==t.lastUpdate&&(t.lastUpdate.next=t.firstCapturedUpdate,t.lastUpdate=t.lastCapturedUpdate),t.firstCapturedUpdate=t.lastCapturedUpdate=null),po(t.firstEffect,n),t.firstEffect=t.lastEffect=null,po(t.firstCapturedEffect,n),t.firstCapturedEffect=t.lastCapturedEffect=null}function po(e,t){for(;null!==e;){var n=e.callback;if(null!==n){e.callback=null;var r=t;"function"!=typeof n&&a("191",n),n.call(r)}e=e.nextEffect}}function ho(e,t){return{value:e,source:t,stack:st(t)}}var mo={current:null},vo=null,go=null,yo=null;function bo(e,t){var n=e.type._context;Er(mo,n._currentValue),n._currentValue=t}function wo(e){var t=mo.current;Sr(mo),e.type._context._currentValue=t}function _o(e){vo=e,yo=go=null,e.firstContextDependency=null}function ko(e,t){return yo!==e&&!1!==t&&0!==t&&("number"==typeof t&&1073741823!==t||(yo=e,t=1073741823),t={context:e,observedBits:t,next:null},null===go?(null===vo&&a("293"),vo.firstContextDependency=go=t):go=go.next=t),e._currentValue}var xo={},To={current:xo},So={current:xo},Eo={current:xo};function Co(e){return e===xo&&a("174"),e}function Po(e,t){Er(Eo,t),Er(So,e),Er(To,xo);var n=t.nodeType;switch(n){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:nr(null,"");break;default:t=nr(t=(n=8===n?t.parentNode:t).namespaceURI||null,n=n.tagName)}Sr(To),Er(To,t)}function Oo(e){Sr(To),Sr(So),Sr(Eo)}function Ro(e){Co(Eo.current);var t=Co(To.current),n=nr(t,e.type);t!==n&&(Er(So,e),Er(To,n))}function Ao(e){So.current===e&&(Sr(To),Sr(So))}var jo=qe.ReactCurrentOwner,No=(new r.Component).refs;function Lo(e,t,n,r){n=null==(n=n(r,t=e.memoizedState))?t:o({},t,n),e.memoizedState=n,null!==(r=e.updateQueue)&&0===e.expirationTime&&(r.baseState=n)}var Mo={isMounted:function(e){return!!(e=e._reactInternalFiber)&&2===rn(e)},enqueueSetState:function(e,t,n){e=e._reactInternalFiber;var r=Sa(),o=oo(r=Gi(r,e));o.payload=t,null!=n&&(o.callback=n),ao(e,o),Xi(e,r)},enqueueReplaceState:function(e,t,n){e=e._reactInternalFiber;var 
r=Sa(),o=oo(r=Gi(r,e));o.tag=1,o.payload=t,null!=n&&(o.callback=n),ao(e,o),Xi(e,r)},enqueueForceUpdate:function(e,t){e=e._reactInternalFiber;var n=Sa(),r=oo(n=Gi(n,e));r.tag=2,null!=t&&(r.callback=t),ao(e,r),Xi(e,n)}};function Fo(e,t,n,r,o,i,a){return"function"==typeof(e=e.stateNode).shouldComponentUpdate?e.shouldComponentUpdate(r,i,a):!t.prototype||!t.prototype.isPureReactComponent||(!nn(n,r)||!nn(o,i))}function Io(e,t,n){var r=!1,o=Cr,i=t.contextType;return"object"==typeof i&&null!==i?i=jo.currentDispatcher.readContext(i):(o=jr(t)?Rr:Pr.current,i=(r=null!=(r=t.contextTypes))?Ar(e,o):Cr),t=new t(n,i),e.memoizedState=null!==t.state&&void 0!==t.state?t.state:null,t.updater=Mo,e.stateNode=t,t._reactInternalFiber=e,r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=o,e.__reactInternalMemoizedMaskedChildContext=i),t}function Uo(e,t,n,r){e=t.state,"function"==typeof t.componentWillReceiveProps&&t.componentWillReceiveProps(n,r),"function"==typeof t.UNSAFE_componentWillReceiveProps&&t.UNSAFE_componentWillReceiveProps(n,r),t.state!==e&&Mo.enqueueReplaceState(t,t.state,null)}function Do(e,t,n,r){var o=e.stateNode;o.props=n,o.state=e.memoizedState,o.refs=No;var i=t.contextType;"object"==typeof i&&null!==i?o.context=jo.currentDispatcher.readContext(i):(i=jr(t)?Rr:Pr.current,o.context=Ar(e,i)),null!==(i=e.updateQueue)&&(so(e,i,n,o,r),o.state=e.memoizedState),"function"==typeof(i=t.getDerivedStateFromProps)&&(Lo(e,t,i,n),o.state=e.memoizedState),"function"==typeof t.getDerivedStateFromProps||"function"==typeof o.getSnapshotBeforeUpdate||"function"!=typeof o.UNSAFE_componentWillMount&&"function"!=typeof o.componentWillMount||(t=o.state,"function"==typeof o.componentWillMount&&o.componentWillMount(),"function"==typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount(),t!==o.state&&Mo.enqueueReplaceState(o,o.state,null),null!==(i=e.updateQueue)&&(so(e,i,n,o,r),o.state=e.memoizedState)),"function"==typeof o.componentDidMount&&(e.effectTag|=4)}var 
Wo=Array.isArray;function zo(e,t,n){if(null!==(e=n.ref)&&"function"!=typeof e&&"object"!=typeof e){if(n._owner){n=n._owner;var r=void 0;n&&(1!==n.tag&&a("289"),r=n.stateNode),r||a("147",e);var o=""+e;return null!==t&&null!==t.ref&&"function"==typeof t.ref&&t.ref._stringRef===o?t.ref:((t=function(e){var t=r.refs;t===No&&(t=r.refs={}),null===e?delete t[o]:t[o]=e})._stringRef=o,t)}"string"!=typeof e&&a("284"),n._owner||a("290",e)}return e}function Bo(e,t){"textarea"!==e.type&&a("31","[object Object]"===Object.prototype.toString.call(t)?"object with keys {"+Object.keys(t).join(", ")+"}":t,"")}function Ho(e){function t(t,n){if(e){var r=t.lastEffect;null!==r?(r.nextEffect=n,t.lastEffect=n):t.firstEffect=t.lastEffect=n,n.nextEffect=null,n.effectTag=8}}function n(n,r){if(!e)return null;for(;null!==r;)t(n,r),r=r.sibling;return null}function r(e,t){for(e=new Map;null!==t;)null!==t.key?e.set(t.key,t):e.set(t.index,t),t=t.sibling;return e}function o(e,t,n){return(e=$r(e,t)).index=0,e.sibling=null,e}function i(t,n,r){return t.index=r,e?null!==(r=t.alternate)?(r=r.index)<n?(t.effectTag=2,n):r:(t.effectTag=2,n):n}function l(t){return e&&null===t.alternate&&(t.effectTag=2),t}function u(e,t,n,r){return null===t||6!==t.tag?((t=Qr(n,e.mode,r)).return=e,t):((t=o(t,n)).return=e,t)}function c(e,t,n,r){return null!==t&&t.elementType===n.type?((r=o(t,n.props)).ref=zo(e,t,n),r.return=e,r):((r=qr(n.type,n.key,n.props,null,e.mode,r)).ref=zo(e,t,n),r.return=e,r)}function s(e,t,n,r){return null===t||4!==t.tag||t.stateNode.containerInfo!==n.containerInfo||t.stateNode.implementation!==n.implementation?((t=Yr(n,e.mode,r)).return=e,t):((t=o(t,n.children||[])).return=e,t)}function f(e,t,n,r,i){return null===t||7!==t.tag?((t=Kr(n,e.mode,r,i)).return=e,t):((t=o(t,n)).return=e,t)}function d(e,t,n){if("string"==typeof t||"number"==typeof t)return(t=Qr(""+t,e.mode,n)).return=e,t;if("object"==typeof t&&null!==t){switch(t.$$typeof){case 
Qe:return(n=qr(t.type,t.key,t.props,null,e.mode,n)).ref=zo(e,null,t),n.return=e,n;case Ye:return(t=Yr(t,e.mode,n)).return=e,t}if(Wo(t)||ut(t))return(t=Kr(t,e.mode,n,null)).return=e,t;Bo(e,t)}return null}function p(e,t,n,r){var o=null!==t?t.key:null;if("string"==typeof n||"number"==typeof n)return null!==o?null:u(e,t,""+n,r);if("object"==typeof n&&null!==n){switch(n.$$typeof){case Qe:return n.key===o?n.type===Xe?f(e,t,n.props.children,r,o):c(e,t,n,r):null;case Ye:return n.key===o?s(e,t,n,r):null}if(Wo(n)||ut(n))return null!==o?null:f(e,t,n,r,null);Bo(e,n)}return null}function h(e,t,n,r,o){if("string"==typeof r||"number"==typeof r)return u(t,e=e.get(n)||null,""+r,o);if("object"==typeof r&&null!==r){switch(r.$$typeof){case Qe:return e=e.get(null===r.key?n:r.key)||null,r.type===Xe?f(t,e,r.props.children,o,r.key):c(t,e,r,o);case Ye:return s(t,e=e.get(null===r.key?n:r.key)||null,r,o)}if(Wo(r)||ut(r))return f(t,e=e.get(n)||null,r,o,null);Bo(t,r)}return null}function m(o,a,l,u){for(var c=null,s=null,f=a,m=a=0,v=null;null!==f&&m<l.length;m++){f.index>m?(v=f,f=null):v=f.sibling;var g=p(o,f,l[m],u);if(null===g){null===f&&(f=v);break}e&&f&&null===g.alternate&&t(o,f),a=i(g,a,m),null===s?c=g:s.sibling=g,s=g,f=v}if(m===l.length)return n(o,f),c;if(null===f){for(;m<l.length;m++)(f=d(o,l[m],u))&&(a=i(f,a,m),null===s?c=f:s.sibling=f,s=f);return c}for(f=r(o,f);m<l.length;m++)(v=h(f,o,m,l[m],u))&&(e&&null!==v.alternate&&f.delete(null===v.key?m:v.key),a=i(v,a,m),null===s?c=v:s.sibling=v,s=v);return e&&f.forEach(function(e){return t(o,e)}),c}function v(o,l,u,c){var s=ut(u);"function"!=typeof s&&a("150"),null==(u=s.call(u))&&a("151");for(var f=s=null,m=l,v=l=0,g=null,y=u.next();null!==m&&!y.done;v++,y=u.next()){m.index>v?(g=m,m=null):g=m.sibling;var b=p(o,m,y.value,c);if(null===b){m||(m=g);break}e&&m&&null===b.alternate&&t(o,m),l=i(b,l,v),null===f?s=b:f.sibling=b,f=b,m=g}if(y.done)return 
n(o,m),s;if(null===m){for(;!y.done;v++,y=u.next())null!==(y=d(o,y.value,c))&&(l=i(y,l,v),null===f?s=y:f.sibling=y,f=y);return s}for(m=r(o,m);!y.done;v++,y=u.next())null!==(y=h(m,o,v,y.value,c))&&(e&&null!==y.alternate&&m.delete(null===y.key?v:y.key),l=i(y,l,v),null===f?s=y:f.sibling=y,f=y);return e&&m.forEach(function(e){return t(o,e)}),s}return function(e,r,i,u){var c="object"==typeof i&&null!==i&&i.type===Xe&&null===i.key;c&&(i=i.props.children);var s="object"==typeof i&&null!==i;if(s)switch(i.$$typeof){case Qe:e:{for(s=i.key,c=r;null!==c;){if(c.key===s){if(7===c.tag?i.type===Xe:c.elementType===i.type){n(e,c.sibling),(r=o(c,i.type===Xe?i.props.children:i.props)).ref=zo(e,c,i),r.return=e,e=r;break e}n(e,c);break}t(e,c),c=c.sibling}i.type===Xe?((r=Kr(i.props.children,e.mode,u,i.key)).return=e,e=r):((u=qr(i.type,i.key,i.props,null,e.mode,u)).ref=zo(e,r,i),u.return=e,e=u)}return l(e);case Ye:e:{for(c=i.key;null!==r;){if(r.key===c){if(4===r.tag&&r.stateNode.containerInfo===i.containerInfo&&r.stateNode.implementation===i.implementation){n(e,r.sibling),(r=o(r,i.children||[])).return=e,e=r;break e}n(e,r);break}t(e,r),r=r.sibling}(r=Yr(i,e.mode,u)).return=e,e=r}return l(e)}if("string"==typeof i||"number"==typeof i)return i=""+i,null!==r&&6===r.tag?(n(e,r.sibling),(r=o(r,i)).return=e,e=r):(n(e,r),(r=Qr(i,e.mode,u)).return=e,e=r),l(e);if(Wo(i))return m(e,r,i,u);if(ut(i))return v(e,r,i,u);if(s&&Bo(e,i),void 0===i&&!c)switch(e.tag){case 1:case 0:a("152",(u=e.type).displayName||u.name||"Component")}return n(e,r)}}var Vo=Ho(!0),$o=Ho(!1),qo=null,Ko=null,Go=!1;function Qo(e,t){var n=Hr(5,null,null,0);n.elementType="DELETED",n.type="DELETED",n.stateNode=t,n.return=e,n.effectTag=8,null!==e.lastEffect?(e.lastEffect.nextEffect=n,e.lastEffect=n):e.firstEffect=e.lastEffect=n}function Yo(e,t){switch(e.tag){case 5:var n=e.type;return null!==(t=1!==t.nodeType||n.toLowerCase()!==t.nodeName.toLowerCase()?null:t)&&(e.stateNode=t,!0);case 6:return 
null!==(t=""===e.pendingProps||3!==t.nodeType?null:t)&&(e.stateNode=t,!0);default:return!1}}function Xo(e){if(Go){var t=Ko;if(t){var n=t;if(!Yo(e,t)){if(!(t=_r(n))||!Yo(e,t))return e.effectTag|=2,Go=!1,void(qo=e);Qo(qo,n)}qo=e,Ko=kr(t)}else e.effectTag|=2,Go=!1,qo=e}}function Jo(e){for(e=e.return;null!==e&&5!==e.tag&&3!==e.tag;)e=e.return;qo=e}function Zo(e){if(e!==qo)return!1;if(!Go)return Jo(e),Go=!0,!1;var t=e.type;if(5!==e.tag||"head"!==t&&"body"!==t&&!yr(t,e.memoizedProps))for(t=Ko;t;)Qo(e,t),t=_r(t);return Jo(e),Ko=qo?_r(e.stateNode):null,!0}function ei(){Ko=qo=null,Go=!1}var ti=qe.ReactCurrentOwner;function ni(e,t,n,r){t.child=null===e?$o(t,null,n,r):Vo(t,e.child,n,r)}function ri(e,t,n,r,o){n=n.render;var i=t.ref;return Or.current||t.memoizedProps!==r||i!==(null!==e?e.ref:null)?(ni(e,t,r=n(r,i),o),t.child):pi(e,t,o)}function oi(e,t,n,r,o,i){if(null===e){var a=n.type;return"function"!=typeof a||Vr(a)||void 0!==a.defaultProps||null!==n.compare?((e=qr(n.type,null,r,null,t.mode,i)).ref=t.ref,e.return=t,t.child=e):(t.tag=15,t.type=a,ii(e,t,a,r,o,i))}return a=e.child,(0===o||o>i)&&(o=a.memoizedProps,(n=null!==(n=n.compare)?n:nn)(o,r)&&e.ref===t.ref)?pi(e,t,i):((e=$r(a,r)).ref=t.ref,e.return=t,t.child=e)}function ii(e,t,n,r,o,i){return null!==e&&(0===o||o>i)&&nn(e.memoizedProps,r)&&e.ref===t.ref?pi(e,t,i):li(e,t,n,r,i)}function ai(e,t){var n=t.ref;(null===e&&null!==n||null!==e&&e.ref!==n)&&(t.effectTag|=128)}function li(e,t,n,r,o){var i=jr(n)?Rr:Pr.current;return i=Ar(t,i),_o(t),n=n(r,i),t.effectTag|=1,ni(e,t,n,o),t.child}function ui(e,t,n,r,o){if(jr(n)){var i=!0;Ir(t)}else i=!1;if(_o(t),null===t.stateNode)null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),Io(t,n,r),Do(t,n,r,o),r=!0;else if(null===e){var a=t.stateNode,l=t.memoizedProps;a.props=l;var u=a.context,c=n.contextType;"object"==typeof c&&null!==c?c=jo.currentDispatcher.readContext(c):c=Ar(t,c=jr(n)?Rr:Pr.current);var s=n.getDerivedStateFromProps,f="function"==typeof s||"function"==typeof 
a.getSnapshotBeforeUpdate;f||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(l!==r||u!==c)&&Uo(t,a,r,c),to=!1;var d=t.memoizedState;u=a.state=d;var p=t.updateQueue;null!==p&&(so(t,p,r,a,o),u=t.memoizedState),l!==r||d!==u||Or.current||to?("function"==typeof s&&(Lo(t,n,s,r),u=t.memoizedState),(l=to||Fo(t,n,l,r,d,u,c))?(f||"function"!=typeof a.UNSAFE_componentWillMount&&"function"!=typeof a.componentWillMount||("function"==typeof a.componentWillMount&&a.componentWillMount(),"function"==typeof a.UNSAFE_componentWillMount&&a.UNSAFE_componentWillMount()),"function"==typeof a.componentDidMount&&(t.effectTag|=4)):("function"==typeof a.componentDidMount&&(t.effectTag|=4),t.memoizedProps=r,t.memoizedState=u),a.props=r,a.state=u,a.context=c,r=l):("function"==typeof a.componentDidMount&&(t.effectTag|=4),r=!1)}else a=t.stateNode,l=t.memoizedProps,a.props=l,u=a.context,"object"==typeof(c=n.contextType)&&null!==c?c=jo.currentDispatcher.readContext(c):c=Ar(t,c=jr(n)?Rr:Pr.current),(f="function"==typeof(s=n.getDerivedStateFromProps)||"function"==typeof a.getSnapshotBeforeUpdate)||"function"!=typeof a.UNSAFE_componentWillReceiveProps&&"function"!=typeof a.componentWillReceiveProps||(l!==r||u!==c)&&Uo(t,a,r,c),to=!1,u=t.memoizedState,d=a.state=u,null!==(p=t.updateQueue)&&(so(t,p,r,a,o),d=t.memoizedState),l!==r||u!==d||Or.current||to?("function"==typeof s&&(Lo(t,n,s,r),d=t.memoizedState),(s=to||Fo(t,n,l,r,u,d,c))?(f||"function"!=typeof a.UNSAFE_componentWillUpdate&&"function"!=typeof a.componentWillUpdate||("function"==typeof a.componentWillUpdate&&a.componentWillUpdate(r,d,c),"function"==typeof a.UNSAFE_componentWillUpdate&&a.UNSAFE_componentWillUpdate(r,d,c)),"function"==typeof a.componentDidUpdate&&(t.effectTag|=4),"function"==typeof a.getSnapshotBeforeUpdate&&(t.effectTag|=256)):("function"!=typeof a.componentDidUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=4),"function"!=typeof 
a.getSnapshotBeforeUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=256),t.memoizedProps=r,t.memoizedState=d),a.props=r,a.state=d,a.context=c,r=s):("function"!=typeof a.componentDidUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=4),"function"!=typeof a.getSnapshotBeforeUpdate||l===e.memoizedProps&&u===e.memoizedState||(t.effectTag|=256),r=!1);return ci(e,t,n,r,i,o)}function ci(e,t,n,r,o,i){ai(e,t);var a=0!=(64&t.effectTag);if(!r&&!a)return o&&Ur(t,n,!1),pi(e,t,i);r=t.stateNode,ti.current=t;var l=a&&"function"!=typeof n.getDerivedStateFromError?null:r.render();return t.effectTag|=1,null!==e&&a?(t.child=Vo(t,e.child,null,i),t.child=Vo(t,null,l,i)):ni(e,t,l,i),t.memoizedState=r.state,o&&Ur(t,n,!0),t.child}function si(e){var t=e.stateNode;t.pendingContext?Mr(0,t.pendingContext,t.pendingContext!==t.context):t.context&&Mr(0,t.context,!1),Po(e,t.containerInfo)}function fi(e,t){if(e&&e.defaultProps)for(var n in t=o({},t),e=e.defaultProps)void 0===t[n]&&(t[n]=e[n]);return t}function di(e,t,n){var r=t.mode,o=t.pendingProps,i=t.memoizedState;null!==i&&(i.alreadyCaptured?null!==e&&i===e.memoizedState?i={alreadyCaptured:!0,didTimeout:!0,timedOutAt:i.timedOutAt}:(i.alreadyCaptured=!0,i.didTimeout=!0):i=null);var a=null!==i&&i.didTimeout;if(null===e)a?(a=o.fallback,o=Kr(null,r,0,null),r=Kr(a,r,n,null),o.sibling=r,(n=o).return=r.return=t):n=r=$o(t,null,o.children,n);else{var l=e.memoizedState;null!==l&&l.didTimeout?(e=(r=e.child).sibling,a?(n=o.fallback,(r=$r(r,r.pendingProps)).effectTag|=2,(o=r.sibling=$r(e,n,e.expirationTime)).effectTag|=2,n=r,r.childExpirationTime=0,r=o,n.return=r.return=t):(a=e.child,r=Vo(t,r.child,o.children,n),Vo(t,a,null,n),n=r)):(e=e.child,a?(a=o.fallback,(o=Kr(null,r,0,null)).effectTag|=2,o.child=e,e.return=o,(r=o.sibling=Kr(a,r,n,null)).effectTag|=2,n=o,o.childExpirationTime=0,n.return=r.return=t):r=n=Vo(t,e,o.children,n))}return t.memoizedState=i,t.child=n,r}function 
pi(e,t,n){null!==e&&(t.firstContextDependency=e.firstContextDependency);var r=t.childExpirationTime;if(0===r||r>n)return null;if(null!==e&&t.child!==e.child&&a("153"),null!==t.child){for(n=$r(e=t.child,e.pendingProps,e.expirationTime),t.child=n,n.return=t;null!==e.sibling;)e=e.sibling,(n=n.sibling=$r(e,e.pendingProps,e.expirationTime)).return=t;n.sibling=null}return t.child}function hi(e,t,n){var r=t.expirationTime;if(null!==e&&e.memoizedProps===t.pendingProps&&!Or.current&&(0===r||r>n)){switch(t.tag){case 3:si(t),ei();break;case 5:Ro(t);break;case 1:jr(t.type)&&Ir(t);break;case 4:Po(t,t.stateNode.containerInfo);break;case 10:bo(t,t.memoizedProps.value);break;case 13:if(null!==(r=t.memoizedState)&&r.didTimeout)return 0!==(r=t.child.childExpirationTime)&&r<=n?di(e,t,n):null!==(t=pi(e,t,n))?t.sibling:null}return pi(e,t,n)}switch(t.expirationTime=0,t.tag){case 2:r=t.elementType,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),e=t.pendingProps;var o=Ar(t,Pr.current);if(_o(t),o=r(e,o),t.effectTag|=1,"object"==typeof o&&null!==o&&"function"==typeof o.render&&void 0===o.$$typeof){if(t.tag=1,jr(r)){var i=!0;Ir(t)}else i=!1;t.memoizedState=null!==o.state&&void 0!==o.state?o.state:null;var l=r.getDerivedStateFromProps;"function"==typeof l&&Lo(t,r,l,e),o.updater=Mo,t.stateNode=o,o._reactInternalFiber=t,Do(t,r,e,n),t=ci(null,t,r,!0,i,n)}else t.tag=0,ni(null,t,o,n),t=t.child;return t;case 16:switch(o=t.elementType,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),i=t.pendingProps,e=function(e){var t=e._result;switch(e._status){case 1:return t;case 2:case 0:throw t;default:throw e._status=0,(t=(t=e._ctor)()).then(function(t){0===e._status&&(t=t.default,e._status=1,e._result=t)},function(t){0===e._status&&(e._status=2,e._result=t)}),e._result=t,t}}(o),t.type=e,o=t.tag=function(e){if("function"==typeof e)return Vr(e)?1:0;if(null!=e){if((e=e.$$typeof)===rt)return 11;if(e===it)return 14}return 2}(e),i=fi(e,i),l=void 0,o){case 0:l=li(null,t,e,i,n);break;case 
1:l=ui(null,t,e,i,n);break;case 11:l=ri(null,t,e,i,n);break;case 14:l=oi(null,t,e,fi(e.type,i),r,n);break;default:a("283",e)}return l;case 0:return r=t.type,o=t.pendingProps,li(e,t,r,o=t.elementType===r?o:fi(r,o),n);case 1:return r=t.type,o=t.pendingProps,ui(e,t,r,o=t.elementType===r?o:fi(r,o),n);case 3:return si(t),null===(r=t.updateQueue)&&a("282"),o=null!==(o=t.memoizedState)?o.element:null,so(t,r,t.pendingProps,null,n),(r=t.memoizedState.element)===o?(ei(),t=pi(e,t,n)):(o=t.stateNode,(o=(null===e||null===e.child)&&o.hydrate)&&(Ko=kr(t.stateNode.containerInfo),qo=t,o=Go=!0),o?(t.effectTag|=2,t.child=$o(t,null,r,n)):(ni(e,t,r,n),ei()),t=t.child),t;case 5:return Ro(t),null===e&&Xo(t),r=t.type,o=t.pendingProps,i=null!==e?e.memoizedProps:null,l=o.children,yr(r,o)?l=null:null!==i&&yr(r,i)&&(t.effectTag|=16),ai(e,t),1073741823!==n&&1&t.mode&&o.hidden?(t.expirationTime=1073741823,t=null):(ni(e,t,l,n),t=t.child),t;case 6:return null===e&&Xo(t),null;case 13:return di(e,t,n);case 4:return Po(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=Vo(t,null,r,n):ni(e,t,r,n),t.child;case 11:return r=t.type,o=t.pendingProps,ri(e,t,r,o=t.elementType===r?o:fi(r,o),n);case 7:return ni(e,t,t.pendingProps,n),t.child;case 8:case 12:return ni(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,o=t.pendingProps,l=t.memoizedProps,bo(t,i=o.value),null!==l){var u=l.value;if(0===(i=u===i&&(0!==u||1/u==1/i)||u!=u&&i!=i?0:0|("function"==typeof r._calculateChangedBits?r._calculateChangedBits(u,i):1073741823))){if(l.children===o.children&&!Or.current){t=pi(e,t,n);break e}}else for(null!==(l=t.child)&&(l.return=t);null!==l;){if(null!==(u=l.firstContextDependency))do{if(u.context===r&&0!=(u.observedBits&i)){if(1===l.tag){var c=oo(n);c.tag=2,ao(l,c)}(0===l.expirationTime||l.expirationTime>n)&&(l.expirationTime=n),null!==(c=l.alternate)&&(0===c.expirationTime||c.expirationTime>n)&&(c.expirationTime=n);for(var 
s=l.return;null!==s;){if(c=s.alternate,0===s.childExpirationTime||s.childExpirationTime>n)s.childExpirationTime=n,null!==c&&(0===c.childExpirationTime||c.childExpirationTime>n)&&(c.childExpirationTime=n);else{if(null===c||!(0===c.childExpirationTime||c.childExpirationTime>n))break;c.childExpirationTime=n}s=s.return}}c=l.child,u=u.next}while(null!==u);else c=10===l.tag&&l.type===t.type?null:l.child;if(null!==c)c.return=l;else for(c=l;null!==c;){if(c===t){c=null;break}if(null!==(l=c.sibling)){l.return=c.return,c=l;break}c=c.return}l=c}}ni(e,t,o.children,n),t=t.child}return t;case 9:return o=t.type,r=(i=t.pendingProps).children,_o(t),r=r(o=ko(o,i.unstable_observedBits)),t.effectTag|=1,ni(e,t,r,n),t.child;case 14:return oi(e,t,o=t.type,i=fi(o.type,t.pendingProps),r,n);case 15:return ii(e,t,t.type,t.pendingProps,r,n);case 17:return r=t.type,o=t.pendingProps,o=t.elementType===r?o:fi(r,o),null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),t.tag=1,jr(r)?(e=!0,Ir(t)):e=!1,_o(t),Io(t,r,o),Do(t,r,o,n),ci(null,t,r,!0,e,n);default:a("156")}}function mi(e){e.effectTag|=4}var vi=void 0,gi=void 0,yi=void 0,bi=void 0;function wi(e,t){var n=t.source,r=t.stack;null===r&&null!==n&&(r=st(n)),null!==n&&ct(n.type),t=t.value,null!==e&&1===e.tag&&ct(e.type);try{console.error(t)}catch(o){setTimeout(function(){throw o})}}function _i(e){var t=e.ref;if(null!==t)if("function"==typeof t)try{t(null)}catch(n){Ki(e,n)}else t.current=null}function ki(e){switch("function"==typeof Wr&&Wr(e),e.tag){case 1:_i(e);var t=e.stateNode;if("function"==typeof t.componentWillUnmount)try{t.props=e.memoizedProps,t.state=e.memoizedState,t.componentWillUnmount()}catch(n){Ki(e,n)}break;case 5:_i(e);break;case 4:Si(e)}}function xi(e){return 5===e.tag||3===e.tag||4===e.tag}function Ti(e){e:{for(var t=e.return;null!==t;){if(xi(t)){var n=t;break e}t=t.return}a("160"),n=void 0}var r=t=void 0;switch(n.tag){case 5:t=n.stateNode,r=!1;break;case 3:case 
4:t=n.stateNode.containerInfo,r=!0;break;default:a("161")}16&n.effectTag&&(ar(t,""),n.effectTag&=-17);e:t:for(n=e;;){for(;null===n.sibling;){if(null===n.return||xi(n.return)){n=null;break e}n=n.return}for(n.sibling.return=n.return,n=n.sibling;5!==n.tag&&6!==n.tag;){if(2&n.effectTag)continue t;if(null===n.child||4===n.tag)continue t;n.child.return=n,n=n.child}if(!(2&n.effectTag)){n=n.stateNode;break e}}for(var o=e;;){if(5===o.tag||6===o.tag)if(n)if(r){var i=t,l=o.stateNode,u=n;8===i.nodeType?i.parentNode.insertBefore(l,u):i.insertBefore(l,u)}else t.insertBefore(o.stateNode,n);else r?(l=t,u=o.stateNode,8===l.nodeType?(i=l.parentNode).insertBefore(u,l):(i=l).appendChild(u),null!=(l=l._reactRootContainer)||null!==i.onclick||(i.onclick=hr)):t.appendChild(o.stateNode);else if(4!==o.tag&&null!==o.child){o.child.return=o,o=o.child;continue}if(o===e)break;for(;null===o.sibling;){if(null===o.return||o.return===e)return;o=o.return}o.sibling.return=o.return,o=o.sibling}}function Si(e){for(var t=e,n=!1,r=void 0,o=void 0;;){if(!n){n=t.return;e:for(;;){switch(null===n&&a("160"),n.tag){case 5:r=n.stateNode,o=!1;break e;case 3:case 4:r=n.stateNode.containerInfo,o=!0;break e}n=n.return}n=!0}if(5===t.tag||6===t.tag){e:for(var i=t,l=i;;)if(ki(l),null!==l.child&&4!==l.tag)l.child.return=l,l=l.child;else{if(l===i)break;for(;null===l.sibling;){if(null===l.return||l.return===i)break e;l=l.return}l.sibling.return=l.return,l=l.sibling}o?(i=r,l=t.stateNode,8===i.nodeType?i.parentNode.removeChild(l):i.removeChild(l)):r.removeChild(t.stateNode)}else if(4===t.tag?(r=t.stateNode.containerInfo,o=!0):ki(t),null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return;4===(t=t.return).tag&&(n=!1)}t.sibling.return=t.return,t=t.sibling}}function Ei(e,t){switch(t.tag){case 1:break;case 5:var n=t.stateNode;if(null!=n){var r=t.memoizedProps,o=null!==e?e.memoizedProps:r;e=t.type;var 
i=t.updateQueue;if(t.updateQueue=null,null!==i){for(n[F]=r,"input"===e&&"radio"===r.type&&null!=r.name&&xt(n,r),dr(e,o),t=dr(e,r),o=0;o<i.length;o+=2){var l=i[o],u=i[o+1];"style"===l?cr(n,u):"dangerouslySetInnerHTML"===l?ir(n,u):"children"===l?ar(n,u):bt(n,l,u,t)}switch(e){case"input":Tt(n,r);break;case"textarea":Jn(n,r);break;case"select":e=n._wrapperState.wasMultiple,n._wrapperState.wasMultiple=!!r.multiple,null!=(i=r.value)?Qn(n,!!r.multiple,i,!1):e!==!!r.multiple&&(null!=r.defaultValue?Qn(n,!!r.multiple,r.defaultValue,!0):Qn(n,!!r.multiple,r.multiple?[]:"",!1))}}}break;case 6:null===t.stateNode&&a("162"),t.stateNode.nodeValue=t.memoizedProps;break;case 3:case 12:case 13:case 17:break;default:a("163")}}function Ci(e,t,n){(n=oo(n)).tag=3,n.payload={element:null};var r=t.value;return n.callback=function(){La(r),wi(e,t)},n}function Pi(e,t,n){(n=oo(n)).tag=3;var r=e.type.getDerivedStateFromError;if("function"==typeof r){var o=t.value;n.payload=function(){return r(o)}}var i=e.stateNode;return null!==i&&"function"==typeof i.componentDidCatch&&(n.callback=function(){"function"!=typeof r&&(null===Bi?Bi=new Set([this]):Bi.add(this));var n=t.value,o=t.stack;wi(e,t),this.componentDidCatch(n,{componentStack:null!==o?o:""})}),n}function Oi(e){switch(e.tag){case 1:jr(e.type)&&Nr();var t=e.effectTag;return 1024&t?(e.effectTag=-1025&t|64,e):null;case 3:return Oo(),Lr(),0!=(64&(t=e.effectTag))&&a("285"),e.effectTag=-1025&t|64,e;case 5:return Ao(e),null;case 13:if(1024&(t=e.effectTag)){e.effectTag=-1025&t|64,t=null!==(t=e.alternate)?t.memoizedState:null;var n=e.memoizedState;return null===n?n={alreadyCaptured:!0,didTimeout:!1,timedOutAt:0}:t===n?n={alreadyCaptured:!0,didTimeout:n.didTimeout,timedOutAt:n.timedOutAt}:n.alreadyCaptured=!0,e.memoizedState=n,e}return null;case 4:return Oo(),null;case 10:return wo(e),null;default:return null}}vi=function(e,t){for(var n=t.child;null!==n;){if(5===n.tag||6===n.tag)e.appendChild(n.stateNode);else 
if(4!==n.tag&&null!==n.child){n.child.return=n,n=n.child;continue}if(n===t)break;for(;null===n.sibling;){if(null===n.return||n.return===t)return;n=n.return}n.sibling.return=n.return,n=n.sibling}},gi=function(){},yi=function(e,t,n,r,i){var a=e.memoizedProps;if(a!==r){var l=t.stateNode;switch(Co(To.current),e=null,n){case"input":a=_t(l,a),r=_t(l,r),e=[];break;case"option":a=Gn(l,a),r=Gn(l,r),e=[];break;case"select":a=o({},a,{value:void 0}),r=o({},r,{value:void 0}),e=[];break;case"textarea":a=Yn(l,a),r=Yn(l,r),e=[];break;default:"function"!=typeof a.onClick&&"function"==typeof r.onClick&&(l.onclick=hr)}fr(n,r),l=n=void 0;var u=null;for(n in a)if(!r.hasOwnProperty(n)&&a.hasOwnProperty(n)&&null!=a[n])if("style"===n){var c=a[n];for(l in c)c.hasOwnProperty(l)&&(u||(u={}),u[l]="")}else"dangerouslySetInnerHTML"!==n&&"children"!==n&&"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&"autoFocus"!==n&&(b.hasOwnProperty(n)?e||(e=[]):(e=e||[]).push(n,null));for(n in r){var s=r[n];if(c=null!=a?a[n]:void 0,r.hasOwnProperty(n)&&s!==c&&(null!=s||null!=c))if("style"===n)if(c){for(l in c)!c.hasOwnProperty(l)||s&&s.hasOwnProperty(l)||(u||(u={}),u[l]="");for(l in s)s.hasOwnProperty(l)&&c[l]!==s[l]&&(u||(u={}),u[l]=s[l])}else u||(e||(e=[]),e.push(n,u)),u=s;else"dangerouslySetInnerHTML"===n?(s=s?s.__html:void 0,c=c?c.__html:void 0,null!=s&&c!==s&&(e=e||[]).push(n,""+s)):"children"===n?c===s||"string"!=typeof s&&"number"!=typeof s||(e=e||[]).push(n,""+s):"suppressContentEditableWarning"!==n&&"suppressHydrationWarning"!==n&&(b.hasOwnProperty(n)?(null!=s&&pr(i,n),e||c===s||(e=[])):(e=e||[]).push(n,s))}u&&(e=e||[]).push("style",u),i=e,(t.updateQueue=i)&&mi(t)}},bi=function(e,t,n,r){n!==r&&mi(t)};var Ri={readContext:ko},Ai=qe.ReactCurrentOwner,ji=0,Ni=0,Li=!1,Mi=null,Fi=null,Ii=0,Ui=-1,Di=!1,Wi=null,zi=!1,Bi=null;function Hi(){if(null!==Mi)for(var e=Mi.return;null!==e;){var t=e;switch(t.tag){case 1:var n=t.type.childContextTypes;null!=n&&Nr();break;case 
3:Oo(),Lr();break;case 5:Ao(t);break;case 4:Oo();break;case 10:wo(t)}e=e.return}Fi=null,Ii=0,Ui=-1,Di=!1,Mi=null}function Vi(e){for(;;){var t=e.alternate,n=e.return,r=e.sibling;if(0==(512&e.effectTag)){var i=t,l=(t=e).pendingProps;switch(t.tag){case 2:case 16:break;case 15:case 0:break;case 1:jr(t.type)&&Nr();break;case 3:Oo(),Lr(),(l=t.stateNode).pendingContext&&(l.context=l.pendingContext,l.pendingContext=null),null!==i&&null!==i.child||(Zo(t),t.effectTag&=-3),gi(t);break;case 5:Ao(t);var u=Co(Eo.current),c=t.type;if(null!==i&&null!=t.stateNode)yi(i,t,c,l,u),i.ref!==t.ref&&(t.effectTag|=128);else if(l){var s=Co(To.current);if(Zo(t)){i=(l=t).stateNode;var f=l.type,d=l.memoizedProps,p=u;switch(i[M]=l,i[F]=d,c=void 0,u=f){case"iframe":case"object":Cn("load",i);break;case"video":case"audio":for(f=0;f<re.length;f++)Cn(re[f],i);break;case"source":Cn("error",i);break;case"img":case"image":case"link":Cn("error",i),Cn("load",i);break;case"form":Cn("reset",i),Cn("submit",i);break;case"details":Cn("toggle",i);break;case"input":kt(i,d),Cn("invalid",i),pr(p,"onChange");break;case"select":i._wrapperState={wasMultiple:!!d.multiple},Cn("invalid",i),pr(p,"onChange");break;case"textarea":Xn(i,d),Cn("invalid",i),pr(p,"onChange")}for(c in fr(u,d),f=null,d)d.hasOwnProperty(c)&&(s=d[c],"children"===c?"string"==typeof s?i.textContent!==s&&(f=["children",s]):"number"==typeof s&&i.textContent!==""+s&&(f=["children",""+s]):b.hasOwnProperty(c)&&null!=s&&pr(p,c));switch(u){case"input":Ve(i),St(i,d,!0);break;case"textarea":Ve(i),Zn(i);break;case"select":case"option":break;default:"function"==typeof d.onClick&&(i.onclick=hr)}c=f,l.updateQueue=c,(l=null!==c)&&mi(t)}else{d=t,i=c,p=l,f=9===u.nodeType?u:u.ownerDocument,s===er.html&&(s=tr(i)),s===er.html?"script"===i?((i=f.createElement("div")).innerHTML="<script><\/script>",f=i.removeChild(i.firstChild)):"string"==typeof 
p.is?f=f.createElement(i,{is:p.is}):(f=f.createElement(i),"select"===i&&p.multiple&&(f.multiple=!0)):f=f.createElementNS(s,i),(i=f)[M]=d,i[F]=l,vi(i,t,!1,!1),p=i;var h=u,m=dr(f=c,d=l);switch(f){case"iframe":case"object":Cn("load",p),u=d;break;case"video":case"audio":for(u=0;u<re.length;u++)Cn(re[u],p);u=d;break;case"source":Cn("error",p),u=d;break;case"img":case"image":case"link":Cn("error",p),Cn("load",p),u=d;break;case"form":Cn("reset",p),Cn("submit",p),u=d;break;case"details":Cn("toggle",p),u=d;break;case"input":kt(p,d),u=_t(p,d),Cn("invalid",p),pr(h,"onChange");break;case"option":u=Gn(p,d);break;case"select":p._wrapperState={wasMultiple:!!d.multiple},u=o({},d,{value:void 0}),Cn("invalid",p),pr(h,"onChange");break;case"textarea":Xn(p,d),u=Yn(p,d),Cn("invalid",p),pr(h,"onChange");break;default:u=d}fr(f,u),s=void 0;var v=f,g=p,y=u;for(s in y)if(y.hasOwnProperty(s)){var w=y[s];"style"===s?cr(g,w):"dangerouslySetInnerHTML"===s?null!=(w=w?w.__html:void 0)&&ir(g,w):"children"===s?"string"==typeof w?("textarea"!==v||""!==w)&&ar(g,w):"number"==typeof w&&ar(g,""+w):"suppressContentEditableWarning"!==s&&"suppressHydrationWarning"!==s&&"autoFocus"!==s&&(b.hasOwnProperty(s)?null!=w&&pr(h,s):null!=w&&bt(g,s,w,m))}switch(f){case"input":Ve(p),St(p,d,!1);break;case"textarea":Ve(p),Zn(p);break;case"option":null!=d.value&&p.setAttribute("value",""+wt(d.value));break;case"select":(u=p).multiple=!!d.multiple,null!=(p=d.value)?Qn(u,!!d.multiple,p,!1):null!=d.defaultValue&&Qn(u,!!d.multiple,d.defaultValue,!0);break;default:"function"==typeof u.onClick&&(p.onclick=hr)}(l=gr(c,l))&&mi(t),t.stateNode=i}null!==t.ref&&(t.effectTag|=128)}else null===t.stateNode&&a("166");break;case 6:i&&null!=t.stateNode?bi(i,t,i.memoizedProps,l):("string"!=typeof l&&(null===t.stateNode&&a("166")),i=Co(Eo.current),Co(To.current),Zo(t)?(c=(l=t).stateNode,i=l.memoizedProps,c[M]=l,(l=c.nodeValue!==i)&&mi(t)):(c=t,(l=(9===i.nodeType?i:i.ownerDocument).createTextNode(l))[M]=t,c.stateNode=l));break;case 
11:break;case 13:l=t.memoizedState,c=null!==i?i.memoizedState:null,(null!==l&&l.didTimeout)!==(null!==c&&c.didTimeout)&&(t.effectTag|=4);break;case 7:case 8:case 12:break;case 4:Oo(),gi(t);break;case 10:wo(t);break;case 9:case 14:break;case 17:jr(t.type)&&Nr();break;default:a("156")}if(Mi=null,t=e,1073741823===Ii||1073741823!==t.childExpirationTime){for(l=0,c=t.child;null!==c;)i=c.expirationTime,u=c.childExpirationTime,(0===l||0!==i&&i<l)&&(l=i),(0===l||0!==u&&u<l)&&(l=u),c=c.sibling;t.childExpirationTime=l}null!==n&&0==(512&n.effectTag)&&(null===n.firstEffect&&(n.firstEffect=e.firstEffect),null!==e.lastEffect&&(null!==n.lastEffect&&(n.lastEffect.nextEffect=e.firstEffect),n.lastEffect=e.lastEffect),1<e.effectTag&&(null!==n.lastEffect?n.lastEffect.nextEffect=e:n.firstEffect=e,n.lastEffect=e))}else{if(null!==(e=Oi(e)))return e.effectTag&=511,e;null!==n&&(n.firstEffect=n.lastEffect=null,n.effectTag|=512)}if(null!==r)return r;if(null===n)break;e=n}return null}function $i(e){var t=hi(e.alternate,e,Ii);return e.memoizedProps=e.pendingProps,null===t&&(t=Vi(e)),Ai.current=null,t}function qi(e,t,n){Li&&a("243"),Li=!0,Ai.currentDispatcher=Ri;var r=e.nextExpirationTimeToWorkOn;r===Ii&&e===Fi&&null!==Mi||(Hi(),Ii=r,Mi=$r((Fi=e).current,null),e.pendingCommitExpirationTime=0);for(var o=!1;;){try{if(t)for(;null!==Mi&&!Na();)Mi=$i(Mi);else for(;null!==Mi;)Mi=$i(Mi)}catch(v){if(null===Mi)o=!0,La(v);else{null===Mi&&a("271");var i=Mi,l=i.return;if(null!==l){e:{var u=e,c=l,s=i,f=v;if(l=Ii,s.effectTag|=512,s.firstEffect=s.lastEffect=null,null!==f&&"object"==typeof f&&"function"==typeof f.then){var d=f;f=c;var p=-1,h=-1;do{if(13===f.tag){var m=f.alternate;if(null!==m&&(null!==(m=m.memoizedState)&&m.didTimeout)){h=10*(m.timedOutAt-2);break}"number"==typeof(m=f.pendingProps.maxDuration)&&(0>=m?p=0:(-1===p||m<p)&&(p=m))}f=f.return}while(null!==f);f=c;do{if((m=13===f.tag)&&(void 
0===f.memoizedProps.fallback?m=!1:m=null===(m=f.memoizedState)||!m.didTimeout),m){if(c=Qi.bind(null,u,f,s,0==(1&f.mode)?1:l),d.then(c,c),0==(1&f.mode)){f.effectTag|=32,ni(s.alternate,s,null,l),s.effectTag&=-513,1===s.tag&&(s.effectTag&=-421,null===s.alternate&&(s.tag=17));break e}-1===p?u=1073741823:(-1===h&&(h=10*(Zr(u,l)-2)-5e3),u=h+p),0<=u&&Ui<u&&(Ui=u),f.effectTag|=1024,f.expirationTime=l;break e}f=f.return}while(null!==f);f=Error("An update was suspended, but no placeholder UI was provided.")}Di=!0,f=ho(f,s),u=c;do{switch(u.tag){case 3:s=f,u.effectTag|=1024,u.expirationTime=l,lo(u,l=Ci(u,s,l));break e;case 1:if(s=f,c=u.type,d=u.stateNode,0==(64&u.effectTag)&&("function"==typeof c.getDerivedStateFromError||null!==d&&"function"==typeof d.componentDidCatch&&(null===Bi||!Bi.has(d)))){u.effectTag|=1024,u.expirationTime=l,lo(u,l=Pi(u,s,l));break e}}u=u.return}while(null!==u)}Mi=Vi(i);continue}o=!0,La(v)}}break}if(Li=!1,yo=go=vo=Ai.currentDispatcher=null,o)Fi=null,e.finishedWork=null;else if(null!==Mi)e.finishedWork=null;else{if(null===(t=e.current.alternate)&&a("281"),Fi=null,Di){if(o=e.latestPendingTime,i=e.latestSuspendedTime,l=e.latestPingedTime,0!==o&&o>r||0!==i&&i>r||0!==l&&l>r)return Jr(e,r),void Ta(e,t,r,e.expirationTime,-1);if(!e.didError&&!n)return e.didError=!0,r=e.nextExpirationTimeToWorkOn=r,n=e.expirationTime=1,void Ta(e,t,r,n,-1)}n||-1===Ui?(e.pendingCommitExpirationTime=r,e.finishedWork=t):(Jr(e,r),(n=10*(Zr(e,r)-2))<Ui&&(Ui=n),n=10*(Sa()-2),n=Ui-n,Ta(e,t,r,e.expirationTime,0>n?0:n))}}function Ki(e,t){var n;e:{for(Li&&!zi&&a("263"),n=e.return;null!==n;){switch(n.tag){case 1:var r=n.stateNode;if("function"==typeof n.type.getDerivedStateFromError||"function"==typeof r.componentDidCatch&&(null===Bi||!Bi.has(r))){ao(n,e=Pi(n,e=ho(t,e),1)),Xi(n,1),n=void 0;break e}break;case 3:ao(n,e=Ci(n,e=ho(t,e),1)),Xi(n,1),n=void 0;break e}n=n.return}3===e.tag&&(ao(e,n=Ci(e,n=ho(t,e),1)),Xi(e,1)),n=void 0}return n}function Gi(e,t){return 
0!==Ni?e=Ni:Li?e=zi?1:Ii:1&t.mode?(e=pa?2+10*(1+((e-2+15)/10|0)):2+25*(1+((e-2+500)/25|0)),null!==Fi&&e===Ii&&(e+=1)):e=1,pa&&e>aa&&(aa=e),e}function Qi(e,t,n,r){var o=e.earliestSuspendedTime,i=e.latestSuspendedTime;if(0!==o&&r>=o&&r<=i){i=o=r,e.didError=!1;var a=e.latestPingedTime;(0===a||a<i)&&(e.latestPingedTime=i),eo(i,e)}else Xr(e,o=Gi(o=Sa(),t));0!=(1&t.mode)&&e===Fi&&Ii===r&&(Fi=null),Yi(t,o),0==(1&t.mode)&&(Yi(n,o),1===n.tag&&null!==n.stateNode&&((t=oo(o)).tag=2,ao(n,t))),0!==(n=e.expirationTime)&&Ea(e,n)}function Yi(e,t){(0===e.expirationTime||e.expirationTime>t)&&(e.expirationTime=t);var n=e.alternate;null!==n&&(0===n.expirationTime||n.expirationTime>t)&&(n.expirationTime=t);var r=e.return,o=null;if(null===r&&3===e.tag)o=e.stateNode;else for(;null!==r;){if(n=r.alternate,(0===r.childExpirationTime||r.childExpirationTime>t)&&(r.childExpirationTime=t),null!==n&&(0===n.childExpirationTime||n.childExpirationTime>t)&&(n.childExpirationTime=t),null===r.return&&3===r.tag){o=r.stateNode;break}r=r.return}return null===o?null:o}function Xi(e,t){null!==(e=Yi(e,t))&&(!Li&&0!==Ii&&t<Ii&&Hi(),Xr(e,t),Li&&!zi&&Fi===e||Ea(e,e.expirationTime),ba>ya&&(ba=0,a("185")))}function Ji(e,t,n,r,o){var i=Ni;Ni=1;try{return e(t,n,r,o)}finally{Ni=i}}var Zi=null,ea=null,ta=0,na=void 0,ra=!1,oa=null,ia=0,aa=0,la=!1,ua=!1,ca=null,sa=null,fa=!1,da=!1,pa=!1,ha=null,ma=i.unstable_now(),va=2+(ma/10|0),ga=va,ya=50,ba=0,wa=null,_a=1;function ka(){va=2+((i.unstable_now()-ma)/10|0)}function xa(e,t){if(0!==ta){if(t>ta)return;null!==na&&i.unstable_cancelCallback(na)}ta=t,e=i.unstable_now()-ma,na=i.unstable_scheduleCallback(Pa,{timeout:10*(t-2)-e})}function Ta(e,t,n,r,o){e.expirationTime=r,0!==o||Na()?0<o&&(e.timeoutHandle=br(function(e,t,n){e.pendingCommitExpirationTime=n,e.finishedWork=t,ka(),ga=va,Ra(e,n)}.bind(null,e,t,n),o)):(e.pendingCommitExpirationTime=n,e.finishedWork=t)}function Sa(){return ra?ga:(Ca(),0!==ia&&1073741823!==ia||(ka(),ga=va),ga)}function 
Ea(e,t){if(null===e.nextScheduledRoot)e.expirationTime=t,null===ea?(Zi=ea=e,e.nextScheduledRoot=e):(ea=ea.nextScheduledRoot=e).nextScheduledRoot=Zi;else{var n=e.expirationTime;(0===n||t<n)&&(e.expirationTime=t)}ra||(fa?da&&(oa=e,ia=1,Aa(e,1,!0)):1===t?Oa(1,null):xa(e,t))}function Ca(){var e=0,t=null;if(null!==ea)for(var n=ea,r=Zi;null!==r;){var o=r.expirationTime;if(0===o){if((null===n||null===ea)&&a("244"),r===r.nextScheduledRoot){Zi=ea=r.nextScheduledRoot=null;break}if(r===Zi)Zi=o=r.nextScheduledRoot,ea.nextScheduledRoot=o,r.nextScheduledRoot=null;else{if(r===ea){(ea=n).nextScheduledRoot=Zi,r.nextScheduledRoot=null;break}n.nextScheduledRoot=r.nextScheduledRoot,r.nextScheduledRoot=null}r=n.nextScheduledRoot}else{if((0===e||o<e)&&(e=o,t=r),r===ea)break;if(1===e)break;n=r,r=r.nextScheduledRoot}}oa=t,ia=e}function Pa(e){if(e.didTimeout&&null!==Zi){ka();var t=Zi;do{var n=t.expirationTime;0!==n&&va>=n&&(t.nextExpirationTimeToWorkOn=va),t=t.nextScheduledRoot}while(t!==Zi)}Oa(0,e)}function Oa(e,t){if(sa=t,Ca(),null!==sa)for(ka(),ga=va;null!==oa&&0!==ia&&(0===e||e>=ia)&&(!la||va>=ia);)Aa(oa,ia,va>=ia),Ca(),ka(),ga=va;else for(;null!==oa&&0!==ia&&(0===e||e>=ia);)Aa(oa,ia,!0),Ca();if(null!==sa&&(ta=0,na=null),0!==ia&&xa(oa,ia),sa=null,la=!1,ba=0,wa=null,null!==ha)for(e=ha,ha=null,t=0;t<e.length;t++){var n=e[t];try{n._onComplete()}catch(r){ua||(ua=!0,ca=r)}}if(ua)throw e=ca,ca=null,ua=!1,e}function Ra(e,t){ra&&a("253"),oa=e,ia=t,Aa(e,t,!0),Oa(1,null)}function Aa(e,t,n){if(ra&&a("245"),ra=!0,null===sa||n){var r=e.finishedWork;null!==r?ja(e,r,t):(e.finishedWork=null,-1!==(r=e.timeoutHandle)&&(e.timeoutHandle=-1,wr(r)),qi(e,!1,n),null!==(r=e.finishedWork)&&ja(e,r,t))}else null!==(r=e.finishedWork)?ja(e,r,t):(e.finishedWork=null,-1!==(r=e.timeoutHandle)&&(e.timeoutHandle=-1,wr(r)),qi(e,!0,n),null!==(r=e.finishedWork)&&(Na()?e.finishedWork=r:ja(e,r,t)));ra=!1}function ja(e,t,n){var r=e.firstBatch;if(null!==r&&r._expirationTime<=n&&(null===ha?ha=[r]:ha.push(r),r._defer))return 
e.finishedWork=t,void(e.expirationTime=0);e.finishedWork=null,e===wa?ba++:(wa=e,ba=0),zi=Li=!0,e.current===t&&a("177");var o=e.pendingCommitExpirationTime;0===o&&a("261"),e.pendingCommitExpirationTime=0;var i=t.expirationTime,l=t.childExpirationTime,u=0===i||0!==l&&l<i?l:i;if(e.didError=!1,0===u)e.earliestPendingTime=0,e.latestPendingTime=0,e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0;else{var c=e.latestPendingTime;0!==c&&(c<u?e.earliestPendingTime=e.latestPendingTime=0:e.earliestPendingTime<u&&(e.earliestPendingTime=e.latestPendingTime));var s=e.earliestSuspendedTime;0===s?Xr(e,u):u>e.latestSuspendedTime?(e.earliestSuspendedTime=0,e.latestSuspendedTime=0,e.latestPingedTime=0,Xr(e,u)):u<s&&Xr(e,u)}if(eo(0,e),Ai.current=null,1<t.effectTag)if(null!==t.lastEffect){t.lastEffect.nextEffect=t;var f=t.firstEffect}else f=t;else f=t.firstEffect;mr=En;var d=Un();if(Dn(d)){if("selectionStart"in d)var p={start:d.selectionStart,end:d.selectionEnd};else e:{var h=d.ownerDocument,m=h&&h.defaultView||window,v=m.getSelection&&m.getSelection();if(v&&0!==v.rangeCount){var g=v.anchorNode,y=v.anchorOffset,b=v.focusNode,w=v.focusOffset;try{g.nodeType,b.nodeType}catch(ze){p=null;break e}var _=0,k=-1,x=-1,T=0,S=0,E=d,C=null;t:for(;;){for(var P;E!==g||0!==y&&3!==E.nodeType||(k=_+y),E!==b||0!==w&&3!==E.nodeType||(x=_+w),3===E.nodeType&&(_+=E.nodeValue.length),null!==(P=E.firstChild);)C=E,E=P;for(;;){if(E===d)break t;if(C===g&&++T===y&&(k=_),C===b&&++S===w&&(x=_),null!==(P=E.nextSibling))break;C=(E=C).parentNode}E=P}p=-1===k||-1===x?null:{start:k,end:x}}else p=null}var O=p||{start:0,end:0}}else O=null;for(vr={focusedElem:d,selectionRange:O},En=!1,Wi=f;null!==Wi;){var R=!1,A=void 0;try{for(;null!==Wi;){if(256&Wi.effectTag){var j=Wi.alternate;e:{var N=Wi;switch(N.tag){case 1:if(256&N.effectTag&&null!==j){var L=j.memoizedProps,M=j.memoizedState,F=N.stateNode;F.props=N.memoizedProps,F.state=N.memoizedState;var 
I=F.getSnapshotBeforeUpdate(L,M);F.__reactInternalSnapshotBeforeUpdate=I}break e;case 3:case 5:case 6:case 4:case 17:break e;default:a("163")}}}Wi=Wi.nextEffect}}catch(ze){R=!0,A=ze}R&&(null===Wi&&a("178"),Ki(Wi,A),null!==Wi&&(Wi=Wi.nextEffect))}for(Wi=f;null!==Wi;){var U=!1,D=void 0;try{for(;null!==Wi;){var W=Wi.effectTag;if(16&W&&ar(Wi.stateNode,""),128&W){var z=Wi.alternate;if(null!==z){var B=z.ref;null!==B&&("function"==typeof B?B(null):B.current=null)}}switch(14&W){case 2:Ti(Wi),Wi.effectTag&=-3;break;case 6:Ti(Wi),Wi.effectTag&=-3,Ei(Wi.alternate,Wi);break;case 4:Ei(Wi.alternate,Wi);break;case 8:var H=Wi;Si(H);var V=H;V.return=null,V.child=null,V.alternate&&(V.alternate.child=null,V.alternate.return=null)}Wi=Wi.nextEffect}}catch(ze){U=!0,D=ze}U&&(null===Wi&&a("178"),Ki(Wi,D),null!==Wi&&(Wi=Wi.nextEffect))}var $=vr,q=Un(),K=$.focusedElem,G=$.selectionRange;if(q!==K&&K&&K.ownerDocument&&function e(t,n){return!(!t||!n)&&(t===n||(!t||3!==t.nodeType)&&(n&&3===n.nodeType?e(t,n.parentNode):"contains"in t?t.contains(n):!!t.compareDocumentPosition&&!!(16&t.compareDocumentPosition(n))))}(K.ownerDocument.documentElement,K)){if(null!==G&&Dn(K)){var Q=G.start,Y=G.end;if(void 0===Y&&(Y=Q),"selectionStart"in K)K.selectionStart=Q,K.selectionEnd=Math.min(Y,K.value.length);else{var X=K.ownerDocument||document,J=(X&&X.defaultView||window).getSelection(),Z=K.textContent.length,ee=Math.min(G.start,Z),te=void 0===G.end?ee:Math.min(G.end,Z);if(!J.extend&&ee>te){var ne=te;te=ee,ee=ne}var re=In(K,ee),oe=In(K,te);if(re&&oe&&(1!==J.rangeCount||J.anchorNode!==re.node||J.anchorOffset!==re.offset||J.focusNode!==oe.node||J.focusOffset!==oe.offset)){var ie=X.createRange();ie.setStart(re.node,re.offset),J.removeAllRanges(),ee>te?(J.addRange(ie),J.extend(oe.node,oe.offset)):(ie.setEnd(oe.node,oe.offset),J.addRange(ie))}}}for(var ae=[],le=K;le=le.parentNode;)1===le.nodeType&&ae.push({element:le,left:le.scrollLeft,top:le.scrollTop});"function"==typeof K.focus&&K.focus();for(var 
ue=0;ue<ae.length;ue++){var ce=ae[ue];ce.element.scrollLeft=ce.left,ce.element.scrollTop=ce.top}}for(vr=null,En=!!mr,mr=null,e.current=t,Wi=f;null!==Wi;){var se=!1,fe=void 0;try{for(;null!==Wi;){var de=Wi.effectTag;if(36&de){var pe=void 0,he=Wi.alternate,me=Wi;switch(me.tag){case 1:var ve=me.stateNode;if(4&me.effectTag)if(null===he)ve.props=me.memoizedProps,ve.state=me.memoizedState,ve.componentDidMount();else{var ge=he.memoizedProps,ye=he.memoizedState;ve.props=me.memoizedProps,ve.state=me.memoizedState,ve.componentDidUpdate(ge,ye,ve.__reactInternalSnapshotBeforeUpdate)}var be=me.updateQueue;null!==be&&(ve.props=me.memoizedProps,ve.state=me.memoizedState,fo(0,be,ve));break;case 3:var we=me.updateQueue;if(null!==we){var _e=null;if(null!==me.child)switch(me.child.tag){case 5:_e=me.child.stateNode;break;case 1:_e=me.child.stateNode}fo(0,we,_e)}break;case 5:var ke=me.stateNode;null===he&&4&me.effectTag&&gr(me.type,me.memoizedProps)&&ke.focus();break;case 6:case 4:case 12:break;case 13:if(32&me.effectTag){me.memoizedState={alreadyCaptured:!0,didTimeout:!1,timedOutAt:0},Xi(me,1);break}var xe=null!==he?he.memoizedState:null,Te=me.memoizedState,Se=null!==xe&&xe.didTimeout,Ee=me;if(null===Te?pe=!1:(pe=Te.didTimeout)&&(Ee=me.child,Te.alreadyCaptured=!1,0===Te.timedOutAt&&(Te.timedOutAt=Sa())),pe!==Se&&null!==Ee)e:for(var Ce=Ee,Pe=pe,Oe=Ce;;){if(5===Oe.tag){var Re=Oe.stateNode;if(Pe)Re.style.display="none";else{var Ae=Oe.stateNode,je=Oe.memoizedProps.style,Ne=null!=je&&je.hasOwnProperty("display")?je.display:null;Ae.style.display=Ne}}else if(6===Oe.tag)Oe.stateNode.nodeValue=Pe?"":Oe.memoizedProps;else if(null!==Oe.child){Oe.child.return=Oe,Oe=Oe.child;continue}if(Oe===Ce)break e;for(;null===Oe.sibling;){if(null===Oe.return||Oe.return===Ce)break e;Oe=Oe.return}Oe.sibling.return=Oe.return,Oe=Oe.sibling}break;case 17:break;default:a("163")}}if(128&de){var Le=Wi.ref;if(null!==Le){var Me=Wi.stateNode;switch(Wi.tag){case 5:var Fe=Me;break;default:Fe=Me}"function"==typeof 
Le?Le(Fe):Le.current=Fe}}var Ie=Wi.nextEffect;Wi.nextEffect=null,Wi=Ie}}catch(ze){se=!0,fe=ze}se&&(null===Wi&&a("178"),Ki(Wi,fe),null!==Wi&&(Wi=Wi.nextEffect))}Li=zi=!1,"function"==typeof Dr&&Dr(t.stateNode);var Ue=t.expirationTime,De=t.childExpirationTime,We=0===Ue||0!==De&&De<Ue?De:Ue;0===We&&(Bi=null),e.expirationTime=We,e.finishedWork=null}function Na(){return!!la||!(null===sa||sa.timeRemaining()>_a)&&(la=!0)}function La(e){null===oa&&a("246"),oa.expirationTime=0,ua||(ua=!0,ca=e)}function Ma(e,t){var n=fa;fa=!0;try{return e(t)}finally{(fa=n)||ra||Oa(1,null)}}function Fa(e,t){if(fa&&!da){da=!0;try{return e(t)}finally{da=!1}}return e(t)}function Ia(e,t,n){if(pa)return e(t,n);fa||ra||0===aa||(Oa(aa,null),aa=0);var r=pa,o=fa;fa=pa=!0;try{return e(t,n)}finally{pa=r,(fa=o)||ra||Oa(1,null)}}function Ua(e,t,n,r,o){var i=t.current;e:if(n){t:{2===rn(n=n._reactInternalFiber)&&1===n.tag||a("170");var l=n;do{switch(l.tag){case 3:l=l.stateNode.context;break t;case 1:if(jr(l.type)){l=l.stateNode.__reactInternalMemoizedMergedChildContext;break t}}l=l.return}while(null!==l);a("171"),l=void 0}if(1===n.tag){var u=n.type;if(jr(u)){n=Fr(n,u,l);break e}}n=l}else n=Cr;return null===t.context?t.context=n:t.pendingContext=n,t=o,(o=oo(r)).payload={element:e},null!==(t=void 0===t?null:t)&&(o.callback=t),ao(i,o),Xi(i,r),r}function Da(e,t,n,r){var o=t.current;return Ua(e,t,n,o=Gi(Sa(),o),r)}function Wa(e){if(!(e=e.current).child)return null;switch(e.child.tag){case 5:default:return e.child.stateNode}}function za(e){var t=2+25*(1+((Sa()-2+500)/25|0));t<=ji&&(t=ji+1),this._expirationTime=ji=t,this._root=e,this._callbacks=this._next=null,this._hasChildren=this._didComplete=!1,this._children=null,this._defer=!0}function Ba(){this._callbacks=null,this._didCommit=!1,this._onCommit=this._onCommit.bind(this)}function 
Ha(e,t,n){e={current:t=Hr(3,null,null,t?3:0),containerInfo:e,pendingChildren:null,earliestPendingTime:0,latestPendingTime:0,earliestSuspendedTime:0,latestSuspendedTime:0,latestPingedTime:0,didError:!1,pendingCommitExpirationTime:0,finishedWork:null,timeoutHandle:-1,context:null,pendingContext:null,hydrate:n,nextExpirationTimeToWorkOn:0,expirationTime:0,firstBatch:null,nextScheduledRoot:null},this._internalRoot=t.stateNode=e}function Va(e){return!(!e||1!==e.nodeType&&9!==e.nodeType&&11!==e.nodeType&&(8!==e.nodeType||" react-mount-point-unstable "!==e.nodeValue))}function $a(e,t,n,r,o){Va(n)||a("200");var i=n._reactRootContainer;if(i){if("function"==typeof o){var l=o;o=function(){var e=Wa(i._internalRoot);l.call(e)}}null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)}else{if(i=n._reactRootContainer=function(e,t){if(t||(t=!(!(t=e?9===e.nodeType?e.documentElement:e.firstChild:null)||1!==t.nodeType||!t.hasAttribute("data-reactroot"))),!t)for(var n;n=e.lastChild;)e.removeChild(n);return new Ha(e,!1,t)}(n,r),"function"==typeof o){var u=o;o=function(){var e=Wa(i._internalRoot);u.call(e)}}Fa(function(){null!=e?i.legacy_renderSubtreeIntoContainer(e,t,o):i.render(t,o)})}return Wa(i._internalRoot)}function qa(e,t){var n=2<arguments.length&&void 0!==arguments[2]?arguments[2]:null;return Va(t)||a("200"),function(e,t,n){var r=3<arguments.length&&void 0!==arguments[3]?arguments[3]:null;return{$$typeof:Ye,key:null==r?null:""+r,children:e,containerInfo:t,implementation:n}}(e,t,null,n)}Pe=function(e,t,n){switch(t){case"input":if(Tt(e,n),t=n.name,"radio"===n.type&&null!=t){for(n=e;n.parentNode;)n=n.parentNode;for(n=n.querySelectorAll("input[name="+JSON.stringify(""+t)+'][type="radio"]'),t=0;t<n.length;t++){var r=n[t];if(r!==e&&r.form===e.form){var o=W(r);o||a("90"),$e(r),Tt(r,o)}}}break;case"textarea":Jn(e,n);break;case"select":null!=(t=n.value)&&Qn(e,!!n.multiple,t,!1)}},za.prototype.render=function(e){this._defer||a("250"),this._hasChildren=!0,this._children=e;var 
t=this._root._internalRoot,n=this._expirationTime,r=new Ba;return Ua(e,t,null,n,r._onCommit),r},za.prototype.then=function(e){if(this._didComplete)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},za.prototype.commit=function(){var e=this._root._internalRoot,t=e.firstBatch;if(this._defer&&null!==t||a("251"),this._hasChildren){var n=this._expirationTime;if(t!==this){this._hasChildren&&(n=this._expirationTime=t._expirationTime,this.render(this._children));for(var r=null,o=t;o!==this;)r=o,o=o._next;null===r&&a("251"),r._next=o._next,this._next=t,e.firstBatch=this}this._defer=!1,Ra(e,n),t=this._next,this._next=null,null!==(t=e.firstBatch=t)&&t._hasChildren&&t.render(t._children)}else this._next=null,this._defer=!1},za.prototype._onComplete=function(){if(!this._didComplete){this._didComplete=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++)(0,e[t])()}},Ba.prototype.then=function(e){if(this._didCommit)e();else{var t=this._callbacks;null===t&&(t=this._callbacks=[]),t.push(e)}},Ba.prototype._onCommit=function(){if(!this._didCommit){this._didCommit=!0;var e=this._callbacks;if(null!==e)for(var t=0;t<e.length;t++){var n=e[t];"function"!=typeof n&&a("191",n),n()}}},Ha.prototype.render=function(e,t){var n=this._internalRoot,r=new Ba;return null!==(t=void 0===t?null:t)&&r.then(t),Da(e,n,null,r._onCommit),r},Ha.prototype.unmount=function(e){var t=this._internalRoot,n=new Ba;return null!==(e=void 0===e?null:e)&&n.then(e),Da(null,t,null,n._onCommit),n},Ha.prototype.legacy_renderSubtreeIntoContainer=function(e,t,n){var r=this._internalRoot,o=new Ba;return null!==(n=void 0===n?null:n)&&o.then(n),Da(t,r,e,o._onCommit),o},Ha.prototype.createBatch=function(){var e=new za(this),t=e._expirationTime,n=this._internalRoot,r=n.firstBatch;if(null===r)n.firstBatch=e,e._next=null;else{for(n=null;null!==r&&r._expirationTime<=t;)n=r,r=r._next;e._next=r,null!==n&&(n._next=e)}return e},Le=Ma,Me=Ia,Fe=function(){ra||0===aa||(Oa(aa,null),aa=0)};var 
Ka={createPortal:qa,findDOMNode:function(e){if(null==e)return null;if(1===e.nodeType)return e;var t=e._reactInternalFiber;return void 0===t&&("function"==typeof e.render?a("188"):a("268",Object.keys(e))),e=null===(e=an(t))?null:e.stateNode},hydrate:function(e,t,n){return $a(null,e,t,!0,n)},render:function(e,t,n){return $a(null,e,t,!1,n)},unstable_renderSubtreeIntoContainer:function(e,t,n,r){return(null==e||void 0===e._reactInternalFiber)&&a("38"),$a(e,t,n,!1,r)},unmountComponentAtNode:function(e){return Va(e)||a("40"),!!e._reactRootContainer&&(Fa(function(){$a(null,null,e,!1,function(){e._reactRootContainer=null})}),!0)},unstable_createPortal:function(){return qa.apply(void 0,arguments)},unstable_batchedUpdates:Ma,unstable_interactiveUpdates:Ia,flushSync:function(e,t){ra&&a("187");var n=fa;fa=!0;try{return Ji(e,t)}finally{fa=n,Oa(1,null)}},unstable_flushControlled:function(e){var t=fa;fa=!0;try{Ji(e)}finally{(fa=t)||ra||Oa(1,null)}},__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED:{Events:[U,D,W,A.injectEventPluginsByName,y,q,function(e){E(e,$)},je,Ne,Rn,N]},unstable_createRoot:function(e,t){return Va(e)||a("278"),new Ha(e,!0,null!=t&&!0===t.hydrate)}};!function(e){var t=e.findFiberByHostInstance;(function(e){if("undefined"==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__)return!1;var t=__REACT_DEVTOOLS_GLOBAL_HOOK__;if(t.isDisabled||!t.supportsFiber)return!0;try{var n=t.inject(e);Dr=zr(function(e){return t.onCommitFiberRoot(n,e)}),Wr=zr(function(e){return t.onCommitFiberUnmount(n,e)})}catch(r){}})(o({},e,{findHostInstanceByFiber:function(e){return null===(e=an(e))?null:e.stateNode},findFiberByHostInstance:function(e){return t?t(e):null}}))}({findFiberByHostInstance:I,bundleType:0,version:"16.6.0",rendererPackageName:"react-dom"});var Ga={default:Ka},Qa=Ga&&Ka||Ga;e.exports=Qa.default||Qa},function(e,t,n){"use strict";e.exports=n(132)},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=null,o=3,i=-1,a=-1,l=!1,u=!1,c="object"==typeof 
performance&&"function"==typeof performance.now,s={timeRemaining:c?function(){if(null!==r&&r.expirationTime<a)return 0;var e=b()-performance.now();return 0<e?e:0}:function(){if(null!==r&&r.expirationTime<a)return 0;var e=b()-Date.now();return 0<e?e:0},didTimeout:!1};function f(){if(!l){var e=r.expirationTime;u?y():u=!0,g(h,e)}}function d(){var e=r,t=r.next;if(r===t)r=null;else{var n=r.previous;r=n.next=t,t.previous=n}e.next=e.previous=null,n=e.callback,t=e.expirationTime,e=e.priorityLevel;var i=o,l=a;o=e,a=t;try{var u=n(s)}finally{o=i,a=l}if("function"==typeof u)if(u={callback:u,priorityLevel:e,expirationTime:t,next:null,previous:null},null===r)r=u.next=u.previous=u;else{n=null,e=r;do{if(e.expirationTime>=t){n=e;break}e=e.next}while(e!==r);null===n?n=r:n===r&&(r=u,f()),(t=n.previous).next=n.previous=u,u.next=n,u.previous=t}}function p(){if(-1===i&&null!==r&&1===r.priorityLevel){l=!0,s.didTimeout=!0;try{do{d()}while(null!==r&&1===r.priorityLevel)}finally{l=!1,null!==r?f():u=!1}}}function h(e){l=!0,s.didTimeout=e;try{if(e)for(;null!==r;){var n=t.unstable_now();if(!(r.expirationTime<=n))break;do{d()}while(null!==r&&r.expirationTime<=n)}else if(null!==r)do{d()}while(null!==r&&0<b()-t.unstable_now())}finally{l=!1,null!==r?f():u=!1,p()}}var m,v,g,y,b,w=Date,_="function"==typeof setTimeout?setTimeout:void 0,k="function"==typeof clearTimeout?clearTimeout:void 0,x="function"==typeof requestAnimationFrame?requestAnimationFrame:void 0,T="function"==typeof cancelAnimationFrame?cancelAnimationFrame:void 0;function S(e){m=x(function(t){k(v),e(t)}),v=_(function(){T(m),e(t.unstable_now())},100)}if(c){var E=performance;t.unstable_now=function(){return E.now()}}else t.unstable_now=function(){return w.now()};if("undefined"!=typeof window&&window._schedMock){var C=window._schedMock;g=C[0],y=C[1],b=C[2]}else if("undefined"==typeof window||"function"!=typeof window.addEventListener){var P=null,O=-1,R=function(e,t){if(null!==P){var 
n=P;P=null;try{O=t,n(e)}finally{O=-1}}};g=function(e,t){-1!==O?setTimeout(g,0,e,t):(P=e,setTimeout(R,t,!0,t),setTimeout(R,1073741823,!1,1073741823))},y=function(){P=null},b=function(){return 1/0},t.unstable_now=function(){return-1===O?0:O}}else{"undefined"!=typeof console&&("function"!=typeof x&&console.error("This browser doesn't support requestAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"),"function"!=typeof T&&console.error("This browser doesn't support cancelAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills"));var A=null,j=!1,N=-1,L=!1,M=!1,F=0,I=33,U=33;b=function(){return F};var D="__reactIdleCallback$"+Math.random().toString(36).slice(2);window.addEventListener("message",function(e){if(e.source===window&&e.data===D){j=!1,e=A;var n=N;A=null,N=-1;var r=t.unstable_now(),o=!1;if(0>=F-r){if(!(-1!==n&&n<=r))return L||(L=!0,S(W)),A=e,void(N=n);o=!0}if(null!==e){M=!0;try{e(o)}finally{M=!1}}}},!1);var W=function(e){if(null!==A){S(W);var t=e-F+U;t<U&&I<U?(8>t&&(t=8),U=t<I?I:t):I=t,F=e+U,j||(j=!0,window.postMessage(D,"*"))}else L=!1};g=function(e,t){A=e,N=t,M||0>t?window.postMessage(D,"*"):L||(L=!0,S(W))},y=function(){A=null,j=!1,N=-1}}t.unstable_ImmediatePriority=1,t.unstable_UserBlockingPriority=2,t.unstable_NormalPriority=3,t.unstable_IdlePriority=4,t.unstable_runWithPriority=function(e,n){switch(e){case 1:case 2:case 3:case 4:break;default:e=3}var r=o,a=i;o=e,i=t.unstable_now();try{return n()}finally{o=r,i=a,p()}},t.unstable_scheduleCallback=function(e,n){var a=-1!==i?i:t.unstable_now();if("object"==typeof n&&null!==n&&"number"==typeof n.timeout)n=a+n.timeout;else switch(o){case 1:n=a+-1;break;case 2:n=a+250;break;case 4:n=a+1073741823;break;default:n=a+5e3}if(e={callback:e,priorityLevel:o,expirationTime:n,next:null,previous:null},null===r)r=e.next=e.previous=e,f();else{a=null;var 
l=r;do{if(l.expirationTime>n){a=l;break}l=l.next}while(l!==r);null===a?a=r:a===r&&(r=e,f()),(n=a.previous).next=a.previous=e,e.next=a,e.previous=n}return e},t.unstable_cancelCallback=function(e){var t=e.next;if(null!==t){if(t===e)r=null;else{e===r&&(r=t);var n=e.previous;n.next=t,t.previous=n}e.next=e.previous=null}},t.unstable_wrapCallback=function(e){var n=o;return function(){var r=o,a=i;o=n,i=t.unstable_now();try{return e.apply(this,arguments)}finally{o=r,i=a,p()}}},t.unstable_getCurrentPriorityLevel=function(){return o}},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=void 0;var o=r(n(7)),i=r(n(31)),a=r(n(45)),l=r(n(0)),u=r(n(134)),c=r(n(1)),s=n(141),f=r(n(142)),d={shouldUpdateScroll:c.default.func,children:c.default.element.isRequired,location:c.default.object.isRequired},p={scrollBehavior:c.default.object.isRequired},h=function(e){function t(t,n){var r;return r=e.call(this,t,n)||this,(0,a.default)((0,i.default)((0,i.default)(r)),"shouldUpdateScroll",function(e,t){var n=r.props.shouldUpdateScroll;return!n||n.call(r.scrollBehavior,e,t)}),(0,a.default)((0,i.default)((0,i.default)(r)),"registerElement",function(e,t,n){r.scrollBehavior.registerElement(e,t,n,r.getRouterProps())}),(0,a.default)((0,i.default)((0,i.default)(r)),"unregisterElement",function(e){r.scrollBehavior.unregisterElement(e)}),r.scrollBehavior=new u.default({addTransitionHook:s.globalHistory.listen,stateStorage:new f.default,getCurrentLocation:function(){return r.props.location},shouldUpdateScroll:r.shouldUpdateScroll}),r}(0,o.default)(t,e);var n=t.prototype;return n.getChildContext=function(){return{scrollBehavior:this}},n.componentDidUpdate=function(e){var t=this.props.location;if(t!==e.location){var 
n={location:e.location};window.__navigatingToLink?t.action="PUSH":t.action="POP",this.scrollBehavior.updateScroll(n,{history:s.globalHistory,location:t})}},n.componentWillUnmount=function(){this.scrollBehavior.stop()},n.getRouterProps=function(){return{location:this.props.location,history:s.globalHistory}},n.render=function(){return l.default.Children.only(this.props.children)},t}(l.default.Component);h.propTypes=d,h.childContextTypes=p;var m=h;t.default=m},function(e,t,n){"use strict";t.__esModule=!0;var r=s(n(135)),o=s(n(136)),i=s(n(137)),a=s(n(138)),l=s(n(139)),u=s(n(9)),c=n(140);function s(e){return e&&e.__esModule?e:{default:e}}var f=2,d=function(){function e(t){var n=this,r=t.addTransitionHook,u=t.stateStorage,s=t.getCurrentLocation,d=t.shouldUpdateScroll;if(function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this._restoreScrollRestoration=function(){if(n._oldScrollRestoration)try{window.history.scrollRestoration=n._oldScrollRestoration}catch(e){}},this._onWindowScroll=function(){if(n._saveWindowPositionHandle||(n._saveWindowPositionHandle=(0,l.default)(n._saveWindowPosition)),n._windowScrollTarget){var e=n._windowScrollTarget,t=e[0],r=e[1],o=(0,i.default)(window),u=(0,a.default)(window);o===t&&u===r&&(n._windowScrollTarget=null,n._cancelCheckWindowScroll())}},this._saveWindowPosition=function(){n._saveWindowPositionHandle=null,n._savePosition(null,window)},this._checkWindowScrollPosition=function(){n._checkWindowScrollHandle=null,n._windowScrollTarget&&(n.scrollToTarget(window,n._windowScrollTarget),++n._numWindowScrollAttempts,n._numWindowScrollAttempts>=f?n._windowScrollTarget=null:n._checkWindowScrollHandle=(0,l.default)(n._checkWindowScrollPosition))},this._stateStorage=u,this._getCurrentLocation=s,this._shouldUpdateScroll=d,"scrollRestoration"in 
window.history&&!(0,c.isMobileSafari)()){this._oldScrollRestoration=window.history.scrollRestoration;try{window.history.scrollRestoration="manual",(0,o.default)(window,"beforeunload",this._restoreScrollRestoration)}catch(p){this._oldScrollRestoration=null}}else this._oldScrollRestoration=null;this._saveWindowPositionHandle=null,this._checkWindowScrollHandle=null,this._windowScrollTarget=null,this._numWindowScrollAttempts=0,this._scrollElements={},(0,o.default)(window,"scroll",this._onWindowScroll),this._removeTransitionHook=r(function(){l.default.cancel(n._saveWindowPositionHandle),n._saveWindowPositionHandle=null,Object.keys(n._scrollElements).forEach(function(e){var t=n._scrollElements[e];l.default.cancel(t.savePositionHandle),t.savePositionHandle=null,n._saveElementPosition(e)})})}return e.prototype.registerElement=function(e,t,n,r){var i=this;this._scrollElements[e]&&(0,u.default)(!1);var a=function(){i._saveElementPosition(e)},c={element:t,shouldUpdateScroll:n,savePositionHandle:null,onScroll:function(){c.savePositionHandle||(c.savePositionHandle=(0,l.default)(a))}};this._scrollElements[e]=c,(0,o.default)(t,"scroll",c.onScroll),this._updateElementScroll(e,null,r)},e.prototype.unregisterElement=function(e){this._scrollElements[e]||(0,u.default)(!1);var t=this._scrollElements[e],n=t.element,o=t.onScroll,i=t.savePositionHandle;(0,r.default)(n,"scroll",o),l.default.cancel(i),delete this._scrollElements[e]},e.prototype.updateScroll=function(e,t){var n=this;this._updateWindowScroll(e,t),Object.keys(this._scrollElements).forEach(function(r){n._updateElementScroll(r,e,t)})},e.prototype.stop=function(){this._restoreScrollRestoration(),(0,r.default)(window,"scroll",this._onWindowScroll),this._cancelCheckWindowScroll(),this._removeTransitionHook()},e.prototype._cancelCheckWindowScroll=function(){l.default.cancel(this._checkWindowScrollHandle),this._checkWindowScrollHandle=null},e.prototype._saveElementPosition=function(e){var 
t=this._scrollElements[e];t.savePositionHandle=null,this._savePosition(e,t.element)},e.prototype._savePosition=function(e,t){this._stateStorage.save(this._getCurrentLocation(),e,[(0,i.default)(t),(0,a.default)(t)])},e.prototype._updateWindowScroll=function(e,t){this._cancelCheckWindowScroll(),this._windowScrollTarget=this._getScrollTarget(null,this._shouldUpdateScroll,e,t),this._numWindowScrollAttempts=0,this._checkWindowScrollPosition()},e.prototype._updateElementScroll=function(e,t,n){var r=this._scrollElements[e],o=r.element,i=r.shouldUpdateScroll,a=this._getScrollTarget(e,i,t,n);a&&this.scrollToTarget(o,a)},e.prototype._getDefaultScrollTarget=function(e){var t=e.hash;return t&&"#"!==t?"#"===t.charAt(0)?t.slice(1):t:[0,0]},e.prototype._getScrollTarget=function(e,t,n,r){var o=!t||t.call(this,n,r);if(!o||Array.isArray(o)||"string"==typeof o)return o;var i=this._getCurrentLocation();return this._getSavedScrollTarget(e,i)||this._getDefaultScrollTarget(i)},e.prototype._getSavedScrollTarget=function(e,t){return"PUSH"===t.action?null:this._stateStorage.read(t,e)},e.prototype.scrollToTarget=function(e,t){if("string"==typeof t){var n=document.getElementById(t)||document.getElementsByName(t)[0];if(n)return void n.scrollIntoView();t=[0,0]}var r=t,o=r[0],l=r[1];(0,i.default)(e,o),(0,a.default)(e,l)},e}();t.default=d,e.exports=t.default},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=void 0;var o=function(){};r(n(48)).default&&(o=document.addEventListener?function(e,t,n,r){return e.removeEventListener(t,n,r||!1)}:document.attachEvent?function(e,t,n){return e.detachEvent("on"+t,n)}:void 0);var i=o;t.default=i,e.exports=t.default},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=void 0;var o=function(){};r(n(48)).default&&(o=document.addEventListener?function(e,t,n,r){return e.addEventListener(t,n,r||!1)}:document.attachEvent?function(e,t,n){return 
e.attachEvent("on"+t,function(t){(t=t||window.event).target=t.target||t.srcElement,t.currentTarget=e,n.call(e,t)})}:void 0);var i=o;t.default=i,e.exports=t.default},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=function(e,t){var n=(0,o.default)(e);if(void 0===t)return n?"pageXOffset"in n?n.pageXOffset:n.document.documentElement.scrollLeft:e.scrollLeft;n?n.scrollTo(t,"pageYOffset"in n?n.pageYOffset:n.document.documentElement.scrollTop):e.scrollLeft=t};var o=r(n(67));e.exports=t.default},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=function(e,t){var n=(0,o.default)(e);if(void 0===t)return n?"pageYOffset"in n?n.pageYOffset:n.document.documentElement.scrollTop:e.scrollTop;n?n.scrollTo("pageXOffset"in n?n.pageXOffset:n.document.documentElement.scrollLeft,t):e.scrollTop=t};var o=r(n(67));e.exports=t.default},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=void 0;var o,i=r(n(48)),a="clearTimeout",l=function(e){var t=(new Date).getTime(),n=Math.max(0,16-(t-c)),r=setTimeout(e,n);return c=t,r},u=function(e,t){return e+(e?t[0].toUpperCase()+t.substr(1):t)+"AnimationFrame"};i.default&&["","webkit","moz","o","ms"].some(function(e){var t=u(e,"request");if(t in window)return a=u(e,"cancel"),l=function(e){return window[t](e)}});var c=(new Date).getTime();(o=function(e){return l(e)}).cancel=function(e){window[a]&&"function"==typeof window[a]&&window[a](e)};var s=o;t.default=s,e.exports=t.default},function(e,t,n){"use strict";t.__esModule=!0,t.isMobileSafari=function(){return/iPad|iPhone|iPod/.test(window.navigator.platform)&&/^((?!CriOS).)*Safari/.test(window.navigator.userAgent)}},function(e,t,n){"use strict";t.__esModule=!0;var r=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},o=function(e){return r({},e.location,{state:e.history.state,key:e.history.state&&e.history.state.key||"initial"})},i=function(e,t){var 
n=[],i=o(e),a=!1,l=function(){};return{get location(){return i},get transitioning(){return a},_onTransitionComplete:function(){a=!1,l()},listen:function(t){n.push(t);var r=function(){i=o(e),t({location:i,action:"POP"})};return e.addEventListener("popstate",r),function(){e.removeEventListener("popstate",r),n=n.filter(function(e){return e!==t})}},navigate:function(t){var u=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},c=u.state,s=u.replace,f=void 0!==s&&s;c=r({},c,{key:Date.now()+""});try{a||f?e.history.replaceState(c,null,t):e.history.pushState(c,null,t)}catch(p){e.location[f?"replace":"assign"](t)}i=o(e),a=!0;var d=new Promise(function(e){return l=e});return n.forEach(function(e){return e({location:i,action:"PUSH"})}),d}}},a=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"/",t=0,n=[{pathname:e,search:""}],r=[];return{get location(){return n[t]},addEventListener:function(e,t){},removeEventListener:function(e,t){},history:{get entries(){return n},get index(){return t},get state(){return r[t]},pushState:function(e,o,i){var a=i.split("?"),l=a[0],u=a[1],c=void 0===u?"":u;t++,n.push({pathname:l,search:c}),r.push(e)},replaceState:function(e,o,i){var a=i.split("?"),l=a[0],u=a[1],c=void 0===u?"":u;n[t]={pathname:l,search:c},r[t]=e}}}},l=i(!("undefined"==typeof window||!window.document||!window.document.createElement)?window:a()),u=l.navigate;t.globalHistory=l,t.navigate=u,t.createHistory=i,t.createMemorySource=a},function(e,t,n){"use strict";t.__esModule=!0,t.default=void 0;var r=function(){function e(){}var t=e.prototype;return t.read=function(e,t){var n=this.getStateKey(e,t);try{var r=window.sessionStorage.getItem(n);return JSON.parse(r)}catch(o){return window&&window.___GATSBY_REACT_ROUTER_SCROLL&&window.___GATSBY_REACT_ROUTER_SCROLL[n]?window.___GATSBY_REACT_ROUTER_SCROLL[n]:{}}},t.save=function(e,t,n){var 
r=this.getStateKey(e,t),o=JSON.stringify(n);try{window.sessionStorage.setItem(r,o)}catch(i){window&&window.___GATSBY_REACT_ROUTER_SCROLL?window.___GATSBY_REACT_ROUTER_SCROLL[r]=JSON.parse(o):(window.___GATSBY_REACT_ROUTER_SCROLL={},window.___GATSBY_REACT_ROUTER_SCROLL[r]=JSON.parse(o))}},t.getStateKey=function(e,t){var n="@@scroll|"+e.pathname;return null==t?n:n+"|"+t},e}();t.default=r},function(e,t,n){"use strict";var r=n(6);t.__esModule=!0,t.default=void 0;var o=r(n(7)),i=r(n(31)),a=r(n(45)),l=r(n(0)),u=r(n(49)),c=r(n(61)),s=r(n(1)),f={scrollKey:s.default.string.isRequired,shouldUpdateScroll:s.default.func,children:s.default.element.isRequired},d={scrollBehavior:s.default.object},p=function(e){function t(t,n){var r;return r=e.call(this,t,n)||this,(0,a.default)((0,i.default)((0,i.default)(r)),"shouldUpdateScroll",function(e,t){var n=r.props.shouldUpdateScroll;return!n||n.call(r.context.scrollBehavior.scrollBehavior,e,t)}),r.scrollKey=t.scrollKey,r}(0,o.default)(t,e);var n=t.prototype;return n.componentDidMount=function(){this.context.scrollBehavior.registerElement(this.props.scrollKey,u.default.findDOMNode(this),this.shouldUpdateScroll)},n.componentDidUpdate=function(e){(0,c.default)(e.scrollKey===this.props.scrollKey,"<ScrollContainer> does not support changing scrollKey.")},n.componentWillUnmount=function(){this.context.scrollBehavior.unregisterElement(this.scrollKey)},n.render=function(){return this.props.children},t}(l.default.Component);p.propTypes=f,p.contextTypes=d;var h=p;t.default=h},function(e,t,n){"use strict";n.r(t);var r=n(3);"https:"!==window.location.protocol&&"localhost"!==window.location.hostname?console.error("Service workers can only be used over HTTPS, or on localhost for development"):"serviceWorker"in navigator&&navigator.serviceWorker.register("/sw.js").then(function(e){e.addEventListener("updatefound",function(){Object(r.apiRunner)("onServiceWorkerUpdateFound",{serviceWorker:e});var 
t=e.installing;console.log("installingWorker",t),t.addEventListener("statechange",function(){switch(t.state){case"installed":navigator.serviceWorker.controller?(window.___swUpdated=!0,Object(r.apiRunner)("onServiceWorkerUpdateReady",{serviceWorker:e}),window.___failedResources&&(console.log("resources failed, SW updated - reloading"),window.location.reload())):(console.log("Content is now available offline!"),Object(r.apiRunner)("onServiceWorkerInstalled",{serviceWorker:e}));break;case"redundant":console.error("The installing service worker became redundant."),Object(r.apiRunner)("onServiceWorkerRedundant",{serviceWorker:e});break;case"activated":Object(r.apiRunner)("onServiceWorkerActive",{serviceWorker:e})}})})}).catch(function(e){console.error("Error during service worker registration:",e)})},function(e,t,n){"use strict";n.r(t);n(78),n(81),n(28);var r=n(7),o=n.n(r),i=n(3),a=n(0),l=n.n(a),u=n(49),c=n.n(u),s=n(17),f=n(23),d=n(70),p=n(71),h=n.n(p),m=(n(47),n(1)),v=n.n(m),g=n(2),y=n(72),b=n(10),w=n(8),_=y.reduce(function(e,t){return e[t.fromPath]=t,e},{});function k(e){var t=_[e];return null!=t&&(window.___replace(t.toPath),!0)}var x=function(e,t){k(e.pathname)||Object(i.apiRunner)("onPreRouteUpdate",{location:e,prevLocation:t})},T=function(e,t){k(e.pathname)||(Object(i.apiRunner)("onRouteUpdate",{location:e,prevLocation:t}),window.__navigatingToLink=!1)},S=function(e,t){void 0===t&&(t={}),t.replace||(window.__navigatingToLink=!0);var n=Object(w.parsePath)(e).pathname,r=_[n];if(r&&(e=r.toPath,n=Object(w.parsePath)(e).pathname),window.___swUpdated)window.location=n;else{var o=setTimeout(function(){b.a.emit("onDelayedLoadPageResources",{pathname:n}),Object(i.apiRunner)("onRouteUpdateDelayed",{location:window.location})},1e3);g.default.getResourcesForPathname(n).then(function(n){Object(s.navigate)(e,t),clearTimeout(o)})}};function E(e,t){var 
n=this,r=t.location,o=r.pathname,a=r.hash,l=Object(i.apiRunner)("shouldUpdateScroll",{prevRouterProps:e,pathname:o,routerProps:{location:r},getSavedScrollPosition:function(e){return n._stateStorage.read(e)}});if(l.length>0)return l[0];if(e&&e.location.pathname===o)return a?a.slice(1):[0,0];return!0}var C=function(e){function t(t){var n;return n=e.call(this,t)||this,x(t.location,null),n}o()(t,e);var n=t.prototype;return n.componentDidMount=function(){T(this.props.location,null)},n.componentDidUpdate=function(e,t,n){n&&T(this.props.location,e.location)},n.getSnapshotBeforeUpdate=function(e){return this.props.location.pathname!==e.location.pathname&&(x(this.props.location,e.location),!0)},n.render=function(){return this.props.children},t}(l.a.Component);C.propTypes={location:v.a.object.isRequired};var P=n(30),O=n(50),R=n.n(O);function A(e,t){for(var n in e)if(!(n in t))return!0;for(var r in t)if(e[r]!==t[r])return!0;return!1}var j=!0,N=function(e){function t(t){var n;n=e.call(this)||this;var r=t.location;return n.state={location:Object.assign({},r),pageResources:g.default.getResourcesForPathnameSync(r.pathname)},n}o()(t,e);var n=t.prototype;return n.reloadPage=function(e){var t=window.location.href;window.history.replaceState({},"",e),window.location.replace(t)},t.getDerivedStateFromProps=function(e,t){var n=e.location;return t.location!==n?{pageResources:g.default.getResourcesForPathnameSync(n.pathname),location:Object.assign({},n)}:null},n.hasResources=function(e){return!(!e||!e.json)},n.retryResources=function(e){var t=this,n=e.location.pathname;if(!g.default.getResourcesForPathnameSync(n)){var r=this.props.location;this.nextLocation=e.location,g.default.getResourcesForPathname(n).then(function(n){t.nextLocation===e.location&&(t.hasResources(n)?t.setState({location:Object.assign({},window.location),pageResources:n}):t.reloadPage(r.href))})}},n.shouldComponentUpdate=function(e,t){return 
this.hasResources(t.pageResources)?this.state.pageResources!==t.pageResources||(this.state.pageResources.component!==t.pageResources.component||(this.state.pageResources.json!==t.pageResources.json||(!(this.state.location.key===t.location.key||!t.pageResources.page||!t.pageResources.page.matchPath&&!t.pageResources.page.path)||function(e,t,n){return A(e.props,t)||A(e.state,n)}(this,e,t)))):(this.retryResources(e),!1)},n.render=function(){if(!this.hasResources(this.state.pageResources)&&j)throw window.___failedResources=!0,new Error("Missing resources for "+this.state.location.pathname);return j=!1,this.props.children(this.state)},t}(l.a.Component);N.propTypes={location:v.a.object.isRequired,pageResources:v.a.object};var L,M=N;window.asyncRequires=R.a,window.___emitter=b.a,window.___loader=g.default,g.default.addPagesArray([window.page]),g.default.addDataPaths(((L={})[window.page.jsonName]=window.dataPath,L)),g.default.addProdRequires(R.a),Object(g.setApiRunnerForLoader)(i.apiRunner),window.__navigatingToLink=!1,window.___loader=g.default,window.___push=function(e){return S(e,{replace:!1})},window.___replace=function(e){return S(e,{replace:!0})},window.___navigate=function(e,t){return S(e,t)},k(window.location.pathname),Object(i.apiRunnerAsync)("onClientEntry").then(function(){Object(i.apiRunner)("registerServiceWorker").length>0&&n(144);var e=function(e){function t(){return e.apply(this,arguments)||this}return o()(t,e),t.prototype.render=function(){var e=this,t=this.props.location;return l.a.createElement(M,{location:t},function(t){var n=t.pageResources,r=t.location;return 
l.a.createElement(C,{location:r},l.a.createElement(d.ScrollContext,{location:r,shouldUpdateScroll:E},l.a.createElement(P.a,Object.assign({},e.props,{location:r,pageResources:n},n.json))))})},t}(l.a.Component),t=window,r=t.page,u=t.location;!r||""+r.path===u.pathname||r.matchPath&&Object(f.match)(""+r.matchPath,u.pathname)||"/404.html"===r.path||r.path.match(/^\/404\/?$/)||r.path.match(/^\/offline-plugin-app-shell-fallback\/?$/)||Object(s.navigate)(""+r.path+u.search+u.hash,{replace:!0}),g.default.getResourcesForPathname(u.pathname).then(function(){var t=function(){return Object(a.createElement)(s.Router,{basepath:""},Object(a.createElement)(e,{path:"/*"}))},n=Object(i.apiRunner)("wrapRootElement",{element:l.a.createElement(t,null)},l.a.createElement(t,null),function(e){return{element:e.result}}).pop(),r=function(){return n},o=Object(i.apiRunner)("replaceHydrateFunction",void 0,c.a.hydrate)[0];h()(function(){o(l.a.createElement(r,null),"undefined"!=typeof window?document.getElementById("___gatsby"):void 0,function(){Object(g.postInitialRenderWork)(),Object(i.apiRunner)("onInitialClientRender")})})})})}],[[145,16]]]); //# sourceMappingURL=app-e46b8d19c52f70286cf5.js.map
#!/usr/bin/env python2 # Copyright (c) 2017 The Mlgb developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test Proton interface (provides AMQP 1.0 messaging support). # # Requirements: # Python library for Qpid Proton: # https://pypi.python.org/pypi/python-qpid-proton # To install: # pip install python-qpid-proton # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from proton.handlers import MessagingHandler from proton.reactor import Container import binascii import struct import threading try: import http.client as httplib except ImportError: import httplib try: import urllib.parse as urlparse except ImportError: import urlparse class Server(MessagingHandler): def __init__(self, url, limit): super(Server, self).__init__() self.url = url self.counter = limit self.blockhashes = [] self.txids = [] self.blockseq = -1 self.txidseq = -1 def on_start(self, event): print "Proton listening on:", self.url self.container = event.container self.acceptor = event.container.listen(self.url) def on_message(self, event): m = event.message hash = bytes_to_hex_str(m.body) sequence = m.properties['x-opt-sequence-number'] if m.subject == "hashtx": self.txids.append(hash) # Test that sequence id is incrementing assert(sequence == 1 + self.txidseq) self.txidseq = sequence elif m.subject == "hashblock": self.blockhashes.append(hash) # Test that sequence id is incrementing assert(sequence == 1 + self.blockseq) self.blockseq = sequence self.counter = self.counter - 1 if self.counter == 0: self.container.stop() class ProtonTest (BitcoinTestFramework): port = 25672 numblocks = 10 # must be even, as two nodes generate equal number assert(numblocks % 2 == 0) def setup_nodes(self): # Launch proton server in background thread # It terminates after receiving numblocks * 2 messages (one for coinbase, one for block) self.server = Server("127.0.0.1:%i" % 
self.port, self.numblocks * 2) self.container = Container(self.server) self.t1 = threading.Thread(target=self.container.run) self.t1.start() return start_nodes(4, self.options.tmpdir, extra_args=[ ['-experimentalfeatures', '-debug=amqp', '-amqppubhashtx=amqp://127.0.0.1:'+str(self.port), '-amqppubhashblock=amqp://127.0.0.1:'+str(self.port)], [], [], [] ]) def run_test(self): self.sync_all() baseheight = self.nodes[0].getblockcount() # 200 blocks already mined # generate some blocks self.nodes[0].generate(self.numblocks/2) self.sync_all() self.nodes[1].generate(self.numblocks/2) self.sync_all() # wait for server to finish self.t1.join() # sequence numbers have already been checked in the server's message handler # sanity check that we have the right number of block hashes and coinbase txids assert_equal(len(self.server.blockhashes), self.numblocks) assert_equal(len(self.server.txids), self.numblocks) # verify that each block has the correct coinbase txid for i in xrange(0, self.numblocks): height = baseheight + i + 1 blockhash = self.nodes[0].getblockhash(height) assert_equal(blockhash, self.server.blockhashes[i]) resp = self.nodes[0].getblock(blockhash) coinbase = resp["tx"][0] assert_equal(coinbase, self.server.txids[i]) if __name__ == '__main__': ProtonTest().main()
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.

Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.

- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil

from test_framework.test_framework import MANTISCoinTestFramework
from test_framework.util import (
    assert_equal,
    connect_nodes,
    sync_blocks,
)


class KeypoolRestoreTest(MANTISCoinTestFramework):
    def set_test_params(self):
        # Fresh chain, two nodes: node0 funds the test; node1 is the wallet
        # under test with a deliberately tiny keypool of 3 keys.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-keypool=3'], ['-keypool=100']]

    def run_test(self):
        # NOTE(review): assumes the framework exposes each node's CLI args as
        # `extra_args` on the node object — confirm against the test framework.
        isLegacyWallet = '-legacywallet' in self.nodes[0].extra_args
        self.tmpdir = self.options.tmpdir
        # Mine 101 blocks so node0 has at least one mature, spendable coinbase.
        self.nodes[0].generate(101)

        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)

        self.log.info("Generate keys for wallet")
        # Drain the keypool: key 90 is still inside the backed-up pool,
        # key 110 lies beyond it.
        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()

        self.log.info("Send funds to wallet")
        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)

        self.log.info("Restart node with wallet backup")
        self.stop_node(1)
        # Overwrite the drained wallet file with the pre-drain backup.
        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")

        self.log.info("Verify keypool is restored and balance is correct")
        self.start_node(1, self.extra_args[1])
        connect_nodes(self.nodes[0], 1)
        self.sync_all()
        # The wallet was not backed up after emptying the key pool, so a
        # legacy wallet cannot recover the funds sent to addr_extpool.
        recoveredBalance = 10 if isLegacyWallet else 15
        assert_equal(self.nodes[1].getbalance(), recoveredBalance)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
        # Check that we have marked all keys up to the used keypool key as used
        if not isLegacyWallet:
            assert_equal(self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdkeypath'], "m/44'/119'/0'/0'/110'")


if __name__ == '__main__':
    KeypoolRestoreTest().main()
// Build configuration: bundle entry point and the generated output file name.
const buildConfig = {
  build: {
    src: 'src/index.js',
    dest: 'narrative.js',
  },
};

module.exports = buildConfig;
import React from 'react';

/**
 * Stateless functional component that embeds the currently selected YouTube
 * video in an iframe and shows its title and description below the player.
 *
 * @param {Object} props
 * @param {Object} [props.selectedVideo] - YouTube API search result; must
 *   provide `id.videoId` and `snippet.{title,description}` when present.
 */
const VideoPlayer = ({selectedVideo}) => {
  // No video selected yet (e.g. the initial search has not resolved):
  // render a placeholder instead of an empty player.
  if (!selectedVideo) {
    return <div className="">Loading...</div>;
  }

  const videoId = selectedVideo.id.videoId;
  // Template literal (backticks ``, not single quotes '') builds the embed URL.
  const url = `https://www.youtube.com/embed/${videoId}`;
  // Function components access props as an argument, whereas class components
  // access them via `this.props`.

  return (
    <div className="video-detail col-md-8">
      <div className="embed-responsive embed-responsive-16by9">
        <iframe className="embed-responsive-item" src={url}></iframe>
      </div>
      <div className="details">
        <div>{selectedVideo.snippet.title}</div>
        <div>{selectedVideo.snippet.description}</div>
      </div>
    </div>
  );
};

export default VideoPlayer;
// Keywords whose presence flags an element as a potential "Game of Thrones" spoiler.
var kw = ['Game of Thrones', 'GOT', 'Lannister', 'Jon Snow', 'Sansa Stark', 'Arya Stark',
          'Cersei Lannister', 'Dies', 'death', 'dies', 'killed', 'Bran Stark', 'Petyr Baelish',
          'Ygritte', 'Sandor Clegane', 'Khal Drogo', 'Margaery Tyrell', 'Iron throne', 'stark',
          'killed', 'GOT Spoiler', 'Game of thrones spoiler', 'spoiler', 'Spoiler',
          'GAME OF THRONES', 'death', 'DEAD', 'Dead', 'Died', 'DEATH', 'Death', 'DIED',
          'winter is comming', 'Targaryen', 'Tyrell', 'Baratheon', 'Tully', 'Bolton'];

// Concatenated tag names; sibling elements whose tagName appears in this string
// are also blanked out next to a matched node (SPAN, EM, B, I, U, LI, OL, ...).
var tags = "SPANEMBIULOLI";
var total = 0;

for (var ii = 0; ii < kw.length; ii++) {
  // jQuery: innermost elements containing the keyword (no matching descendant).
  var o = $(`:contains(${kw[ii]}):not(:has(:contains(${kw[ii]})))`);
  for (var i = 0; i < o.length; i++) {
    // Skip detached nodes and direct children of <body>.
    if (!o[i].parentNode || o[i].parentNode.nodeName === "BODY") {
      continue;
    }
    hideSpoiler(o[i]);
    total++;
  }
}

// Heavily spoiler-laden page: blank out every heading as well.
if (total >= 10) {
  var headings = document.querySelectorAll("h1, h2, h3, h4, h5, h6");
  for (var hi = 0; hi < headings.length; hi++) hideNode(headings[hi]);
}

/**
 * Blanks out a spoiler node plus surrounding context: blurs images and hides
 * list items under the (grand)parent, and hides sibling elements of the
 * whitelisted tag types.
 * @param {Element} node - element containing a spoiler keyword
 */
function hideSpoiler(node) {
  var ancestor = node.parentNode;
  if (ancestor != null) {
    // Walk one more level up (but never past <body>) to catch nearby media.
    if (ancestor.parentNode != null && ancestor.tagName != 'BODY') ancestor = ancestor.parentNode;

    var imgs = ancestor.getElementsByTagName('img');
    for (var i = 0; i < imgs.length; i++) imgs[i].style.webkitFilter = "blur(50px)";

    var lists = ancestor.getElementsByTagName('li');
    for (var j = 0; j < lists.length; j++) hideNode(lists[j]);
  }

  if (node == null || node.parentNode == null) return;

  var all_child = node.parentNode.children;
  // BUG FIX: the original loop condition was `k < all_child` — comparing the
  // index against the HTMLCollection itself is never true, so siblings were
  // never hidden. Compare against `.length` instead.
  for (var k = 0; k < all_child.length; k++) {
    var type = all_child[k].tagName;
    if (tags.match(type) != null) hideNode(all_child[k]);
  }
  hideNode(node);
}

/**
 * Replaces a node's text content with a spoiler notice and colors it red.
 * @param {Element} node
 */
function hideNode(node) {
  node.textContent = '[TEXT BLOCKED: Spoiler detected]';
  node.style.color = 'red';
}
import { HTTP } from '../http-commons'
import store from '@/store'

/**
 * Thin API wrapper around the WMS service root endpoint.
 */
export default class IndexApi {
  /**
   * Fetches the list of available endpoints from the WMS API root and
   * commits the payload to the Vuex `endpoints` module.
   */
  async getAllEndPoints () {
    const response = await HTTP.get('http://localhost:8181/wms-api/v1/')
    store.commit('endpoints/SET_ENDPOINT', response.data)
  }
}
# Purpose: This is the main script of CB2 Bot, a work-in-progress Twitch chat # bot that performs basic notification and chat interaction tasks # including greeting chat users, thanking followers, cheerers, and # subscribers, and storing and executing custom text commands. # Author: Kyle Lander # Date: 2021-11 # TODO: Add sound alert functionality to notify the streamer of when someone # in the chat says hi to them. # TODO: Add functionality for other EventSub topics (currently only follow, # subscribe, and cheer are supported) import json import os import requests import schedule import socket import sqlite3 import threading import logging import urllib.parse as urlparse from dotenv import load_dotenv from helpers import (authorize, get_app_access_token, get_user_data, nuke_eventsubs, subscribe_to_eventsub, verify_signature) from http.server import BaseHTTPRequestHandler, HTTPServer from os.path import join, dirname from time import time from urllib.parse import parse_qs # Load environment variables. dotenv_path = join(dirname(__file__), 'config.env') load_dotenv(dotenv_path) # Get the location of the script for creating paths. __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) # Define some constants that are needed to connect to the servers. 
BOT_USERNAME = os.environ['BOT_USERNAME'] CALLBACK = os.environ['CALLBACK'] CHANNEL = f'#{os.environ["CHANNEL"]}' CLIENT_ID = os.environ['CLIENT_ID'] AUTH = {'Accept': 'application/vnd.twitchtv.v5+json', 'Client-ID': CLIENT_ID} COOLDOWN = os.environ['COOLDOWN'] DB = os.path.join(__location__, os.environ['DB']) ENDPOINT = 'https://api.twitch.tv/helix/eventsub/subscriptions' HTTP_PORT = int(os.environ['HTTP_PORT']) IRC_CONNECTION_DATA = ('irc.chat.twitch.tv', 6667) OAUTH = f'oauth:{os.environ["OAUTH"]}' SECRET = os.environ['SECRET'] APP_ACCESS_TOKEN = get_app_access_token(CLIENT_ID, SECRET) # This list contains all users that will be able to execute certain chat # commands that should only be performed by moderators. Names will be # checked against this list before executing such commands. MODS = os.environ['MODS'] # Define a list of phrases to respond to as a greeting. GREETINGS = ['hi', 'hello', 'heyo', 'yo', 'hey', 'salut', 'suh'] # Define a list of users that have said one of the 'hello' variations already. seen_users = [] # Defina a dictionary that will store instances of the CooldownHandler class. # Every command will get its own instance of the class. cooldown_handlers = {} # Create a socket object and make a connection to the chat ircserver. ircserver = socket.socket() ircserver.connect(IRC_CONNECTION_DATA) # Tell the ircserver who we are. ircserver.send(bytes('PASS {}\r\n'.format(OAUTH), 'UTF-8')) ircserver.send(bytes('NICK {}\r\n'.format(BOT_USERNAME), 'UTF-8')) # This list will hold all previously seen Twitch-Eventsub-Message-Id values. seen_message_ids = [] # Define a class that will keep track of when a command was last used and # determine if it can be used again by non-mod users. class CooldownHandler: ''' A class to keep track of cooldowns for IRC chat commands. ... 
Attributes ---------- command : str name of the command cooldown : int length of cooldown in seconds last_used : float time the command was last used Methods ------- is_useable(): Checks if more time than the cooldown length has passed since the command was last used. Returns a boolean: True if the cooldown has passed, False if the command is still on cooldown. ''' def __init__(self, command: str, cooldown: int) -> None: ''' Constructs the attriutes for the CooldownHandler object. Parameters ---------- command : str name of the command cooldown : int length of cooldown in seconds ''' self.command = command self.cooldown = int(cooldown) self.last_used = time() def is_useable(self) -> bool: if time() > self.cooldown + self.last_used: self.last_used = time() return True return False # Set up the request handler that will listen for requests from Twitch. # Modified from: https://gist.github.com/mdonkers/63e115cc0c79b4f6b8b3a6b797e485c7 class RequestHandler(BaseHTTPRequestHandler): ''' A class to handle HTTP requests from Twitch, subclassed from BaseHTTPRequestHandler. ... Methods ---------- do_GET(): Handles all GET requests from Twitch. This is currently only used for handling the OIDC Authorization Code Flow process of authorizing the bot for EventSub topics like 'subscribe' that require elevated permission from the streamer. do_POST(): Handles all POST requests from Twitch. This is currently used for responding to webhook verifications and receiving EventSub notifications. 
''' def _set_response(self): self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() def do_GET(self): parsed = urlparse.urlparse(self.path).query print(f'PARSED: {parsed}\n') # Handle GET requests from Twitch try: code = parse_qs(parsed)['code'][0] state = parse_qs(parsed)['state'][0] print(f'STATE: {state}\n') print(f'LOCAL STATE: {os.environ["STATE"]}\n') print(f'CODE: {code}\n') if state == os.environ['STATE']: request_dict = { 'client_id': CLIENT_ID, 'client_secret': SECRET, 'code': code, 'grant_type': 'authorization_code', 'redirect_uri': CALLBACK } response = requests.post('https://id.twitch.tv/oauth2/token', request_dict) print(f'RESPONSE: {response}\n') self._set_response() # Return 403 if the states don't match. else: self.send_response(403) self.end_headers() except: pass logging.info('GET request,\nPath: %s\nHeaders:\n%s\n', str(self.path), str(self.headers)) self._set_response() self.wfile.write('GET request for {}'.format(self.path).encode('utf-8')) def do_POST(self): content_length = int(self.headers['Content-Length']) # <--- Gets the size of data post_data = self.rfile.read(content_length) # <--- Gets the data itself logging.info('POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n', str(self.path), str(self.headers), post_data.decode('utf-8')) # This section will handle POST requests that come from Twitch. if self.headers['Twitch-Eventsub-Message-Id']: message_id = self.headers['Twitch-Eventsub-Message-Id'] eventsub_timestamp = self.headers['Twitch-Eventsub-Message-Timestamp'] eventsub_signature = self.headers['Twitch-Eventsub-Message-Signature'] # Return a 200 status if the message ID has been seen before. if message_id in seen_message_ids: self._set_response() print(f'Previously seen message ID: {message_id}, returning 200.\n') # Verify that the request came from Twitch. 
elif verify_signature(SECRET, message_id, eventsub_timestamp, post_data, eventsub_signature) == True: seen_message_ids.append(message_id) payload = json.loads(post_data) # If the message is a webhook verification, return the challenge. if self.headers['Twitch-Eventsub-Message-Type'] == 'webhook_callback_verification': eventsub_challenge = payload['challenge'] challenge_bytes = eventsub_challenge.encode() self.send_response(200) self.send_header('Content-Length', str(len(challenge_bytes))) self.end_headers() self.wfile.write(challenge_bytes) # If the message is a notification, take the appropriate action. elif self.headers['Twitch-Eventsub-Message-Type'] == 'notification': subscription_type = self.headers['Twitch-Eventsub-Subscription-Type'] user_name = payload['event']['user_name'] # If someone followed, thank them in chat. if subscription_type == 'channel.follow': sendmsg(f'Thank you for following {user_name}!') self._set_response() # If someone subscribed, thank them in chat. elif subscription_type == 'channel.subscribe': sub_tier = int(int(payload['event']['tier']) / 1000) sendmsg(f'{user_name} subscribed at tier {sub_tier}! Thank you for the support!') self._set_response() # If someone cheered, thank them in chat. elif subscription_type == 'channel.cheer': bits = payload['event']['bits'] if payload['event']['is_anonymous'] == False: sendmsg(f'{user_name} cheered {bits} bits! Thank you for the support!') else: sendmsg(f'Anonymous cheered {bits} bits! Thank you for the support!') self._set_response() # More actions for other notification types could be added here # Return 403 if the signature verification failed. else: self.send_response(403) self.end_headers() else: self._set_response() self.wfile.write('POST request for {}'.format(self.path).encode('utf-8')) # This function will define and run an HTTP server with the handler above. 
def run(server_class=HTTPServer, handler_class=RequestHandler, port=HTTP_PORT): logging.basicConfig(level=logging.INFO) server_address = ('', port) httpd = server_class(server_address, handler_class) logging.info('Starting httpd...\n') try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() logging.info('Stopping httpd...\n') # Assign a thread to the run() function above, this lets the request handler # run forever in a backgrount thread while the rest of the program continues on. thread = threading.Thread(target=run) thread.daemon = True # Define a function that adds a command to the database if it doesn't already exist. def add_command(message: str, cursor: sqlite3.Cursor): # Split the message into a list. splitmsg = message.split(' ') # Check if this is to be a mod-only command (the 'mod' flag will be provided after the # command name, which will be at index 2). if splitmsg[1].lower() == 'mod': # Set the mod boolean to True if the mod flag is present. mod = 1 # Get the name of the new command, its index depends on whether the 'mod' flag # is present. command = splitmsg[2].lower() # Assemble the command contents into a string, starting index depends on whether # the 'mod' flag is present. content = ' '.join(splitmsg[3:]) else: mod = 0 command = splitmsg[1].lower() content = ' '.join(splitmsg[2:]) # Check if the command already exists. cursor.execute('SELECT command FROM commands WHERE command = :command', {'command': command}) # Insert the new command if it doesn't already exist. if cursor.fetchone() == None: cursor.execute('INSERT INTO commands (command, content, mod) VALUES (?, ?, ?)', (command, content, mod)) return True return False # Schedule a job to clear out the seen_users list every day at midnight. def clear_seen_users(): seen_users.clear() sendmsg('/me Seen users list cleared!') # Define a function that handles commands stored in the database. 
def command_handler(command: str, user: str, cursor: sqlite3.Cursor) -> str: # Try/except in case of sqlite3 error on query executon. try: cursor.execute('SELECT command, content, mod FROM commands WHERE command = :command', {'command': command}) row = cursor.fetchone() # Check if nothing was returned, meaning no command was found. if row == None: return None # Return any command if the user is a mod. if user in MODS: return row[1] # Non-mod commands are usable by anyone, but are subject to cooldowns. if row[2] == 0: # Check if a handler for the command already exists, and then # check to see if the command is on cooldown. if command in cooldown_handlers: cmd = cooldown_handlers[command] if cmd.is_useable(): return row[1] print(f'Command {command} on cooldown.\n') return None # Create a CooldownHander for the command, # then return the command since this will be its first use. cooldown_handlers[f'{command}'] = CooldownHandler(command, COOLDOWN) return row[1] print(f'command_handler: user {user} does not have permission to use !{command}.\n') # Return None because the command does exist, the user just did not # have permission to use it. return None except sqlite3.Error: print(f'SQLite3 Error raised, returning None.\n') return None # Define a function that takes in text that is decorated with a leading "!", indicating that is # a command, and execute the appropriate command if it exists. def command(message: str, name: str, cursor: sqlite3.Cursor, dbconnection: sqlite3.Connection): # Remove the leading !, save this in case it's a new command that needs added to the DB. message = message[1:] # Split the message on spaces and get just the first part (the command name). cmd = message.split(' ')[0] print(f'Command {cmd} received, issued by user {name}\n') # This handles execution of all commands that are stored in the database. # Used the walrus operator for simplicity. 
if dbcmd := command_handler(cmd, name, cursor): # Command did not exist or user did not have permission # to execute the command. if dbcmd == None: pass # Execute the command if one was returned. else: sendmsg(dbcmd) # This block handles all the hardcoded commands. # These commands are mod-only and are necessary for the # core functionality of the bot. Commands have been arranged # according to estimated frequency of use. # Shoutout command for referring viewers to other streamers. elif cmd == 'so' and name in MODS: shoutout = message.split(' ')[1] sendmsg(f'Check out {shoutout} at https://twitch.tv/{shoutout} !') # Adds a new command to the database. elif cmd == 'addcom' and name in MODS: if add_command(message, cursor): dbconnection.commit() else: print(f'Failed to add command {cmd}, it may already exist.\n') # Deletes a command stored in the database. elif cmd == 'delcom' and name in MODS: delete_command(message, cursor) dbconnection.commit() # Subscribes the bot to the channel's 'follow' EventSub topic. elif cmd == 'esfollow' and name in MODS: print('Subscribing to EventSub Follow.\n') # Accessing the env variable here because the CHANNEL variable has a leading '#'. subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET, get_user_id(os.environ["CHANNEL"]), 'follow') # Subscribes the bot to the channel's 'subscribe' and 'cheer' EventSub topics. elif cmd == 'essub' and name in MODS: print('Subscribing to EventSub Subscribe.\n') subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET, get_user_id(os.environ["CHANNEL"]), 'subscribe') print('Subscribing to EventSub Cheer.\n') subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET, get_user_id(os.environ["CHANNEL"]), 'cheer') # Unsubscribes the bot from all EventSub topics regardless of channel. 
elif cmd == 'nukeeventsubs' and name in MODS: print('Deleting all EventSub subscriptions.\n') nuke_eventsubs(APP_ACCESS_TOKEN, CLIENT_ID) # Disconnects the bot from Twitch chat, closes the database connection, # and then performs the rest of the shut down tasks. elif cmd == 'disconnect' and name in MODS: dbconnection.close() shut_down() # Initiates the OIDC Authorization Code Flow process. elif cmd == 'auth' and name in MODS: os.environ['STATE'] = authorize(CALLBACK, CLIENT_ID) else: print(f'Command {cmd} is not a registered command, or {name} does ' 'not have permission to use it, ignoring.\n') # Define a function that deletes a command if it exists. def delete_command(message: str, cursor: sqlite3.Cursor): # Split the message into a list. splitmsg = message.split(' ') # Get just the command name. command = splitmsg[1] cursor.execute('DELETE FROM commands WHERE command = :command', {'command': command}) print(f'Command {command} deleted.\n') # Define a function to get a user ID specifically. def get_user_id(user: str, auth: dict=AUTH) -> str: data = get_user_data(user, auth) user_id = '' for i in data['users']: user_id = str(i['_id']) return user_id # Define a function to join a chat channel. def joinchan(chan: str=CHANNEL): ircserver.send(bytes('JOIN {}\r\n'.format(chan), 'UTF-8')) sendmsg('/me has joined the chat.') # Define a function to post messages in chat. def sendmsg(msg: str, target: str=CHANNEL): ircserver.send(bytes('PRIVMSG {} :{}\r\n'.format(target, msg), 'UTF-8')) # Define a function that shuts down the bot when called. def shut_down(): print('Cancelling EventSubs and shutting down...\n') nuke_eventsubs(APP_ACCESS_TOKEN, CLIENT_ID) sendmsg('/me goes to sleep ResidentSleeper') thread.join() exit(0) # Define the main function. def main(): # Start the HTTP request handler. thread.start() # Connect to the bot's database and create a cursor. dbconnection = sqlite3.connect(DB) dbcursor = dbconnection.cursor() # Join the IRC channel (chat). 
joinchan() # Schedule the seen users list-clearing task. schedule.every().day.at('00:00').do(clear_seen_users) while True: schedule.run_pending() ircmsg = ircserver.recv(2048).decode('UTF-8') ircmsg = ircmsg.strip('nr') cmd = '' name = '' # Check the type of message received. if ircmsg.find('PRIVMSG') != -1: # strip() removes \n characters name = ircmsg.split('!', 1)[0][1:].strip() message = ircmsg.split('PRIVMSG', 1)[1].split(':', 1)[1].strip() print(f'Message: {message}\n') # If message starts with a !, indicating a bot command. if message[0] == '!': command(message, name, dbcursor, dbconnection) # See if the user is saying hi. elif any(word in message.lower() for word in GREETINGS): # Say hi if the user has not been seen lately. if name not in seen_users: sendmsg('Hi {} :)'.format(name)) seen_users.append(name) # Respond to ircserver pings to maintain connection. elif ircmsg.find('PING') != -1: ircserver.send(bytes('PONG :tmi.twitch.tv\r\n', 'UTF-8')) print('Ping response sent.') if __name__ == '__main__': try: # Start the chat bot. main() except KeyboardInterrupt: shut_down()
/**
 * Simple plain-object check (true for non-null, non-array objects).
 * @param item - value to test
 * @returns {boolean} true if `item` is a plain-object-like value
 */
export function isObject(item) {
  // Note: typeof null === 'object', so null must be excluded explicitly.
  // Returns a true boolean (the original could return the falsy input itself).
  return item !== null && typeof item === 'object' && !Array.isArray(item);
}

/**
 * Deep merge two or more objects into `target` (mutates and returns `target`).
 * Later sources win on conflicting scalar keys; nested objects are merged
 * recursively.
 * @param target - object that receives the merged keys
 * @param ...sources - objects merged into `target`, left to right
 * @returns the mutated `target`
 */
export function mergeDeep(target, ...sources) {
  if (!sources.length) return target;
  const source = sources.shift();

  if (isObject(target) && isObject(source)) {
    for (const key in source) {
      // Security: skip keys that would pollute Object.prototype when the
      // source comes from untrusted input (e.g. JSON.parse of request data).
      if (key === '__proto__' || key === 'constructor') continue;
      if (isObject(source[key])) {
        // BUG FIX: the original only replaced falsy targets (`!target[key]`),
        // so merging an object over a truthy primitive (e.g. 5) silently
        // dropped every nested source value. Replace any non-object.
        if (!isObject(target[key])) Object.assign(target, { [key]: {} });
        mergeDeep(target[key], source[key]);
      } else {
        Object.assign(target, { [key]: source[key] });
      }
    }
  }

  return mergeDeep(target, ...sources);
}
import cv2 import json import logging import os import pathlib import pytesseract import requests import time from PIL import Image from selenium import webdriver from selenium.webdriver.common.keys import Keys from skimage.metrics import structural_similarity as ssim ##Begin Config## logging.basicConfig(filename='log.log', filemode='a', format='%(asctime)s %(levelname)s %(funcName)s %(lineno)d %(message)s', datefmt="%Y-%m-%dT%H:%M:%S%z", level=logging.INFO) ##End Config## def find_subdirs(path): subdirs = [x for x in os.listdir( path) if os.path.isdir(os.path.join(path, x))] subdirs.sort() last_2_subdirs = subdirs[-2:] return last_2_subdirs def check_response_code(url): response = requests.get(url) if response.status_code == 200: logging.debug(f"{url} is up and running") return True else: logging.error(f"{url} is down with status code {response.status_code}") return False def take_screenshots(dir_path, sites): driver = webdriver.Chrome(executable_path=f"{dir_path}/chromedriver") new_dir = f"{dir_path}/pictures/{round((time.time()))}" pathlib.Path(new_dir).mkdir(parents=True, exist_ok=False) for site in sites: driver.get(site) save_path = f"{new_dir}/{site.replace('/', '_')}.png" driver.save_screenshot(save_path) logging.debug(f"Saved screenshot to {save_path}") driver.close() def compare_images(img_path_1, img_path_2): img1 = cv2.imread(img_path_1) img2 = cv2.imread(img_path_2) img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) s = ssim(img1, img2) return s def compare_images_in_dirs(dir_path_1, dir_path_2): images_pass = True for img_path_1 in os.listdir(dir_path_1): img_path_2 = os.path.join(dir_path_2, img_path_1) img_path_1 = os.path.join(dir_path_1, img_path_1) if os.path.isfile(img_path_1) and os.path.isfile(img_path_2): s = compare_images(img_path_1, img_path_2) if s < 0.9: logging.error(f"{img_path_1} and {img_path_2} are different") print(f"{img_path_1} and {img_path_2} are different") images_pass = False else: 
logging.debug(f"{img_path_1} and {img_path_2} are the same") else: logging.warning(f"{img_path_1} or {img_path_2} does not exist") return images_pass def extract_text_from_image(image_path): result = "" img = Image.open(image_path) result = pytesseract.image_to_string(img) result = result.lower() result = result.strip() return result def check_images_for_error_words(dir_path, error_words): error_word_pass = True for img_path in os.listdir(dir_path): img_path = os.path.join(dir_path, img_path) if os.path.isfile(img_path): result = extract_text_from_image(img_path) if any(error_word in result for error_word in error_words): logging.error(f"{img_path} contains error word") print(f"{img_path} contains error word") error_word_pass = False else: logging.debug(f"{img_path} does not contain error word") else: logging.warning(f"{img_path} does not exist") return error_word_pass if __name__ == '__main__': dir_path = os.path.dirname(os.path.realpath(__file__)) with open('config.json') as json_file: data = json.load(json_file) sites = data["sites"] error_words = data["error_words"] http_status = True for site in sites: if not check_response_code(site): http_status = False take_screenshots(dir_path, sites) dirs = find_subdirs(dir_path+"/pictures") most_recent_dir = dirs[-1] most_recent_dir_path = f"{dir_path}/pictures/{most_recent_dir}" second_most_recent_dir = dirs[-2] second_most_recent_dir_path = f"{dir_path}/pictures/{second_most_recent_dir}" image_status = compare_images_in_dirs( most_recent_dir_path, second_most_recent_dir_path) error_word_status = check_images_for_error_words(most_recent_dir_path, error_words) if http_status & image_status & error_word_status: print("All sites passed the status check") else: print("One or more sites failed the status check")
"use strict"; var webpack = require("webpack"); var HtmlWebpackPlugin = require("html-webpack-plugin"); var ExtractTextPlugin = require("extract-text-webpack-plugin"); var path = require("path"); var srcPath = path.join(__dirname, "src"); function createConfig(isProduction) { var pathName = isProduction ? "deploy" : "dist"; var babelPresets = [ "es2015", // For using es2015 syntax "react", // For using jsx syntax "stage-0" // For using new features of es2015 ]; if (!isProduction) { babelPresets.push("react-hmre"); // For hot reloading } var defaultConfig = { entry: ['whatwg-fetch', path.join(srcPath, "app.js"), path.join(srcPath, "assets/css/style.css")], output: { path: __dirname + "/" + pathName, filename: "[name].js", chunkFilename: "[id].js" }, resolve: { root: srcPath, extensions: ["", ".js"], modulesDirectories: ["node_modules"] }, // Resolve allows us to define the places where the code we use resides, namely our src and node_modules folders. Setting up the moduleDirectories is a nifty trick to help Webpack locate your modules using imports no matter where you are in the code. For example import components/example instead of something like ../../components/example. extensions lists file types that can have optional extensions in your imports; instead of import components/example.js you can then write import components/example. 
module: { loaders: [ {test: /\.css$/, loader: ExtractTextPlugin.extract("style-loader", "css-loader")}, { test: /\.(js|.jsx)$/, exclude: /node_modules/, loader: "babel-loader", query: {presets: babelPresets} }, {test: /\.json$/, loader: "json"}, { test: /\.(jpe?g|png|gif)$/i, loaders: ["file?hash=sha512&digest=hex&name=[hash].[ext]", "image-webpack?bypassOnDebug&optimizationLevel=7&interlaced=false"] }, { test: /\.woff(2)?(\?v=[0-9]\.[0-9]\.[0-9])?$/, loader: "url-loader?limit=10000&mimetype=application/font-woff" }, {test: /\.svg(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=image/svg+xml"}, {test: /\.eot(\?v=\d+\.\d+\.\d+)?$/, loader: "file"}, {test: /\.ttf(\?v=\d+\.\d+\.\d+)?$/, loader: "url?limit=10000&mimetype=application/octet-stream"}, {test: /\.(otf)(\?[a-z0-9]+)?$/, loader: "file-loader?name=fonts/[name].[ext]"}, ] }, plugins: [ new webpack.DefinePlugin({RUN_ENVIRONMENT: "'" + env + "'"}), new ExtractTextPlugin("[name].css"), new HtmlWebpackPlugin({template: "./src/index.html"}) ] }; if (!isProduction) { console.log("not production"); defaultConfig.devtool = "cheap-module-eval-source-map"; defaultConfig.devServer = {hot: true, port: 8000}; defaultConfig.plugins.push(new webpack.HotModuleReplacementPlugin()); } return defaultConfig; } var env = process.env.NODE_ENV || "local"; module.exports = createConfig(env === "production");
/**
 * A circular physics body drawn with a stone sprite.
 * Relies on sketch-level globals from p5.js (loadImage, push, pop, noStroke,
 * fill, imageMode, image, CENTER) and Matter.js (Bodies, World, world).
 */
class Stone {
    /**
     * Create a dynamic circular body and register it with the physics world.
     *
     * @param {number} x initial x coordinate of the body's centre
     * @param {number} y initial y coordinate of the body's centre
     * @param {number} r circle radius in pixels
     */
    constructor(x, y, r) {
        const bodyOptions = {
            isStatic: false,
        };
        this.body = Bodies.circle(x, y, r, bodyOptions);
        this.image = loadImage("assets/stone.png");
        this.r = r;
        World.add(world, this.body);
    }

    /** Draw the sprite centred on the body's current physics position. */
    display() {
        const {x, y} = this.body.position;
        const diameter = this.r * 2;
        push();
        noStroke();
        // NOTE(review): fill() has no effect on image(); looks like a
        // leftover from drawing a plain circle — kept for identical behavior.
        fill(148, 127, 146);
        imageMode(CENTER);
        image(this.image, x, y, diameter, diameter);
        pop();
    }
}
const path = require('path'); const { ModuleFederationPlugin } = require('webpack').container; const { CleanWebpackPlugin } = require('clean-webpack-plugin'); const { dependencies } = require('./package.json'); module.exports = { entry: './src/index.js', output: { filename: 'index.js', path: path.resolve(__dirname, 'dist'), libraryTarget: 'commonjs', }, mode: 'production', target: 'node', node: false, externals: ['fs', 'path', 'fsevents'], module: { rules: [ { test: /\.m?js$/, exclude: /node_modules/, use: { loader: 'babel-loader', options: { presets: [ [ '@babel/preset-env', { targets: { node: '12', }, }, ], ], }, }, }, ], }, plugins: [ new CleanWebpackPlugin(), new ModuleFederationPlugin({ name: 'build', library: { type: 'commonjs' }, filename: 'remoteEntry.js', exposes: { './build': './src/build.js', }, shared: dependencies, }), ], };
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mobile_app_config.proto
# NOTE(review): machine-generated module (older protobuf descriptor API).
# Regenerate with protoc instead of editing by hand; the serialized byte
# blobs and serialized_start/serialized_end offsets below are load-bearing.

from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# Descriptors for the imported .proto dependencies.
import multi_list_pb2 as multi__list__pb2
import road_feature_pb2 as road__feature__pb2

# File descriptor: serialized_pb holds the wire-format FileDescriptorProto
# for mobile_app_config.proto (package Proto.Config, proto3).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mobile_app_config.proto',
  package='Proto.Config',
  syntax='proto3',
  serialized_pb=b'\n\x17mobile_app_config.proto\x12\x0cProto.Config\x1a\x10multi_list.proto\x1a\x12road_feature.proto\"M\n\x13trial_config_object\x12\x36\n\x0e\x61\x63\x63\x65pted_users\x18\x01 \x01(\x0b\x32\x1e.Proto.Config.multilist_object\"\x94\x01\n\x11url_config_object\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x39\n\x04type\x18\x02 \x01(\x0e\x32+.Proto.Config.url_config_object.server_type\x12\x13\n\x0b\x63redentials\x18\x03 \x01(\t\"\"\n\x0bserver_type\x12\t\n\x05HTTPS\x10\x00\x12\x08\n\x04HTTP\x10\x01\"{\n\x0elogging_object\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x35\n\x05level\x18\x02 \x01(\x0e\x32&.Proto.Config.logging_object.log_level\"!\n\tlog_level\x12\t\n\x05\x44\x45\x42UG\x10\x00\x12\t\n\x05\x45RROR\x10\x01\"D\n\x13notification_object\x12\x15\n\raudio_enabled\x18\x01 \x01(\x08\x12\x16\n\x0evisual_enabled\x18\x02 \x01(\x08\"\x8f\x02\n\x0etesting_object\x12\x1a\n\x12simulation_enabled\x18\x01 \x01(\x08\x12\x1f\n\x17\x64\x65\x66\x61ult_simulation_file\x18\x02 \x01(\t\x12\x12\n\nlogin_user\x18\x03 \x01(\t\x12\x11\n\tlogin_pwd\x18\x04 \x01(\t\x12\x17\n\x0finitial_balance\x18\x05 \x01(\x05\x12\x18\n\x10selected_vehicle\x18\x06 \x01(\t\x12\x12\n\ntolls_file\x18\x07 \x01(\t\x12\x1a\n\x12tolls_mapping_file\x18\x08 \x01(\t\x12\x36\n\x14\x64\x65\x66\x61ult_road_feature\x18\t \x01(\x0b\x32\x18.Proto.JSON.road_feature\"c\n\x0b\x62\x61se_object\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12,\n\x03url\x18\x02 \x01(\x0b\x32\x1f.Proto.Config.url_config_object\x12\x15\n\rbuffering_sec\x18\x03 \x01(\x05\"g\n\x1araw_data_collection_object\x12\x19\n\x11send_interval_sec\x18\x01 \x01(\x05\x12.\n\x0b\x62\x61se_config\x18\x02 \x01(\x0b\x32\x19.Proto.Config.base_object\"\xa9\x01\n\x1cprobe_data_collection_object\x12\x19\n\x11send_interval_sec\x18\x01 \x01(\x05\x12\x1d\n\x15position_interval_sec\x18\x02 \x01(\x05\x12\x1f\n\x17position_interval_meter\x18\x03 \x01(\x05\x12.\n\x0b\x62\x61se_config\x18\x04 \x01(\x0b\x32\x19.Proto.Config.base_object\"\x91\x01\n\x1fvirtual_gantry_detection_object\x12\x1d\n\x15position_interval_sec\x18\x01 \x01(\x05\x12\x1f\n\x17position_interval_meter\x18\x02 \x01(\x05\x12.\n\x0b\x62\x61se_config\x18\x03 \x01(\x0b\x32\x19.Proto.Config.base_object\"f\n\x19service_url_config_object\x12,\n\x03url\x18\x01 \x01(\x0b\x32\x1f.Proto.Config.url_config_object\x12\x1b\n\x13update_interval_sec\x18\x02 \x01(\x05\"\x11\n\x0fsecurity_object\"w\n\x12user_config_object\x12!\n\x19\x65nabled_automatic_tolling\x18\x01 \x01(\x08\x12>\n\x13notification_config\x18\x02 \x01(\x0b\x32!.Proto.Config.notification_object\"\xcc\x05\n\x11mobile_app_config\x12\x34\n\x0elogging_config\x18\x01 \x01(\x0b\x32\x1c.Proto.Config.logging_object\x12\x34\n\x0etesting_config\x18\x02 \x01(\x0b\x32\x1c.Proto.Config.testing_object\x12\x41\n\x0fraw_data_config\x18\x03 \x01(\x0b\x32(.Proto.Config.raw_data_collection_object\x12\x45\n\x11probe_data_config\x18\x04 \x01(\x0b\x32*.Proto.Config.probe_data_collection_object\x12V\n\x1fvirtual_gantry_detection_config\x18\x05 \x01(\x0b\x32-.Proto.Config.virtual_gantry_detection_object\x12/\n\x0c\x65vent_config\x18\x06 \x01(\x0b\x32\x19.Proto.Config.base_object\x12\x35\n\x0buser_config\x18\x07 \x01(\x0b\x32 .Proto.Config.user_config_object\x12J\n\x19\x63onfiguration_service_url\x18\x08 \x01(\x0b\x32\'.Proto.Config.service_url_config_object\x12\x44\n\x13road_feature_config\x18\t \x01(\x0b\x32\'.Proto.Config.service_url_config_object\x12\x36\n\x0fsecurity_config\x18\n \x01(\x0b\x32\x1d.Proto.Config.security_object\x12\x37\n\x0ctrial_config\x18\x0b \x01(\x0b\x32!.Proto.Config.trial_config_objectB5\n net.ktc.miles.model.proto.configB\x0fMobileAppConfigH\x01\x62\x06proto3'
  ,
  dependencies=[multi__list__pb2.DESCRIPTOR,road__feature__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# ---- Enum descriptors --------------------------------------------------

# Proto.Config.url_config_object.server_type: HTTPS = 0 (default), HTTP = 1.
_URL_CONFIG_OBJECT_SERVER_TYPE = _descriptor.EnumDescriptor(
  name='server_type',
  full_name='Proto.Config.url_config_object.server_type',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='HTTPS', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HTTP', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=273,
  serialized_end=307,
)
_sym_db.RegisterEnumDescriptor(_URL_CONFIG_OBJECT_SERVER_TYPE)

# Proto.Config.logging_object.log_level: DEBUG = 0 (default), ERROR = 1.
_LOGGING_OBJECT_LOG_LEVEL = _descriptor.EnumDescriptor(
  name='log_level',
  full_name='Proto.Config.logging_object.log_level',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='DEBUG', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=399,
  serialized_end=432,
)
_sym_db.RegisterEnumDescriptor(_LOGGING_OBJECT_LOG_LEVEL)

# ---- Message descriptors -----------------------------------------------
# Field numbers/types mirror the .proto definition; cross-message references
# (message_type/enum_type) are patched in after all descriptors exist.

_TRIAL_CONFIG_OBJECT = _descriptor.Descriptor(
  name='trial_config_object',
  full_name='Proto.Config.trial_config_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='accepted_users', full_name='Proto.Config.trial_config_object.accepted_users', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=79,
  serialized_end=156,
)

_URL_CONFIG_OBJECT = _descriptor.Descriptor(
  name='url_config_object',
  full_name='Proto.Config.url_config_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='Proto.Config.url_config_object.url', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='Proto.Config.url_config_object.type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='credentials', full_name='Proto.Config.url_config_object.credentials', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _URL_CONFIG_OBJECT_SERVER_TYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=159,
  serialized_end=307,
)

_LOGGING_OBJECT = _descriptor.Descriptor(
  name='logging_object',
  full_name='Proto.Config.logging_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='enabled', full_name='Proto.Config.logging_object.enabled', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='level', full_name='Proto.Config.logging_object.level', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _LOGGING_OBJECT_LOG_LEVEL,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=309,
  serialized_end=432,
)

_NOTIFICATION_OBJECT = _descriptor.Descriptor(
  name='notification_object',
  full_name='Proto.Config.notification_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='audio_enabled', full_name='Proto.Config.notification_object.audio_enabled', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='visual_enabled', full_name='Proto.Config.notification_object.visual_enabled', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=434,
  serialized_end=502,
)

_TESTING_OBJECT = _descriptor.Descriptor(
  name='testing_object',
  full_name='Proto.Config.testing_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='simulation_enabled', full_name='Proto.Config.testing_object.simulation_enabled', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='default_simulation_file', full_name='Proto.Config.testing_object.default_simulation_file', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='login_user', full_name='Proto.Config.testing_object.login_user', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='login_pwd', full_name='Proto.Config.testing_object.login_pwd', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='initial_balance', full_name='Proto.Config.testing_object.initial_balance', index=4,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='selected_vehicle', full_name='Proto.Config.testing_object.selected_vehicle', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tolls_file', full_name='Proto.Config.testing_object.tolls_file', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tolls_mapping_file', full_name='Proto.Config.testing_object.tolls_mapping_file', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='default_road_feature', full_name='Proto.Config.testing_object.default_road_feature', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=505,
  serialized_end=776,
)

_BASE_OBJECT = _descriptor.Descriptor(
  name='base_object',
  full_name='Proto.Config.base_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='enabled', full_name='Proto.Config.base_object.enabled', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='url', full_name='Proto.Config.base_object.url', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='buffering_sec', full_name='Proto.Config.base_object.buffering_sec', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=778,
  serialized_end=877,
)

_RAW_DATA_COLLECTION_OBJECT = _descriptor.Descriptor(
  name='raw_data_collection_object',
  full_name='Proto.Config.raw_data_collection_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='send_interval_sec', full_name='Proto.Config.raw_data_collection_object.send_interval_sec', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='base_config', full_name='Proto.Config.raw_data_collection_object.base_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=879,
  serialized_end=982,
)

_PROBE_DATA_COLLECTION_OBJECT = _descriptor.Descriptor(
  name='probe_data_collection_object',
  full_name='Proto.Config.probe_data_collection_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='send_interval_sec', full_name='Proto.Config.probe_data_collection_object.send_interval_sec', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='position_interval_sec', full_name='Proto.Config.probe_data_collection_object.position_interval_sec', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='position_interval_meter', full_name='Proto.Config.probe_data_collection_object.position_interval_meter', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='base_config', full_name='Proto.Config.probe_data_collection_object.base_config', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=985,
  serialized_end=1154,
)

_VIRTUAL_GANTRY_DETECTION_OBJECT = _descriptor.Descriptor(
  name='virtual_gantry_detection_object',
  full_name='Proto.Config.virtual_gantry_detection_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='position_interval_sec', full_name='Proto.Config.virtual_gantry_detection_object.position_interval_sec', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='position_interval_meter', full_name='Proto.Config.virtual_gantry_detection_object.position_interval_meter', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='base_config', full_name='Proto.Config.virtual_gantry_detection_object.base_config', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1157,
  serialized_end=1302,
)

_SERVICE_URL_CONFIG_OBJECT = _descriptor.Descriptor(
  name='service_url_config_object',
  full_name='Proto.Config.service_url_config_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='url', full_name='Proto.Config.service_url_config_object.url', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='update_interval_sec', full_name='Proto.Config.service_url_config_object.update_interval_sec', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1304,
  serialized_end=1406,
)

# security_object currently has no fields (placeholder message).
_SECURITY_OBJECT = _descriptor.Descriptor(
  name='security_object',
  full_name='Proto.Config.security_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1408,
  serialized_end=1425,
)

_USER_CONFIG_OBJECT = _descriptor.Descriptor(
  name='user_config_object',
  full_name='Proto.Config.user_config_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='enabled_automatic_tolling', full_name='Proto.Config.user_config_object.enabled_automatic_tolling', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='notification_config', full_name='Proto.Config.user_config_object.notification_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1427,
  serialized_end=1546,
)

# Top-level configuration message aggregating all of the sections above.
_MOBILE_APP_CONFIG = _descriptor.Descriptor(
  name='mobile_app_config',
  full_name='Proto.Config.mobile_app_config',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='logging_config', full_name='Proto.Config.mobile_app_config.logging_config', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='testing_config', full_name='Proto.Config.mobile_app_config.testing_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='raw_data_config', full_name='Proto.Config.mobile_app_config.raw_data_config', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='probe_data_config', full_name='Proto.Config.mobile_app_config.probe_data_config', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='virtual_gantry_detection_config', full_name='Proto.Config.mobile_app_config.virtual_gantry_detection_config', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='event_config', full_name='Proto.Config.mobile_app_config.event_config', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='user_config', full_name='Proto.Config.mobile_app_config.user_config', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='configuration_service_url', full_name='Proto.Config.mobile_app_config.configuration_service_url', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='road_feature_config', full_name='Proto.Config.mobile_app_config.road_feature_config', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='security_config', full_name='Proto.Config.mobile_app_config.security_config', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trial_config', full_name='Proto.Config.mobile_app_config.trial_config', index=10,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1549,
  serialized_end=2265,
)

# ---- Cross-reference wiring --------------------------------------------
# Patch message/enum references between descriptors (including the two
# imported modules) now that every descriptor object exists.
_TRIAL_CONFIG_OBJECT.fields_by_name['accepted_users'].message_type = multi__list__pb2._MULTILIST_OBJECT
_URL_CONFIG_OBJECT.fields_by_name['type'].enum_type = _URL_CONFIG_OBJECT_SERVER_TYPE
_URL_CONFIG_OBJECT_SERVER_TYPE.containing_type = _URL_CONFIG_OBJECT
_LOGGING_OBJECT.fields_by_name['level'].enum_type = _LOGGING_OBJECT_LOG_LEVEL
_LOGGING_OBJECT_LOG_LEVEL.containing_type = _LOGGING_OBJECT
_TESTING_OBJECT.fields_by_name['default_road_feature'].message_type = road__feature__pb2._ROAD_FEATURE
_BASE_OBJECT.fields_by_name['url'].message_type = _URL_CONFIG_OBJECT
_RAW_DATA_COLLECTION_OBJECT.fields_by_name['base_config'].message_type = _BASE_OBJECT
_PROBE_DATA_COLLECTION_OBJECT.fields_by_name['base_config'].message_type = _BASE_OBJECT
_VIRTUAL_GANTRY_DETECTION_OBJECT.fields_by_name['base_config'].message_type = _BASE_OBJECT
_SERVICE_URL_CONFIG_OBJECT.fields_by_name['url'].message_type = _URL_CONFIG_OBJECT
_USER_CONFIG_OBJECT.fields_by_name['notification_config'].message_type = _NOTIFICATION_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['logging_config'].message_type = _LOGGING_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['testing_config'].message_type = _TESTING_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['raw_data_config'].message_type = _RAW_DATA_COLLECTION_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['probe_data_config'].message_type = _PROBE_DATA_COLLECTION_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['virtual_gantry_detection_config'].message_type = _VIRTUAL_GANTRY_DETECTION_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['event_config'].message_type = _BASE_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['user_config'].message_type = _USER_CONFIG_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['configuration_service_url'].message_type = _SERVICE_URL_CONFIG_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['road_feature_config'].message_type = _SERVICE_URL_CONFIG_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['security_config'].message_type = _SECURITY_OBJECT
_MOBILE_APP_CONFIG.fields_by_name['trial_config'].message_type = _TRIAL_CONFIG_OBJECT
DESCRIPTOR.message_types_by_name['trial_config_object'] = _TRIAL_CONFIG_OBJECT
DESCRIPTOR.message_types_by_name['url_config_object'] = _URL_CONFIG_OBJECT
DESCRIPTOR.message_types_by_name['logging_object'] = _LOGGING_OBJECT
DESCRIPTOR.message_types_by_name['notification_object'] = _NOTIFICATION_OBJECT
DESCRIPTOR.message_types_by_name['testing_object'] = _TESTING_OBJECT
DESCRIPTOR.message_types_by_name['base_object'] = _BASE_OBJECT
DESCRIPTOR.message_types_by_name['raw_data_collection_object'] = _RAW_DATA_COLLECTION_OBJECT
DESCRIPTOR.message_types_by_name['probe_data_collection_object'] = _PROBE_DATA_COLLECTION_OBJECT
DESCRIPTOR.message_types_by_name['virtual_gantry_detection_object'] = _VIRTUAL_GANTRY_DETECTION_OBJECT
DESCRIPTOR.message_types_by_name['service_url_config_object'] = _SERVICE_URL_CONFIG_OBJECT
DESCRIPTOR.message_types_by_name['security_object'] = _SECURITY_OBJECT
DESCRIPTOR.message_types_by_name['user_config_object'] = _USER_CONFIG_OBJECT
DESCRIPTOR.message_types_by_name['mobile_app_config'] = _MOBILE_APP_CONFIG

# ---- Concrete message classes ------------------------------------------
# Built via the reflection metaclass from the descriptors above and
# registered in the default symbol database.
trial_config_object = _reflection.GeneratedProtocolMessageType('trial_config_object', (_message.Message,), dict(
  DESCRIPTOR = _TRIAL_CONFIG_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.trial_config_object)
  ))
_sym_db.RegisterMessage(trial_config_object)

url_config_object = _reflection.GeneratedProtocolMessageType('url_config_object', (_message.Message,), dict(
  DESCRIPTOR = _URL_CONFIG_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.url_config_object)
  ))
_sym_db.RegisterMessage(url_config_object)

logging_object = _reflection.GeneratedProtocolMessageType('logging_object', (_message.Message,), dict(
  DESCRIPTOR = _LOGGING_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.logging_object)
  ))
_sym_db.RegisterMessage(logging_object)

notification_object = _reflection.GeneratedProtocolMessageType('notification_object', (_message.Message,), dict(
  DESCRIPTOR = _NOTIFICATION_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.notification_object)
  ))
_sym_db.RegisterMessage(notification_object)

testing_object = _reflection.GeneratedProtocolMessageType('testing_object', (_message.Message,), dict(
  DESCRIPTOR = _TESTING_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.testing_object)
  ))
_sym_db.RegisterMessage(testing_object)

base_object = _reflection.GeneratedProtocolMessageType('base_object', (_message.Message,), dict(
  DESCRIPTOR = _BASE_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.base_object)
  ))
_sym_db.RegisterMessage(base_object)

raw_data_collection_object = _reflection.GeneratedProtocolMessageType('raw_data_collection_object', (_message.Message,), dict(
  DESCRIPTOR = _RAW_DATA_COLLECTION_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.raw_data_collection_object)
  ))
_sym_db.RegisterMessage(raw_data_collection_object)

probe_data_collection_object = _reflection.GeneratedProtocolMessageType('probe_data_collection_object', (_message.Message,), dict(
  DESCRIPTOR = _PROBE_DATA_COLLECTION_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.probe_data_collection_object)
  ))
_sym_db.RegisterMessage(probe_data_collection_object)

virtual_gantry_detection_object = _reflection.GeneratedProtocolMessageType('virtual_gantry_detection_object', (_message.Message,), dict(
  DESCRIPTOR = _VIRTUAL_GANTRY_DETECTION_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.virtual_gantry_detection_object)
  ))
_sym_db.RegisterMessage(virtual_gantry_detection_object)

service_url_config_object = _reflection.GeneratedProtocolMessageType('service_url_config_object', (_message.Message,), dict(
  DESCRIPTOR = _SERVICE_URL_CONFIG_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.service_url_config_object)
  ))
_sym_db.RegisterMessage(service_url_config_object)

security_object = _reflection.GeneratedProtocolMessageType('security_object', (_message.Message,), dict(
  DESCRIPTOR = _SECURITY_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.security_object)
  ))
_sym_db.RegisterMessage(security_object)

user_config_object = _reflection.GeneratedProtocolMessageType('user_config_object', (_message.Message,), dict(
  DESCRIPTOR = _USER_CONFIG_OBJECT,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.user_config_object)
  ))
_sym_db.RegisterMessage(user_config_object)

mobile_app_config = _reflection.GeneratedProtocolMessageType('mobile_app_config', (_message.Message,), dict(
  DESCRIPTOR = _MOBILE_APP_CONFIG,
  __module__ = 'mobile_app_config_pb2'
  # @@protoc_insertion_point(class_scope:Proto.Config.mobile_app_config)
  ))
_sym_db.RegisterMessage(mobile_app_config)

# File-level options: Java package/classname for the generated Java code.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n net.ktc.miles.model.proto.configB\017MobileAppConfigH\001')
# @@protoc_insertion_point(module_scope)
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetVirtualMachineResult', 'AwaitableGetVirtualMachineResult', 'get_virtual_machine', ] @pulumi.output_type class GetVirtualMachineResult: """ Describes a Virtual Machine. """ def __init__(__self__, additional_capabilities=None, availability_set=None, diagnostics_profile=None, hardware_profile=None, identity=None, instance_view=None, license_type=None, location=None, name=None, network_profile=None, os_profile=None, plan=None, provisioning_state=None, proximity_placement_group=None, resources=None, storage_profile=None, tags=None, type=None, vm_id=None, zones=None): if additional_capabilities and not isinstance(additional_capabilities, dict): raise TypeError("Expected argument 'additional_capabilities' to be a dict") pulumi.set(__self__, "additional_capabilities", additional_capabilities) if availability_set and not isinstance(availability_set, dict): raise TypeError("Expected argument 'availability_set' to be a dict") pulumi.set(__self__, "availability_set", availability_set) if diagnostics_profile and not isinstance(diagnostics_profile, dict): raise TypeError("Expected argument 'diagnostics_profile' to be a dict") pulumi.set(__self__, "diagnostics_profile", diagnostics_profile) if hardware_profile and not isinstance(hardware_profile, dict): raise TypeError("Expected argument 'hardware_profile' to be a dict") pulumi.set(__self__, "hardware_profile", hardware_profile) if identity and not isinstance(identity, dict): raise TypeError("Expected argument 'identity' to be a dict") pulumi.set(__self__, "identity", identity) if instance_view and not isinstance(instance_view, dict): raise TypeError("Expected argument 
'instance_view' to be a dict") pulumi.set(__self__, "instance_view", instance_view) if license_type and not isinstance(license_type, str): raise TypeError("Expected argument 'license_type' to be a str") pulumi.set(__self__, "license_type", license_type) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if network_profile and not isinstance(network_profile, dict): raise TypeError("Expected argument 'network_profile' to be a dict") pulumi.set(__self__, "network_profile", network_profile) if os_profile and not isinstance(os_profile, dict): raise TypeError("Expected argument 'os_profile' to be a dict") pulumi.set(__self__, "os_profile", os_profile) if plan and not isinstance(plan, dict): raise TypeError("Expected argument 'plan' to be a dict") pulumi.set(__self__, "plan", plan) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if proximity_placement_group and not isinstance(proximity_placement_group, dict): raise TypeError("Expected argument 'proximity_placement_group' to be a dict") pulumi.set(__self__, "proximity_placement_group", proximity_placement_group) if resources and not isinstance(resources, list): raise TypeError("Expected argument 'resources' to be a list") pulumi.set(__self__, "resources", resources) if storage_profile and not isinstance(storage_profile, dict): raise TypeError("Expected argument 'storage_profile' to be a dict") pulumi.set(__self__, "storage_profile", storage_profile) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected 
argument 'type' to be a str") pulumi.set(__self__, "type", type) if vm_id and not isinstance(vm_id, str): raise TypeError("Expected argument 'vm_id' to be a str") pulumi.set(__self__, "vm_id", vm_id) if zones and not isinstance(zones, list): raise TypeError("Expected argument 'zones' to be a list") pulumi.set(__self__, "zones", zones) @property @pulumi.getter(name="additionalCapabilities") def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']: """ Specifies additional capabilities enabled or disabled on the virtual machine. """ return pulumi.get(self, "additional_capabilities") @property @pulumi.getter(name="availabilitySet") def availability_set(self) -> Optional['outputs.SubResourceResponse']: """ Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. """ return pulumi.get(self, "availability_set") @property @pulumi.getter(name="diagnosticsProfile") def diagnostics_profile(self) -> Optional['outputs.DiagnosticsProfileResponse']: """ Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15. 
""" return pulumi.get(self, "diagnostics_profile") @property @pulumi.getter(name="hardwareProfile") def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']: """ Specifies the hardware settings for the virtual machine. """ return pulumi.get(self, "hardware_profile") @property @pulumi.getter def identity(self) -> Optional['outputs.VirtualMachineIdentityResponse']: """ The identity of the virtual machine, if configured. """ return pulumi.get(self, "identity") @property @pulumi.getter(name="instanceView") def instance_view(self) -> 'outputs.VirtualMachineInstanceViewResponse': """ The virtual machine instance view. """ return pulumi.get(self, "instance_view") @property @pulumi.getter(name="licenseType") def license_type(self) -> Optional[str]: """ Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system. <br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15 """ return pulumi.get(self, "license_type") @property @pulumi.getter def location(self) -> str: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkProfile") def network_profile(self) -> Optional['outputs.NetworkProfileResponse']: """ Specifies the network interfaces of the virtual machine. 
""" return pulumi.get(self, "network_profile") @property @pulumi.getter(name="osProfile") def os_profile(self) -> Optional['outputs.OSProfileResponse']: """ Specifies the operating system settings for the virtual machine. """ return pulumi.get(self, "os_profile") @property @pulumi.getter def plan(self) -> Optional['outputs.PlanResponse']: """ Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. """ return pulumi.get(self, "plan") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state, which only appears in the response. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="proximityPlacementGroup") def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']: """ Specifies information about the proximity placement group that the virtual machine should be assigned to. <br><br>Minimum api-version: 2018-04-01. """ return pulumi.get(self, "proximity_placement_group") @property @pulumi.getter def resources(self) -> Sequence['outputs.VirtualMachineExtensionResponse']: """ The virtual machine child extension resources. """ return pulumi.get(self, "resources") @property @pulumi.getter(name="storageProfile") def storage_profile(self) -> Optional['outputs.StorageProfileResponse']: """ Specifies the storage settings for the virtual machine disks. 
""" return pulumi.get(self, "storage_profile") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Resource type """ return pulumi.get(self, "type") @property @pulumi.getter(name="vmId") def vm_id(self) -> str: """ Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. """ return pulumi.get(self, "vm_id") @property @pulumi.getter def zones(self) -> Optional[Sequence[str]]: """ The virtual machine zones. """ return pulumi.get(self, "zones") class AwaitableGetVirtualMachineResult(GetVirtualMachineResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetVirtualMachineResult( additional_capabilities=self.additional_capabilities, availability_set=self.availability_set, diagnostics_profile=self.diagnostics_profile, hardware_profile=self.hardware_profile, identity=self.identity, instance_view=self.instance_view, license_type=self.license_type, location=self.location, name=self.name, network_profile=self.network_profile, os_profile=self.os_profile, plan=self.plan, provisioning_state=self.provisioning_state, proximity_placement_group=self.proximity_placement_group, resources=self.resources, storage_profile=self.storage_profile, tags=self.tags, type=self.type, vm_id=self.vm_id, zones=self.zones) def get_virtual_machine(expand: Optional[str] = None, resource_group_name: Optional[str] = None, vm_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineResult: """ Use this data source to access information about an existing resource. :param str expand: The expand expression to apply on the operation. :param str resource_group_name: The name of the resource group. :param str vm_name: The name of the virtual machine. 
""" __args__ = dict() __args__['expand'] = expand __args__['resourceGroupName'] = resource_group_name __args__['vmName'] = vm_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:compute/v20180601:getVirtualMachine', __args__, opts=opts, typ=GetVirtualMachineResult).value return AwaitableGetVirtualMachineResult( additional_capabilities=__ret__.additional_capabilities, availability_set=__ret__.availability_set, diagnostics_profile=__ret__.diagnostics_profile, hardware_profile=__ret__.hardware_profile, identity=__ret__.identity, instance_view=__ret__.instance_view, license_type=__ret__.license_type, location=__ret__.location, name=__ret__.name, network_profile=__ret__.network_profile, os_profile=__ret__.os_profile, plan=__ret__.plan, provisioning_state=__ret__.provisioning_state, proximity_placement_group=__ret__.proximity_placement_group, resources=__ret__.resources, storage_profile=__ret__.storage_profile, tags=__ret__.tags, type=__ret__.type, vm_id=__ret__.vm_id, zones=__ret__.zones)
import Ember from 'ember';

/**
 * Route that resolves its model to every `job-questionnaire` record
 * belonging to the workflow version named in the URL.
 *
 * The store query is keyed by `workflow_version`, taken from the
 * `workflow_version_id` dynamic segment of the parent route.
 */
export default Ember.Route.extend({
  model(params) {
    const query = { workflow_version: params.workflow_version_id };
    return this.get('store').query('job-questionnaire', query);
  },
});
"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); const cli_1 = __importDefault(require("./cli")); const spacecraft_1 = __importDefault(require("./spacecraft")); const locations_1 = require("./locations"); const spacecraftEmitter_1 = __importDefault(require("./spacecraftEmitter")); const spaceCraftEmitter = new spacecraftEmitter_1.default(process.stdout); const spacecraft = new spacecraft_1.default(locations_1.Earth, locations_1.Moon, spaceCraftEmitter); const cli = new cli_1.default(process.stdin, process.stdout, spacecraft); cli.start(); let latestX = 0; let latestY = 0; setInterval(() => { if (latestX !== spacecraft.x || latestY !== spacecraft.y) { latestX = spacecraft.x; latestY = spacecraft.y; console.log(`(${latestX}, ${latestY}) # ${cli.latestKeyPressed.toUpperCase()}`); } }, 100); //# sourceMappingURL=main.js.map
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = void 0; var _lodash = _interopRequireDefault(require("lodash")); var _Module = _interopRequireDefault(require("./Module")); var _ContentModule = _interopRequireDefault(require("./ContentModule")); var _ContentFolder = _interopRequireDefault(require("./ContentFolder")); var _utils = require("./utils"); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; } function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; } function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } class ConcatenatedModule extends _Module.default { constructor(name, data, parent) { super(name, data, parent); this.name += ' (concatenated)'; this.children = Object.create(null); this.fillContentModules(); } fillContentModules() { _lodash.default.each(this.data.modules, moduleData => this.addContentModule(moduleData)); } addContentModule(moduleData) { const pathParts = (0, _utils.getModulePathParts)(moduleData); 
if (!pathParts) { return; } const [folders, fileName] = [pathParts.slice(0, -1), _lodash.default.last(pathParts)]; let currentFolder = this; _lodash.default.each(folders, folderName => { let childFolder = currentFolder.getChild(folderName); if (!childFolder) { childFolder = currentFolder.addChildFolder(new _ContentFolder.default(folderName, this)); } currentFolder = childFolder; }); const module = new _ContentModule.default(fileName, moduleData, this); currentFolder.addChildModule(module); } getChild(name) { return this.children[name]; } addChildModule(module) { module.parent = this; this.children[module.name] = module; } addChildFolder(folder) { folder.parent = this; this.children[folder.name] = folder; return folder; } mergeNestedFolders() { _lodash.default.invokeMap(this.children, 'mergeNestedFolders'); } toChartData() { return _objectSpread(_objectSpread({}, super.toChartData()), {}, { concatenated: true, groups: _lodash.default.invokeMap(this.children, 'toChartData') }); } } exports.default = ConcatenatedModule; ;
// Re-export the addon's default "simple" layer style unchanged so the host
// application can resolve it under its own module path.
export { default } from 'ember-flexberry-gis/layers-styles/simple';
load("bf4b12814bc95f34eeb130127d8438ab.js"); load("93fae755edd261212639eed30afa2ca4.js"); // Copyright (c) 2012 Ecma International. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /*--- es5id: 15.2.3.6-4-165 description: > Object.defineProperty - 'O' is an Array, 'name' is the length property of 'O', the [[Value]] field of 'desc' is less than value of the length property, test the [[Writable]] attribute of the length property is set to true after deleting properties with large index named if the [[Writable]] field of 'desc' is absent (15.4.5.1 step 3.h) ---*/ var arrObj = [0, 1]; Object.defineProperty(arrObj, "length", { value: 1 }); var indexDeleted = !arrObj.hasOwnProperty("1"); arrObj.length = 10; assert(indexDeleted, 'indexDeleted !== true'); assert.sameValue(arrObj.length, 10, 'arrObj.length');
export default { languageName: "Bahasa Indonesia (Indonesian)", checkEverySecond: "Cek Setiap {0} detik.", retryCheckEverySecond: "Coba lagi setiap {0} detik.", retriesDescription: "Percobaan ulang maksimum sebelum layanan dinyatakan tidak aktif dan notifikasi dikirim", ignoreTLSError: "Abaikan kesalahan TLS/SSL untuk situs web HTTPS", upsideDownModeDescription: "Balikkan statusnya. Jika layanan dapat dijangkau, TIDAK AKTIF.", maxRedirectDescription: "Jumlah maksimum pengalihan untuk diikuti. Setel ke 0 untuk menonaktifkan pengalihan.", acceptedStatusCodesDescription: "Pilih kode status yang dianggap sebagai tanggapan yang berhasil.", passwordNotMatchMsg: "Kata sandi kedua tidak cocok.", notificationDescription: "Harap atur notifikasi ke monitor agar berfungsi.", keywordDescription: "Cari kata kunci dalam code html atau JSON huruf besar-kecil berpengaruh", pauseDashboardHome: "Jeda", deleteMonitorMsg: "Apakah Anda mau menghapus monitor ini?", deleteNotificationMsg: "Apakah Anda mau menghapus notifikasi untuk semua monitor?", resoverserverDescription: "Cloudflare adalah server bawaan, Anda dapat mengubah server resolver kapan saja.", rrtypeDescription: "Pilih RR-Type yang mau Anda monitor", pauseMonitorMsg: "Apakah Anda yakin mau menjeda?", enableDefaultNotificationDescription: "Untuk setiap monitor baru, notifikasi ini akan diaktifkan secara bawaan. Anda masih dapat menonaktifkan notifikasi secara terpisah untuk setiap monitor.", clearEventsMsg: "Apakah Anda yakin mau menghapus semua event di monitor ini?", clearHeartbeatsMsg: "Apakah Anda yakin mau menghapus semua heartbeats di monitor ini?", confirmClearStatisticsMsg: "Apakah Anda yakin mau menghapus semua statistik?", importHandleDescription: "Pilih 'Lewati yang ada' jika Anda ingin melewati setiap monitor atau notifikasi dengan nama yang sama. 'Timpa' akan menghapus setiap monitor dan notifikasi yang ada.", confirmImportMsg: "Apakah Anda yakin untuk mengimpor cadangan? 
Pastikan Anda telah memilih opsi impor yang tepat.", twoFAVerifyLabel: "Silakan ketik token Anda untuk memverifikasi bahwa 2FA berfungsi", tokenValidSettingsMsg: "Token benar! Anda sekarang dapat menyimpan pengaturan 2FA.", confirmEnableTwoFAMsg: "Apakah anda yakin ingin mengaktifkan 2FA?", confirmDisableTwoFAMsg: "Apakah anda yakin ingin menonaktifkan 2FA?", Settings: "Pengaturan", Dashboard: "Dasbor", "New Update": "Pembaruan Baru", Language: "Bahasa", Appearance: "Tampilan", Theme: "Tema", General: "Umum", Version: "Versi", "Check Update On GitHub": "Cek Pembaruan di GitHub", List: "Daftar", Add: "Tambah", "Add New Monitor": "Tambah Monitor Baru", "Quick Stats": "Statistik", Up: "Aktif", Down: "Tidak Aktif", Pending: "Tertunda", Unknown: "Tidak diketahui", Pause: "Jeda", Name: "Nama", Status: "Status", DateTime: "Tanggal Waktu", Message: "Pesan", "No important events": "Tidak ada peristiwa penting", Resume: "Lanjut", Edit: "Ubah", Delete: "Hapus", Current: "Saat ini", Uptime: "Waktu aktif", "Cert Exp.": "Cert Exp.", days: "hari-hari", day: "hari", "-day": "-hari", hour: "Jam", "-hour": "-Jam", Response: "Tanggapan", Ping: "Ping", "Monitor Type": "Tipe Monitor", Keyword: "Keyword", "Friendly Name": "Nama yang Ramah", URL: "URL", Hostname: "Hostname", Port: "Port", "Heartbeat Interval": "Jarak Waktu Heartbeat ", Retries: "Coba lagi", "Heartbeat Retry Interval": "Jarak Waktu Heartbeat Mencoba kembali ", Advanced: "Tingkat Lanjut", "Upside Down Mode": "Mode Terbalik", "Max. 
Redirects": "Maksimal Pengalihan", "Accepted Status Codes": "Kode Status yang Diterima", Save: "Simpan", Notifications: "Notifikasi", "Not available, please setup.": "Tidak tersedia, silakan atur.", "Setup Notification": "Setel Notifikasi", Light: "Terang", Dark: "Gelap", Auto: "Otomatis", "Theme - Heartbeat Bar": "Tema - Heartbeat Bar", Normal: "Normal", Bottom: "Bawah", None: "Tidak ada", Timezone: "Zona Waktu", "Search Engine Visibility": "Visibilitas Mesin Pencari", "Allow indexing": "Mengizinkan untuk diindex", "Discourage search engines from indexing site": "Mencegah mesin pencari untuk mengindex situs", "Change Password": "Ganti Sandi", "Current Password": "Sandi Lama", "New Password": "Sandi Baru", "Repeat New Password": "Ulangi Sandi Baru", "Update Password": "Perbarui Kata Sandi", "Disable Auth": "Nonaktifkan Autentikasi", "Enable Auth": "Aktifkan Autentikasi", Logout: "Keluar", Leave: "Pergi", "I understand, please disable": "Saya mengerti, silakan dinonaktifkan", Confirm: "Konfirmasi", Yes: "Ya", No: "Tidak", Username: "Nama Pengguna", Password: "Sandi", "Remember me": "Ingat saya", Login: "Masuk", "No Monitors, please": "Tidak ada monitor, silakan", "add one": "tambahkan satu", "Notification Type": "Tipe Notifikasi", Email: "Surel", Test: "Tes", "Certificate Info": "Info Sertifikasi", "Resolver Server": "Resolver Server", "Resource Record Type": "Resource Record Type", "Last Result": "Hasil Terakhir", "Create your admin account": "Buat admin akun Anda", "Repeat Password": "Ulangi Sandi", "Import Backup": "Impor Cadangan", "Export Backup": "Expor Cadangan", Export: "Expor", Import: "Impor", respTime: "Tanggapan. 
Waktu (milidetik)", notAvailableShort: "N/A", "Default enabled": "Bawaan diaktifkan", "Apply on all existing monitors": "Terapkan pada semua monitor yang ada", Create: "Buat", "Clear Data": "Bersihkan Data", Events: "Peristiwa", Heartbeats: "Heartbeats", "Auto Get": "Ambil Otomatis", backupDescription: "Anda dapat mencadangkan semua monitor dan semua notifikasi ke dalam berkas JSON.", backupDescription2: "Catatan: Data sejarah dan peristiwa tidak disertakan.", backupDescription3: "Data sensitif seperti notifikasi token disertakan dalam berkas ekspor, harap simpan dengan hati-hati.", alertNoFile: "Silakan pilih berkas untuk diimpor.", alertWrongFileType: "Silakan pilih berkas JSON.", "Clear all statistics": "Hapus semua statistik", "Skip existing": "Lewati yang ada", Overwrite: "Timpa", Options: "Opsi", "Keep both": "Simpan keduanya", "Verify Token": "Verifikasi Token", "Setup 2FA": "Pengaturan 2FA", "Enable 2FA": "Aktifkan 2FA", "Disable 2FA": "Nonaktifkan 2FA", "2FA Settings": "Pengaturan 2FA", "Two Factor Authentication": "Autentikasi Dua Faktor", Active: "Aktif", Inactive: "Tidak Aktif", Token: "Token", "Show URI": "Lihat URI", Tags: "Tanda", "Add New below or Select...": "Tambahkan Baru di bawah atau Pilih...", "Tag with this name already exist.": "Tanda dengan nama ini sudah ada.", "Tag with this value already exist.": "Tanda dengan nilai ini sudah ada.", color: "warna", "value (optional)": "nilai (harus diisi)", Gray: "Abu-abu", Red: "Merah", Orange: "Jingga", Green: "Hijau", Blue: "Biru", Indigo: "Biru Tua", Purple: "Ungu", Pink: "Merah Muda", "Search...": "Cari...", "Avg. Ping": "Rata-rata Ping", "Avg. 
Response": "Rata-rata Tanggapan", "Entry Page": "Halaman Masuk", statusPageNothing: "Tidak ada di sini, silakan tambahkan grup atau monitor.", "No Services": "Tidak ada Layanan", "All Systems Operational": "Semua Sistem Berfungsi", "Partially Degraded Service": "Layanan Terdegradasi Sebagian", "Degraded Service": "Layanan Terdegradasi", "Add Group": "Tambah Grup", "Add a monitor": "Tambah monitor", "Edit Status Page": "Edit Halaman Status", "Go to Dashboard": "Pergi ke Dasbor", "Status Page": "Halaman Status", defaultNotificationName: "{notification} saya Peringatan ({number})", here: "di sini", Required: "Dibutuhkan", telegram: "Telegram", "Bot Token": "Bot Token", "You can get a token from": "Anda bisa mendapatkan token dari", "Chat ID": "Chat ID", supportTelegramChatID: "Mendukung Obrolan Langsung / Grup / Channel Chat ID", wayToGetTelegramChatID: "Anda bisa mendapatkan chat id Anda dengan mengirim pesan ke bot dan pergi ke url ini untuk melihat chat_id:", "YOUR BOT TOKEN HERE": "BOT TOKEN ANDA DI SINI", chatIDNotFound: "Chat ID tidak ditemukan, tolong kirim pesan ke bot ini dulu", webhook: "Webhook", "Post URL": "Post URL", "Content Type": "Tipe konten", webhookJsonDesc: "{0} bagus untuk peladen http modern seperti express.js", webhookFormDataDesc: "{multipart} bagus untuk PHP, Anda hanya perlu mengurai json dengan {decodeFunction}", smtp: "Surel (SMTP)", secureOptionNone: "None / STARTTLS (25, 587)", secureOptionTLS: "TLS (465)", "Ignore TLS Error": "Abaikan Kesalahan TLS", "From Email": "Dari Surel", "To Email": "Ke Surel", smtpCC: "CC", smtpBCC: "BCC", discord: "Discord", "Discord Webhook URL": "Discord Webhook URL", wayToGetDiscordURL: "Anda bisa mendapatkan ini dengan pergi ke Server Settings -> Integrations -> Create Webhook", "Bot Display Name": "Nama Bot", "Prefix Custom Message": "Awalan Pesan", "Hello @everyone is...": "Halo {'@'}everyone is...", teams: "Microsoft Teams", "Webhook URL": "Webhook URL", wayToGetTeamsURL: "Anda dapat mempelajari cara 
membuat url webhook {0}.", signal: "Sinyal", Number: "Nomer", Recipients: "Penerima", needSignalAPI: "Anda harus memiliki klien sinyal dengan REST API.", wayToCheckSignalURL: "Anda dapat memeriksa url ini untuk melihat cara menyiapkannya:", signalImportant: "PENTING: Anda tidak dapat mencampur grup dan nomor di penerima!", gotify: "Gotify", "Application Token": "Token Aplikasi", "Server URL": "URL Peladen", Priority: "Prioritas", slack: "Slack", "Icon Emoji": "Ikon Emoji", "Channel Name": "Nama Saluran", "Uptime Ron URL": "Uptime Ron URL", aboutWebhooks: "Info lain tentang webhook: {0}", aboutChannelName: "Masukan nama saluran di {0} Kolom Nama Saluran jika Anda ingin melewati saluran webhook. Contoh: #saluran-lain", aboutKumaURL: "Jika Anda membiarkan bidang URL Uptime Ron kosong, itu akan menjadi bawaan ke halaman Proyek Github.", emojiCheatSheet: "Lembar contekan emoji: {0}", "rocket.chat": "Rocket.chat", pushover: "Pushover", pushy: "Pushy", octopush: "Octopush", promosms: "PromoSMS", lunasea: "LunaSea", apprise: "Apprise (Mendukung 50+ layanan notifikasi)", pushbullet: "Pushbullet", line: "Line Messenger", mattermost: "Mattermost", "User Key": "Kunci pengguna", Device: "Perangkat", "Message Title": "Judul Pesan", "Notification Sound": "Suara Nofifikasi", "More info on:": "Info lebih lanjut tentang: {0}", pushoverDesc1: "Prioritas darurat (2) memiliki batas waktu bawaan 30 detik antara percobaan ulang dan akan kadaluwarsa setelah 1 jam.", pushoverDesc2: "Jika Anda ingin mengirim pemberitahuan ke perangkat yang berbeda, isi kolom Perangkat.", "SMS Type": "Tipe SMS", octopushTypePremium: "Premium (Cepat - direkomendasikan untuk mengingatkan)", octopushTypeLowCost: "Low Cost (Lambat, terkadang diblokir oleh operator)", "Check octopush prices": "Cek harga octopush {0}.", octopushPhoneNumber: "Nomer Telpon/HP (format internasional, contoh : +33612345678) ", octopushSMSSender: "Nama Pengirim SMS : 3-11 karakter alfanumerik dan spasi (a-zA-Z0-9)", "LunaSea Device ID": 
"LunaSea Device ID", "Apprise URL": "Apprise URL", "Example:": "Contoh: {0}", "Read more:": "Baca lebih lajut: {0}", "Status:": "Status: {0}", "Read more": "Baca lebih lajut", appriseInstalled: "Apprise diinstall.", appriseNotInstalled: "Apprise tidak diinstall. {0}", "Access Token": "Token Akses", "Channel access token": "Token akses saluran", "Line Developers Console": "Konsol Pengembang Line", lineDevConsoleTo: "Konsol Pengembang Line - {0}", "Basic Settings": "Pengaturan Dasar", "User ID": "ID User", "Messaging API": "Messaging API", wayToGetLineChannelToken: "Pertama akses {0}, buat penyedia dan saluran (Messaging API), lalu Anda bisa mendapatkan token akses saluran dan id pengguna dari item menu yang disebutkan di atas.", "Icon URL": "Icon URL", aboutIconURL: "Anda dapat memberikan tautan ke gambar di \"Icon URL\" untuk mengganti gambar profil bawaan. Tidak akan digunakan jika Ikon Emoji diset.", aboutMattermostChannelName: "Anda dapat mengganti saluran bawaan tujuan posting webhook dengan memasukkan nama saluran ke dalam Kolom \"Channel Name\". Ini perlu diaktifkan di pengaturan webhook Mattermost. contoh: #other-channel", matrix: "Matrix", promosmsTypeEco: "SMS ECO - murah tapi lambat dan sering kelebihan beban. Terbatas hanya untuk penerima Polandia.", promosmsTypeFlash: "SMS FLASH - Pesan akan otomatis muncul di perangkat penerima. Terbatas hanya untuk penerima Polandia.", promosmsTypeFull: "SMS FULL - SMS tingkat premium, Anda dapat menggunakan Nama Pengirim Anda (Anda harus mendaftarkan nama terlebih dahulu). Dapat diAndalkan untuk peringatan.", promosmsTypeSpeed: "SMS SPEED - Prioritas tertinggi dalam sistem. 
Sangat cepat dan dapat diandalkan tetapi mahal (sekitar dua kali lipat dari harga SMS FULL).", promosmsPhoneNumber: "Nomor telepon (untuk penerima Polandia Anda dapat melewati kode area)", promosmsSMSSender: "Nama Pengirim SMS : Nama pra-registrasi atau salah satu bawaan: InfoSMS, Info SMS, MaxSMS, INFO, SMS", "Feishu WebHookUrl": "Feishu WebHookUrl", };
import LandingPage from '../layouts/LandingPage.vue';
import Home from '../pages/Home.vue';
import About from '../pages/About.vue';
import Contact from '../pages/Contact.vue';
import FAQs from '../pages/FAQs.vue';

/**
 * Public site routes. Every page renders inside the LandingPage layout,
 * which owns the root path and hosts the children in its router-view.
 */
const landingChildren = [
  { name: 'home', path: '/', component: Home },
  { name: 'about', path: '/about', component: About },
  { name: 'contact', path: '/contact', component: Contact },
  { name: 'faqs', path: '/faqs', component: FAQs },
];

export default [
  {
    path: '/',
    component: LandingPage,
    children: landingChildren,
  },
];
from flask_wtf import FlaskForm
from wtforms import (StringField, PasswordField, SubmitField,
                     ValidationError, BooleanField)
# `Required` is a deprecated alias that was removed in WTForms 3.0;
# `DataRequired` is the identically-behaving supported validator.
from wtforms.validators import DataRequired, Email, EqualTo

from ..models import User


class SignUpForm(FlaskForm):
    """Registration form.

    Enforces that the password and its confirmation match, and (via the
    custom ``validate_*`` hooks below) that the email address and the
    username are not already taken.
    """

    first_name = StringField("Your First Name", validators=[DataRequired()])
    last_name = StringField("Your Last Name", validators=[DataRequired()])
    username = StringField("Your Username", validators=[DataRequired()])
    email = StringField("Your Email Address",
                        validators=[DataRequired(), Email()])
    # EqualTo points at the confirmation field so the mismatch error is
    # reported on the primary password field.
    password = PasswordField(
        "Password",
        validators=[DataRequired(),
                    EqualTo("password_confirm",
                            message="Passwords must match")])
    password_confirm = PasswordField("Confirm Password",
                                     validators=[DataRequired()])
    submit = SubmitField("Sign Up")

    def validate_email(self, data_field):
        """Reject the submission if the email already has an account.

        WTForms calls ``validate_<fieldname>`` hooks automatically during
        ``form.validate()``.
        """
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError("There is an account with that email")

    def validate_username(self, data_field):
        """Reject the submission if the username is already taken."""
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError("That username is taken")


class LoginForm(FlaskForm):
    """Login form: email + password with an optional remember-me flag."""

    email = StringField("Your Email Address",
                        validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired()])
    remember = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
// Vuex store root: a single `user` module for now.
import Vue from 'vue'
import Vuex from 'vuex'
import user from './user'

Vue.use(Vuex)

/*
 * If not building with SSR mode, you can
 * directly export the Store instantiation
 */

const Store = new Vuex.Store({
  modules: {
    user
  }
})

// Webpack hot-module replacement: in dev builds, swap the `user` module
// in place (via hotUpdate) instead of reloading the whole page when its
// file changes.
if (process.env.DEV && module.hot) {
  module.hot.accept(['./user'], () => {
    const newUser = require('./user').default
    Store.hotUpdate({ modules: { user: newUser } })
  })
}

export default Store
import copy
import os

import click
import nbformat
from papermill.iorw import load_notebook_node, write_ipynb

from dagster.utils import mkdir_p, safe_isfile


def get_notebook_scaffolding():
    """Return the JSON source for a fresh dagstermill notebook.

    The scaffold has two cells: an ``import dagstermill`` cell and a cell
    tagged ``parameters`` (the tag papermill uses for parameter injection)
    that creates the dagstermill context.
    """
    first_cell_source = '"import dagstermill"'

    # Double braces escape literal JSON braces for str.format(); only
    # {first_cell_source} is substituted.
    starting_notebook_init = '''
    {{
    "cells": [
    {{
    "cell_type": "code",
    "execution_count": null,
    "metadata": {{}},
    "outputs": [],
    "source": [
    {first_cell_source}
    ]
    }},
    {{
    "cell_type": "code",
    "execution_count": null,
    "metadata": {{
    "tags": [
    "parameters"
    ]
    }},
    "outputs": [],
    "source": [
    "context = dagstermill.get_context()"
    ]
    }}
    ],
    "metadata": {{
    "celltoolbar": "Tags"
    }},
    "nbformat": 4,
    "nbformat_minor": 2
    }}'''
    return starting_notebook_init.format(first_cell_source=first_cell_source)


@click.command(name='register-notebook', help=('Registers repository in existing notebook'))
@click.option('--notebook', '-note', type=click.Path(exists=True), help='Path to notebook')
def retroactively_scaffold_notebook(notebook):
    # Thin click wrapper so the scaffolding logic stays testable.
    execute_retroactive_scaffold(notebook)


def execute_retroactive_scaffold(notebook_path):
    """Prepend the dagstermill import + parameters cells to an existing notebook.

    The notebook at ``notebook_path`` is rewritten in place; its original
    cells are preserved after the two injected cells.
    """
    nb = load_notebook_node(notebook_path)
    new_nb = copy.deepcopy(nb)

    import_cell_source = 'import dagstermill'
    import_cell = nbformat.v4.new_code_cell(source=import_cell_source)

    parameters_cell_source = 'context = dagstermill.get_context()'
    parameters_cell = nbformat.v4.new_code_cell(source=parameters_cell_source)
    # papermill looks for this tag to know where to inject parameters.
    parameters_cell.metadata['tags'] = ['parameters']

    new_nb.cells = [import_cell, parameters_cell] + nb.cells
    write_ipynb(new_nb, notebook_path)


@click.command(name='create-notebook', help=('Creates new dagstermill notebook.'))
@click.option('--notebook', '-note', type=click.Path(), help="Name of notebook")
@click.option(
    '--force-overwrite',
    is_flag=True,
    help="Will force overwrite any existing notebook or file with the same name.",
)
def create_notebook(notebook, force_overwrite):
    execute_create_notebook(notebook, force_overwrite)


def execute_create_notebook(notebook, force_overwrite):
    """Write a scaffolded dagstermill notebook to ``notebook`` (relative to cwd).

    Appends ``.ipynb`` if missing, creates intermediate directories, and
    prompts (abort on "no") before overwriting unless force_overwrite is set.
    """
    notebook_path = os.path.join(
        os.getcwd(), notebook if notebook.endswith('.ipynb') else notebook + ".ipynb"
    )

    notebook_dir = os.path.dirname(notebook_path)
    mkdir_p(notebook_dir)

    if not force_overwrite and safe_isfile(notebook_path):
        click.confirm(
            (
                'Warning, {notebook_path} already exists and continuing '
                'will overwrite the existing notebook. '
                'Are you sure you want to continue?'
            ).format(notebook_path=notebook_path),
            abort=True,
        )

    with open(notebook_path, 'w') as f:
        f.write(get_notebook_scaffolding())
        click.echo("Created new dagstermill notebook at {path}".format(path=notebook_path))


def create_dagstermill_cli():
    """Build the ``dagstermill`` click command group."""
    group = click.Group(name="dagstermill")
    group.add_command(create_notebook)
    group.add_command(retroactively_scaffold_notebook)
    return group


def main():
    cli = create_dagstermill_cli()
    # click magic
    cli(obj={})  # pylint:disable=E1120
import sys
import json
import io
import enum

from messagebird.balance import Balance
from messagebird.call import Call
from messagebird.call_list import CallList
from messagebird.contact import Contact, ContactList
from messagebird.error import Error, ValidationError
from messagebird.group import Group, GroupList
from messagebird.hlr import HLR
from messagebird.message import Message, MessageList
from messagebird.mms import MMS
from messagebird.voice_webhook import VoiceWebhook, VoiceWebhookList
from messagebird.voicemessage import VoiceMessagesList, VoiceMessage
from messagebird.lookup import Lookup
from messagebird.verify import Verify
from messagebird.http_client import HttpClient, ResponseFormat
from messagebird.conversation_message import ConversationMessage, ConversationMessageList
from messagebird.conversation import Conversation, ConversationList
from messagebird.conversation_webhook import ConversationWebhook, ConversationWebhookList
from messagebird.voice_recording import VoiceRecordingsList, VoiceRecording
from messagebird.voice_transcription import VoiceTranscriptionsList, VoiceTranscriptionsView
from messagebird.call_flow import CallFlow, CallFlowList, CallFlowNumberList
from messagebird.number import Number, NumberList

ENDPOINT = 'https://rest.messagebird.com'
CLIENT_VERSION = '1.4.1'
PYTHON_VERSION = '%d.%d.%d' % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
USER_AGENT = 'MessageBird/ApiClient/%s Python/%s' % (CLIENT_VERSION, PYTHON_VERSION)
REST_TYPE = 'rest'

CONVERSATION_API_ROOT = 'https://conversations.messagebird.com/v1/'
CONVERSATION_API_WHATSAPP_SANDBOX_ROOT = 'https://whatsapp-sandbox.messagebird.com/v1/'
CONVERSATION_PATH = 'conversations'
CONVERSATION_MESSAGES_PATH = 'messages'
CONVERSATION_WEB_HOOKS_PATH = 'webhooks'
CONVERSATION_TYPE = 'conversation'

VOICE_API_ROOT = 'https://voice.messagebird.com'
VOICE_TYPE = 'voice'
VOICE_PATH = 'calls'
VOICE_LEGS_PATH = 'legs'
VOICE_RECORDINGS_PATH = 'recordings'
VOICE_TRANSCRIPTIONS_PATH = 'transcriptions'
VOICE_WEB_HOOKS_PATH = 'webhooks'

NUMBER_TYPE = 'number'
NUMBER_API_ROOT = 'https://numbers.messagebird.com/v1/'
NUMBER_PATH = 'phone-numbers'
NUMBER_AVAILABLE_PATH = 'available-phone-numbers'


class ErrorException(Exception):
    """Raised when the API returns one or more error objects.

    ``errors`` must be an iterable of error objects (or strings); their
    string forms are joined into the exception message.
    """

    def __init__(self, errors):
        self.errors = errors
        message = ' '.join([str(e) for e in self.errors])
        super(ErrorException, self).__init__(message)


# NOTE: class name contains a typo ("Signle" for "Single") but is kept
# unchanged for backward compatibility with callers that catch it.
class SignleErrorException(Exception):
    """Raised for a single plain-text error message."""

    def __init__(self, errorMessage):
        super(SignleErrorException, self).__init__(errorMessage)


class Feature(enum.Enum):
    ENABLE_CONVERSATIONS_API_WHATSAPP_SANDBOX = 1


class Client(object):
    """MessageBird REST API client.

    Routes requests to the REST, Conversations, Voice or Numbers API roots
    depending on the ``type`` argument of each call.
    """

    def __init__(self, access_key, http_client=None, features=None):
        """
        :param access_key: MessageBird API access key.
        :param http_client: optional pre-built HttpClient; when given it is
            used for every request regardless of API type.
        :param features: optional iterable of Feature flags.
        """
        # Avoid a mutable default argument; None means "no features".
        if features is None:
            features = []
        self.access_key = access_key
        self.http_client = http_client

        self.conversation_api_root = CONVERSATION_API_WHATSAPP_SANDBOX_ROOT if Feature.ENABLE_CONVERSATIONS_API_WHATSAPP_SANDBOX in features else CONVERSATION_API_ROOT

    def _get_http_client(self, type=REST_TYPE):
        # A user-supplied client always wins over per-API routing.
        if self.http_client:
            return self.http_client

        if type == CONVERSATION_TYPE:
            return HttpClient(self.conversation_api_root, self.access_key, USER_AGENT)

        if type == VOICE_TYPE:
            return HttpClient(VOICE_API_ROOT, self.access_key, USER_AGENT)

        if type == NUMBER_TYPE:
            return HttpClient(NUMBER_API_ROOT, self.access_key, USER_AGENT)

        return HttpClient(ENDPOINT, self.access_key, USER_AGENT)

    def request(self, path, method='GET', params=None, type=REST_TYPE):
        """Builds a request, gets a response and decodes it."""
        response_text = self._get_http_client(type).request(path, method, params)

        if not response_text:
            return response_text

        response_json = json.loads(response_text)

        if 'errors' in response_json:
            raise (ErrorException([Error().load(e) for e in response_json['errors']]))

        return response_json

    def request_plain_text(self, path, method='GET', params=None, type=REST_TYPE):
        """Builds a request, gets a response and returns the body."""
        response_text = self._get_http_client(type).request(path, method, params)

        try:
            # Try to decode the response to JSON to see if the API returned any
            # errors.
            response_json = json.loads(response_text)

            if 'errors' in response_json:
                raise (ErrorException([Error().load(e) for e in response_json['errors']]))
        except ValueError:
            # Do nothing: json.loads throws if the input string is not valid JSON,
            # which is expected. We'll just return the response body below.
            pass

        return response_text

    def request_store_as_file(self, path, filepath, method='GET', params=None, type=REST_TYPE):
        """Builds a request, gets a binary response and stores it at filepath."""
        response_binary = self._get_http_client(type).request(path, method, params, ResponseFormat.binary)

        if not response_binary:
            return response_binary

        with io.open(filepath, 'wb') as f:
            f.write(response_binary)

        return filepath

    def balance(self):
        """Retrieve your balance."""
        return Balance().load(self.request('balance'))

    def call(self, id):
        """Retrieve the information of a specific call"""
        return Call().load(self.request('calls/' + str(id), 'GET', None, VOICE_TYPE))

    def call_list(self, page=1):
        """Listing calls

        Args:
            page(int) : The page to list.

        Raises:
            ErrorException : On api returning errors

        Returns:
            CallList(object) : The list of calls requested & their status."""
        return CallList().load(self.request('calls/?page=' + str(page), 'GET', None, VOICE_TYPE))

    def call_create(self, source, destination, callFlow, webhook):
        """Creating a call

        Args:
            source(str)         : The caller ID of the call.
            destination(string) : The number/address to be called.
            callFlow(object)    : The call flow object to be executed when the call is answered.
            webhook(object)     : The webhook object containing the url & required token.

        Raises:
            ErrorException : On api returning errors

        Returns:
            Call(object) : The Call object just created."""
        # locals() snapshots the four parameters as the POST body.
        params = locals()
        del (params['self'])
        return Call().load(self.request('calls', 'POST', params, VOICE_TYPE))

    def call_delete(self, id):
        """Delete an existing call object."""
        response = self.request_plain_text('calls/' + str(id), 'DELETE', None, VOICE_TYPE)
        # successful delete should be empty
        if len(response) > 0:
            raise SignleErrorException(response)

    def hlr(self, id):
        """Retrieve the information of a specific HLR lookup."""
        return HLR().load(self.request('hlr/' + str(id)))

    def hlr_create(self, msisdn, reference):
        """Perform a new HLR lookup."""
        return HLR().load(self.request('hlr', 'POST', {'msisdn': msisdn, 'reference': reference}))

    def message(self, id):
        """Retrieve the information of a specific message."""
        return Message().load(self.request('messages/' + str(id)))

    def message_list(self, limit=20, offset=0, status=None):
        """Retrieve a list of the most recent messages.

        Args:
            limit(int)  : The page limit.
            offset(int) : The page to list.
            status(str) : Message status filter(scheduled, sent, buffered, delivered, expired or delivery_failed)

        Returns:
            MessageList(object) : The List of the message requested."""
        query = self._format_query(limit, offset)
        if status:
            query = query + "&status=" + status
        return MessageList().load(self.request('messages?' + query))

    def message_create(self, originator, recipients, body, params=None):
        """Create a new message."""
        if params is None:
            params = {}
        # Accept either a comma-separated string or a list of recipients.
        if isinstance(recipients, list):
            recipients = ','.join(recipients)

        params.update({'originator': originator, 'body': body, 'recipients': recipients})
        return Message().load(self.request('messages', 'POST', params))

    def message_delete(self, id):
        """Delete a message from the dashboard."""
        self.request_plain_text('messages/' + str(id), 'DELETE')

    def mms_create(self, originator, recipients, body, mediaUrls, subject=None, reference=None, scheduledDatetime=None):
        """ Send bulk mms.

        Args:
            originator(str)           : name of the originator
            recipients(str/list(str)) : comma separated numbers or list of numbers in E164 format
            body(str)                 : text message body
            mediaUrl(str)             : list of URL's of attachments of the MMS message.
            subject(str)              : utf-encoded subject
            reference(str)            : client reference text
            scheduledDatetime(str)    : scheduled date time in RFC3339 format
        Raises:
            ErrorException: On api returning errors

        Returns:
            MMS: On success an MMS instance instantiated with success response
        """
        if isinstance(recipients, list):
            recipients = ','.join(recipients)
        if isinstance(mediaUrls, str):
            mediaUrls = [mediaUrls]

        # locals() is taken after normalisation so the POST body carries the
        # normalised values.
        params = locals()
        del (params['self'])

        return MMS().load(self.request('mms', 'POST', params))

    def voice_message(self, id):
        "Retrieve the information of a specific voice message."
        return VoiceMessage().load(self.request('voicemessages/' + str(id)))

    def voice_message_list(self, limit=10, offset=0):
        "Retrieve the information of a list of voice messages."
        query = self._format_query(limit, offset)
        return VoiceMessagesList().load(self.request('voicemessages?' + query, 'GET', None))

    def voice_message_create(self, recipients, body, params=None):
        """Create a new voice message."""
        if params is None:
            params = {}
        if isinstance(recipients, list):
            recipients = ','.join(recipients)

        params.update({'recipients': recipients, 'body': body})
        return VoiceMessage().load(self.request('voicemessages', 'POST', params))

    def lookup(self, phonenumber, params=None):
        """Do a new lookup."""
        if params is None:
            params = {}
        return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))

    def lookup_hlr(self, phonenumber, params=None):
        """Retrieve the information of a specific HLR lookup."""
        if params is None:
            params = {}
        return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))

    def lookup_hlr_create(self, phonenumber, params=None):
        """Perform a new HLR lookup."""
        if params is None:
            params = {}
        return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'POST', params))

    def verify(self, id):
        """Retrieve the information of a specific verification."""
        return Verify().load(self.request('verify/' + str(id)))

    def verify_create(self, recipient, params=None):
        """Create a new verification."""
        if params is None:
            params = {}
        params.update({'recipient': recipient})
        return Verify().load(self.request('verify', 'POST', params))

    def verify_verify(self, id, token):
        """Verify the token of a specific verification."""
        return Verify().load(self.request('verify/' + str(id), params={'token': token}))

    def verify_delete(self, id):
        """Delete an existing verification object."""
        self.request_plain_text('verify/' + str(id), 'DELETE')

    def contact(self, id):
        """Retrieve the information of a specific contact."""
        return Contact().load(self.request('contacts/' + str(id)))

    def contact_create(self, phonenumber, params=None):
        """Create a new contact for the given MSISDN."""
        if params is None:
            params = {}
        params.update({'msisdn': phonenumber})
        return Contact().load(self.request('contacts', 'POST', params))

    def contact_delete(self, id):
        """Delete an existing contact."""
        self.request_plain_text('contacts/' + str(id), 'DELETE')

    def contact_update(self, id, params=None):
        """Patch an existing contact with the given fields."""
        self.request_plain_text('contacts/' + str(id), 'PATCH', params)

    def contact_list(self, limit=10, offset=0):
        """List contacts, paginated."""
        query = self._format_query(limit, offset)
        return ContactList().load(self.request('contacts?' + query, 'GET', None))

    def group(self, id):
        """Retrieve a single group."""
        return Group().load(self.request('groups/' + str(id), 'GET', None))

    def group_create(self, name, params=None):
        """Create a new group with the given name."""
        if params is None:
            params = {}
        params.update({'name': name})
        return Group().load(self.request('groups', 'POST', params))

    def group_delete(self, id):
        """Delete an existing group."""
        self.request_plain_text('groups/' + str(id), 'DELETE', None)

    def group_list(self, limit=10, offset=0):
        """List groups, paginated."""
        query = self._format_query(limit, offset)
        return GroupList().load(self.request('groups?' + query, 'GET', None))

    def group_update(self, id, name, params=None):
        """Rename an existing group."""
        if params is None:
            params = {}
        params.update({'name': name})
        self.request_plain_text('groups/' + str(id), 'PATCH', params)

    def group_add_contacts(self, groupId, contactIds):
        """Add the given contacts to a group."""
        query = self.__group_add_contacts_query(contactIds)
        self.request_plain_text('groups/' + str(groupId) + '?' + query, 'PUT', None)

    def __group_add_contacts_query(self, contactIds):
        # __group_add_contacts_query gets a query string to add contacts to a
        # group. The expected format is ids[]=first-contact&ids[]=second-contact.
        # See: https://developers.messagebird.com/docs/groups#add-contact-to-group.
        return '&'.join('ids[]=' + str(id) for id in contactIds)

    def group_remove_contact(self, groupId, contactId):
        """Remove a single contact from a group."""
        self.request_plain_text('groups/' + str(groupId) + '/contacts/' + str(contactId), 'DELETE', None)

    def conversation_list(self, limit=10, offset=0):
        """List conversations, paginated."""
        uri = CONVERSATION_PATH + '?' + self._format_query(limit, offset)
        return ConversationList().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def conversation_start(self, start_request):
        """Start a new conversation."""
        uri = CONVERSATION_PATH + '/start'
        return Conversation().load(self.request(uri, 'POST', start_request, CONVERSATION_TYPE))

    def conversation_update(self, id, update_request):
        """Update an existing conversation."""
        uri = CONVERSATION_PATH + '/' + str(id)
        return Conversation().load(self.request(uri, 'PATCH', update_request, CONVERSATION_TYPE))

    def conversation_read(self, id):
        """Retrieve a single conversation."""
        uri = CONVERSATION_PATH + '/' + str(id)
        return Conversation().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def conversation_list_messages(self, conversation_id, limit=10, offset=0):
        """List the messages of a conversation, paginated."""
        uri = CONVERSATION_PATH + '/' + str(conversation_id) + '/' + CONVERSATION_MESSAGES_PATH
        uri += '?' + self._format_query(limit, offset)
        return ConversationMessageList().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def conversation_create_message(self, conversation_id, message_create_request):
        """Post a new message into a conversation."""
        uri = CONVERSATION_PATH + '/' + str(conversation_id) + '/' + CONVERSATION_MESSAGES_PATH
        return ConversationMessage().load(self.request(uri, 'POST', message_create_request, CONVERSATION_TYPE))

    def conversation_read_message(self, message_id):
        """Retrieve a single conversation message."""
        uri = CONVERSATION_MESSAGES_PATH + '/' + str(message_id)
        return ConversationMessage().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def conversation_create_webhook(self, webhook_create_request):
        """Create a conversations webhook."""
        return ConversationWebhook().load(
            self.request(CONVERSATION_WEB_HOOKS_PATH, 'POST', webhook_create_request, CONVERSATION_TYPE))

    def conversation_update_webhook(self, id, update_request):
        """
        Updates a webhook with the supplied parameters.

        API Reference: https://developers.messagebird.com/api/conversations/#webhooks
        """
        uri = CONVERSATION_WEB_HOOKS_PATH + '/' + str(id)
        web_hook = self.request(uri, 'PATCH', update_request, CONVERSATION_TYPE)
        return ConversationWebhook().load(web_hook)

    def conversation_delete_webhook(self, id):
        """Delete a conversations webhook."""
        uri = CONVERSATION_WEB_HOOKS_PATH + '/' + str(id)
        self.request(uri, 'DELETE', None, CONVERSATION_TYPE)

    def conversation_list_webhooks(self, limit=10, offset=0):
        """List conversations webhooks, paginated."""
        uri = CONVERSATION_WEB_HOOKS_PATH + '?' + self._format_query(limit, offset)
        return ConversationWebhookList().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def conversation_read_webhook(self, id):
        """Retrieve a single conversations webhook."""
        uri = CONVERSATION_WEB_HOOKS_PATH + '/' + str(id)
        return ConversationWebhook().load(self.request(uri, 'GET', None, CONVERSATION_TYPE))

    def voice_recording_list_recordings(self, call_id, leg_id):
        """List the recordings for a call leg."""
        uri = self.generate_voice_calls_url(call_id=call_id, leg_id=leg_id)
        return VoiceRecordingsList().load(self.request(uri, 'GET'))

    def voice_transcription_list(self, call_id, leg_id, recording_id):
        """List voice transcriptions."""
        uri = self.generate_voice_calls_url(call_id, leg_id, recording_id)
        return VoiceTranscriptionsList().load(self.request(uri, 'GET'))

    def voice_transcription_download(self, call_id, leg_id, recording_id, transcriptions_file):
        """Download voice transcription file."""
        uri = self.generate_voice_calls_url(call_id, leg_id, recording_id) + '/' + str(transcriptions_file)
        return self.request(uri, 'GET')

    def voice_transcription_view(self, call_id, leg_id, recording_id, transcriptions_id):
        """Get voice transcription data."""
        uri = self.generate_voice_calls_url(call_id, leg_id, recording_id) + '/' + str(transcriptions_id)
        return VoiceTranscriptionsView().load(self.request(uri, 'GET'))

    def voice_transcription_create(self, call_id, leg_id, recording_id, language):
        """Create a voice transcription."""
        uri = self.generate_voice_calls_url(call_id, leg_id, recording_id)
        params = {'language': str(language)}
        return VoiceTranscriptionsView().load(self.request(uri, 'POST', params, VOICE_TYPE))

    def voice_recording_view(self, call_id, leg_id, recording_id):
        """Retrieve a single voice recording."""
        uri = self.generate_voice_calls_url(call_id=call_id, leg_id=leg_id) + '/' + str(recording_id)
        recording_response = self.request(uri, 'GET')
        recording_links = recording_response.get('_links')
        # Copy the top-level links onto the record so the loaded object
        # keeps its download URL.
        if recording_links is not None:
            recording_response['data'][0]['_links'] = recording_links
        return VoiceRecording().load(recording_response['data'][0])

    def voice_recording_download(self, call_id, leg_id, recording_id):
        """Download a voice recording to ``<recording_id>.wav`` in the cwd."""
        uri = self.generate_voice_calls_url(call_id=call_id, leg_id=leg_id) + '/' + str(recording_id)
        recording_response = self.request(uri, 'GET')
        recording_links = recording_response.get('_links')
        if recording_links is None or recording_links.get('file') is None:
            # FIX: ErrorException joins its argument element-wise, so it must
            # receive a list — a bare string would be joined character by
            # character into "T h e r e ...".
            raise (ErrorException(['There is no recording available']))
        recording_file = recording_links.get('file')
        recording_file = self.request_store_as_file(VOICE_API_ROOT + recording_file, recording_id + '.wav')
        return VOICE_API_ROOT + recording_file

    def voice_read_webhook(self, id):
        """
        Retrieve a voice webhook

        API Reference: https://developers.messagebird.com/api/voice-calling/#webhooks
        """
        uri = VOICE_API_ROOT + '/' + VOICE_WEB_HOOKS_PATH + '/' + str(id)
        return VoiceWebhook().load(self.request(uri, 'GET', None, VOICE_TYPE))

    def voice_list_webhooks(self, limit=10, offset=0):
        """
        Retrieve a list of voice webhooks.
        """
        uri = VOICE_API_ROOT + '/' + VOICE_WEB_HOOKS_PATH + '?' + self._format_query(limit, offset)
        return VoiceWebhookList().load(self.request(uri, 'GET', None, VOICE_TYPE))

    def voice_create_webhook(self, create_webhook_request):
        """
        Create a voice webhook.
        """
        if create_webhook_request is None:
            raise ValidationError('Create request is empty')

        uri = VOICE_API_ROOT + '/' + VOICE_WEB_HOOKS_PATH
        return VoiceWebhook().load(self.request(uri, 'POST', create_webhook_request.__dict__(), VOICE_TYPE))

    def voice_update_webhook(self, id, update_webhook_request):
        """
        Update a voice webhook.
        """
        if update_webhook_request is None:
            raise ValidationError('Update request is empty')

        uri = VOICE_API_ROOT + '/' + VOICE_WEB_HOOKS_PATH + '/' + str(id)
        return VoiceWebhook().load(self.request(uri, 'PUT', update_webhook_request.__dict__(), VOICE_TYPE))

    def voice_delete_webhook(self, id):
        """
        Delete a voice webhook.
        """
        uri = VOICE_API_ROOT + '/' + VOICE_WEB_HOOKS_PATH + '/' + str(id)
        self.request(uri, 'DELETE', None, VOICE_TYPE)

    def call_flow(self, id):
        """Retrieve a single call flow."""
        return CallFlow().load(self.request('call-flows/' + str(id), 'GET', None, VOICE_TYPE))

    def call_flow_list(self, limit=10, offset=0):
        """List call flows, paginated."""
        query = self._format_query(limit, offset)
        return CallFlowList().load(self.request('call-flows?' + query, 'GET', None, VOICE_TYPE))

    def call_flow_create(self, title, steps, default=False, record=False):
        """Create a new call flow."""
        params = {'title': title, 'steps': steps, 'default': default, 'record': record}
        return CallFlow().load(self.request('call-flows', 'POST', params, VOICE_TYPE))

    def call_flow_update(self, id, title, steps, default, record):
        """Replace an existing call flow."""
        params = {'title': title, 'steps': steps, 'default': default, 'record': record}
        return CallFlow().load(self.request('call-flows/' + str(id), 'PUT', params, VOICE_TYPE))

    def call_flow_delete(self, id):
        """Delete an existing call flow."""
        self.request_plain_text('call-flows/' + str(id), 'DELETE', None, VOICE_TYPE)

    def call_flow_numbers_list(self, call_flow_id):
        """List the numbers attached to a call flow."""
        return CallFlowNumberList().load(
            self.request('call-flows/' + str(call_flow_id) + '/numbers', 'GET', None, VOICE_TYPE))

    def call_flow_numbers_add(self, call_flow_id, numbers=()):
        """Attach numbers to a call flow."""
        params = {'numbers': numbers}
        return CallFlowNumberList().load(
            self.request('call-flows/' + str(call_flow_id) + '/numbers', 'POST', params, VOICE_TYPE))

    def _format_query(self, limit, offset):
        # Shared pagination query-string builder.
        return 'limit=' + str(limit) + '&offset=' + str(offset)

    def available_numbers_list(self, country, params=None, limit=20, offset=0):
        """Retrieve a list of phone numbers available for purchase."""
        # FIX: use a None sentinel and copy the caller's dict. The original
        # `params={}` default was shared between calls and mutated in place.
        params = dict(params) if params else {}
        params['limit'] = limit
        params['offset'] = offset
        return NumberList().load(self.request(NUMBER_AVAILABLE_PATH + '/' + str(country), 'GET', params, NUMBER_TYPE))

    def purchase_number(self, number, country, billingIntervalMonths=1):
        """Purchase a phone number."""
        params = {'number': str(number), 'countryCode': str(country), 'billingIntervalMonths': int(billingIntervalMonths)}
        return Number().load(self.request(NUMBER_PATH, 'POST', params, NUMBER_TYPE))

    def update_number(self, number, tags):
        """Update the tags of a purchased number."""
        params = {'tags': tags}
        return Number().load(self.request(NUMBER_PATH + '/' + str(number), 'PATCH', params, NUMBER_TYPE))

    def delete_number(self, number):
        """Cancel a purchased number."""
        self.request(NUMBER_PATH + '/' + str(number), 'DELETE', None, NUMBER_TYPE)

    def purchased_numbers_list(self, params=None, limit=20, offset=0):
        """List purchased numbers, paginated."""
        # FIX: same mutable-default repair as available_numbers_list.
        params = dict(params) if params else {}
        params['limit'] = limit
        params['offset'] = offset
        return NumberList().load(self.request(NUMBER_PATH, 'GET', params, NUMBER_TYPE))

    def purchased_number(self, number):
        """Retrieve a single purchased number."""
        return Number().load(self.request(NUMBER_PATH + '/' + number, 'GET', None, NUMBER_TYPE))

    @staticmethod
    def generate_voice_calls_url(call_id=None, leg_id=None, recording_id=None):
        """Build the Voice API URL for a call leg's recordings, and — when
        recording_id is given — that recording's transcriptions."""
        uri = VOICE_API_ROOT + '/' + VOICE_PATH + '/'
        uri += str(call_id) + '/' + VOICE_LEGS_PATH + '/' + str(leg_id) + '/' + VOICE_RECORDINGS_PATH
        if recording_id:
            uri += '/' + str(recording_id) + '/' + VOICE_TRANSCRIPTIONS_PATH
        return uri
/**
 * Module dependencies.
 *
 * Express + socket.io server for a planning-poker style voting app.
 * HTTP serves the SPA shell and a couple of debug endpoints; all game
 * state changes flow through the socket.io handlers at the bottom,
 * delegating to the Lobby/Room objects in ./lib/lobby.js.
 */

var _ = require('underscore')._;
var env = process.env.NODE_ENV || 'development';
var fs = require('fs')
var http = require('http');
var express = require('express');
var bodyParser = require('body-parser');
var errorhandler = require('errorhandler')
var methodOverride = require('method-override');
var morgan = require('morgan')
var compression = require('compression')

var app = express();
var server = http.createServer(app)
var socketIO = require('socket.io');
var io = socketIO(server, {
  transports: ['websocket', 'htmlfile', 'xhr-polling', 'jsonp-polling']
});
var lobbyClass = require('./lib/lobby.js');
var config = require('./config.js')[env];
var path = require('path');

var lobby = new lobbyClass.Lobby(io);

// Process-lifetime counters exposed via GET /debug_state.
var statsConnectionCount = 0;
var statsDisconnectCount = 0;
var statsSocketCount = 0;
var statsSocketMessagesReceived = 0;

// Configuration

// Set the CDN options
// SECURITY(review): AWS key and secret are hard-coded below. They should be
// moved to environment variables and these credentials rotated — anyone with
// read access to this file (or its VCS history) has them.
var options = {
  publicDir: path.join(__dirname, 'app'),
  viewsDir: path.join(__dirname, 'app'),
  domain: 'dkb4nwmyziz71.cloudfront.net',
  bucket: 'hatchetapp',
  key: 'AKIAIS3XCFXFKWXGKK7Q',
  secret: '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',
  hostname: config.hostname,
  port: config.port,
  ssl: false,
  production: config.packAssets
};

// Initialize the CDN magic
var CDN = require('express-cdn')(app, options);

app.set('views', __dirname + '/app');
app.set('view engine', 'ejs');
app.set('view options', { layout: false });
app.use(morgan('combined'));
app.use(bodyParser.json());
app.use(methodOverride());

if (env === 'development') {
  app.use(express.static(__dirname + '/app'));
  app.use(errorhandler());
}

if (env === 'production') {
  var oneDay = 86400000;
  // app.use(assetsManagerMiddleware);
  app.use(compression());
  app.use(express.static(__dirname + '/app'));
  app.use(errorhandler());
}

// Add the dynamic view helper
app.locals.CDN = CDN();

app.get('/', function(req, res) {
  res.render('index.ejs');
});

// Introspection endpoint: counters plus a JSON snapshot of every room.
app.get('/debug_state', function(req, res) {
  res.json({
    "stats": {
      "connectionCount": statsConnectionCount,
      "disconnectCount": statsDisconnectCount,
      "currentSocketCount": statsSocketCount,
      "socketMessagesReceived": statsSocketMessagesReceived
    },
    "rooms": _.map(lobby.rooms, function(room, key) {
      return room.json()
    })
  });
});

app.get('/styleguide', function(req, res) {
  res.render('styleguide.ejs');
});

// Room deep-links: render the SPA only for a known room id, otherwise
// bounce back to the landing page.
app.get('/:id', function(req, res) {
  if (req.params.id in lobby.rooms) {
    res.render('index.ejs');
  } else {
    res.redirect('/');
  }
});

var port = process.env.app_port || 5000; // Use the port that Heroku provides or default to 5000
server.listen(port, function() {
  console.log("Express server listening on port %d in %s mode", port, app.settings.env);
});

/* EVENT LISTENERS */
// Convention for the handlers below: each looks up the room by
// data.roomUrl and either calls back with { error: ... } or performs the
// action and acks with room.info() / {}.
io.on('connection', function(socket) {
  statsConnectionCount++;
  statsSocketCount++;
  // console.log("On connect", socket.id);

  socket.on('disconnect', function() {
    statsDisconnectCount++;
    statsSocketCount--;
    // console.log("On disconnect", socket.id);
    lobby.broadcastDisconnect(socket);
  });

  socket.on('create room', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on create room", socket.id, data);
    callback(lobby.createRoom());
  });

  socket.on('join room', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on join room " + data.roomUrl, socket.id, data);
    var room = lobby.joinRoom(socket, data);
    if (room.error) {
      callback({ error: room.error });
    } else {
      callback(room.info());
    }
  });

  socket.on('room info', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on room info for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    // room = { error: "there was an error" };
    if (room.error) {
      callback({ error: room.error });
    } else {
      callback(room.info());
    }
  });

  // NOTE: second parameter is named cardPack but is used as an ack
  // callback nowhere — this handler acks nothing by design.
  socket.on('set card pack', function(data, cardPack) {
    statsSocketMessagesReceived++;
    // console.log("on set card pack " + data.cardPack + " for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    // console.log("error=" + room.error);
    if (!room.error) {
      room.setCardPack(data);
    }
  });

  socket.on('vote', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on vote " + data.vote + " received for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.recordVote(socket, data);
      callback({});
    }
  });

  socket.on('unvote', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("omn unvote received for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.destroyVote(socket, data);
      callback({});
    }
  });

  socket.on('reset vote', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on reset vote received for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.resetVote();
      callback({});
    }
  });

  socket.on('force reveal', function(data, callback) {
    statsSocketMessagesReceived++;
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.forceReveal();
      callback({});
    }
  });

  socket.on('sort votes', function(data, callback) {
    statsSocketMessagesReceived++;
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.sortVotes();
      callback({});
    }
  });

  socket.on('toggle voter', function(data, callback) {
    statsSocketMessagesReceived++;
    // console.log("on toggle voter for " + data.roomUrl, socket.id, data);
    var room = lobby.getRoom(data.roomUrl);
    if (room.error) {
      callback({ error: room.error });
    } else {
      room.toggleVoter(data);
      callback({});
    }
  });
});
import matplotlib.pyplot as plt
import random


class PlotGenerator:
    """Small helper around pyplot for a plotting practice exercise.

    NOTE: importing/running this module has side effects — the script at
    the bottom opens plot windows via plt.show().
    """

    def __init__(self):
        pass

    @staticmethod
    def _get_3xplus5_value(value):
        # The exercise's target function: y = 3x + 5.
        return (3 * value) + 5

    @staticmethod
    def get_list_with_random_numbers_added(initial_list):
        """Return a copy of initial_list with uniform noise in [-5, 5] added
        to each element."""
        transformed_list = []
        for i in initial_list:
            transformed_list.append(i + random.randrange(-5, 6))
        return transformed_list

    @staticmethod
    def generate_unidimensional_plot(initial_list, label, format_string=""):
        """Plot initial_list against its indices (blocking plt.show()).

        format_string is a matplotlib style string, e.g. "r." for red dots.
        """
        plt.plot(initial_list, format_string)
        plt.ylabel(label)
        plt.show()

    @staticmethod
    def generate_bidimensional_plot(x_list, y_list, label):
        """Plot y_list vs x_list as red dots, with x vs x as a dotted blue
        reference line (presumably the noiseless baseline — the first series
        plots x against itself)."""
        plt.plot(x_list, x_list, "b:", x_list, y_list, "r.")
        plt.ylabel(label)
        plt.show()

    def get_transformed_list(self, initial_list):
        """Apply y = 3x + 5 to every element and return the new list."""
        transformed_list = []
        for i in initial_list:
            transformed_list.append(self._get_3xplus5_value(i))
        return transformed_list


# --- exercise driver: runs on import ---
pg = PlotGenerator()
ini_list = range(-10, 11)
pg.generate_unidimensional_plot(ini_list, "Plot showing y=3x+5")
trans_list = pg.get_transformed_list(ini_list)
rand_trans_list = pg.get_list_with_random_numbers_added(trans_list)
print("========= Practice 2: A =============")
print(trans_list)
print("========= Practice 2: B =============")
pg.generate_unidimensional_plot(rand_trans_list, "Plot showing y=3x+5 plus rand values", "r.")
print("========= Practice 2: C =============")
pg.generate_bidimensional_plot(trans_list, rand_trans_list, "X/Y scatter plot y=3x+5")
import os
import time


class ScraperLogger:
    """
    Base logger interface: every logging hook is a no-op.

    Subclass and override the hooks to get real behavior (see
    SilentScraperLogger, ErrorScraperLogger, VerboseScraperLogger).
    """

    def __init__(self):
        pass

    @staticmethod
    def get_date():
        """
        :return: current local date as "YYYY-M-D" (components not zero-padded,
                 matching the historical format)
        """
        # Snapshot localtime() ONCE: the previous code called it three times,
        # so the components could straddle a date rollover between calls.
        now = time.localtime()
        c_date: str = str(now[0]) + "-" + str(now[1]) + "-" + str(now[2])
        return c_date

    @staticmethod
    def get_time():
        """
        :return: current local time as "H:M:S" (components not zero-padded)
        """
        # Same single-snapshot fix as get_date().
        now = time.localtime()
        c_time: str = str(now[3]) + ":" + str(now[4]) + ":" + str(now[5])
        return c_time

    # --- logging hooks: intentionally no-ops in the base class ---

    def scrape_food(self, url: str):
        pass

    def scrape_search_results(self, url: str):
        pass

    def enqueue(self, url: str, func):
        pass

    def food_in_db(self, url: str):
        pass

    def enter_in_db(self, food: str):
        pass

    def check_ingredients(self, food: str):
        pass

    def error(self, msg: str):
        pass

    def make_request(self, url: str, agent: str, proxies: dict):
        pass

    def message(self, msg: str):
        pass


class SilentScraperLogger(ScraperLogger):
    """
    No logging - don't generate any log file or logging data
    """
    pass


class ErrorScraperLogger(ScraperLogger):
    """
    Log error messages only
    """

    def __init__(self):
        """
        Create a log directory if needed, and a log file named after the
        current date/time. A "LOG FILE STARTED" banner is written only when
        the file is brand new.
        """
        super().__init__()

        # make directory for logs if it doesn't exist
        if not os.path.exists("logs"):
            os.mkdir("logs")

        # NOTE(review): the ':' separators from get_time() make this file name
        # invalid on Windows - confirm the scraper only targets POSIX systems.
        logfile_name: str = "logs/" + self.get_date() + "_" + self.get_time() + "_logfile.txt"
        is_new_file = not os.path.exists(logfile_name)
        self.logfile = open(logfile_name, "a")
        if is_new_file:
            self.logfile.write("LOG FILE STARTED: {}\n\n".format(time.asctime()))

    def __del__(self):
        # Guard: __init__ may have failed before self.logfile was assigned;
        # the previous version raised AttributeError in that case.
        logfile = getattr(self, "logfile", None)
        if logfile is not None:
            logfile.close()

    def error(self, msg: str):
        self.logfile.write("{} - {}\n".format(self.get_time(), msg))


class VerboseScraperLogger(ErrorScraperLogger):
    """
    Log all events to log file
    """

    def scrape_food(self, url: str):
        self.logfile.write("{} - Scraping food details from URL: {}\n\n".format(self.get_time(), url))

    def scrape_search_results(self, url: str):
        self.logfile.write("{} - Scraping search results from URL: {}\n\n".format(self.get_time(), url))

    def enter_in_db(self, food: str):
        self.logfile.write("{} - Entering food: {} into database...\n\n".format(self.get_time(), food))

    def enqueue(self, url: str, func):
        self.logfile.write("{} - Enqueuing URL: {} with FUNC: {}\n\n".format(self.get_time(), url, func))

    def food_in_db(self, url: str):
        self.logfile.write("{} - Checking DB for entry with URL: {}\n\n".format(self.get_time(), url))

    def check_ingredients(self, food: str):
        self.logfile.write("{} - Checking ingredients in food: {}\n\n".format(self.get_time(), food))

    def make_request(self, url: str, agent: str, proxies: dict):
        self.logfile.write(
            "{} - Using PROXY: {} to make request for URL: {} using USER AGENT: {}\n\n".format(self.get_time(),
                                                                                               proxies, url, agent))

    def message(self, msg: str):
        self.logfile.write(msg + '\n')
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: ibclient
author: "Nathan Gotz"
short_description: Manage Infoblox via Web API
description:
  - Manage Infoblox IPAM and DNS via Web API
"""

EXAMPLES = """
"""

RETURN = """
"""

from ansible.module_utils.basic import AnsibleModule

try:
    from ibclient.ibclient import IBClient
    HAS_IBCLIENT = True
except ImportError:
    HAS_IBCLIENT = False

try:
    import requests
    requests.packages.urllib3.disable_warnings()
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False


def main():
    """
    Ansible module to manage Infoblox operation by using REST API.

    Fixes relative to the previous revision:
      - create_ztp_fixedaddress referenced the undefined name ``mac_addr``
        (NameError at runtime); it now uses ``mac_address``.
      - delete_reservedaddress referenced the undefined name ``address``
        (NameError at runtime); it now uses ``ip_address``.
      - get_next_available_address called ib.get_next_available_network();
        it now calls get_next_available_address (assumes IBClient exposes
        that method -- TODO confirm against the ibclient library).
      - create_* actions reported changed=False even though they mutate
        Infoblox state; they now report changed=True on success.
      - "Unable to fixed address" messages corrected to
        "Unable to create fixed address".
    Missing-library checks now use fail_json so the task actually fails.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            action=dict(required=True, choices=[
                "get_memberservers",
                "get_dhcp_servers",
                "get_dhcpfailover",
                "get_network",
                "get_network_by_ip",
                "get_network_by_comment",
                "get_next_available_network",
                "get_next_available_address",
                "get_network_container",
                "get_range",
                "get_dns_record",
                "get_similar_dns_records",
                "get_fixedaddress",
                "get_fixedaddress_by_mac",
                "create_network",
                "create_network_container",
                "create_range",
                "create_reservedaddress",
                "create_fixedaddress",
                "create_ztp_fixedaddress",
                "create_a_record",
                "create_ptr_record",
                "create_dns_record",
                "update_network",
                "update_network_container",
                "update_reservedaddress",
                "update_fixedaddress_by_ip_addr",
                "update_fixedaddress_mac_addr",
                "delete_network",
                "delete_network_container",
                "delete_range",
                "delete_reservedaddress",
                "delete_fixedaddress",
                "delete_fixedaddress_by_mac",
                "delete_dns_records"
            ]),
            host=dict(required=False),
            network=dict(required=False),
            start_addr=dict(required=False),
            end_addr=dict(required=False),
            objref=dict(required=False),
            ip_address=dict(required=False),
            mac_address=dict(required=False),
            comment=dict(required=False),
            cidr=dict(required=False, type='raw'),
            num=dict(required=False, type='raw'),
            type=dict(required=False),
            record=dict(required=False),
            template=dict(required=False),
            exc_start=dict(required=False),
            exc_end=dict(required=False),
            options=dict(required=False),
            tftp_server=dict(required=False),
            cfg_file=dict(required=False),
            vendor_code=dict(required=False),
            fqdn=dict(required=False),
            api_version=dict(required=False, default="2.3.1"),
            dns_view=dict(required=False, default="default"),
            net_view=dict(required=False, default="default"),
            fields=dict(required=False, default=None, type='raw'),
        )
    )

    if not HAS_REQUESTS:
        module.fail_json(
            msg="Library 'requests' is required. Use 'sudo pip install requests' to fix it.")

    if not HAS_IBCLIENT:
        # This is a lie. ibclient (Infoblox client) is not on pypi (yet)
        module.fail_json(
            msg="Library 'ibclient' is required. Use 'sudo pip install ibclient' to fix it.")

    # Unpack module parameters (objref is accepted for compatibility but unused).
    p = module.params
    action = p["action"]
    host = p["host"]
    network = p["network"]
    start_addr = p["start_addr"]
    end_addr = p["end_addr"]
    ip_address = p["ip_address"]
    mac_address = p["mac_address"]
    comment = p["comment"]
    cidr = p["cidr"]
    num = p["num"]
    record_type = p["type"]  # 'type' shadows the builtin; renamed locally
    record = p["record"]
    template = p["template"]
    exc_start = p["exc_start"]
    exc_end = p["exc_end"]
    options = p["options"]
    tftp_server = p["tftp_server"]
    cfg_file = p["cfg_file"]
    vendor_code = p["vendor_code"]
    fqdn = p["fqdn"]
    fields = p["fields"]

    ib = IBClient(p["server"], p["username"], p["password"],
                  p["api_version"], p["dns_view"], p["net_view"])

    # action -> (callable, changed-on-success, message when nothing is returned).
    # Read-only actions report changed=False; create/update/delete report True.
    dispatch = {
        "get_memberservers": (lambda: ib.get_memberservers(), False, "No member servers found"),
        "get_dhcp_servers": (lambda: ib.get_dhcp_servers(), False, "No DHCP servers found"),
        "get_dhcpfailover": (lambda: ib.get_dhcpfailover(), False, "No DHCP failover found"),
        "get_network": (lambda: ib.get_network(network, fields), False, "Network not found"),
        "get_network_by_ip": (lambda: ib.get_network_by_ip(ip_address, fields), False, "Network not found"),
        "get_network_by_comment": (lambda: ib.get_network_by_comment(comment, fields), False, "Network not found"),
        "get_next_available_network": (lambda: ib.get_next_available_network(network, cidr, num), False,
                                       "No next available network"),
        # Bug fix: previously called get_next_available_network(network, num).
        "get_next_available_address": (lambda: ib.get_next_available_address(network, num), False,
                                       "No next available IP address"),
        "get_network_container": (lambda: ib.get_network_container(network, fields), False,
                                  "Network Container not found"),
        "get_range": (lambda: ib.get_range(start_addr, end_addr, fields), False, "No DHCP Range"),
        "get_dns_record": (lambda: ib.get_dns_record(record_type, record, fields), False, "No DNS Record"),
        "get_similar_dns_records": (lambda: ib.get_similar_dns_records(record_type, record, fields), False,
                                    "No Similar DNS Records"),
        "get_fixedaddress": (lambda: ib.get_fixedaddress(ip_address, fields), False, "Fixed Address not found"),
        "get_fixedaddress_by_mac": (lambda: ib.get_fixedaddress_by_mac(mac_address, fields), False,
                                    "Fixed Address not found"),
        "create_network": (lambda: ib.create_network(network, comment, template), True,
                           "Unable to create network"),
        "create_network_container": (lambda: ib.create_network_container(network, comment), True,
                                     "Unable to create network container"),
        "create_range": (lambda: ib.create_range(network, start_addr, end_addr, exc_start, exc_end,
                                                 options, template), True,
                         "Unable to create DHCP range"),
        "create_reservedaddress": (lambda: ib.create_reservedaddress(ip_address, host), True,
                                   "Unable to create reserved address"),
        "create_fixedaddress": (lambda: ib.create_fixedaddress(ip_address, mac_address, host), True,
                                "Unable to create fixed address"),
        # Bug fix: previously passed the undefined name 'mac_addr'.
        "create_ztp_fixedaddress": (lambda: ib.create_ztp_fixedaddress(ip_address, mac_address, host,
                                                                       tftp_server, cfg_file, vendor_code), True,
                                    "Unable to create fixed address"),
        "create_a_record": (lambda: ib.create_a_record(ip_address, fqdn), True,
                            "Unable to create DNS A record"),
        "create_ptr_record": (lambda: ib.create_ptr_record(ip_address, fqdn), True,
                              "Unable to create DNS PTR record"),
        "create_dns_record": (lambda: ib.create_dns_record(ip_address, fqdn), True,
                              "Unable to create DNS record"),
        "update_network": (lambda: ib.update_network(network, comment), True, "Unable to update network"),
        "update_network_container": (lambda: ib.update_network_container(network, comment), True,
                                     "Unable to update network container"),
        "update_reservedaddress": (lambda: ib.update_reservedaddress(ip_address, host), True,
                                   "Unable to update reserved address"),
        "update_fixedaddress_by_ip_addr": (lambda: ib.update_fixedaddress_by_ip_addr(ip_address, mac_address, host),
                                           True, "Unable to update fixed address"),
        "update_fixedaddress_mac_addr": (lambda: ib.update_fixedaddress_by_mac_addr(mac_address, host), True,
                                         "Unable to update fixed address"),
        "delete_network": (lambda: ib.delete_network(network), True, "Unable to delete network"),
        "delete_network_container": (lambda: ib.delete_network_container(network), True,
                                     "Unable to delete network container"),
        "delete_range": (lambda: ib.delete_range(start_addr, end_addr), True, "Unable to delete DHCP Range"),
        # Bug fix: previously passed the undefined name 'address'.
        "delete_reservedaddress": (lambda: ib.delete_reservedaddress(ip_address), True,
                                   "Unable to delete reserved address"),
        "delete_fixedaddress": (lambda: ib.delete_fixedaddress(ip_address), True,
                                "Unable to delete fixed address"),
        "delete_fixedaddress_by_mac": (lambda: ib.delete_fixedaddress_by_mac(mac_address), True,
                                       "Unable to delete fixed address"),
        "delete_dns_records": (lambda: ib.delete_dns_records(fqdn), True, "Unable to delete DNS record"),
    }

    # 'action' is constrained by argument_spec choices, so the lookup is safe.
    run, changed_on_success, failure_msg = dispatch[action]
    result = run()
    if result:
        result_json = {'changed': changed_on_success, 'msg': result}
    else:
        # Preserve historical behavior: a miss/failure still exits successfully
        # with only a message (callers inspect 'msg').
        result_json = {'msg': failure_msg}

    module.exit_json(**result_json)


if __name__ == "__main__":
    main()