from typing import Union, List

import httpx

from youtubesearchpython.__future__.internal.json import loads
from youtubesearchpython.__future__.internal.constants import *


class VideoInternal:
    videoId = None
    videoComponent = None
    timeout = None

    def __init__(self, videoLink: str, componentMode: str, timeout: int = None):
        self.videoLink = videoLink
        self.componentMode = componentMode
        self.timeout = timeout

    async def get(self):
        self.videoId = await self.__getVideoId(self.videoLink)
        await self.__makeRequest()
        await self.__getComponents(self.componentMode)
        if not self.videoComponent:
            raise Exception('ERROR: Could not parse YouTube response.')

    async def __getVideoId(self, videoLink: str) -> str:
        if 'youtu.be' in videoLink:
            if videoLink[-1] == '/':
                return videoLink.split('/')[-2]
            return videoLink.split('/')[-1]
        elif 'youtube.com' in videoLink:
            if '&' not in videoLink:
                return videoLink[videoLink.index('v=') + 2:]
            return videoLink[videoLink.index('v=') + 2: videoLink.index('&')]
        else:
            return videoLink

    async def __makeRequest(self) -> None:
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    'https://www.youtube.com/watch',
                    params={
                        'v': self.videoId,
                        'pbj': 1,
                    },
                    headers={
                        'User-Agent': userAgent,
                    },
                    timeout=self.timeout,
                )
                self.responseSource = response.json()
        except Exception:
            raise Exception('ERROR: Could not make request.')

    async def __getComponents(self, mode: str) -> None:
        for element in self.responseSource:
            if playerResponseKey in element.keys():
                if 'videoDetails' in element[playerResponseKey].keys():
                    '''
                    Valid video ID.
                    '''
                    self.videoComponent = await self.__getVideoComponent(
                        element[playerResponseKey], mode)
                    break
                else:
                    '''
                    Invalid video ID.
                    '''
                    self.videoComponent = None

    async def __getVideoComponent(self, element: dict, mode: str) -> dict:
        videoComponent = {}
        if mode in ['getInfo', None]:
            component = {
                'id': await self.__getValue(element, ['videoDetails', 'videoId']),
                'title': await self.__getValue(element, ['videoDetails', 'title']),
                'viewCount': {
                    'text': await self.__getValue(element, ['videoDetails', 'viewCount'])
                },
                'thumbnails': await self.__getValue(element, ['videoDetails', 'thumbnail', 'thumbnails']),
                'description': await self.__getValue(element, ['videoDetails', 'shortDescription']),
                'channel': {
                    'name': await self.__getValue(element, ['videoDetails', 'author']),
                    'id': await self.__getValue(element, ['videoDetails', 'channelId']),
                },
                'averageRating': await self.__getValue(element, ['videoDetails', 'averageRating']),
                'keywords': await self.__getValue(element, ['videoDetails', 'keywords']),
                'publishDate': await self.__getValue(element, ['microformat', 'playerMicroformatRenderer', 'publishDate']),
                'uploadDate': await self.__getValue(element, ['microformat', 'playerMicroformatRenderer', 'uploadDate']),
            }
            component['link'] = 'https://www.youtube.com/watch?v=' + component['id']
            component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
            videoComponent.update(component)
        if mode in ['getFormats', None]:
            component = {
                'id': await self.__getValue(element, ['videoDetails', 'videoId']),
                'streamingData': await self.__getValue(element, ['streamingData']),
            }
            videoComponent.update(component)
        return videoComponent

    async def __getValue(self, source: dict, path: List[str]) -> Union[str, int, dict, None]:
        value = source
        for key in path:
            if type(key) is str:
                if key in value.keys():
                    value = value[key]
                else:
                    value = None
                    break
            elif type(key) is int:
                if len(value) != 0:
                    value = value[key]
                else:
                    value = None
                    break
        return value


class PlaylistInternal:
    playlistComponent = None
    result = None
    continuationKey = None
    timeout = None

    def __init__(self, playlistLink: str, componentMode: str, timeout: int = None):
        self.playlistLink = playlistLink
        self.componentMode = componentMode
        self.timeout = timeout

    async def get(self):
        await self.__makeRequest(self.playlistLink)
        await self.__getComponents()

    async def next(self):
        if self.continuationKey:
            await self.__makeNextRequest()
            await self.__getNextComponents()

    async def __makeRequest(self, playlistLink: str) -> None:
        # str.strip returns a new string; the original call discarded it.
        playlistLink = playlistLink.strip('/')
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    playlistLink,
                    params={
                        'pbj': '1',
                    },
                    headers={
                        'User-Agent': userAgent,
                    },
                    timeout=self.timeout,
                )
                self.responseSource = response.json()
        except Exception:
            raise Exception('ERROR: Could not make request.')

    async def __makeNextRequest(self, requestBody=requestPayload) -> None:
        requestBody['continuation'] = self.continuationKey
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    'https://www.youtube.com/youtubei/v1/browse',
                    params={
                        'key': searchKey,
                    },
                    headers={
                        'User-Agent': userAgent,
                    },
                    json=requestBody,
                    timeout=self.timeout,
                )
                self.responseSource = response.json()
        except Exception:
            raise Exception('ERROR: Could not make request.')

    async def __getComponents(self) -> None:
        for response in self.responseSource:
            if 'response' in response.keys():
                playlistElement = {
                    'info': await self.__getValue(response, playlistInfoPath),
                    'videos': await self.__getValue(response, playlistVideosPath),
                }
                if not playlistElement['info']:
                    raise Exception('ERROR: Could not parse YouTube response.')
                self.playlistComponent = await self.__getPlaylistComponent(
                    playlistElement, self.componentMode)

    async def __getNextComponents(self) -> None:
        self.continuationKey = None
        playlistComponent = {
            'videos': [],
        }
        continuationElements = await self.__getValue(
            self.responseSource,
            ['onResponseReceivedActions', 0, 'appendContinuationItemsAction', 'continuationItems'])
        for videoElement in continuationElements:
            if playlistVideoKey in videoElement.keys():
                videoComponent = {
                    'id': await self.__getValue(videoElement, [playlistVideoKey, 'videoId']),
                    'title': await self.__getValue(videoElement, [playlistVideoKey, 'title', 'runs', 0, 'text']),
                    'thumbnails': await self.__getValue(videoElement, [playlistVideoKey, 'thumbnail', 'thumbnails']),
                    'channel': {
                        'name': await self.__getValue(videoElement, [playlistVideoKey, 'shortBylineText', 'runs', 0, 'text']),
                        'id': await self.__getValue(videoElement, [playlistVideoKey, 'shortBylineText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
                    },
                    'duration': await self.__getValue(videoElement, [playlistVideoKey, 'lengthText', 'simpleText']),
                    'accessibility': {
                        'title': await self.__getValue(videoElement, [playlistVideoKey, 'title', 'accessibility', 'accessibilityData', 'label']),
                        'duration': await self.__getValue(videoElement, [playlistVideoKey, 'lengthText', 'accessibility', 'accessibilityData', 'label']),
                    },
                }
                playlistComponent['videos'].append(videoComponent)
            if continuationItemKey in videoElement.keys():
                self.continuationKey = await self.__getValue(videoElement, continuationKeyPath)
        self.playlistComponent['videos'].extend(playlistComponent['videos'])

    async def __getPlaylistComponent(self, element: dict, mode: str) -> dict:
        playlistComponent = {}
        if mode in ['getInfo', None]:
            for infoElement in element['info']:
                if playlistPrimaryInfoKey in infoElement.keys():
                    component = {
                        'id': await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'title', 'runs', 0, 'navigationEndpoint', 'watchEndpoint', 'playlistId']),
                        'title': await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'title', 'runs', 0, 'text']),
                        'videoCount': await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'stats', 0, 'runs', 0, 'text']),
                        'viewCount': await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'stats', 1, 'simpleText']),
                        'thumbnails': await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'thumbnailRenderer', 'playlistVideoThumbnailRenderer', 'thumbnail']),
                    }
                    if not component['thumbnails']:
                        # Fix: the original was missing `await` and ended with a
                        # stray trailing comma, which wrapped the fallback value
                        # in a one-element tuple.
                        component['thumbnails'] = await self.__getValue(infoElement, [playlistPrimaryInfoKey, 'thumbnailRenderer', 'playlistCustomThumbnailRenderer', 'thumbnail', 'thumbnails'])
                    component['link'] = 'https://www.youtube.com/playlist?list=' + component['id']
                    playlistComponent.update(component)
                if playlistSecondaryInfoKey in infoElement.keys():
                    component = {
                        'channel': {
                            'name': await self.__getValue(infoElement, [playlistSecondaryInfoKey, 'videoOwner', 'videoOwnerRenderer', 'title', 'runs', 0, 'text']),
                            'id': await self.__getValue(infoElement, [playlistSecondaryInfoKey, 'videoOwner', 'videoOwnerRenderer', 'title', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
                            'thumbnails': await self.__getValue(infoElement, [playlistSecondaryInfoKey, 'videoOwner', 'videoOwnerRenderer', 'thumbnail', 'thumbnails']),
                        },
                    }
                    component['channel']['link'] = 'https://www.youtube.com/channel/' + component['channel']['id']
                    playlistComponent.update(component)
        if mode in ['getVideos', None]:
            playlistComponent['videos'] = []
            for videoElement in element['videos']:
                if playlistVideoKey in videoElement:
                    videoComponent = {
                        'id': await self.__getValue(videoElement, [playlistVideoKey, 'videoId']),
                        'title': await self.__getValue(videoElement, [playlistVideoKey, 'title', 'runs', 0, 'text']),
                        'thumbnails': await self.__getValue(videoElement, [playlistVideoKey, 'thumbnail', 'thumbnails']),
                        'channel': {
                            'name': await self.__getValue(videoElement, [playlistVideoKey, 'shortBylineText', 'runs', 0, 'text']),
                            'id': await self.__getValue(videoElement, [playlistVideoKey, 'shortBylineText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId']),
                        },
                        'duration': await self.__getValue(videoElement, [playlistVideoKey, 'lengthText', 'simpleText']),
                        'accessibility': {
                            'title': await self.__getValue(videoElement, [playlistVideoKey, 'title', 'accessibility', 'accessibilityData', 'label']),
                            'duration': await self.__getValue(videoElement, [playlistVideoKey, 'lengthText', 'accessibility', 'accessibilityData', 'label']),
                        },
                    }
                    videoComponent['link'] = 'https://www.youtube.com/watch?v=' + videoComponent['id']
                    videoComponent['channel']['link'] = 'https://www.youtube.com/channel/' + videoComponent['channel']['id']
                    playlistComponent['videos'].append(videoComponent)
                if continuationItemKey in videoElement.keys():
                    self.continuationKey = await self.__getValue(videoElement, continuationKeyPath)
        return playlistComponent

    async def __getValue(self, source: dict, path: List[str]) -> Union[str, int, dict, None]:
        value = source
        for key in path:
            if type(key) is str:
                if key in value.keys():
                    value = value[key]
                else:
                    value = None
                    break
            elif type(key) is int:
                if len(value) != 0:
                    value = value[key]
                else:
                    value = None
                    break
        return value


class SuggestionsInternal:
    timeout = None

    def __init__(self):
        pass

    async def get(self, query: str, language: str = 'en', region: str = 'US', timeout: int = None) -> dict:
        self.query = query
        self.language = language
        self.region = region
        self.timeout = timeout
        searchSuggestions = []
        await self.__makeRequest()
        await self.__parseSource()
        for element in self.responseSource:
            if type(element) is list:
                for searchSuggestionElement in element:
                    searchSuggestions.append(searchSuggestionElement[0])
                break
        return {
            'result': searchSuggestions,
        }

    async def __parseSource(self) -> None:
        try:
            self.responseSource = await loads(
                self.response[self.response.index('(') + 1: self.response.index(')')])
        except Exception:
            raise Exception('ERROR: Could not parse YouTube response.')

    async def __makeRequest(self) -> None:
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    'https://clients1.google.com/complete/search',
                    params={
                        'hl': self.language,
                        'gl': self.region,
                        'q': self.query,
                        'client': 'youtube',
                        'gs_ri': 'youtube',
                        'ds': 'yt',
                    },
                    timeout=self.timeout,
                )
                self.response = response.text
        except Exception:
            raise Exception('ERROR: Could not make request.')
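
# --- Hedged usage sketch (not part of the library above) ---------------------
# A minimal example of driving VideoInternal from asyncio. The video URL is a
# placeholder; 'getInfo' as the component mode and the populated
# `videoComponent` dict follow directly from the code above.
import asyncio

async def _demo():
    video = VideoInternal('https://www.youtube.com/watch?v=aqz-KE-bpKQ', 'getInfo')
    await video.get()                      # fetches and parses the watch page
    print(video.videoComponent['title'])   # filled in by __getVideoComponent

asyncio.run(_demo())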
const PanelView = require('panels/view/PanelView');
const Panel = require('panels/model/Panel');

module.exports = {
  run() {
    describe('PanelView', () => {
      var fixtures;
      var model;
      var view;

      beforeEach(() => {
        model = new Panel();
        view = new PanelView({ model });
        document.body.innerHTML = '<div id="fixtures"></div>';
        fixtures = document.body.querySelector('#fixtures');
        fixtures.appendChild(view.render().el);
      });

      afterEach(() => {
        view.remove();
      });

      it('Panel empty', () => {
        fixtures.firstChild.className = '';
        expect(fixtures.innerHTML).toEqual('<div class=""></div>');
      });

      it('Append content', () => {
        model.set('appendContent', 'test');
        model.set('appendContent', 'test2');
        expect(view.$el.html()).toEqual('testtest2');
      });

      it('Update content', () => {
        model.set('content', 'test');
        model.set('content', 'test2');
        expect(view.$el.html()).toEqual('test2');
      });

      describe('Init with options', () => {
        beforeEach(() => {
          model = new Panel({ buttons: [{}] });
          view = new PanelView({ model });
          document.body.innerHTML = '<div id="fixtures"></div>';
          fixtures = document.body.querySelector('#fixtures');
          fixtures.appendChild(view.render().el);
        });

        afterEach(() => {
          view.remove();
        });
      });
    });
  }
};
import datetime

from django.conf import settings
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _


# Create your models here.
class Category(models.Model):
    slug = models.SlugField(max_length=128, unique=True, blank=True)
    name = models.CharField(max_length=128)
    description = models.TextField(null=True, blank=True)
    # a2.parent = a1; a1.children.all()
    parent = models.ForeignKey('self', blank=True, null=True,
                               related_name='children', on_delete=models.CASCADE)
    visible = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = _('category')
        verbose_name_plural = _('categories')

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        self.slug = slugify(self.name)
        super().save(*args, **kwargs)


class Tutorial(models.Model):
    title = models.CharField(max_length=150, blank=True)
    slug = models.SlugField(max_length=200, unique=True, blank=True)
    photo = models.ImageField(upload_to='photos/%Y/%m/%d/', null=True, blank=True)
    video = models.URLField(max_length=250, null=True, blank=True)
    content = models.TextField(blank=True)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                               related_name='tutorials', blank=True)
    categories = models.ManyToManyField(Category, related_name='tutorials', blank=True)

    class Meta:
        verbose_name = _('tutorial')
        verbose_name_plural = _('tutorials')

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        today = datetime.datetime.today()
        title_slugified = slugify(self.title)
        self.slug = f'{today:%Y%m%d%M%S}-{title_slugified}'
        super().save(*args, **kwargs)
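
# --- Hedged usage sketch (e.g. from `python manage.py shell`) -----------------
# Illustrates the automatic slug generation in the save() overrides above.
# Assumes migrations have been applied; the category names are invented.
parent = Category.objects.create(name="Web Development")
child = Category.objects.create(name="Django Basics", parent=parent)
assert child.slug == "django-basics"            # set by Category.save()
assert list(parent.children.all()) == [child]   # reverse FK via related_name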
module.exports = {
  extends: require.resolve('eslint-config-ostai'),
  rules: {
    'no-underscore-dangle': 'off',
    'no-new': 'off'
  }
}
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/cognito-identity/CognitoIdentity_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>

namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;

namespace Utils
{
namespace Json
{
  class JsonValue;
} // namespace Json
} // namespace Utils
namespace CognitoIdentity
{
namespace Model
{
  /**
   * Returned in response to a successful GetOpenIdToken request.
   */
  class AWS_COGNITOIDENTITY_API GetOpenIdTokenResult
  {
  public:
    GetOpenIdTokenResult();
    GetOpenIdTokenResult(const AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
    GetOpenIdTokenResult& operator=(const AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline const Aws::String& GetIdentityId() const{ return m_identityId; }

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline void SetIdentityId(const Aws::String& value) { m_identityId = value; }

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline void SetIdentityId(Aws::String&& value) { m_identityId = value; }

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline void SetIdentityId(const char* value) { m_identityId.assign(value); }

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline GetOpenIdTokenResult& WithIdentityId(const Aws::String& value) { SetIdentityId(value); return *this;}

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline GetOpenIdTokenResult& WithIdentityId(Aws::String&& value) { SetIdentityId(value); return *this;}

    /** A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input. */
    inline GetOpenIdTokenResult& WithIdentityId(const char* value) { SetIdentityId(value); return *this;}

    /** An OpenID token, valid for 15 minutes. */
    inline const Aws::String& GetToken() const{ return m_token; }

    /** An OpenID token, valid for 15 minutes. */
    inline void SetToken(const Aws::String& value) { m_token = value; }

    /** An OpenID token, valid for 15 minutes. */
    inline void SetToken(Aws::String&& value) { m_token = value; }

    /** An OpenID token, valid for 15 minutes. */
    inline void SetToken(const char* value) { m_token.assign(value); }

    /** An OpenID token, valid for 15 minutes. */
    inline GetOpenIdTokenResult& WithToken(const Aws::String& value) { SetToken(value); return *this;}

    /** An OpenID token, valid for 15 minutes. */
    inline GetOpenIdTokenResult& WithToken(Aws::String&& value) { SetToken(value); return *this;}

    /** An OpenID token, valid for 15 minutes. */
    inline GetOpenIdTokenResult& WithToken(const char* value) { SetToken(value); return *this;}

  private:
    Aws::String m_identityId;
    Aws::String m_token;
  };

} // namespace Model
} // namespace CognitoIdentity
} // namespace Aws
import { parseObjects } from '../CSV'
import * as fs from 'fs'

test("test the CSV", () => {
  let data = fs.readFileSync(`${__dirname}/parser_test.csv`, 'utf-8')
  let rows = parseObjects(data)
  expect(rows.length).toBe(3) // depends on file contents of course

  expect(rows[0]['title']).toBe('a good day')
  expect(rows[0]['count']).toBe('5')
  var row0_description = `let's just pile it all in here, commas, "quoted strings" and new lines!`
  expect(rows[0]['description'].trim()).toBe(row0_description.trim())

  expect(rows[1]['title']).toBe('an ok day')
  expect(rows[1]['count']).toBe('3')
  expect(rows[1]['description']).toBe('this is less ambitious')

  expect(rows[2]['title']).toBe('a day, i guess')
  expect(rows[2]['count']).toBe('2')
  expect(rows[2]['description']).toBe('what else could go wrong?')
})

test("test official CSV", () => {
  let data = fs.readFileSync(`${__dirname}/women_in_computing.csv`, 'utf-8')
  let rows = parseObjects(data)
  expect(rows.length).toBe(11)
  rows.forEach((row, idx) => {
    expect(Object.keys(row).length).toBe(18)
  })
  let title = rows[0]
  expect(title['Type']).toBe('title')
  expect(title['Background']).toMatch(/upload.wikimedia.org/)
})
import { expect } from 'chai';
import { loadFixture } from './test-utils.js';

describe('Sitemaps', () => {
  let fixture;

  before(async () => {
    fixture = await loadFixture({
      projectRoot: './fixtures/astro-sitemap-rss/',
      buildOptions: {
        site: 'https://astro.build/',
        sitemap: true,
      },
    });
    await fixture.build();
  });

  after(() => fixture.clean());

  describe('RSS Generation', () => {
    it('generates RSS correctly', async () => {
      const rss = await fixture.readFile('/custom/feed.xml');
      expect(rss).to.equal(
        `<?xml version="1.0" encoding="UTF-8"?><rss version="2.0" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:content="http://purl.org/rss/1.0/modules/content/"><channel><title><![CDATA[MF Doomcast]]></title><description><![CDATA[The podcast about the things you find on a picnic, or at a picnic table]]></description><link>https://astro.build/custom/feed.xml</link><language>en-us</language><itunes:author>MF Doom</itunes:author><item><title><![CDATA[Rap Snitch Knishes (feat. Mr. Fantastik)]]></title><link>https://astro.build/episode/rap-snitch-knishes/</link><guid>https://astro.build/episode/rap-snitch-knishes/</guid><description><![CDATA[Complex named this song the “22nd funniest rap song of all time.”]]></description><pubDate>Tue, 16 Nov 2004 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>172</itunes:duration><itunes:explicit>true</itunes:explicit></item><item><title><![CDATA[Fazers]]></title><link>https://astro.build/episode/fazers/</link><guid>https://astro.build/episode/fazers/</guid><description><![CDATA[Rhapsody ranked Take Me to Your Leader 17th on its list “Hip-Hop’s Best Albums of the Decade”]]></description><pubDate>Thu, 03 Jul 2003 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>197</itunes:duration><itunes:explicit>true</itunes:explicit></item><item><title><![CDATA[Rhymes Like Dimes (feat. Cucumber Slice)]]></title><link>https://astro.build/episode/rhymes-like-dimes/</link><guid>https://astro.build/episode/rhymes-like-dimes/</guid><description><![CDATA[Operation: Doomsday has been heralded as an underground classic that established MF Doom's rank within the underground hip-hop scene during the early to mid-2000s. ]]></description><pubDate>Tue, 19 Oct 1999 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>259</itunes:duration><itunes:explicit>true</itunes:explicit></item></channel></rss>`
      );
    });

    it('generates RSS with pregenerated URLs correctly', async () => {
      const rss = await fixture.readFile('/custom/feed-pregenerated-urls.xml');
      expect(rss).to.equal(
        `<?xml version="1.0" encoding="UTF-8"?><rss version="2.0" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:content="http://purl.org/rss/1.0/modules/content/"><channel><title><![CDATA[MF Doomcast]]></title><description><![CDATA[The podcast about the things you find on a picnic, or at a picnic table]]></description><link>https://astro.build/custom/feed-pregenerated-urls.xml</link><language>en-us</language><itunes:author>MF Doom</itunes:author><item><title><![CDATA[Rap Snitch Knishes (feat. Mr. Fantastik)]]></title><link>https://example.com/episode/rap-snitch-knishes/</link><guid>https://example.com/episode/rap-snitch-knishes/</guid><description><![CDATA[Complex named this song the “22nd funniest rap song of all time.”]]></description><pubDate>Tue, 16 Nov 2004 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>172</itunes:duration><itunes:explicit>true</itunes:explicit></item><item><title><![CDATA[Fazers]]></title><link>https://example.com/episode/fazers/</link><guid>https://example.com/episode/fazers/</guid><description><![CDATA[Rhapsody ranked Take Me to Your Leader 17th on its list “Hip-Hop’s Best Albums of the Decade”]]></description><pubDate>Thu, 03 Jul 2003 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>197</itunes:duration><itunes:explicit>true</itunes:explicit></item><item><title><![CDATA[Rhymes Like Dimes (feat. Cucumber Slice)]]></title><link>https://example.com/episode/rhymes-like-dimes/</link><guid>https://example.com/episode/rhymes-like-dimes/</guid><description><![CDATA[Operation: Doomsday has been heralded as an underground classic that established MF Doom's rank within the underground hip-hop scene during the early to mid-2000s. ]]></description><pubDate>Tue, 19 Oct 1999 00:00:00 GMT</pubDate><itunes:episodeType>music</itunes:episodeType><itunes:duration>259</itunes:duration><itunes:explicit>true</itunes:explicit></item></channel></rss>`
      );
    });
  });

  describe('Sitemap Generation', () => {
    it('Generates Sitemap correctly', async () => {
      let sitemap = await fixture.readFile('/sitemap.xml');
      expect(sitemap).to.equal(
        `<?xml version="1.0" encoding="UTF-8"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://astro.build/episode/fazers/</loc></url><url><loc>https://astro.build/episode/rap-snitch-knishes/</loc></url><url><loc>https://astro.build/episode/rhymes-like-dimes/</loc></url><url><loc>https://astro.build/episodes/</loc></url></urlset>\n`
      );
    });
  });
});

describe('Sitemaps served from subdirectory', () => {
  let fixture;

  before(async () => {
    fixture = await loadFixture({
      projectRoot: './fixtures/astro-sitemap-rss/',
      buildOptions: {
        site: 'https://astro.build/base-directory/',
        sitemap: true,
      },
    });
    await fixture.build();
  });

  after(() => fixture.clean());

  describe('Sitemap Generation', () => {
    it('Generates Sitemap correctly', async () => {
      let sitemap = await fixture.readFile('/sitemap.xml');
      expect(sitemap).to.equal(
        `<?xml version="1.0" encoding="UTF-8"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"><url><loc>https://astro.build/base-directory/episode/fazers/</loc></url><url><loc>https://astro.build/base-directory/episode/rap-snitch-knishes/</loc></url><url><loc>https://astro.build/base-directory/episode/rhymes-like-dimes/</loc></url><url><loc>https://astro.build/base-directory/episodes/</loc></url></urlset>\n`
      );
    });
  });
});
import React, { Component } from 'react';
import { connect } from 'react-redux';
import requireAuth from './requireAuth';
import { secretAction } from '../actions';

class Secret extends Component {
  async componentDidMount() {
    await this.props.secretAction(localStorage.getItem('token'));
  }

  render() {
    return (
      <h2> :> {this.props.secret}</h2>
    )
  }
}

const mapStateToProps = state => {
  return { secret: state.auth.secret }
}

export default connect(mapStateToProps, { secretAction })(requireAuth(Secret));
import xml.etree.ElementTree as ET

xml_string = '''
<stuff>
    <users>
        <user x="2">
            <id>001</id>
            <name>Chuck</name>
        </user>
        <user x="7">
            <id>009</id>
            <name>Brent</name>
        </user>
    </users>
</stuff>
'''

root_stuff = ET.fromstring(xml_string)
user_elements = root_stuff.findall('users/user')
print('user count:', len(user_elements))

for user in user_elements:
    print('name:', user.find('name').text)
    print('id:', user.find('id').text)
    print('attribute(x):', user.get('x'))
import re

from src import _base


class Atom(_base.BaseAtom):
    MYSQL_ERR_CODE = r'(?P<err_code>[A-Z0-9][A-Z0-9-_]+)'
    MYSQL_SUBSYSTEM = r'(?P<subsystem>[A-Z]\S+)'


# https://mariadb.com/kb/en/error-log/#format
# https://dev.mysql.com/doc/refman/8.0/en/error-log-format.html
# Built after the class definition so that the `Atom` name is bound when
# `Atom.asdict()` is evaluated (inside the class body it would raise a
# NameError).
MYSQL = re.compile((
    r'{DATE}[T|\s]{TIME}{TIMEZONE}?\s'  # timestamp
    r'({THREAD_ID}\s)?\[{LEVEL}\]\s(\[{MYSQL_ERR_CODE}\]\s)?(\[{MYSQL_SUBSYSTEM}\]\s)?'
    r'{MESSAGE}$'
).format(**Atom.asdict()))
import React from 'react';

// const VideoDetail = (props) => {
const VideoDetail = ({video}) => {
  // If VideoDetail is rendered before a video is provided, return a
  // placeholder div that says "Loading..." instead of crashing. Once a
  // video is provided, the embed, title, and description below are shown.
  if (!video) {
    return <div>Loading...</div>;
  }

  // Build the embed URL from the retrieved videoId (ES6 template string).
  // The iframe below points at this URL, showing a YouTube player inside
  // the application.
  const videoId = video.id.videoId;
  const url = `https://www.youtube.com/embed/${videoId}`;

  return (
    <div className="video-detail col-md-8">
      <div className="embed-responsive embed-responsive-16by9">
        {/* Hand the URL to the iframe. (JSX comments must be wrapped in
            braces; a bare block comment here would render as text.) */}
        <iframe className="embed-responsive-item" src={url}></iframe>
      </div>
      <div className="details">
        <div>{video.snippet.title}</div>
        <div>{video.snippet.description}</div>
      </div>
    </div>
  );
};

export default VideoDetail;
# Copyright (c) 2022, Framras AS-Izmir and contributors
# For license information, please see license.txt

# import frappe
from frappe.model.document import Document


class TRUTSUsageNotification(Document):
    pass
'use strict';

// Init the application configuration module for AngularJS application
var ApplicationConfiguration = (function() {
  // Init module configuration options
  var applicationModuleName = 'youtubesw';
  var applicationModuleVendorDependencies = ['ngResource', 'ngCookies', 'ngAnimate', 'ngTouch', 'ngSanitize', 'ui.router', 'ui.bootstrap', 'ui.utils', 'youtube-embed'];

  // Add a new vertical module
  var registerModule = function(moduleName, dependencies) {
    // Create angular module
    angular.module(moduleName, dependencies || []);

    // Add the module to the AngularJS configuration file
    angular.module(applicationModuleName).requires.push(moduleName);
  };

  return {
    applicationModuleName: applicationModuleName,
    applicationModuleVendorDependencies: applicationModuleVendorDependencies,
    registerModule: registerModule
  };
})();
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


class PermissionDenied(Exception):
    pass


class EC2Exception(Exception):
    pass


class TargetExists(Exception):
    pass


class TargetMissing(Exception):
    pass


# REST errors
class AuthHeaderError(Exception):
    pass
/*
Copyright (c) 2013 William Malone (www.williammalone.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

/*global window */

var BLOCKS;

if (BLOCKS === undefined) {
    BLOCKS = {};
}

BLOCKS.stack = function (options) {

    "use strict";

    var stack = BLOCKS.eventDispatcher(),
        views = [],
        alpha,
        motors = [],
        x,
        y,
        visible,

        motorDestroyed = function (motor) {
            var i;
            motor.removeEventListener("destroyed", motorDestroyed);
            for (i = 0; i < motors.length; i += 1) {
                // Fix: only remove the motor that was destroyed (the original
                // spliced the first motor unconditionally).
                if (motors[i] === motor) {
                    motors.splice(i, 1);
                    break;
                }
            }
        },

        setViewsDirty = function (value) {
            var i;
            for (i = 0; i < views.length; i += 1) {
                views[i].dirty = value;
            }
        },

        getViewsDirty = function () {
            var i;
            for (i = 0; i < views.length; i += 1) {
                if (views[i].dirty) {
                    return true;
                }
            }
            return false;
        };

    options = options || {};

    // Public Methods
    stack.addView = function (view) {
        view.stack = stack;
        views.push(view);
    };

    stack.getView = function (name) {
        var i;
        for (i = 0; i < views.length; i += 1) {
            if (views[i].name === name) {
                return views[i];
            }
        }
    };

    stack.removeView = function (view) {
        var i;
        for (i = 0; i < views.length; i += 1) {
            if (views[i] === view) {
                // Fix: splice the views array itself; the original called
                // views[i].splice(i, 1) on the view object.
                views.splice(i, 1);
                break;
            }
        }
    };

    stack.show = function () {
        var i;
        if (!visible) {
            visible = true;
            for (i = 0; i < views.length; i += 1) {
                views[i].show();
            }
        }
    };

    stack.hide = function () {
        var i;
        if (visible) {
            visible = false;
            for (i = 0; i < views.length; i += 1) {
                views[i].hide();
            }
        }
    };

    stack.isPointInside = function (pos) {
        var i;
        for (i = 0; i < views.length; i += 1) {
            if (views[i].visible && views[i].isPointInside(pos)) {
                return true;
            }
        }
        return false;
    };

    stack.motorize = function (motor) {
        motor.addEventListener("destroyed", motorDestroyed);
        motors.push(motor);
    };

    stack.removeMotors = function (type) {
        var i,
            motorArr = [];
        for (i = 0; i < motors.length; i += 1) {
            if (type) {
                if (motors[i].type === type) {
                    motors[i].destroy();
                } else {
                    motorArr.push(motors[i]);
                }
            } else {
                motors[i].destroy();
            }
        }
        motors = motorArr;
    };

    stack.destroy = function () {
        var i;
        for (i = 0; i < views.length; i += 1) {
            views[i].destroy();
        }
        views = [];
        stack = null;
    };

    Object.defineProperty(stack, "dirty", {
        get: function () {
            return getViewsDirty();
        },
        set: function (value) {
            setViewsDirty(value);
        }
    });

    Object.defineProperty(stack, "alpha", {
        get: function () {
            return alpha !== undefined ? alpha : 1;
        },
        set: function (value) {
            var i;
            alpha = value;
            for (i = 0; i < views.length; i += 1) {
                views[i].alpha = value;
            }
        }
    });

    Object.defineProperty(stack, "width", {
        get: function () {
            var i,
                largestWidth = 0;
            for (i = 0; i < views.length; i += 1) {
                if (views[i].visible && views[i].width > largestWidth) {
                    largestWidth = views[i].width;
                }
            }
            return largestWidth;
        }
    });

    Object.defineProperty(stack, "height", {
        get: function () {
            var i,
                largestHeight = 0;
            for (i = 0; i < views.length; i += 1) {
                if (views[i].visible && views[i].height > largestHeight) {
                    largestHeight = views[i].height;
                }
            }
            return largestHeight;
        }
    });

    x = options.x || 0;
    Object.defineProperty(stack, "x", {
        get: function () {
            return x;
        },
        set: function (value) {
            if (x !== value) {
                x = value;
                setViewsDirty(true);
            }
        }
    });

    y = options.y || 0;
    Object.defineProperty(stack, "y", {
        get: function () {
            return y;
        },
        set: function (value) {
            if (y !== value) {
                y = value;
                setViewsDirty(true);
            }
        }
    });

    // Fix: "options.visible || true" always evaluated to true, making it
    // impossible to create a hidden stack.
    visible = options.visible !== undefined ? options.visible : true;
    Object.defineProperty(stack, "visible", {
        get: function () {
            return visible;
        },
        set: function (value) {
            if (value) {
                stack.show();
            } else {
                stack.hide();
            }
        }
    });

    return stack;
};
import airflow
from airflow import DAG

dag = DAG(
    dag_id="listing_2_03",
    start_date=airflow.utils.dates.days_ago(14),
    schedule_interval=None,
)
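
# --- Hedged sketch (not part of the original listing) -------------------------
# The DAG above defines no tasks; a minimal task could be attached like this.
# Assumes Airflow 2.x import paths; the task_id and command are invented.
from airflow.operators.bash import BashOperator

hello = BashOperator(
    task_id="say_hello",
    bash_command="echo hello",
    dag=dag,  # attach the task to the DAG defined above
)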
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.

from .aws import Action as BaseAction
from .aws import BaseARN

service_name = "AWS Elemental MediaPackage VOD"
prefix = "mediapackage-vod"


class Action(BaseAction):
    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)


class ARN(BaseARN):
    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )


CreateAsset = Action("CreateAsset")
CreatePackagingConfiguration = Action("CreatePackagingConfiguration")
CreatePackagingGroup = Action("CreatePackagingGroup")
DeleteAsset = Action("DeleteAsset")
DeletePackagingConfiguration = Action("DeletePackagingConfiguration")
DeletePackagingGroup = Action("DeletePackagingGroup")
DescribeAsset = Action("DescribeAsset")
DescribePackagingConfiguration = Action("DescribePackagingConfiguration")
DescribePackagingGroup = Action("DescribePackagingGroup")
ListAssets = Action("ListAssets")
ListPackagingConfigurations = Action("ListPackagingConfigurations")
ListPackagingGroups = Action("ListPackagingGroups")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
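
# --- Hedged usage sketch ------------------------------------------------------
# Action constants like the ones above are typically composed into IAM policy
# documents. This assumes awacs-style Policy/Statement/Allow helpers exist in
# the sibling `aws` module (as the imports at the top of the file suggest);
# the sid and Resource value are invented for the demo.
from .aws import Allow, Policy, Statement

read_only_policy = Policy(
    Version="2012-10-17",
    Statement=[
        Statement(
            Sid="MediaPackageVodReadOnly",
            Effect=Allow,
            Action=[DescribeAsset, ListAssets, ListPackagingGroups],
            Resource=["*"],
        )
    ],
)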
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QLineEdit, QPushButton, QVBoxLayout, QHBoxLayout
from PyQt5.QtWidgets import QAction, qApp, QMainWindow
from PyQt5.QtGui import QFont, QIcon
from PyQt5 import QtCore
import json
import sys
import os


class Game(QWidget):
    """A small Turkish word-guessing game: unlock hints for gold, guess the word."""

    def __init__(self):
        super().__init__()
        self.dosya_ac()
        self.para = 30            # player's gold
        self.count = 2            # next hint number to unlock (2 or 3)
        self.restart_count = 0    # index of the current word
        self.level_count = 1
        self.bildi = False        # True once the current word has been guessed
        self.end = False          # True when all words are finished
        self.init_ui()

    def dosya_ac(self):
        # "open file": load the hint data and collect the word keys.
        global data
        global ipuclari
        with open("ipucu.json", encoding="utf-8") as json_file:
            data = json.load(json_file)
        ipuclari = list()
        for i in data:
            for x in data[i].keys():
                ipuclari.append(x)

    def init_ui(self):
        self.gold = QLabel("Gold: ")
        self.gold_str = QLabel(str(self.para))
        self.level = QLabel("Level ")
        self.level_str = QLabel(str(self.level_count))
        self.i1 = QLabel("İpucu 1: ")  # "Hint 1"
        self.i1_str = QLabel(data['kelime'][ipuclari[self.restart_count]]['1'])
        self.i2 = QLabel("İpucu 2: ")
        self.i2_str = QLabel("İpucu Kilitli!")  # "Hint locked!"
        self.i3 = QLabel("İpucu 3: ")
        self.i3_str = QLabel("İpucu Kilitli!")
        self.yazi_alani = QLineEdit()  # guess input field
        self.i_btn = QPushButton("İpucu Aç (-10g)")  # "Unlock hint"
        self.tahmin_btn = QPushButton("Tahmin Et")   # "Guess"
        self.sonraki = QPushButton("Sonraki")        # "Next"
        self.text = QLabel("")

        h_box = QHBoxLayout()
        h_box.addWidget(self.gold)
        h_box.addWidget(self.gold_str)
        h_box.addStretch()
        h_box.addWidget(self.level)
        h_box.addWidget(self.level_str)

        v_box = QVBoxLayout()
        v_box.addWidget(self.i1)
        v_box.addStretch()
        v_box.addWidget(self.i2)
        v_box.addStretch()
        v_box.addWidget(self.i3)
        v_box.addStretch()

        v2_box = QVBoxLayout()
        v2_box.addWidget(self.i1_str)
        v2_box.addStretch()
        v2_box.addWidget(self.i2_str)
        v2_box.addStretch()
        v2_box.addWidget(self.i3_str)
        v2_box.addStretch()

        h2_box = QHBoxLayout()
        h2_box.addStretch()
        h2_box.addLayout(v_box)
        h2_box.addLayout(v2_box)
        h2_box.addStretch()

        v3_box = QVBoxLayout()
        v3_box.addLayout(h_box)
        v3_box.addStretch()
        v3_box.addLayout(h2_box)
        v3_box.addStretch()
        v3_box.addWidget(self.yazi_alani)

        h3_box = QHBoxLayout()
        h3_box.addWidget(self.i_btn)
        h3_box.addWidget(self.tahmin_btn)
        h3_box.addWidget(self.sonraki)

        h4_box = QHBoxLayout()
        h4_box.addStretch()
        h4_box.addWidget(self.text)
        h4_box.addStretch()

        v4_box = QVBoxLayout()
        v4_box.addLayout(v3_box)
        v4_box.addLayout(h3_box)
        v4_box.addLayout(h4_box)

        self.setLayout(v4_box)
        self.setWindowTitle("Kelime Bulmaca")  # "Word Puzzle"
        self.setWindowIcon(QIcon("logo.png"))
        self.setMinimumHeight(250)
        self.setMaximumHeight(250)
        self.setMinimumWidth(350)
        self.setMaximumWidth(350)

        self.i_btn.clicked.connect(self.info)
        self.tahmin_btn.clicked.connect(self.guess)
        self.sonraki.clicked.connect(self.next)
        self.show()

    def info(self):
        # Unlock the next hint for 10 gold, if any remain and the game is live.
        if self.count == 4 or self.end == True:
            return False
        if self.para >= 10:
            self.para -= 10
            self.gold_str.setText(str(self.para))
            if self.count == 2:
                self.i2_str.setText(data['kelime'][ipuclari[self.restart_count]][str(self.count)])
                self.count += 1
            elif self.count == 3:
                self.i3_str.setText(data['kelime'][ipuclari[self.restart_count]][str(self.count)])
                self.count += 1
        else:
            self.text.setText("Yeterli Gold Yok!")  # "Not enough gold!"

    def guess(self):
        # Compare the lowercased guess against the current word.
        kullanıcı_tahmini = self.yazi_alani.text().lower()
        if kullanıcı_tahmini == "" or self.bildi == True or self.end == True:
            return False
        if self.text.text() != "":
            self.text.setText("")
        kelime = ipuclari[self.restart_count]
        if kullanıcı_tahmini == kelime:
            self.bildi = True
            self.para += 5
            self.gold_str.setText(str(self.para))
            self.text.setText("Doğru bildin! Sonraki soruya geç")  # "Correct! Go to the next question"
        else:
            self.text.setText("Bilemedin. Tekrar dene.")  # "Wrong. Try again."
            return False

    def next(self):
        # Advance to the next word once the current one has been guessed.
        if self.bildi == False or self.end == True:
            return False
        if self.restart_count != len(ipuclari) - 1:
            self.restart_count += 1
            self.bildi = False
            self.count = 2
            self.yazi_alani.clear()
            self.i1_str.setText(data['kelime'][ipuclari[self.restart_count]]['1'])
            self.i2_str.setText("İpucu Kilitli!")
            self.i3_str.setText("İpucu Kilitli!")
            if self.level_count != len(ipuclari) - 1:
                self.level_count += 1
                self.level_str.setText(str(self.level_count))
            else:
                self.level_str.setText("MAX")
        else:
            self.text.setText("Tebrikler bitirdiniz.")  # "Congratulations, you finished."
            self.end = True
            return False
        if self.text.text() != "":
            self.text.setText("")


app = QApplication(sys.argv)
menu = Game()
sys.exit(app.exec_())
/* @(#) $Header$ (LBL) */
/* $NetBSD: ip6.h,v 1.9 2000/07/13 05:34:21 itojun Exp $ */
/* $KAME: ip6.h,v 1.9 2000/07/02 21:01:32 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)ip.h    8.1 (Berkeley) 6/10/93
 */

#ifndef _NETINET_IP6_H_
#define _NETINET_IP6_H_

/*
 * Definition for internet protocol version 6.
 * RFC 2460
 */

struct ip6_hdr {
    union {
        struct ip6_hdrctl {
            u_int32_t ip6_un1_flow;  /* 20 bits of flow-ID */
            u_int16_t ip6_un1_plen;  /* payload length */
            u_int8_t  ip6_un1_nxt;   /* next header */
            u_int8_t  ip6_un1_hlim;  /* hop limit */
        } ip6_un1;
        u_int8_t ip6_un2_vfc;        /* 4 bits version, top 4 bits class */
    } ip6_ctlun;
    struct in6_addr ip6_src;         /* source address */
    struct in6_addr ip6_dst;         /* destination address */
};

#define ip6_vfc     ip6_ctlun.ip6_un2_vfc
#define ip6_flow    ip6_ctlun.ip6_un1.ip6_un1_flow
#define ip6_plen    ip6_ctlun.ip6_un1.ip6_un1_plen
#define ip6_nxt     ip6_ctlun.ip6_un1.ip6_un1_nxt
#define ip6_hlim    ip6_ctlun.ip6_un1.ip6_un1_hlim
#define ip6_hops    ip6_ctlun.ip6_un1.ip6_un1_hlim

/* in network endian */
#define IPV6_FLOWINFO_MASK  ((u_int32_t)htonl(0x0fffffff))  /* flow info (28 bits) */
#define IPV6_FLOWLABEL_MASK ((u_int32_t)htonl(0x000fffff))  /* flow label (20 bits) */

#if 1
/* ECN bits proposed by Sally Floyd */
#define IP6TOS_CE   0x01  /* congestion experienced */
#define IP6TOS_ECT  0x02  /* ECN-capable transport */
#endif

/*
 * Extension Headers
 */

struct ip6_ext {
    u_char ip6e_nxt;
    u_char ip6e_len;
};

/* Hop-by-Hop options header */
/* XXX should we pad it to force alignment on an 8-byte boundary? */
struct ip6_hbh {
    u_int8_t ip6h_nxt;  /* next header */
    u_int8_t ip6h_len;  /* length in units of 8 octets */
    /* followed by options */
};

/* Destination options header */
/* XXX should we pad it to force alignment on an 8-byte boundary? */
struct ip6_dest {
    u_int8_t ip6d_nxt;  /* next header */
    u_int8_t ip6d_len;  /* length in units of 8 octets */
    /* followed by options */
};

/* Option types and related macros */
#define IP6OPT_PAD1             0x00  /* 00 0 00000 */
#define IP6OPT_PADN             0x01  /* 00 0 00001 */
#define IP6OPT_JUMBO            0xC2  /* 11 0 00010 = 194 */
#define IP6OPT_JUMBO_LEN        6
#define IP6OPT_ROUTER_ALERT     0x05  /* 00 0 00101 */
#define IP6OPT_RTALERT_LEN      4
#define IP6OPT_RTALERT_MLD      0     /* Datagram contains an MLD message */
#define IP6OPT_RTALERT_RSVP     1     /* Datagram contains an RSVP message */
#define IP6OPT_RTALERT_ACTNET   2     /* contains an Active Networks msg */
#define IP6OPT_MINLEN           2
#define IP6OPT_BINDING_UPDATE   0xc6  /* 11 0 00110 */
#define IP6OPT_BINDING_ACK      0x07  /* 00 0 00111 */
#define IP6OPT_BINDING_REQ      0x08  /* 00 0 01000 */
#define IP6OPT_HOME_ADDRESS     0xc9  /* 11 0 01001 */
#define IP6OPT_EID              0x8a  /* 10 0 01010 */
#define IP6OPT_TYPE(o)          ((o) & 0xC0)
#define IP6OPT_TYPE_SKIP        0x00
#define IP6OPT_TYPE_DISCARD     0x40
#define IP6OPT_TYPE_FORCEICMP   0x80
#define IP6OPT_TYPE_ICMP        0xC0
#define IP6OPT_MUTABLE          0x20

/* Routing header */
struct ip6_rthdr {
    u_int8_t ip6r_nxt;      /* next header */
    u_int8_t ip6r_len;      /* length in units of 8 octets */
    u_int8_t ip6r_type;     /* routing type */
    u_int8_t ip6r_segleft;  /* segments left */
    /* followed by routing type specific data */
};

/* Type 0 Routing header */
struct ip6_rthdr0 {
    u_int8_t ip6r0_nxt;             /* next header */
    u_int8_t ip6r0_len;             /* length in units of 8 octets */
    u_int8_t ip6r0_type;            /* always zero */
    u_int8_t ip6r0_segleft;         /* segments left */
    u_int8_t ip6r0_reserved;        /* reserved field */
    u_int8_t ip6r0_slmap[3];        /* strict/loose bit map */
    struct in6_addr ip6r0_addr[1];  /* up to 23 addresses */
};

/* Fragment header */
struct ip6_frag {
    u_int8_t  ip6f_nxt;       /* next header */
    u_int8_t  ip6f_reserved;  /* reserved field */
    u_int16_t ip6f_offlg;     /* offset, reserved, and flag */
    u_int32_t ip6f_ident;     /* identification */
};

#define IP6F_OFF_MASK       0xfff8  /* mask out offset from ip6f_offlg */
#define IP6F_RESERVED_MASK  0x0006  /* reserved bits in ip6f_offlg */
#define IP6F_MORE_FRAG      0x0001  /* more-fragments flag */

#endif /* not _NETINET_IP6_H_ */
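
# --- Hedged illustration (Python, not part of the C header above) -------------
# The fixed IPv6 header defined above is 40 bytes: 4 bytes of
# version/traffic-class/flow-label (ip6_flow), a 16-bit payload length, the
# next-header and hop-limit bytes, then two 16-byte addresses. A quick
# sanity-check parser for raw packet bytes, mirroring that layout:
import struct

def parse_ip6_hdr(raw: bytes) -> dict:
    vtf, plen, nxt, hlim, src, dst = struct.unpack('!IHBB16s16s', raw[:40])
    return {
        'version': vtf >> 28,        # top 4 bits of ip6_flow
        'flow': vtf & 0x000fffff,    # IPV6_FLOWLABEL_MASK (20 bits)
        'plen': plen,
        'nxt': nxt,
        'hlim': hlim,
        'src': src,
        'dst': dst,
    }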
from collections.abc import MutableMapping import functools import numpy as np import matplotlib from matplotlib import _api, docstring, rcParams from matplotlib.artist import allow_rasterization import matplotlib.transforms as mtransforms import matplotlib.patches as mpatches import matplotlib.path as mpath class Spine(mpatches.Patch): """ An axis spine -- the line noting the data area boundaries. Spines are the lines connecting the axis tick marks and noting the boundaries of the data area. They can be placed at arbitrary positions. See `~.Spine.set_position` for more information. The default position is ``('outward', 0)``. Spines are subclasses of `.Patch`, and inherit much of their behavior. Spines draw a line, a circle, or an arc depending if `~.Spine.set_patch_line`, `~.Spine.set_patch_circle`, or `~.Spine.set_patch_arc` has been called. Line-like is the default. """ def __str__(self): return "Spine" @docstring.dedent_interpd def __init__(self, axes, spine_type, path, **kwargs): """ Parameters ---------- axes : `~matplotlib.axes.Axes` The `~.axes.Axes` instance containing the spine. spine_type : str The spine type. path : `~matplotlib.path.Path` The `.Path` instance used to draw the spine. Other Parameters ---------------- **kwargs Valid keyword arguments are: %(Patch:kwdoc)s """ super().__init__(**kwargs) self.axes = axes self.set_figure(self.axes.figure) self.spine_type = spine_type self.set_facecolor('none') self.set_edgecolor(rcParams['axes.edgecolor']) self.set_linewidth(rcParams['axes.linewidth']) self.set_capstyle('projecting') self.axis = None self.set_zorder(2.5) self.set_transform(self.axes.transData) # default transform self._bounds = None # default bounds # Defer initial position determination. (Not much support for # non-rectangular axes is currently implemented, and this lets # them pass through the spines machinery without errors.) self._position = None _api.check_isinstance(matplotlib.path.Path, path=path) self._path = path # To support drawing both linear and circular spines, this # class implements Patch behavior three ways. If # self._patch_type == 'line', behave like a mpatches.PathPatch # instance. If self._patch_type == 'circle', behave like a # mpatches.Ellipse instance. If self._patch_type == 'arc', behave like # a mpatches.Arc instance. self._patch_type = 'line' # Behavior copied from mpatches.Ellipse: # Note: This cannot be calculated until this is added to an Axes self._patch_transform = mtransforms.IdentityTransform() def set_patch_arc(self, center, radius, theta1, theta2): """Set the spine to be arc-like.""" self._patch_type = 'arc' self._center = center self._width = radius * 2 self._height = radius * 2 self._theta1 = theta1 self._theta2 = theta2 self._path = mpath.Path.arc(theta1, theta2) # arc drawn on axes transform self.set_transform(self.axes.transAxes) self.stale = True def set_patch_circle(self, center, radius): """Set the spine to be circular.""" self._patch_type = 'circle' self._center = center self._width = radius * 2 self._height = radius * 2 # circle drawn on axes transform self.set_transform(self.axes.transAxes) self.stale = True def set_patch_line(self): """Set the spine to be linear.""" self._patch_type = 'line' self.stale = True # Behavior copied from mpatches.Ellipse: def _recompute_transform(self): """ Notes ----- This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This makes it very important to call the accessor method and not directly access the transformation member variable. 
""" assert self._patch_type in ('arc', 'circle') center = (self.convert_xunits(self._center[0]), self.convert_yunits(self._center[1])) width = self.convert_xunits(self._width) height = self.convert_yunits(self._height) self._patch_transform = mtransforms.Affine2D() \ .scale(width * 0.5, height * 0.5) \ .translate(*center) def get_patch_transform(self): if self._patch_type in ('arc', 'circle'): self._recompute_transform() return self._patch_transform else: return super().get_patch_transform() def get_window_extent(self, renderer=None): """ Return the window extent of the spines in display space, including padding for ticks (but not their labels) See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axes.Axes.get_window_extent """ # make sure the location is updated so that transforms etc are correct: self._adjust_location() bb = super().get_window_extent(renderer=renderer) if self.axis is None: return bb bboxes = [bb] tickstocheck = [self.axis.majorTicks[0]] if len(self.axis.minorTicks) > 1: # only pad for minor ticks if there are more than one # of them. There is always one... tickstocheck.append(self.axis.minorTicks[1]) for tick in tickstocheck: bb0 = bb.frozen() tickl = tick._size tickdir = tick._tickdir if tickdir == 'out': padout = 1 padin = 0 elif tickdir == 'in': padout = 0 padin = 1 else: padout = 0.5 padin = 0.5 padout = padout * tickl / 72 * self.figure.dpi padin = padin * tickl / 72 * self.figure.dpi if tick.tick1line.get_visible(): if self.spine_type == 'left': bb0.x0 = bb0.x0 - padout bb0.x1 = bb0.x1 + padin elif self.spine_type == 'bottom': bb0.y0 = bb0.y0 - padout bb0.y1 = bb0.y1 + padin if tick.tick2line.get_visible(): if self.spine_type == 'right': bb0.x1 = bb0.x1 + padout bb0.x0 = bb0.x0 - padin elif self.spine_type == 'top': bb0.y1 = bb0.y1 + padout bb0.y0 = bb0.y0 - padout bboxes.append(bb0) return mtransforms.Bbox.union(bboxes) def get_path(self): return self._path def _ensure_position_is_set(self): if self._position is None: # default position self._position = ('outward', 0.0) # in points self.set_position(self._position) def register_axis(self, axis): """ Register an axis. An axis should be registered with its corresponding spine from the Axes instance. This allows the spine to clear any axis properties when needed. 
""" self.axis = axis if self.axis is not None: self.axis.clear() self.stale = True def clear(self): """Clear the current spine.""" self._position = None # clear position if self.axis is not None: self.axis.clear() @_api.deprecated("3.4", alternative="`.Spine.clear`") def cla(self): self.clear() def _adjust_location(self): """Automatically set spine bounds to the view interval.""" if self.spine_type == 'circle': return if self._bounds is not None: low, high = self._bounds elif self.spine_type in ('left', 'right'): low, high = self.axes.viewLim.intervaly elif self.spine_type in ('top', 'bottom'): low, high = self.axes.viewLim.intervalx else: raise ValueError(f'unknown spine spine_type: {self.spine_type}') if self._patch_type == 'arc': if self.spine_type in ('bottom', 'top'): try: direction = self.axes.get_theta_direction() except AttributeError: direction = 1 try: offset = self.axes.get_theta_offset() except AttributeError: offset = 0 low = low * direction + offset high = high * direction + offset if low > high: low, high = high, low self._path = mpath.Path.arc(np.rad2deg(low), np.rad2deg(high)) if self.spine_type == 'bottom': rmin, rmax = self.axes.viewLim.intervaly try: rorigin = self.axes.get_rorigin() except AttributeError: rorigin = rmin scaled_diameter = (rmin - rorigin) / (rmax - rorigin) self._height = scaled_diameter self._width = scaled_diameter else: raise ValueError('unable to set bounds for spine "%s"' % self.spine_type) else: v1 = self._path.vertices assert v1.shape == (2, 2), 'unexpected vertices shape' if self.spine_type in ['left', 'right']: v1[0, 1] = low v1[1, 1] = high elif self.spine_type in ['bottom', 'top']: v1[0, 0] = low v1[1, 0] = high else: raise ValueError('unable to set bounds for spine "%s"' % self.spine_type) @allow_rasterization def draw(self, renderer): self._adjust_location() ret = super().draw(renderer) self.stale = False return ret def set_position(self, position): """ Set the position of the spine. Spine position is specified by a 2 tuple of (position type, amount). The position types are: * 'outward': place the spine out from the data area by the specified number of points. (Negative values place the spine inwards.) * 'axes': place the spine at the specified Axes coordinate (0 to 1). * 'data': place the spine at the specified data coordinate. 
Additionally, shorthand notations define a special positions: * 'center' -> ('axes', 0.5) * 'zero' -> ('data', 0.0) """ if position in ('center', 'zero'): # special positions pass else: if len(position) != 2: raise ValueError("position should be 'center' or 2-tuple") if position[0] not in ['outward', 'axes', 'data']: raise ValueError("position[0] should be one of 'outward', " "'axes', or 'data' ") self._position = position self.set_transform(self.get_spine_transform()) if self.axis is not None: self.axis.reset_ticks() self.stale = True def get_position(self): """Return the spine position.""" self._ensure_position_is_set() return self._position def get_spine_transform(self): """Return the spine transform.""" self._ensure_position_is_set() position = self._position if isinstance(position, str): if position == 'center': position = ('axes', 0.5) elif position == 'zero': position = ('data', 0) assert len(position) == 2, 'position should be 2-tuple' position_type, amount = position _api.check_in_list(['axes', 'outward', 'data'], position_type=position_type) if self.spine_type in ['left', 'right']: base_transform = self.axes.get_yaxis_transform(which='grid') elif self.spine_type in ['top', 'bottom']: base_transform = self.axes.get_xaxis_transform(which='grid') else: raise ValueError(f'unknown spine spine_type: {self.spine_type!r}') if position_type == 'outward': if amount == 0: # short circuit commonest case return base_transform else: offset_vec = {'left': (-1, 0), 'right': (1, 0), 'bottom': (0, -1), 'top': (0, 1), }[self.spine_type] # calculate x and y offset in dots offset_dots = amount * np.array(offset_vec) / 72 return (base_transform + mtransforms.ScaledTranslation( *offset_dots, self.figure.dpi_scale_trans)) elif position_type == 'axes': if self.spine_type in ['left', 'right']: # keep y unchanged, fix x at amount return (mtransforms.Affine2D.from_values(0, 0, 0, 1, amount, 0) + base_transform) elif self.spine_type in ['bottom', 'top']: # keep x unchanged, fix y at amount return (mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, amount) + base_transform) elif position_type == 'data': if self.spine_type in ('right', 'top'): # The right and top spines have a default position of 1 in # axes coordinates. When specifying the position in data # coordinates, we need to calculate the position relative to 0. amount -= 1 if self.spine_type in ('left', 'right'): return mtransforms.blended_transform_factory( mtransforms.Affine2D().translate(amount, 0) + self.axes.transData, self.axes.transData) elif self.spine_type in ('bottom', 'top'): return mtransforms.blended_transform_factory( self.axes.transData, mtransforms.Affine2D().translate(0, amount) + self.axes.transData) def set_bounds(self, low=None, high=None): """ Set the spine bounds. Parameters ---------- low : float or None, optional The lower spine bound. Passing *None* leaves the limit unchanged. The bounds may also be passed as the tuple (*low*, *high*) as the first positional argument. .. ACCEPTS: (low: float, high: float) high : float or None, optional The higher spine bound. Passing *None* leaves the limit unchanged. 
""" if self.spine_type == 'circle': raise ValueError( 'set_bounds() method incompatible with circular spines') if high is None and np.iterable(low): low, high = low old_low, old_high = self.get_bounds() or (None, None) if low is None: low = old_low if high is None: high = old_high self._bounds = (low, high) self.stale = True def get_bounds(self): """Get the bounds of the spine.""" return self._bounds @classmethod def linear_spine(cls, axes, spine_type, **kwargs): """Create and return a linear `Spine`.""" # all values of 0.999 get replaced upon call to set_bounds() if spine_type == 'left': path = mpath.Path([(0.0, 0.999), (0.0, 0.999)]) elif spine_type == 'right': path = mpath.Path([(1.0, 0.999), (1.0, 0.999)]) elif spine_type == 'bottom': path = mpath.Path([(0.999, 0.0), (0.999, 0.0)]) elif spine_type == 'top': path = mpath.Path([(0.999, 1.0), (0.999, 1.0)]) else: raise ValueError('unable to make path for spine "%s"' % spine_type) result = cls(axes, spine_type, path, **kwargs) result.set_visible(rcParams['axes.spines.{0}'.format(spine_type)]) return result @classmethod def arc_spine(cls, axes, spine_type, center, radius, theta1, theta2, **kwargs): """Create and return an arc `Spine`.""" path = mpath.Path.arc(theta1, theta2) result = cls(axes, spine_type, path, **kwargs) result.set_patch_arc(center, radius, theta1, theta2) return result @classmethod def circular_spine(cls, axes, center, radius, **kwargs): """Create and return a circular `Spine`.""" path = mpath.Path.unit_circle() spine_type = 'circle' result = cls(axes, spine_type, path, **kwargs) result.set_patch_circle(center, radius) return result def set_color(self, c): """ Set the edgecolor. Parameters ---------- c : color Notes ----- This method does not modify the facecolor (which defaults to "none"), unlike the `.Patch.set_color` method defined in the parent class. Use `.Patch.set_facecolor` to set the facecolor. """ self.set_edgecolor(c) self.stale = True class SpinesProxy: """ A proxy to broadcast ``set_*`` method calls to all contained `.Spines`. The proxy cannot be used for any other operations on its members. The supported methods are determined dynamically based on the contained spines. If not all spines support a given method, it's executed only on the subset of spines that support it. """ def __init__(self, spine_dict): self._spine_dict = spine_dict def __getattr__(self, name): broadcast_targets = [spine for spine in self._spine_dict.values() if hasattr(spine, name)] if not name.startswith('set_') or not broadcast_targets: raise AttributeError( f"'SpinesProxy' object has no attribute '{name}'") def x(_targets, _funcname, *args, **kwargs): for spine in _targets: getattr(spine, _funcname)(*args, **kwargs) x = functools.partial(x, broadcast_targets, name) x.__doc__ = broadcast_targets[0].__doc__ return x def __dir__(self): names = [] for spine in self._spine_dict.values(): names.extend(name for name in dir(spine) if name.startswith('set_')) return list(sorted(set(names))) class Spines(MutableMapping): r""" The container of all `.Spine`\s in an Axes. The interface is dict-like mapping names (e.g. 'left') to `.Spine` objects. 
Additionally it implements some pandas.Series-like features like accessing elements by attribute:: spines['top'].set_visible(False) spines.top.set_visible(False) Multiple spines can be addressed simultaneously by passing a list:: spines[['top', 'right']].set_visible(False) Use an open slice to address all spines:: spines[:].set_visible(False) The latter two indexing methods will return a `SpinesProxy` that broadcasts all ``set_*`` calls to its members, but cannot be used for any other operation. """ def __init__(self, **kwargs): self._dict = kwargs @classmethod def from_dict(cls, d): return cls(**d) def __getstate__(self): return self._dict def __setstate__(self, state): self.__init__(**state) def __getattr__(self, name): try: return self._dict[name] except KeyError: raise AttributeError( f"'Spines' object does not contain a '{name}' spine") def __getitem__(self, key): if isinstance(key, list): unknown_keys = [k for k in key if k not in self._dict] if unknown_keys: raise KeyError(', '.join(unknown_keys)) return SpinesProxy({k: v for k, v in self._dict.items() if k in key}) if isinstance(key, tuple): raise ValueError('Multiple spines must be passed as a single list') if isinstance(key, slice): if key.start is None and key.stop is None and key.step is None: return SpinesProxy(self._dict) else: raise ValueError( 'Spines does not support slicing except for the fully ' 'open slice [:] to access all spines.') return self._dict[key] def __setitem__(self, key, value): # TODO: Do we want to deprecate adding spines? self._dict[key] = value def __delitem__(self, key): # TODO: Do we want to deprecate deleting spines? del self._dict[key] def __iter__(self): return iter(self._dict) def __len__(self): return len(self._dict)
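# --- Usage sketch (illustrative, not part of the module above). A minimal
# demonstration of the Spines container and SpinesProxy broadcasting,
# assuming a standard Matplotlib Axes; guarded so it never runs on import.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.spines['top'].set_visible(False)             # dict-style access to one spine
    ax.spines.right.set_visible(False)              # attribute-style access
    ax.spines[['left', 'bottom']].set_color('0.3')  # list -> SpinesProxy broadcast
    ax.spines[:].set_linewidth(0.8)                 # open slice -> all spines
    ax.spines.left.set_position(('outward', 10))    # 'outward' position type, in points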
import React from 'react'
import MyNavbar from './MyNavbar'
import AddBlogForm from './AddBlogForm'

function PageAddBlog(props) {
  return (
    <div>
      <MyNavbar location={props.location.pathname}/>
      <div className='container mt-5'>
        <AddBlogForm/>
      </div>
    </div>
  )
}

export default PageAddBlog
var graphic = require('../../util/graphic'); var HeatmapLayer = require('./HeatmapLayer'); var zrUtil = require('zrender/lib/core/util'); function getIsInPiecewiseRange(dataExtent, pieceList, selected) { var dataSpan = dataExtent[1] - dataExtent[0]; pieceList = zrUtil.map(pieceList, function (piece) { return { interval: [ (piece.interval[0] - dataExtent[0]) / dataSpan, (piece.interval[1] - dataExtent[0]) / dataSpan ] }; }); var len = pieceList.length; var lastIndex = 0; return function (val) { // Try to find in the location of the last found for (var i = lastIndex; i < len; i++) { var interval = pieceList[i].interval; if (interval[0] <= val && val <= interval[1]) { lastIndex = i; break; } } if (i === len) { // Not found, back interation for (var i = lastIndex - 1; i >= 0; i--) { var interval = pieceList[i].interval; if (interval[0] <= val && val <= interval[1]) { lastIndex = i; break; } } } return i >= 0 && i < len && selected[i]; }; } function getIsInContinuousRange(dataExtent, range) { var dataSpan = dataExtent[1] - dataExtent[0]; range = [ (range[0] - dataExtent[0]) / dataSpan, (range[1] - dataExtent[0]) / dataSpan ]; return function (val) { return val >= range[0] && val <= range[1]; }; } function isGeoCoordSys(coordSys) { var dimensions = coordSys.dimensions; // Not use coorSys.type === 'geo' because coordSys maybe extended return dimensions[0] === 'lng' && dimensions[1] === 'lat'; } module.exports = require('../../echarts').extendChartView({ type: 'heatmap', render: function (seriesModel, ecModel, api) { var visualMapOfThisSeries; ecModel.eachComponent('visualMap', function (visualMap) { visualMap.eachTargetSeries(function (targetSeries) { if (targetSeries === seriesModel) { visualMapOfThisSeries = visualMap; } }); }); if (__DEV__) { if (!visualMapOfThisSeries) { throw new Error('Heatmap must use with visualMap'); } } this.group.removeAll(); var coordSys = seriesModel.coordinateSystem; if (coordSys.type === 'cartesian2d') { this._renderOnCartesian(coordSys, seriesModel, api); } else if (isGeoCoordSys(coordSys)) { this._renderOnGeo( coordSys, seriesModel, visualMapOfThisSeries, api ); } }, _renderOnCartesian: function (cartesian, seriesModel, api) { var xAxis = cartesian.getAxis('x'); var yAxis = cartesian.getAxis('y'); var group = this.group; if (__DEV__) { if (!(xAxis.type === 'category' && yAxis.type === 'category')) { throw new Error('Heatmap on cartesian must have two category axes'); } if (!(xAxis.onBand && yAxis.onBand)) { throw new Error('Heatmap on cartesian must have two axes with boundaryGap true'); } } var width = xAxis.getBandWidth(); var height = yAxis.getBandWidth(); var data = seriesModel.getData(); var itemStyleQuery = 'itemStyle.normal'; var hoverItemStyleQuery = 'itemStyle.emphasis'; var labelQuery = 'label.normal'; var hoverLabelQuery = 'label.emphasis'; var style = seriesModel.getModel(itemStyleQuery).getItemStyle(['color']); var hoverStl = seriesModel.getModel(hoverItemStyleQuery).getItemStyle(); var labelModel = seriesModel.getModel('label.normal'); var hoverLabelModel = seriesModel.getModel('label.emphasis'); data.each(['x', 'y', 'z'], function (x, y, z, idx) { var itemModel = data.getItemModel(idx); var point = cartesian.dataToPoint([x, y]); // Ignore empty data if (isNaN(z)) { return; } var rect = new graphic.Rect({ shape: { x: point[0] - width / 2, y: point[1] - height / 2, width: width, height: height }, style: { fill: data.getItemVisual(idx, 'color'), opacity: data.getItemVisual(idx, 'opacity') } }); // Optimization for large datset if (data.hasItemOption) 
{
                style = itemModel.getModel(itemStyleQuery).getItemStyle(['color']);
                hoverStl = itemModel.getModel(hoverItemStyleQuery).getItemStyle();
                labelModel = itemModel.getModel(labelQuery);
                hoverLabelModel = itemModel.getModel(hoverLabelQuery);
            }

            var rawValue = seriesModel.getRawValue(idx);
            var defaultText = '-';

            if (rawValue && rawValue[2] != null) {
                defaultText = rawValue[2];
            }

            if (labelModel.getShallow('show')) {
                graphic.setText(style, labelModel);
                style.text = seriesModel.getFormattedLabel(idx, 'normal') || defaultText;
            }

            if (hoverLabelModel.getShallow('show')) {
                graphic.setText(hoverStl, hoverLabelModel);
                hoverStl.text = seriesModel.getFormattedLabel(idx, 'emphasis') || defaultText;
            }

            rect.setStyle(style);
            graphic.setHoverStyle(rect, data.hasItemOption ? hoverStl : zrUtil.extend({}, hoverStl));
            group.add(rect);
            data.setItemGraphicEl(idx, rect);
        });
    },
    _renderOnGeo: function (geo, seriesModel, visualMapModel, api) {
        var inRangeVisuals = visualMapModel.targetVisuals.inRange;
        var outOfRangeVisuals = visualMapModel.targetVisuals.outOfRange;
        var data = seriesModel.getData();
        // Lazily create the heatmap layer once and cache it on the view.
        // (The original `this._hmLayer || (this._hmLayer || new HeatmapLayer())`
        // never assigned the layer, so it was rebuilt on every render.)
        var hmLayer = this._hmLayer || (this._hmLayer = new HeatmapLayer());
        hmLayer.blurSize = seriesModel.get('blurSize');
        hmLayer.pointSize = seriesModel.get('pointSize');
        hmLayer.minOpacity = seriesModel.get('minOpacity');
        hmLayer.maxOpacity = seriesModel.get('maxOpacity');
        var rect = geo.getViewRect().clone();
        var roamTransform = geo.getRoamTransform().transform;
        rect.applyTransform(roamTransform);
        // Clamp on viewport
        var x = Math.max(rect.x, 0);
        var y = Math.max(rect.y, 0);
        var x2 = Math.min(rect.width + rect.x, api.getWidth());
        var y2 = Math.min(rect.height + rect.y, api.getHeight());
        var width = x2 - x;
        var height = y2 - y;
        var points = data.mapArray(['lng', 'lat', 'value'], function (lng, lat, value) {
            var pt = geo.dataToPoint([lng, lat]);
            pt[0] -= x;
            pt[1] -= y;
            pt.push(value);
            return pt;
        });
        var dataExtent = visualMapModel.getExtent();
        var isInRange = visualMapModel.type === 'visualMap.continuous'
            ? getIsInContinuousRange(dataExtent, visualMapModel.option.range)
            : getIsInPiecewiseRange(
                dataExtent, visualMapModel.getPieceList(), visualMapModel.option.selected
            );
        hmLayer.update(
            points, width, height,
            inRangeVisuals.color.getNormalizer(),
            {
                inRange: inRangeVisuals.color.getColorMapper(),
                outOfRange: outOfRangeVisuals.color.getColorMapper()
            },
            isInRange
        );
        var img = new graphic.Image({
            style: {
                width: width,
                height: height,
                x: x,
                y: y,
                image: hmLayer.canvas
            },
            silent: true
        });
        this.group.add(img);
    }
});
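// --- Usage sketch (illustrative; the extent, pieces and selection flags
// below are made-up sample data). getIsInPiecewiseRange normalizes each
// piece interval onto the [0, 1] data span and returns a predicate over
// normalized values. Guarded so it only runs when executed directly.
if (require.main === module) {
    var isInRange = getIsInPiecewiseRange(
        [0, 100],                                      // dataExtent
        [{interval: [0, 50]}, {interval: [50, 100]}],  // pieceList
        [true, false]                                  // selected flag per piece
    );
    console.log(isInRange(0.25)); // true: falls in the first, selected piece
    console.log(isInRange(0.75)); // false: second piece is deselected
}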
# -*- coding: utf-8 -*- """ hyper/httplib_compat ~~~~~~~~~~~~~~~~~~~~ This file defines the publicly-accessible API for hyper. This API also constitutes the abstraction layer between HTTP/1.1 and HTTP/2. This API doesn't currently work, and is a lower priority than the HTTP/2 stack at this time. """ import socket try: import http.client as httplib except ImportError: import httplib from .compat import ssl from .http20.tls import wrap_socket # If there's no NPN support, we're going to drop all support for HTTP/2. try: support_20 = ssl.HAS_NPN except AttributeError: support_20 = False # The HTTPConnection object is currently always the underlying one. HTTPConnection = httplib.HTTPConnection HTTPSConnection = httplib.HTTPSConnection # If we have NPN support, define our custom one, otherwise just use the # default. if support_20: class HTTPSConnection(object): """ An object representing a single HTTPS connection, whether HTTP/1.1 or HTTP/2. More specifically, this object represents an abstraction over the distinction. This object encapsulates a connection object for one of the specific types of connection, and delegates most of the work to that object. """ def __init__(self, *args, **kwargs): # Whatever arguments and keyword arguments are passed to this # object need to be saved off for when we initialise one of our # subsidiary objects. self._original_args = args self._original_kwargs = kwargs # Set up some variables we're going to use later. self._sock = None self._conn = None # Prepare our backlog of method calls. self._call_queue = [] def __getattr__(self, name): # Anything that can't be found on this instance is presumably a # property of underlying connection object. # We need to be a little bit careful here. There are a few methods # that can act on a HTTPSConnection before it actually connects to # the remote server. We don't want to change the semantics of the, # HTTPSConnection so we need to spot these and queue them up. When # we actually create the backing Connection, we'll apply them # immediately. These methods can't throw exceptions, so we should # be fine. delay_methods = ["set_tunnel", "set_debuglevel"] if self._conn is None and name in delay_methods: # Return a little closure that saves off the method call to # apply later. def capture(obj, *args, **kwargs): self._call_queue.append((name, args, kwargs)) return capture elif self._conn is None: # We're being told to do something! We can now connect to the # remote server and build the connection object. self._delayed_connect() # Call through to the underlying object. return getattr(self._conn, name) def _delayed_connect(self): """ Called when we need to work out what kind of HTTPS connection we're actually going to use. """ # Because we're ghetto, we're going to quickly create a # HTTPConnection object to parse the args and kwargs for us, and # grab the values out. tempconn = httplib.HTTPConnection(*self._original_args, **self._original_kwargs) host = tempconn.host port = tempconn.port timeout = tempconn.timeout source_address = tempconn.source_address # Connect to the remote server. sock = socket.create_connection( (host, port), timeout, source_address ) # Wrap it in TLS. This needs to be looked at in future when I pull # in the TLS verification logic from urllib3, but right now we # accept insecurity because no-one's using this anyway. sock = wrap_socket(sock, host) # At this early stage the library can't do HTTP/2, so who cares? tempconn.sock = sock self._sock = sock self._conn = tempconn return
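# --- Illustration (a standalone sketch, not hyper's actual code): once the
# backing connection exists, the queued (name, args, kwargs) records captured
# by __getattr__ above can be replayed in order. The class and queue contents
# below are hypothetical.
if __name__ == '__main__':
    class _FakeConn(object):
        def set_debuglevel(self, level):
            print('debuglevel set to', level)

    call_queue = [('set_debuglevel', (1,), {})]  # as captured before connecting
    conn = _FakeConn()
    for name, args, kwargs in call_queue:
        getattr(conn, name)(*args, **kwargs)     # prints: debuglevel set to 1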
import _ from 'lodash';

import { logger } from 'lib/logger';
import { has } from 'lib/utilities';
import { buildErrorMessage } from 'lib/error-helpers';

/**
 * @typedef PaginationArticle
 * @property {string} slug - The slug of the article
 */

/**
 * Fetches a page of articles where pages are a given length
 * @param {any} database - The knex instance to query on
 * @param {number} pageLength - Length of page to be fetched
 * @param {number} pageIndex - Which page to fetch of size pageLength
 * @returns {Promise<PaginationArticle[]>} - An array of articles on the page
 */
export async function getPaginatedArticle(database, pageLength, pageIndex) {
  const offset = pageLength * pageIndex;
  const articles = await database
    .select('slug')
    .from('articles')
    .orderBy('created_at', 'DESC')
    .limit(pageLength)
    .offset(offset);
  return articles;
}

/**
 * @param {any} database - The knex instance to query on
 * @param {Object} articleData - The data for the article to be created
 * @param {string} articleData.slug
 * @param {string} articleData.title
 * @param {string} [articleData.teaser]
 * @param {string} [articleData.imageUrl]
 * @param {number} [articleData.category] - The id of the category
 * @param {Object[]} [articleData.authors]
 * @param {number} [articleData.authors[].id]
 * @param {Object[]} [articleData.tags]
 * @param {number} [articleData.tags[].id]
 * @returns {Promise<Object>} - The created article row, including any inserted
 *                              author and tag relations
 */
export async function createNewArticle(database, articleData) {
  // We first build the article object used for creation
  if (!articleData.slug || !articleData.title) {
    logger.warn('Someone tried creating an article without slug or title');
    throw new Error('Both slug and title must be provided for new articles');
  }
  const articleRow = {
    slug: articleData.slug,
    title: articleData.title,
  };
  if (articleData.teaser) {
    articleRow.teaser = articleData.teaser;
  }
  if (articleData.imageUrl) {
    articleRow.image_url = articleData.imageUrl;
  }
  if (articleData.category > 0) {
    articleRow.category_id = articleData.category;
  }
  // Remember to set created time
  articleRow.created_at = new Date();

  let createdArticle;
  try {
    const [id] = await database('articles').insert(articleRow);
    createdArticle = {
      ...articleRow,
      id,
    };
  } catch (e) {
    logger.error(e);
    throw new Error(buildErrorMessage());
  }

  const inserts = [];
  if (_.get(articleData.authors, 'length', 0) > 0) {
    const authorArticleRows = articleData.authors.map(authorObject => {
      if (!authorObject.id) {
        logger.error(
          new Error('All authors in new article must have an id passed'),
        );
        throw new Error(buildErrorMessage());
      }
      return {
        author_id: authorObject.id,
        article_id: createdArticle.id,
      };
    });
    inserts.push(database('authors_articles').insert(authorArticleRows));
    createdArticle.authors = authorArticleRows;
  }
  if (_.get(articleData.tags, 'length', 0) > 0) {
    const articleTagRows = articleData.tags.map(tagObject => {
      if (!tagObject.id) {
        logger.error(
          new Error('All tags in new article must have an id passed'),
        );
        throw new Error(buildErrorMessage());
      }
      return {
        tag_id: tagObject.id,
        article_id: createdArticle.id,
      };
    });
    inserts.push(database('articles_tags').insert(articleTagRows));
    createdArticle.tags = articleTagRows;
  }
  try {
    await Promise.all(inserts);
  } catch (e) {
    logger.error(e);
    throw new Error(buildErrorMessage());
  }
  return createdArticle;
}

/**
 * Updates the tags for a given article.
 * @param {any} database The knex instance to query on
 * @param {number} articleId The id of the article.
 * @param {number[]} newTags The ids of the new tags.
 * @returns {Promise<string[]>} The slugs of the article's updated tags.
*/ export async function updateArticleTags(database, articleId, newTags) { // Delete old tags. await database('articles_tags') .where('article_id', '=', articleId) .del(); // Insert new tags. const insertArray = _.map(newTags, tagId => ({ article_id: articleId, tag_id: tagId, })); await database('articles_tags').insert(insertArray); // Get updated slugs. const rows = await database .select('slug') .from('articles_tags') .innerJoin('tags', 'tags.id', '=', 'articles_tags.tag_id') .where('article_id', '=', articleId); return rows.map(row => row.slug); } /** * @param {database} database The database * @param {string[]} ids The ids whose tags we receive * @returns {Object} data The data */ export async function articleTagQuery(database, ids) { // ids function parameter is an array of article ids // of which to fetch the tag of. // The function returns an object with article ids // as keys and values being arrays of author ids. const rows = await database .select('articles.id as articleId', 'tags.slug as tagSlug') .from('tags') .innerJoin('articles_tags', 'tags.id', '=', 'tag_id') .innerJoin('articles', 'articles.id', '=', 'article_id') .whereIn('articles.id', ids) .orderBy('articles_tags.id', 'asc'); const data = {}; rows.forEach(row => { // This will input them in ascending order by id (which represents time they were // inserted as author of that article) as the query was structured so. if (!has.call(data, row.articleId)) { data[row.articleId] = [row.tagSlug]; } else { data[row.articleId].push(row.tagSlug); } }); return data; } // TODO: refactor this to use async await and all the other nice stuff, disabling linting until then /* eslint-disable */ function orderArticlesInIssues(database, issues) { // Issues is assumed to be an array of integers where // the integers are the ids of issues return new Promise(resolve => { let updatesCalled = 0; let updatesReturned = 0; issues.forEach(issue_id => { // Get the current categories so we know if we have to add new ones // or delete old ones database .select('category_id', 'categories_order') .from('issues_categories_order') .where('issue_id', '=', issue_id) .orderBy('categories_order', 'ASC') .then(categoryRows => { database .select( 'issues_articles_order.id as id', 'category_id', 'article_order', ) .from('issues_articles_order') .innerJoin( 'articles', 'articles.id', '=', 'issues_articles_order.article_id', ) .where('type', '=', 0) .where('issue_id', '=', issue_id) .orderBy('category_id', 'ASC') .orderBy('issues_articles_order.article_order', 'ASC') .then(articleRows => { let lastCategory = null; let order = 0; const toUpdate = []; const newCategories = []; articleRows.forEach(row => { if (lastCategory !== row.category_id) { lastCategory = row.category_id; order = 0; newCategories.push(row.category_id); } if (order !== row.article_order) { toUpdate.push({ id: row.id, update: { article_order: order, }, }); } order += 1; }); updatesCalled += toUpdate.length; toUpdate.forEach(obj => { database('issues_articles_order') .where('id', '=', obj.id) .update(obj.update) .then(() => { updatesReturned += 1; if (updatesReturned >= updatesCalled) { resolve(true); } }); }); // Check if categories are still consistent let newCategoriesWithOrder = []; let consistent = true; categoryRows.forEach(category => { if ( newCategories.find(cat => cat === category.category_id) !== undefined ) { newCategoriesWithOrder.push(category); } else { consistent = false; } }); newCategories.forEach(category_id => { if ( newCategoriesWithOrder.find( cat => cat.category_id === 
category_id, ) === undefined ) { consistent = false; const foundHole = newCategoriesWithOrder.some( (cat, index) => { if (index !== cat.categories_order) { newCategoriesWithOrder.splice(index, 0, { category_id, categories_order: index, }); return true; } return false; }, ); if (!foundHole) { newCategoriesWithOrder.push({ category_id, categories_order: newCategoriesWithOrder.length, }); } } }); if (!consistent) { newCategoriesWithOrder = newCategoriesWithOrder.map( (cat, index) => ({ ...cat, categories_order: index, issue_id, }), ); updatesCalled += 1; // Delete the current categories order and insert the new one database('issues_categories_order') .where('issue_id', '=', issue_id) .del() .then(() => { database('issues_categories_order') .insert(newCategoriesWithOrder) .then(() => { updatesReturned += 1; if (updatesReturned >= updatesCalled) { resolve(true); } }); }); } else if (updatesCalled === 0) { resolve(true); } }); }); }); }); } /* eslint-enable */ /** * Updates articles directly tied to an article * @param {any} database - The Knex object for the databse to update * @param {string} keyField - A String which indicates which db field is used as the primary key in jsonGraphArg * @param {Object} jsonGraphArg - An object of type { [keyFields]: articleUpdateObject[] } * @returns {Promise<boolean>} - Whether the update was a success */ export async function updateArticles(database, keyField, jsonGraphArg) { const articlesWithChangedCategory = []; const updatePromises = _.map(jsonGraphArg, (articleUpdater, key) => { const processedArticleUpdater = { ...articleUpdater, }; if (has.call(processedArticleUpdater, 'category')) { // Rename it to category_id and note that this article had it's category changed processedArticleUpdater.category_id = processedArticleUpdater.category; delete processedArticleUpdater.category; articlesWithChangedCategory.push(key); } return database('articles') .where(keyField, '=', key) .update(processedArticleUpdater); }); await Promise.all(updatePromises); // If categories changed make sure issue data is still consistent if (articlesWithChangedCategory.length > 0) { const issueRows = await database .distinct('issue_id') .select() .from('issues_articles_order') .innerJoin( 'articles', 'articles.id', '=', 'issues_articles_order.article_id', ) .whereIn(`articles.${keyField}`, articlesWithChangedCategory); const issuesToUpdate = issueRows.map(row => row.issue_id); // If the articles were actually published in any issues if (issuesToUpdate.length > 0) { const flag = await orderArticlesInIssues(database, issuesToUpdate); if (!flag) { const msg = `error while reordering articles in issues: ${JSON.stringify( issuesToUpdate, )}`; throw new Error(msg); } } } // It hasn't thrown an error yet so it must have been a success return true; } /** * Fetches direct meta data of articles from the articles database table * @param {any} database - The Knex object for the databse to update * @param {string} queryField - Indicates which field to query by * @param {string[]} queryParams - Array of parameters of type queryField of articles to fetch * @param {string[]} columns - Which columns of the articles table to fetch * @returns {Promise<Object[]>} */ export function articleQuery(database, queryField, queryParams, columns) { // In order to be able to identify the rows we get back we need to include the queryField if (!columns.includes(queryField)) { columns.push(queryField); } return database .select(...columns) .from('articles') .whereIn(queryField, queryParams); }
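// --- Usage sketch (illustrative; the knex configuration and the ids below
// are placeholders, not values from this codebase):
//
//   const knex = require('knex')({ client: 'mysql', connection: { /* ... */ } });
//
//   // First page of ten articles, newest first:
//   const page = await getPaginatedArticle(knex, 10, 0);
//
//   // Replace article 42's tags with tag ids 1 and 7, get back the tag slugs:
//   const slugs = await updateArticleTags(knex, 42, [1, 7]);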
/*jslint node: true */
/* eslint-env node */
'use strict';

// Require express, http, socket.io and path
var express = require('express');
var app = express();
var http = require('http').Server(app);
var io = require('socket.io')(http);
var path = require('path');

// Pick arbitrary port for server
var port = 3000;
app.set('port', (process.env.PORT || port));

// Serve static assets from public/
app.use(express.static(path.join(__dirname, 'public/')));
// Serve vue from node_modules as vue/
app.use('/vue', express.static(path.join(__dirname, '/node_modules/vue/dist/')));
// Serve leaflet from node_modules as leaflet/
app.use('/leaflet', express.static(path.join(__dirname, '/node_modules/leaflet/dist/')));
// Serve esri leaflet from node_modules as esri-leaflet/
app.use('/esri-leaflet', express.static(path.join(__dirname, '/node_modules/esri-leaflet/dist/')));
// Serve esri leaflet geocoder from node_modules as esri-leaflet-geocoder/
app.use('/esri-leaflet-geocoder', express.static(path.join(__dirname, '/node_modules/esri-leaflet-geocoder/dist/')));

// Serve customer_view.html as the root page
app.get('/', function (req, res) {
    res.sendFile(path.join(__dirname, 'source/customer_view.html'));
});
// Serve driver_view.html as /driver
app.get('/driver', function (req, res) {
    res.sendFile(path.join(__dirname, 'source/driver_view.html'));
});
// Serve manager_view.html as /manager
app.get('/manager', function (req, res) {
    res.sendFile(path.join(__dirname, 'source/manager_view.html'));
});
app.get('/js/customer_view.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/customer_view.js'));
});
app.get('/js/driver_view.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/driver_view.js'));
});
app.get('/js/manager_view.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/manager_view.js'));
});
app.get('/js/customer_node.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/customer_node.js'));
});
app.get('/js/driver_node.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/driver_node.js'));
});
app.get('/js/dispatcher_node.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'js/dispatcher_node.js'));
});
app.get('/common/common.js', function (req, res) {
    res.sendFile(path.join(__dirname, 'common/common.js'));
});
app.get('/common/common.css', function (req, res) {
    res.sendFile(path.join(__dirname, 'common/common.css'));
});
app.get('/css/customer_view.css', function (req, res) {
    res.sendFile(path.join(__dirname, 'css/customer_view.css'));
});
app.get('/css/driver_view.css', function (req, res) {
    res.sendFile(path.join(__dirname, 'css/driver_view.css'));
});
app.get('/css/manager_view.css', function (req, res) {
    res.sendFile(path.join(__dirname, 'css/manager_view.css'));
});
app.get('/resources/taxi.png', function (req, res) {
    res.sendFile(path.join(__dirname, 'resources/taxi.png'));
});
app.get('/resources/customer.png', function (req, res) {
    res.sendFile(path.join(__dirname, 'resources/customer.png'));
});

// Store data in an object to keep the global namespace clean and
// prepare for multiple instances of data if necessary
function Data() {
    this.orders = {};
    this.taxis = {};
    this.currentOrderNumber = 1000;
}

Data.prototype.getOrderNumber = function () {
    this.currentOrderNumber += 1;
    return this.currentOrderNumber;
};

/* Adds an order to the queue */
Data.prototype.addOrder = function (order) {
    var orderId = this.getOrderNumber();
    // Store the order in an "associative array" with orderId as key
    this.orders[orderId] = order;
    return
orderId; }; Data.prototype.fixOrder = function (order) { //Store the order in an "associative array" with orderId as key this.orders[order.orderId] = order; }; /* Just deleting the order when it's finished */ Data.prototype.finishOrder = function (orderId) { delete this.orders[orderId]; console.log("Deleted1 "+orderId); }; /* Only needs to know orderId. The rest is up to the client to decide */ Data.prototype.updateOrderDetails = function (order) { for (var key in order) { this.orders[order.orderId][key] = order[key]; } }; Data.prototype.getAllOrders = function () { return this.orders; }; Data.prototype.addTaxi = function (taxi) { //Store the order in an "associative array" with orderId as key this.taxis[taxi.taxiId] = taxi; }; Data.prototype.updateTaxiDetails = function (taxi) { for (var key in taxi) { this.taxis[taxi.taxiId][key] = taxi[key]; } }; Data.prototype.removeTaxi = function (taxiId) { delete this.taxis[taxiId]; }; Data.prototype.getAllTaxis = function () { return this.taxis; }; var data = new Data(); io.on('connection', function (socket) { // Send list of orders when a client connects socket.emit('initialize', { orders: data.getAllOrders(), taxis: data.getAllTaxis() }); // Add a listener for when a connected client emits an "orderTaxi" message socket.on('orderTaxi', function (order) { var orderId = data.addOrder(order); order.orderId = orderId; // send updated info to all connected clients, note the use of "io" instead of "socket" io.emit('taxiOrdered', order); // send the orderId back to the customer who ordered socket.emit('orderId', orderId); }); socket.on('resetOrder', function (order) { data.fixOrder(order); // send updated info to all connected clients, note the use of "io" instead of "socket" io.emit('taxiOrdered', order); // send the orderId back to the customer who ordered socket.emit('orderId', order.orderId); }); socket.on('addTaxi', function (taxi) { data.addTaxi(taxi); // send updated info to all connected clients, note the use of io instead of socket io.emit('taxiAdded', taxi); }); socket.on('moveTaxi', function (taxi) { data.updateTaxiDetails(taxi); // send updated info to all connected clients, note the use of io instead of socket io.emit('taxiMoved', taxi); }); socket.on('taxiQuit', function (taxi) { data.removeTaxi(taxi); console.log("Taxi",taxi,"has left the job"); // send updated info to all connected clients, note the use of io instead of socket io.emit('taxiQuit', taxi); }); socket.on('finishOrder', function (orderId) { data.finishOrder(orderId); console.log(orderId); // send updated info to all connected clients, note the use of io instead of socket io.emit('orderFinished', orderId); }); socket.on('taxiAssigned', function(order) { data.updateOrderDetails(order); io.emit('currentQueue', { orders: data.getAllOrders() }); }); socket.on('orderAccepted', function(order) { data.updateOrderDetails(order); io.emit('orderAccepted', order ); }) }); var server = http.listen(app.get('port'), function () { console.log('Server listening on port ' + app.get('port')); });
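// --- Client-side sketch (illustrative; socket.io-client and the order
// payload fields are assumptions, not defined by this server):
//
//   var socket = require('socket.io-client')('http://localhost:3000');
//   socket.on('initialize', function (data) {
//       console.log(data.orders, data.taxis);
//   });
//   socket.emit('orderTaxi', { fromLatLong: [59.84, 17.64], destLatLong: [59.85, 17.65] });
//   socket.on('orderId', function (id) { console.log('Queued as order', id); });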
/** * @fileoverview added by tsickle * @suppress {checkTypes,extraRequire,missingReturn,uselessCode} checked by tsc */ /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { KeyValueDiffers, Pipe } from '@angular/core'; /** * @template K, V * @param {?} key * @param {?} value * @return {?} */ function makeKeyValuePair(key, value) { return { key: key, value: value }; } /** * A key value pair. * Usually used to represent the key value pairs from a Map or Object. * * \@publicApi * @record * @template K, V */ export function KeyValue() { } if (false) { /** @type {?} */ KeyValue.prototype.key; /** @type {?} */ KeyValue.prototype.value; } /** * \@ngModule CommonModule * \@description * * Transforms Object or Map into an array of key value pairs. * * The output array will be ordered by keys. * By default the comparator will be by Unicode point value. * You can optionally pass a compareFn if your keys are complex types. * * \@usageNotes * ### Examples * * This examples show how an Object or a Map can be iterated by ngFor with the use of this keyvalue * pipe. * * {\@example common/pipes/ts/keyvalue_pipe.ts region='KeyValuePipe'} * * \@publicApi */ export class KeyValuePipe { /** * @param {?} differs */ constructor(differs) { this.differs = differs; this.keyValues = []; } /** * @template K, V * @param {?} input * @param {?=} compareFn * @return {?} */ transform(input, compareFn = defaultComparator) { if (!input || (!(input instanceof Map) && typeof input !== 'object')) { return null; } if (!this.differ) { // make a differ for whatever type we've been passed in this.differ = this.differs.find(input).create(); } /** @type {?} */ const differChanges = this.differ.diff((/** @type {?} */ (input))); if (differChanges) { this.keyValues = []; differChanges.forEachItem((r) => { this.keyValues.push(makeKeyValuePair(r.key, (/** @type {?} */ (r.currentValue)))); }); this.keyValues.sort(compareFn); } return this.keyValues; } } KeyValuePipe.decorators = [ { type: Pipe, args: [{ name: 'keyvalue', pure: false },] } ]; /** @nocollapse */ KeyValuePipe.ctorParameters = () => [ { type: KeyValueDiffers } ]; if (false) { /** @type {?} */ KeyValuePipe.prototype.differ; /** @type {?} */ KeyValuePipe.prototype.keyValues; /** @type {?} */ KeyValuePipe.prototype.differs; } /** * @template K, V * @param {?} keyValueA * @param {?} keyValueB * @return {?} */ export function defaultComparator(keyValueA, keyValueB) { /** @type {?} */ const a = keyValueA.key; /** @type {?} */ const b = keyValueB.key; // if same exit with 0; if (a === b) return 0; // make sure that undefined are at the end of the sort. if (a === undefined) return 1; if (b === undefined) return -1; // make sure that nulls are at the end of the sort. if (a === null) return 1; if (b === null) return -1; if (typeof a == 'string' && typeof b == 'string') { return a < b ? -1 : 1; } if (typeof a == 'number' && typeof b == 'number') { return a - b; } if (typeof a == 'boolean' && typeof b == 'boolean') { return a < b ? -1 : 1; } // `a` and `b` are of different types. Compare their string values. /** @type {?} */ const aString = String(a); /** @type {?} */ const bString = String(b); return aString == bString ? 0 : aString < bString ? 
-1 : 1;
}
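// --- Usage sketch (illustrative; the sample pairs are made up).
// defaultComparator orders entries by key and pushes undefined/null keys
// to the end, which is how KeyValuePipe sorts when no compareFn is given:
//
//   const pairs = [{key: 'b', value: 2}, {key: undefined, value: 0}, {key: 'a', value: 1}];
//   pairs.sort(defaultComparator);
//   // -> keys in order: 'a', 'b', undefined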
import logging import urllib from mandrill_email import send_email from application.handlers.base import BaseHandler class TaskQueueEmailsHandler(BaseHandler): def post(self): if self.POST("subject") and self.POST("email_type"): logging.info("prepping email") content = {} for arg in self.request.arguments(): values = self.request.get(arg) content[arg] = values logging.info(content) send_email( receiver_name=self.POST("receiver_name"), receiver_email=urllib.unquote(self.POST("receiver_email")), subject=self.POST("subject"), content=content, email_type=self.POST("email_type"))
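# --- Enqueueing sketch (illustrative; the '/tasks/emails' route and the
# payload fields are assumptions about how this handler is wired up):
#
#   from google.appengine.api import taskqueue
#   taskqueue.add(
#       url='/tasks/emails',
#       params={
#           'subject': 'Welcome!',
#           'email_type': 'welcome',
#           'receiver_name': 'Ada',
#           'receiver_email': urllib.quote('ada@example.com'),
#       })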
// @flow import React from 'react'; import { FormattedMessage, injectIntl } from 'react-intl'; import type { $npm$ReactIntl$IntlShape } from 'react-intl'; import { ReactComponent as ErrorIcon } from './svg/Error.svg'; import './BluetoothApiWarning.scss'; type BluetoothApiWarningProps = { intl: $npm$ReactIntl$IntlShape }; class BluetoothApiWarning extends React.Component<BluetoothApiWarningProps, {}> { render() { return ( <div className='BluetoothApiWarning'> <span role='img' aria-label={this.props.intl.formatMessage({ id: 'BluetoothApiWarning.errorIconLabel' })}> <ErrorIcon className='BluetoothApiWarning__error-icon-svg' /> </span> <FormattedMessage id='BluetoothApiWarning.message' /> </div> ); } } export default injectIntl(BluetoothApiWarning);
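// --- Usage sketch (illustrative): render the warning only when the Web
// Bluetooth API is unavailable. `navigator.bluetooth` is the standard
// feature-detect; the surrounding component is hypothetical.
//
//   {!navigator.bluetooth && <BluetoothApiWarning />}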
const webpack = require('webpack');
const path = require('path');

module.exports = {
  entry: {
    app: './src/client/app.js',
    'react.bundle': [
      'react',
      'react-dom',
      'react-redux',
      'react-router',
      'react-router-redux',
      'redux',
      'redux-thunk',
    ]
  },
  output: {
    path: path.join(__dirname, '/public'),
    filename: 'bundle.js',
    publicPath: '/'
  },
  module: {
    rules: [
      {
        test: /\.js$/,
        include: path.join(__dirname, 'src'),
        loader: 'babel-loader',
        exclude: /node_modules/,
        query: {
          presets: ['es2015', 'react']
        },
      },
      {
        // The alternation must be grouped; /\.scss|\.css$/ would match
        // ".scss" anywhere in the path instead of only at the end
        test: /\.(scss|css)$/,
        loaders: [
          'style-loader',
          'css-loader?modules&importLoaders=1&localIdentName=[name]__[local]__[hash:base64:5]',
          'sass-loader']
      },
      {
        // Same grouping fix (the old /\.svg|.png|.jpg$/ also left the dots unescaped)
        test: /\.(svg|png|jpg)$/,
        loader: 'file-loader'
      }
    ]
  },
  plugins: [
    new webpack.optimize.CommonsChunkPlugin({
      name: 'react.bundle',
      filename: 'react.bundle.js'
    }),
    // These plugins below could be run just in production, for optimization.
    // I did it here because I'd like to show how small the files can get.
    // new webpack.optimize.OccurrenceOrderPlugin(),
    // new webpack.optimize.DedupePlugin(),
    // new webpack.optimize.UglifyJsPlugin({
    //   compress: {
    //     unused: true,
    //     dead_code: true,
    //     warnings: false
    //   }
    // }),
    // new webpack.optimize.AggressiveMergingPlugin(),
    // new webpack.DefinePlugin({
    //   'process.env': {
    //     NODE_ENV: JSON.stringify('production')
    //   }
    // })
  ],
};
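// --- Quick check of the corrected test patterns above (illustrative):
//
//   /\.scss|\.css$/.test('theme.scss.d.ts');  // true  (old pattern: '.scss' was unanchored)
//   /\.(scss|css)$/.test('theme.scss.d.ts');  // false (anchored, grouped alternation)
//   /\.(scss|css)$/.test('app.css');          // true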
from __future__ import unicode_literals

import datetime
import logging
from inspect import isclass

from django.core.exceptions import ImproperlyConfigured, FieldDoesNotExist
from django.db.models import Q

from .fields import SlickReportField
from .helpers import get_field_from_query_text
from .registry import field_registry

logger = logging.getLogger(__name__)


class ReportGenerator(object):
    """
    The main class responsible for generating the report and managing the flow.
    It generates a JSON-serializable object containing the report data.
    """

    field_registry_class = field_registry
    """You can have a custom computation field locator! It only needs a
    `get_field_by_name(string)` method that returns a `ReportField`"""

    report_model = None
    """The main model where the data is"""

    date_field = None
    """Main date field to use whenever a date filter is needed"""

    print_flag = None
    list_display_links = []

    group_by = None
    """The field to use for grouping. If not set, the report is expected to be
    a sub version of the report model"""

    columns = None
    """A list of column names. Column names can be

    1. A computation field

    2. If group_by is set, any field on the group_by model

    3. If group_by is not set, any field name on the report_model / queryset

    4. A callable on the generator

    5. The specials __time_series__ and __crosstab__
       These can be used to control the position of the time series inside the
       columns; by default it is appended at the end

       Example:
       columns = ['product_id', '__time_series__', 'col_b']
       The same is true for __crosstab__
    """

    time_series_pattern = ''
    """
    If set, the report will compute a time series.
    Possible options are: daily, weekly, semimonthly, monthly, quarterly,
    semiannually, annually and custom.
    If `custom` is set, you'd need to override `get_custom_time_series_dates`
    """
    time_series_columns = None
    """
    A list of computation field names which will be included in the series
    calculation.
    Example: ['__total__', '__total_quantity__'] will compute those 2 fields
    for all the series
    """

    crosstab_model = None
    """
    If set, a crosstab is computed over this model's selected ids
    (via `crosstab_ids`)
    """
    crosstab_columns = None
    """The computation fields which will be computed for each crosstab-ed id"""

    crosstab_ids = None
    """A list of the ids to create a crosstab report on"""

    crosstab_compute_reminder = True
    """Include an extra set of crosstab_columns for the remainder group
    (i.e. everything except those `crosstab_ids`)"""

    show_empty_records = True
    """
    If group_by is set, this option controls whether the report result
    includes all objects, regardless of them appearing in the report_model/qs.

    Example: say you group by client.
    show_empty_records = True gets the computation fields for all clients in
    the Client model, including those who didn't make a transaction.
    show_empty_records = False only includes clients that actually appear in
    the report_model/qs (i.e. those who made a transaction).
    """

    limit_records = None
    """Serves as a hard limit on the rows returned from the report_model.
    Can be beneficial if the results may be huge.
""" swap_sign = False def __init__(self, report_model=None, main_queryset=None, start_date=None, end_date=None, date_field=None, q_filters=None, kwargs_filters=None, group_by=None, columns=None, time_series_pattern=None, time_series_columns=None, crosstab_model=None, crosstab_columns=None, crosstab_ids=None, crosstab_compute_reminder=None, swap_sign=False, show_empty_records=None, print_flag=False, doc_type_plus_list=None, doc_type_minus_list=None, limit_records=False, ): """ :param report_model: Main model containing the data :param main_queryset: Default to report_model.objects :param start_date: :param end_date: :param date_field: :param q_filters: :param kwargs_filters: :param group_by: :param columns: :param time_series_pattern: :param time_series_columns: :param crosstab_model: :param crosstab_columns: :param crosstab_ids: :param crosstab_compute_reminder: :param swap_sign: :param show_empty_records: :param base_model: :param print_flag: :param doc_type_plus_list: :param doc_type_minus_list: :param limit_records: """ from .app_settings import SLICK_REPORTING_DEFAULT_START_DATE, SLICK_REPORTING_DEFAULT_END_DATE super(ReportGenerator, self).__init__() self.report_model = self.report_model or report_model if not self.report_model: raise ImproperlyConfigured('report_model must be set on a class level or via init') self.start_date = start_date or datetime.datetime.combine(SLICK_REPORTING_DEFAULT_START_DATE.date(), SLICK_REPORTING_DEFAULT_START_DATE.time()) self.end_date = end_date or datetime.datetime.combine(SLICK_REPORTING_DEFAULT_END_DATE.date(), SLICK_REPORTING_DEFAULT_END_DATE.time()) self.date_field = self.date_field or date_field if not self.date_field: raise ImproperlyConfigured('date_field must be set on a class level or via init') self.q_filters = q_filters or [] self.kwargs_filters = kwargs_filters or {} self.crosstab_model = self.crosstab_model or crosstab_model self.crosstab_columns = crosstab_columns or self.crosstab_columns or [] self.crosstab_ids = self.crosstab_ids or crosstab_ids or [] self.crosstab_compute_reminder = self.crosstab_compute_reminder if crosstab_compute_reminder is None else crosstab_compute_reminder main_queryset = main_queryset or self.report_model.objects self.columns = self.columns or columns or [] self.group_by = self.group_by or group_by self.time_series_pattern = self.time_series_pattern or time_series_pattern self.time_series_columns = self.time_series_columns or time_series_columns self._prepared_results = {} self.report_fields_classes = {} self._report_fields_dependencies = {'time_series': {}, 'crosstab': {}, 'normal': {}} self.existing_dependencies = {'series': [], 'matrix': [], 'normal': []} self.print_flag = print_flag or self.print_flag # todo validate columns is not empty (if no time series / cross tab) if self.group_by: try: self.group_by_field = [x for x in self.report_model._meta.fields if x.name == self.group_by][0] except IndexError: raise ImproperlyConfigured( f'Can not find group_by field:{self.group_by} in report_model {self.report_model} ') self.focus_field_as_key = self.group_by self.group_by_field_attname = self.group_by_field.attname else: self.focus_field_as_key = None self.group_by_field_attname = None # doc_types = form.get_doc_type_plus_minus_lists() doc_types = [], [] self.doc_type_plus_list = list(doc_type_plus_list) if doc_type_plus_list else doc_types[0] self.doc_type_minus_list = list(doc_type_minus_list) if doc_type_minus_list else doc_types[1] self.swap_sign = self.swap_sign or swap_sign self.limit_records = 
self.limit_records or limit_records # passed to the report fields # self.date_field = date_field or self.date_field # in case of a group by, do we show a grouped by model data regardless of their appearance in the results # a client who didnt make a transaction during the date period. self.show_empty_records = False # show_empty_records if show_empty_records else self.show_empty_records # Looks like this options is harder then what i thought as it interfere with the usual filtering of the report # Preparing actions self._parse() if self.group_by: if self.show_empty_records: pass # group_by_filter = self.kwargs_filters.get(self.group_by, '') # qs = self.group_by_field.related_model.objects # if group_by_filter: # import pdb; pdb.set_trace() # lookup = 'pk__in' if isinstance(group_by_filter, Iterable) else 'pk' # qs = qs.filter(**{lookup: group_by_filter}) # self.main_queryset = qs.values() else: self.main_queryset = self._apply_queryset_options(main_queryset) ids = self.main_queryset.values_list(self.group_by_field.attname).distinct() self.main_queryset = self.group_by_field.related_model.objects.filter(pk__in=ids).values() else: self.main_queryset = self._apply_queryset_options(main_queryset, self.get_database_columns()) self._prepare_report_dependencies() def _apply_queryset_options(self, query, fields=None): """ Apply the filters to the main queryset which will computed results be mapped to :param query: :param fields: :return: """ filters = { f'{self.date_field}__gt': self.start_date, f'{self.date_field}__lte': self.end_date, } filters.update(self.kwargs_filters) if filters: query = query.filter(**filters) if fields: return query.values(*fields) return query.values() def _construct_crosstab_filter(self, col_data): """ In charge of adding the needed crosstab filter, specific to the case of is_reminder or not :param col_data: :return: """ if col_data['is_reminder']: filters = [~Q(**{f"{col_data['model']}_id__in": self.crosstab_ids})] else: filters = [Q(**{f"{col_data['model']}_id": col_data['id']})] return filters def _prepare_report_dependencies(self): from .fields import SlickReportField all_columns = ( ('normal', self._parsed_columns), ('time_series', self._time_series_parsed_columns), ('crosstab', self._crosstab_parsed_columns), ) for window, window_cols in all_columns: for col_data in window_cols: klass = col_data['ref'] if isclass(klass) and issubclass(klass, SlickReportField): dependencies_names = klass.get_full_dependency_list() # check if any of this dependencies is on the report fields_on_report = [x for x in window_cols if x['ref'] in dependencies_names] for field in fields_on_report: self._report_fields_dependencies[window][field['name']] = col_data['name'] # import pdb; pdb.set_trace() for col_data in window_cols: klass = col_data['ref'] # if getattr(klass, 'name', '') not in klasses_names: # continue name = col_data['name'] # if column has a dependency then skip it if not (isclass(klass) and issubclass(klass, SlickReportField)): continue if self._report_fields_dependencies[window].get(name, False): continue report_class = klass(self.doc_type_plus_list, self.doc_type_minus_list, group_by=self.group_by, report_model=self.report_model, date_field=self.date_field) q_filters = None date_filter = { f'{self.date_field}__gt': col_data.get('start_date', self.start_date), f'{self.date_field}__lte': col_data.get('end_date', self.end_date), } date_filter.update(self.kwargs_filters) if window == 'crosstab': q_filters = self._construct_crosstab_filter(col_data) # print(f'preparing 
{report_class} for {window}') report_class.prepare(q_filters, date_filter) self.report_fields_classes[name] = report_class def _get_record_data(self, obj, columns): """ the function is run for every obj in the main_queryset :param obj: current row :param: columns: The columns we iterate on :return: a dict object containing all needed data """ # todo , if columns are empty for whatever reason this will throw an error display_link = self.list_display_links or columns[0] data = {} group_by_val = None if self.group_by: column_data = obj.get(self.group_by_field_attname, obj.get('id')) group_by_val = str(column_data) for window, window_cols in columns: for col_data in window_cols: name = col_data['name'] if col_data.get('source', '') == 'magic_field' and self.group_by: source = self._report_fields_dependencies[window].get(name, False) if source: computation_class = self.report_fields_classes[source] value = computation_class.get_dependency_value(group_by_val, col_data['ref'].name) else: try: computation_class = self.report_fields_classes[name] except KeyError: continue value = computation_class.resolve(group_by_val) if self.swap_sign: value = -value data[name] = value else: data[name] = obj.get(name, '') # if self.group_by and name in display_link: # data[name] = make_linkable_field(self.group_by_field.related_model, group_by_val, data[name]) return data def get_report_data(self): main_queryset = self.main_queryset[:self.limit_records] if self.limit_records else self.main_queryset all_columns = ( ('normal', self._parsed_columns), ('time_series', self._time_series_parsed_columns), ('crosstab', self._crosstab_parsed_columns), ) get_record_data = self._get_record_data data = [get_record_data(obj, all_columns) for obj in main_queryset] return data @classmethod def check_columns(cls, columns, group_by, report_model, ): """ Check and parse the columns, throw errors in case an item in the columns cant not identified :param columns: List of columns :param group_by: group by field if any :param report_model: the report model :return: List of dict, each dict contains relevant data to the respective field in `columns` """ group_by_model = None if group_by: group_by_field = [x for x in report_model._meta.fields if x.name == group_by][0] group_by_model = group_by_field.related_model parsed_columns = [] for col in columns: magic_field_class = None attr = None if type(col) is str: attr = getattr(cls, col, None) elif issubclass(col, SlickReportField): magic_field_class = col try: magic_field_class = magic_field_class or field_registry.get_field_by_name(col) except KeyError: magic_field_class = None if attr: # todo Add testing here col_data = {'name': col, 'verbose_name': getattr(attr, 'verbose_name', col), # 'type': 'method', 'ref': attr, 'type': 'text' } elif magic_field_class: # a magic field if col in ['__time_series__', '__crosstab__']: # These are placeholder not real computation field continue col_data = {'name': magic_field_class.name, 'verbose_name': magic_field_class.verbose_name, 'source': 'magic_field', 'ref': magic_field_class, 'type': magic_field_class.type, 'is_summable': magic_field_class.is_summable } else: # A database field model_to_use = group_by_model if group_by else report_model try: if '__' in col: # A traversing link order__client__email field = get_field_from_query_text(col, model_to_use) else: field = model_to_use._meta.get_field(col) except FieldDoesNotExist: raise FieldDoesNotExist( f'Field "{col}" not found either as an attribute to the generator class {cls}, ' f'or a computation 
field, or a database column for the model "{model_to_use}"')

                col_data = {'name': col,
                            'verbose_name': getattr(field, 'verbose_name', col),
                            'source': 'database',
                            'ref': field,
                            'type': field.get_internal_type()
                            }
            parsed_columns.append(col_data)
        return parsed_columns

    def _parse(self):
        self.parsed_columns = self.check_columns(self.columns, self.group_by, self.report_model)
        self._parsed_columns = list(self.parsed_columns)
        self._time_series_parsed_columns = self.get_time_series_parsed_columns()
        self._crosstab_parsed_columns = self.get_crosstab_parsed_columns()

    def get_database_columns(self):
        return [col['name'] for col in self.parsed_columns if col['source'] == 'database']

    def get_method_columns(self):
        return [col['name'] for col in self.parsed_columns if col['type'] == 'method']

    def get_list_display_columns(self):
        columns = self.parsed_columns
        if self.time_series_pattern:
            time_series_columns = self.get_time_series_parsed_columns()
            try:
                index = self.columns.index('__time_series__')
                columns[index] = time_series_columns
            except ValueError:
                columns += time_series_columns
        if self.crosstab_model:
            crosstab_columns = self.get_crosstab_parsed_columns()
            try:
                index = self.columns.index('__crosstab__')
                columns[index] = crosstab_columns
            except ValueError:
                columns += crosstab_columns
        return columns

    def get_time_series_parsed_columns(self):
        """
        Return the time series columns with all needed data attached.
        :return: list of column dicts
        """
        _values = []
        cols = self.time_series_columns or []
        series = self._get_time_series_dates()
        for dt in series:
            for col in cols:
                magic_field_class = None
                if type(col) is str:
                    magic_field_class = field_registry.get_field_by_name(col)
                elif issubclass(col, SlickReportField):
                    magic_field_class = col

                _values.append({
                    'name': col + 'TS' + dt[1].strftime('%Y%m%d'),
                    'original_name': col,
                    'verbose_name': self.get_time_series_field_verbose_name(magic_field_class, dt),
                    'ref': magic_field_class,
                    'start_date': dt[0],
                    'end_date': dt[1],
                    'source': 'magic_field' if magic_field_class else '',
                    'is_summable': magic_field_class.is_summable,
                })
        return _values

    def get_time_series_field_verbose_name(self, computation_class, date_period):
        """
        Send the column data to construct a verbose name. The default
        implementation is delegated to
        ReportField.get_time_series_field_verbose_name (which is the field
        name plus the end date as %Y%m%d).
        :param computation_class: the computation field
        :param date_period: a tuple of (start_date, end_date)
        :return: a verbose string
        """
        return computation_class.get_time_series_field_verbose_name(date_period)

    def get_custom_time_series_dates(self):
        """
        Hook to supply custom, possibly non-contiguous, date periods.
        :return: [ (date1, date2), (date3, date4), ...
] """ return [] def _get_time_series_dates(self): from dateutil.relativedelta import relativedelta _values = [] series = self.time_series_pattern if series: if series == 'daily': time_delta = datetime.timedelta(days=1) elif series == 'weekly': time_delta = relativedelta(weeks=1) elif series == 'semimonthly': time_delta = relativedelta(weeks=2) elif series == 'monthly': time_delta = relativedelta(months=1) elif series == 'quarterly': time_delta = relativedelta(months=3) elif series == 'semiannually': time_delta = relativedelta(months=6) elif series == 'annually': time_delta = relativedelta(year=1) elif series == 'custom': return self.get_custom_time_series_dates() else: raise NotImplementedError() done = False start_date = self.start_date while not done: to_date = start_date + time_delta _values.append((start_date, to_date)) start_date = to_date if to_date >= self.end_date: done = True return _values def get_crosstab_parsed_columns(self): """ Return a list of the columns analyzed , with reference to computation field and everything :return: """ report_columns = self.crosstab_columns or [] ids = list(self.crosstab_ids) if self.crosstab_compute_reminder: ids.append('----') output_cols = [] ids_length = len(ids) - 1 for counter, id in enumerate(ids): for col in report_columns: magic_field_class = None if type(col) is str: magic_field_class = field_registry.get_field_by_name(col) elif issubclass(col, SlickReportField): magic_field_class = col output_cols.append({ 'name': f'{col}CT{id}', 'original_name': col, 'verbose_name': self.get_crosstab_field_verbose_name(magic_field_class, self.crosstab_model, id), 'ref': magic_field_class, 'id': id, 'model': self.crosstab_model, 'is_reminder': counter == ids_length, 'source': 'magic_field' if magic_field_class else '', 'is_summable': magic_field_class.is_summable, }) return output_cols def get_crosstab_field_verbose_name(self, computation_class, model, id): """ Hook to change the crosstab field verbose name, default it delegate this function to the ReportField :param computation_class: ReportField Class :param model: the model name as string :param id: the current crosstab id :return: a verbose string """ return computation_class.get_crosstab_field_verbose_name(model, id)
a = []                # an empty list
b = [None] * 10       # a list of ten None placeholders
c = [40, 10, 70, 60]
print(c)              # [40, 10, 70, 60]
print(c[0])           # first element: 40
print(c[-1])          # last element: 60
c.pop()               # remove the last element
print(c)              # [40, 10, 70]
c.pop(0)              # remove the first element
print(c)              # [10, 70]
c.append(90)          # append to the end
print(len(c))         # 3
print(c)              # [10, 70, 90]
'use strict'; class Component extends THREE.Object3D { constructor() { super(); } negWireframe() { this.children.forEach(function (element) { element.material.wireframe = !element.material.wireframe; }); } } class Base extends Component { constructor() { super(); this.top = new Cube([25, 2, 25], [0, -1, 0]); this.wheels = { TL: new Sphere(2, [-10.5, -4, -10.5]), TR: new Sphere(2, [10.5, -4, -10.5]), BL: new Sphere(2, [-10.5, -4, 10.5]), BR: new Sphere(2, [10.5, -4, 10.5]) }; this.pivot = new Calot(3, [0, Math.PI / 2], [0, 0, 0]); this.add(this.top); this.add(this.wheels.TL); this.add(this.wheels.TR); this.add(this.wheels.BL); this.add(this.wheels.BR); this.add(this.pivot); } pivotRot(angle) { const Axis = new THREE.Vector3(0, 1, 0); this.pivot.rotateOnWorldAxis(Axis, angle); } wheelsRotSide(angle) { this.wheels.TL.rotateZ(angle); this.wheels.TR.rotateZ(angle); this.wheels.BL.rotateZ(angle); this.wheels.BR.rotateZ(angle); } wheelsRotForward(angle) { this.wheels.TL.rotateX(angle); this.wheels.TR.rotateX(angle); this.wheels.BL.rotateX(angle); this.wheels.BR.rotateX(angle); } } class Arm extends Component { constructor() { super(); this.arm = new Cube([2, 15, 2], [0, 10, 0]); this.forearm = new Cube([15, 2, 2], [9, 19, 0]); this.joints = { elbow: new Sphere(2, [0, 19, 0]), wrist: new Sphere(2, [18, 19, 0]) }; this.palm = new Cube([1, 5, 1.5], [20, 19, 0]); this.fingers = { top: new Cube([5, 0.5, 0.5], [22.5, 20.25, 0]), bottom: new Cube([5, 0.5, 0.5], [22.5, 17.75, 0]), }; this.add(this.arm); this.add(this.forearm); this.add(this.joints.elbow); this.add(this.joints.wrist); this.add(this.palm); this.add(this.fingers.top); this.add(this.fingers.bottom); } axisRotY(angle) { const Axis = new THREE.Vector3(0, 1, 0); this.rotateOnWorldAxis(Axis, angle); } } class Target extends Component { constructor() { super(); this.base = new Cube([4, 25, 4], [30, 6.5, 0]); this.target = new Torus(2, 0.5, [30, 21.5, 0]); this.add(this.base); this.add(this.target) } }
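// A hypothetical usage sketch for the components above. It assumes a THREE.js
// scene is already set up and that the Cube/Sphere/Calot/Torus helpers used by
// the constructors are defined elsewhere in this project; it only exercises
// the public methods shown here.
const scene = new THREE.Scene();
const base = new Base();
const arm = new Arm();
const target = new Target();
scene.add(base);
scene.add(arm);
scene.add(target);

// Toggle wireframe on every mesh of the base, steer the wheels slightly, and
// swing the whole arm around the world Y axis.
base.negWireframe();
base.wheelsRotSide(Math.PI / 8);
arm.axisRotY(Math.PI / 4);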
#ifndef SRC_SPELLCHECKER_HUNSPELL_H_ #define SRC_SPELLCHECKER_HUNSPELL_H_ #include "spellchecker.h" #include "transcoder.h" class Hunspell; namespace spellchecker { class HunspellSpellchecker : public SpellcheckerImplementation { public: HunspellSpellchecker(); ~HunspellSpellchecker(); bool SetDictionary(const std::string& language, const std::string& path); std::vector<std::string> GetAvailableDictionaries(const std::string& path); std::vector<std::string> GetCorrectionsForMisspelling(const std::string& word); bool IsMisspelled(const std::string& word); std::vector<MisspelledRange> CheckSpelling(const uint16_t *text, size_t length); void Add(const std::string& word); void Remove(const std::string& word); private: Hunspell* hunspell; Transcoder *transcoder; Transcoder *toDictionaryTranscoder; Transcoder *fromDictionaryTranscoder; }; } // namespace spellchecker #endif // SRC_SPELLCHECKER_HUNSPELL_H_
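// A hypothetical caller sketch for the interface above. The include name,
// dictionary name and search path are assumptions about this project; error
// handling is omitted for brevity.
#include <string>
#include <vector>

#include "spellchecker_hunspell.h"  // assumed header name for this file

int main() {
  spellchecker::HunspellSpellchecker checker;
  if (checker.SetDictionary("en_US", "/usr/share/hunspell")) {
    if (checker.IsMisspelled("wrold")) {
      std::vector<std::string> suggestions =
          checker.GetCorrectionsForMisspelling("wrold");
      // suggestions might contain e.g. "world", "word", ...
    }
  }
  return 0;
}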
({ "descTemplate": "${2} - ${1} ${0} 之 ${3}", "firstTip": "首頁", "lastTip": "末頁", "nextTip": "下一頁", "prevTip": "上一頁", "itemTitle": "項目", "pageStepLabelTemplate": "頁面 ${0}", "pageSizeLabelTemplate": "每頁 ${0} 個項目", "allItemsLabelTemplate": "所有項目", "gotoButtonTitle": "跳至特定的頁面", "dialogTitle": "跳至頁面", "dialogIndication": "指定頁碼", "pageCountIndication": " (${0} 頁)", "dialogConfirm": "執行", "dialogCancel": "取消" })
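// The ${n} placeholders in the bundle above are positional. A tiny,
// hypothetical helper illustrating how a consumer might fill them (the real
// pager widget that uses these strings lives elsewhere):
function substitute(template, args) {
  return template.replace(/\$\{(\d+)\}/g, (m, i) => String(args[i]));
}
substitute("頁面 ${0}", [5]);    // -> "頁面 5"
substitute(" (${0} 頁)", [12]);  // -> " (12 頁)"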
#pragma once #include "stdafx.h" class HitboxComponent { private: sf::Sprite& sprite; sf::RectangleShape hitbox; sf::Vector2f offset; public: bool render = false; HitboxComponent(sf::Sprite& sprite, sf::Vector2f offset, sf::Vector2f size); ~HitboxComponent(); //Functions void Update(const float& DeltaTime); void Render(sf::RenderTarget* target); bool checkCollision(HitboxComponent* hitbox); friend sf::RectangleShape getHitbox(HitboxComponent* hitboxComponent) { return hitboxComponent->hitbox; } };
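// A hypothetical usage sketch for the component above, assuming an SFML
// sprite and render window already exist; the offset and size values are
// made up for illustration.
#include <SFML/Graphics.hpp>

void example(sf::Sprite& playerSprite, sf::RenderWindow& window)
{
	HitboxComponent hitbox(playerSprite, sf::Vector2f(0.f, 0.f), sf::Vector2f(32.f, 48.f));
	hitbox.render = true;   // draw the hitbox for debugging

	hitbox.Update(0.016f);  // one frame at ~60 FPS
	hitbox.Render(&window);
}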
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)

from nose.tools import (assert_true, assert_raises, assert_is,
                        assert_is_not, assert_equal)

from ggplot import *
from ggplot.utils import six

import pandas as pd


def test_data_transforms():
    import numpy as np
    p = ggplot(aes(x="np.log(price)"), data=diamonds)
    with assert_raises(Exception):
        # 'ap' is not a defined name, so evaluating the aesthetic must fail
        p = ggplot(aes(x="ap.log(price)"), data=diamonds)


def test_no_data_leak():
    cols_before = diamonds.columns.copy()
    import numpy as np
    p = ggplot(aes(x="np.log(price)"), data=diamonds)
    cols_after = diamonds.columns.copy()
    assert_same_elements(cols_before, cols_after)
    assert_is_not(diamonds, p.data)


def test_geom_with_data():
    gg = ggplot(mtcars, aes("wt", "mpg")) + geom_point()
    cols_before = gg.data.columns.copy()
    _text = geom_text(aes(label="name"), data=mtcars[mtcars.cyl == 6])
    g2 = gg + _text
    # Datasets are shared between ggplot objects, but it is not allowed to
    # change the columns in a dataset after the initial ggplot(aes) call.
    assert_is_not(g2.data, mtcars, "Adding a dataset to a geom changed the data in ggplot")
    assert_same_elements(cols_before, g2.data.columns)


def test_deepcopy():
    from copy import deepcopy
    p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
    p2 = deepcopy(p)
    assert_is_not(p, p2)
    # Not sure what we have to do for that...
    #assert_equal(p, p2)
    assert_is(p.data, p2.data)
    assert_equal(len(p.geoms), len(p2.geoms))
    assert_is_not(p.geoms[0], p2.geoms[0])
    assert_equal(len(p.aesthetics), len(p2.aesthetics))
    assert_is_not(p.aesthetics, p2.aesthetics)
    assert_is(p.aesthetics.__eval_env__, p2.aesthetics.__eval_env__)


@cleanup
def test_axis_changes_applied_to_all_axis():
    # see https://github.com/yhat/ggplot/issues/147
    # http://stackoverflow.com/questions/20807212/in-ggplot-for-python-specify-global-xlim-in-facet-wrap
    p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
    p = p + facet_wrap("cut", scales="free_y")
    fig = (p + xlim(0, 1500)).draw()
    for ax in fig.axes:
        assert_true(ax.get_xlim() == (0, 1500))
    fig = (p + xlab(u"test")).draw()
    for ax in fig.axes:
        assert_true(ax.get_xlabel() == u"test")


@cleanup
def test_axis_changes_applied_to_all_axis_visual():
    # see https://github.com/yhat/ggplot/issues/147
    # http://stackoverflow.com/questions/20807212/in-ggplot-for-python-specify-global-xlim-in-facet-wrap
    p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
    p = p + facet_wrap("cut", scales="free_y")
    assert_same_ggplot(p + xlim(-10, 5100) + xlab(u"test"), "axis_changes_to_all")


@cleanup
def test_different_markers():
    from ggplot.components import assign_visual_mapping
    from ggplot.components.shapes import shape_gen
    # First the generator which assigns the shapes
    shape = shape_gen()
    assert_true(six.next(shape) != six.next(shape), "Subsequent shapes are not different!")
    shape = shape_gen()
    assert_true(six.next(shape) == 'o', "First shape is not 'o'")
    assert_true(six.next(shape) == '^', "Second shape is not '^'")
    # Do shapes show up in the transformed layer?
    df = pd.DataFrame({"x": [1, 2], "y": [1, 2], "a": ["a", "b"], "b": ["c", "d"]})
    gg = ggplot(aes(x="x", y="y", shape="a", color="b"), data=df)
    new_df = assign_visual_mapping(df, aes(x="x", y="y", shape="a", color="b"), gg)
    layer = gg._get_layers(new_df)
    assert_true("shape" in layer[0], "no shape was assigned")
    assert_true(layer[0]["shape"] != layer[1]["shape"], "wrong marker was assigned")
    # And now a visual test that both shapes are there. Make them big so that
    # the test fails if something is wrong.
    gg = ggplot(aes(x="x", y="y", shape="a", color="b"), data=df)
    assert_same_ggplot(gg + geom_point(size=3000), "geom_point_marker")
/* * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /** * NASHORN-201: Date constructors does not work properly when a single String argument is passed. * * @test * @run */ var expected = "1970-01-01T00:00:00.000Z"; var d = new Date("1970"); var str = d.toISOString(); if (expected !== str) { throw new Error("expected " + expected + ", got " + str); }
import tensorflow as tf
import os
import time

class bcolors:
    WARNING = '\033[93m'
    ENDC = '\033[0m'

@tf.function
def syrk_tf(A):
    # ret = A @ tf.transpose(A)
    ret = tf.linalg.matmul(A, A, transpose_b=True)
    return ret

if __name__ == "__main__":
    # Check if MKL is enabled
    import tensorflow.python.framework as tff
    print(bcolors.WARNING + "MKL Enabled : ", tff.test_util.IsMklEnabled(), bcolors.ENDC)

    # Set threads
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)

    # Problem size (environment variables arrive as strings, so convert to int)
    N = int(os.environ["LAMP_N"]) if "LAMP_N" in os.environ else 3000
    REPS = int(os.environ["LAMP_REPS"]) if "LAMP_REPS" in os.environ else 20
    DTYPE = tf.float32

    # Run in Graph mode
    tf.config.run_functions_eagerly(False)

    A = tf.random.normal([N, N], dtype=DTYPE)

    # Building trace
    ret = syrk_tf(A)

    for i in range(REPS):
        start = time.perf_counter()
        ret = syrk_tf(A)
        end = time.perf_counter()
        print("syrk tf : ", end - start)
const MOCK_INITIAL_METRICS = { frame: { width: 320, height: 640, x: 0, y: 0, }, insets: { left: 0, right: 0, bottom: 0, top: 0, }, }; const RNSafeAreaContext = jest.requireActual('react-native-safe-area-context'); export default { ...RNSafeAreaContext, initialWindowMetrics: MOCK_INITIAL_METRICS, // Provide a simpler implementation with default values. SafeAreaProvider: ({ children, initialMetrics }) => { return ( <RNSafeAreaContext.SafeAreaFrameContext.Provider value={initialMetrics?.frame ?? MOCK_INITIAL_METRICS.frame} > <RNSafeAreaContext.SafeAreaInsetsContext.Provider value={initialMetrics?.insets ?? MOCK_INITIAL_METRICS.insets} > {children} </RNSafeAreaContext.SafeAreaInsetsContext.Provider> </RNSafeAreaContext.SafeAreaFrameContext.Provider> ); }, };
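// A hypothetical test-setup sketch showing how this manual mock is typically
// wired up in a Jest setup file; the relative mock path is an assumption
// about this project's layout.
jest.mock('react-native-safe-area-context', () =>
  require('./__mocks__/react-native-safe-area-context'),
);
// Components rendered under SafeAreaProvider in tests then receive the fixed
// 320x640 frame and zero insets from MOCK_INITIAL_METRICS above.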
#!/usr/local/bin/flask
import sys
import os
sys.path.append("../")

import json
import requests
import pony.orm as pny

import Database
from ExternalServices import logins
from flask import Flask, request, jsonify
from flask_jwt_extended import JWTManager, create_access_token, jwt_required, fresh_jwt_required, get_jwt_identity, set_access_cookies, unset_jwt_cookies, get_raw_jwt

import managementAPI
import EDIconnector

# Initialise database
Database.initialiseDatabase()

# Initialise services
app = Flask(__name__)  # create an instance of the imported Flask class

# Initialise JWT
app.config["JWT_SECRET_KEY"] = os.environ["JWT_PASSWD"]
app.config["JWT_TOKEN_LOCATION"] = ["headers"]
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
jwt = JWTManager(app)

blacklist = set()

@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    jti = decrypted_token['jti']
    return jti in blacklist

@app.route('/flask/version', methods=["GET"])
def getVersion():
    return managementAPI.version()

@app.route("/flask/signup", methods=["POST"])
def signup():
    return managementAPI.signup()

@app.route('/flask/user_type', methods=["GET"])
@fresh_jwt_required
def getUserType():
    username = get_jwt_identity()
    return managementAPI.getUserType(username)

@app.route('/flask/login', methods=['POST'])
def login():
    return managementAPI.login()

@app.route('/flask/changepassword', methods=['POST'])
@pny.db_session
@fresh_jwt_required
@logins.admin_required
def changePassword():
    return managementAPI.changePassword()

@app.route("/flask/authorised", methods=["GET"])
@fresh_jwt_required
def authorised():
    username = get_jwt_identity()
    return jsonify({"status": 200, "msg": "User authorised."})

@app.route('/flask/getmyworkflows', methods=['GET'])
@pny.db_session
@fresh_jwt_required
def getMyWorkflows():
    username = get_jwt_identity()
    return managementAPI.getMyWorkflows(username)

@app.route('/flask/incidentlogs/<incident_uuid>', methods=['GET'])
@pny.db_session
@fresh_jwt_required
def getIncidentLogs(incident_uuid):
    username = get_jwt_identity()
    return managementAPI.getIncidentLogs(incident_uuid, username)

@app.route('/flask/createincident', methods=['POST'])
@jwt_required
def createIncident():
    username = get_jwt_identity()
    return managementAPI.createIncident(username)

@app.route('/flask/getincidents', methods=['GET'])
@pny.db_session
@fresh_jwt_required
def getAllMyIncidents():
    username = get_jwt_identity()
    pending_filter = request.args.get("pending", "false").lower() == "true"
    active_filter = request.args.get("active", "false").lower() == "true"
    completed_filter = request.args.get("completed", "false").lower() == "true"
    cancelled_filter = request.args.get("cancelled", "false").lower() == "true"
    error_filter = request.args.get("error", "false").lower() == "true"
    archived_filter = request.args.get("archived", "false").lower() == "true"
    return managementAPI.getAllMyIncidents(username, pending_filter, active_filter, completed_filter,
                                           cancelled_filter, error_filter, archived_filter)

@app.route('/flask/incident/<incident_uuid>', methods=['GET'])
@pny.db_session
@fresh_jwt_required
def getSpecificIncident(incident_uuid):
    username = get_jwt_identity()
    return managementAPI.getSpecificIncident(incident_uuid, username)

@app.route('/flask/incident/<incident_uuid>', methods=['DELETE'])
@pny.db_session
@fresh_jwt_required
def cancelSpecificIncident(incident_uuid):
    username = get_jwt_identity()
    return managementAPI.cancelSpecificIncident(incident_uuid, username)
@app.route('/flask/archiveincident/<incident_uuid>', methods=['GET']) @pny.db_session @fresh_jwt_required def archiveIncident(incident_uuid): username = get_jwt_identity() return managementAPI.archiveIncident(incident_uuid, username) @app.route('/flask/activateincident/<incident_uuid>', methods=['GET']) @pny.db_session @fresh_jwt_required def activateIncident(incident_uuid): username = get_jwt_identity() return managementAPI.activateIncident(incident_uuid, username) @app.route('/flask/datasets', methods=['GET']) @pny.db_session @fresh_jwt_required def getMatchingDatasets(): username = get_jwt_identity() data_type = request.args.get("type", None) incident_uuid = request.args.get("incident_uuid", None) return managementAPI.getDataSets(data_type, incident_uuid, username) @app.route('/flask/metadata', methods=['GET']) @pny.db_session @fresh_jwt_required def getDataMetadata(): username = get_jwt_identity() data_uuid = request.args.get("data_uuid", None) incident_uuid = request.args.get("incident_uuid", None) return managementAPI.getDataMetadata(data_uuid, incident_uuid, username) @app.route('/flask/metadata', methods=['POST']) @pny.db_session @fresh_jwt_required def updateDataMetadata(): username = get_jwt_identity() return managementAPI.updateDataMetadata(username) @app.route('/flask/data/<data_uuid>', methods=['GET']) @pny.db_session @fresh_jwt_required def downloadData(data_uuid): return managementAPI.downloadData(data_uuid) @app.route('/flask/refreshsimulation', methods=['POST']) @pny.db_session @fresh_jwt_required def refreshSimulation(): return managementAPI.performRefreshSimulation(request.json) @app.route('/flask/simulation', methods=['DELETE']) @pny.db_session @fresh_jwt_required def cancelSimulation(): simulation_uuid = request.args.get("sim_uuid", None) username = get_jwt_identity() return managementAPI.performCancelSimulation(simulation_uuid, username) @app.route('/flask/data', methods=['DELETE']) @pny.db_session @fresh_jwt_required def deleteData(): data_uuid = request.args.get("data_uuid", None) incident_uuid = request.args.get("incident_uuid", None) username = get_jwt_identity() return managementAPI.deleteData(data_uuid, incident_uuid, username) @app.route('/flask/logs', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getLogs(): return managementAPI.getLogs() @app.route('/flask/health', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getHealth(): return managementAPI.getComponentHealths() @app.route('/flask/deleteworkflow', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def deleteWorkflow(): return managementAPI.deleteWorkflow(request.json) @app.route('/flask/workflowinfo', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getWorkflowInfo(): return managementAPI.getWorkflowInfo() @app.route('/flask/addworkflow', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def addWorkflow(): return managementAPI.addWorkflow(request.json) @app.route('/flask/getallusers', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getAllUsers(): return managementAPI.getAllUsers() @app.route('/flask/getuser', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def getUserDetails(): return managementAPI.getUserDetails(request.json) @app.route('/flask/edituser', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def editUserDetails(): return 
managementAPI.editUserDetails(request.json) @app.route('/flask/deleteuser', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def deleteUser(): return managementAPI.deleteUser(request.json) @app.route('/flask/updateworkflowhealth', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def updateWorkflowHealthStatus(): return managementAPI.updateWorkflowHealthStatus() @app.route('/flask/addusertoworkflow', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def addUserToWorkflow(): return managementAPI.addUserToWorkflow(request.json) @app.route('/flask/removeuserfromworkflow', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def removeUserFromWorkflow(): return managementAPI.removeUserFromWorkflow(request.json) @app.route('/flask/getediinfo', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getEDIInfo(): return managementAPI.getEDIInfo() @app.route('/flask/deleteedihandler', methods=['POST']) @pny.db_session @fresh_jwt_required @logins.admin_required def deleteEDIHandler(): return managementAPI.deleteEDIHandler(request.json) @app.route("/flask/logout", methods=["DELETE"]) @fresh_jwt_required def logout(): response = jsonify({"status": 200, "msg": "User logged out."}) jti = get_raw_jwt()['jti'] blacklist.add(jti) return response @app.route("/EDI", methods=["POST"]) def post_edi_data_anon(): return EDIconnector.pushDataToEDI() @app.route("/EDI/<sourceid>", methods=["POST"]) def post_edi_data(sourceid): return EDIconnector.pushDataToEDI(sourceid) @app.route('/flask/getmachinestatuses', methods=['GET']) @pny.db_session @fresh_jwt_required @logins.admin_required def getMachineStatuses(): return managementAPI.performRetrieveMachineStatuses() @app.route('/flask/machine/<machineid>', methods=['DELETE']) @pny.db_session @fresh_jwt_required @logins.admin_required def deleteMachine(machineid): return managementAPI.performDeleteMachine(machineid) @app.route("/flask/addmachine", methods=["POST"]) @pny.db_session @fresh_jwt_required @logins.admin_required def add_new_machine(): return managementAPI.performAddNewMachine(request.json) @app.route("/flask/enablemachine/<machineid>", methods=["POST"]) @pny.db_session @fresh_jwt_required @logins.admin_required def enable_machine(machineid): return managementAPI.performEnableMachine(machineid) @app.route("/flask/disablemachine/<machineid>", methods=["POST"]) @pny.db_session @fresh_jwt_required @logins.admin_required def disable_machine(machineid): return managementAPI.performDisableMachine(machineid) @app.route("/flask/enabletestmodemachine/<machineid>", methods=["POST"]) @pny.db_session @fresh_jwt_required @logins.admin_required def enable_testmode_machine(machineid): return managementAPI.enableTestModeMachine(machineid) @app.route("/flask/disabletestmodemachine/<machineid>", methods=["POST"]) @pny.db_session @fresh_jwt_required @logins.admin_required def disable_test_mode_machine(machineid): return managementAPI.disableTestModeMachine(machineid) if __name__ == '__main__': app.run(host='0.0.0.0', port=8000)
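# A minimal client-side sketch (not part of the service) exercising the JWT
# flow above: log in, call an authenticated endpoint, then log out. The base
# URL, the credentials, and the shape of the login response ('access_token')
# are assumptions about managementAPI.login().
import requests

BASE = 'http://localhost:8000/flask'

resp = requests.post(BASE + '/login',
                     json={'username': 'alice', 'password': 'secret'}).json()
# JWT_TOKEN_LOCATION is 'headers', so the token goes in the Authorization header.
headers = {'Authorization': 'Bearer %s' % resp.get('access_token', '')}

print(requests.get(BASE + '/getincidents',
                   params={'active': 'true'}, headers=headers).json())
requests.delete(BASE + '/logout', headers=headers)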
const test = require('tape') const nlp = require('../_lib') test('tagset-change-isA-basic', function (t) { nlp.extend((Doc, world) => { world.addTags({ Doctor: { isA: 'Person', }, }) world.addWords({ surgeon: 'Doctor', 'surgeon general': 'Doctor', }) }) let doc = nlp('the surgeon operated') //basic isA t.equal(doc.match('#Doctor').out('normal'), 'surgeon', 'surgeon is a doctor') t.equal(doc.match('#Person+').length, 1, 'doctor is a person') doc = nlp('lkjsdf').tag('#Person') t.equal(doc.match('#Doctor').length, 0, 'person isnt a doctor, necessarily') doc = nlp('lkjsdf').tag('#Doctor') t.equal(doc.match('#Person').length, 1, 'post-hoc tags work, too') //multi-word doc = nlp('the surgeon general operated') t.equal(doc.match('#Doctor').out('normal'), 'surgeon general', 'multi-word') t.equal(doc.match('#Person').out('normal'), 'surgeon general', 'multi-word-isA') t.end() }) test('tagset-change-isA', function (t) { nlp.extend((Doc, world) => { world.addTags({ Doctor: { isA: 'Person', notA: ['Foo'], }, }) world.addWords({ lkjj: 'Foo', }) }) let doc = nlp('he is lkjj') t.equal(doc.match('#Foo').out('normal'), 'lkjj', 'init-there') doc.match('lkjj').tag('#Doctor') t.equal(doc.match('#Doctor').out('normal'), 'lkjj', 'doctor-tag-there') t.equal(doc.match('#Foo').out('normal'), '', 'foo-is-gone') t.end() }) test('tagset-remove-downward', function (t) { nlp.extend((Doc, world) => { world.addTags({ Doctor: { isA: 'Person', }, Surgeon: { isA: 'Doctor', }, }) }) let doc = nlp('george is a person.') doc.match('george').tag('Surgeon') t.ok(doc.has('#Surgeon'), 'Surgeon-tag-there') t.ok(doc.has('#Doctor'), 'doctor-tag-there') t.ok(doc.has('#Person'), 'person-tag-there') //remove one in the middle.. doc.match('george').unTag('Person') t.ok(doc.has('#Person') === false, 'person-tag-gone') t.ok(doc.has('#Doctor') === false, 'doctor-tag-gone1') t.ok(doc.has('#Surgeon') === false, 'Surgeon-tag-gone') t.end() }) test('tagset-remove-half-downward', function (t) { nlp.extend((Doc, world) => { world.addTags({ Doctor: { isA: 'Person', }, Surgeon: { isA: 'Doctor', }, }) }) let doc = nlp('george is a person.') doc.match('george').tag('Surgeon') //remove one just under the top.. doc.match('george').unTag('Doctor') t.ok(doc.has('#Person') === true, 'person-tag-there') t.ok(doc.has('#Doctor') === false, 'doctor-tag-gone2') t.ok(doc.has('#Surgeon') === false, 'Surgeon-tag-gone') t.end() }) test('tagset-tree', function (t) { nlp.extend((_, world) => { world.addTags({ One: {}, Two: {}, Three: { isA: 'Two' }, }) }) let doc = nlp(`have fun in toronto`, { toronto: 'Three' }) let m = doc.match('toronto') t.ok(m.has('#Three'), 'three') t.ok(m.has('#Two'), 'two') t.equal(m.has('#One'), false, 'no one') t.equal(m.has('#Adjective'), false, 'no Adjective') t.end() }) test('tagset-tree-array', function (t) { nlp.extend((_, world) => { world.addTags({ One: {}, Two: {}, Three: { isA: ['Two', 'One', 'FirstName'] }, }) }) let doc = nlp(`have fun in toronto`, { toronto: 'Three' }) let m = doc.match('toronto') t.ok(m.has('#Three'), 'three') t.ok(m.has('#Two'), 'two') t.ok(m.has('#One'), 'one') t.ok(m.has('#FirstName'), 'FirstName') t.ok(m.has('#Person'), 'Person') t.ok(m.has('#Noun'), 'Noun') t.end() })
/****************************************************************************
 * drivers/sensors/ads1242.c
 * Character driver for the ADS1242 24-Bit Differential Input Delta/Sigma ADC
 *
 *   Copyright (C) 2016 Gregory Nutt. All rights reserved.
 *   Copyright (C) 2015 DS-Automotion GmbH. All rights reserved.
 *   Author: Alexander Entinger <a.entinger@ds-automotion.com>
 *           Gregory Nutt <gnutt@nuttx.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name NuttX nor the names of its contributors may be
 *    used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
* ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <stdlib.h> #include <stdbool.h> #include <fixedmath.h> #include <errno.h> #include <debug.h> #include <nuttx/kmalloc.h> #include <nuttx/fs/fs.h> #include <nuttx/spi/spi.h> #include <nuttx/analog/ads1242.h> #if defined(CONFIG_SPI) && defined(CONFIG_ADC_ADS1242) /**************************************************************************** * Private Types ****************************************************************************/ struct ads1242_dev_s { FAR struct spi_dev_s *spi; /* Pointer to the SPI instance */ uint32_t osc_period_us; /* Period of the oscillator attached to the ADS1242 in us */ }; /**************************************************************************** * Private Function Prototypes ****************************************************************************/ /* SPI Helpers */ static void ads1242_lock(FAR struct spi_dev_s *spi); static void ads1242_unlock(FAR struct spi_dev_s *spi); static void ads1242_reset(FAR struct ads1242_dev_s *dev); static void ads1242_perform_selfgain_calibration( FAR struct ads1242_dev_s *dev); static void ads1242_perform_selfoffset_calibration( FAR struct ads1242_dev_s *dev); static void ads1242_perform_systemoffset_calibration( FAR struct ads1242_dev_s *dev); static void ads1242_read_conversion_result(FAR struct ads1242_dev_s *dev, FAR uint32_t *conversion_result); static void ads1242_write_reg(FAR struct ads1242_dev_s *dev, uint8_t const reg_addr, uint8_t const reg_value); static void ads1242_read_reg(FAR struct ads1242_dev_s *dev, uint8_t const reg_addr, FAR uint8_t *reg_value); static void ads1242_set_gain(FAR struct ads1242_dev_s *dev, ADS1242_GAIN_SELECTION const gain_selection); static void ads1242_set_positive_input(FAR struct ads1242_dev_s *dev, ADS1242_POSITIVE_INPUT_SELECTION const pos_in_sel); static void ads1242_set_negative_input(FAR struct ads1242_dev_s *dev, ADS1242_NEGATIVE_INPUT_SELECTION const neg_in_sel); static bool ads1242_is_data_ready(FAR struct ads1242_dev_s *dev); #if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO) static void ads1242_print_regs(FAR struct ads1242_dev_s *dev, char const *msg); #endif /* CONFIG_DEBUG_FEATURES && CONFIG_DEBUG_INFO */ /* Character driver methods */ static int ads1242_open(FAR struct file *filep); static int ads1242_close(FAR struct file *filep); static ssize_t ads1242_read(FAR struct file *, FAR char *, size_t); static ssize_t ads1242_write(FAR struct file *filep, FAR const char *buffer, size_t buflen); static int ads1242_ioctl(FAR struct file *filep, int cmd, unsigned long arg); /**************************************************************************** * Private Data ****************************************************************************/ static const struct file_operations g_ads1242_fops = { ads1242_open, ads1242_close, ads1242_read, ads1242_write, NULL, ads1242_ioctl, NULL }; /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** * Name: ads1242_lock * * Description: * Lock and configure the SPI bus. 
* ****************************************************************************/ static void ads1242_lock(FAR struct spi_dev_s *spi) { SPI_LOCK(spi, true); SPI_SETMODE(spi, ADS1242_SPI_MODE); SPI_SETBITS(spi, 8); SPI_HWFEATURES(spi, 0); SPI_SETFREQUENCY(spi, ADS1242_SPI_FREQUENCY); } /**************************************************************************** * Name: ads1242_unlock * * Description: * Unlock the SPI bus. * ****************************************************************************/ static void ads1242_unlock(FAR struct spi_dev_s *spi) { SPI_LOCK(spi, false); } /**************************************************************************** * Name: ads1242_reset ****************************************************************************/ static void ads1242_reset(FAR struct ads1242_dev_s *dev) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); /* Set nADC_SPI_CS to low which * selects the ADS1242 */ SPI_SEND(dev->spi, ADS1242_CMD_RESET); /* Issue reset command */ SPI_SELECT(dev->spi, 0, false); /* Set nADC_SPI_CS to high which * deselects the ADS1242 */ up_mdelay(100); /* Wait a little so the device has * time to perform a proper reset */ ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_perform_selfgain_calibration ****************************************************************************/ static void ads1242_perform_selfgain_calibration(FAR struct ads1242_dev_s *dev) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_SELF_GAIN_CALIB); SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_perform_selfoffset_calibration ****************************************************************************/ static void ads1242_perform_selfoffset_calibration(FAR struct ads1242_dev_s *dev) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_SELF_OFFSET_CALIB); SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_perform_systemoffset_calibration ****************************************************************************/ static void ads1242_perform_systemoffset_calibration(FAR struct ads1242_dev_s *dev) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_SYSTEM_OFFSET_CALIB); SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_read_conversion_result ****************************************************************************/ static void ads1242_read_conversion_result(FAR struct ads1242_dev_s *dev, FAR uint32_t *conversion_result) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_READ_DATA); /* Delay between last SCLK edge for DIN and first SCLK edge for DOUT: * RDATA, RDATAC, RREG, WREG: Min 50 x tOSC Periods */ up_udelay(50 * dev->osc_period_us); *conversion_result = 0; /* 1st Byte = MSB * 2nd Byte = Mid-Byte * 3rd Byte = LSB */ *conversion_result |= ((uint32_t)(SPI_SEND(dev->spi, 0xff))) << 16; *conversion_result |= ((uint32_t)(SPI_SEND(dev->spi, 0xff))) << 8; *conversion_result |= ((uint32_t)(SPI_SEND(dev->spi, 0xff))) << 0; SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * 
Name: ads1242_write_reg * * Description: * Write to the registers starting with the register address specified as * part of the instruction. The number of registers that will be written * is one plus the value of the second byte. * ****************************************************************************/ static void ads1242_write_reg(FAR struct ads1242_dev_s *dev, uint8_t const reg_addr, uint8_t const reg_value) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_WRITE_REGISTER | reg_addr); SPI_SEND(dev->spi, 0x00); /* Write 1 Byte */ SPI_SEND(dev->spi, reg_value); SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_read_reg * * Description: * Output the data from up to 16 registers starting with the register * address specified as part of the instruction. The number of registers * read will be one plus the second byte count. If the count exceeds the * remaining registers, the addresses wrap back to the beginning. * ****************************************************************************/ static void ads1242_read_reg(FAR struct ads1242_dev_s *dev, uint8_t const reg_addr, FAR uint8_t *reg_value) { ads1242_lock(dev->spi); SPI_SELECT(dev->spi, 0, true); SPI_SEND(dev->spi, ADS1242_CMD_READ_REGISTER | reg_addr); SPI_SEND(dev->spi, 0x00); /* Read 1 Byte */ /* Delay between last SCLK edge for DIN and first SCLK edge for DOUT: * RDATA, RDATAC, RREG, WREG: Min 50 x tOSC Periods */ up_udelay(50 * dev->osc_period_us); *reg_value = SPI_SEND(dev->spi, 0xff); SPI_SELECT(dev->spi, 0, false); ads1242_unlock(dev->spi); } /**************************************************************************** * Name: ads1242_set_gain ****************************************************************************/ static void ads1242_set_gain(FAR struct ads1242_dev_s *dev, ADS1242_GAIN_SELECTION const gain_selection) { uint8_t setup_reg_value = 0; ads1242_read_reg(dev, ADS1242_REG_SETUP, &setup_reg_value); setup_reg_value &= ~(ADS1242_REG_SETUP_BIT_PGA2 | ADS1242_REG_SETUP_BIT_PGA1 | ADS1242_REG_SETUP_BIT_PGA0); setup_reg_value |= (uint8_t)(gain_selection); ads1242_write_reg(dev, ADS1242_REG_SETUP, setup_reg_value); #if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO) ads1242_print_regs(dev, "ads1242_set_gain"); #endif /* It is necessary to perform a offset calibration after setting the gain */ ads1242_perform_selfoffset_calibration(dev); } /**************************************************************************** * Name: ads1242_set_positive_input ****************************************************************************/ static void ads1242_set_positive_input(FAR struct ads1242_dev_s *dev, ADS1242_POSITIVE_INPUT_SELECTION const pos_in_sel) { uint8_t mux_reg_value = 0; ads1242_read_reg(dev, ADS1242_REG_MUX, &mux_reg_value); mux_reg_value &= ~(ADS1242_REG_MUX_BIT_PSEL3 | ADS1242_REG_MUX_BIT_PSEL2 | ADS1242_REG_MUX_BIT_PSEL1 | ADS1242_REG_MUX_BIT_PSEL0); mux_reg_value |= (uint8_t)(pos_in_sel); ads1242_write_reg(dev, ADS1242_REG_MUX, mux_reg_value); #if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO) ads1242_print_regs(dev, "ads1242_set_positive_input"); #endif } /**************************************************************************** * Name: ads1242_set_negative_input ****************************************************************************/ static void ads1242_set_negative_input(FAR struct ads1242_dev_s *dev, ADS1242_NEGATIVE_INPUT_SELECTION 
                                        const neg_in_sel)
{
  uint8_t mux_reg_value = 0;

  ads1242_read_reg(dev, ADS1242_REG_MUX, &mux_reg_value);

  mux_reg_value &= ~(ADS1242_REG_MUX_BIT_NSEL3 | ADS1242_REG_MUX_BIT_NSEL2 |
                     ADS1242_REG_MUX_BIT_NSEL1 | ADS1242_REG_MUX_BIT_NSEL0);
  mux_reg_value |= (uint8_t)(neg_in_sel);

  ads1242_write_reg(dev, ADS1242_REG_MUX, mux_reg_value);

#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO)
  ads1242_print_regs(dev, "ads1242_set_negative_input");
#endif
}

/****************************************************************************
 * Name: ads1242_is_data_ready
 ****************************************************************************/

static bool ads1242_is_data_ready(FAR struct ads1242_dev_s *dev)
{
  uint8_t acr_reg_value = 0xff;

  ads1242_read_reg(dev, ADS1242_REG_ACR, &acr_reg_value);
  return (acr_reg_value & ADS1242_REG_ACR_BIT_nDRDY) == 0;
}

/****************************************************************************
 * Name: ads1242_print_regs
 ****************************************************************************/

#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO)
static void ads1242_print_regs(FAR struct ads1242_dev_s *dev,
                               char const *msg)
{
  uint8_t setup_reg_value = 0;
  uint8_t mux_reg_value   = 0;
  uint8_t acr_reg_value   = 0;

  ainfo("%s\n", msg);

  ads1242_read_reg(dev, ADS1242_REG_SETUP, &setup_reg_value);
  ads1242_read_reg(dev, ADS1242_REG_MUX, &mux_reg_value);
  ads1242_read_reg(dev, ADS1242_REG_ACR, &acr_reg_value);

  ainfo("SETUP %02X\n", setup_reg_value);
  ainfo("MUX   %02X\n", mux_reg_value);
  ainfo("ACR   %02X\n", acr_reg_value);
}
#endif /* CONFIG_DEBUG_FEATURES && CONFIG_DEBUG_INFO */

/****************************************************************************
 * Name: ads1242_open
 ****************************************************************************/

static int ads1242_open(FAR struct file *filep)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct ads1242_dev_s *priv = inode->i_private;

  ads1242_reset(priv);
  up_mdelay(100);

  ads1242_perform_selfgain_calibration(priv);
  up_mdelay(100);

  /* SPEED = 1 -> fMod = fOsc / 256 (fMod = Modulator Clock Speed)
   * BUFEN = 1 -> Internal input buffer enabled -> results in a very high
   *              impedance input for the ADC ~ 5 GOhm
   */

  ads1242_write_reg(priv, ADS1242_REG_ACR,
                    ADS1242_REG_ACR_BIT_SPEED | ADS1242_REG_ACR_BIT_BUFEN);

  ads1242_perform_selfoffset_calibration(priv);
  up_mdelay(100);

#if defined(CONFIG_DEBUG_FEATURES) && defined(CONFIG_DEBUG_INFO)
  ads1242_print_regs(priv, "ads1242_open");
#endif

  return OK;
}

/****************************************************************************
 * Name: ads1242_close
 ****************************************************************************/

static int ads1242_close(FAR struct file *filep)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct ads1242_dev_s *priv = inode->i_private;

  ads1242_reset(priv);
  up_mdelay(100);

  return OK;
}

/****************************************************************************
 * Name: ads1242_read
 ****************************************************************************/

static ssize_t ads1242_read(FAR struct file *filep, FAR char *buffer,
                            size_t buflen)
{
  return -ENOSYS;
}

/****************************************************************************
 * Name: ads1242_write
 ****************************************************************************/

static ssize_t ads1242_write(FAR struct file *filep, FAR const char *buffer,
                             size_t buflen)
{
  return -ENOSYS;
}

/****************************************************************************
 * Name:
ads1242_ioctl ****************************************************************************/ static int ads1242_ioctl (FAR struct file *filep, int cmd, unsigned long arg) { FAR struct inode *inode = filep->f_inode; FAR struct ads1242_dev_s *priv = inode->i_private; int ret = OK; switch (cmd) { /* Read the result of an analog conversion */ case ANIOC_ADS2142_READ: { FAR uint32_t *data = (FAR uint32_t *)((uintptr_t)arg); ads1242_read_conversion_result(priv, data); } break; /* Set the gain of the ADC */ case ANIOC_ADS2142_SET_GAIN: { ads1242_set_gain(priv, (ADS1242_GAIN_SELECTION)(arg)); } break; /* Set the positive input of the ADC */ case ANIOC_ADS2142_SET_POSITIVE_INPUT: { ads1242_set_positive_input(priv, (ADS1242_POSITIVE_INPUT_SELECTION)(arg)); } break; /* Set the negative input of the ADC */ case ANIOC_ADS2142_SET_NEGATIVE_INPUT: { ads1242_set_negative_input(priv, (ADS1242_NEGATIVE_INPUT_SELECTION)(arg)); } break; /* Check if data is ready to be read */ case ANIOC_ADS2142_IS_DATA_READY: { FAR bool *is_data_ready = (FAR bool *)((uintptr_t)arg); *is_data_ready = ads1242_is_data_ready(priv); } break; /* Perform a system offset calibration - Note: Zero input signal must * be applied. */ case ANIOC_ADS2142_DO_SYSTEM_OFFSET_CALIB: { ads1242_perform_systemoffset_calibration(priv); } break; /* Command was not recognized */ default: _err("ERROR: Unrecognized cmd: %d\n", cmd); ret = -ENOTTY; break; } return ret; } /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: ads1242_register * * Description: * Register the ADS1242 character device as 'devpath' * * Input Parameters: * devpath - The full path to the driver to register. E.g., "/dev/ads1242" * spi - An instance of the SPI interface to use to communicate with ADS1242 * osc_freq_hz - The frequency of the ADS1242 oscillator in Hz. Required for * calculating the minimum delay periods when accessing the device via SPI. * * Returned Value: * Zero (OK) on success; a negated errno value on failure. * ****************************************************************************/ int ads1242_register(FAR const char *devpath, FAR struct spi_dev_s *spi, uint32_t const osc_freq_hz) { FAR struct ads1242_dev_s *priv; int ret; /* Sanity check */ DEBUGASSERT(spi != NULL); /* Initialize the ADS1242 device structure */ priv = (FAR struct ads1242_dev_s *)kmm_malloc(sizeof(struct ads1242_dev_s)); if (priv == NULL) { _err("ERROR: Failed to allocate instance\n"); return -ENOMEM; } priv->spi = spi; float const osc_period_us = (1000.0 * 1000.0) / ((float)(osc_freq_hz)); priv->osc_period_us = (uint32_t)(osc_period_us); /* Register the character driver */ ret = register_driver(devpath, &g_ads1242_fops, 0666, priv); if (ret < 0) { _err("ERROR: Failed to register driver: %d\n", ret); kmm_free(priv); } return ret; } #endif /* CONFIG_SPI && CONFIG_ADC_ADS1242 */
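/* A hypothetical application-side sketch for the driver above: open the
 * device node, wait for a finished conversion and read it. The ioctl names
 * are the ones handled in ads1242_ioctl(); the device path matches the
 * example in ads1242_register(), and the ioctl constants are assumed to be
 * defined in the accompanying <nuttx/analog/ads1242.h>.
 */

#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <nuttx/analog/ads1242.h>

int read_ads1242_sample(void)
{
  bool ready = false;
  uint32_t result = 0;
  int fd = open("/dev/ads1242", O_RDONLY);

  if (fd < 0)
    {
      return -1;
    }

  /* Poll until the ADC signals a finished conversion */

  do
    {
      ioctl(fd, ANIOC_ADS2142_IS_DATA_READY, (unsigned long)&ready);
    }
  while (!ready);

  ioctl(fd, ANIOC_ADS2142_READ, (unsigned long)&result);
  printf("Raw 24-bit conversion result: %lu\n", (unsigned long)result);

  close(fd);
  return 0;
}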
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides a variety of device interactions based on adb. Eventually, this will be based on adb_wrapper. """ # pylint: disable=unused-argument import collections import itertools import json import logging import multiprocessing import os import posixpath import re import shutil import tempfile import time import zipfile from devil import base_error from devil import devil_env from devil.utils import cmd_helper from devil.android import apk_helper from devil.android import device_signal from devil.android import decorators from devil.android import device_errors from devil.android import device_temp_file from devil.android import install_commands from devil.android import logcat_monitor from devil.android import md5sum from devil.android.sdk import adb_wrapper from devil.android.sdk import gce_adb_wrapper from devil.android.sdk import intent from devil.android.sdk import keyevent from devil.android.sdk import split_select from devil.android.sdk import version_codes from devil.utils import host_utils from devil.utils import parallelizer from devil.utils import reraiser_thread from devil.utils import timeout_retry from devil.utils import zip_utils _DEFAULT_TIMEOUT = 30 _DEFAULT_RETRIES = 3 # A sentinel object for default values # TODO(jbudorick,perezju): revisit how default values are handled by # the timeout_retry decorators. DEFAULT = object() _RESTART_ADBD_SCRIPT = """ trap '' HUP trap '' TERM trap '' PIPE function restart() { stop adbd start adbd } restart & """ # Not all permissions can be set. _PERMISSIONS_BLACKLIST = [ 'android.permission.ACCESS_MOCK_LOCATION', 'android.permission.ACCESS_NETWORK_STATE', 'android.permission.AUTHENTICATE_ACCOUNTS', 'android.permission.BLUETOOTH', 'android.permission.BLUETOOTH_ADMIN', 'android.permission.DOWNLOAD_WITHOUT_NOTIFICATION', 'android.permission.INTERNET', 'android.permission.MANAGE_ACCOUNTS', 'android.permission.MODIFY_AUDIO_SETTINGS', 'android.permission.NFC', 'android.permission.READ_SYNC_SETTINGS', 'android.permission.READ_SYNC_STATS', 'android.permission.RECEIVE_BOOT_COMPLETED', 'android.permission.USE_CREDENTIALS', 'android.permission.VIBRATE', 'android.permission.WAKE_LOCK', 'android.permission.WRITE_SYNC_SETTINGS', 'com.android.browser.permission.READ_HISTORY_BOOKMARKS', 'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS', 'com.android.launcher.permission.INSTALL_SHORTCUT', 'com.chrome.permission.DEVICE_EXTRAS', 'com.google.android.apps.chrome.permission.C2D_MESSAGE', 'com.google.android.apps.chrome.permission.READ_WRITE_BOOKMARK_FOLDERS', 'com.google.android.apps.chrome.TOS_ACKED', 'com.google.android.c2dm.permission.RECEIVE', 'com.google.android.providers.gsf.permission.READ_GSERVICES', 'com.sec.enterprise.knox.MDM_CONTENT_PROVIDER', 'org.chromium.chrome.permission.C2D_MESSAGE', 'org.chromium.chrome.permission.READ_WRITE_BOOKMARK_FOLDERS', 'org.chromium.chrome.TOS_ACKED', ] _CURRENT_FOCUS_CRASH_RE = re.compile( r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}') _GETPROP_RE = re.compile(r'\[(.*?)\]: \[(.*?)\]') _IPV4_ADDRESS_RE = re.compile(r'([0-9]{1,3}\.){3}[0-9]{1,3}\:[0-9]{4,5}') @decorators.WithExplicitTimeoutAndRetries( _DEFAULT_TIMEOUT, _DEFAULT_RETRIES) def GetAVDs(): """Returns a list of Android Virtual Devices. Returns: A list containing the configured AVDs. 
""" lines = cmd_helper.GetCmdOutput([ os.path.join(devil_env.config.LocalPath('android_sdk'), 'tools', 'android'), 'list', 'avd']).splitlines() avds = [] for line in lines: if 'Name:' not in line: continue key, value = (s.strip() for s in line.split(':', 1)) if key == 'Name': avds.append(value) return avds @decorators.WithExplicitTimeoutAndRetries( _DEFAULT_TIMEOUT, _DEFAULT_RETRIES) def RestartServer(): """Restarts the adb server. Raises: CommandFailedError if we fail to kill or restart the server. """ def adb_killed(): return not adb_wrapper.AdbWrapper.IsServerOnline() def adb_started(): return adb_wrapper.AdbWrapper.IsServerOnline() adb_wrapper.AdbWrapper.KillServer() if not timeout_retry.WaitFor(adb_killed, wait_period=1, max_tries=5): # TODO(perezju): raise an exception after fixng http://crbug.com/442319 logging.warning('Failed to kill adb server') adb_wrapper.AdbWrapper.StartServer() if not timeout_retry.WaitFor(adb_started, wait_period=1, max_tries=5): raise device_errors.CommandFailedError('Failed to start adb server') def _GetTimeStamp(): """Return a basic ISO 8601 time stamp with the current local time.""" return time.strftime('%Y%m%dT%H%M%S', time.localtime()) def _JoinLines(lines): # makes sure that the last line is also terminated, and is more memory # efficient than first appending an end-line to each line and then joining # all of them together. return ''.join(s for line in lines for s in (line, '\n')) def _IsGceInstance(serial): return _IPV4_ADDRESS_RE.match(serial) def _CreateAdbWrapper(device): if _IsGceInstance(str(device)): return gce_adb_wrapper.GceAdbWrapper(str(device)) else: if isinstance(device, adb_wrapper.AdbWrapper): return device else: return adb_wrapper.AdbWrapper(device) class DeviceUtils(object): _MAX_ADB_COMMAND_LENGTH = 512 _MAX_ADB_OUTPUT_LENGTH = 32768 _LAUNCHER_FOCUSED_RE = re.compile( r'\s*mCurrentFocus.*(Launcher|launcher).*') _VALID_SHELL_VARIABLE = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$') LOCAL_PROPERTIES_PATH = posixpath.join('/', 'data', 'local.prop') # Property in /data/local.prop that controls Java assertions. JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions' def __init__(self, device, enable_device_files_cache=False, default_timeout=_DEFAULT_TIMEOUT, default_retries=_DEFAULT_RETRIES): """DeviceUtils constructor. Args: device: Either a device serial, an existing AdbWrapper instance, or an an existing AndroidCommands instance. enable_device_files_cache: For PushChangedFiles(), cache checksums of pushed files rather than recomputing them on a subsequent call. default_timeout: An integer containing the default number of seconds to wait for an operation to complete if no explicit value is provided. default_retries: An integer containing the default number or times an operation should be retried on failure if no explicit value is provided. """ self.adb = None if isinstance(device, basestring): self.adb = _CreateAdbWrapper(device) elif isinstance(device, adb_wrapper.AdbWrapper): self.adb = device else: raise ValueError('Unsupported device value: %r' % device) self._commands_installed = None self._default_timeout = default_timeout self._default_retries = default_retries self._enable_device_files_cache = enable_device_files_cache self._cache = {} self._client_caches = {} assert hasattr(self, decorators.DEFAULT_TIMEOUT_ATTR) assert hasattr(self, decorators.DEFAULT_RETRIES_ATTR) self._ClearCache() def __eq__(self, other): """Checks whether |other| refers to the same device as |self|. Args: other: The object to compare to. 
This can be a basestring, an instance of
        adb_wrapper.AdbWrapper, or an instance of DeviceUtils.

    Returns:
      Whether |other| refers to the same device as |self|.
    """
    return self.adb.GetDeviceSerial() == str(other)

  def __lt__(self, other):
    """Compares two instances of DeviceUtils.

    This merely compares their serial numbers.

    Args:
      other: The instance of DeviceUtils to compare to.

    Returns:
      Whether |self| is less than |other|.
    """
    return self.adb.GetDeviceSerial() < other.adb.GetDeviceSerial()

  def __str__(self):
    """Returns the device serial."""
    return self.adb.GetDeviceSerial()

  @decorators.WithTimeoutAndRetriesFromInstance()
  def IsOnline(self, timeout=None, retries=None):
    """Checks whether the device is online.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      True if the device is online, False otherwise.

    Raises:
      CommandTimeoutError on timeout.
    """
    try:
      return self.adb.GetState() == 'device'
    except base_error.BaseError as exc:
      logging.info('Failed to get state: %s', exc)
      return False

  @decorators.WithTimeoutAndRetriesFromInstance()
  def HasRoot(self, timeout=None, retries=None):
    """Checks whether or not adbd has root privileges.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      True if adbd has root privileges, False otherwise.

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    try:
      self.RunShellCommand('ls /root', check_return=True)
      return True
    except device_errors.AdbCommandFailedError:
      return False

  def NeedsSU(self, timeout=DEFAULT, retries=DEFAULT):
    """Checks whether 'su' is needed to access protected resources.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      True if 'su' is available on the device and is needed to access
      protected resources; False otherwise, i.e. if either 'su' is not
      available (e.g. because the device has a user build) or it is not
      needed (because adbd already has root privileges).

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    if 'needs_su' not in self._cache:
      try:
        self.RunShellCommand(
            '%s && ! ls /root' % self._Su('ls /root'), check_return=True,
            timeout=self._default_timeout if timeout is DEFAULT else timeout,
            retries=self._default_retries if retries is DEFAULT else retries)
        self._cache['needs_su'] = True
      except device_errors.AdbCommandFailedError:
        self._cache['needs_su'] = False
    return self._cache['needs_su']

  def _Su(self, command):
    if self.build_version_sdk >= version_codes.MARSHMALLOW:
      return 'su 0 %s' % command
    return 'su -c %s' % command

  @decorators.WithTimeoutAndRetriesFromInstance()
  def EnableRoot(self, timeout=None, retries=None):
    """Restarts adbd with root privileges.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandFailedError if root could not be enabled.
      CommandTimeoutError on timeout.
    """
    if self.IsUserBuild():
      raise device_errors.CommandFailedError(
          'Cannot enable root in user builds.', str(self))
    if 'needs_su' in self._cache:
      del self._cache['needs_su']
    self.adb.Root()
    self.WaitUntilFullyBooted()

  @decorators.WithTimeoutAndRetriesFromInstance()
  def IsUserBuild(self, timeout=None, retries=None):
    """Checks whether or not the device is running a user build.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      True if the device is running a user build, False otherwise (i.e. if
      it's running a userdebug build).

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
""" return self.build_type == 'user' @decorators.WithTimeoutAndRetriesFromInstance() def GetExternalStoragePath(self, timeout=None, retries=None): """Get the device's path to its SD card. Args: timeout: timeout in seconds retries: number of retries Returns: The device's path to its SD card. Raises: CommandFailedError if the external storage path could not be determined. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ if 'external_storage' in self._cache: return self._cache['external_storage'] value = self.RunShellCommand('echo $EXTERNAL_STORAGE', single_line=True, check_return=True) if not value: raise device_errors.CommandFailedError('$EXTERNAL_STORAGE is not set', str(self)) self._cache['external_storage'] = value return value @decorators.WithTimeoutAndRetriesFromInstance() def GetApplicationPaths(self, package, timeout=None, retries=None): """Get the paths of the installed apks on the device for the given package. Args: package: Name of the package. Returns: List of paths to the apks on the device for the given package. """ return self._GetApplicationPathsInternal(package) def _GetApplicationPathsInternal(self, package, skip_cache=False): cached_result = self._cache['package_apk_paths'].get(package) if cached_result is not None and not skip_cache: if package in self._cache['package_apk_paths_to_verify']: self._cache['package_apk_paths_to_verify'].remove(package) # Don't verify an app that is not thought to be installed. We are # concerned only with apps we think are installed having been # uninstalled manually. if cached_result and not self.PathExists(cached_result): cached_result = None self._cache['package_apk_checksums'].pop(package, 0) if cached_result is not None: return list(cached_result) # 'pm path' is liable to incorrectly exit with a nonzero number starting # in Lollipop. # TODO(jbudorick): Check if this is fixed as new Android versions are # released to put an upper bound on this. should_check_return = (self.build_version_sdk < version_codes.LOLLIPOP) output = self.RunShellCommand( ['pm', 'path', package], check_return=should_check_return) apks = [] for line in output: if not line.startswith('package:'): raise device_errors.CommandFailedError( 'pm path returned: %r' % '\n'.join(output), str(self)) apks.append(line[len('package:'):]) self._cache['package_apk_paths'][package] = list(apks) return apks @decorators.WithTimeoutAndRetriesFromInstance() def GetApplicationVersion(self, package, timeout=None, retries=None): """Get the version name of a package installed on the device. Args: package: Name of the package. Returns: A string with the version name or None if the package is not found on the device. """ output = self.RunShellCommand( ['dumpsys', 'package', package], check_return=True) if not output: return None for line in output: line = line.strip() if line.startswith('versionName='): return line[len('versionName='):] raise device_errors.CommandFailedError( 'Version name for %s not found on dumpsys output' % package, str(self)) @decorators.WithTimeoutAndRetriesFromInstance() def GetApplicationDataDirectory(self, package, timeout=None, retries=None): """Get the data directory on the device for the given package. Args: package: Name of the package. Returns: The package's data directory, or None if the package doesn't exist on the device. 
""" try: output = self._RunPipedShellCommand( 'pm dump %s | grep dataDir=' % cmd_helper.SingleQuote(package)) for line in output: _, _, dataDir = line.partition('dataDir=') if dataDir: return dataDir except device_errors.CommandFailedError: logging.exception('Could not find data directory for %s', package) return None @decorators.WithTimeoutAndRetriesFromInstance() def WaitUntilFullyBooted(self, wifi=False, timeout=None, retries=None): """Wait for the device to fully boot. This means waiting for the device to boot, the package manager to be available, and the SD card to be ready. It can optionally mean waiting for wifi to come up, too. Args: wifi: A boolean indicating if we should wait for wifi to come up or not. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError on failure. CommandTimeoutError if one of the component waits times out. DeviceUnreachableError if the device becomes unresponsive. """ def sd_card_ready(): try: self.RunShellCommand(['test', '-d', self.GetExternalStoragePath()], check_return=True) return True except device_errors.AdbCommandFailedError: return False def pm_ready(): try: return self._GetApplicationPathsInternal('android', skip_cache=True) except device_errors.CommandFailedError: return False def boot_completed(): return self.GetProp('sys.boot_completed', cache=False) == '1' def wifi_enabled(): return 'Wi-Fi is enabled' in self.RunShellCommand(['dumpsys', 'wifi'], check_return=False) self.adb.WaitForDevice() timeout_retry.WaitFor(sd_card_ready) timeout_retry.WaitFor(pm_ready) timeout_retry.WaitFor(boot_completed) if wifi: timeout_retry.WaitFor(wifi_enabled) REBOOT_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT @decorators.WithTimeoutAndRetriesFromInstance( min_default_timeout=REBOOT_DEFAULT_TIMEOUT) def Reboot(self, block=True, wifi=False, timeout=None, retries=None): """Reboot the device. Args: block: A boolean indicating if we should wait for the reboot to complete. wifi: A boolean indicating if we should wait for wifi to be enabled after the reboot. The option has no effect unless |block| is also True. timeout: timeout in seconds retries: number of retries Raises: CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ def device_offline(): return not self.IsOnline() self.adb.Reboot() self._ClearCache() timeout_retry.WaitFor(device_offline, wait_period=1) if block: self.WaitUntilFullyBooted(wifi=wifi) INSTALL_DEFAULT_TIMEOUT = 4 * _DEFAULT_TIMEOUT @decorators.WithTimeoutAndRetriesFromInstance( min_default_timeout=INSTALL_DEFAULT_TIMEOUT) def Install(self, apk, allow_downgrade=False, reinstall=False, permissions=None, timeout=None, retries=None): """Install an APK. Noop if an identical APK is already installed. Args: apk: An ApkHelper instance or string containing the path to the APK. allow_downgrade: A boolean indicating if we should allow downgrades. reinstall: A boolean indicating if we should keep any existing app data. permissions: Set of permissions to set. If not set, finds permissions with apk helper. To set no permissions, pass []. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError if the installation fails. CommandTimeoutError if the installation times out. DeviceUnreachableError on missing device. 
""" self._InstallInternal(apk, None, allow_downgrade=allow_downgrade, reinstall=reinstall, permissions=permissions) @decorators.WithTimeoutAndRetriesFromInstance( min_default_timeout=INSTALL_DEFAULT_TIMEOUT) def InstallSplitApk(self, base_apk, split_apks, allow_downgrade=False, reinstall=False, allow_cached_props=False, permissions=None, timeout=None, retries=None): """Install a split APK. Noop if all of the APK splits are already installed. Args: base_apk: An ApkHelper instance or string containing the path to the base APK. split_apks: A list of strings of paths of all of the APK splits. allow_downgrade: A boolean indicating if we should allow downgrades. reinstall: A boolean indicating if we should keep any existing app data. allow_cached_props: Whether to use cached values for device properties. permissions: Set of permissions to set. If not set, finds permissions with apk helper. To set no permissions, pass []. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError if the installation fails. CommandTimeoutError if the installation times out. DeviceUnreachableError on missing device. DeviceVersionError if device SDK is less than Android L. """ self._InstallInternal(base_apk, split_apks, reinstall=reinstall, allow_cached_props=allow_cached_props, permissions=permissions, allow_downgrade=allow_downgrade) def _InstallInternal(self, base_apk, split_apks, allow_downgrade=False, reinstall=False, allow_cached_props=False, permissions=None): if split_apks: self._CheckSdkLevel(version_codes.LOLLIPOP) base_apk = apk_helper.ToHelper(base_apk) all_apks = [base_apk.path] if split_apks: all_apks += split_select.SelectSplits( self, base_apk.path, split_apks, allow_cached_props=allow_cached_props) if len(all_apks) == 1: logging.warning('split-select did not select any from %s', split_apks) package_name = base_apk.GetPackageName() device_apk_paths = self._GetApplicationPathsInternal(package_name) apks_to_install = None host_checksums = None if not device_apk_paths: apks_to_install = all_apks elif len(device_apk_paths) > 1 and not split_apks: logging.warning( 'Installing non-split APK when split APK was previously installed') apks_to_install = all_apks elif len(device_apk_paths) == 1 and split_apks: logging.warning( 'Installing split APK when non-split APK was previously installed') apks_to_install = all_apks else: try: apks_to_install, host_checksums = ( self._ComputeStaleApks(package_name, all_apks)) except EnvironmentError as e: logging.warning('Error calculating md5: %s', e) apks_to_install, host_checksums = all_apks, None if apks_to_install and not reinstall: self.Uninstall(package_name) apks_to_install = all_apks if apks_to_install: # Assume that we won't know the resulting device state. self._cache['package_apk_paths'].pop(package_name, 0) self._cache['package_apk_checksums'].pop(package_name, 0) if split_apks: partial = package_name if len(apks_to_install) < len(all_apks) else None self.adb.InstallMultiple( apks_to_install, partial=partial, reinstall=reinstall, allow_downgrade=allow_downgrade) else: self.adb.Install( base_apk.path, reinstall=reinstall, allow_downgrade=allow_downgrade) if (permissions is None and self.build_version_sdk >= version_codes.MARSHMALLOW): permissions = base_apk.GetPermissions() self.GrantPermissions(package_name, permissions) # Upon success, we know the device checksums, but not their paths. 
      if host_checksums is not None:
        self._cache['package_apk_checksums'][package_name] = host_checksums
    else:
      # Running adb install terminates running instances of the app, so to be
      # consistent, we explicitly terminate it when skipping the install.
      self.ForceStop(package_name)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def Uninstall(self, package_name, keep_data=False, timeout=None,
                retries=None):
    """Remove the app |package_name| from the device.

    This is a no-op if the app is not already installed.

    Args:
      package_name: The package to uninstall.
      keep_data: (optional) Whether to keep the data and cache directories.
      timeout: Timeout in seconds.
      retries: Number of retries.

    Raises:
      CommandFailedError if the uninstallation fails.
      CommandTimeoutError if the uninstallation times out.
      DeviceUnreachableError on missing device.
    """
    installed = self._GetApplicationPathsInternal(package_name)
    if not installed:
      return
    try:
      self.adb.Uninstall(package_name, keep_data)
      self._cache['package_apk_paths'][package_name] = []
      self._cache['package_apk_checksums'][package_name] = set()
    except:
      # Clear cache since we can't be sure of the state.
      self._cache['package_apk_paths'].pop(package_name, 0)
      self._cache['package_apk_checksums'].pop(package_name, 0)
      raise

  def _CheckSdkLevel(self, required_sdk_level):
    """Raises an exception if the device does not have the required SDK
    level.
    """
    if self.build_version_sdk < required_sdk_level:
      raise device_errors.DeviceVersionError(
          ('Requires SDK level %s, device is SDK level %s' %
           (required_sdk_level, self.build_version_sdk)),
          device_serial=self.adb.GetDeviceSerial())

  @decorators.WithTimeoutAndRetriesFromInstance()
  def RunShellCommand(self, cmd, check_return=False, cwd=None, env=None,
                      as_root=False, single_line=False, large_output=False,
                      timeout=None, retries=None):
    """Run an ADB shell command.

    The command to run |cmd| should be a sequence of program arguments or
    else a single string.

    When |cmd| is a sequence, it is assumed to contain the name of the
    command to run followed by its arguments. In this case, arguments are
    passed to the command exactly as given, without any further processing by
    the shell. This makes it easy to pass arguments containing spaces or
    special characters without having to worry about getting quoting right.
    Whenever possible, it is recommended to pass |cmd| as a sequence.

    When |cmd| is given as a string, it will be interpreted and run by the
    shell on the device.

    This behaviour is consistent with that of command runners in cmd_helper
    as well as Python's own subprocess.Popen.

    TODO(perezju) Change the default of |check_return| to True when callers
    have switched to the new behaviour.

    Args:
      cmd: A string with the full command to run on the device, or a
        sequence containing the command and its arguments.
      check_return: A boolean indicating whether or not the return code
        should be checked.
      cwd: The device directory in which the command should be run.
      env: The environment variables with which the command should be run.
      as_root: A boolean indicating whether the shell command should be run
        with root privileges.
      single_line: A boolean indicating if only a single line of output is
        expected.
      large_output: Uses a work-around for large shell command output. Without
        this large output will be truncated.
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      If single_line is False, the output of the command as a list of lines,
      otherwise, a string with the unique line of output emitted by the
      command (with the optional newline at the end stripped).
Raises: AdbCommandFailedError if check_return is True and the exit code of the command run on the device is non-zero. CommandFailedError if single_line is True but the output contains two or more lines. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ def env_quote(key, value): if not DeviceUtils._VALID_SHELL_VARIABLE.match(key): raise KeyError('Invalid shell variable name %r' % key) # using double quotes here to allow interpolation of shell variables return '%s=%s' % (key, cmd_helper.DoubleQuote(value)) def run(cmd): return self.adb.Shell(cmd) def handle_check_return(cmd): try: return run(cmd) except device_errors.AdbCommandFailedError as exc: if check_return: raise else: return exc.output def handle_large_command(cmd): if len(cmd) < self._MAX_ADB_COMMAND_LENGTH: return handle_check_return(cmd) else: with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script: self._WriteFileWithPush(script.name, cmd) logging.info('Large shell command will be run from file: %s ...', cmd[:self._MAX_ADB_COMMAND_LENGTH]) return handle_check_return('sh %s' % script.name_quoted) def handle_large_output(cmd, large_output_mode): if large_output_mode: with device_temp_file.DeviceTempFile(self.adb) as large_output_file: cmd = '( %s )>%s' % (cmd, large_output_file.name) logging.debug('Large output mode enabled. Will write output to ' 'device and read results from file.') handle_large_command(cmd) return self.ReadFile(large_output_file.name, force_pull=True) else: try: return handle_large_command(cmd) except device_errors.AdbCommandFailedError as exc: if exc.status is None: logging.exception('No output found for %s', cmd) logging.warning('Attempting to run in large_output mode.') logging.warning('Use RunShellCommand(..., large_output=True) for ' 'shell commands that expect a lot of output.') return handle_large_output(cmd, True) else: raise if not isinstance(cmd, basestring): cmd = ' '.join(cmd_helper.SingleQuote(s) for s in cmd) if env: env = ' '.join(env_quote(k, v) for k, v in env.iteritems()) cmd = '%s %s' % (env, cmd) if cwd: cmd = 'cd %s && %s' % (cmd_helper.SingleQuote(cwd), cmd) if as_root and self.NeedsSU(): # "su -c sh -c" allows using shell features in |cmd| cmd = self._Su('sh -c %s' % cmd_helper.SingleQuote(cmd)) output = handle_large_output(cmd, large_output).splitlines() if single_line: if not output: return '' elif len(output) == 1: return output[0] else: msg = 'one line of output was expected, but got: %s' raise device_errors.CommandFailedError(msg % output, str(self)) else: return output def _RunPipedShellCommand(self, script, **kwargs): PIPESTATUS_LEADER = 'PIPESTATUS: ' script += '; echo "%s${PIPESTATUS[@]}"' % PIPESTATUS_LEADER kwargs['check_return'] = True output = self.RunShellCommand(script, **kwargs) pipestatus_line = output[-1] if not pipestatus_line.startswith(PIPESTATUS_LEADER): logging.error('Pipe exit statuses of shell script missing.') raise device_errors.AdbShellCommandFailedError( script, output, status=None, device_serial=self.adb.GetDeviceSerial()) output = output[:-1] statuses = [ int(s) for s in pipestatus_line[len(PIPESTATUS_LEADER):].split()] if any(statuses): raise device_errors.AdbShellCommandFailedError( script, output, status=statuses, device_serial=self.adb.GetDeviceSerial()) return output @decorators.WithTimeoutAndRetriesFromInstance() def KillAll(self, process_name, exact=False, signum=device_signal.SIGKILL, as_root=False, blocking=False, quiet=False, timeout=None, retries=None): """Kill all processes with the given name on the 
device. Args: process_name: A string containing the name of the process to kill. exact: A boolean indicating whether to kill all processes matching the string |process_name| exactly, or all of those which contain |process_name| as a substring. Defaults to False. signum: An integer containing the signal number to send to kill. Defaults to SIGKILL (9). as_root: A boolean indicating whether the kill should be executed with root privileges. blocking: A boolean indicating whether we should wait until all processes with the given |process_name| are dead. quiet: A boolean indicating whether to ignore the fact that no processes to kill were found. timeout: timeout in seconds retries: number of retries Returns: The number of processes attempted to kill. Raises: CommandFailedError if no process was killed and |quiet| is False. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ procs_pids = self.GetPids(process_name) if exact: procs_pids = {process_name: procs_pids.get(process_name, [])} pids = set(itertools.chain(*procs_pids.values())) if not pids: if quiet: return 0 else: raise device_errors.CommandFailedError( 'No process "%s"' % process_name, str(self)) logging.info( 'KillAll(%r, ...) attempting to kill the following:', process_name) for name, ids in procs_pids.iteritems(): for i in ids: logging.info(' %05s %s', str(i), name) cmd = ['kill', '-%d' % signum] + sorted(pids) self.RunShellCommand(cmd, as_root=as_root, check_return=True) def all_pids_killed(): procs_pids_remain = self.GetPids(process_name) return not pids.intersection(itertools.chain(*procs_pids_remain.values())) if blocking: timeout_retry.WaitFor(all_pids_killed, wait_period=0.1) return len(pids) @decorators.WithTimeoutAndRetriesFromInstance() def StartActivity(self, intent_obj, blocking=False, trace_file_name=None, force_stop=False, timeout=None, retries=None): """Start package's activity on the device. Args: intent_obj: An Intent object to send. blocking: A boolean indicating whether we should wait for the activity to finish launching. trace_file_name: If present, a string that both indicates that we want to profile the activity and contains the path to which the trace should be saved. force_stop: A boolean indicating whether we should stop the activity before starting it. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError if the activity could not be started. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ cmd = ['am', 'start'] if blocking: cmd.append('-W') if trace_file_name: cmd.extend(['--start-profiler', trace_file_name]) if force_stop: cmd.append('-S') cmd.extend(intent_obj.am_args) for line in self.RunShellCommand(cmd, check_return=True): if line.startswith('Error:'): raise device_errors.CommandFailedError(line, str(self)) @decorators.WithTimeoutAndRetriesFromInstance() def StartInstrumentation(self, component, finish=True, raw=False, extras=None, timeout=None, retries=None): if extras is None: extras = {} cmd = ['am', 'instrument'] if finish: cmd.append('-w') if raw: cmd.append('-r') for k, v in extras.iteritems(): cmd.extend(['-e', str(k), str(v)]) cmd.append(component) # Store the package name in a shell variable to help the command stay under # the _MAX_ADB_COMMAND_LENGTH limit. 
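    # Illustrative example (assuming cmd_helper.ShrinkToSnippet replaces
    # occurrences of the package name with a $p reference):
    #   ['am', 'instrument', '-w', 'com.foo.test/Runner'] with p=com.foo.test
    #   becomes "p=com.foo.test;am instrument -w $p/Runner".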
    package = component.split('/')[0]
    shell_snippet = 'p=%s;%s' % (package,
                                 cmd_helper.ShrinkToSnippet(cmd, 'p', package))
    return self.RunShellCommand(shell_snippet, check_return=True,
                                large_output=True)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def BroadcastIntent(self, intent_obj, timeout=None, retries=None):
    """Send a broadcast intent.

    Args:
      intent_obj: An Intent to broadcast.
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    cmd = ['am', 'broadcast'] + intent_obj.am_args
    self.RunShellCommand(cmd, check_return=True)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def GoHome(self, timeout=None, retries=None):
    """Return to the home screen and obtain launcher focus.

    This command launches the home screen and attempts to obtain
    launcher focus until the timeout is reached.

    Args:
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    def is_launcher_focused():
      output = self.RunShellCommand(['dumpsys', 'window', 'windows'],
                                    check_return=True, large_output=True)
      return any(self._LAUNCHER_FOCUSED_RE.match(l) for l in output)

    def dismiss_popups():
      # There is a dialog present; attempt to get rid of it.
      # Not all dialogs can be dismissed with back.
      self.SendKeyEvent(keyevent.KEYCODE_ENTER)
      self.SendKeyEvent(keyevent.KEYCODE_BACK)
      return is_launcher_focused()

    # If Home is already focused, return early to avoid unnecessary work.
    if is_launcher_focused():
      return

    self.StartActivity(
        intent.Intent(action='android.intent.action.MAIN',
                      category='android.intent.category.HOME'),
        blocking=True)

    if not is_launcher_focused():
      timeout_retry.WaitFor(dismiss_popups, wait_period=1)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def ForceStop(self, package, timeout=None, retries=None):
    """Close the application.

    Args:
      package: A string containing the name of the package to stop.
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    cmd = 'p=%s;if [[ "$(ps)" = *$p* ]]; then am force-stop $p; fi'
    self.RunShellCommand(cmd % package, check_return=True)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def ClearApplicationState(
      self, package, permissions=None, timeout=None, retries=None):
    """Clear all state for the given package.

    Args:
      package: A string containing the name of the package to stop.
      permissions: List of permissions to set after clearing data.
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    # Check that the package exists before clearing it for android builds
    # below JB MR2. Necessary because calling pm clear on a package that
    # doesn't exist may never return.
    if ((self.build_version_sdk >= version_codes.JELLY_BEAN_MR2)
        or self._GetApplicationPathsInternal(package)):
      self.RunShellCommand(['pm', 'clear', package], check_return=True)
      self.GrantPermissions(package, permissions)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def SendKeyEvent(self, keycode, timeout=None, retries=None):
    """Sends a keycode to the device.

    See the devil.android.sdk.keyevent module for suitable keycode values.

    Args:
      keycode: An integer keycode to send to the device.
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
""" self.RunShellCommand(['input', 'keyevent', format(keycode, 'd')], check_return=True) PUSH_CHANGED_FILES_DEFAULT_TIMEOUT = 10 * _DEFAULT_TIMEOUT @decorators.WithTimeoutAndRetriesFromInstance( min_default_timeout=PUSH_CHANGED_FILES_DEFAULT_TIMEOUT) def PushChangedFiles(self, host_device_tuples, timeout=None, retries=None, delete_device_stale=False): """Push files to the device, skipping files that don't need updating. When a directory is pushed, it is traversed recursively on the host and all files in it are pushed to the device as needed. Additionally, if delete_device_stale option is True, files that exist on the device but don't exist on the host are deleted. Args: host_device_tuples: A list of (host_path, device_path) tuples, where |host_path| is an absolute path of a file or directory on the host that should be minimially pushed to the device, and |device_path| is an absolute path of the destination on the device. timeout: timeout in seconds retries: number of retries delete_device_stale: option to delete stale files on device Raises: CommandFailedError on failure. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ all_changed_files = [] all_stale_files = [] missing_dirs = [] cache_commit_funcs = [] for h, d in host_device_tuples: assert os.path.isabs(h) and posixpath.isabs(d) changed_files, up_to_date_files, stale_files, cache_commit_func = ( self._GetChangedAndStaleFiles(h, d, delete_device_stale)) all_changed_files += changed_files all_stale_files += stale_files cache_commit_funcs.append(cache_commit_func) if (os.path.isdir(h) and changed_files and not up_to_date_files and not stale_files): missing_dirs.append(d) if delete_device_stale and all_stale_files: self.RunShellCommand(['rm', '-f'] + all_stale_files, check_return=True) if all_changed_files: if missing_dirs: self.RunShellCommand(['mkdir', '-p'] + missing_dirs, check_return=True) self._PushFilesImpl(host_device_tuples, all_changed_files) for func in cache_commit_funcs: func() def _GetChangedAndStaleFiles(self, host_path, device_path, track_stale=False): """Get files to push and delete Args: host_path: an absolute path of a file or directory on the host device_path: an absolute path of a file or directory on the device track_stale: whether to bother looking for stale files (slower) Returns: a three-element tuple 1st element: a list of (host_files_path, device_files_path) tuples to push 2nd element: a list of host_files_path that are up-to-date 3rd element: a list of stale files under device_path, or [] when track_stale == False """ try: # Length calculations below assume no trailing /. 
host_path = host_path.rstrip('/') device_path = device_path.rstrip('/') specific_device_paths = [device_path] ignore_other_files = not track_stale and os.path.isdir(host_path) if ignore_other_files: specific_device_paths = [] for root, _, filenames in os.walk(host_path): relative_dir = root[len(host_path) + 1:] specific_device_paths.extend( posixpath.join(device_path, relative_dir, f) for f in filenames) def calculate_host_checksums(): return md5sum.CalculateHostMd5Sums([host_path]) def calculate_device_checksums(): if self._enable_device_files_cache: cache_entry = self._cache['device_path_checksums'].get(device_path) if cache_entry and cache_entry[0] == ignore_other_files: return dict(cache_entry[1]) sums = md5sum.CalculateDeviceMd5Sums(specific_device_paths, self) cache_entry = [ignore_other_files, sums] self._cache['device_path_checksums'][device_path] = cache_entry return dict(sums) host_checksums, device_checksums = reraiser_thread.RunAsync(( calculate_host_checksums, calculate_device_checksums)) except EnvironmentError as e: logging.warning('Error calculating md5: %s', e) return ([(host_path, device_path)], [], [], lambda: 0) to_push = [] up_to_date = [] to_delete = [] if os.path.isfile(host_path): host_checksum = host_checksums.get(host_path) device_checksum = device_checksums.get(device_path) if host_checksum == device_checksum: up_to_date.append(host_path) else: to_push.append((host_path, device_path)) else: for host_abs_path, host_checksum in host_checksums.iteritems(): device_abs_path = posixpath.join( device_path, os.path.relpath(host_abs_path, host_path)) device_checksum = device_checksums.pop(device_abs_path, None) if device_checksum == host_checksum: up_to_date.append(host_abs_path) else: to_push.append((host_abs_path, device_abs_path)) to_delete = device_checksums.keys() def cache_commit_func(): new_sums = {posixpath.join(device_path, path[len(host_path) + 1:]): val for path, val in host_checksums.iteritems()} cache_entry = [ignore_other_files, new_sums] self._cache['device_path_checksums'][device_path] = cache_entry return (to_push, up_to_date, to_delete, cache_commit_func) def _ComputeDeviceChecksumsForApks(self, package_name): ret = self._cache['package_apk_checksums'].get(package_name) if ret is None: device_paths = self._GetApplicationPathsInternal(package_name) file_to_checksums = md5sum.CalculateDeviceMd5Sums(device_paths, self) ret = set(file_to_checksums.values()) self._cache['package_apk_checksums'][package_name] = ret return ret def _ComputeStaleApks(self, package_name, host_apk_paths): def calculate_host_checksums(): return md5sum.CalculateHostMd5Sums(host_apk_paths) def calculate_device_checksums(): return self._ComputeDeviceChecksumsForApks(package_name) host_checksums, device_checksums = reraiser_thread.RunAsync(( calculate_host_checksums, calculate_device_checksums)) stale_apks = [k for (k, v) in host_checksums.iteritems() if v not in device_checksums] return stale_apks, set(host_checksums.values()) def _PushFilesImpl(self, host_device_tuples, files): if not files: return size = sum(host_utils.GetRecursiveDiskUsage(h) for h, _ in files) file_count = len(files) dir_size = sum(host_utils.GetRecursiveDiskUsage(h) for h, _ in host_device_tuples) dir_file_count = 0 for h, _ in host_device_tuples: if os.path.isdir(h): dir_file_count += sum(len(f) for _r, _d, f in os.walk(h)) else: dir_file_count += 1 push_duration = self._ApproximateDuration( file_count, file_count, size, False) dir_push_duration = self._ApproximateDuration( len(host_device_tuples), 
dir_file_count, dir_size, False) zip_duration = self._ApproximateDuration(1, 1, size, True) if dir_push_duration < push_duration and dir_push_duration < zip_duration: self._PushChangedFilesIndividually(host_device_tuples) elif push_duration < zip_duration: self._PushChangedFilesIndividually(files) elif self._commands_installed is False: # Already tried and failed to install unzip command. self._PushChangedFilesIndividually(files) elif not self._PushChangedFilesZipped( files, [d for _, d in host_device_tuples]): self._PushChangedFilesIndividually(files) def _MaybeInstallCommands(self): if self._commands_installed is None: try: if not install_commands.Installed(self): install_commands.InstallCommands(self) self._commands_installed = True except device_errors.CommandFailedError as e: logging.warning('unzip not available: %s', str(e)) self._commands_installed = False return self._commands_installed @staticmethod def _ApproximateDuration(adb_calls, file_count, byte_count, is_zipping): # We approximate the time to push a set of files to a device as: # t = c1 * a + c2 * f + c3 + b / c4 + b / (c5 * c6), where # t: total time (sec) # c1: adb call time delay (sec) # a: number of times adb is called (unitless) # c2: push time delay (sec) # f: number of files pushed via adb (unitless) # c3: zip time delay (sec) # c4: zip rate (bytes/sec) # b: total number of bytes (bytes) # c5: transfer rate (bytes/sec) # c6: compression ratio (unitless) # All of these are approximations. ADB_CALL_PENALTY = 0.1 # seconds ADB_PUSH_PENALTY = 0.01 # seconds ZIP_PENALTY = 2.0 # seconds ZIP_RATE = 10000000.0 # bytes / second TRANSFER_RATE = 2000000.0 # bytes / second COMPRESSION_RATIO = 2.0 # unitless adb_call_time = ADB_CALL_PENALTY * adb_calls adb_push_setup_time = ADB_PUSH_PENALTY * file_count if is_zipping: zip_time = ZIP_PENALTY + byte_count / ZIP_RATE transfer_time = byte_count / (TRANSFER_RATE * COMPRESSION_RATIO) else: zip_time = 0 transfer_time = byte_count / TRANSFER_RATE return adb_call_time + adb_push_setup_time + zip_time + transfer_time def _PushChangedFilesIndividually(self, files): for h, d in files: self.adb.Push(h, d) def _PushChangedFilesZipped(self, files, dirs): with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file: zip_proc = multiprocessing.Process( target=DeviceUtils._CreateDeviceZip, args=(zip_file.name, files)) zip_proc.start() try: # While it's zipping, ensure the unzip command exists on the device. if not self._MaybeInstallCommands(): zip_proc.terminate() return False # Warm up NeedsSU cache while we're still zipping. self.NeedsSU() with device_temp_file.DeviceTempFile( self.adb, suffix='.zip') as device_temp: zip_proc.join() self.adb.Push(zip_file.name, device_temp.name) quoted_dirs = ' '.join(cmd_helper.SingleQuote(d) for d in dirs) self.RunShellCommand( 'unzip %s&&chmod -R 777 %s' % (device_temp.name, quoted_dirs), as_root=True, env={'PATH': '%s:$PATH' % install_commands.BIN_DIR}, check_return=True) finally: if zip_proc.is_alive(): zip_proc.terminate() return True @staticmethod def _CreateDeviceZip(zip_path, host_device_tuples): with zipfile.ZipFile(zip_path, 'w') as zip_file: for host_path, device_path in host_device_tuples: zip_utils.WriteToZipFile(zip_file, host_path, device_path) # TODO(nednguyen): remove this and migrate the callsite to PathExists(). def FileExists(self, device_path, timeout=None, retries=None): """Checks whether the given file exists on the device. Arguments are the same as PathExists. 
""" return self.PathExists(device_path, timeout=timeout, retries=retries) def PathExists(self, device_paths, as_root=False, timeout=None, retries=None): """Checks whether the given path(s) exists on the device. Args: device_path: A string containing the absolute path to the file on the device, or an iterable of paths to check. as_root: Whether root permissions should be use to check for the existence of the given path(s). timeout: timeout in seconds retries: number of retries Returns: True if the all given paths exist on the device, False otherwise. Raises: CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ paths = device_paths if isinstance(paths, basestring): paths = (paths,) condition = ' -a '.join('-e %s' % cmd_helper.SingleQuote(p) for p in paths) cmd = 'test %s' % condition try: self.RunShellCommand(cmd, as_root=as_root, check_return=True, timeout=timeout, retries=retries) return True except device_errors.CommandFailedError: return False @decorators.WithTimeoutAndRetriesFromInstance() def PullFile(self, device_path, host_path, timeout=None, retries=None): """Pull a file from the device. Args: device_path: A string containing the absolute path of the file to pull from the device. host_path: A string containing the absolute path of the destination on the host. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError on failure. CommandTimeoutError on timeout. """ # Create the base dir if it doesn't exist already dirname = os.path.dirname(host_path) if dirname and not os.path.exists(dirname): os.makedirs(dirname) self.adb.Pull(device_path, host_path) def _ReadFileWithPull(self, device_path): try: d = tempfile.mkdtemp() host_temp_path = os.path.join(d, 'tmp_ReadFileWithPull') self.adb.Pull(device_path, host_temp_path) with open(host_temp_path, 'r') as host_temp: return host_temp.read() finally: if os.path.exists(d): shutil.rmtree(d) _LS_RE = re.compile( r'(?P<perms>\S+) +(?P<owner>\S+) +(?P<group>\S+) +(?:(?P<size>\d+) +)?' + r'(?P<date>\S+) +(?P<time>\S+) +(?P<name>.+)$') @decorators.WithTimeoutAndRetriesFromInstance() def ReadFile(self, device_path, as_root=False, force_pull=False, timeout=None, retries=None): """Reads the contents of a file from the device. Args: device_path: A string containing the absolute path of the file to read from the device. as_root: A boolean indicating whether the read should be executed with root privileges. force_pull: A boolean indicating whether to force the operation to be performed by pulling a file from the device. The default is, when the contents are short, to retrieve the contents using cat instead. timeout: timeout in seconds retries: number of retries Returns: The contents of |device_path| as a string. Contents are intepreted using universal newlines, so the caller will see them encoded as '\n'. Also, all lines will be terminated. Raises: AdbCommandFailedError if the file can't be read. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ def get_size(path): # TODO(jbudorick): Implement a generic version of Stat() that handles # as_root=True, then switch this implementation to use that. 
      ls_out = self.RunShellCommand(['ls', '-l', device_path],
                                    as_root=as_root,
                                    check_return=True)
      for line in ls_out:
        m = self._LS_RE.match(line)
        if m and m.group('name') == posixpath.basename(device_path):
          return int(m.group('size'))
      logging.warning('Could not determine size of %s.', device_path)
      return None

    if (not force_pull
        and 0 < get_size(device_path) <= self._MAX_ADB_OUTPUT_LENGTH):
      return _JoinLines(self.RunShellCommand(
          ['cat', device_path], as_root=as_root, check_return=True))
    elif as_root and self.NeedsSU():
      with device_temp_file.DeviceTempFile(self.adb) as device_temp:
        cmd = 'SRC=%s DEST=%s;cp "$SRC" "$DEST" && chmod 666 "$DEST"' % (
            cmd_helper.SingleQuote(device_path),
            cmd_helper.SingleQuote(device_temp.name))
        self.RunShellCommand(cmd, as_root=True, check_return=True)
        return self._ReadFileWithPull(device_temp.name)
    else:
      return self._ReadFileWithPull(device_path)

  def _WriteFileWithPush(self, device_path, contents):
    with tempfile.NamedTemporaryFile() as host_temp:
      host_temp.write(contents)
      host_temp.flush()
      self.adb.Push(host_temp.name, device_path)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def WriteFile(self, device_path, contents, as_root=False, force_push=False,
                timeout=None, retries=None):
    """Writes |contents| to a file on the device.

    Args:
      device_path: A string containing the absolute path to the file to write
        on the device.
      contents: A string containing the data to write to the device.
      as_root: A boolean indicating whether the write should be executed with
        root privileges (if available).
      force_push: A boolean indicating whether to force the operation to be
        performed by pushing a file to the device. The default is, when the
        contents are short, to pass the contents using a shell script instead.
      timeout: timeout in seconds
      retries: number of retries

    Raises:
      CommandFailedError if the file could not be written on the device.
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    if not force_push and len(contents) < self._MAX_ADB_COMMAND_LENGTH:
      # If the contents are small, for efficiency we write the contents with
      # a shell command rather than pushing a file.
      cmd = 'echo -n %s > %s' % (cmd_helper.SingleQuote(contents),
                                 cmd_helper.SingleQuote(device_path))
      self.RunShellCommand(cmd, as_root=as_root, check_return=True)
    elif as_root and self.NeedsSU():
      # adb does not allow pushing with su, so we first push to a temp file
      # in a safe location, and then copy it to the desired location with su.
      with device_temp_file.DeviceTempFile(self.adb) as device_temp:
        self._WriteFileWithPush(device_temp.name, contents)
        # Here we need 'cp' rather than 'mv' because the temp and
        # destination files might be on different file systems (e.g.
        # on internal storage and an external sd card).
        self.RunShellCommand(['cp', device_temp.name, device_path],
                             as_root=True, check_return=True)
    else:
      # If root is not needed, we can push directly to the desired location.
      self._WriteFileWithPush(device_path, contents)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def Ls(self, device_path, timeout=None, retries=None):
    """Lists the contents of a directory on the device.

    Args:
      device_path: A string containing the path of the directory on the
        device to list.
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      A list of pairs (filename, stat) for each file found in the directory,
      where the stat object has the properties: st_mode, st_size, and
      st_time.

    Raises:
      AdbCommandFailedError if |device_path| does not specify a valid and
        accessible directory on the device.
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    return self.adb.Ls(device_path)

  @decorators.WithTimeoutAndRetriesFromInstance()
  def Stat(self, device_path, timeout=None, retries=None):
    """Get the stat attributes of a file or directory on the device.

    Args:
      device_path: A string containing the path of the file or directory on
        the device from which to get the attributes.
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      A stat object with the properties: st_mode, st_size, and st_time

    Raises:
      CommandFailedError if device_path cannot be found on the device.
      CommandTimeoutError on timeout.
      DeviceUnreachableError on missing device.
    """
    dirname, target = device_path.rsplit('/', 1)
    for filename, stat in self.adb.Ls(dirname):
      if filename == target:
        return stat
    raise device_errors.CommandFailedError(
        'Cannot find file or directory: %r' % device_path, str(self))

  @decorators.WithTimeoutAndRetriesFromInstance()
  def SetJavaAsserts(self, enabled, timeout=None, retries=None):
    """Enables or disables Java asserts.

    Args:
      enabled: A boolean indicating whether Java asserts should be enabled
        or disabled.
      timeout: timeout in seconds
      retries: number of retries

    Returns:
      True if the device-side property changed and a restart is required as a
      result, False otherwise.

    Raises:
      CommandTimeoutError on timeout.
    """
    def find_property(lines, property_name):
      for index, line in enumerate(lines):
        if line.strip() == '':
          continue
        key_value = tuple(s.strip() for s in line.split('=', 1))
        if len(key_value) != 2:
          continue
        key, value = key_value
        if key == property_name:
          return index, value
      return None, ''

    new_value = 'all' if enabled else ''

    # First ensure the desired property is persisted.
    try:
      properties = self.ReadFile(self.LOCAL_PROPERTIES_PATH).splitlines()
    except device_errors.CommandFailedError:
      properties = []
    index, value = find_property(properties, self.JAVA_ASSERT_PROPERTY)
    if new_value != value:
      if new_value:
        new_line = '%s=%s' % (self.JAVA_ASSERT_PROPERTY, new_value)
        if index is None:
          properties.append(new_line)
        else:
          properties[index] = new_line
      else:
        assert index is not None  # since new_value == '' and new_value != value
        properties.pop(index)
      self.WriteFile(self.LOCAL_PROPERTIES_PATH, _JoinLines(properties))

    # Next, check the current runtime value is what we need, and
    # if not, set it and report that a reboot is required.
    value = self.GetProp(self.JAVA_ASSERT_PROPERTY)
    if new_value != value:
      self.SetProp(self.JAVA_ASSERT_PROPERTY, new_value)
      return True
    else:
      return False

  def GetLanguage(self, cache=False):
    """Returns the language setting on the device.

    Args:
      cache: Whether to use cached properties when available.
    """
    return self.GetProp('persist.sys.language', cache=cache)

  def GetCountry(self, cache=False):
    """Returns the country setting on the device.

    Args:
      cache: Whether to use cached properties when available.
    """
    return self.GetProp('persist.sys.country', cache=cache)

  @property
  def screen_density(self):
    """Returns the screen density of the device."""
    DPI_TO_DENSITY = {
        120: 'ldpi',
        160: 'mdpi',
        240: 'hdpi',
        320: 'xhdpi',
        480: 'xxhdpi',
        640: 'xxxhdpi',
    }
    return DPI_TO_DENSITY.get(self.pixel_density, 'tvdpi')

  @property
  def pixel_density(self):
    return int(self.GetProp('ro.sf.lcd_density', cache=True))

  @property
  def build_description(self):
    """Returns the build description of the system.

    For example:
      nakasi-user 4.4.4 KTU84P 1227136 release-keys
    """
    return self.GetProp('ro.build.description', cache=True)

  @property
  def build_fingerprint(self):
    """Returns the build fingerprint of the system.
For example: google/nakasi/grouper:4.4.4/KTU84P/1227136:user/release-keys """ return self.GetProp('ro.build.fingerprint', cache=True) @property def build_id(self): """Returns the build ID of the system (e.g. 'KTU84P').""" return self.GetProp('ro.build.id', cache=True) @property def build_product(self): """Returns the build product of the system (e.g. 'grouper').""" return self.GetProp('ro.build.product', cache=True) @property def build_type(self): """Returns the build type of the system (e.g. 'user').""" return self.GetProp('ro.build.type', cache=True) @property def build_version_sdk(self): """Returns the build version sdk of the system as a number (e.g. 19). For version code numbers see: http://developer.android.com/reference/android/os/Build.VERSION_CODES.html For named constants see devil.android.sdk.version_codes Raises: CommandFailedError if the build version sdk is not a number. """ value = self.GetProp('ro.build.version.sdk', cache=True) try: return int(value) except ValueError: raise device_errors.CommandFailedError( 'Invalid build version sdk: %r' % value) @property def product_cpu_abi(self): """Returns the product cpu abi of the device (e.g. 'armeabi-v7a').""" return self.GetProp('ro.product.cpu.abi', cache=True) @property def product_model(self): """Returns the name of the product model (e.g. 'Nexus 7').""" return self.GetProp('ro.product.model', cache=True) @property def product_name(self): """Returns the product name of the device (e.g. 'nakasi').""" return self.GetProp('ro.product.name', cache=True) @property def product_board(self): """Returns the product board name of the device (e.g. 'shamu').""" return self.GetProp('ro.product.board', cache=True) def GetProp(self, property_name, cache=False, timeout=DEFAULT, retries=DEFAULT): """Gets a property from the device. Args: property_name: A string containing the name of the property to get from the device. cache: Whether to use cached properties when available. timeout: timeout in seconds retries: number of retries Returns: The value of the device's |property_name| property. Raises: CommandTimeoutError on timeout. """ assert isinstance(property_name, basestring), ( "property_name is not a string: %r" % property_name) prop_cache = self._cache['getprop'] if cache: if property_name not in prop_cache: # It takes ~120ms to query a single property, and ~130ms to query all # properties. So, when caching we always query all properties. output = self.RunShellCommand( ['getprop'], check_return=True, large_output=True, timeout=self._default_timeout if timeout is DEFAULT else timeout, retries=self._default_retries if retries is DEFAULT else retries) prop_cache.clear() for key, value in _GETPROP_RE.findall(''.join(output)): prop_cache[key] = value if property_name not in prop_cache: prop_cache[property_name] = '' else: # timeout and retries are handled down at run shell, because we don't # want to apply them in the other branch when reading from the cache value = self.RunShellCommand( ['getprop', property_name], single_line=True, check_return=True, timeout=self._default_timeout if timeout is DEFAULT else timeout, retries=self._default_retries if retries is DEFAULT else retries) prop_cache[property_name] = value return prop_cache[property_name] @decorators.WithTimeoutAndRetriesFromInstance() def SetProp(self, property_name, value, check=False, timeout=None, retries=None): """Sets a property on the device. Args: property_name: A string containing the name of the property to set on the device. 
value: A string containing the value to set to the property on the device. check: A boolean indicating whether to check that the property was successfully set on the device. timeout: timeout in seconds retries: number of retries Raises: CommandFailedError if check is true and the property was not correctly set on the device (e.g. because it is not rooted). CommandTimeoutError on timeout. """ assert isinstance(property_name, basestring), ( "property_name is not a string: %r" % property_name) assert isinstance(value, basestring), "value is not a string: %r" % value self.RunShellCommand(['setprop', property_name, value], check_return=True) prop_cache = self._cache['getprop'] if property_name in prop_cache: del prop_cache[property_name] # TODO(perezju) remove the option and make the check mandatory, but using a # single shell script to both set- and getprop. if check and value != self.GetProp(property_name, cache=False): raise device_errors.CommandFailedError( 'Unable to set property %r on the device to %r' % (property_name, value), str(self)) @decorators.WithTimeoutAndRetriesFromInstance() def GetABI(self, timeout=None, retries=None): """Gets the device main ABI. Args: timeout: timeout in seconds retries: number of retries Returns: The device's main ABI name. Raises: CommandTimeoutError on timeout. """ return self.GetProp('ro.product.cpu.abi', cache=True) @decorators.WithTimeoutAndRetriesFromInstance() def GetPids(self, process_name, timeout=None, retries=None): """Returns the PIDs of processes with the given name. Note that the |process_name| is often the package name. Args: process_name: A string containing the process name to get the PIDs for. timeout: timeout in seconds retries: number of retries Returns: A dict mapping process name to a list of PIDs for each process that contained the provided |process_name|. Raises: CommandTimeoutError on timeout. DeviceUnreachableError on missing device. """ procs_pids = collections.defaultdict(list) try: ps_output = self._RunPipedShellCommand( 'ps | grep -F %s' % cmd_helper.SingleQuote(process_name)) except device_errors.AdbShellCommandFailedError as e: if e.status and isinstance(e.status, list) and not e.status[0]: # If ps succeeded but grep failed, there were no processes with the # given name. return procs_pids else: raise for line in ps_output: try: ps_data = line.split() if process_name in ps_data[-1]: pid, process = ps_data[1], ps_data[-1] procs_pids[process].append(pid) except IndexError: pass return procs_pids @decorators.WithTimeoutAndRetriesFromInstance() def TakeScreenshot(self, host_path=None, timeout=None, retries=None): """Takes a screenshot of the device. Args: host_path: A string containing the path on the host to save the screenshot to. If None, a file name in the current directory will be generated. timeout: timeout in seconds retries: number of retries Returns: The name of the file on the host to which the screenshot was saved. Raises: CommandFailedError on failure. CommandTimeoutError on timeout. DeviceUnreachableError on missing device. 
""" if not host_path: host_path = os.path.abspath('screenshot-%s-%s.png' % ( self.adb.GetDeviceSerial(), _GetTimeStamp())) with device_temp_file.DeviceTempFile(self.adb, suffix='.png') as device_tmp: self.RunShellCommand(['/system/bin/screencap', '-p', device_tmp.name], check_return=True) self.PullFile(device_tmp.name, host_path) return host_path @decorators.WithTimeoutAndRetriesFromInstance() def GetMemoryUsageForPid(self, pid, timeout=None, retries=None): """Gets the memory usage for the given PID. Args: pid: PID of the process. timeout: timeout in seconds retries: number of retries Returns: A dict containing memory usage statistics for the PID. May include: Size, Rss, Pss, Shared_Clean, Shared_Dirty, Private_Clean, Private_Dirty, VmHWM Raises: CommandTimeoutError on timeout. """ result = collections.defaultdict(int) try: result.update(self._GetMemoryUsageForPidFromSmaps(pid)) except device_errors.CommandFailedError: logging.exception('Error getting memory usage from smaps') try: result.update(self._GetMemoryUsageForPidFromStatus(pid)) except device_errors.CommandFailedError: logging.exception('Error getting memory usage from status') return result @decorators.WithTimeoutAndRetriesFromInstance() def DismissCrashDialogIfNeeded(self, timeout=None, retries=None): """Dismiss the error/ANR dialog if present. Returns: Name of the crashed package if a dialog is focused, None otherwise. """ def _FindFocusedWindow(): match = None # TODO(jbudorick): Try to grep the output on the device instead of using # large_output if/when DeviceUtils exposes a public interface for piped # shell command handling. for line in self.RunShellCommand(['dumpsys', 'window', 'windows'], check_return=True, large_output=True): match = re.match(_CURRENT_FOCUS_CRASH_RE, line) if match: break return match match = _FindFocusedWindow() if not match: return None package = match.group(2) logging.warning('Trying to dismiss %s dialog for %s', *match.groups()) self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT) self.SendKeyEvent(keyevent.KEYCODE_DPAD_RIGHT) self.SendKeyEvent(keyevent.KEYCODE_ENTER) match = _FindFocusedWindow() if match: logging.error('Still showing a %s dialog for %s', *match.groups()) return package def _GetMemoryUsageForPidFromSmaps(self, pid): SMAPS_COLUMNS = ( 'Size', 'Rss', 'Pss', 'Shared_Clean', 'Shared_Dirty', 'Private_Clean', 'Private_Dirty') showmap_out = self._RunPipedShellCommand( 'showmap %d | grep TOTAL' % int(pid), as_root=True) split_totals = showmap_out[-1].split() if (not split_totals or len(split_totals) != 9 or split_totals[-1] != 'TOTAL'): raise device_errors.CommandFailedError( 'Invalid output from showmap: %s' % '\n'.join(showmap_out)) return dict(itertools.izip(SMAPS_COLUMNS, (int(n) for n in split_totals))) def _GetMemoryUsageForPidFromStatus(self, pid): for line in self.ReadFile( '/proc/%s/status' % str(pid), as_root=True).splitlines(): if line.startswith('VmHWM:'): return {'VmHWM': int(line.split()[1])} raise device_errors.CommandFailedError( 'Could not find memory peak value for pid %s', str(pid)) def GetLogcatMonitor(self, *args, **kwargs): """Returns a new LogcatMonitor associated with this device. Parameters passed to this function are passed directly to |logcat_monitor.LogcatMonitor| and are documented there. 
""" return logcat_monitor.LogcatMonitor(self.adb, *args, **kwargs) def GetClientCache(self, client_name): """Returns client cache.""" if client_name not in self._client_caches: self._client_caches[client_name] = {} return self._client_caches[client_name] def _ClearCache(self): """Clears all caches.""" for client in self._client_caches: self._client_caches[client].clear() self._cache = { # Map of packageId -> list of on-device .apk paths 'package_apk_paths': {}, # Set of packageId that were loaded from LoadCacheData and not yet # verified. 'package_apk_paths_to_verify': set(), # Map of packageId -> set of on-device .apk checksums 'package_apk_checksums': {}, # Map of property_name -> value 'getprop': {}, # Map of device_path -> [ignore_other_files, map of path->checksum] 'device_path_checksums': {}, } def LoadCacheData(self, data): """Initializes the cache from data created using DumpCacheData.""" obj = json.loads(data) self._cache['package_apk_paths'] = obj.get('package_apk_paths', {}) # When using a cache across script invokations, verify that apps have # not been uninstalled. self._cache['package_apk_paths_to_verify'] = set( self._cache['package_apk_paths'].iterkeys()) package_apk_checksums = obj.get('package_apk_checksums', {}) for k, v in package_apk_checksums.iteritems(): package_apk_checksums[k] = set(v) self._cache['package_apk_checksums'] = package_apk_checksums device_path_checksums = obj.get('device_path_checksums', {}) self._cache['device_path_checksums'] = device_path_checksums def DumpCacheData(self): """Dumps the current cache state to a string.""" obj = {} obj['package_apk_paths'] = self._cache['package_apk_paths'] obj['package_apk_checksums'] = self._cache['package_apk_checksums'] # JSON can't handle sets. for k, v in obj['package_apk_checksums'].iteritems(): obj['package_apk_checksums'][k] = list(v) obj['device_path_checksums'] = self._cache['device_path_checksums'] return json.dumps(obj, separators=(',', ':')) @classmethod def parallel(cls, devices, async=False): """Creates a Parallelizer to operate over the provided list of devices. Args: devices: A list of either DeviceUtils instances or objects from from which DeviceUtils instances can be constructed. If None, all attached devices will be used. async: If true, returns a Parallelizer that runs operations asynchronously. Returns: A Parallelizer operating over |devices|. Raises: device_errors.NoDevicesError: If no devices are passed. 
""" if not devices: raise device_errors.NoDevicesError() devices = [d if isinstance(d, cls) else cls(d) for d in devices] if async: return parallelizer.Parallelizer(devices) else: return parallelizer.SyncParallelizer(devices) @classmethod def HealthyDevices(cls, blacklist=None, **kwargs): blacklisted_devices = blacklist.Read() if blacklist else [] def blacklisted(adb): if adb.GetDeviceSerial() in blacklisted_devices: logging.warning('Device %s is blacklisted.', adb.GetDeviceSerial()) return True return False devices = [] for adb in adb_wrapper.AdbWrapper.Devices(): if not blacklisted(adb): devices.append(cls(_CreateAdbWrapper(adb), **kwargs)) return devices @decorators.WithTimeoutAndRetriesFromInstance() def RestartAdbd(self, timeout=None, retries=None): logging.info('Restarting adbd on device.') with device_temp_file.DeviceTempFile(self.adb, suffix='.sh') as script: self.WriteFile(script.name, _RESTART_ADBD_SCRIPT) self.RunShellCommand(['source', script.name], as_root=True) self.adb.WaitForDevice() @decorators.WithTimeoutAndRetriesFromInstance() def GrantPermissions(self, package, permissions, timeout=None, retries=None): # Permissions only need to be set on M and above because of the changes to # the permission model. if not permissions or self.build_version_sdk < version_codes.MARSHMALLOW: return # TODO(rnephew): After permission blacklist is complete, switch to using # &&s instead of ;s. cmd = '' logging.info('Setting permissions for %s.', package) permissions = [p for p in permissions if p not in _PERMISSIONS_BLACKLIST] if ('android.permission.WRITE_EXTERNAL_STORAGE' in permissions and 'android.permission.READ_EXTERNAL_STORAGE' not in permissions): permissions.append('android.permission.READ_EXTERNAL_STORAGE') cmd = ';'.join('pm grant %s %s' % (package, p) for p in permissions) if cmd: output = self.RunShellCommand(cmd) if output: logging.warning('Possible problem when granting permissions. Blacklist ' 'may need to be updated.') for line in output: logging.warning(' %s', line) @decorators.WithTimeoutAndRetriesFromInstance() def IsScreenOn(self, timeout=None, retries=None): """Determines if screen is on. Dumpsys input_method exposes screen on/off state. Below is an explination of the states. Pre-L: On: mScreenOn=true Off: mScreenOn=false L+: On: mInteractive=true Off: mInteractive=false Returns: True if screen is on, false if it is off. Raises: device_errors.CommandFailedError: If screen state cannot be found. """ if self.build_version_sdk < version_codes.LOLLIPOP: input_check = 'mScreenOn' check_value = 'mScreenOn=true' else: input_check = 'mInteractive' check_value = 'mInteractive=true' dumpsys_out = self._RunPipedShellCommand( 'dumpsys input_method | grep %s' % input_check) if not dumpsys_out: raise device_errors.CommandFailedError( 'Unable to detect screen state', str(self)) return check_value in dumpsys_out[0] @decorators.WithTimeoutAndRetriesFromInstance() def SetScreen(self, on, timeout=None, retries=None): """Turns screen on and off. Args: on: bool to decide state to switch to. True = on False = off. """ def screen_test(): return self.IsScreenOn() == on if screen_test(): logging.info('Screen already in expected state.') return self.RunShellCommand('input keyevent 26') timeout_retry.WaitFor(screen_test, wait_period=1)
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var CameraFill = { name: 'camera', theme: 'fill', nameWithTheme: 'camera-fill', tag: 'svg', attrs: { xmlns: 'http://www.w3.org/2000/svg', class: 'icon', viewBox: '0 0 1024 1024' }, children: [ { tag: 'path', attrs: { d: 'M864 260H728l-32.4-90.8C691 156.5 679 148 665.4 148H358.6c-13.5 0-25.6 8.5-30.1 21.2L296 260H160c-44.2 0-80 35.8-80 80v456c0 44.2 35.8 80 80 80h704c44.2 0 80-35.8 80-80V340c0-44.2-35.8-80-80-80zM512 716c-88.4 0-160-71.6-160-160s71.6-160 160-160 160 71.6 160 160-71.6 160-160 160zm-96-160a96 96 0 1 0 192 0 96 96 0 1 0-192 0z' }, children: [] } ] }; exports.default = CameraFill;
const babel = require('@babel/core'); const expect = require('expect'); const path = require('path'); const plugin = require('../index'); describe('babel-plugin-auto-symbol-description', () => { function expectTransform(source, transformed) { const { code } = babel.transform(source, { plugins: [plugin] }); expect(code).toBe(transformed); } function expectNoTransform(source) { expectTransform(source, source); } describe('assignment', () => { it('assigns identifier as description', () => { expectTransform('s1 = Symbol();', 's1 = Symbol("s1");'); }); it('ignores symbols with description', () => { expectTransform('s1 = Symbol("a");', 's1 = Symbol("a");'); }); it('only works with = operator', () => { expectNoTransform('s1 += Symbol();'); }); }); describe('variable declaration', () => { it('assigns identifier as description', () => { expectTransform('var s1 = Symbol();', 'var s1 = Symbol("s1");'); expectTransform( 'var s1 = Symbol(), s2 = Symbol();', 'var s1 = Symbol("s1"),\n s2 = Symbol("s2");' ); }); it('ignores symbols with description', () => { expectTransform('var s1 = Symbol("a");', 'var s1 = Symbol("a");'); }); }); it('prepends description with filename', () => { const { code } = babel.transformFileSync( path.join(__dirname, 'fixtures', 'TodoActions.js'), { plugins: [plugin] } ); expect(code).toBe( '"use strict";\n\nvar CREATE = Symbol("TodoActions.CREATE");' ); }); });
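/*
 * The plugin under test lives in ../index, which is not included in this dump.
 * The visitor below is a minimal sketch of what such a plugin might look like,
 * assuming it rewrites bare Symbol() calls; it covers the assignment and
 * variable-declaration cases exercised above but omits the filename-prefix
 * behavior checked in the last test. The isBareSymbolCall helper is
 * hypothetical, not an API of the real plugin.
 */
module.exports = function ({ types: t }) {
  // True for a call to the global Symbol with no arguments.
  function isBareSymbolCall(node) {
    return (
      t.isCallExpression(node) &&
      t.isIdentifier(node.callee, { name: 'Symbol' }) &&
      node.arguments.length === 0
    );
  }

  return {
    name: 'auto-symbol-description-sketch',
    visitor: {
      // s1 = Symbol();  ->  s1 = Symbol("s1");  (plain = operator only)
      AssignmentExpression(path) {
        const { operator, left, right } = path.node;
        if (operator === '=' && t.isIdentifier(left) && isBareSymbolCall(right)) {
          right.arguments.push(t.stringLiteral(left.name));
        }
      },
      // var s1 = Symbol();  ->  var s1 = Symbol("s1");
      VariableDeclarator(path) {
        const { id, init } = path.node;
        if (t.isIdentifier(id) && init && isBareSymbolCall(init)) {
          init.arguments.push(t.stringLiteral(id.name));
        }
      },
    },
  };
};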
/*! * OOUI v0.44.0 * https://www.mediawiki.org/wiki/OOUI * * Copyright 2011–2022 OOUI Team and other contributors. * Released under the MIT license * http://oojs.mit-license.org * * Date: 2022-05-17T17:50:55Z */ ( function ( OO ) { 'use strict'; /** * Toolbars are complex interface components that permit users to easily access a variety * of {@link OO.ui.Tool tools} (e.g., formatting commands) and actions, which are additional * commands that are part of the toolbar, but not configured as tools. * * Individual tools are customized and then registered with a * {@link OO.ui.ToolFactory tool factory}, which creates the tools on demand. Each tool has a * symbolic name (used when registering the tool), a title (e.g., ‘Insert image’), and an icon. * * Individual tools are organized in {@link OO.ui.ToolGroup toolgroups}, which can be * {@link OO.ui.MenuToolGroup menus} of tools, {@link OO.ui.ListToolGroup lists} of tools, or a * single {@link OO.ui.BarToolGroup bar} of tools. The arrangement and order of the toolgroups is * customized when the toolbar is set up. Tools can be presented in any order, but each can only * appear once in the toolbar. * * The toolbar can be synchronized with the state of the external "application", like a text * editor's editing area, marking tools as active/inactive (e.g. a 'bold' tool would be shown as * active when the text cursor was inside bolded text) or enabled/disabled (e.g. a table caption * tool would be disabled while the user is not editing a table). A state change is signalled by * emitting the {@link #event-updateState 'updateState' event}, which calls Tools' * {@link OO.ui.Tool#onUpdateState onUpdateState method}. * * The following is an example of a basic toolbar. * * @example * // Example of a toolbar * // Create the toolbar * var toolFactory = new OO.ui.ToolFactory(); * var toolGroupFactory = new OO.ui.ToolGroupFactory(); * var toolbar = new OO.ui.Toolbar( toolFactory, toolGroupFactory ); * * // We will be placing status text in this element when tools are used * var $area = $( '<p>' ).text( 'Toolbar example' ); * * // Define the tools that we're going to place in our toolbar * * // Create a class inheriting from OO.ui.Tool * function SearchTool() { * SearchTool.super.apply( this, arguments ); * } * OO.inheritClass( SearchTool, OO.ui.Tool ); * // Each tool must have a 'name' (used as an internal identifier, see later) and at least one * // of 'icon' and 'title' (displayed icon and text). * SearchTool.static.name = 'search'; * SearchTool.static.icon = 'search'; * SearchTool.static.title = 'Search...'; * // Defines the action that will happen when this tool is selected (clicked). * SearchTool.prototype.onSelect = function () { * $area.text( 'Search tool clicked!' ); * // Never display this tool as "active" (selected). * this.setActive( false ); * }; * SearchTool.prototype.onUpdateState = function () {}; * // Make this tool available in our toolFactory and thus our toolbar * toolFactory.register( SearchTool ); * * // Register two more tools, nothing interesting here * function SettingsTool() { * SettingsTool.super.apply( this, arguments ); * } * OO.inheritClass( SettingsTool, OO.ui.Tool ); * SettingsTool.static.name = 'settings'; * SettingsTool.static.icon = 'settings'; * SettingsTool.static.title = 'Change settings'; * SettingsTool.prototype.onSelect = function () { * $area.text( 'Settings tool clicked!' 
); * this.setActive( false ); * }; * SettingsTool.prototype.onUpdateState = function () {}; * toolFactory.register( SettingsTool ); * * // Register two more tools, nothing interesting here * function StuffTool() { * StuffTool.super.apply( this, arguments ); * } * OO.inheritClass( StuffTool, OO.ui.Tool ); * StuffTool.static.name = 'stuff'; * StuffTool.static.icon = 'ellipsis'; * StuffTool.static.title = 'More stuff'; * StuffTool.prototype.onSelect = function () { * $area.text( 'More stuff tool clicked!' ); * this.setActive( false ); * }; * StuffTool.prototype.onUpdateState = function () {}; * toolFactory.register( StuffTool ); * * // This is a PopupTool. Rather than having a custom 'onSelect' action, it will display a * // little popup window (a PopupWidget). * function HelpTool( toolGroup, config ) { * OO.ui.PopupTool.call( this, toolGroup, $.extend( { popup: { * padded: true, * label: 'Help', * head: true * } }, config ) ); * this.popup.$body.append( '<p>I am helpful!</p>' ); * } * OO.inheritClass( HelpTool, OO.ui.PopupTool ); * HelpTool.static.name = 'help'; * HelpTool.static.icon = 'help'; * HelpTool.static.title = 'Help'; * toolFactory.register( HelpTool ); * * // Finally define which tools and in what order appear in the toolbar. Each tool may only be * // used once (but not all defined tools must be used). * toolbar.setup( [ * { * // 'bar' tool groups display tools' icons only, side-by-side. * type: 'bar', * include: [ 'search', 'help' ] * }, * { * // 'list' tool groups display both the titles and icons, in a dropdown list. * type: 'list', * indicator: 'down', * label: 'More', * include: [ 'settings', 'stuff' ] * } * // Note how the tools themselves are toolgroup-agnostic - the same tool can be displayed * // either in a 'list' or a 'bar'. There is a 'menu' tool group too, not showcased here, * // since it's more complicated to use. (See the next example snippet on this page.) * ] ); * * // Create some UI around the toolbar and place it in the document * var frame = new OO.ui.PanelLayout( { * expanded: false, * framed: true * } ); * var contentFrame = new OO.ui.PanelLayout( { * expanded: false, * padded: true * } ); * frame.$element.append( * toolbar.$element, * contentFrame.$element.append( $area ) * ); * $( document.body ).append( frame.$element ); * * // Here is where the toolbar is actually built. This must be done after inserting it into the * // document. * toolbar.initialize(); * toolbar.emit( 'updateState' ); * * The following example extends the previous one to illustrate 'menu' toolgroups and the usage of * {@link #event-updateState 'updateState' event}. * * @example * // Create the toolbar * var toolFactory = new OO.ui.ToolFactory(); * var toolGroupFactory = new OO.ui.ToolGroupFactory(); * var toolbar = new OO.ui.Toolbar( toolFactory, toolGroupFactory ); * * // We will be placing status text in this element when tools are used * var $area = $( '<p>' ).text( 'Toolbar example' ); * * // Define the tools that we're going to place in our toolbar * * // Create a class inheriting from OO.ui.Tool * function SearchTool() { * SearchTool.super.apply( this, arguments ); * } * OO.inheritClass( SearchTool, OO.ui.Tool ); * // Each tool must have a 'name' (used as an internal identifier, see later) and at least one * // of 'icon' and 'title' (displayed icon and text). * SearchTool.static.name = 'search'; * SearchTool.static.icon = 'search'; * SearchTool.static.title = 'Search...'; * // Defines the action that will happen when this tool is selected (clicked). 
* SearchTool.prototype.onSelect = function () { * $area.text( 'Search tool clicked!' ); * // Never display this tool as "active" (selected). * this.setActive( false ); * }; * SearchTool.prototype.onUpdateState = function () {}; * // Make this tool available in our toolFactory and thus our toolbar * toolFactory.register( SearchTool ); * * // Register two more tools, nothing interesting here * function SettingsTool() { * SettingsTool.super.apply( this, arguments ); * this.reallyActive = false; * } * OO.inheritClass( SettingsTool, OO.ui.Tool ); * SettingsTool.static.name = 'settings'; * SettingsTool.static.icon = 'settings'; * SettingsTool.static.title = 'Change settings'; * SettingsTool.prototype.onSelect = function () { * $area.text( 'Settings tool clicked!' ); * // Toggle the active state on each click * this.reallyActive = !this.reallyActive; * this.setActive( this.reallyActive ); * // To update the menu label * this.toolbar.emit( 'updateState' ); * }; * SettingsTool.prototype.onUpdateState = function () {}; * toolFactory.register( SettingsTool ); * * // Register two more tools, nothing interesting here * function StuffTool() { * StuffTool.super.apply( this, arguments ); * this.reallyActive = false; * } * OO.inheritClass( StuffTool, OO.ui.Tool ); * StuffTool.static.name = 'stuff'; * StuffTool.static.icon = 'ellipsis'; * StuffTool.static.title = 'More stuff'; * StuffTool.prototype.onSelect = function () { * $area.text( 'More stuff tool clicked!' ); * // Toggle the active state on each click * this.reallyActive = !this.reallyActive; * this.setActive( this.reallyActive ); * // To update the menu label * this.toolbar.emit( 'updateState' ); * }; * StuffTool.prototype.onUpdateState = function () {}; * toolFactory.register( StuffTool ); * * // This is a PopupTool. Rather than having a custom 'onSelect' action, it will display a * // little popup window (a PopupWidget). 'onUpdateState' is also already implemented. * function HelpTool( toolGroup, config ) { * OO.ui.PopupTool.call( this, toolGroup, $.extend( { popup: { * padded: true, * label: 'Help', * head: true * } }, config ) ); * this.popup.$body.append( '<p>I am helpful!</p>' ); * } * OO.inheritClass( HelpTool, OO.ui.PopupTool ); * HelpTool.static.name = 'help'; * HelpTool.static.icon = 'help'; * HelpTool.static.title = 'Help'; * toolFactory.register( HelpTool ); * * // Finally define which tools and in what order appear in the toolbar. Each tool may only be * // used once (but not all defined tools must be used). * toolbar.setup( [ * { * // 'bar' tool groups display tools' icons only, side-by-side. * type: 'bar', * include: [ 'search', 'help' ] * }, * { * // 'menu' tool groups display both the titles and icons, in a dropdown menu. * // Menu label indicates which items are selected. * type: 'menu', * indicator: 'down', * include: [ 'settings', 'stuff' ] * } * ] ); * * // Create some UI around the toolbar and place it in the document * var frame = new OO.ui.PanelLayout( { * expanded: false, * framed: true * } ); * var contentFrame = new OO.ui.PanelLayout( { * expanded: false, * padded: true * } ); * frame.$element.append( * toolbar.$element, * contentFrame.$element.append( $area ) * ); * $( document.body ).append( frame.$element ); * * // Here is where the toolbar is actually built. This must be done after inserting it into the * // document. 
* toolbar.initialize(); * toolbar.emit( 'updateState' ); * * @class * @extends OO.ui.Element * @mixins OO.EventEmitter * @mixins OO.ui.mixin.GroupElement * * @constructor * @param {OO.ui.ToolFactory} toolFactory Factory for creating tools * @param {OO.ui.ToolGroupFactory} toolGroupFactory Factory for creating toolgroups * @param {Object} [config] Configuration options * @cfg {boolean} [actions] Add an actions section to the toolbar. Actions are commands that are * included in the toolbar, but are not configured as tools. By default, actions are displayed on * the right side of the toolbar. * @cfg {string} [position='top'] Whether the toolbar is positioned above ('top') or below * ('bottom') content. * @cfg {jQuery} [$overlay] An overlay for the popup. * See <https://www.mediawiki.org/wiki/OOUI/Concepts#Overlays>. */ OO.ui.Toolbar = function OoUiToolbar( toolFactory, toolGroupFactory, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolFactory ) && config === undefined ) { config = toolFactory; toolFactory = config.toolFactory; toolGroupFactory = config.toolGroupFactory; } // Configuration initialization config = config || {}; // Parent constructor OO.ui.Toolbar.super.call( this, config ); // Mixin constructors OO.EventEmitter.call( this ); OO.ui.mixin.GroupElement.call( this, config ); // Properties this.toolFactory = toolFactory; this.toolGroupFactory = toolGroupFactory; this.groupsByName = {}; this.activeToolGroups = 0; this.tools = {}; this.position = config.position || 'top'; this.$bar = $( '<div>' ); this.$actions = $( '<div>' ); this.$popups = $( '<div>' ); this.initialized = false; this.narrowThreshold = null; this.onWindowResizeHandler = this.onWindowResize.bind( this ); this.$overlay = ( config.$overlay === true ? OO.ui.getDefaultOverlay() : config.$overlay ) || this.$element; // Events this.$element .add( this.$bar ).add( this.$group ).add( this.$actions ) .on( 'mousedown keydown', this.onPointerDown.bind( this ) ); // Initialization this.$group.addClass( 'oo-ui-toolbar-tools' ); if ( config.actions ) { this.$bar.append( this.$actions.addClass( 'oo-ui-toolbar-actions' ) ); } this.$popups.addClass( 'oo-ui-toolbar-popups' ); this.$bar .addClass( 'oo-ui-toolbar-bar' ) .append( this.$group, '<div style="clear:both"></div>' ); // Possible classes: oo-ui-toolbar-position-top, oo-ui-toolbar-position-bottom this.$element .addClass( 'oo-ui-toolbar oo-ui-toolbar-position-' + this.position ) .append( this.$bar ); this.$overlay.append( this.$popups ); }; /* Setup */ OO.inheritClass( OO.ui.Toolbar, OO.ui.Element ); OO.mixinClass( OO.ui.Toolbar, OO.EventEmitter ); OO.mixinClass( OO.ui.Toolbar, OO.ui.mixin.GroupElement ); /* Events */ /** * @event updateState * * An 'updateState' event must be emitted on the Toolbar (by calling * `toolbar.emit( 'updateState' )`) every time the state of the application using the toolbar * changes, and an update to the state of tools is required. * * @param {...Mixed} data Application-defined parameters */ /** * @event active * * An 'active' event is emitted when the number of active toolgroups increases from 0, or * returns to 0. * * @param {boolean} There are active toolgroups in this toolbar */ /* Methods */ /** * Get the tool factory. * * @return {OO.ui.ToolFactory} Tool factory */ OO.ui.Toolbar.prototype.getToolFactory = function () { return this.toolFactory; }; /** * Get the toolgroup factory. 
* * @return {OO.Factory} Toolgroup factory */ OO.ui.Toolbar.prototype.getToolGroupFactory = function () { return this.toolGroupFactory; }; /** * Handles mouse down events. * * @private * @param {jQuery.Event} e Mouse down event * @return {undefined|boolean} False to prevent default if event is handled */ OO.ui.Toolbar.prototype.onPointerDown = function ( e ) { var $closestWidgetToEvent = $( e.target ).closest( '.oo-ui-widget' ), $closestWidgetToToolbar = this.$element.closest( '.oo-ui-widget' ); if ( !$closestWidgetToEvent.length || $closestWidgetToEvent[ 0 ] === $closestWidgetToToolbar[ 0 ] ) { return false; } }; /** * Handle window resize event. * * @private * @param {jQuery.Event} e Window resize event */ OO.ui.Toolbar.prototype.onWindowResize = function () { this.$element.add( this.$popups ).toggleClass( 'oo-ui-toolbar-narrow', this.$bar[ 0 ].clientWidth <= this.getNarrowThreshold() ); }; /** * Get the (lazily-computed) width threshold for applying the oo-ui-toolbar-narrow * class. * * @private * @return {number} Width threshold in pixels */ OO.ui.Toolbar.prototype.getNarrowThreshold = function () { if ( this.narrowThreshold === null ) { this.narrowThreshold = this.$group[ 0 ].offsetWidth + this.$actions[ 0 ].offsetWidth; } return this.narrowThreshold; }; /** * Sets up handles and preloads required information for the toolbar to work. * This must be called after it is attached to a visible document and before doing anything else. */ OO.ui.Toolbar.prototype.initialize = function () { if ( !this.initialized ) { this.initialized = true; $( this.getElementWindow() ).on( 'resize', this.onWindowResizeHandler ); this.onWindowResize(); } }; /** * Set up the toolbar. * * The toolbar is set up with a list of toolgroup configurations that specify the type of * toolgroup ({@link OO.ui.BarToolGroup bar}, {@link OO.ui.MenuToolGroup menu}, or * {@link OO.ui.ListToolGroup list}) to add and which tools to include, exclude, promote, or demote * within that toolgroup. Please see {@link OO.ui.ToolGroup toolgroups} for more information about * including tools in toolgroups. * * @param {Object[]} groups List of toolgroup configurations * @param {string} groups.name Symbolic name for this toolgroup * @param {string} [groups.type] Toolgroup type, e.g. "bar", "list", or "menu". Should exist in the * {@link OO.ui.ToolGroupFactory} provided via the constructor. Defaults to "list" for catch-all * groups where `include='*'`, otherwise "bar". * @param {Array|string} [groups.include] Tools to include in the toolgroup, or "*" for catch-all, * see {@link OO.ui.ToolFactory#extract} * @param {Array|string} [groups.exclude] Tools to exclude from the toolgroup * @param {Array|string} [groups.promote] Tools to promote to the beginning of the toolgroup * @param {Array|string} [groups.demote] Tools to demote to the end of the toolgroup */ OO.ui.Toolbar.prototype.setup = function ( groups ) { var i, len, type, toolGroup, groupConfig, items = [], defaultType = 'bar'; // Cleanup previous groups this.reset(); // Build out new groups for ( i = 0, len = groups.length; i < len; i++ ) { groupConfig = groups[ i ]; if ( groupConfig.include === '*' ) { // Apply defaults to catch-all groups if ( groupConfig.type === undefined ) { groupConfig.type = 'list'; } if ( groupConfig.label === undefined ) { groupConfig.label = OO.ui.msg( 'ooui-toolbar-more' ); } } // Check type has been registered type = this.getToolGroupFactory().lookup( groupConfig.type ) ? 
groupConfig.type : defaultType; toolGroup = this.getToolGroupFactory().create( type, this, groupConfig ); items.push( toolGroup ); this.groupsByName[ groupConfig.name ] = toolGroup; toolGroup.connect( this, { active: 'onToolGroupActive' } ); } this.addItems( items ); }; /** * Handle active events from tool groups * * @param {boolean} active Tool group has become active, inactive if false * @fires active */ OO.ui.Toolbar.prototype.onToolGroupActive = function ( active ) { if ( active ) { this.activeToolGroups++; if ( this.activeToolGroups === 1 ) { this.emit( 'active', true ); } } else { this.activeToolGroups--; if ( this.activeToolGroups === 0 ) { this.emit( 'active', false ); } } }; /** * Get a toolgroup by name * * @param {string} name Group name * @return {OO.ui.ToolGroup|null} Tool group, or null if none found by that name */ OO.ui.Toolbar.prototype.getToolGroupByName = function ( name ) { return this.groupsByName[ name ] || null; }; /** * Remove all tools and toolgroups from the toolbar. */ OO.ui.Toolbar.prototype.reset = function () { var i, len; this.groupsByName = {}; this.tools = {}; for ( i = 0, len = this.items.length; i < len; i++ ) { this.items[ i ].destroy(); } this.clearItems(); }; /** * Destroy the toolbar. * * Destroying the toolbar removes all event handlers and DOM elements that constitute the toolbar. * Call this method whenever you are done using a toolbar. */ OO.ui.Toolbar.prototype.destroy = function () { $( this.getElementWindow() ).off( 'resize', this.onWindowResizeHandler ); this.reset(); this.$element.remove(); }; /** * Check if the tool is available. * * Available tools are ones that have not yet been added to the toolbar. * * @param {string} name Symbolic name of tool * @return {boolean} Tool is available */ OO.ui.Toolbar.prototype.isToolAvailable = function ( name ) { return !this.tools[ name ]; }; /** * Prevent tool from being used again. * * @param {OO.ui.Tool} tool Tool to reserve */ OO.ui.Toolbar.prototype.reserveTool = function ( tool ) { this.tools[ tool.getName() ] = tool; }; /** * Allow tool to be used again. * * @param {OO.ui.Tool} tool Tool to release */ OO.ui.Toolbar.prototype.releaseTool = function ( tool ) { delete this.tools[ tool.getName() ]; }; /** * Get accelerator label for tool. * * The OOUI library does not contain an accelerator system, but this is the hook for one. To * use an accelerator system, subclass the toolbar and override this method, which is meant to * return a label that describes the accelerator keys for the tool passed (by symbolic name) to * the method. * * @param {string} name Symbolic name of tool * @return {string|undefined} Tool accelerator label if available */ OO.ui.Toolbar.prototype.getToolAccelerator = function () { return undefined; }; /** * Tools, together with {@link OO.ui.ToolGroup toolgroups}, constitute * {@link OO.ui.Toolbar toolbars}. * Each tool is configured with a static name, title, and icon and is customized with the command * to carry out when the tool is selected. Tools must also be registered with a * {@link OO.ui.ToolFactory tool factory}, which creates the tools on demand. * * Every Tool subclass must implement two methods: * * - {@link #onUpdateState} * - {@link #onSelect} * * Tools are added to toolgroups ({@link OO.ui.ListToolGroup ListToolGroup}, * {@link OO.ui.BarToolGroup BarToolGroup}, or {@link OO.ui.MenuToolGroup MenuToolGroup}), which * determine how the tool is displayed in the toolbar. See {@link OO.ui.Toolbar toolbars} for an * example. 
 *
 * For more information, please see the [OOUI documentation on MediaWiki][1].
 * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars
 *
 * @abstract
 * @class
 * @extends OO.ui.Widget
 * @mixins OO.ui.mixin.IconElement
 * @mixins OO.ui.mixin.FlaggedElement
 * @mixins OO.ui.mixin.TabIndexedElement
 *
 * @constructor
 * @param {OO.ui.ToolGroup} toolGroup
 * @param {Object} [config] Configuration options
 * @cfg {string|Function} [title] Title text or a function that returns text. If this config is
 *  omitted, the value of the {@link #static-title static title} property is used.
 *
 *  The title is used in different ways depending on the type of toolgroup that contains the
 *  tool. The title is used as a tooltip if the tool is part of a {@link OO.ui.BarToolGroup bar}
 *  toolgroup, or as the label text if the tool is part of a {@link OO.ui.ListToolGroup list} or
 *  {@link OO.ui.MenuToolGroup menu} toolgroup.
 *
 *  For bar toolgroups, a description of the accelerator key is appended to the title if an
 *  accelerator key is associated with an action by the same name as the tool and accelerator
 *  functionality has been added to the application.
 *  To add accelerator key functionality, you must subclass OO.ui.Toolbar and override the
 *  {@link OO.ui.Toolbar#getToolAccelerator getToolAccelerator} method.
 */
OO.ui.Tool = function OoUiTool( toolGroup, config ) {
	// Allow passing positional parameters inside the config object
	if ( OO.isPlainObject( toolGroup ) && config === undefined ) {
		config = toolGroup;
		toolGroup = config.toolGroup;
	}

	// Configuration initialization
	config = config || {};

	// Parent constructor
	OO.ui.Tool.super.call( this, config );

	// Properties
	this.toolGroup = toolGroup;
	this.toolbar = this.toolGroup.getToolbar();
	this.active = false;
	this.$title = $( '<span>' );
	this.$accel = $( '<span>' );
	this.$link = $( '<a>' );
	this.title = null;
	this.checkIcon = new OO.ui.IconWidget( {
		icon: 'check',
		classes: [ 'oo-ui-tool-checkIcon' ]
	} );

	// Mixin constructors
	OO.ui.mixin.IconElement.call( this, config );
	OO.ui.mixin.FlaggedElement.call( this, config );
	OO.ui.mixin.TabIndexedElement.call( this, $.extend( { $tabIndexed: this.$link }, config ) );

	// Events
	this.toolbar.connect( this, {
		updateState: 'onUpdateState'
	} );

	// Initialization
	this.$title.addClass( 'oo-ui-tool-title' );
	this.$accel
		.addClass( 'oo-ui-tool-accel' )
		.prop( {
			// This may need to be changed if the key names are ever localized,
			// but for now they are essentially written in English
			dir: 'ltr',
			lang: 'en'
		} );
	// The accelerator label is deliberately not appended here; it is added
	// conditionally below so that it can be skipped on mobile.
	this.$link
		.addClass( 'oo-ui-tool-link' )
		.append( this.checkIcon.$element, this.$icon, this.$title )
		.attr( 'role', 'button' );

	// Don't show keyboard shortcuts on mobile as users are unlikely to have
	// a physical keyboard, and likely to have limited screen space.
	if ( !OO.ui.isMobile() ) {
		this.$link.append( this.$accel );
	}

	this.$element
		.data( 'oo-ui-tool', this )
		.addClass( 'oo-ui-tool' )
		.addClass( 'oo-ui-tool-name-' +
			this.constructor.static.name.replace( /^([^/]+)\/([^/]+).*$/, '$1-$2' ) )
		.toggleClass( 'oo-ui-tool-with-label', this.constructor.static.displayBothIconAndLabel )
		.append( this.$link );
	this.setTitle( config.title || this.constructor.static.title );
};

/* Setup */

OO.inheritClass( OO.ui.Tool, OO.ui.Widget );
OO.mixinClass( OO.ui.Tool, OO.ui.mixin.IconElement );
OO.mixinClass( OO.ui.Tool, OO.ui.mixin.FlaggedElement );
OO.mixinClass( OO.ui.Tool, OO.ui.mixin.TabIndexedElement );

/* Static Properties */

/**
 * @static
 * @inheritdoc
 */
OO.ui.Tool.static.tagName = 'span';

/**
 * Symbolic name of tool.
* * The symbolic name is used internally to register the tool with a * {@link OO.ui.ToolFactory ToolFactory}. It can also be used when adding tools to toolgroups. * * @abstract * @static * @inheritable * @property {string} */ OO.ui.Tool.static.name = ''; /** * Symbolic name of the group. * * The group name is used to associate tools with each other so that they can be selected later by * a {@link OO.ui.ToolGroup toolgroup}. * * @abstract * @static * @inheritable * @property {string} */ OO.ui.Tool.static.group = ''; /** * Tool title text or a function that returns title text. The value of the static property is * overridden if the #title config option is used. * * @abstract * @static * @inheritable * @property {string|Function} */ OO.ui.Tool.static.title = ''; /** * Display both icon and label when the tool is used in a {@link OO.ui.BarToolGroup bar} toolgroup. * Normally only the icon is displayed, or only the label if no icon is given. * * @static * @inheritable * @property {boolean} */ OO.ui.Tool.static.displayBothIconAndLabel = false; /** * Add tool to catch-all groups automatically. * * A catch-all group, which contains all tools that do not currently belong to a toolgroup, * can be included in a toolgroup using the wildcard selector, an asterisk (*). * * @static * @inheritable * @property {boolean} */ OO.ui.Tool.static.autoAddToCatchall = true; /** * Add tool to named groups automatically. * * By default, tools that are configured with a static ‘group’ property are added * to that group and will be selected when the symbolic name of the group is specified (e.g., when * toolgroups include tools by group name). * * @static * @property {boolean} * @inheritable */ OO.ui.Tool.static.autoAddToGroup = true; /** * Check if this tool is compatible with given data. * * This is a stub that can be overridden to provide support for filtering tools based on an * arbitrary piece of information (e.g., where the cursor is in a document). The implementation * must also call this method so that the compatibility check can be performed. * * @static * @inheritable * @param {Mixed} data Data to check * @return {boolean} Tool can be used with data */ OO.ui.Tool.static.isCompatibleWith = function () { return false; }; /* Methods */ /** * Handle the toolbar state being updated. This method is called when the * {@link OO.ui.Toolbar#event-updateState 'updateState' event} is emitted on the * {@link OO.ui.Toolbar Toolbar} that uses this tool, and should set the state of this tool * depending on application state (usually by calling #setDisabled to enable or disable the tool, * or #setActive to mark is as currently in-use or not). * * This is an abstract method that must be overridden in a concrete subclass. * * @method * @protected * @abstract */ OO.ui.Tool.prototype.onUpdateState = null; /** * Handle the tool being selected. This method is called when the user triggers this tool, * usually by clicking on its label/icon. * * This is an abstract method that must be overridden in a concrete subclass. * * @method * @protected * @abstract */ OO.ui.Tool.prototype.onSelect = null; /** * Check if the tool is active. * * Tools become active when their #onSelect or #onUpdateState handlers change them to appear pressed * with the #setActive method. Additional CSS is applied to the tool to reflect the active state. * * @return {boolean} Tool is active */ OO.ui.Tool.prototype.isActive = function () { return this.active; }; /** * Make the tool appear active or inactive. 
* * This method should be called within #onSelect or #onUpdateState event handlers to make the tool * appear pressed or not. * * @param {boolean} state Make tool appear active */ OO.ui.Tool.prototype.setActive = function ( state ) { this.active = !!state; this.$element.toggleClass( 'oo-ui-tool-active', this.active ); this.updateThemeClasses(); }; /** * Set the tool #title. * * @param {string|Function} title Title text or a function that returns text * @chainable * @return {OO.ui.Tool} The tool, for chaining */ OO.ui.Tool.prototype.setTitle = function ( title ) { this.title = OO.ui.resolveMsg( title ); this.updateTitle(); return this; }; /** * Get the tool #title. * * @return {string} Title text */ OO.ui.Tool.prototype.getTitle = function () { return this.title; }; /** * Get the tool's symbolic name. * * @return {string} Symbolic name of tool */ OO.ui.Tool.prototype.getName = function () { return this.constructor.static.name; }; /** * Update the title. */ OO.ui.Tool.prototype.updateTitle = function () { var titleTooltips = this.toolGroup.constructor.static.titleTooltips, accelTooltips = this.toolGroup.constructor.static.accelTooltips, accel = this.toolbar.getToolAccelerator( this.constructor.static.name ), tooltipParts = []; this.$title.text( this.title ); this.$accel.text( accel ); if ( titleTooltips && typeof this.title === 'string' && this.title.length ) { tooltipParts.push( this.title ); } if ( accelTooltips && typeof accel === 'string' && accel.length ) { tooltipParts.push( accel ); } if ( tooltipParts.length ) { this.$link.attr( 'title', tooltipParts.join( ' ' ) ); } else { this.$link.removeAttr( 'title' ); } }; /** * @inheritdoc OO.ui.mixin.IconElement */ OO.ui.Tool.prototype.setIcon = function ( icon ) { // Mixin method OO.ui.mixin.IconElement.prototype.setIcon.call( this, icon ); this.$element.toggleClass( 'oo-ui-tool-with-icon', !!this.icon ); return this; }; /** * Destroy tool. * * Destroying the tool removes all event handlers and the tool’s DOM elements. * Call this method whenever you are done using a tool. */ OO.ui.Tool.prototype.destroy = function () { this.toolbar.disconnect( this ); this.$element.remove(); }; /** * ToolGroups are collections of {@link OO.ui.Tool tools} that are used in a * {@link OO.ui.Toolbar toolbar}. * The type of toolgroup ({@link OO.ui.ListToolGroup list}, {@link OO.ui.BarToolGroup bar}, or * {@link OO.ui.MenuToolGroup menu}) to which a tool belongs determines how the tool is arranged * and displayed in the toolbar. Toolgroups themselves are created on demand with a * {@link OO.ui.ToolGroupFactory toolgroup factory}. * * Toolgroups can contain individual tools, groups of tools, or all available tools, as specified * using the `include` config option. See OO.ui.ToolFactory#extract on documentation of the format. * The options `exclude`, `promote`, and `demote` support the same formats. * * See {@link OO.ui.Toolbar toolbars} for a full example. For more information about toolbars in * general, please see the [OOUI documentation on MediaWiki][1]. * * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @abstract * @class * @extends OO.ui.Widget * @mixins OO.ui.mixin.GroupElement * * @constructor * @param {OO.ui.Toolbar} toolbar * @param {Object} [config] Configuration options * @cfg {Array|string} [include=[]] List of tools to include in the toolgroup, see above. * @cfg {Array|string} [exclude=[]] List of tools to exclude from the toolgroup, see above. 
* @cfg {Array|string} [promote=[]] List of tools to promote to the beginning of the toolgroup, * see above. * @cfg {Array|string} [demote=[]] List of tools to demote to the end of the toolgroup, see above. * This setting is particularly useful when tools have been added to the toolgroup * en masse (e.g., via the catch-all selector). */ OO.ui.ToolGroup = function OoUiToolGroup( toolbar, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolbar ) && config === undefined ) { config = toolbar; toolbar = config.toolbar; } // Configuration initialization config = config || {}; // Parent constructor OO.ui.ToolGroup.super.call( this, config ); // Mixin constructors OO.ui.mixin.GroupElement.call( this, config ); // Properties this.toolbar = toolbar; this.tools = {}; this.pressed = null; this.autoDisabled = false; this.include = config.include || []; this.exclude = config.exclude || []; this.promote = config.promote || []; this.demote = config.demote || []; this.onDocumentMouseKeyUpHandler = this.onDocumentMouseKeyUp.bind( this ); // Events this.$group.on( { mousedown: this.onMouseKeyDown.bind( this ), mouseup: this.onMouseKeyUp.bind( this ), keydown: this.onMouseKeyDown.bind( this ), keyup: this.onMouseKeyUp.bind( this ), focus: this.onMouseOverFocus.bind( this ), blur: this.onMouseOutBlur.bind( this ), mouseover: this.onMouseOverFocus.bind( this ), mouseout: this.onMouseOutBlur.bind( this ) } ); this.toolbar.getToolFactory().connect( this, { register: 'onToolFactoryRegister' } ); this.aggregate( { disable: 'itemDisable' } ); this.connect( this, { itemDisable: 'updateDisabled', disable: 'onDisable' } ); // Initialization this.$group.addClass( 'oo-ui-toolGroup-tools' ); this.$element .addClass( 'oo-ui-toolGroup' ) .append( this.$group ); this.onDisable( this.isDisabled() ); this.populate(); }; /* Setup */ OO.inheritClass( OO.ui.ToolGroup, OO.ui.Widget ); OO.mixinClass( OO.ui.ToolGroup, OO.ui.mixin.GroupElement ); /* Events */ /** * @event update */ /** * @event active * * An 'active' event is emitted when any popup is shown/hidden. * * @param {boolean} The popup is visible */ /* Static Properties */ /** * Show labels in tooltips. * * @static * @inheritable * @property {boolean} */ OO.ui.ToolGroup.static.titleTooltips = false; /** * Show acceleration labels in tooltips. * * Note: The OOUI library does not include an accelerator system, but does contain * a hook for one. To use an accelerator system, subclass the {@link OO.ui.Toolbar toolbar} and * override the {@link OO.ui.Toolbar#getToolAccelerator getToolAccelerator} method, which is * meant to return a label that describes the accelerator keys for a given tool (e.g., Control+M * key combination). 
* * @static * @inheritable * @property {boolean} */ OO.ui.ToolGroup.static.accelTooltips = false; /** * Automatically disable the toolgroup when all tools are disabled * * @static * @inheritable * @property {boolean} */ OO.ui.ToolGroup.static.autoDisable = true; /** * @abstract * @static * @inheritable * @property {string} */ OO.ui.ToolGroup.static.name = null; /* Methods */ /** * @inheritdoc */ OO.ui.ToolGroup.prototype.isDisabled = function () { return this.autoDisabled || OO.ui.ToolGroup.super.prototype.isDisabled.apply( this, arguments ); }; /** * @inheritdoc */ OO.ui.ToolGroup.prototype.updateDisabled = function () { var i, item, allDisabled = true; if ( this.constructor.static.autoDisable ) { for ( i = this.items.length - 1; i >= 0; i-- ) { item = this.items[ i ]; if ( !item.isDisabled() ) { allDisabled = false; break; } } this.autoDisabled = allDisabled; } OO.ui.ToolGroup.super.prototype.updateDisabled.apply( this, arguments ); }; /** * Handle disable events. * * @protected * @param {boolean} isDisabled */ OO.ui.ToolGroup.prototype.onDisable = function ( isDisabled ) { this.$group.toggleClass( 'oo-ui-toolGroup-disabled-tools', isDisabled ); this.$group.toggleClass( 'oo-ui-toolGroup-enabled-tools', !isDisabled ); }; /** * Handle mouse down and key down events. * * @protected * @param {jQuery.Event} e Mouse down or key down event * @return {undefined|boolean} False to prevent default if event is handled */ OO.ui.ToolGroup.prototype.onMouseKeyDown = function ( e ) { if ( !this.isDisabled() && ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) ) { this.pressed = this.findTargetTool( e ); if ( this.pressed ) { this.pressed.setActive( true ); this.getElementDocument().addEventListener( 'mouseup', this.onDocumentMouseKeyUpHandler, true ); this.getElementDocument().addEventListener( 'keyup', this.onDocumentMouseKeyUpHandler, true ); return false; } } }; /** * Handle document mouse up and key up events. * * @protected * @param {MouseEvent|KeyboardEvent} e Mouse up or key up event */ OO.ui.ToolGroup.prototype.onDocumentMouseKeyUp = function ( e ) { this.getElementDocument().removeEventListener( 'mouseup', this.onDocumentMouseKeyUpHandler, true ); this.getElementDocument().removeEventListener( 'keyup', this.onDocumentMouseKeyUpHandler, true ); // onMouseKeyUp may be called a second time, depending on where the mouse is when the button is // released, but since `this.pressed` will no longer be true, the second call will be ignored. this.onMouseKeyUp( e ); }; /** * Handle mouse up and key up events. * * @protected * @param {MouseEvent|KeyboardEvent} e Mouse up or key up event */ OO.ui.ToolGroup.prototype.onMouseKeyUp = function ( e ) { var tool = this.findTargetTool( e ); if ( !this.isDisabled() && this.pressed && this.pressed === tool && ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) ) { this.pressed.onSelect(); this.pressed = null; e.preventDefault(); e.stopPropagation(); } this.pressed = null; }; /** * Handle mouse over and focus events. * * @protected * @param {jQuery.Event} e Mouse over or focus event */ OO.ui.ToolGroup.prototype.onMouseOverFocus = function ( e ) { var tool = this.findTargetTool( e ); if ( this.pressed && this.pressed === tool ) { this.pressed.setActive( true ); } }; /** * Handle mouse out and blur events. 
* * @protected * @param {jQuery.Event} e Mouse out or blur event */ OO.ui.ToolGroup.prototype.onMouseOutBlur = function ( e ) { var tool = this.findTargetTool( e ); if ( this.pressed && this.pressed === tool ) { this.pressed.setActive( false ); } }; /** * Get the closest tool to a jQuery.Event. * * Only tool links are considered, which prevents other elements in the tool such as popups from * triggering tool group interactions. * * @private * @param {jQuery.Event} e * @return {OO.ui.Tool|null} Tool, `null` if none was found */ OO.ui.ToolGroup.prototype.findTargetTool = function ( e ) { var tool, $item = $( e.target ).closest( '.oo-ui-tool-link' ); if ( $item.length ) { tool = $item.parent().data( 'oo-ui-tool' ); } return tool && !tool.isDisabled() ? tool : null; }; /** * Handle tool registry register events. * * If a tool is registered after the group is created, we must repopulate the list to account for: * * - a tool being added that may be included * - a tool already included being overridden * * @protected * @param {string} name Symbolic name of tool */ OO.ui.ToolGroup.prototype.onToolFactoryRegister = function () { this.populate(); }; /** * Get the toolbar that contains the toolgroup. * * @return {OO.ui.Toolbar} Toolbar that contains the toolgroup */ OO.ui.ToolGroup.prototype.getToolbar = function () { return this.toolbar; }; /** * Add and remove tools based on configuration. */ OO.ui.ToolGroup.prototype.populate = function () { var i, len, name, tool, toolFactory = this.toolbar.getToolFactory(), names = {}, add = [], remove = [], list = this.toolbar.getToolFactory().getTools( this.include, this.exclude, this.promote, this.demote ); // Build a list of needed tools for ( i = 0, len = list.length; i < len; i++ ) { name = list[ i ]; if ( // Tool exists toolFactory.lookup( name ) && // Tool is available or is already in this group ( this.toolbar.isToolAvailable( name ) || this.tools[ name ] ) ) { // Hack to prevent infinite recursion via ToolGroupTool. We need to reserve the tool // before creating it, but we can't call reserveTool() yet because we haven't created // the tool. this.toolbar.tools[ name ] = true; tool = this.tools[ name ]; if ( !tool ) { // Auto-initialize tools on first use this.tools[ name ] = tool = toolFactory.create( name, this ); tool.updateTitle(); } this.toolbar.reserveTool( tool ); add.push( tool ); names[ name ] = true; } } // Remove tools that are no longer needed for ( name in this.tools ) { if ( !names[ name ] ) { this.tools[ name ].destroy(); this.toolbar.releaseTool( this.tools[ name ] ); remove.push( this.tools[ name ] ); delete this.tools[ name ]; } } if ( remove.length ) { this.removeItems( remove ); } // Update emptiness state if ( add.length ) { this.$element.removeClass( 'oo-ui-toolGroup-empty' ); } else { this.$element.addClass( 'oo-ui-toolGroup-empty' ); } // Re-add tools (moving existing ones to new locations) this.addItems( add ); // Disabled state may depend on items this.updateDisabled(); }; /** * Destroy toolgroup. */ OO.ui.ToolGroup.prototype.destroy = function () { var name; this.clearItems(); this.toolbar.getToolFactory().disconnect( this ); for ( name in this.tools ) { this.toolbar.releaseTool( this.tools[ name ] ); this.tools[ name ].disconnect( this ).destroy(); delete this.tools[ name ]; } this.$element.remove(); }; /** * A ToolFactory creates tools on demand. All tools ({@link OO.ui.Tool Tools}, * {@link OO.ui.PopupTool PopupTools}, and {@link OO.ui.ToolGroupTool ToolGroupTools}) must be * registered with a tool factory. 
Tools are registered by their symbolic name. See * {@link OO.ui.Toolbar toolbars} for an example. * * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki][1]. * * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @class * @extends OO.Factory * @constructor */ OO.ui.ToolFactory = function OoUiToolFactory() { // Parent constructor OO.ui.ToolFactory.super.call( this ); }; /* Setup */ OO.inheritClass( OO.ui.ToolFactory, OO.Factory ); /* Methods */ /** * Get tools from the factory. * * @param {Array|string} include Included tools, see #extract for format * @param {Array|string} exclude Excluded tools, see #extract for format * @param {Array|string} promote Promoted tools, see #extract for format * @param {Array|string} demote Demoted tools, see #extract for format * @return {string[]} List of tools */ OO.ui.ToolFactory.prototype.getTools = function ( include, exclude, promote, demote ) { var i, len, included, promoted, demoted, auto = [], used = {}; // Collect included and not excluded tools included = OO.simpleArrayDifference( this.extract( include ), this.extract( exclude ) ); // Promotion promoted = this.extract( promote, used ); demoted = this.extract( demote, used ); // Auto for ( i = 0, len = included.length; i < len; i++ ) { if ( !used[ included[ i ] ] ) { auto.push( included[ i ] ); } } return promoted.concat( auto ).concat( demoted ); }; /** * Get a flat list of names from a list of names or groups. * * Normally, `collection` is an array of tool specifications. Tools can be specified in the * following ways: * * - To include an individual tool, use the symbolic name: `{ name: 'tool-name' }` or `'tool-name'`. * - To include all tools in a group, use the group name: `{ group: 'group-name' }`. (To assign the * tool to a group, use OO.ui.Tool.static.group.) * * Alternatively, to include all tools that are not yet assigned to any other toolgroup, use the * catch-all selector `'*'`. * * If `used` is passed, tool names that appear as properties in this object will be considered * already assigned, and will not be returned even if specified otherwise. The tool names extracted * by this function call will be added as new properties in the object. * * @private * @param {Array|string} collection List of tools, see above * @param {Object.<string,boolean>} [used] Object containing information about used tools, see above * @return {string[]} List of extracted tool names */ OO.ui.ToolFactory.prototype.extract = function ( collection, used ) { var i, len, item, name, tool, names = []; collection = !Array.isArray( collection ) ? 
[ collection ] : collection; for ( i = 0, len = collection.length; i < len; i++ ) { item = collection[ i ]; if ( item === '*' ) { for ( name in this.registry ) { tool = this.registry[ name ]; if ( // Only add tools by group name when auto-add is enabled tool.static.autoAddToCatchall && // Exclude already used tools ( !used || !used[ name ] ) ) { names.push( name ); if ( used ) { used[ name ] = true; } } } } else { // Allow plain strings as shorthand for named tools if ( typeof item === 'string' ) { item = { name: item }; } if ( OO.isPlainObject( item ) ) { if ( item.group ) { for ( name in this.registry ) { tool = this.registry[ name ]; if ( // Include tools with matching group tool.static.group === item.group && // Only add tools by group name when auto-add is enabled tool.static.autoAddToGroup && // Exclude already used tools ( !used || !used[ name ] ) ) { names.push( name ); if ( used ) { used[ name ] = true; } } } // Include tools with matching name and exclude already used tools } else if ( item.name && ( !used || !used[ item.name ] ) ) { names.push( item.name ); if ( used ) { used[ item.name ] = true; } } } } } return names; }; /** * ToolGroupFactories create {@link OO.ui.ToolGroup toolgroups} on demand. The toolgroup classes * must specify a symbolic name and be registered with the factory. The following classes are * registered by default: * * - {@link OO.ui.BarToolGroup BarToolGroups} (‘bar’) * - {@link OO.ui.MenuToolGroup MenuToolGroups} (‘menu’) * - {@link OO.ui.ListToolGroup ListToolGroups} (‘list’) * * See {@link OO.ui.Toolbar toolbars} for an example. * * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki][1]. * * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @class * @extends OO.Factory * @constructor */ OO.ui.ToolGroupFactory = function OoUiToolGroupFactory() { var i, l, defaultClasses; // Parent constructor OO.Factory.call( this ); defaultClasses = this.constructor.static.getDefaultClasses(); // Register default toolgroups for ( i = 0, l = defaultClasses.length; i < l; i++ ) { this.register( defaultClasses[ i ] ); } }; /* Setup */ OO.inheritClass( OO.ui.ToolGroupFactory, OO.Factory ); /* Static Methods */ /** * Get a default set of classes to be registered on construction. * * @return {Function[]} Default classes */ OO.ui.ToolGroupFactory.static.getDefaultClasses = function () { return [ OO.ui.BarToolGroup, OO.ui.ListToolGroup, OO.ui.MenuToolGroup ]; }; /** * Popup tools open a popup window when they are selected from the {@link OO.ui.Toolbar toolbar}. * Each popup tool is configured with a static name, title, and icon, as well with as any popup * configurations. Unlike other tools, popup tools do not require that developers specify an * #onSelect or #onUpdateState method, as these methods have been implemented already. * * // Example of a popup tool. When selected, a popup tool displays * // a popup window. * function HelpTool( toolGroup, config ) { * OO.ui.PopupTool.call( this, toolGroup, $.extend( { popup: { * padded: true, * label: 'Help', * head: true * } }, config ) ); * this.popup.$body.append( '<p>I am helpful!</p>' ); * }; * OO.inheritClass( HelpTool, OO.ui.PopupTool ); * HelpTool.static.name = 'help'; * HelpTool.static.icon = 'help'; * HelpTool.static.title = 'Help'; * toolFactory.register( HelpTool ); * * For an example of a toolbar that contains a popup tool, see {@link OO.ui.Toolbar toolbars}. * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki][1]. 
* * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @abstract * @class * @extends OO.ui.Tool * @mixins OO.ui.mixin.PopupElement * * @constructor * @param {OO.ui.ToolGroup} toolGroup * @param {Object} [config] Configuration options */ OO.ui.PopupTool = function OoUiPopupTool( toolGroup, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolGroup ) && config === undefined ) { config = toolGroup; toolGroup = config.toolGroup; } // Parent constructor OO.ui.PopupTool.super.call( this, toolGroup, config ); // Mixin constructors OO.ui.mixin.PopupElement.call( this, config ); // Events this.popup.connect( this, { toggle: 'onPopupToggle' } ); // Initialization this.popup.setAutoFlip( false ); this.popup.setPosition( toolGroup.getToolbar().position === 'bottom' ? 'above' : 'below' ); this.$element.addClass( 'oo-ui-popupTool' ); this.popup.$element.addClass( 'oo-ui-popupTool-popup' ); this.toolbar.$popups.append( this.popup.$element ); }; /* Setup */ OO.inheritClass( OO.ui.PopupTool, OO.ui.Tool ); OO.mixinClass( OO.ui.PopupTool, OO.ui.mixin.PopupElement ); /* Methods */ /** * Handle the tool being selected. * * @inheritdoc */ OO.ui.PopupTool.prototype.onSelect = function () { if ( !this.isDisabled() ) { this.popup.toggle(); } return false; }; /** * Handle the toolbar state being updated. * * @inheritdoc */ OO.ui.PopupTool.prototype.onUpdateState = function () { }; /** * Handle popup visibility being toggled. * * @param {boolean} isVisible */ OO.ui.PopupTool.prototype.onPopupToggle = function ( isVisible ) { this.setActive( isVisible ); this.toolGroup.emit( 'active', isVisible ); }; /** * A ToolGroupTool is a special sort of tool that can contain other {@link OO.ui.Tool tools} * and {@link OO.ui.ToolGroup toolgroups}. The ToolGroupTool was specifically designed to be used * inside a {@link OO.ui.BarToolGroup bar} toolgroup to provide access to additional tools from * the bar item. Included tools will be displayed in a dropdown {@link OO.ui.ListToolGroup list} * when the ToolGroupTool is selected. * * // Example: ToolGroupTool with two nested tools, 'setting1' and 'setting2', * // defined elsewhere. * * function SettingsTool() { * SettingsTool.super.apply( this, arguments ); * }; * OO.inheritClass( SettingsTool, OO.ui.ToolGroupTool ); * SettingsTool.static.name = 'settings'; * SettingsTool.static.title = 'Change settings'; * SettingsTool.static.groupConfig = { * icon: 'settings', * label: 'ToolGroupTool', * include: [ 'setting1', 'setting2' ] * }; * toolFactory.register( SettingsTool ); * * For more information, please see the [OOUI documentation on MediaWiki][1]. * * Please note that this implementation is subject to change per [T74159] [2]. 
* * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars#ToolGroupTool * [2]: https://phabricator.wikimedia.org/T74159 * * @abstract * @class * @extends OO.ui.Tool * * @constructor * @param {OO.ui.ToolGroup} toolGroup * @param {Object} [config] Configuration options */ OO.ui.ToolGroupTool = function OoUiToolGroupTool( toolGroup, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolGroup ) && config === undefined ) { config = toolGroup; toolGroup = config.toolGroup; } // Parent constructor OO.ui.ToolGroupTool.super.call( this, toolGroup, config ); // Properties this.innerToolGroup = this.createGroup( this.constructor.static.groupConfig ); // Events this.innerToolGroup.connect( this, { disable: 'onToolGroupDisable', // Re-emit active events from the innerToolGroup on the parent toolGroup active: this.toolGroup.emit.bind( this.toolGroup, 'active' ) } ); // Initialization this.$link.remove(); this.$element .addClass( 'oo-ui-toolGroupTool' ) .append( this.innerToolGroup.$element ); }; /* Setup */ OO.inheritClass( OO.ui.ToolGroupTool, OO.ui.Tool ); /* Static Properties */ /** * Toolgroup configuration. * * The toolgroup configuration consists of the tools to include, as well as an icon and label * to use for the bar item. Tools can be included by symbolic name, group, or with the * wildcard selector. Please see {@link OO.ui.ToolGroup toolgroup} for more information. * * @property {Object.<string,Array>} */ OO.ui.ToolGroupTool.static.groupConfig = {}; /* Methods */ /** * Handle the tool being selected. * * @inheritdoc */ OO.ui.ToolGroupTool.prototype.onSelect = function () { this.innerToolGroup.setActive( !this.innerToolGroup.active ); return false; }; /** * Synchronize disabledness state of the tool with the inner toolgroup. * * @private * @param {boolean} disabled Element is disabled */ OO.ui.ToolGroupTool.prototype.onToolGroupDisable = function ( disabled ) { this.setDisabled( disabled ); }; /** * Handle the toolbar state being updated. * * @inheritdoc */ OO.ui.ToolGroupTool.prototype.onUpdateState = function () { this.setActive( false ); }; /** * Build a {@link OO.ui.ToolGroup toolgroup} from the specified configuration. * * @param {Object.<string,Array>} group Toolgroup configuration. Please see * {@link OO.ui.ToolGroup toolgroup} for more information. * @return {OO.ui.ListToolGroup} */ OO.ui.ToolGroupTool.prototype.createGroup = function ( group ) { if ( group.include === '*' ) { // Apply defaults to catch-all groups if ( group.label === undefined ) { group.label = OO.ui.msg( 'ooui-toolbar-more' ); } } return this.toolbar.getToolGroupFactory().create( 'list', this.toolbar, group ); }; /** * BarToolGroups are one of three types of {@link OO.ui.ToolGroup toolgroups} that are used to * create {@link OO.ui.Toolbar toolbars} (the other types of groups are * {@link OO.ui.MenuToolGroup MenuToolGroup} and {@link OO.ui.ListToolGroup ListToolGroup}). * The {@link OO.ui.Tool tools} in a BarToolGroup are displayed by icon in a single row. The * title of the tool is displayed when users move the mouse over the tool. * * BarToolGroups are created by a {@link OO.ui.ToolGroupFactory tool group factory} when the toolbar * is set up. 
* * @example * // Example of a BarToolGroup with two tools * var toolFactory = new OO.ui.ToolFactory(); * var toolGroupFactory = new OO.ui.ToolGroupFactory(); * var toolbar = new OO.ui.Toolbar( toolFactory, toolGroupFactory ); * * // We will be placing status text in this element when tools are used * var $area = $( '<p>' ).text( 'Example of a BarToolGroup with two tools.' ); * * // Define the tools that we're going to place in our toolbar * * // Create a class inheriting from OO.ui.Tool * function SearchTool() { * SearchTool.super.apply( this, arguments ); * } * OO.inheritClass( SearchTool, OO.ui.Tool ); * // Each tool must have a 'name' (used as an internal identifier, see later) and at least one * // of 'icon' and 'title' (displayed icon and text). * SearchTool.static.name = 'search'; * SearchTool.static.icon = 'search'; * SearchTool.static.title = 'Search...'; * // Defines the action that will happen when this tool is selected (clicked). * SearchTool.prototype.onSelect = function () { * $area.text( 'Search tool clicked!' ); * // Never display this tool as "active" (selected). * this.setActive( false ); * }; * SearchTool.prototype.onUpdateState = function () {}; * // Make this tool available in our toolFactory and thus our toolbar * toolFactory.register( SearchTool ); * * // This is a PopupTool. Rather than having a custom 'onSelect' action, it will display a * // little popup window (a PopupWidget). * function HelpTool( toolGroup, config ) { * OO.ui.PopupTool.call( this, toolGroup, $.extend( { popup: { * padded: true, * label: 'Help', * head: true * } }, config ) ); * this.popup.$body.append( '<p>I am helpful!</p>' ); * } * OO.inheritClass( HelpTool, OO.ui.PopupTool ); * HelpTool.static.name = 'help'; * HelpTool.static.icon = 'help'; * HelpTool.static.title = 'Help'; * toolFactory.register( HelpTool ); * * // Finally define which tools and in what order appear in the toolbar. Each tool may only be * // used once (but not all defined tools must be used). * toolbar.setup( [ * { * // 'bar' tool groups display tools by icon only * type: 'bar', * include: [ 'search', 'help' ] * } * ] ); * * // Create some UI around the toolbar and place it in the document * var frame = new OO.ui.PanelLayout( { * expanded: false, * framed: true * } ); * var contentFrame = new OO.ui.PanelLayout( { * expanded: false, * padded: true * } ); * frame.$element.append( * toolbar.$element, * contentFrame.$element.append( $area ) * ); * $( document.body ).append( frame.$element ); * * // Here is where the toolbar is actually built. This must be done after inserting it into the * // document. * toolbar.initialize(); * * For more information about how to add tools to a bar tool group, please see * {@link OO.ui.ToolGroup toolgroup}. * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki][1]. 
* * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @class * @extends OO.ui.ToolGroup * * @constructor * @param {OO.ui.Toolbar} toolbar * @param {Object} [config] Configuration options */ OO.ui.BarToolGroup = function OoUiBarToolGroup( toolbar, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolbar ) && config === undefined ) { config = toolbar; toolbar = config.toolbar; } // Parent constructor OO.ui.BarToolGroup.super.call( this, toolbar, config ); // Initialization this.$element.addClass( 'oo-ui-barToolGroup' ); this.$group.addClass( 'oo-ui-barToolGroup-tools' ); }; /* Setup */ OO.inheritClass( OO.ui.BarToolGroup, OO.ui.ToolGroup ); /* Static Properties */ /** * @static * @inheritdoc */ OO.ui.BarToolGroup.static.titleTooltips = true; /** * @static * @inheritdoc */ OO.ui.BarToolGroup.static.accelTooltips = true; /** * @static * @inheritdoc */ OO.ui.BarToolGroup.static.name = 'bar'; /** * PopupToolGroup is an abstract base class used by both {@link OO.ui.MenuToolGroup MenuToolGroup} * and {@link OO.ui.ListToolGroup ListToolGroup} to provide a popup (an overlaid menu or list of * tools with an optional icon and label). This class can be used for other base classes that * also use this functionality. * * @abstract * @class * @extends OO.ui.ToolGroup * @mixins OO.ui.mixin.IconElement * @mixins OO.ui.mixin.IndicatorElement * @mixins OO.ui.mixin.LabelElement * @mixins OO.ui.mixin.TitledElement * @mixins OO.ui.mixin.FlaggedElement * @mixins OO.ui.mixin.ClippableElement * @mixins OO.ui.mixin.FloatableElement * @mixins OO.ui.mixin.TabIndexedElement * * @constructor * @param {OO.ui.Toolbar} toolbar * @param {Object} [config] Configuration options * @cfg {string} [header] Text to display at the top of the popup */ OO.ui.PopupToolGroup = function OoUiPopupToolGroup( toolbar, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolbar ) && config === undefined ) { config = toolbar; toolbar = config.toolbar; } // Configuration initialization config = $.extend( { indicator: config.indicator === undefined ? ( toolbar.position === 'bottom' ? 'up' : 'down' ) : config.indicator }, config ); // Parent constructor OO.ui.PopupToolGroup.super.call( this, toolbar, config ); // Properties this.active = false; this.dragging = false; // Don't conflict with parent method of the same name this.onPopupDocumentMouseKeyUpHandler = this.onPopupDocumentMouseKeyUp.bind( this ); this.$handle = $( '<span>' ); // Mixin constructors OO.ui.mixin.IconElement.call( this, config ); OO.ui.mixin.IndicatorElement.call( this, config ); OO.ui.mixin.LabelElement.call( this, config ); OO.ui.mixin.TitledElement.call( this, config ); OO.ui.mixin.FlaggedElement.call( this, config ); OO.ui.mixin.ClippableElement.call( this, $.extend( { $clippable: this.$group }, config ) ); OO.ui.mixin.FloatableElement.call( this, $.extend( { $floatable: this.$group, $floatableContainer: this.$handle, hideWhenOutOfView: false, verticalPosition: this.toolbar.position === 'bottom' ? 
'above' : 'below' }, config ) ); OO.ui.mixin.TabIndexedElement.call( this, $.extend( { $tabIndexed: this.$handle }, config ) ); // Events this.$handle.on( { keydown: this.onHandleMouseKeyDown.bind( this ), keyup: this.onHandleMouseKeyUp.bind( this ), mousedown: this.onHandleMouseKeyDown.bind( this ), mouseup: this.onHandleMouseKeyUp.bind( this ) } ); // Initialization this.$handle .addClass( 'oo-ui-popupToolGroup-handle' ) .attr( { role: 'button', 'aria-expanded': 'false' } ) .append( this.$icon, this.$label, this.$indicator ); // If the pop-up should have a header, add it to the top of the toolGroup. // Note: If this feature is useful for other widgets, we could abstract it into an // OO.ui.HeaderedElement mixin constructor. if ( config.header !== undefined ) { this.$group .prepend( $( '<span>' ) .addClass( 'oo-ui-popupToolGroup-header' ) .text( config.header ) ); } this.$element .addClass( 'oo-ui-popupToolGroup' ) .prepend( this.$handle ); this.$group.addClass( 'oo-ui-popupToolGroup-tools' ); this.toolbar.$popups.append( this.$group ); }; /* Setup */ OO.inheritClass( OO.ui.PopupToolGroup, OO.ui.ToolGroup ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.IconElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.IndicatorElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.LabelElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.TitledElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.FlaggedElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.ClippableElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.FloatableElement ); OO.mixinClass( OO.ui.PopupToolGroup, OO.ui.mixin.TabIndexedElement ); /* Methods */ /** * @inheritdoc */ OO.ui.PopupToolGroup.prototype.setDisabled = function () { // Parent method OO.ui.PopupToolGroup.super.prototype.setDisabled.apply( this, arguments ); if ( this.isDisabled() && this.isElementAttached() ) { this.setActive( false ); } }; /** * Handle document mouse up and key up events. * * @protected * @param {MouseEvent|KeyboardEvent} e Mouse up or key up event */ OO.ui.PopupToolGroup.prototype.onPopupDocumentMouseKeyUp = function ( e ) { var $target = $( e.target ); // Only deactivate when clicking outside the dropdown element if ( $target.closest( '.oo-ui-popupToolGroup' )[ 0 ] === this.$element[ 0 ] ) { return; } if ( $target.closest( '.oo-ui-popupToolGroup-tools' )[ 0 ] === this.$group[ 0 ] ) { return; } this.setActive( false ); }; /** * @inheritdoc */ OO.ui.PopupToolGroup.prototype.onMouseKeyUp = function ( e ) { // Only close toolgroup when a tool was actually selected if ( !this.isDisabled() && this.pressed && this.pressed === this.findTargetTool( e ) && ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) ) { this.setActive( false ); } return OO.ui.PopupToolGroup.super.prototype.onMouseKeyUp.call( this, e ); }; /** * @inheritdoc */ OO.ui.PopupToolGroup.prototype.onMouseKeyDown = function ( e ) { var $focused, $firstFocusable, $lastFocusable; // Shift-Tab on the first tool in the group jumps to the handle. // Tab on the last tool in the group jumps to the next group. if ( !this.isDisabled() && e.which === OO.ui.Keys.TAB ) { // We can't use this.items because ListToolGroup inserts the extra fake // expand/collapse tool. 
$focused = $( document.activeElement ); $firstFocusable = OO.ui.findFocusable( this.$group ); if ( $focused[ 0 ] === $firstFocusable[ 0 ] && e.shiftKey ) { this.$handle.trigger( 'focus' ); return false; } $lastFocusable = OO.ui.findFocusable( this.$group, true ); if ( $focused[ 0 ] === $lastFocusable[ 0 ] && !e.shiftKey ) { // Focus this group's handle and let the browser's tab handling happen // (no 'return false'). // This way we don't have to fiddle with other ToolGroups' business, or worry what to do // if the next group is not a PopupToolGroup or doesn't exist at all. this.$handle.trigger( 'focus' ); // Close the popup so that we don't move back inside it (if this is the last group). this.setActive( false ); } } return OO.ui.PopupToolGroup.super.prototype.onMouseKeyDown.call( this, e ); }; /** * Handle mouse up and key up events. * * @protected * @param {jQuery.Event} e Mouse up or key up event * @return {undefined|boolean} False to prevent default if event is handled */ OO.ui.PopupToolGroup.prototype.onHandleMouseKeyUp = function ( e ) { if ( !this.isDisabled() && ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) ) { return false; } }; /** * Handle mouse down and key down events. * * @protected * @param {jQuery.Event} e Mouse down or key down event * @return {undefined|boolean} False to prevent default if event is handled */ OO.ui.PopupToolGroup.prototype.onHandleMouseKeyDown = function ( e ) { var $focusable; if ( !this.isDisabled() ) { // Tab on the handle jumps to the first tool in the group (if the popup is open). if ( e.which === OO.ui.Keys.TAB && !e.shiftKey ) { $focusable = OO.ui.findFocusable( this.$group ); if ( $focusable.length ) { $focusable.trigger( 'focus' ); return false; } } if ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) { this.setActive( !this.active ); return false; } } }; /** * Check if the tool group is active. * * @return {boolean} Tool group is active */ OO.ui.PopupToolGroup.prototype.isActive = function () { return this.active; }; /** * Switch into 'active' mode. * * When active, the popup is visible. A mouseup event anywhere in the document will trigger * deactivation. * * @param {boolean} value The active state to set * @fires active */ OO.ui.PopupToolGroup.prototype.setActive = function ( value ) { var containerWidth, containerLeft; value = !!value; if ( this.active !== value ) { this.active = value; if ( value ) { this.getElementDocument().addEventListener( 'mouseup', this.onPopupDocumentMouseKeyUpHandler, true ); this.getElementDocument().addEventListener( 'keyup', this.onPopupDocumentMouseKeyUpHandler, true ); this.$clippable.css( 'left', '' ); this.$element.addClass( 'oo-ui-popupToolGroup-active' ); this.$group.addClass( 'oo-ui-popupToolGroup-active-tools' ); this.$handle.attr( 'aria-expanded', true ); this.togglePositioning( true ); this.toggleClipping( true ); // Try anchoring the popup to the left first this.setHorizontalPosition( 'start' ); if ( this.isClippedHorizontally() || this.isFloatableOutOfView() ) { // Anchoring to the left caused the popup to clip, so anchor it to the // right instead. this.setHorizontalPosition( 'end' ); } if ( this.isClippedHorizontally() || this.isFloatableOutOfView() ) { // Anchoring to the right also caused the popup to clip, so just make it fill the // container. 
containerWidth = this.$clippableScrollableContainer.width(); containerLeft = this.$clippableScrollableContainer[ 0 ] === document.documentElement ? 0 : this.$clippableScrollableContainer.offset().left; this.toggleClipping( false ); this.setHorizontalPosition( 'start' ); this.$clippable.css( { 'margin-left': -( this.$element.offset().left - containerLeft ), width: containerWidth } ); } } else { this.getElementDocument().removeEventListener( 'mouseup', this.onPopupDocumentMouseKeyUpHandler, true ); this.getElementDocument().removeEventListener( 'keyup', this.onPopupDocumentMouseKeyUpHandler, true ); this.$element.removeClass( 'oo-ui-popupToolGroup-active' ); this.$group.removeClass( 'oo-ui-popupToolGroup-active-tools' ); this.$handle.attr( 'aria-expanded', false ); this.togglePositioning( false ); this.toggleClipping( false ); } this.emit( 'active', this.active ); this.updateThemeClasses(); } }; /** * ListToolGroups are one of three types of {@link OO.ui.ToolGroup toolgroups} that are used to * create {@link OO.ui.Toolbar toolbars} (the other types of groups are * {@link OO.ui.MenuToolGroup MenuToolGroup} and {@link OO.ui.BarToolGroup BarToolGroup}). * The {@link OO.ui.Tool tools} in a ListToolGroup are displayed by label in a dropdown menu. * The title of the tool is used as the label text. The menu itself can be configured with a label, * icon, indicator, header, and title. * * ListToolGroups can be configured to be expanded and collapsed. Collapsed lists will have a * ‘More’ option that users can select to see the full list of tools. If a collapsed toolgroup is * expanded, a ‘Fewer’ option permits users to collapse the list again. * * ListToolGroups are created by a {@link OO.ui.ToolGroupFactory toolgroup factory} when the * toolbar is set up. The factory requires the ListToolGroup's symbolic name, 'list', which is * specified along with the other configurations. For more information about how to add tools to a * ListToolGroup, please see {@link OO.ui.ToolGroup toolgroup}. * * @example * // Example of a ListToolGroup * var toolFactory = new OO.ui.ToolFactory(); * var toolGroupFactory = new OO.ui.ToolGroupFactory(); * var toolbar = new OO.ui.Toolbar( toolFactory, toolGroupFactory ); * * // Configure and register two tools * function SettingsTool() { * SettingsTool.super.apply( this, arguments ); * } * OO.inheritClass( SettingsTool, OO.ui.Tool ); * SettingsTool.static.name = 'settings'; * SettingsTool.static.icon = 'settings'; * SettingsTool.static.title = 'Change settings'; * SettingsTool.prototype.onSelect = function () { * this.setActive( false ); * }; * SettingsTool.prototype.onUpdateState = function () {}; * toolFactory.register( SettingsTool ); * // Register two more tools, nothing interesting here * function StuffTool() { * StuffTool.super.apply( this, arguments ); * } * OO.inheritClass( StuffTool, OO.ui.Tool ); * StuffTool.static.name = 'stuff'; * StuffTool.static.icon = 'search'; * StuffTool.static.title = 'Change the world'; * StuffTool.prototype.onSelect = function () { * this.setActive( false ); * }; * StuffTool.prototype.onUpdateState = function () {}; * toolFactory.register( StuffTool ); * toolbar.setup( [ * { * // Configurations for list toolgroup. 
* type: 'list', * label: 'ListToolGroup', * icon: 'ellipsis', * title: 'This is the title, displayed when user moves the mouse over the list ' + * 'toolgroup', * header: 'This is the header', * include: [ 'settings', 'stuff' ], * allowCollapse: ['stuff'] * } * ] ); * * // Create some UI around the toolbar and place it in the document * var frame = new OO.ui.PanelLayout( { * expanded: false, * framed: true * } ); * frame.$element.append( * toolbar.$element * ); * $( document.body ).append( frame.$element ); * // Build the toolbar. This must be done after the toolbar has been appended to the document. * toolbar.initialize(); * * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki][1]. * * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @class * @extends OO.ui.PopupToolGroup * * @constructor * @param {OO.ui.Toolbar} toolbar * @param {Object} [config] Configuration options * @cfg {Array} [allowCollapse] Allow the specified tools to be collapsed. By default, collapsible * tools will only be displayed if users click the ‘More’ option displayed at the bottom of the * list. If the list is expanded, a ‘Fewer’ option permits users to collapse the list again. * Any tools that are included in the toolgroup, but are not designated as collapsible, will always * be displayed. * To open a collapsible list in its expanded state, set #expanded to 'true'. * @cfg {Array} [forceExpand] Expand the specified tools. All other tools will be designated as * collapsible. Unless #expanded is set to true, the collapsible tools will be collapsed when the * list is first opened. * @cfg {boolean} [expanded=false] Expand collapsible tools. This config is only relevant if tools * have been designated as collapsible. When expanded is set to true, all tools in the group will * be displayed when the list is first opened. Users can collapse the list with a ‘Fewer’ option at * the bottom. */ OO.ui.ListToolGroup = function OoUiListToolGroup( toolbar, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolbar ) && config === undefined ) { config = toolbar; toolbar = config.toolbar; } // Configuration initialization config = config || {}; // Properties (must be set before parent constructor, which calls #populate) this.allowCollapse = config.allowCollapse; this.forceExpand = config.forceExpand; this.expanded = config.expanded !== undefined ? 
config.expanded : false; this.collapsibleTools = []; // Parent constructor OO.ui.ListToolGroup.super.call( this, toolbar, config ); // Initialization this.$element.addClass( 'oo-ui-listToolGroup' ); this.$group.addClass( 'oo-ui-listToolGroup-tools' ); }; /* Setup */ OO.inheritClass( OO.ui.ListToolGroup, OO.ui.PopupToolGroup ); /* Static Properties */ /** * @static * @inheritdoc */ OO.ui.ListToolGroup.static.name = 'list'; /* Methods */ /** * @inheritdoc */ OO.ui.ListToolGroup.prototype.populate = function () { var i, len, allowCollapse = []; OO.ui.ListToolGroup.super.prototype.populate.call( this ); // Update the list of collapsible tools if ( this.allowCollapse !== undefined ) { allowCollapse = this.allowCollapse; } else if ( this.forceExpand !== undefined ) { allowCollapse = OO.simpleArrayDifference( Object.keys( this.tools ), this.forceExpand ); } this.collapsibleTools = []; for ( i = 0, len = allowCollapse.length; i < len; i++ ) { if ( this.tools[ allowCollapse[ i ] ] !== undefined ) { this.collapsibleTools.push( this.tools[ allowCollapse[ i ] ] ); } } // Keep at the end, even when tools are added this.$group.append( this.getExpandCollapseTool().$element ); this.getExpandCollapseTool().toggle( this.collapsibleTools.length !== 0 ); this.updateCollapsibleState(); }; /** * Get the expand/collapse tool for this group * * @return {OO.ui.Tool} Expand collapse tool */ OO.ui.ListToolGroup.prototype.getExpandCollapseTool = function () { var ExpandCollapseTool; if ( this.expandCollapseTool === undefined ) { ExpandCollapseTool = function () { ExpandCollapseTool.super.apply( this, arguments ); }; OO.inheritClass( ExpandCollapseTool, OO.ui.Tool ); ExpandCollapseTool.prototype.onSelect = function () { this.toolGroup.expanded = !this.toolGroup.expanded; this.toolGroup.updateCollapsibleState(); this.setActive( false ); }; ExpandCollapseTool.prototype.onUpdateState = function () { // Do nothing. Tool interface requires an implementation of this function. }; ExpandCollapseTool.static.name = 'more-fewer'; this.expandCollapseTool = new ExpandCollapseTool( this ); } return this.expandCollapseTool; }; /** * @inheritdoc */ OO.ui.ListToolGroup.prototype.onMouseKeyUp = function ( e ) { // Do not close the popup when the user wants to show more/fewer tools if ( $( e.target ).closest( '.oo-ui-tool-name-more-fewer' ).length && ( e.which === OO.ui.MouseButtons.LEFT || e.which === OO.ui.Keys.SPACE || e.which === OO.ui.Keys.ENTER ) ) { // HACK: Prevent the popup list from being hidden. Skip the PopupToolGroup implementation // (which hides the popup list when a tool is selected) and call ToolGroup's implementation // directly. return OO.ui.ListToolGroup.super.super.prototype.onMouseKeyUp.call( this, e ); } else { return OO.ui.ListToolGroup.super.prototype.onMouseKeyUp.call( this, e ); } }; OO.ui.ListToolGroup.prototype.updateCollapsibleState = function () { var inverted = this.toolbar.position === 'bottom', icon = this.expanded === inverted ? 'expand' : 'collapse'; this.getExpandCollapseTool() .setIcon( icon ) .setTitle( OO.ui.msg( this.expanded ? 
'ooui-toolgroup-collapse' : 'ooui-toolgroup-expand' ) ); for ( var i = 0; i < this.collapsibleTools.length; i++ ) { this.collapsibleTools[ i ].toggle( this.expanded ); } // Re-evaluate clipping, because our height has changed this.clip(); }; /** * MenuToolGroups are one of three types of {@link OO.ui.ToolGroup toolgroups} that are used to * create {@link OO.ui.Toolbar toolbars} (the other types of groups are * {@link OO.ui.BarToolGroup BarToolGroup} and {@link OO.ui.ListToolGroup ListToolGroup}). * MenuToolGroups contain selectable {@link OO.ui.Tool tools}, which are displayed by label in a * dropdown menu. The tool's title is used as the label text, and the menu label is updated to * reflect which tool or tools are currently selected. If no tools are selected, the menu label * is empty. The menu can be configured with an indicator, icon, title, and/or header. * * MenuToolGroups are created by a {@link OO.ui.ToolGroupFactory tool group factory} when the * toolbar is set up. * * @example * // Example of a MenuToolGroup * var toolFactory = new OO.ui.ToolFactory(); * var toolGroupFactory = new OO.ui.ToolGroupFactory(); * var toolbar = new OO.ui.Toolbar( toolFactory, toolGroupFactory ); * * // We will be placing status text in this element when tools are used * var $area = $( '<p>' ).text( 'An example of a MenuToolGroup. Select a tool from the ' * + 'dropdown menu.' ); * * // Define the tools that we're going to place in our toolbar * * function SettingsTool() { * SettingsTool.super.apply( this, arguments ); * this.reallyActive = false; * } * OO.inheritClass( SettingsTool, OO.ui.Tool ); * SettingsTool.static.name = 'settings'; * SettingsTool.static.icon = 'settings'; * SettingsTool.static.title = 'Change settings'; * SettingsTool.prototype.onSelect = function () { * $area.text( 'Settings tool clicked!' ); * // Toggle the active state on each click * this.reallyActive = !this.reallyActive; * this.setActive( this.reallyActive ); * // To update the menu label * this.toolbar.emit( 'updateState' ); * }; * SettingsTool.prototype.onUpdateState = function () {}; * toolFactory.register( SettingsTool ); * * function StuffTool() { * StuffTool.super.apply( this, arguments ); * this.reallyActive = false; * } * OO.inheritClass( StuffTool, OO.ui.Tool ); * StuffTool.static.name = 'stuff'; * StuffTool.static.icon = 'ellipsis'; * StuffTool.static.title = 'More stuff'; * StuffTool.prototype.onSelect = function () { * $area.text( 'More stuff tool clicked!' ); * // Toggle the active state on each click * this.reallyActive = !this.reallyActive; * this.setActive( this.reallyActive ); * // To update the menu label * this.toolbar.emit( 'updateState' ); * }; * StuffTool.prototype.onUpdateState = function () {}; * toolFactory.register( StuffTool ); * * // Finally define which tools and in what order appear in the toolbar. Each tool may only be * // used once (but not all defined tools must be used). * toolbar.setup( [ * { * type: 'menu', * header: 'This is the (optional) header', * title: 'This is the (optional) title', * include: [ 'settings', 'stuff' ] * } * ] ); * * // Create some UI around the toolbar and place it in the document * var frame = new OO.ui.PanelLayout( { * expanded: false, * framed: true * } ); * var contentFrame = new OO.ui.PanelLayout( { * expanded: false, * padded: true * } ); * frame.$element.append( * toolbar.$element, * contentFrame.$element.append( $area ) * ); * $( document.body ).append( frame.$element ); * * // Here is where the toolbar is actually built. 
This must be done after inserting it into the * // document. * toolbar.initialize(); * toolbar.emit( 'updateState' ); * * For more information about how to add tools to a MenuToolGroup, please see * {@link OO.ui.ToolGroup toolgroup}. * For more information about toolbars in general, please see the * [OOUI documentation on MediaWiki] [1]. * * [1]: https://www.mediawiki.org/wiki/OOUI/Toolbars * * @class * @extends OO.ui.PopupToolGroup * * @constructor * @param {OO.ui.Toolbar} toolbar * @param {Object} [config] Configuration options */ OO.ui.MenuToolGroup = function OoUiMenuToolGroup( toolbar, config ) { // Allow passing positional parameters inside the config object if ( OO.isPlainObject( toolbar ) && config === undefined ) { config = toolbar; toolbar = config.toolbar; } // Configuration initialization config = config || {}; // Parent constructor OO.ui.MenuToolGroup.super.call( this, toolbar, config ); // Events this.toolbar.connect( this, { updateState: 'onUpdateState' } ); // Initialization this.$element.addClass( 'oo-ui-menuToolGroup' ); this.$group.addClass( 'oo-ui-menuToolGroup-tools' ); }; /* Setup */ OO.inheritClass( OO.ui.MenuToolGroup, OO.ui.PopupToolGroup ); /* Static Properties */ /** * @static * @inheritdoc */ OO.ui.MenuToolGroup.static.name = 'menu'; /* Methods */ /** * Handle the toolbar state being updated. * * When the state changes, the title of each active item in the menu will be joined together and * used as a label for the group. The label will be empty if none of the items are active. * * @private */ OO.ui.MenuToolGroup.prototype.onUpdateState = function () { var name, labelTexts = []; for ( name in this.tools ) { if ( this.tools[ name ].isActive() ) { labelTexts.push( this.tools[ name ].getTitle() ); } } this.setLabel( labelTexts.join( ', ' ) || ' ' ); }; }( OO ) ); //# sourceMappingURL=oojs-ui-toolbars.js.map.json
import types, itertools


def type_string(obj):
    # Fully qualified "module.name" string for an object's type; old-style
    # instances report their class rather than types.InstanceType.
    objType = type(obj)
    if objType is types.InstanceType:
        objType = obj.__class__
    return getattr(objType, '__module__', '-') + '.' + objType.__name__


class NetRepr(object):
    # Serializes objects to evaluable source strings for the bridge;
    # non-primitive objects are rendered as pool references.

    def __init__(self, objectPool):
        self.objectPool = objectPool
        self.cache = {}
        self._identfactory = itertools.count()

    def clear(self):
        self.cache.clear()
        self._identfactory = itertools.count()

    def netrepr_tuple(self, obj):
        return repr(tuple(itertools.imap(self.netrepr, obj)))

    def netrepr_list(self, obj):
        return repr(map(self.netrepr, obj))

    def netrepr_exception(self, e):
        cls = e.__class__
        if cls.__module__ == 'exceptions':
            rval = cls.__name__ + self.netrepr_tuple(e.args)
        else:
            rval = 'Exception(%r)' % (
                '[Remote] %s.%s %s' % (cls.__module__, cls.__name__, e),)
        return rval

    def netrepr(self, obj):
        if obj is None:
            return 'None'
        objtype = type(obj)
        if objtype is int or objtype is long or objtype is float:
            return repr(obj)
        elif objtype is str or objtype is unicode:
            if True:
                return repr(obj)
            else:
                # "intern" these (currently disabled): send the string once,
                # then refer to it by id on subsequent uses.
                obj_id = id(obj)
                cached = self.cache.get(obj_id, None)
                if cached is None:
                    self.cache[obj_id] = '__cached__(%r)' % (obj_id,)
                    cached = '__cache__(%r, %r)' % (obj_id, obj)
                return cached
        return self.netrepr_default(obj)

    def netrepr_default(self, obj):
        method = getattr(obj, '__netrepr__', None)
        if method is None:
            method = self.objectPool.referenceForObject(obj).__netrepr__
        return method()


class BaseObjectPool(object):
    def __init__(self):
        self.idents = {}
        self.refs = {}
        self.pools = []

    def referenceForIdent(self, ident):
        return self.idents[ident]

    def base_alloc(self, ref, ident):
        self.refs[ref] = ident
        self.idents[ident] = ref

    def base_dealloc(self, ref, ident):
        del self.refs[ref]
        del self.idents[ident]

    def autorelease(self, ref):
        if not self.pools:
            raise RuntimeError, "no autoreleasepool for %r" % (ref,)
        pool = self.pools[-1]
        pool[ref] = pool.get(ref, 0) + 1

    def push(self):
        #print "pushed pool"
        self.pools.append({})

    def pop(self):
        if not self.pools:
            raise RuntimeError, "popped too many pools"
        #print "popped pool"
        pool = self.pools.pop()
        for ref, count in pool.iteritems():
            ref.release(count)

    def referenceForObject(self, obj):
        raise TypeError, "Can not create a reference to %r, the bridge is unidirectional" % (obj,)


class RemoteObjectPool(BaseObjectPool):
    # Tracks proxies for objects living on the other side of the bridge.

    def __init__(self, writecode):
        BaseObjectPool.__init__(self)
        self.writecode = writecode
        self.namespace = {
            'None': None,
            '__ref__': self.referenceForRemoteIdent,
        }

    def referenceForRemoteIdent(self, ident, type_string):
        rval = self.idents.get(ident)
        if rval is None:
            rval = RemoteObjectReference(self, ident, type_string)
        return rval


class ObjectPool(BaseObjectPool):
    # Tracks references to local objects handed out across the bridge.

    def __init__(self):
        BaseObjectPool.__init__(self)
        self._identfactory = itertools.count()
        self.obj_ids = {}
        self.namespace = {
            '__obj__': self.objectForIdent,
        }

    def object_alloc(self, ref, obj_id):
        self.obj_ids[obj_id] = ref

    def object_dealloc(self, ref, obj_id):
        del self.obj_ids[obj_id]

    def objectForIdent(self, ident):
        return self.referenceForIdent(ident).obj

    def referenceForObject(self, obj):
        obj_id = id(obj)
        rval = self.obj_ids.get(obj_id)
        if rval is None:
            ident = self._identfactory.next()
            rval = ObjectReference(self, ident, type_string(obj), obj, obj_id)
            rval = rval.alloc().autorelease()
        return rval


class BaseObjectReference(object):
    def __init__(self, objectPool, ident, type_string):
        self.ident = ident
        self.type_string = type_string
        self.objectPool = objectPool
        self.retainCount = 1

    def retain(self, count=1):
        #print "%r.retain(%d)" % (self, count)
        self.retainCount += count
        return self

    def alloc(self):
        self.objectPool.base_alloc(self, self.ident)
        return self

    def dealloc(self):
        self.objectPool.base_dealloc(self, self.ident)
        self.retainCount = -1

    def release(self, count=1):
        #print "%r.release(%d)" % (self, count)
        newCount = self.retainCount - count
        #print "  newCount = %d" % (newCount,)
        if newCount == 0:
            self.dealloc()
        elif newCount < 0:
            raise ValueError, "Reference %r over-released (%r -> %r)" % (self, self.retainCount, newCount)
        self.retainCount = newCount
        return self

    def autorelease(self):
        #print "%s.autorelease()" % (self,)
        self.objectPool.autorelease(self)
        return self

    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.ident, self.type_string)


class RemoteObjectReference(BaseObjectReference):
    def __netrepr__(self):
        # Evaluated on the owning side, where __obj__ resolves the ident
        # back to the real object.
        return "__obj__(%r)" % (self.ident,)


class ObjectReference(BaseObjectReference):
    def __init__(self, objectPool, ident, type_string, obj, obj_id):
        BaseObjectReference.__init__(self, objectPool, ident, type_string)
        self.obj = obj
        self.obj_id = id(obj)

    def alloc(self):
        self = BaseObjectReference.alloc(self)
        self.objectPool.object_alloc(self, self.obj_id)
        return self

    def dealloc(self):
        self.objectPool.object_dealloc(self, self.obj_id)
        self.obj = None
        self.obj_id = -1
        BaseObjectReference.dealloc(self)

    def __netrepr__(self):
        # Evaluated on the remote side, where __ref__ builds a proxy.
        return "__ref__(%r, %r)" % (self.ident, self.type_string)


def test_netrepr():
    pool = ObjectPool()
    pool.push()
    netrepr = NetRepr(pool).netrepr
    assert netrepr("foo") == repr("foo")
    ref = pool.referenceForObject(object)
    assert ref.obj is object
    assert ref is pool.referenceForObject(object)
    assert ref.retainCount == 1
    refrepr = netrepr(ref)
    assert refrepr == netrepr(ref)
    ref.retain()
    assert ref.retainCount == 2
    pool.pop()
    pool.push()
    assert ref.retainCount == 1

    def __ref__(ident, type_string):
        return pool.referenceForIdent(ident)

    netref = eval(refrepr)
    assert netref is ref
    assert netref.obj is object
    ref.release()
    pool.pop()
    assert ref.obj is None
# https://www.hackerrank.com/challenges/reduce-function/problem
from functools import reduce  # reduce is not a builtin on Python 3


def product(fracs):
    # Multiply the Fraction instances together; the initial value 1 keeps
    # the running product exact and handles the degenerate empty case.
    t = reduce(lambda x, y: x * y, fracs, 1)
    return t.numerator, t.denominator
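
# A quick sanity check, assuming the challenge's inputs: a list of
# fractions.Fraction values whose exact product reduces to 3/8 here.
if __name__ == '__main__':
    from fractions import Fraction
    assert product([Fraction(1, 2), Fraction(3, 4)]) == (3, 8)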
export default 'react imported from root';
# -*- coding: utf-8 -*-
"""
ITU-R BT.2100
=============

Defines *ITU-R BT.2100* opto-electrical transfer functions (OETF / OECF),
opto-optical transfer functions (OOTF / OOCF) and electro-optical transfer
functions (EOTF / EOCF) and their inverse:

-   :func:`colour.models.oetf_PQ_BT2100`
-   :func:`colour.models.oetf_inverse_PQ_BT2100`
-   :func:`colour.models.eotf_PQ_BT2100`
-   :func:`colour.models.eotf_inverse_PQ_BT2100`
-   :func:`colour.models.ootf_PQ_BT2100`
-   :func:`colour.models.ootf_inverse_PQ_BT2100`
-   :func:`colour.models.oetf_HLG_BT2100`
-   :func:`colour.models.oetf_inverse_HLG_BT2100`
-   :func:`colour.models.eotf_HLG_BT2100_1`
-   :func:`colour.models.eotf_HLG_BT2100_2`
-   :attr:`colour.models.BT2100_HLG_EOTF_METHODS`
-   :func:`colour.models.eotf_HLG_BT2100`
-   :func:`colour.models.eotf_inverse_HLG_BT2100_1`
-   :func:`colour.models.eotf_inverse_HLG_BT2100_2`
-   :attr:`colour.models.BT2100_HLG_EOTF_INVERSE_METHODS`
-   :func:`colour.models.eotf_inverse_HLG_BT2100`
-   :func:`colour.models.ootf_HLG_BT2100_1`
-   :func:`colour.models.ootf_HLG_BT2100_2`
-   :attr:`colour.models.BT2100_HLG_OOTF_METHODS`
-   :func:`colour.models.ootf_HLG_BT2100`
-   :func:`colour.models.ootf_inverse_HLG_BT2100_1`
-   :func:`colour.models.ootf_inverse_HLG_BT2100_2`
-   :attr:`colour.models.BT2100_HLG_OOTF_INVERSE_METHODS`
-   :func:`colour.models.ootf_inverse_HLG_BT2100`

See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_

References
----------
-   :cite:`Borer2017a` : Borer, T. (2017). Private Discussion with
    Mansencal, T. and Shaw, N.
-   :cite:`InternationalTelecommunicationUnion2017` : International
    Telecommunication Union. (2017). Recommendation ITU-R BT.2100-1 - Image
    parameter values for high dynamic range television for use in production
    and international programme exchange. Retrieved from
    https://www.itu.int/dms_pubrec/itu-r/rec/bt/\
R-REC-BT.2100-1-201706-I!!PDF-E.pdf
-   :cite:`InternationalTelecommunicationUnion2018` : International
    Telecommunication Union. (2018). Recommendation ITU-R BT.2100-2 - Image
    parameter values for high dynamic range television for use in production
    and international programme exchange.
Retrieved from https://www.itu.int/dms_pubrec/itu-r/rec/bt/\ R-REC-BT.2100-2-201807-I!!PDF-E.pdf """ from __future__ import division, unicode_literals import numpy as np from colour.algebra import spow from colour.models.rgb.transfer_functions import ( eotf_BT1886, eotf_ST2084, eotf_inverse_BT1886, oetf_ARIBSTDB67, oetf_BT709, eotf_inverse_ST2084, oetf_inverse_ARIBSTDB67, oetf_inverse_BT709) from colour.models.rgb.transfer_functions.arib_std_b67 import ( ARIBSTDB67_CONSTANTS) from colour.utilities import ( CaseInsensitiveMapping, Structure, as_float_array, as_float, filter_kwargs, from_range_1, to_domain_1, tsplit, tstack, usage_warning) __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2019 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-science@googlegroups.com' __status__ = 'Production' __all__ = [ 'oetf_PQ_BT2100', 'oetf_inverse_PQ_BT2100', 'eotf_PQ_BT2100', 'eotf_inverse_PQ_BT2100', 'ootf_PQ_BT2100', 'ootf_inverse_PQ_BT2100', 'BT2100_HLG_WEIGHTS', 'BT2100_HLG_CONSTANTS', 'gamma_function_HLG_BT2100', 'oetf_HLG_BT2100', 'oetf_inverse_HLG_BT2100', 'black_level_lift_HLG_BT2100', 'eotf_HLG_BT2100_1', 'eotf_HLG_BT2100_2', 'BT2100_HLG_EOTF_METHODS', 'eotf_HLG_BT2100', 'eotf_inverse_HLG_BT2100_1', 'eotf_inverse_HLG_BT2100_2', 'BT2100_HLG_EOTF_INVERSE_METHODS', 'eotf_inverse_HLG_BT2100', 'ootf_HLG_BT2100_1', 'ootf_HLG_BT2100_2', 'BT2100_HLG_OOTF_METHODS', 'ootf_HLG_BT2100', 'ootf_inverse_HLG_BT2100_1', 'ootf_inverse_HLG_BT2100_2', 'BT2100_HLG_OOTF_INVERSE_METHODS', 'ootf_inverse_HLG_BT2100' ] def oetf_PQ_BT2100(E): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* opto-electrical transfer function (OETF / OECF). The OETF maps relative scene linear light into the non-linear *PQ* signal value. Parameters ---------- E : numeric or array_like :math:`E = {R_S, G_S, B_S; Y_S; or I_S}` is the signal determined by scene light and scaled by camera exposure. Returns ------- numeric or ndarray :math:`E'` is the resulting non-linear signal (:math:`R'`, :math:`G'`, :math:`B'`). Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> oetf_PQ_BT2100(0.1) # doctest: +ELLIPSIS 0.7247698... """ return eotf_inverse_ST2084(ootf_PQ_BT2100(E), 10000) def oetf_inverse_PQ_BT2100(E_p): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* inverse opto-electrical transfer function (OETF / OECF). Parameters ---------- E_p : numeric or array_like :math:`E'` is the resulting non-linear signal (:math:`R'`, :math:`G'`, :math:`B'`). Returns ------- numeric or ndarray :math:`E = {R_S, G_S, B_S; Y_S; or I_S}` is the signal determined by scene light and scaled by camera exposure. 
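    The inverse composes with the forward OETF to an identity, to within
    floating point precision:

    >>> oetf_inverse_PQ_BT2100(oetf_PQ_BT2100(0.1))  # doctest: +ELLIPSIS
    0.0999999...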
Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> oetf_inverse_PQ_BT2100(0.724769816665726) # doctest: +ELLIPSIS 0.0999999... """ return ootf_inverse_PQ_BT2100(eotf_ST2084(E_p, 10000)) def eotf_PQ_BT2100(E_p): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* electro-optical transfer function (EOTF / EOCF). The EOTF maps the non-linear *PQ* signal into display light. Parameters ---------- E_p : numeric or array_like :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *PQ* space [0, 1]. Returns ------- numeric or ndarray :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> eotf_PQ_BT2100(0.724769816665726) # doctest: +ELLIPSIS 779.9883608... """ return eotf_ST2084(E_p, 10000) def eotf_inverse_PQ_BT2100(F_D): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* inverse electro-optical transfer function (EOTF / EOCF). Parameters ---------- F_D : numeric or array_like :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. Returns ------- numeric or ndarray :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *PQ* space [0, 1]. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> eotf_inverse_PQ_BT2100(779.988360834085370) # doctest: +ELLIPSIS 0.7247698... """ return eotf_inverse_ST2084(F_D, 10000) def ootf_PQ_BT2100(E): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* opto-optical transfer function (OOTF / OOCF). The OOTF maps relative scene linear light to display linear light. 
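    As a point of reference, the relationship implemented below is
    :math:`F_D = G_{1886}[G_{709}[59.5208 E]] \\times 100`, i.e. the *BT.709*
    OETF followed by the *BT.1886* EOTF, applied to the exposure-scaled scene
    signal.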
Parameters ---------- E : numeric or array_like :math:`E = {R_S, G_S, B_S; Y_S; or I_S}` is the signal determined by scene light and scaled by camera exposure. Returns ------- numeric or ndarray :math:`F_D` is the luminance of a displayed linear component (:math:`R_D`, :math:`G_D`, :math:`B_D`; :math:`Y_D`; or :math:`I_D`). Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> ootf_PQ_BT2100(0.1) # doctest: +ELLIPSIS 779.9883608... """ E = as_float_array(E) return 100 * eotf_BT1886(oetf_BT709(59.5208 * E)) def ootf_inverse_PQ_BT2100(F_D): """ Defines *Recommendation ITU-R BT.2100* *Reference PQ* inverse opto-optical transfer function (OOTF / OOCF). Parameters ---------- F_D : numeric or array_like :math:`F_D` is the luminance of a displayed linear component (:math:`R_D`, :math:`G_D`, :math:`B_D`; :math:`Y_D`; or :math:`I_D`). Returns ------- numeric or ndarray :math:`E = {R_S, G_S, B_S; Y_S; or I_S}` is the signal determined by scene light and scaled by camera exposure. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> ootf_inverse_PQ_BT2100(779.988360834115840) # doctest: +ELLIPSIS 0.1000000... """ F_D = as_float_array(F_D) return oetf_inverse_BT709(eotf_inverse_BT1886(F_D / 100)) / 59.5208 BT2100_HLG_WEIGHTS = np.array([0.2627, 0.6780, 0.0593]) """ Luminance weights for *Recommendation ITU-R BT.2100* *Reference HLG*. BT2100_HLG_WEIGHTS : ndarray """ BT2100_HLG_CONSTANTS = Structure( a=ARIBSTDB67_CONSTANTS.a, b=1 - 4 * ARIBSTDB67_CONSTANTS.a, c=0.5 - ARIBSTDB67_CONSTANTS.a * np.log(4 * ARIBSTDB67_CONSTANTS.a)) """ *Recommendation ITU-R BT.2100* *Reference HLG* constants expressed in their analytical form in contrast to the *ARIB STD-B67 (Hybrid Log-Gamma)* numerical reference. References ---------- :cite:`InternationalTelecommunicationUnion2017` BT2100_HLG_CONSTANTS : Structure """ def gamma_function_HLG_BT2100(L_W=1000): """ Returns the *Reference HLG* system gamma value for given display nominal peak luminance. Parameters ---------- L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. Returns ------- numeric *Reference HLG* system gamma value. Examples -------- >>> gamma_function_HLG_BT2100() 1.2 >>> gamma_function_HLG_BT2100(2000) # doctest: +ELLIPSIS 1.3264325... >>> gamma_function_HLG_BT2100(4000) # doctest: +ELLIPSIS 1.4528651... 
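    The formula :math:`\\gamma = 1.2 + 0.42\\log_{10}(L_W / 1000)`
    extrapolates to other nominal peak luminances as well, for instance
    (illustrative value, not tabulated in the Recommendation):

    >>> gamma_function_HLG_BT2100(500)  # doctest: +ELLIPSIS
    1.0735674...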
""" gamma = 1.2 + 0.42 * np.log10(L_W / 1000) return gamma def oetf_HLG_BT2100(E, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical transfer function (OETF / OECF). The OETF maps relative scene linear light into the non-linear *HLG* signal value. Parameters ---------- E : numeric or array_like :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray :math:`E'` is the resulting non-linear signal :math:`{R', G', B'}`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> oetf_HLG_BT2100(0.18 / 12) # doctest: +ELLIPSIS 0.2121320... """ return oetf_ARIBSTDB67(12 * E, constants=constants) def oetf_inverse_HLG_BT2100(E_p, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-electrical transfer function (OETF / OECF). Parameters ---------- E_p : numeric or array_like :math:`E'` is the resulting non-linear signal :math:`{R', G', B'}`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> oetf_inverse_HLG_BT2100(0.212132034355964) # doctest: +ELLIPSIS 0.0149999... """ return oetf_inverse_ARIBSTDB67(E_p, constants=constants) / 12 def black_level_lift_HLG_BT2100(L_B=0, L_W=1000, gamma=None): """ Returns the *Reference HLG* black level lift :math:`\\Beta` for given display luminance for black, nominal peak luminance and system gamma value. Parameters ---------- L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. Returns ------- numeric *Reference HLG* black level lift :math:`\\Beta`. Examples -------- >>> black_level_lift_HLG_BT2100() 0.0 >>> black_level_lift_HLG_BT2100(0.01) # doctest: +ELLIPSIS 0.0142964... 
>>> black_level_lift_HLG_BT2100(0.001, 2000) # doctest: +ELLIPSIS 0.0073009... >>> black_level_lift_HLG_BT2100(0.01, gamma=1.4) # doctest: +ELLIPSIS 0.0283691... """ if gamma is None: gamma = gamma_function_HLG_BT2100(L_W) beta = np.sqrt(3 * spow((L_B / L_W), 1 / gamma)) return beta def eotf_HLG_BT2100_1(E_p, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* electro-optical transfer function (EOTF / EOCF) as given in *ITU-R BT.2100-1*. The EOTF maps the non-linear *HLG* signal into display light. Parameters ---------- E_p : numeric or array_like :math:`E'` is the non-linear signal :math:`{R', G', B'}` as defined for the OETF. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> eotf_HLG_BT2100_1(0.212132034355964) # doctest: +ELLIPSIS 6.4760398... >>> eotf_HLG_BT2100_1(0.212132034355964, 0.01) # doctest: +ELLIPSIS 6.4859750... """ return ootf_HLG_BT2100_1( oetf_inverse_ARIBSTDB67(E_p, constants=constants) / 12, L_B, L_W, gamma) def eotf_HLG_BT2100_2(E_p, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* electro-optical transfer function (EOTF / EOCF) as given in *ITU-R BT.2100-2* with the modified black level behaviour. The EOTF maps the non-linear *HLG* signal into display light. Parameters ---------- E_p : numeric or array_like :math:`E'` is the non-linear signal :math:`{R', G', B'}` as defined for the *HLG Reference* OETF. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. 
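    The modified black level behaviour applies the black level lift
    :math:`\\beta` to the non-linear signal, i.e. :math:`E'` is first mapped
    to :math:`(1 - \\beta) E' + \\beta` before the inverse *OETF*, as can be
    seen in the implementation below.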
Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> eotf_HLG_BT2100_2(0.212132034355964) # doctest: +ELLIPSIS 6.4760398... >>> eotf_HLG_BT2100_2(0.212132034355964, 0.01) # doctest: +ELLIPSIS 7.3321975... """ beta = black_level_lift_HLG_BT2100(L_B, L_W, gamma) return ootf_HLG_BT2100_2( oetf_inverse_ARIBSTDB67( (1 - beta) * E_p + beta, constants=constants) / 12, L_W, gamma) BT2100_HLG_EOTF_METHODS = CaseInsensitiveMapping({ 'ITU-R BT.2100-1': eotf_HLG_BT2100_1, 'ITU-R BT.2100-2': eotf_HLG_BT2100_2, }) BT2100_HLG_EOTF_METHODS.__doc__ = """ Supported *Recommendation ITU-R BT.2100* *Reference HLG* electro-optical transfer function (EOTF / EOCF). References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` BT2100_HLG_EOTF_METHODS : CaseInsensitiveMapping **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}** """ def eotf_HLG_BT2100(E_p, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS, method='ITU-R BT.2100-2'): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* electro-optical transfer function (EOTF / EOCF). The EOTF maps the non-linear *HLG* signal into display light. Parameters ---------- E_p : numeric or array_like :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *HLG* space. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. method : unicode, optional **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}**, Computation method. Returns ------- numeric or ndarray Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> eotf_HLG_BT2100(0.212132034355964) # doctest: +ELLIPSIS 6.4760398... >>> eotf_HLG_BT2100(0.212132034355964, method='ITU-R BT.2100-1') ... # doctest: +ELLIPSIS 6.4760398... >>> eotf_HLG_BT2100(0.212132034355964, 0.01) ... # doctest: +ELLIPSIS 7.3321975... 
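    Selecting the *ITU-R BT.2100-1* method reproduces the earlier black level
    behaviour, cf. the *BT.2100-1* EOTF above:

    >>> eotf_HLG_BT2100(0.212132034355964, 0.01, method='ITU-R BT.2100-1')
    ... # doctest: +ELLIPSIS
    6.4859750...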
""" return BT2100_HLG_EOTF_METHODS[method](E_p, L_B, L_W, gamma, constants) def eotf_inverse_HLG_BT2100_1(F_D, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse electro-optical transfer function (EOTF / EOCF) as given in *ITU-R BT.2100-1*. Parameters ---------- F_D : numeric or array_like Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *HLG* space. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> eotf_inverse_HLG_BT2100_1(6.476039825649814) # doctest: +ELLIPSIS 0.2121320... >>> eotf_inverse_HLG_BT2100_1(6.485975065251558, 0.01) ... # doctest: +ELLIPSIS 0.2121320... """ return oetf_ARIBSTDB67( ootf_inverse_HLG_BT2100_1(F_D, L_B, L_W, gamma) * 12, constants=constants) def eotf_inverse_HLG_BT2100_2(F_D, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse electro-optical transfer function (EOTF / EOCF) as given in *ITU-R BT.2100-2* with the modified black level behaviour. Parameters ---------- F_D : numeric or array_like Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. Returns ------- numeric or ndarray :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *HLG* space. 
Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> eotf_inverse_HLG_BT2100_2(6.476039825649814) # doctest: +ELLIPSIS 0.2121320... >>> eotf_inverse_HLG_BT2100_2(7.332197528353875, 0.01) ... # doctest: +ELLIPSIS 0.2121320... """ beta = black_level_lift_HLG_BT2100(L_B, L_W, gamma) return (oetf_ARIBSTDB67( ootf_inverse_HLG_BT2100_2(F_D, L_W, gamma) * 12, constants=constants) - beta) / (1 - beta) BT2100_HLG_EOTF_INVERSE_METHODS = CaseInsensitiveMapping({ 'ITU-R BT.2100-1': eotf_inverse_HLG_BT2100_1, 'ITU-R BT.2100-2': eotf_inverse_HLG_BT2100_2, }) BT2100_HLG_EOTF_INVERSE_METHODS.__doc__ = """ Supported *Recommendation ITU-R BT.2100* *Reference HLG* inverse electro-optical transfer function (EOTF / EOCF). References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` BT2100_HLG_EOTF_INVERSE_METHODS : CaseInsensitiveMapping **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}** """ def eotf_inverse_HLG_BT2100(F_D, L_B=0, L_W=1000, gamma=None, constants=BT2100_HLG_CONSTANTS, method='ITU-R BT.2100-2'): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse electro-optical transfer function (EOTF / EOCF). Parameters ---------- F_D : numeric or array_like Luminance :math:`F_D` of a displayed linear component :math:`{R_D, G_D, B_D}` or :math:`Y_D` or :math:`I_D`, in :math:`cd/m^2`. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. constants : Structure, optional *Recommendation ITU-R BT.2100* *Reference HLG* constants. method : unicode, optional **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}**, Computation method. Returns ------- numeric or ndarray :math:`E'` denotes a non-linear colour value :math:`{R', G', B'}` or :math:`{L', M', S'}` in *HLG* space. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E_p`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> eotf_inverse_HLG_BT2100(6.476039825649814) # doctest: +ELLIPSIS 0.2121320... >>> eotf_inverse_HLG_BT2100(6.476039825649814, method='ITU-R BT.2100-1') ... # doctest: +ELLIPSIS 0.2121320... >>> eotf_inverse_HLG_BT2100(7.332197528353875, 0.01) # doctest: +ELLIPSIS 0.2121320... 
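    The inverse composes with the forward EOTF to an identity, to within
    floating point precision:

    >>> eotf_inverse_HLG_BT2100(eotf_HLG_BT2100(0.212132034355964))
    ... # doctest: +ELLIPSIS
    0.2121320...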
""" return BT2100_HLG_EOTF_INVERSE_METHODS[method](F_D, L_B, L_W, gamma, constants) def ootf_HLG_BT2100_1(E, L_B=0, L_W=1000, gamma=None): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* opto-optical transfer function (OOTF / OOCF) as given in *ITU-R BT.2100-1*. The OOTF maps relative scene linear light to display linear light. Parameters ---------- E : numeric or array_like :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. Returns ------- numeric or ndarray :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> ootf_HLG_BT2100_1(0.1) # doctest: +ELLIPSIS 63.0957344... >>> ootf_HLG_BT2100_1(0.1, 0.01) ... # doctest: +ELLIPSIS 63.1051034... """ E = np.atleast_1d(to_domain_1(E)) if E.shape[-1] != 3: usage_warning( '"Recommendation ITU-R BT.2100" "Reference HLG OOTF" uses ' 'RGB Luminance in computations and expects a vector input, thus ' 'the given input array will be stacked to compose a vector for ' 'internal computations but a single component will be output.') R_S = G_S = B_S = E else: R_S, G_S, B_S = tsplit(E) alpha = L_W - L_B beta = L_B Y_S = np.sum(BT2100_HLG_WEIGHTS * tstack([R_S, G_S, B_S]), axis=-1) if gamma is None: gamma = gamma_function_HLG_BT2100(L_W) R_D = alpha * R_S * np.abs(Y_S) ** (gamma - 1) + beta G_D = alpha * G_S * np.abs(Y_S) ** (gamma - 1) + beta B_D = alpha * B_S * np.abs(Y_S) ** (gamma - 1) + beta if E.shape[-1] != 3: return as_float(from_range_1(R_D)) else: RGB_D = tstack([R_D, G_D, B_D]) return from_range_1(RGB_D) def ootf_HLG_BT2100_2(E, L_W=1000, gamma=None): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* opto-optical transfer function (OOTF / OOCF) as given in *ITU-R BT.2100-2*. The OOTF maps relative scene linear light to display linear light. Parameters ---------- E : numeric or array_like :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. Returns ------- numeric or ndarray :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. 
Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> ootf_HLG_BT2100_2(0.1) # doctest: +ELLIPSIS 63.0957344... """ E = np.atleast_1d(to_domain_1(E)) if E.shape[-1] != 3: usage_warning( '"Recommendation ITU-R BT.2100" "Reference HLG OOTF" uses ' 'RGB Luminance in computations and expects a vector input, thus ' 'the given input array will be stacked to compose a vector for ' 'internal computations but a single component will be output.') R_S = G_S = B_S = E else: R_S, G_S, B_S = tsplit(E) alpha = L_W Y_S = np.sum(BT2100_HLG_WEIGHTS * tstack([R_S, G_S, B_S]), axis=-1) if gamma is None: gamma = gamma_function_HLG_BT2100(L_W) R_D = alpha * R_S * np.abs(Y_S) ** (gamma - 1) G_D = alpha * G_S * np.abs(Y_S) ** (gamma - 1) B_D = alpha * B_S * np.abs(Y_S) ** (gamma - 1) if E.shape[-1] != 3: return as_float(from_range_1(R_D)) else: RGB_D = tstack([R_D, G_D, B_D]) return from_range_1(RGB_D) BT2100_HLG_OOTF_METHODS = CaseInsensitiveMapping({ 'ITU-R BT.2100-1': ootf_HLG_BT2100_1, 'ITU-R BT.2100-2': ootf_HLG_BT2100_2, }) BT2100_HLG_OOTF_METHODS.__doc__ = """ Supported *Recommendation ITU-R BT.2100* *Reference HLG* opto-optical transfer function (OOTF / OOCF). References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` BT2100_HLG_OOTF_METHODS : CaseInsensitiveMapping **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}** """ def ootf_HLG_BT2100(E, L_B=0, L_W=1000, gamma=None, method='ITU-R BT.2100-2'): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* opto-optical transfer function (OOTF / OOCF). The OOTF maps relative scene linear light to display linear light. Parameters ---------- E : numeric or array_like :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. method : unicode, optional **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}**, Computation method. Returns ------- numeric or ndarray :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. 
Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> ootf_HLG_BT2100(0.1) # doctest: +ELLIPSIS 63.0957344... >>> ootf_HLG_BT2100(0.1, 0.01, method='ITU-R BT.2100-1') ... # doctest: +ELLIPSIS 63.1051034... """ function = BT2100_HLG_OOTF_METHODS[method] return function( E, **filter_kwargs(function, **{ 'L_B': L_B, 'L_W': L_W, 'gamma': gamma })) def ootf_inverse_HLG_BT2100_1(F_D, L_B=0, L_W=1000, gamma=None): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-optical transfer function (OOTF / OOCF) as given in *ITU-R BT.2100-1*. Parameters ---------- F_D : numeric or array_like :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. Returns ------- numeric or ndarray :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017` Examples -------- >>> ootf_inverse_HLG_BT2100_1(63.095734448019336) # doctest: +ELLIPSIS 0.1000000... >>> ootf_inverse_HLG_BT2100_1(63.105103490674857, 0.01) ... # doctest: +ELLIPSIS 0.0999999... 
""" F_D = np.atleast_1d(to_domain_1(F_D)) if F_D.shape[-1] != 3: usage_warning( '"Recommendation ITU-R BT.2100" "Reference HLG OOTF" uses ' 'RGB Luminance in computations and expects a vector input, thus ' 'the given input array will be stacked to compose a vector for ' 'internal computations but a single component will be output.') R_D = G_D = B_D = F_D else: R_D, G_D, B_D = tsplit(F_D) Y_D = np.sum(BT2100_HLG_WEIGHTS * tstack([R_D, G_D, B_D]), axis=-1) alpha = L_W - L_B beta = L_B if gamma is None: gamma = gamma_function_HLG_BT2100(L_W) R_S = np.where( Y_D == beta, 0.0, (np.abs((Y_D - beta) / alpha) ** ((1 - gamma) / gamma)) * (R_D - beta) / alpha, ) G_S = np.where( Y_D == beta, 0.0, (np.abs((Y_D - beta) / alpha) ** ((1 - gamma) / gamma)) * (G_D - beta) / alpha, ) B_S = np.where( Y_D == beta, 0.0, (np.abs((Y_D - beta) / alpha) ** ((1 - gamma) / gamma)) * (B_D - beta) / alpha, ) if F_D.shape[-1] != 3: return as_float(from_range_1(R_S)) else: RGB_S = tstack([R_S, G_S, B_S]) return from_range_1(RGB_S) def ootf_inverse_HLG_BT2100_2(F_D, L_W=1000, gamma=None): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-optical transfer function (OOTF / OOCF) as given in *ITU-R BT.2100-2*. Parameters ---------- F_D : numeric or array_like :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. Returns ------- numeric or ndarray :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> ootf_inverse_HLG_BT2100_2(63.095734448019336) # doctest: +ELLIPSIS 0.1000000... 
""" F_D = np.atleast_1d(to_domain_1(F_D)) if F_D.shape[-1] != 3: usage_warning( '"Recommendation ITU-R BT.2100" "Reference HLG OOTF" uses ' 'RGB Luminance in computations and expects a vector input, thus ' 'the given input array will be stacked to compose a vector for ' 'internal computations but a single component will be output.') R_D = G_D = B_D = F_D else: R_D, G_D, B_D = tsplit(F_D) Y_D = np.sum(BT2100_HLG_WEIGHTS * tstack([R_D, G_D, B_D]), axis=-1) alpha = L_W if gamma is None: gamma = gamma_function_HLG_BT2100(L_W) R_S = np.where( Y_D == 0, 0.0, (np.abs(Y_D / alpha) ** ((1 - gamma) / gamma)) * R_D / alpha, ) G_S = np.where( Y_D == 0, 0.0, (np.abs(Y_D / alpha) ** ((1 - gamma) / gamma)) * G_D / alpha, ) B_S = np.where( Y_D == 0, 0.0, (np.abs(Y_D / alpha) ** ((1 - gamma) / gamma)) * B_D / alpha, ) if F_D.shape[-1] != 3: return as_float(from_range_1(R_S)) else: RGB_S = tstack([R_S, G_S, B_S]) return from_range_1(RGB_S) BT2100_HLG_OOTF_INVERSE_METHODS = CaseInsensitiveMapping({ 'ITU-R BT.2100-1': ootf_inverse_HLG_BT2100_1, 'ITU-R BT.2100-2': ootf_inverse_HLG_BT2100_2, }) BT2100_HLG_OOTF_INVERSE_METHODS.__doc__ = """ Supported *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-optical transfer function (OOTF / OOCF). References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` BT2100_HLG_OOTF_INVERSE_METHODS : CaseInsensitiveMapping **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}** """ def ootf_inverse_HLG_BT2100(F_D, L_B=0, L_W=1000, gamma=None, method='ITU-R BT.2100-2'): """ Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-optical transfer function (OOTF / OOCF). Parameters ---------- F_D : numeric or array_like :math:`F_D` is the luminance of a displayed linear component :math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`. L_B : numeric, optional :math:`L_B` is the display luminance for black in :math:`cd/m^2`. L_W : numeric, optional :math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2` for achromatic pixels. gamma : numeric, optional System gamma value, 1.2 at the nominal display peak luminance of :math:`1000 cd/m^2`. method : unicode, optional **{'ITU-R BT.2100-1', 'ITU-R BT.2100-2'}**, Computation method. Returns ------- numeric or ndarray :math:`E` is the signal for each colour component :math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled by camera exposure. Notes ----- +------------+-----------------------+---------------+ | **Domain** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``F_D`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ +------------+-----------------------+---------------+ | **Range** | **Scale - Reference** | **Scale - 1** | +============+=======================+===============+ | ``E`` | [0, 1] | [0, 1] | +------------+-----------------------+---------------+ References ---------- :cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`, :cite:`InternationalTelecommunicationUnion2018` Examples -------- >>> ootf_inverse_HLG_BT2100(63.095734448019336) # doctest: +ELLIPSIS 0.1000000... >>> ootf_inverse_HLG_BT2100( ... 63.105103490674857, 0.01, method='ITU-R BT.2100-1') ... # doctest: +ELLIPSIS 0.0999999... """ function = BT2100_HLG_OOTF_INVERSE_METHODS[method] return function( F_D, **filter_kwargs(function, **{ 'L_B': L_B, 'L_W': L_W, 'gamma': gamma }))
const router = require("express").Router();
const { Character, Checkpoint, Item, Drop, Raid } = require("../db/models");

const NOUN = "character";

// Eager-loaded associations and ordering shared by the list and detail routes.
const CHARACTER_INCLUDE = [
  { model: Drop, include: [Item, Checkpoint] },
  { model: Checkpoint, include: [Raid] },
  { model: Item, include: [Drop] },
];
const CHARACTER_ORDER = [["characterName", "ASC"], [Drop, "dropName", "ASC"]];

// GET / -> all characters with their drops, checkpoints and items.
router.get(`/`, async (req, res, next) => {
  try {
    res.json(
      await Character.findAll({
        include: CHARACTER_INCLUDE,
        order: CHARACTER_ORDER,
      })
    );
  } catch (e) {
    next(e);
  }
});

// GET /:characterId -> a single character by primary key.
// Note: findByPk is the modern equivalent of findById, which was removed in
// Sequelize v5.
router.get(`/:${NOUN}Id`, async (req, res, next) => {
  try {
    res.json(
      await Character.findByPk(req.params[`${NOUN}Id`], {
        include: CHARACTER_INCLUDE,
        order: CHARACTER_ORDER,
      })
    );
  } catch (e) {
    next(e);
  }
});

// POST / -> create a new character from the request body.
router.post("/", async (req, res, next) => {
  try {
    res.json(await Character.create(req.body));
  } catch (e) {
    next(e);
  }
});

// PUT /:characterId -> update a character and return the updated row.
router.put(`/:${NOUN}Id`, async (req, res, next) => {
  try {
    const [, updatedCharacter] = await Character.update(req.body, {
      where: {
        id: req.params[`${NOUN}Id`],
      },
      returning: true,
      plain: true,
    });
    res.json(updatedCharacter);
  } catch (e) {
    next(e);
  }
});

// DELETE /:characterId -> remove a character and return the remaining rows.
// The route parameter is `characterId`, so the original `req.params.charId`
// was always undefined here; use the computed key instead.
router.delete(`/:${NOUN}Id`, async (req, res, next) => {
  try {
    await Character.destroy({ where: { id: req.params[`${NOUN}Id`] } });
    const remaining = await Character.findAll();
    res.json(remaining);
  } catch (e) {
    next(e);
  }
});

module.exports = router;
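
// A minimal mounting sketch (not part of this router file): the entry-point
// file, the "./characters" require path, and the "/api/characters" mount
// point are assumptions for illustration; adjust them to the project's
// actual layout.
//
//   const express = require("express");
//   const app = express();
//   app.use(express.json()); // parse JSON bodies for the POST/PUT routes
//   app.use("/api/characters", require("./characters"));
//   app.listen(3000);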
import shutil
import tempfile

import pytest


@pytest.fixture
def temp_dir():
    # Create a throwaway directory for the duration of a single test.
    # mkdtemp() never returns a trailing separator, so the path needs no
    # normalization before being handed to the test.
    dir_name = tempfile.mkdtemp(suffix='-pytest')
    yield dir_name
    # Remove the directory and everything the test wrote into it.
    shutil.rmtree(dir_name)
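

# A minimal usage sketch (assumption: the fixture above lives in a
# conftest.py or in the same module as this test). pytest injects the yielded
# path, and the fixture removes the directory after the test returns.
def test_temp_dir_is_writable(temp_dir):
    import os

    path = os.path.join(temp_dir, 'example.txt')
    with open(path, 'w') as fh:
        fh.write('hello')
    assert os.path.isfile(path)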
#!/usr/bin/env python import datetime import hashlib import math import operator import optparse import os import re import sys import threading import time import webbrowser from collections import namedtuple, OrderedDict from functools import wraps from getpass import getpass from io import TextIOWrapper # Py2k compat. if sys.version_info[0] == 2: PY2 = True binary_types = (buffer, bytes, bytearray) decode_handler = 'replace' numeric = (int, long, float) unicode_type = unicode from StringIO import StringIO else: PY2 = False binary_types = (bytes, bytearray) decode_handler = 'backslashreplace' numeric = (int, float) unicode_type = str from io import StringIO try: from flask import ( Flask, abort, escape, flash, jsonify, make_response, Markup, redirect, render_template, request, session, url_for) except ImportError: raise RuntimeError('Unable to import flask module. Install by running ' 'pip install flask') try: from pygments import formatters, highlight, lexers except ImportError: import warnings warnings.warn('pygments library not found.', ImportWarning) syntax_highlight = lambda data: '<pre>%s</pre>' % data else: def syntax_highlight(data): if not data: return '' lexer = lexers.get_lexer_by_name('sql') formatter = formatters.HtmlFormatter(linenos=False) return highlight(data, lexer, formatter) try: from peewee import __version__ peewee_version = tuple([int(p) for p in __version__.split('.')]) except ImportError: raise RuntimeError('Unable to import peewee module. Install by running ' 'pip install peewee') else: if peewee_version < (3, 0, 0): raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. ' 'Please update by running pip install --update ' 'peewee' % __version__) from peewee import * from peewee import IndexMetadata from peewee import sqlite3 from playhouse.dataset import DataSet from playhouse.migrate import migrate CUR_DIR = os.path.realpath(os.path.dirname(__file__)) DEBUG = False MAX_RESULT_SIZE = 1000 ROWS_PER_PAGE = 50 SECRET_KEY = 'sqlite-database-browser-0.1.0' app = Flask( __name__, static_folder=os.path.join(CUR_DIR, 'static'), template_folder=os.path.join(CUR_DIR, 'templates')) app.config.from_object(__name__) dataset = None migrator = None # # Database metadata objects. # TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql')) ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql')) # # Database helpers. # class SqliteDataSet(DataSet): @property def filename(self): db_file = dataset._database.database if db_file.startswith('file:'): db_file = db_file[5:] return os.path.realpath(db_file.rsplit('?', 1)[0]) @property def is_readonly(self): db_file = dataset._database.database return db_file.endswith('?mode=ro') @property def base_name(self): return os.path.basename(self.filename) @property def created(self): stat = os.stat(self.filename) return datetime.datetime.fromtimestamp(stat.st_ctime) @property def modified(self): stat = os.stat(self.filename) return datetime.datetime.fromtimestamp(stat.st_mtime) @property def size_on_disk(self): stat = os.stat(self.filename) return stat.st_size def get_indexes(self, table): return dataset._database.get_indexes(table) def get_all_indexes(self): cursor = self.query( 'SELECT name, sql FROM sqlite_master ' 'WHERE type = ? 
ORDER BY name', ('index',)) return [IndexMetadata(row[0], row[1], None, None, None) for row in cursor.fetchall()] def get_columns(self, table): return dataset._database.get_columns(table) def get_foreign_keys(self, table): return dataset._database.get_foreign_keys(table) def get_triggers(self, table): cursor = self.query( 'SELECT name, sql FROM sqlite_master ' 'WHERE type = ? AND tbl_name = ?', ('trigger', table)) return [TriggerMetadata(*row) for row in cursor.fetchall()] def get_all_triggers(self): cursor = self.query( 'SELECT name, sql FROM sqlite_master ' 'WHERE type = ? ORDER BY name', ('trigger',)) return [TriggerMetadata(*row) for row in cursor.fetchall()] def get_all_views(self): cursor = self.query( 'SELECT name, sql FROM sqlite_master ' 'WHERE type = ? ORDER BY name', ('view',)) return [ViewMetadata(*row) for row in cursor.fetchall()] def get_virtual_tables(self): cursor = self.query( 'SELECT name FROM sqlite_master ' 'WHERE type = ? AND sql LIKE ? ' 'ORDER BY name', ('table', 'CREATE VIRTUAL TABLE%')) return set([row[0] for row in cursor.fetchall()]) def get_corollary_virtual_tables(self): virtual_tables = self.get_virtual_tables() suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat'] return set( '%s_%s' % (virtual_table, suffix) for suffix in suffixes for virtual_table in virtual_tables) # # Flask views. # @app.route('/') def index(): return render_template('index.html', sqlite=sqlite3) @app.route('/login/', methods=['GET', 'POST']) def login(): if request.method == 'POST': if request.form.get('password') == app.config['PASSWORD']: session['authorized'] = True return redirect(session.get('next_url') or url_for('index')) flash('The password you entered is incorrect.', 'danger') return render_template('login.html') @app.route('/logout/', methods=['GET']) def logout(): session.pop('authorized', None) return redirect(url_for('login')) def require_table(fn): @wraps(fn) def inner(table, *args, **kwargs): if table not in dataset.tables: abort(404) return fn(table, *args, **kwargs) return inner @app.route('/create-table/', methods=['POST']) def table_create(): table = (request.form.get('table_name') or '').strip() if not table: flash('Table name is required.', 'danger') return redirect(request.form.get('redirect') or url_for('index')) dataset[table] return redirect(url_for('table_import', table=table)) @app.route('/<table>/') @require_table def table_structure(table): ds_table = dataset[table] model_class = ds_table.model_class table_sql = dataset.query( 'SELECT sql FROM sqlite_master WHERE tbl_name = ? 
AND type = ?', [table, 'table']).fetchone()[0] return render_template( 'table_structure.html', columns=dataset.get_columns(table), ds_table=ds_table, foreign_keys=dataset.get_foreign_keys(table), indexes=dataset.get_indexes(table), model_class=model_class, table=table, table_sql=table_sql, triggers=dataset.get_triggers(table)) def get_request_data(): if request.method == 'POST': return request.form return request.args @app.route('/<table>/add-column/', methods=['GET', 'POST']) @require_table def add_column(table): column_mapping = OrderedDict(( ('VARCHAR', CharField), ('TEXT', TextField), ('INTEGER', IntegerField), ('REAL', FloatField), ('BOOL', BooleanField), ('BLOB', BlobField), ('DATETIME', DateTimeField), ('DATE', DateField), ('TIME', TimeField), ('DECIMAL', DecimalField))) request_data = get_request_data() col_type = request_data.get('type') name = request_data.get('name', '') if request.method == 'POST': if name and col_type in column_mapping: migrate( migrator.add_column( table, name, column_mapping[col_type](null=True))) flash('Column "%s" was added successfully!' % name, 'success') dataset.update_cache(table) return redirect(url_for('table_structure', table=table)) else: flash('Name and column type are required.', 'danger') return render_template( 'add_column.html', col_type=col_type, column_mapping=column_mapping, name=name, table=table) @app.route('/<table>/drop-column/', methods=['GET', 'POST']) @require_table def drop_column(table): request_data = get_request_data() name = request_data.get('name', '') columns = dataset.get_columns(table) column_names = [column.name for column in columns] if request.method == 'POST': if name in column_names: migrate(migrator.drop_column(table, name)) flash('Column "%s" was dropped successfully!' % name, 'success') dataset.update_cache(table) return redirect(url_for('table_structure', table=table)) else: flash('Name is required.', 'danger') return render_template( 'drop_column.html', columns=columns, column_names=column_names, name=name, table=table) @app.route('/<table>/rename-column/', methods=['GET', 'POST']) @require_table def rename_column(table): request_data = get_request_data() rename = request_data.get('rename', '') rename_to = request_data.get('rename_to', '') columns = dataset.get_columns(table) column_names = [column.name for column in columns] if request.method == 'POST': if (rename in column_names) and (rename_to not in column_names): migrate(migrator.rename_column(table, rename, rename_to)) flash('Column "%s" was renamed successfully!' 
% rename, 'success') dataset.update_cache(table) return redirect(url_for('table_structure', table=table)) else: flash('Column name is required and cannot conflict with an ' 'existing column\'s name.', 'danger') return render_template( 'rename_column.html', columns=columns, column_names=column_names, rename=rename, rename_to=rename_to, table=table) @app.route('/<table>/add-index/', methods=['GET', 'POST']) @require_table def add_index(table): request_data = get_request_data() indexed_columns = request_data.getlist('indexed_columns') unique = bool(request_data.get('unique')) columns = dataset.get_columns(table) if request.method == 'POST': if indexed_columns: migrate( migrator.add_index( table, indexed_columns, unique)) flash('Index created successfully.', 'success') return redirect(url_for('table_structure', table=table)) else: flash('One or more columns must be selected.', 'danger') return render_template( 'add_index.html', columns=columns, indexed_columns=indexed_columns, table=table, unique=unique) @app.route('/<table>/drop-index/', methods=['GET', 'POST']) @require_table def drop_index(table): request_data = get_request_data() name = request_data.get('name', '') indexes = dataset.get_indexes(table) index_names = [index.name for index in indexes] if request.method == 'POST': if name in index_names: migrate(migrator.drop_index(table, name)) flash('Index "%s" was dropped successfully!' % name, 'success') return redirect(url_for('table_structure', table=table)) else: flash('Index name is required.', 'danger') return render_template( 'drop_index.html', indexes=indexes, index_names=index_names, name=name, table=table) @app.route('/<table>/drop-trigger/', methods=['GET', 'POST']) @require_table def drop_trigger(table): request_data = get_request_data() name = request_data.get('name', '') triggers = dataset.get_triggers(table) trigger_names = [trigger.name for trigger in triggers] if request.method == 'POST': if name in trigger_names: dataset.query('DROP TRIGGER "%s";' % name) flash('Trigger "%s" was dropped successfully!' % name, 'success') return redirect(url_for('table_structure', table=table)) else: flash('Trigger name is required.', 'danger') return render_template( 'drop_trigger.html', triggers=triggers, trigger_names=trigger_names, name=name, table=table) @app.route('/<table>/content/') @require_table def table_content(table): page_number = request.args.get('page') or '' page_number = int(page_number) if page_number.isdigit() else 1 dataset.update_cache(table) ds_table = dataset[table] total_rows = ds_table.all().count() rows_per_page = app.config['ROWS_PER_PAGE'] total_pages = int(math.ceil(total_rows / float(rows_per_page))) # Restrict bounds. page_number = min(page_number, total_pages) page_number = max(page_number, 1) previous_page = page_number - 1 if page_number > 1 else None next_page = page_number + 1 if page_number < total_pages else None query = ds_table.all().paginate(page_number, rows_per_page) ordering = request.args.get('ordering') if ordering: field = ds_table.model_class._meta.columns[ordering.lstrip('-')] if ordering.startswith('-'): field = field.desc() query = query.order_by(field) field_names = ds_table.columns columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields] table_sql = dataset.query( 'SELECT sql FROM sqlite_master WHERE tbl_name = ? 
AND type = ?', [table, 'table']).fetchone()[0] return render_template( 'table_content.html', columns=columns, ds_table=ds_table, field_names=field_names, next_page=next_page, ordering=ordering, page=page_number, previous_page=previous_page, query=query, table=table, total_pages=total_pages, total_rows=total_rows) @app.route('/<table>/query/', methods=['GET', 'POST']) @require_table def table_query(table): data = [] data_description = error = row_count = sql = None if request.method == 'POST': sql = request.form['sql'] if 'export_json' in request.form: return export(table, sql, 'json') elif 'export_csv' in request.form: return export(table, sql, 'csv') try: cursor = dataset.query(sql) except Exception as exc: error = str(exc) else: data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']] data_description = cursor.description row_count = cursor.rowcount else: if request.args.get('sql'): sql = request.args.get('sql') else: sql = 'SELECT *\nFROM "%s"' % (table) table_sql = dataset.query( 'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?', [table, 'table']).fetchone()[0] return render_template( 'table_query.html', data=data, data_description=data_description, error=error, query_images=get_query_images(), row_count=row_count, sql=sql, table=table, table_sql=table_sql) @app.route('/table-definition/', methods=['POST']) def set_table_definition_preference(): key = 'show' show = False if request.form.get(key) and request.form.get(key) != 'false': session[key] = show = True elif key in session: del session[key] return jsonify({key: show}) def export(table, sql, export_format): model_class = dataset[table].model_class query = model_class.raw(sql).dicts() buf = StringIO() if export_format == 'json': kwargs = {'indent': 2} filename = '%s-export.json' % table mimetype = 'text/javascript' else: kwargs = {} filename = '%s-export.csv' % table mimetype = 'text/csv' dataset.freeze(query, export_format, file_obj=buf, **kwargs) response_data = buf.getvalue() response = make_response(response_data) response.headers['Content-Length'] = len(response_data) response.headers['Content-Type'] = mimetype response.headers['Content-Disposition'] = 'attachment; filename=%s' % ( filename) response.headers['Expires'] = 0 response.headers['Pragma'] = 'public' return response @app.route('/<table>/import/', methods=['GET', 'POST']) @require_table def table_import(table): count = None request_data = get_request_data() strict = bool(request_data.get('strict')) if request.method == 'POST': file_obj = request.files.get('file') if not file_obj: flash('Please select an import file.', 'danger') elif not file_obj.filename.lower().endswith(('.csv', '.json')): flash('Unsupported file-type. Must be a .json or .csv file.', 'danger') else: if file_obj.filename.lower().endswith('.json'): format = 'json' else: format = 'csv' # Here we need to translate the file stream. Werkzeug uses a # spooled temporary file opened in wb+ mode, which is not # compatible with Python's CSV module. We'd need to reach pretty # far into Flask's internals to modify this behavior, so instead # we'll just translate the stream into utf8-decoded unicode. if not PY2: try: stream = TextIOWrapper(file_obj, encoding='utf8') except AttributeError: # The SpooledTemporaryFile used by werkzeug does not # implement an API that the TextIOWrapper expects, so we'll # just consume the whole damn thing and decode it. # Fixed in werkzeug 0.15. 
                    stream = StringIO(file_obj.read().decode('utf8'))
            else:
                stream = file_obj.stream

            try:
                with dataset.transaction():
                    count = dataset.thaw(
                        table,
                        format=format,
                        file_obj=stream,
                        strict=strict)
            except Exception as exc:
                flash('Error importing file: %s' % exc, 'danger')
            else:
                flash(
                    'Successfully imported %s objects from %s.' % (
                        count, file_obj.filename),
                    'success')
                return redirect(url_for('table_content', table=table))

    return render_template(
        'table_import.html',
        count=count,
        strict=strict,
        table=table)

@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
    if request.method == 'POST':
        model_class = dataset[table].model_class
        model_class.drop_table()
        dataset.update_cache()  # Update all tables.
        flash('Table "%s" dropped successfully.' % table, 'success')
        return redirect(url_for('index'))

    return render_template('drop_table.html', table=table)

@app.template_filter('format_index')
def format_index(index_sql):
    split_regex = re.compile(r'\bon\b', re.I)
    if not split_regex.search(index_sql):
        return index_sql

    create, definition = split_regex.split(index_sql)
    return '\nON '.join((create.strip(), definition.strip()))

@app.template_filter('value_filter')
def value_filter(value, max_length=50):
    if isinstance(value, numeric):
        return value
    if isinstance(value, binary_types):
        if not isinstance(value, (bytes, bytearray)):
            value = bytes(value)  # Handle `buffer` type.
        value = value.decode('utf-8', decode_handler)
    if isinstance(value, unicode_type):
        value = escape(value)
        if len(value) > max_length:
            return ('<span class="truncated">%s</span> '
                    '<span class="full" style="display:none;">%s</span>'
                    '<a class="toggle-value" href="#">...</a>') % (
                        value[:max_length],
                        value)
    return value

# Raw strings, so that the escaped parentheses are not mis-parsed as (invalid)
# string escape sequences.
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')

def _format_create_table(sql):
    create_table, column_list = column_re.search(sql).groups()
    columns = ['  %s' % column.strip()
               for column in column_split_re.findall(column_list)
               if column.strip()]
    return '%s (\n%s\n)' % (
        create_table,
        ',\n'.join(columns))

@app.template_filter()
def format_create_table(sql):
    try:
        return _format_create_table(sql)
    except:
        return sql

@app.template_filter('highlight')
def highlight_filter(data):
    return Markup(syntax_highlight(data))

def get_query_images():
    accum = []
    image_dir = os.path.join(app.static_folder, 'img')
    if not os.path.exists(image_dir):
        return accum
    for filename in sorted(os.listdir(image_dir)):
        basename = os.path.splitext(os.path.basename(filename))[0]
        parts = basename.split('-')
        accum.append((parts, 'img/' + filename))
    return accum

#
# Flask application helpers.
#

@app.context_processor
def _general():
    return {
        'dataset': dataset,
        'login_required': bool(app.config.get('PASSWORD')),
    }

@app.context_processor
def _now():
    return {'now': datetime.datetime.now()}

@app.before_request
def _connect_db():
    dataset.connect()

@app.teardown_request
def _close_db(exc):
    if not dataset._database.is_closed():
        dataset.close()

class PrefixMiddleware(object):
    def __init__(self, app, prefix):
        self.app = app
        self.prefix = '/%s' % prefix.strip('/')
        self.prefix_len = len(self.prefix)

    def __call__(self, environ, start_response):
        if environ['PATH_INFO'].startswith(self.prefix):
            environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
            environ['SCRIPT_NAME'] = self.prefix
            return self.app(environ, start_response)
        else:
            # WSGI status strings must include the reason phrase; a bare
            # '404' is not a valid status line.
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['URL does not match application prefix.'.encode()]

#
# Script options.
# def get_option_parser(): parser = optparse.OptionParser() parser.add_option( '-p', '--port', default=8080, help='Port for web interface, default=8080', type='int') parser.add_option( '-H', '--host', default='127.0.0.1', help='Host for web interface, default=127.0.0.1') parser.add_option( '-d', '--debug', action='store_true', help='Run server in debug mode') parser.add_option( '-x', '--no-browser', action='store_false', default=True, dest='browser', help='Do not automatically open browser page.') parser.add_option( '-P', '--password', action='store_true', dest='prompt_password', help='Prompt for password to access database browser.') parser.add_option( '-r', '--read-only', action='store_true', dest='read_only', help='Open database in read-only mode.') parser.add_option( '-u', '--url-prefix', dest='url_prefix', help='URL prefix for application.') ssl_opts = optparse.OptionGroup(parser, 'SSL options') ssl_opts.add_option( '-c', '--ssl-cert', dest='ssl_cert', help='SSL certificate file path.') ssl_opts.add_option( '-k', '--ssl-key', dest='ssl_key', help='SSL private key file path.') ssl_opts.add_option( '-a', '--ad-hoc', action='store_true', dest='ssl_ad_hoc', help='Use ad-hoc SSL context.') parser.add_option_group(ssl_opts) return parser def die(msg, exit_code=1): sys.stderr.write('%s\n' % msg) sys.stderr.flush() sys.exit(exit_code) def open_browser_tab(host, port): url = 'http://%s:%s/' % (host, port) def _open_tab(url): time.sleep(1.5) webbrowser.open_new_tab(url) thread = threading.Thread(target=_open_tab, args=(url,)) thread.daemon = True thread.start() def install_auth_handler(password): app.config['PASSWORD'] = password @app.before_request def check_password(): if not session.get('authorized') and request.path != '/login/' and \ not request.path.startswith(('/static/', '/favicon')): flash('You must log-in to view the database browser.', 'danger') session['next_url'] = request.base_url return redirect(url_for('login')) def initialize_app(filename, read_only=False, password=None, url_prefix=None): global dataset global migrator if password: install_auth_handler(password) if read_only: if sys.version_info < (3, 4, 0): die('Python 3.4.0 or newer is required for read-only access.') if peewee_version < (3, 5, 1): die('Peewee 3.5.1 or newer is required for read-only access.') db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True) try: db.connect() except OperationalError: die('Unable to open database file in read-only mode. Ensure that ' 'the database exists in order to use read-only mode.') db.close() dataset = SqliteDataSet(db, bare_fields=True) else: dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True) if url_prefix: app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix) migrator = dataset._migrator dataset.close() def main(): # This function exists to act as a console script entry-point. parser = get_option_parser() options, args = parser.parse_args() if not args: die('Error: missing required path to database file.') password = None if options.prompt_password: if os.environ.get('SQLITE_WEB_PASSWORD'): password = os.environ['SQLITE_WEB_PASSWORD'] else: while True: password = getpass('Enter password: ') password_confirm = getpass('Confirm password: ') if password != password_confirm: print('Passwords did not match!') else: break # Initialize the dataset instance and (optionally) authentication handler. 
initialize_app(args[0], options.read_only, password, options.url_prefix) if options.browser: open_browser_tab(options.host, options.port) if password: key = b'sqlite-web-' + args[0].encode('utf8') + password.encode('utf8') app.secret_key = hashlib.sha256(key).hexdigest() # Set up SSL context, if specified. kwargs = {} if options.ssl_ad_hoc: kwargs['ssl_context'] = 'adhoc' if options.ssl_cert and options.ssl_key: if not os.path.exists(options.ssl_cert) or not os.path.exists(options.ssl_key): die('ssl cert or ssl key not found. Please check the file-paths.') kwargs['ssl_context'] = (options.ssl_cert, options.ssl_key) elif options.ssl_cert: die('ssl key "-k" is required alongside the ssl cert') elif options.ssl_key: die('ssl cert "-c" is required alongside the ssl key') # Run WSGI application. app.run(host=options.host, port=options.port, debug=options.debug, **kwargs) if __name__ == '__main__': main()
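

# A minimal embedding sketch (not part of the original script): the Flask app
# can also be initialized programmatically instead of via the console entry
# point above. The module name and the 'example.db' filename are placeholders.
#
#     from sqlite_web import app, initialize_app  # assumed module name
#
#     initialize_app('example.db')
#     app.run(host='127.0.0.1', port=8080)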
/* BLIS An object-based framework for developing high-performance BLAS-like libraries. Copyright (C) 2014, The University of Texas at Austin Copyright (C) 2018 - 2019, Advanced Micro Devices, Inc. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name(s) of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "blis.h" #include "test_libblis.h" // Static variables. static char* op_str = "gemm"; static char* o_types = "mmm"; // a b c static char* p_types = "hh"; // transa transb static thresh_t thresh[BLIS_NUM_FP_TYPES] = { { 1e-04, 1e-05 }, // warn, pass for s { 1e-04, 1e-05 }, // warn, pass for c { 1e-13, 1e-14 }, // warn, pass for d { 1e-13, 1e-14 } }; // warn, pass for z // Local prototypes. 
void libblis_test_gemm_deps ( thread_data_t* tdata, test_params_t* params, test_op_t* op ); void libblis_test_gemm_experiment ( test_params_t* params, test_op_t* op, iface_t iface, char* dc_str, char* pc_str, char* sc_str, unsigned int p_cur, double* perf, double* resid ); void libblis_test_gemm_md ( test_params_t* params, test_op_t* op, iface_t iface, char* dc_str, char* pc_str, char* sc_str, unsigned int p_cur, double* perf, double* resid ); void libblis_test_gemm_impl ( iface_t iface, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c ); void libblis_test_gemm_check ( test_params_t* params, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, obj_t* c_orig, double* resid ); void libblis_test_gemm_md_check ( test_params_t* params, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, obj_t* c_orig, double* resid ); double libblis_test_gemm_flops ( obj_t* a, obj_t* b, obj_t* c ); void libblis_test_gemm_deps ( thread_data_t* tdata, test_params_t* params, test_op_t* op ) { libblis_test_randv( tdata, params, &(op->ops->randv) ); libblis_test_randm( tdata, params, &(op->ops->randm) ); libblis_test_setv( tdata, params, &(op->ops->setv) ); libblis_test_normfv( tdata, params, &(op->ops->normfv) ); libblis_test_subv( tdata, params, &(op->ops->subv) ); libblis_test_scalv( tdata, params, &(op->ops->scalv) ); libblis_test_copym( tdata, params, &(op->ops->copym) ); libblis_test_scalm( tdata, params, &(op->ops->scalm) ); libblis_test_gemv( tdata, params, &(op->ops->gemv) ); } void libblis_test_gemm ( thread_data_t* tdata, test_params_t* params, test_op_t* op ) { // Return early if this test has already been done. if ( libblis_test_op_is_done( op ) ) return; // Return early if operation is disabled. if ( libblis_test_op_is_disabled( op ) || libblis_test_l3_is_disabled( op ) ) return; // Call dependencies first. if ( TRUE ) libblis_test_gemm_deps( tdata, params, op ); // Execute the test driver for each implementation requested. //if ( op->front_seq == ENABLE ) { libblis_test_op_driver( tdata, params, op, BLIS_TEST_SEQ_FRONT_END, op_str, p_types, o_types, thresh, libblis_test_gemm_experiment ); } } void libblis_test_gemm_experiment ( test_params_t* params, test_op_t* op, iface_t iface, char* dc_str, char* pc_str, char* sc_str, unsigned int p_cur, double* perf, double* resid ) { unsigned int n_repeats = params->n_repeats; unsigned int i; double time_min = DBL_MAX; double time; num_t datatype; dim_t m, n, k; trans_t transa; trans_t transb; obj_t alpha, a, b, beta, c; obj_t c_save; // Use a different function to handle mixed datatypes. if ( params->mixed_domain || params->mixed_precision ) { libblis_test_gemm_md( params, op, iface, dc_str, pc_str, sc_str, p_cur, perf, resid ); return; } // Use the datatype of the first char in the datatype combination string. bli_param_map_char_to_blis_dt( dc_str[0], &datatype ); // Map the dimension specifier to actual dimensions. m = libblis_test_get_dim_from_prob_size( op->dim_spec[0], p_cur ); n = libblis_test_get_dim_from_prob_size( op->dim_spec[1], p_cur ); k = libblis_test_get_dim_from_prob_size( op->dim_spec[2], p_cur ); // Map parameter characters to BLIS constants. bli_param_map_char_to_blis_trans( pc_str[0], &transa ); bli_param_map_char_to_blis_trans( pc_str[1], &transb ); // Create test scalars. bli_obj_scalar_init_detached( datatype, &alpha ); bli_obj_scalar_init_detached( datatype, &beta ); // Create test operands (vectors and/or matrices). 
libblis_test_mobj_create( params, datatype, transa, sc_str[1], m, k, &a ); libblis_test_mobj_create( params, datatype, transb, sc_str[2], k, n, &b ); libblis_test_mobj_create( params, datatype, BLIS_NO_TRANSPOSE, sc_str[0], m, n, &c ); libblis_test_mobj_create( params, datatype, BLIS_NO_TRANSPOSE, sc_str[0], m, n, &c_save ); // Set alpha and beta. if ( bli_obj_is_real( &c ) ) { bli_setsc( 1.2, 0.0, &alpha ); bli_setsc( 0.9, 0.0, &beta ); } else { bli_setsc( 1.2, 0.8, &alpha ); bli_setsc( 0.9, 1.0, &beta ); } #if 0 //bli_setm( &BLIS_ONE, &a ); bli_setsc( 1.0, 0.0, &alpha ); bli_setsc( 1.0, 0.0, &beta ); #endif // Randomize A, B, and C, and save C. libblis_test_mobj_randomize( params, TRUE, &a ); libblis_test_mobj_randomize( params, TRUE, &b ); libblis_test_mobj_randomize( params, TRUE, &c ); bli_copym( &c, &c_save ); // Apply the parameters. bli_obj_set_conjtrans( transa, &a ); bli_obj_set_conjtrans( transb, &b ); // Repeat the experiment n_repeats times and record results. for ( i = 0; i < n_repeats; ++i ) { bli_copym( &c_save, &c ); time = bli_clock(); libblis_test_gemm_impl( iface, &alpha, &a, &b, &beta, &c ); time_min = bli_clock_min_diff( time_min, time ); } // Estimate the performance of the best experiment repeat. *perf = ( 2.0 * m * n * k ) / time_min / FLOPS_PER_UNIT_PERF; if ( bli_obj_is_complex( &c ) ) *perf *= 4.0; // Perform checks. libblis_test_gemm_check( params, &alpha, &a, &b, &beta, &c, &c_save, resid ); // Zero out performance and residual if output matrix is empty. libblis_test_check_empty_problem( &c, perf, resid ); // Free the test objects. bli_obj_free( &a ); bli_obj_free( &b ); bli_obj_free( &c ); bli_obj_free( &c_save ); } void libblis_test_gemm_md ( test_params_t* params, test_op_t* op, iface_t iface, char* dc_str, char* pc_str, char* sc_str, unsigned int p_cur, double* perf, double* resid ) { unsigned int n_repeats = params->n_repeats; unsigned int i; double time_min = DBL_MAX; double time; num_t dt_a, dt_b, dt_c; num_t dt_complex; dim_t m, n, k; trans_t transa; trans_t transb; obj_t alpha, a, b, beta, c; obj_t c_save; // Decode the datatype combination string. bli_param_map_char_to_blis_dt( dc_str[0], &dt_c ); bli_param_map_char_to_blis_dt( dc_str[1], &dt_a ); bli_param_map_char_to_blis_dt( dc_str[2], &dt_b ); // Project one of the datatypes (it doesn't matter which) to the // complex domain. dt_complex = bli_dt_proj_to_complex( dt_c ); // Map the dimension specifier to actual dimensions. m = libblis_test_get_dim_from_prob_size( op->dim_spec[0], p_cur ); n = libblis_test_get_dim_from_prob_size( op->dim_spec[1], p_cur ); k = libblis_test_get_dim_from_prob_size( op->dim_spec[2], p_cur ); // Map parameter characters to BLIS constants. bli_param_map_char_to_blis_trans( pc_str[0], &transa ); bli_param_map_char_to_blis_trans( pc_str[1], &transb ); // Create test scalars. bli_obj_scalar_init_detached( dt_complex, &alpha ); bli_obj_scalar_init_detached( dt_complex, &beta ); // Create test operands (vectors and/or matrices). libblis_test_mobj_create( params, dt_a, transa, sc_str[1], m, k, &a ); libblis_test_mobj_create( params, dt_b, transb, sc_str[2], k, n, &b ); libblis_test_mobj_create( params, dt_c, BLIS_NO_TRANSPOSE, sc_str[0], m, n, &c ); libblis_test_mobj_create( params, dt_c, BLIS_NO_TRANSPOSE, sc_str[0], m, n, &c_save ); // For mixed-precision, set the computation precision of C. if ( params->mixed_precision ) { num_t dt_comp; prec_t comp_prec; // The computation precision is encoded in the computation datatype, // which appears as an additional char in dc_str. 
bli_param_map_char_to_blis_dt( dc_str[3], &dt_comp ); // Extract the precision from the computation datatype. comp_prec = bli_dt_prec( dt_comp ); // Set the computation precision of C. bli_obj_set_comp_prec( comp_prec, &c ); } // Set alpha and beta. { bli_setsc( 2.0, 0.0, &alpha ); bli_setsc( 1.2, 0.5, &beta ); //bli_setsc( 1.0, 0.0, &alpha ); //bli_setsc( 1.0, 0.0, &beta ); } // Randomize A, B, and C, and save C. libblis_test_mobj_randomize( params, TRUE, &a ); libblis_test_mobj_randomize( params, TRUE, &b ); libblis_test_mobj_randomize( params, TRUE, &c ); bli_copym( &c, &c_save ); // Apply the parameters. bli_obj_set_conjtrans( transa, &a ); bli_obj_set_conjtrans( transb, &b ); // Repeat the experiment n_repeats times and record results. for ( i = 0; i < n_repeats; ++i ) { bli_copym( &c_save, &c ); time = bli_clock(); libblis_test_gemm_impl( iface, &alpha, &a, &b, &beta, &c ); time_min = bli_clock_min_diff( time_min, time ); } // Estimate the performance of the best experiment repeat. //*perf = ( 2.0 * m * n * k ) / time_min / FLOPS_PER_UNIT_PERF; //if ( bli_obj_is_complex( &c ) ) *perf *= 4.0; *perf = libblis_test_gemm_flops( &a, &b, &c ) / time_min / FLOPS_PER_UNIT_PERF; // Perform checks. libblis_test_gemm_md_check( params, &alpha, &a, &b, &beta, &c, &c_save, resid ); // Zero out performance and residual if output matrix is empty. libblis_test_check_empty_problem( &c, perf, resid ); // Free the test objects. bli_obj_free( &a ); bli_obj_free( &b ); bli_obj_free( &c ); bli_obj_free( &c_save ); } void libblis_test_gemm_impl ( iface_t iface, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c ) { switch ( iface ) { case BLIS_TEST_SEQ_FRONT_END: #if 0 //bli_printm( "alpha", alpha, "%5.2f", "" ); //bli_printm( "beta", beta, "%5.2f", "" ); if ( bli_obj_dt( c ) == BLIS_DCOMPLEX ) { bli_printm( "a", a, "%5.2f", "" ); bli_printm( "b", b, "%5.2f", "" ); bli_printm( "c", c, "%5.2f", "" ); } #endif //if ( bli_obj_length( b ) == 16 && // bli_obj_stor3_from_strides( c, a, b ) == BLIS_CRR ) //bli_printm( "c before", c, "%6.3f", "" ); bli_gemm( alpha, a, b, beta, c ); //bls_gemm( alpha, a, b, beta, c ); #if 0 if ( bli_obj_dt( c ) == BLIS_DCOMPLEX ) bli_printm( "c after", c, "%6.3f", "" ); #endif //bli_printm( "c after", c, "%5.2f", "" ); break; default: libblis_test_printf_error( "Invalid interface type.\n" ); } } void libblis_test_gemm_md_check ( test_params_t* params, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, obj_t* c_orig, double* resid ) { num_t dt_real = bli_obj_dt_proj_to_real( c ); num_t dt_comp = bli_obj_dt_proj_to_complex( c ); num_t dt; dim_t m = bli_obj_length( c ); dim_t n = bli_obj_width( c ); dim_t k = bli_obj_width_after_trans( a ); obj_t norm; obj_t t, v, w, z; double junk; // Compute our reference checksum in the real domain if all operands // are real, and in the complex domain otherwise. Also implicit in this // is that we use the storage precision of C to determine the precision // in which we perform the reference checksum. if ( bli_obj_is_real( a ) && bli_obj_is_real( b ) && bli_obj_is_real( c ) ) dt = dt_real; else dt = dt_comp; // This function works in a manner similar to that of the function // libblis_test_gemm_check(), except that we project a, b, and c into // the complex domain (regardless of their storage datatype), and then // proceed with the checking accordingly. 
obj_t a2, b2, c2, c0; bli_obj_scalar_init_detached( dt_real, &norm ); bli_obj_create( dt, n, 1, 0, 0, &t ); bli_obj_create( dt, m, 1, 0, 0, &v ); bli_obj_create( dt, k, 1, 0, 0, &w ); bli_obj_create( dt, m, 1, 0, 0, &z ); libblis_test_vobj_randomize( params, TRUE, &t ); // We need to zero out the imaginary part of t in order for our // checks to work in all cases. Otherwise, the imaginary parts // could affect intermediate products, depending on the order that // they are executed. bli_setiv( &BLIS_ZERO, &t ); // Create complex equivalents of a, b, c_orig, and c. bli_obj_create( dt, m, k, 0, 0, &a2 ); bli_obj_create( dt, k, n, 0, 0, &b2 ); bli_obj_create( dt, m, n, 0, 0, &c2 ); bli_obj_create( dt, m, n, 0, 0, &c0 ); // Cast a, b, c_orig, and c into the datatype of our temporary objects. bli_castm( a, &a2 ); bli_castm( b, &b2 ); bli_castm( c_orig, &c2 ); bli_castm( c, &c0 ); bli_gemv( &BLIS_ONE, &c0, &t, &BLIS_ZERO, &v ); #if 0 if ( bli_obj_is_scomplex( c ) && bli_obj_is_float( a ) && bli_obj_is_float( b ) ) { bli_printm( "test_gemm.c: a", a, "%7.3f", "" ); bli_printm( "test_gemm.c: b", b, "%7.3f", "" ); bli_printm( "test_gemm.c: c orig", c_orig, "%7.3f", "" ); bli_printm( "test_gemm.c: c computed", c, "%7.3f", "" ); } #endif #if 0 bli_gemm( alpha, &a2, &b2, beta, &c2 ); bli_gemv( &BLIS_ONE, &c2, &t, &BLIS_ZERO, &z ); if ( bli_obj_is_real( c ) ) bli_setiv( &BLIS_ZERO, &z ); #else bli_gemv( &BLIS_ONE, &b2, &t, &BLIS_ZERO, &w ); bli_gemv( alpha, &a2, &w, &BLIS_ZERO, &z ); bli_gemv( beta, &c2, &t, &BLIS_ONE, &z ); if ( bli_obj_is_real( c ) ) bli_setiv( &BLIS_ZERO, &z ); #endif bli_subv( &z, &v ); bli_normfv( &v, &norm ); bli_getsc( &norm, resid, &junk ); bli_obj_free( &t ); bli_obj_free( &v ); bli_obj_free( &w ); bli_obj_free( &z ); bli_obj_free( &a2 ); bli_obj_free( &b2 ); bli_obj_free( &c2 ); bli_obj_free( &c0 ); } void libblis_test_gemm_check ( test_params_t* params, obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, obj_t* c_orig, double* resid ) { num_t dt = bli_obj_dt( c ); num_t dt_real = bli_obj_dt_proj_to_real( c ); dim_t m = bli_obj_length( c ); dim_t n = bli_obj_width( c ); dim_t k = bli_obj_width_after_trans( a ); obj_t norm; obj_t t, v, w, z; double junk; // // Pre-conditions: // - a is randomized. // - b is randomized. // - c_orig is randomized. // Note: // - alpha and beta should have non-zero imaginary components in the // complex cases in order to more fully exercise the implementation. 
// // Under these conditions, we assume that the implementation for // // C := beta * C_orig + alpha * transa(A) * transb(B) // // is functioning correctly if // // normfv( v - z ) // // is negligible, where // // v = C * t // z = ( beta * C_orig + alpha * transa(A) * transb(B) ) * t // = beta * C_orig * t + alpha * transa(A) * transb(B) * t // = beta * C_orig * t + alpha * transa(A) * w // = beta * C_orig * t + z // bli_obj_scalar_init_detached( dt_real, &norm ); bli_obj_create( dt, n, 1, 0, 0, &t ); bli_obj_create( dt, m, 1, 0, 0, &v ); bli_obj_create( dt, k, 1, 0, 0, &w ); bli_obj_create( dt, m, 1, 0, 0, &z ); libblis_test_vobj_randomize( params, TRUE, &t ); bli_gemv( &BLIS_ONE, c, &t, &BLIS_ZERO, &v ); bli_gemv( &BLIS_ONE, b, &t, &BLIS_ZERO, &w ); bli_gemv( alpha, a, &w, &BLIS_ZERO, &z ); bli_gemv( beta, c_orig, &t, &BLIS_ONE, &z ); bli_subv( &z, &v ); bli_normfv( &v, &norm ); bli_getsc( &norm, resid, &junk ); bli_obj_free( &t ); bli_obj_free( &v ); bli_obj_free( &w ); bli_obj_free( &z ); } double libblis_test_gemm_flops ( obj_t* a, obj_t* b, obj_t* c ) { bool a_is_real = bli_obj_is_real( a ); bool a_is_complex = bli_obj_is_complex( a ); bool b_is_real = bli_obj_is_real( b ); bool b_is_complex = bli_obj_is_complex( b ); bool c_is_real = bli_obj_is_real( c ); bool c_is_complex = bli_obj_is_complex( c ); double m = ( double )bli_obj_length( c ); double n = ( double )bli_obj_width( c ); double k = ( double )bli_obj_width( a ); double flops; if ( ( c_is_complex && a_is_complex && b_is_complex ) ) { flops = 8.0 * m * n * k; } else if ( ( c_is_complex && a_is_complex && b_is_real ) || ( c_is_complex && a_is_real && b_is_complex ) || ( c_is_real && a_is_complex && b_is_complex ) ) { flops = 4.0 * m * n * k; } else { flops = 2.0 * m * n * k; } return flops; }
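
// A minimal standalone sketch (not part of the test suite), disabled with
// "#if 0" like the other illustrative blocks in this file: it shows the
// object-API calls that the experiment above strings together -- create the
// operands, randomize them, and invoke bli_gemm().
#if 0
int example_gemm( void )
{
	obj_t a, b, c, alpha, beta;
	dim_t m = 4, n = 5, k = 3;

	bli_obj_create( BLIS_DOUBLE, m, k, 0, 0, &a );
	bli_obj_create( BLIS_DOUBLE, k, n, 0, 0, &b );
	bli_obj_create( BLIS_DOUBLE, m, n, 0, 0, &c );

	bli_obj_scalar_init_detached( BLIS_DOUBLE, &alpha );
	bli_obj_scalar_init_detached( BLIS_DOUBLE, &beta );
	bli_setsc( 1.2, 0.0, &alpha );
	bli_setsc( 0.9, 0.0, &beta );

	bli_randm( &a );
	bli_randm( &b );
	bli_randm( &c );

	// c := beta * c + alpha * a * b
	bli_gemm( &alpha, &a, &b, &beta, &c );

	bli_obj_free( &a );
	bli_obj_free( &b );
	bli_obj_free( &c );

	return 0;
}
#endif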
import os import shutil from conans import ConanFile, CMake, tools class CmsisConan(ConanFile): name = "CMSIS-DSP" version = "1.9.0" # DSP package version git_sha = "13b9f72f212688d2306d0d085d87cbb4bf9e5d3f" license = "Apache-2.0" author = "Torfinn Berset <torfinn@bloomlife.com>" url = "https://github.com/torfinnberset/CMSIS_5" homepage = "http://www.keil.com/pack/doc/CMSIS/DSP/html/index.html" description = "A suite of common signal processing functions for use on Cortex-M processor based devices" exports = ["CMakeLists.txt", "arm_bitreversal.c", "arm-none-eabi.cmake"] topics = ("arm", "dsp", "cmsis") settings = "os", "compiler", "build_type", "arch" options = {"shared": [True, False]} default_options = "shared=False" generators = "cmake" def source(self): tools.get("{0}/archive/{1}.tar.gz".format(self.url, self.git_sha)) os.rename("CMSIS_5-{}".format(self.git_sha), "CMSIS_5") def build(self): for f in self.exports: src = "{}/{}".format(os.path.dirname(os.path.abspath(__file__)), f) shutil.copy(src, "{}/{}".format(os.curdir, f)) cmake = CMake(self) cmake.configure(defs={ "ARCHITECTURE": self.settings.arch, # Prevents ARM startup code from being generated "__PROGRAM_START": None }) cmake.build() def package(self): self.copy(pattern="*.h", dst="include/", src='CMSIS_5/CMSIS/DSP/Include') self.copy(pattern="*.h", dst="include/", src='CMSIS_5/CMSIS/Core/Include') self.copy("*CMSIS-DSP.dll", dst="bin", keep_path=False) self.copy("*CMSIS-DSP.so", dst="lib", keep_path=False) self.copy("*CMSIS-DSP.dylib", dst="lib", keep_path=False) self.copy("*CMSIS-DSP.a", dst="lib", keep_path=False) def package_info(self): self.cpp_info.libs = ["CMSIS-DSP"] # Prevents ARM startup code from being generated self.cpp_info.defines = ['__PROGRAM_START']
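

# A minimal usage sketch (not part of the recipe): with Conan 1.x the package
# can be built and added to the local cache from the recipe directory; the
# "user/channel" pair below is a placeholder.
#
#     conan create . user/channel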
'use strict';

// Accumulate the response body as UTF-8 text, then attempt a single
// JSON.parse once the stream ends. `err` and `body` are declared with `var`
// inside try/catch on purpose: both are hoisted to function scope, so the
// finally block can hand whichever of them exists to the callback.
module.exports = function parseJSON(res, fn) {
  res.text = '';
  res.setEncoding('utf8');
  res.on('data', chunk => {
    res.text += chunk;
  });
  res.on('end', () => {
    try {
      var body = res.text && JSON.parse(res.text);
    } catch (e) {
      var err = e;
      // issue #675: return the raw response if the response parsing fails
      err.rawResponse = res.text || null;
      // issue #876: return the http status code if the response parsing fails
      err.statusCode = res.statusCode;
    } finally {
      fn(err, body);
    }
  });
};
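// A quick self-contained exercise of the parser above (not from superagent's
// tests): `FakeRes` is an invented stand-in that implements just the things
// the parser touches -- setEncoding, the 'data'/'end' events, and statusCode.
// The require path is assumed for this sketch.
const { EventEmitter } = require('events');
const parseJSON = require('./parse-json');

class FakeRes extends EventEmitter {
  constructor() { super(); this.statusCode = 200; }
  setEncoding() {} // the parser calls this; a stub is enough here
}

const ok = new FakeRes();
parseJSON(ok, (err, body) => console.log(err, body)); // -> undefined { a: 1 }
ok.emit('data', '{"a":');
ok.emit('data', '1}');
ok.emit('end');

const bad = new FakeRes();
parseJSON(bad, err => console.log(err.rawResponse, err.statusCode)); // -> 'not json' 200
bad.emit('data', 'not json');
bad.emit('end');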
'use strict'

/*
 * Create a `get` function that takes a key and returns the corresponding
 * value in the sourceObject
 *
 * @notions Functions, Data-Structures, Get
 */

// Provided code:
const sourceObject = {
  num: 42,
  bool: true,
  str: 'some text',
  log: console.log,
}

// Your code: a plain property lookup is enough; a missing key falls through
// to `undefined`, which is exactly what the last test expects.
function get(key) {
  return sourceObject[key];
}

//* Begin of tests
const assert = require('assert')
assert.strictEqual(typeof get, 'function')
assert.strictEqual(get('num'), 42)
assert.strictEqual(get('bool'), true)
assert.strictEqual(get('str'), 'some text')
assert.strictEqual(get('log'), console.log)
assert.strictEqual(get('noexist'), undefined)
// End of tests */
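// An optional hardening of `get`, beyond what the tests above require: this
// hypothetical variant only honours own properties (so e.g. 'toString' does
// not leak from the prototype) and accepts a fallback for missing keys.
function getOwn(key, fallback) {
  return Object.prototype.hasOwnProperty.call(sourceObject, key)
    ? sourceObject[key]
    : fallback
}
// getOwn('toString', null) -> null, whereas get('toString') returns a function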
from __future__ import annotations import logging import time from dataclasses import replace from secrets import token_bytes from typing import Any, Dict, List, Optional, Set from blspy import AugSchemeMPL, G2Element from chia.consensus.cost_calculator import calculate_cost_of_program, NPCResult from chia.full_node.bundle_tools import simple_solution_generator from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions from chia.protocols.wallet_protocol import PuzzleSolutionResponse from chia.types.blockchain_format.coin import Coin from chia.types.blockchain_format.program import Program from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.coin_solution import CoinSolution from chia.types.generator_types import BlockGenerator from chia.types.spend_bundle import SpendBundle from chia.util.byte_types import hexstr_to_bytes from chia.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict from chia.util.ints import uint8, uint32, uint64, uint128 from chia.util.json_util import dict_to_json_str from chia.wallet.block_record import HeaderBlockRecord from chia.wallet.cc_wallet.cc_info import CCInfo from chia.wallet.cc_wallet.cc_utils import ( CC_MOD, SpendableCC, cc_puzzle_for_inner_puzzle, cc_puzzle_hash_for_inner_puzzle_hash, get_lineage_proof_from_coin_and_puz, spend_bundle_for_spendable_ccs, uncurry_cc, ) from chia.wallet.derivation_record import DerivationRecord from chia.wallet.puzzles.genesis_by_coin_id_with_0 import ( create_genesis_or_zero_coin_checker, genesis_coin_id_for_genesis_coin_checker, lineage_proof_for_genesis, ) from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import ( DEFAULT_HIDDEN_PUZZLE_HASH, calculate_synthetic_secret_key, ) from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.transaction_type import TransactionType from chia.wallet.util.wallet_types import WalletType from chia.wallet.wallet import Wallet from chia.wallet.wallet_coin_record import WalletCoinRecord from chia.wallet.wallet_info import WalletInfo class CCWallet: wallet_state_manager: Any log: logging.Logger wallet_info: WalletInfo cc_coin_record: WalletCoinRecord cc_info: CCInfo standard_wallet: Wallet base_puzzle_program: Optional[bytes] base_inner_puzzle_hash: Optional[bytes32] cost_of_single_tx: Optional[int] @staticmethod async def create_new_cc( wallet_state_manager: Any, wallet: Wallet, amount: uint64, ): self = CCWallet() self.cost_of_single_tx = None self.base_puzzle_program = None self.base_inner_puzzle_hash = None self.standard_wallet = wallet self.log = logging.getLogger(__name__) self.wallet_state_manager = wallet_state_manager self.cc_info = CCInfo(None, []) info_as_string = bytes(self.cc_info).hex() self.wallet_info = await wallet_state_manager.user_store.create_wallet( "CC Wallet", WalletType.COLOURED_COIN, info_as_string ) if self.wallet_info is None: raise ValueError("Internal Error") try: spend_bundle = await self.generate_new_coloured_coin(amount) except Exception: await wallet_state_manager.user_store.delete_wallet(self.id()) raise await self.wallet_state_manager.add_new_wallet(self, self.id()) # Change and actual coloured coin non_ephemeral_spends: List[Coin] = spend_bundle.not_ephemeral_additions() cc_coin = None puzzle_store = self.wallet_state_manager.puzzle_store for c in non_ephemeral_spends: info = await puzzle_store.wallet_info_for_puzzle_hash(c.puzzle_hash) if info is None: raise ValueError("Internal Error") id, wallet_type = info if id == self.id(): cc_coin = 
c if cc_coin is None: raise ValueError("Internal Error, unable to generate new coloured coin") regular_record = TransactionRecord( confirmed_at_height=uint32(0), created_at_time=uint64(int(time.time())), to_puzzle_hash=cc_coin.puzzle_hash, amount=uint64(cc_coin.amount), fee_amount=uint64(0), confirmed=False, sent=uint32(0), spend_bundle=spend_bundle, additions=spend_bundle.additions(), removals=spend_bundle.removals(), wallet_id=self.wallet_state_manager.main_wallet.id(), sent_to=[], trade_id=None, type=uint32(TransactionType.OUTGOING_TX.value), name=token_bytes(), ) cc_record = TransactionRecord( confirmed_at_height=uint32(0), created_at_time=uint64(int(time.time())), to_puzzle_hash=cc_coin.puzzle_hash, amount=uint64(cc_coin.amount), fee_amount=uint64(0), confirmed=False, sent=uint32(10), spend_bundle=None, additions=spend_bundle.additions(), removals=spend_bundle.removals(), wallet_id=self.id(), sent_to=[], trade_id=None, type=uint32(TransactionType.INCOMING_TX.value), name=token_bytes(), ) await self.standard_wallet.push_transaction(regular_record) await self.standard_wallet.push_transaction(cc_record) return self @staticmethod async def create_wallet_for_cc( wallet_state_manager: Any, wallet: Wallet, genesis_checker_hex: str, ) -> CCWallet: self = CCWallet() self.cost_of_single_tx = None self.base_puzzle_program = None self.base_inner_puzzle_hash = None self.standard_wallet = wallet self.log = logging.getLogger(__name__) self.wallet_state_manager = wallet_state_manager self.cc_info = CCInfo(Program.from_bytes(bytes.fromhex(genesis_checker_hex)), []) info_as_string = bytes(self.cc_info).hex() self.wallet_info = await wallet_state_manager.user_store.create_wallet( "CC Wallet", WalletType.COLOURED_COIN, info_as_string ) if self.wallet_info is None: raise Exception("wallet_info is None") await self.wallet_state_manager.add_new_wallet(self, self.id()) return self @staticmethod async def create( wallet_state_manager: Any, wallet: Wallet, wallet_info: WalletInfo, ) -> CCWallet: self = CCWallet() self.log = logging.getLogger(__name__) self.cost_of_single_tx = None self.wallet_state_manager = wallet_state_manager self.wallet_info = wallet_info self.standard_wallet = wallet self.cc_info = CCInfo.from_bytes(hexstr_to_bytes(self.wallet_info.data)) self.base_puzzle_program = None self.base_inner_puzzle_hash = None return self @classmethod def type(cls) -> uint8: return uint8(WalletType.COLOURED_COIN) def id(self) -> uint32: return self.wallet_info.id async def get_confirmed_balance(self, record_list: Optional[Set[WalletCoinRecord]] = None) -> uint64: if record_list is None: record_list = await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.id()) amount: uint64 = uint64(0) for record in record_list: lineage = await self.get_lineage_proof_for_coin(record.coin) if lineage is not None: amount = uint64(amount + record.coin.amount) self.log.info(f"Confirmed balance for cc wallet {self.id()} is {amount}") return uint64(amount) async def get_unconfirmed_balance(self, unspent_records=None) -> uint128: confirmed = await self.get_confirmed_balance(unspent_records) unconfirmed_tx: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet( self.id() ) addition_amount = 0 removal_amount = 0 for record in unconfirmed_tx: if record.type is TransactionType.INCOMING_TX: addition_amount += record.amount else: removal_amount += record.amount result = confirmed - removal_amount + addition_amount self.log.info(f"Unconfirmed balance for cc wallet {self.id()} is 
{result}") return uint128(result) async def get_max_send_amount(self, records=None): spendable: List[WalletCoinRecord] = list( await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records) ) if len(spendable) == 0: return 0 spendable.sort(reverse=True, key=lambda record: record.coin.amount) if self.cost_of_single_tx is None: coin = spendable[0].coin tx = await self.generate_signed_transaction( [coin.amount], [coin.puzzle_hash], coins={coin}, ignore_max_send_amount=True ) program: BlockGenerator = simple_solution_generator(tx.spend_bundle) # npc contains names of the coins removed, puzzle_hashes and their spend conditions result: NPCResult = get_name_puzzle_conditions( program, self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM, True ) cost_result: uint64 = calculate_cost_of_program( program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE ) self.cost_of_single_tx = cost_result self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}") max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 2 # avoid full block TXs current_cost = 0 total_amount = 0 total_coin_count = 0 for record in spendable: current_cost += self.cost_of_single_tx total_amount += record.coin.amount total_coin_count += 1 if current_cost + self.cost_of_single_tx > max_cost: break return total_amount async def get_name(self): return self.wallet_info.name async def set_name(self, new_name: str): new_info = replace(self.wallet_info, name=new_name) self.wallet_info = new_info await self.wallet_state_manager.user_store.update_wallet(self.wallet_info, False) def get_colour(self) -> str: assert self.cc_info.my_genesis_checker is not None return bytes(self.cc_info.my_genesis_checker).hex() async def coin_added(self, coin: Coin, header_hash: bytes32, removals: List[Coin], height: uint32): """ Notification from wallet state manager that wallet has been received. 
""" self.log.info(f"CC wallet has been notified that {coin} was added") search_for_parent: bool = True inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash) lineage_proof = Program.to((1, [coin.parent_coin_info, inner_puzzle.get_tree_hash(), coin.amount])) await self.add_lineage(coin.name(), lineage_proof, True) for name, lineage_proofs in self.cc_info.lineage_proofs: if coin.parent_coin_info == name: search_for_parent = False break if search_for_parent: data: Dict[str, Any] = { "data": { "action_data": { "api_name": "request_puzzle_solution", "height": height, "coin_name": coin.parent_coin_info, "received_coin": coin.name(), } } } data_str = dict_to_json_str(data) await self.wallet_state_manager.create_action( name="request_puzzle_solution", wallet_id=self.id(), wallet_type=self.type(), callback="puzzle_solution_received", done=False, data=data_str, in_transaction=True, ) async def puzzle_solution_received(self, response: PuzzleSolutionResponse, action_id: int): coin_name = response.coin_name height = response.height puzzle: Program = response.puzzle r = uncurry_cc(puzzle) header_hash = self.wallet_state_manager.blockchain.height_to_hash(height) block: Optional[ HeaderBlockRecord ] = await self.wallet_state_manager.blockchain.block_store.get_header_block_record(header_hash) if block is None: return None removals = block.removals if r is not None: mod_hash, genesis_coin_checker, inner_puzzle = r self.log.info(f"parent: {coin_name} inner_puzzle for parent is {inner_puzzle}") parent_coin = None for coin in removals: if coin.name() == coin_name: parent_coin = coin if parent_coin is None: raise ValueError("Error in finding parent") lineage_proof = get_lineage_proof_from_coin_and_puz(parent_coin, puzzle) await self.add_lineage(coin_name, lineage_proof) await self.wallet_state_manager.action_store.action_done(action_id) async def get_new_inner_hash(self) -> bytes32: return await self.standard_wallet.get_new_puzzlehash() async def get_new_inner_puzzle(self) -> Program: return await self.standard_wallet.get_new_puzzle() async def get_puzzle_hash(self, new: bool): return await self.standard_wallet.get_puzzle_hash(new) async def get_new_puzzlehash(self) -> bytes32: return await self.standard_wallet.get_new_puzzlehash() def puzzle_for_pk(self, pubkey) -> Program: inner_puzzle = self.standard_wallet.puzzle_for_pk(bytes(pubkey)) cc_puzzle: Program = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, inner_puzzle) self.base_puzzle_program = bytes(cc_puzzle) self.base_inner_puzzle_hash = inner_puzzle.get_tree_hash() return cc_puzzle async def get_new_cc_puzzle_hash(self): return (await self.wallet_state_manager.get_unused_derivation_record(self.id())).puzzle_hash # Create a new coin of value 0 with a given colour async def generate_zero_val_coin(self, send=True, exclude: List[Coin] = None) -> SpendBundle: if self.cc_info.my_genesis_checker is None: raise ValueError("My genesis checker is None") if exclude is None: exclude = [] coins = await self.standard_wallet.select_coins(0, exclude) assert coins != set() origin = coins.copy().pop() origin_id = origin.name() cc_inner = await self.get_new_inner_hash() cc_puzzle_hash: Program = cc_puzzle_hash_for_inner_puzzle_hash( CC_MOD, self.cc_info.my_genesis_checker, cc_inner ) tx: TransactionRecord = await self.standard_wallet.generate_signed_transaction( uint64(0), cc_puzzle_hash, uint64(0), origin_id, coins ) assert tx.spend_bundle is not None full_spend: SpendBundle = tx.spend_bundle self.log.info(f"Generate zero val coin: 
cc_puzzle_hash is {cc_puzzle_hash}") # generate eve coin so we can add future lineage_proofs even if we don't eve spend eve_coin = Coin(origin_id, cc_puzzle_hash, uint64(0)) await self.add_lineage( eve_coin.name(), Program.to( ( 1, [eve_coin.parent_coin_info, cc_inner, eve_coin.amount], ) ), ) await self.add_lineage(eve_coin.parent_coin_info, Program.to((0, [origin.as_list(), 1]))) if send: regular_record = TransactionRecord( confirmed_at_height=uint32(0), created_at_time=uint64(int(time.time())), to_puzzle_hash=cc_puzzle_hash, amount=uint64(0), fee_amount=uint64(0), confirmed=False, sent=uint32(10), spend_bundle=full_spend, additions=full_spend.additions(), removals=full_spend.removals(), wallet_id=uint32(1), sent_to=[], trade_id=None, type=uint32(TransactionType.INCOMING_TX.value), name=token_bytes(), ) cc_record = TransactionRecord( confirmed_at_height=uint32(0), created_at_time=uint64(int(time.time())), to_puzzle_hash=cc_puzzle_hash, amount=uint64(0), fee_amount=uint64(0), confirmed=False, sent=uint32(0), spend_bundle=full_spend, additions=full_spend.additions(), removals=full_spend.removals(), wallet_id=self.id(), sent_to=[], trade_id=None, type=uint32(TransactionType.INCOMING_TX.value), name=full_spend.name(), ) await self.wallet_state_manager.add_transaction(regular_record) await self.wallet_state_manager.add_pending_transaction(cc_record) return full_spend async def get_spendable_balance(self, records=None) -> uint64: coins = await self.get_cc_spendable_coins(records) amount = 0 for record in coins: amount += record.coin.amount return uint64(amount) async def get_pending_change_balance(self) -> uint64: unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id()) addition_amount = 0 for record in unconfirmed_tx: if not record.is_in_mempool(): continue our_spend = False for coin in record.removals: # Don't count eve spend as change if coin.parent_coin_info.hex() == self.get_colour(): continue if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()): our_spend = True break if our_spend is not True: continue for coin in record.additions: if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()): addition_amount += coin.amount return uint64(addition_amount) async def get_cc_spendable_coins(self, records=None) -> List[WalletCoinRecord]: result: List[WalletCoinRecord] = [] record_list: Set[WalletCoinRecord] = await self.wallet_state_manager.get_spendable_coins_for_wallet( self.id(), records ) for record in record_list: lineage = await self.get_lineage_proof_for_coin(record.coin) if lineage is not None: result.append(record) return result async def select_coins(self, amount: uint64) -> Set[Coin]: """ Returns a set of coins that can be used for generating a new transaction. 
""" async with self.wallet_state_manager.lock: spendable_am = await self.get_confirmed_balance() if amount > spendable_am: error_msg = f"Can't select amount higher than our spendable balance {amount}, spendable {spendable_am}" self.log.warning(error_msg) raise ValueError(error_msg) self.log.info(f"About to select coins for amount {amount}") spendable: List[WalletCoinRecord] = await self.get_cc_spendable_coins() sum = 0 used_coins: Set = set() # Use older coins first spendable.sort(key=lambda r: r.confirmed_block_height) # Try to use coins from the store, if there isn't enough of "unused" # coins use change coins that are not confirmed yet unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet( self.id() ) for coinrecord in spendable: if sum >= amount and len(used_coins) > 0: break if coinrecord.coin.name() in unconfirmed_removals: continue sum += coinrecord.coin.amount used_coins.add(coinrecord.coin) self.log.info(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!") # This happens when we couldn't use one of the coins because it's already used # but unconfirmed, and we are waiting for the change. (unconfirmed_additions) if sum < amount: raise ValueError( "Can't make this transaction at the moment. Waiting for the change from the previous transaction." ) self.log.info(f"Successfully selected coins: {used_coins}") return used_coins async def get_sigs(self, innerpuz: Program, innersol: Program, coin_name: bytes32) -> List[G2Element]: puzzle_hash = innerpuz.get_tree_hash() pubkey, private = await self.wallet_state_manager.get_keys(puzzle_hash) synthetic_secret_key = calculate_synthetic_secret_key(private, DEFAULT_HIDDEN_PUZZLE_HASH) sigs: List[G2Element] = [] error, conditions, cost = conditions_dict_for_solution( innerpuz, innersol, self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM ) if conditions is not None: for _, msg in pkm_pairs_for_conditions_dict( conditions, coin_name, self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA ): signature = AugSchemeMPL.sign(synthetic_secret_key, msg) sigs.append(signature) return sigs async def inner_puzzle_for_cc_puzhash(self, cc_hash: bytes32) -> Program: record: DerivationRecord = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash( cc_hash.hex() ) inner_puzzle: Program = self.standard_wallet.puzzle_for_pk(bytes(record.pubkey)) return inner_puzzle async def get_lineage_proof_for_coin(self, coin) -> Optional[Program]: for name, proof in self.cc_info.lineage_proofs: if name == coin.parent_coin_info: return proof return None async def generate_signed_transaction( self, amounts: List[uint64], puzzle_hashes: List[bytes32], fee: uint64 = uint64(0), origin_id: bytes32 = None, coins: Set[Coin] = None, ignore_max_send_amount: bool = False, ) -> TransactionRecord: # Get coins and calculate amount of change required outgoing_amount = uint64(sum(amounts)) total_outgoing = outgoing_amount + fee if not ignore_max_send_amount: max_send = await self.get_max_send_amount() if total_outgoing > max_send: raise ValueError(f"Can't send more than {max_send} in a single transaction") if coins is None: selected_coins: Set[Coin] = await self.select_coins(uint64(total_outgoing)) else: selected_coins = coins total_amount = sum([x.amount for x in selected_coins]) change = total_amount - total_outgoing primaries = [] for amount, puzzle_hash in zip(amounts, puzzle_hashes): primaries.append({"puzzlehash": puzzle_hash, "amount": amount}) if change > 
0: changepuzzlehash = await self.get_new_inner_hash() primaries.append({"puzzlehash": changepuzzlehash, "amount": change}) coin = list(selected_coins)[0] inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash) if self.cc_info.my_genesis_checker is None: raise ValueError("My genesis checker is None") genesis_id = genesis_coin_id_for_genesis_coin_checker(self.cc_info.my_genesis_checker) spendable_cc_list = [] innersol_list = [] sigs: List[G2Element] = [] first = True for coin in selected_coins: coin_inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash) if first: first = False if fee > 0: innersol = self.standard_wallet.make_solution(primaries=primaries, fee=fee) else: innersol = self.standard_wallet.make_solution(primaries=primaries) else: innersol = self.standard_wallet.make_solution() innersol_list.append(innersol) lineage_proof = await self.get_lineage_proof_for_coin(coin) assert lineage_proof is not None spendable_cc_list.append(SpendableCC(coin, genesis_id, inner_puzzle, lineage_proof)) sigs = sigs + await self.get_sigs(coin_inner_puzzle, innersol, coin.name()) spend_bundle = spend_bundle_for_spendable_ccs( CC_MOD, self.cc_info.my_genesis_checker, spendable_cc_list, innersol_list, sigs, ) # TODO add support for array in stored records return TransactionRecord( confirmed_at_height=uint32(0), created_at_time=uint64(int(time.time())), to_puzzle_hash=puzzle_hashes[0], amount=uint64(outgoing_amount), fee_amount=uint64(0), confirmed=False, sent=uint32(0), spend_bundle=spend_bundle, additions=spend_bundle.additions(), removals=spend_bundle.removals(), wallet_id=self.id(), sent_to=[], trade_id=None, type=uint32(TransactionType.OUTGOING_TX.value), name=spend_bundle.name(), ) async def add_lineage(self, name: bytes32, lineage: Optional[Program], in_transaction=False): self.log.info(f"Adding parent {name}: {lineage}") current_list = self.cc_info.lineage_proofs.copy() current_list.append((name, lineage)) cc_info: CCInfo = CCInfo(self.cc_info.my_genesis_checker, current_list) await self.save_info(cc_info, in_transaction) async def save_info(self, cc_info: CCInfo, in_transaction): self.cc_info = cc_info current_info = self.wallet_info data_str = bytes(cc_info).hex() wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str) self.wallet_info = wallet_info await self.wallet_state_manager.user_store.update_wallet(wallet_info, in_transaction) async def generate_new_coloured_coin(self, amount: uint64) -> SpendBundle: coins = await self.standard_wallet.select_coins(amount) origin = coins.copy().pop() origin_id = origin.name() cc_inner_hash = await self.get_new_inner_hash() await self.add_lineage(origin_id, Program.to((0, [origin.as_list(), 0]))) genesis_coin_checker = create_genesis_or_zero_coin_checker(origin_id) minted_cc_puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, genesis_coin_checker, cc_inner_hash) tx_record: TransactionRecord = await self.standard_wallet.generate_signed_transaction( amount, minted_cc_puzzle_hash, uint64(0), origin_id, coins ) assert tx_record.spend_bundle is not None lineage_proof: Optional[Program] = lineage_proof_for_genesis(origin) lineage_proofs = [(origin_id, lineage_proof)] cc_info: CCInfo = CCInfo(genesis_coin_checker, lineage_proofs) await self.save_info(cc_info, False) return tx_record.spend_bundle async def create_spend_bundle_relative_amount(self, cc_amount, zero_coin: Coin = None) -> Optional[SpendBundle]: # If we're losing value then get coloured coins with at least that much value # If we're 
gaining value then our amount doesn't matter if cc_amount < 0: cc_spends = await self.select_coins(abs(cc_amount)) else: if zero_coin is None: return None cc_spends = set() cc_spends.add(zero_coin) if cc_spends is None: return None # Calculate output amount given relative difference and sum of actual values spend_value = sum([coin.amount for coin in cc_spends]) cc_amount = spend_value + cc_amount # Loop through coins and create solution for innerpuzzle list_of_solutions = [] output_created = None sigs: List[G2Element] = [] for coin in cc_spends: if output_created is None: newinnerpuzhash = await self.get_new_inner_hash() innersol = self.standard_wallet.make_solution( primaries=[{"puzzlehash": newinnerpuzhash, "amount": cc_amount}] ) output_created = coin else: innersol = self.standard_wallet.make_solution(consumed=[output_created.name()]) innerpuz: Program = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash) sigs = sigs + await self.get_sigs(innerpuz, innersol, coin.name()) lineage_proof = await self.get_lineage_proof_for_coin(coin) puzzle_reveal = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, innerpuz) # Use coin info to create solution and add coin and solution to list of CoinSolutions solution = [ innersol, coin.as_list(), lineage_proof, None, None, None, None, None, ] list_of_solutions.append(CoinSolution(coin, puzzle_reveal, Program.to(solution))) aggsig = AugSchemeMPL.aggregate(sigs) return SpendBundle(list_of_solutions, aggsig)
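# ---------------------------------------------------------------------------
# A stripped-down sketch of the oldest-first coin selection that
# CCWallet.select_coins implements above, using plain dicts instead of
# WalletCoinRecord so it runs without any chia imports. The field names
# ("amount", "name", "confirmed_block_height") are stand-ins for the record
# attributes the real method reads.
def select_coins_sketch(records, amount, unconfirmed_removals):
    total = 0
    used = []
    for rec in sorted(records, key=lambda r: r["confirmed_block_height"]):
        if total >= amount and used:
            break  # same early exit as the wallet: enough value gathered
        if rec["name"] in unconfirmed_removals:
            continue  # coin already spent by a pending, unconfirmed tx
        total += rec["amount"]
        used.append(rec)
    if total < amount:
        raise ValueError("waiting for change from a previous transaction")
    return used

# select_coins_sketch([{"name": "a", "amount": 5, "confirmed_block_height": 1}],
#                     5, set())  ->  the single coin "a"
# ---------------------------------------------------------------------------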
/** * Copyright IBM Corp. 2016, 2020 * * This source code is licensed under the Apache-2.0 license found in the * LICENSE file in the root directory of this source tree. * * Code generated by @carbon/icon-build-helpers. DO NOT EDIT. */ var _24 = { "elem": "svg", "attrs": { "xmlns": "http://www.w3.org/2000/svg", "viewBox": "0 0 32 32", "fill": "currentColor", "width": 24, "height": 24 }, "content": [{ "elem": "path", "attrs": { "d": "M28,12H4a2.0023,2.0023,0,0,0-2,2V28a2.002,2.002,0,0,0,2,2H28a2.0023,2.0023,0,0,0,2-2V14A2.0027,2.0027,0,0,0,28,12ZM20,28H12V27a1.0009,1.0009,0,0,1,1-1h6a1.0009,1.0009,0,0,1,1,1Zm8,0H22V27a3.0033,3.0033,0,0,0-3-3H13a3.0033,3.0033,0,0,0-3,3v1H4V14H28.002Z" } }, { "elem": "path", "attrs": { "d": "M16 23a4 4 0 114-4A4.0042 4.0042 0 0116 23zm0-6a2 2 0 102 2A2.0023 2.0023 0 0016 17zM30 4V8a2.0027 2.0027 0 01-2 2H24a2.0027 2.0027 0 01-2-2V4a2.0027 2.0027 0 012-2h4A2.0027 2.0027 0 0130 4zM28 8l.002-4H24V8zM20 4V8a2.0027 2.0027 0 01-2 2H14a2.0023 2.0023 0 01-2-2V4a2.0023 2.0023 0 012-2h4A2.0027 2.0027 0 0120 4zM18 8l.002-4H14V8zM10 4V8a2.0023 2.0023 0 01-2 2H4A2.0023 2.0023 0 012 8V4A2.0023 2.0023 0 014 2H8A2.0023 2.0023 0 0110 4zM8 8l.0015-4H4V8z" } }], "name": "thumbnail-preview", "size": 24 }; export default _24;
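// A tiny renderer sketch (not part of the generated icon package above)
// showing how descriptor objects like `_24` map onto markup: `elem`, `attrs`
// and `content` nest recursively, so a depth-first walk is all that's needed.
function toSVG({ elem, attrs = {}, content = [] }) {
  const a = Object.entries(attrs)
    .map(([k, v]) => ` ${k}="${v}"`)
    .join('');
  return `<${elem}${a}>${content.map(toSVG).join('')}</${elem}>`;
}
// toSVG(_24) -> '<svg xmlns="http://www.w3.org/2000/svg" ...>...</svg>'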
const Router = require("koa-router");
const router = new Router();
const gamebank = require("gamebank");
const Customer = require("config").get("Customer");

router.post("/API/:attribute/:method", ctx => {
  return new Promise((resolve, reject) => {
    const { attribute, method } = ctx.params;
    let params = ctx.request.body;
    let generateKeys = null;
    const userName = params[0];
    const password = params[1];
    if (attribute == "broadcast" && method == "accountCreate") {
      if (params.length != 2 || userName == "" || password == "") {
        throw "Parameter error";
      }
      // "initAcoount" (sic) is the key actually used in the config file, so
      // it is kept as-is here.
      const wif = Customer.initAcoount.wif;
      const initName = Customer.initAcoount.initName;
      const fee = Customer.initAcoount.fee;
      generateKeys = gamebank.auth.generateKeys(userName, password, [
        "owner",
        "active",
        "posting",
        "memo"
      ]);
      const metadata = "";
      const owner = {
        weight_threshold: 1,
        account_auths: [],
        key_auths: [[generateKeys.owner, 1]]
      };
      const active = {
        weight_threshold: 1,
        account_auths: [],
        key_auths: [[generateKeys.active, 1]]
      };
      const posting = {
        weight_threshold: 1,
        account_auths: [],
        key_auths: [[generateKeys.posting, 1]]
      };
      const memoKey = generateKeys.memo;
      const arr = [];
      arr.push(
        wif,
        fee,
        initName,
        params[0],
        owner,
        active,
        posting,
        memoKey,
        metadata
      );
      params = arr;
    }
    // Append a node-style callback so the RPC result settles this promise.
    params.push(function(err, result) {
      if (err) {
        reject(err);
      } else {
        if (generateKeys) {
          const privateKeys = gamebank.auth.getPrivateKeys(userName, password, [
            "posting",
            "active",
            "owner",
            "memo"
          ]);
          resolve([
            {
              keyValue: privateKeys.posting,
              key: 1,
              keyName: "posting",
              extra: "posting, upvoting and similar operations"
            },
            {
              keyValue: privateKeys.active,
              key: 2,
              keyName: "active",
              extra: "transfers, trading and similar operations"
            },
            {
              keyValue: privateKeys.owner,
              key: 3,
              keyName: "owner",
              extra: "account recovery and similar operations"
            },
            {
              keyValue: privateKeys.memo,
              key: 4,
              keyName: "memo",
              extra: "memos and similar operations"
            }
          ]);
        } else {
          resolve(result);
        }
      }
    });
    var paramArr = params.map(element => {
      if (element == null) {
        return undefined;
      } else {
        return element;
      }
    });
    gamebank[attribute][method].apply(null, paramArr);
  })
    .then(res => {
      ctx.body = { data: res, success: true, code: 200 };
      ctx.status = 200;
    })
    .catch(err => {
      console.log(err);
      ctx.body = { data: err, success: false, code: 400 };
      ctx.status = 400;
    });
});

// Zero-pad month/day/hour/minute so e.g. 9:07 does not render as "9:7".
function formateTime(time = Date.now()) {
  const pad = n => String(n).padStart(2, "0");
  const FormatTime = new Date(time);
  const Year = FormatTime.getFullYear();
  const Month = pad(FormatTime.getMonth() + 1);
  const Day = pad(FormatTime.getDate());
  const Hours = pad(FormatTime.getHours());
  const Minutes = pad(FormatTime.getMinutes());
  return `${Year}-${Month}-${Day} ${Hours}:${Minutes}`;
}

router.get("/lottery", async ctx => {
  const { head_block_number } = await gamebank.api.getDynamicGlobalPropertiesAsync();
  ctx.body = {
    code: 200,
    data: {
      AmountOfBets: global.currentAmountOfBets,
      CurrentPeriod: global.CurrentPeriod,
      // remaining blocks times the ~3s block interval, projected from now
      Openingtime: formateTime(Date.now() + (global.currentOpeningtime - head_block_number) * 3000),
      PurchaseNumber: [...new Set(global.PurchaseNumber)].length,
      lastAwarded: global.lastAwarded
    },
    success: true
  };
  ctx.status = 200;
});

router.get("/blockInformation", async ctx => {
  ctx.body = { code: 200, data: global.blockInformation, success: true };
  ctx.status = 200;
});

module.exports = router;
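// The dispatch trick used by the POST route above, in isolation: the caller's
// positional params get a node-style callback appended, and that callback
// fans the result back into a promise. `callAsPromise` is an invented name,
// and the usage example assumes a callback-style gamebank API method exists
// alongside the *Async variants.
function callAsPromise(fn, params) {
  return new Promise((resolve, reject) => {
    fn(...params, (err, result) => (err ? reject(err) : resolve(result)));
  });
}
// callAsPromise(gamebank.api.getDynamicGlobalProperties, [])
//   .then(props => console.log(props.head_block_number));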
var BABYLON; (function (BABYLON) { var intersectBoxAASphere = function (boxMin, boxMax, sphereCenter, sphereRadius) { if (boxMin.x > sphereCenter.x + sphereRadius) return false; if (sphereCenter.x - sphereRadius > boxMax.x) return false; if (boxMin.y > sphereCenter.y + sphereRadius) return false; if (sphereCenter.y - sphereRadius > boxMax.y) return false; if (boxMin.z > sphereCenter.z + sphereRadius) return false; if (sphereCenter.z - sphereRadius > boxMax.z) return false; return true; }; var getLowestRoot = function (a, b, c, maxR) { var determinant = b * b - 4.0 * a * c; var result = { root: 0, found: false }; if (determinant < 0) return result; var sqrtD = Math.sqrt(determinant); var r1 = (-b - sqrtD) / (2.0 * a); var r2 = (-b + sqrtD) / (2.0 * a); if (r1 > r2) { var temp = r2; r2 = r1; r1 = temp; } if (r1 > 0 && r1 < maxR) { result.root = r1; result.found = true; return result; } if (r2 > 0 && r2 < maxR) { result.root = r2; result.found = true; return result; } return result; }; var Collider = (function () { function Collider() { this.radius = new BABYLON.Vector3(1, 1, 1); this.retry = 0; this.basePointWorld = BABYLON.Vector3.Zero(); this.velocityWorld = BABYLON.Vector3.Zero(); this.normalizedVelocity = BABYLON.Vector3.Zero(); this._collisionPoint = BABYLON.Vector3.Zero(); this._planeIntersectionPoint = BABYLON.Vector3.Zero(); this._tempVector = BABYLON.Vector3.Zero(); this._tempVector2 = BABYLON.Vector3.Zero(); this._tempVector3 = BABYLON.Vector3.Zero(); this._tempVector4 = BABYLON.Vector3.Zero(); this._edge = BABYLON.Vector3.Zero(); this._baseToVertex = BABYLON.Vector3.Zero(); this._destinationPoint = BABYLON.Vector3.Zero(); this._slidePlaneNormal = BABYLON.Vector3.Zero(); this._displacementVector = BABYLON.Vector3.Zero(); } // Methods Collider.prototype._initialize = function (source, dir, e) { this.velocity = dir; BABYLON.Vector3.NormalizeToRef(dir, this.normalizedVelocity); this.basePoint = source; source.multiplyToRef(this.radius, this.basePointWorld); dir.multiplyToRef(this.radius, this.velocityWorld); this.velocityWorldLength = this.velocityWorld.length(); this.epsilon = e; this.collisionFound = false; }; Collider.prototype._checkPointInTriangle = function (point, pa, pb, pc, n) { pa.subtractToRef(point, this._tempVector); pb.subtractToRef(point, this._tempVector2); BABYLON.Vector3.CrossToRef(this._tempVector, this._tempVector2, this._tempVector4); var d = BABYLON.Vector3.Dot(this._tempVector4, n); if (d < 0) return false; pc.subtractToRef(point, this._tempVector3); BABYLON.Vector3.CrossToRef(this._tempVector2, this._tempVector3, this._tempVector4); d = BABYLON.Vector3.Dot(this._tempVector4, n); if (d < 0) return false; BABYLON.Vector3.CrossToRef(this._tempVector3, this._tempVector, this._tempVector4); d = BABYLON.Vector3.Dot(this._tempVector4, n); return d >= 0; }; Collider.prototype._canDoCollision = function (sphereCenter, sphereRadius, vecMin, vecMax) { var distance = BABYLON.Vector3.Distance(this.basePointWorld, sphereCenter); var max = Math.max(this.radius.x, this.radius.y, this.radius.z); if (distance > this.velocityWorldLength + max + sphereRadius) { return false; } if (!intersectBoxAASphere(vecMin, vecMax, this.basePointWorld, this.velocityWorldLength + max)) return false; return true; }; Collider.prototype._testTriangle = function (faceIndex, subMesh, p1, p2, p3) { var t0; var embeddedInPlane = false; if (!subMesh._trianglePlanes) { subMesh._trianglePlanes = []; } if (!subMesh._trianglePlanes[faceIndex]) { subMesh._trianglePlanes[faceIndex] = new BABYLON.Plane(0, 0, 
0, 0); subMesh._trianglePlanes[faceIndex].copyFromPoints(p1, p2, p3); } var trianglePlane = subMesh._trianglePlanes[faceIndex]; if ((!subMesh.getMaterial()) && !trianglePlane.isFrontFacingTo(this.normalizedVelocity, 0)) return; var signedDistToTrianglePlane = trianglePlane.signedDistanceTo(this.basePoint); var normalDotVelocity = BABYLON.Vector3.Dot(trianglePlane.normal, this.velocity); if (normalDotVelocity == 0) { if (Math.abs(signedDistToTrianglePlane) >= 1.0) return; embeddedInPlane = true; t0 = 0; } else { t0 = (-1.0 - signedDistToTrianglePlane) / normalDotVelocity; var t1 = (1.0 - signedDistToTrianglePlane) / normalDotVelocity; if (t0 > t1) { var temp = t1; t1 = t0; t0 = temp; } if (t0 > 1.0 || t1 < 0.0) return; if (t0 < 0) t0 = 0; if (t0 > 1.0) t0 = 1.0; } this._collisionPoint.copyFromFloats(0, 0, 0); var found = false; var t = 1.0; if (!embeddedInPlane) { this.basePoint.subtractToRef(trianglePlane.normal, this._planeIntersectionPoint); this.velocity.scaleToRef(t0, this._tempVector); this._planeIntersectionPoint.addInPlace(this._tempVector); if (this._checkPointInTriangle(this._planeIntersectionPoint, p1, p2, p3, trianglePlane.normal)) { found = true; t = t0; this._collisionPoint.copyFrom(this._planeIntersectionPoint); } } if (!found) { var velocitySquaredLength = this.velocity.lengthSquared(); var a = velocitySquaredLength; this.basePoint.subtractToRef(p1, this._tempVector); var b = 2.0 * (BABYLON.Vector3.Dot(this.velocity, this._tempVector)); var c = this._tempVector.lengthSquared() - 1.0; var lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { t = lowestRoot.root; found = true; this._collisionPoint.copyFrom(p1); } this.basePoint.subtractToRef(p2, this._tempVector); b = 2.0 * (BABYLON.Vector3.Dot(this.velocity, this._tempVector)); c = this._tempVector.lengthSquared() - 1.0; lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { t = lowestRoot.root; found = true; this._collisionPoint.copyFrom(p2); } this.basePoint.subtractToRef(p3, this._tempVector); b = 2.0 * (BABYLON.Vector3.Dot(this.velocity, this._tempVector)); c = this._tempVector.lengthSquared() - 1.0; lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { t = lowestRoot.root; found = true; this._collisionPoint.copyFrom(p3); } p2.subtractToRef(p1, this._edge); p1.subtractToRef(this.basePoint, this._baseToVertex); var edgeSquaredLength = this._edge.lengthSquared(); var edgeDotVelocity = BABYLON.Vector3.Dot(this._edge, this.velocity); var edgeDotBaseToVertex = BABYLON.Vector3.Dot(this._edge, this._baseToVertex); a = edgeSquaredLength * (-velocitySquaredLength) + edgeDotVelocity * edgeDotVelocity; b = edgeSquaredLength * (2.0 * BABYLON.Vector3.Dot(this.velocity, this._baseToVertex)) - 2.0 * edgeDotVelocity * edgeDotBaseToVertex; c = edgeSquaredLength * (1.0 - this._baseToVertex.lengthSquared()) + edgeDotBaseToVertex * edgeDotBaseToVertex; lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { var f = (edgeDotVelocity * lowestRoot.root - edgeDotBaseToVertex) / edgeSquaredLength; if (f >= 0.0 && f <= 1.0) { t = lowestRoot.root; found = true; this._edge.scaleInPlace(f); p1.addToRef(this._edge, this._collisionPoint); } } p3.subtractToRef(p2, this._edge); p2.subtractToRef(this.basePoint, this._baseToVertex); edgeSquaredLength = this._edge.lengthSquared(); edgeDotVelocity = BABYLON.Vector3.Dot(this._edge, this.velocity); edgeDotBaseToVertex = BABYLON.Vector3.Dot(this._edge, this._baseToVertex); a = edgeSquaredLength * (-velocitySquaredLength) + edgeDotVelocity * edgeDotVelocity; b = 
edgeSquaredLength * (2.0 * BABYLON.Vector3.Dot(this.velocity, this._baseToVertex)) - 2.0 * edgeDotVelocity * edgeDotBaseToVertex; c = edgeSquaredLength * (1.0 - this._baseToVertex.lengthSquared()) + edgeDotBaseToVertex * edgeDotBaseToVertex; lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { f = (edgeDotVelocity * lowestRoot.root - edgeDotBaseToVertex) / edgeSquaredLength; if (f >= 0.0 && f <= 1.0) { t = lowestRoot.root; found = true; this._edge.scaleInPlace(f); p2.addToRef(this._edge, this._collisionPoint); } } p1.subtractToRef(p3, this._edge); p3.subtractToRef(this.basePoint, this._baseToVertex); edgeSquaredLength = this._edge.lengthSquared(); edgeDotVelocity = BABYLON.Vector3.Dot(this._edge, this.velocity); edgeDotBaseToVertex = BABYLON.Vector3.Dot(this._edge, this._baseToVertex); a = edgeSquaredLength * (-velocitySquaredLength) + edgeDotVelocity * edgeDotVelocity; b = edgeSquaredLength * (2.0 * BABYLON.Vector3.Dot(this.velocity, this._baseToVertex)) - 2.0 * edgeDotVelocity * edgeDotBaseToVertex; c = edgeSquaredLength * (1.0 - this._baseToVertex.lengthSquared()) + edgeDotBaseToVertex * edgeDotBaseToVertex; lowestRoot = getLowestRoot(a, b, c, t); if (lowestRoot.found) { f = (edgeDotVelocity * lowestRoot.root - edgeDotBaseToVertex) / edgeSquaredLength; if (f >= 0.0 && f <= 1.0) { t = lowestRoot.root; found = true; this._edge.scaleInPlace(f); p3.addToRef(this._edge, this._collisionPoint); } } } if (found) { var distToCollision = t * this.velocity.length(); if (!this.collisionFound || distToCollision < this.nearestDistance) { if (!this.intersectionPoint) { this.intersectionPoint = this._collisionPoint.clone(); } else { this.intersectionPoint.copyFrom(this._collisionPoint); } this.nearestDistance = distToCollision; this.collisionFound = true; this.collidedMesh = subMesh.getMesh(); } } }; Collider.prototype._collide = function (subMesh, pts, indices, indexStart, indexEnd, decal) { for (var i = indexStart; i < indexEnd; i += 3) { var p1 = pts[indices[i] - decal]; var p2 = pts[indices[i + 1] - decal]; var p3 = pts[indices[i + 2] - decal]; this._testTriangle(i, subMesh, p3, p2, p1); } }; Collider.prototype._getResponse = function (pos, vel) { pos.addToRef(vel, this._destinationPoint); vel.scaleInPlace((this.nearestDistance / vel.length())); this.basePoint.addToRef(vel, pos); pos.subtractToRef(this.intersectionPoint, this._slidePlaneNormal); this._slidePlaneNormal.normalize(); this._slidePlaneNormal.scaleToRef(this.epsilon, this._displacementVector); pos.addInPlace(this._displacementVector); this.intersectionPoint.addInPlace(this._displacementVector); this._slidePlaneNormal.scaleInPlace(BABYLON.Plane.SignedDistanceToPlaneFromPositionAndNormal(this.intersectionPoint, this._slidePlaneNormal, this._destinationPoint)); this._destinationPoint.subtractInPlace(this._slidePlaneNormal); this._destinationPoint.subtractToRef(this.intersectionPoint, vel); }; return Collider; })(); BABYLON.Collider = Collider; })(BABYLON || (BABYLON = {})); //# sourceMappingURL=babylon.collider.js.map
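// A standalone restatement (invented helper name, unit-sphere space as in
// _testTriangle above) of the vertex sweep: a sphere centred at `base` moving
// along `velocity` touches a triangle corner p when |base + t*velocity - p| = 1,
// which is exactly the quadratic handed to getLowestRoot in the code above.
// getLowestRoot is module-private up there; this sketch assumes access to it.
function sweepSphereAgainstVertex(base, velocity, p, tMax) {
  var dx = base.x - p.x, dy = base.y - p.y, dz = base.z - p.z;
  var a = velocity.x * velocity.x + velocity.y * velocity.y + velocity.z * velocity.z;
  var b = 2.0 * (velocity.x * dx + velocity.y * dy + velocity.z * dz);
  var c = dx * dx + dy * dy + dz * dz - 1.0;
  return getLowestRoot(a, b, c, tMax); // { root, found }
}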
#!/usr/bin/python # -*- encoding: utf-8 -*- import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.nn.functional as F import os import os.path as osp import time import sys import logging import numpy as np import argparse import importlib import json import cv2 from lib.model_1 import DeepLabLargeFOV from lib.pascal_voc import PascalVoc from lib.pascal_voc_aug import PascalVoc_Aug from lib.transform import RandomCrop from lib.optimizer import Optimizer from lib.loss import * from utils.logger import setup_logger from evaluate import eval_model from torchsummary import summary batchsize = 8 torch.multiprocessing.set_sharing_strategy('file_system') def get_args(): parser = argparse.ArgumentParser(description='Train a network') parser.add_argument( '--cfg', dest = 'cfg', type = str, default = 'config/pascal_voc_aug_multi_scale.py', help = 'config file for training' ) return parser.parse_args() def get_LocationMap(data,threshold = 0.7*255,max=255,min=0): change = lambda a,th:1 if a>th else 0 fm = [change(j,threshold) for i in data for j in i] fm = np.array(fm).reshape(321,321) return fm def returnCAM(feature_conv, weight_softmax): # generate the class activation maps upsample to 256x256 size_upsample = (321, 321) nc, h, w = feature_conv.shape #print(feature_conv.shape) #print("class_idx :{}".format(len(class_idx))) #print('idx is {} weight_softmax{} shape is {}'.format(idx,idx,len(weight_softmax))) #print(nc,h,w) feature_conv = np.swapaxes(feature_conv,0,1) cam = np.dot(weight_softmax,feature_conv)#weightsoftmax (21,1024) feature_conv (1024,41,41) cam = cam - np.min(cam) cam_img = cam / np.max(cam) cam_img = np.uint8(255 * cam_img) cam_img = cam_img.swapaxes(0,2) cam_img = cv2.resize(cam_img, size_upsample) cam_img = cam_img.swapaxes(0,2) return cam_img def getCams(preds,feature,weight_softmax): # print('shape of preds:{}'.format(preds)) bz, nc, h, w = feature.shape #print('bz is {}'.format(bz)) CAMs = np.zeros(shape=(0,21,321,321)) for i in range(bz): #print(i) #print("shape of CAMs is {}".format(CAMs.shape)) #print('shape of nextCams is{}'.format(returnCAM(feature[i], weight_softmax, [idxs[i,-1]]).shape)) nextCAM = returnCAM(feature[i], weight_softmax) #print(nextCAM) CAMs = np.r_[CAMs,np.expand_dims(nextCAM,axis=0)] return CAMs def showCAM(CAMs,img): img = cv2.imread('test.jpg') height=321 width=321 heatmap = cv2.applyColorMap(cv2.resize(CAMs[0],(width, height)), cv2.COLORMAP_JET) result = heatmap * 0.3 + img * 0.5 cv2.imwrite('CAM.jpg', result) def train(args): ## setup cfg and logger spec = importlib.util.spec_from_file_location('mod_cfg', args.cfg) mod_cfg = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod_cfg) cfg = mod_cfg.cfg cfg_str = json.dumps(cfg, ensure_ascii=False, indent=2) if not osp.exists(cfg.res_pth): os.makedirs(cfg.res_pth) setup_logger(cfg.res_pth) logger = logging.getLogger(__name__) logger.info(cfg_str) device = torch.device('cuda:0') ## modules and losses logger.info('creating model and loss module') net = DeepLabLargeFOV(3, cfg.n_classes) #for item in net.parameters(): # print(item.size()) net.train() net.cuda() if not torch.cuda.device_count() == 0: net = nn.DataParallel(net) n_min = (cfg.crop_size**2) * cfg.batchsize // 8 criteria = OhemCELoss(0.7, n_min) globloss = GlobLoss(0.7,n_min) criteria.cuda() #hook the feature extractor features_blobs_1 = [] features_blobs_2 = [] features_blobs_3 = [] features_blobs_4 = [] def hook_feature(module, input, output): features_blobs_1.append(output.data.cpu().numpy()) 
    # NOTE: the original shared hook appended every module's output to all
    # four lists, so features_blobs_2..4 never held their own module's
    # features; each branch now gets its own hook.
    def hook_feature_2(module, input, output):
        features_blobs_2.append(output.data.cpu().numpy())

    def hook_feature_3(module, input, output):
        features_blobs_3.append(output.data.cpu().numpy())

    def hook_feature_4(module, input, output):
        features_blobs_4.append(output.data.cpu().numpy())

    net._modules['module']._modules['MDC_DC_1']._modules.get('1').register_forward_hook(hook_feature)
    net._modules['module']._modules['MDC_DC_2']._modules.get('1').register_forward_hook(hook_feature_2)
    net._modules['module']._modules['MDC_DC_3']._modules.get('1').register_forward_hook(hook_feature_3)
    net._modules['module']._modules['MDC_DC_4']._modules.get('1').register_forward_hook(hook_feature_4)

    params = list(net.parameters())
    count = 0
    # summary(net, (3, 321, 321))
    weight_softmax = np.squeeze(params[-3].data.cpu().numpy())

    ## dataset
    logger.info('creating dataset and dataloader')
    ds = eval(cfg.dataset)(cfg, mode='train')
    dl = DataLoader(ds,
                    batch_size = cfg.batchsize,
                    shuffle = True,
                    num_workers = cfg.n_workers,
                    drop_last = True)

    ## optimizer
    logger.info('creating optimizer')
    optimizer = Optimizer(
            params = net.parameters(),
            warmup_start_lr = cfg.warmup_start_lr,
            warmup_steps = cfg.warmup_iter,
            lr0 = cfg.start_lr,
            max_iter = cfg.iter_num,
            momentum = cfg.momentum,
            wd = cfg.weight_decay,
            power = cfg.power)

    ## train loop
    loss_avg = []
    st = time.time()
    diter = iter(dl)
    logger.info('start training')
    max = 0   # unused counters retained from the original script
    min = 0   # (they shadow builtins; nothing below reads them)
    mean = 0
    for it in range(cfg.iter_num):
        if it % 20 == 0:  # was `it/20 == 0`, which only printed at it == 0
            print('training {}/{}'.format(it, cfg.iter_num))
        try:
            im, lb, clb = next(diter)
            if not im.size()[0] == cfg.batchsize:
                continue
        except StopIteration:
            diter = iter(dl)
            im, lb, clb = next(diter)  # was `im, lb = ...`, which crashed on epoch wrap
        im = im.cuda()
        lb = lb.cuda()  # 16,1,321,321
        optimizer.zero_grad()
        # the hooks append one entry per forward pass; clear them so the [0]
        # indexing below always refers to the current batch
        features_blobs_1.clear()
        features_blobs_2.clear()
        features_blobs_3.clear()
        features_blobs_4.clear()
        out, pred_c1, pred_c2, pred_c3, pred_c4 = net(im)  # out: 16,21,321,321  pred: (16,21)
        # print(out.size())
        lb = torch.squeeze(lb)
        probs, idx = pred_c1.sort(1, True)
        CAMs_1 = getCams(pred_c1, features_blobs_1[0], weight_softmax)
        CAMs_2 = getCams(pred_c2, features_blobs_2[0], weight_softmax)
        CAMs_3 = getCams(pred_c3, features_blobs_3[0], weight_softmax)
        CAMs_4 = getCams(pred_c4, features_blobs_4[0], weight_softmax)
        # print(features_blobs_1[0].shape, len(features_blobs_1))
        location_map = np.argmax((CAMs_1 + (CAMs_2 + CAMs_3 + CAMs_4) / 3), axis=1)  # (16,321,321)
        pred_mask = torch.argmax(out, dim=1)
        # false_mask = torch.from_numpy(false_mask)
        loss = globloss(out, location_map, pred_mask, clb, pred_c1, pred_c2, pred_c3, pred_c4)
        loss.backward()
        optimizer.step()
        loss = loss.detach().cpu().numpy()
        loss_avg.append(loss)
        ## log message
        if it % cfg.log_iter == 0 and not it == 0:
            loss_avg = sum(loss_avg) / len(loss_avg)
            ed = time.time()
            t_int = ed - st
            lr = optimizer.get_lr()
            msg = 'iter: {}/{}, loss: {:.4f}'.format(it, cfg.iter_num, loss_avg)
            msg = '{}, lr: {:4f}, time: {:.4f}'.format(msg, lr, t_int)
            logger.info(msg)
            st = ed
            loss_avg = []

    ## dump model
    model_pth = osp.join(cfg.res_pth, 'model_final.pkl')
    net.cpu()
    state_dict = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
    torch.save(state_dict, model_pth)
    logger.info('training done, model saved to: {}'.format(model_pth))

    ## test after train
    if cfg.test_after_train:
        net.cuda()
        mIOU = eval_model(net, cfg)
        logger.info('iou in whole is: {}'.format(mIOU))


if __name__ == "__main__":
    args = get_args()
    train(args)  # the original trailing `test(args)` call was dropped: no test() is defined here
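# ---------------------------------------------------------------------------
# The CAM arithmetic used by returnCAM/getCams above, restated in isolation:
# a (C, h, w) feature map and a (num_classes, C) softmax weight matrix
# combine into one activation map per class, which is then rescaled to the
# crop size. Shapes follow the script (321x321 crops); the function name is
# invented for this sketch.
import numpy as np
import cv2

def class_activation_maps(feature, weight_softmax, out_hw=(321, 321)):
    c, h, w = feature.shape
    cams = weight_softmax @ feature.reshape(c, h * w)   # (num_classes, h*w)
    cams = cams.reshape(-1, h, w)
    cams = cams - cams.min()
    cams = cams / (cams.max() + 1e-12)                  # normalise to [0, 1]
    cams = np.uint8(255 * cams)
    return np.stack([cv2.resize(m, out_hw) for m in cams])  # (num_classes, 321, 321)
# ---------------------------------------------------------------------------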
// Copyright 2012 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef V8_HEAP_HEAP_H_ #define V8_HEAP_HEAP_H_ #include <atomic> #include <cmath> #include <memory> #include <unordered_map> #include <unordered_set> #include <vector> // Clients of this interface shouldn't depend on lots of heap internals. // Do not include anything from src/heap here! #include "include/v8-callbacks.h" #include "include/v8-embedder-heap.h" #include "include/v8-internal.h" #include "include/v8-isolate.h" #include "src/base/atomic-utils.h" #include "src/base/enum-set.h" #include "src/base/platform/condition-variable.h" #include "src/base/platform/mutex.h" #include "src/builtins/accessors.h" #include "src/common/assert-scope.h" #include "src/common/globals.h" #include "src/heap/allocation-observer.h" #include "src/heap/allocation-result.h" #include "src/heap/heap-allocator.h" #include "src/init/heap-symbols.h" #include "src/objects/allocation-site.h" #include "src/objects/fixed-array.h" #include "src/objects/hash-table.h" #include "src/objects/heap-object.h" #include "src/objects/js-array-buffer.h" #include "src/objects/objects.h" #include "src/objects/smi.h" #include "src/objects/visitors.h" #include "src/roots/roots.h" #include "src/utils/allocation.h" #include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck namespace v8 { namespace debug { using OutOfMemoryCallback = void (*)(void* data); } // namespace debug namespace internal { namespace heap { class HeapTester; class TestMemoryAllocatorScope; } // namespace heap namespace third_party_heap { class Heap; class Impl; } // namespace third_party_heap class IncrementalMarking; class BackingStore; class JSArrayBuffer; class JSPromise; class NativeContext; using v8::MemoryPressureLevel; class ArrayBufferCollector; class ArrayBufferSweeper; class BasicMemoryChunk; class CodeLargeObjectSpace; class CodeRange; class CollectionBarrier; class ConcurrentAllocator; class ConcurrentMarking; class CppHeap; class GCIdleTimeHandler; class GCIdleTimeHeapState; class GCTracer; template <typename T> class GlobalHandleVector; class IsolateSafepoint; class HeapObjectAllocationTracker; class HeapObjectsFilter; class HeapStats; class Isolate; class JSFinalizationRegistry; class LinearAllocationArea; class LocalEmbedderHeapTracer; class LocalHeap; class MarkingBarrier; class MemoryAllocator; class MemoryChunk; class MemoryMeasurement; class MemoryReducer; class MinorMarkCompactCollector; class ObjectIterator; class ObjectStats; class Page; class PagedSpace; class ReadOnlyHeap; class RootVisitor; class SafepointScope; class ScavengeJob; class Scavenger; class ScavengerCollector; class SharedReadOnlySpace; class Space; class StressScavengeObserver; class TimedHistogram; class WeakObjectRetainer; enum ArrayStorageAllocationMode { DONT_INITIALIZE_ARRAY_ELEMENTS, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE }; enum class ClearRecordedSlots { kYes, kNo }; enum class InvalidateRecordedSlots { kYes, kNo }; enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory }; enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes }; enum class RetainingPathOption { kDefault, kTrackEphemeronPath }; // These values are persisted to logs. Entries should not be renumbered and // numeric values should never be reused. If you add new items here, update // src/tools/metrics/histograms/enums.xml in chromium. 
enum class GarbageCollectionReason : int { kUnknown = 0, kAllocationFailure = 1, kAllocationLimit = 2, kContextDisposal = 3, kCountersExtension = 4, kDebugger = 5, kDeserializer = 6, kExternalMemoryPressure = 7, kFinalizeMarkingViaStackGuard = 8, kFinalizeMarkingViaTask = 9, kFullHashtable = 10, kHeapProfiler = 11, kTask = 12, kLastResort = 13, kLowMemoryNotification = 14, kMakeHeapIterable = 15, kMemoryPressure = 16, kMemoryReducer = 17, kRuntime = 18, kSamplingProfiler = 19, kSnapshotCreator = 20, kTesting = 21, kExternalFinalize = 22, kGlobalAllocationLimit = 23, kMeasureMemory = 24, kBackgroundAllocationFailure = 25, kLastReason = kBackgroundAllocationFailure, }; static_assert(kGarbageCollectionReasonMaxValue == static_cast<int>(GarbageCollectionReason::kLastReason), "The value of kGarbageCollectionReasonMaxValue is inconsistent."); enum class YoungGenerationHandling { kRegularScavenge = 0, kFastPromotionDuringScavenge = 1, // Histogram::InspectConstructionArguments in chromium requires us to have at // least three buckets. kUnusedBucket = 2, // If you add new items here, then update the young_generation_handling in // counters.h. // Also update src/tools/metrics/histograms/histograms.xml in chromium. }; enum class GCIdleTimeAction : uint8_t; enum class SkipRoot { kExternalStringTable, kGlobalHandles, kOldGeneration, kStack, kMainThreadHandles, kUnserializable, kWeak }; enum UnprotectMemoryOrigin { kMainThread, kMaybeOffMainThread, }; class StrongRootsEntry final { explicit StrongRootsEntry(const char* label) : label(label) {} // Label that identifies the roots in tooling. const char* label; FullObjectSlot start; FullObjectSlot end; StrongRootsEntry* prev; StrongRootsEntry* next; friend class Heap; }; #ifdef DEBUG struct CommentStatistic { const char* comment; int size; int count; void Clear() { comment = nullptr; size = 0; count = 0; } // Must be small, since an iteration is used for lookup. static const int kMaxComments = 64; }; #endif using EphemeronRememberedSet = std::unordered_map<EphemeronHashTable, std::unordered_set<int>, Object::Hasher>; class Heap { public: // Stores ephemeron entries where the EphemeronHashTable is in old-space, // and the key of the entry is in new-space. Such keys do not appear in the // usual OLD_TO_NEW remembered set. EphemeronRememberedSet ephemeron_remembered_set_; enum FindMementoMode { kForRuntime, kForGC }; enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault }; enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT, TEAR_DOWN }; // Emits GC events for DevTools timeline. 
  class V8_NODISCARD DevToolsTraceEventScope {
   public:
    DevToolsTraceEventScope(Heap* heap, const char* event_name,
                            const char* event_type);
    ~DevToolsTraceEventScope();

   private:
    Heap* heap_;
    const char* event_name_;
  };

  class ExternalMemoryAccounting {
   public:
    int64_t total() { return total_.load(std::memory_order_relaxed); }
    int64_t limit() { return limit_.load(std::memory_order_relaxed); }
    int64_t low_since_mark_compact() {
      return low_since_mark_compact_.load(std::memory_order_relaxed);
    }

    void ResetAfterGC() {
      set_low_since_mark_compact(total());
      set_limit(total() + kExternalAllocationSoftLimit);
    }

    int64_t Update(int64_t delta) {
      const int64_t amount =
          total_.fetch_add(delta, std::memory_order_relaxed) + delta;
      if (amount < low_since_mark_compact()) {
        set_low_since_mark_compact(amount);
        set_limit(amount + kExternalAllocationSoftLimit);
      }
      return amount;
    }

    int64_t AllocatedSinceMarkCompact() {
      int64_t total_bytes = total();
      int64_t low_since_mark_compact_bytes = low_since_mark_compact();
      if (total_bytes <= low_since_mark_compact_bytes) {
        return 0;
      }
      return static_cast<uint64_t>(total_bytes - low_since_mark_compact_bytes);
    }

   private:
    void set_total(int64_t value) {
      total_.store(value, std::memory_order_relaxed);
    }

    void set_limit(int64_t value) {
      limit_.store(value, std::memory_order_relaxed);
    }

    void set_low_since_mark_compact(int64_t value) {
      low_since_mark_compact_.store(value, std::memory_order_relaxed);
    }

    // The amount of external memory registered through the API.
    std::atomic<int64_t> total_{0};

    // The limit when to trigger memory pressure from the API.
    std::atomic<int64_t> limit_{kExternalAllocationSoftLimit};

    // Caches the amount of external memory registered at the last MC.
    std::atomic<int64_t> low_since_mark_compact_{0};
  };

  using PretenuringFeedbackMap =
      std::unordered_map<AllocationSite, size_t, Object::Hasher>;

  // Taking this mutex prevents the GC from entering a phase that relocates
  // object references.
  base::Mutex* relocation_mutex() { return &relocation_mutex_; }

  // Support for context snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  using Reservation = std::vector<Chunk>;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
  static const int kHeapLimitMultiplier = 1;
#else
  static const int kPointerMultiplier = kTaggedSize / 4;
  // The heap limit needs to be computed based on the system pointer size
  // because we want a pointer-compressed heap to have a larger limit than an
  // ordinary 32-bit heap, which is constrained by the 2GB virtual address
  // space.
  static const int kHeapLimitMultiplier = kSystemPointerSize / 4;
#endif

  static const size_t kMaxInitialOldGenerationSize =
      256 * MB * kHeapLimitMultiplier;

  // These constants control heap configuration based on the physical memory.
  static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
  // Young generation size is the same for compressed heaps and 32-bit heaps.
  static constexpr size_t kOldGenerationToSemiSpaceRatio =
      128 * kHeapLimitMultiplier / kPointerMultiplier;
  static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory =
      256 * kHeapLimitMultiplier / kPointerMultiplier;
  static constexpr size_t kOldGenerationLowMemory =
      128 * MB * kHeapLimitMultiplier;
  static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
#if ENABLE_HUGEPAGE
  static constexpr size_t kMinSemiSpaceSize =
      kHugePageSize * kPointerMultiplier;
  static constexpr size_t kMaxSemiSpaceSize =
      kHugePageSize * 16 * kPointerMultiplier;
#else
  static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
  static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
#endif

  STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
  STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  // GCs that are forced, either through testing configurations (requiring
  // --expose-gc) or through DevTools (using LowMemoryNotification).
  static const int kForcedGC = 2;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInTaggedWords = 2;

  static const int kMinPromotedPercentForFastPromotionMode = 90;

  STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
                Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
                Internals::kNullValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
                Internals::kTrueValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
                Internals::kFalseValueRootIndex);
  STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
                Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetMaximumFillToAlign(
      AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  V8_EXPORT_PRIVATE static int GetFillToAlign(Address address,
                                              AllocationAlignment alignment);

  // Returns the size of the initial area of a code-range, which is marked
  // writable and reserved to contain unwind information.
  static size_t GetCodeRangeReservedAreaSize();

  [[noreturn]] void FatalProcessOutOfMemory(const char* location);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Helper function to get the bytecode flushing mode based on the flags. This
  // is required because it is not safe to access flags in concurrent marker.
  static inline base::EnumSet<CodeFlushMode> GetCodeFlushMode(Isolate* isolate);

  static uintptr_t ZapValue() {
    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
  }

  static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
    return collector == GarbageCollector::SCAVENGER ||
           collector == GarbageCollector::MINOR_MARK_COMPACTOR;
  }

  static inline GarbageCollector YoungGenerationCollector() {
    return (FLAG_minor_mc) ?
static inline const char* CollectorName(GarbageCollector collector) {
  switch (collector) {
    case GarbageCollector::SCAVENGER:
      return "Scavenger";
    case GarbageCollector::MARK_COMPACTOR:
      return "Mark-Compact";
    case GarbageCollector::MINOR_MARK_COMPACTOR:
      return "Minor Mark-Compact";
  }
  return "Unknown collector";
}

static inline const char* CollectorName(v8::GCType gc_type) {
  switch (gc_type) {
    case kGCTypeScavenge:
      return "Scavenger";
    case kGCTypeMarkSweepCompact:
      return "Mark-Compact";
    case kGCTypeMinorMarkCompact:
      return "Minor Mark-Compact";
    default:
      break;
  }
  return "Unknown collector";
}

// Copy block of memory from src to dst. The size of the block must be
// aligned by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);

// Executes generational and/or marking write barrier for a [start, end) range
// of non-weak slots inside |object|.
template <typename TSlot>
V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object, TSlot start,
                                            TSlot end);

V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);

V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
                                                      Address slot,
                                                      HeapObject value);

V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(EphemeronHashTable table,
                                                      Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
    Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
    Code host, RelocInfo* rinfo, HeapObject value);
V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);

// Notifies the heap that it is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();

void NotifyBootstrapComplete();

void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);

inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();

size_t NewSpaceSize();
size_t NewSpaceCapacity();

// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
                                 ObjectSlot src_slot, int len,
                                 WriteBarrierMode mode);

// Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges must not overlap.
template <typename TSlot>
void CopyRange(HeapObject dst_object, TSlot dst_slot, TSlot src_slot, int len,
               WriteBarrierMode mode);

// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
// pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
    Address addr, int size, ClearRecordedSlots clear_slots_mode);

void CreateFillerObjectAtBackground(Address addr, int size,
                                    ClearFreedMemoryMode clear_memory_mode);

template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);

bool CanMoveObjectStart(HeapObject object);

bool IsImmovable(HeapObject object);

V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);
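// Sketch of the MoveRange/CopyRange contract above (the call sites and slot
// variables are hypothetical):
//
//   // Overlapping ranges within one object: MoveRange behaves like memmove
//   // and additionally executes the write barriers required by `mode`.
//   heap->MoveRange(array, dst_slot, src_slot, len, UPDATE_WRITE_BARRIER);
//
//   // Disjoint ranges: CopyRange behaves like memcpy plus write barriers;
//   // passing overlapping ranges here would be a bug.
//   heap->CopyRange(array, dst_slot, src_slot, len, UPDATE_WRITE_BARRIER);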
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
                                                    int elements_to_trim);

// Trim the given array from the right.
V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj,
                                           int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);

// Converts the given boolean condition to JavaScript boolean value.
inline Oddball ToBoolean(bool condition);

// Notify the heap that a context has been disposed.
V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);

void set_native_contexts_list(Object object) {
  native_contexts_list_.store(object.ptr(), std::memory_order_release);
}

Object native_contexts_list() const {
  return Object(native_contexts_list_.load(std::memory_order_acquire));
}

void set_allocation_sites_list(Object object) {
  allocation_sites_list_ = object;
}
Object allocation_sites_list() { return allocation_sites_list_; }

void set_dirty_js_finalization_registries_list(Object object) {
  dirty_js_finalization_registries_list_ = object;
}
Object dirty_js_finalization_registries_list() {
  return dirty_js_finalization_registries_list_;
}
void set_dirty_js_finalization_registries_list_tail(Object object) {
  dirty_js_finalization_registries_list_tail_ = object;
}
Object dirty_js_finalization_registries_list_tail() {
  return dirty_js_finalization_registries_list_tail_;
}

// Used in CreateAllocationSiteStub and the (de)serializer.
Address allocation_sites_list_address() {
  return reinterpret_cast<Address>(&allocation_sites_list_);
}

// Traverse all the allocation sites [nested_site and weak_next] in the list
// and, for each, call the visitor.
void ForeachAllocationSite(
    Object list, const std::function<void(AllocationSite)>& visitor);

// Number of mark-sweeps.
int ms_count() const { return ms_count_; }

// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
bool AllowedToBeMigrated(Map map, HeapObject object, AllocationSpace dest);

void CheckHandleCount();

// Print short heap statistics.
void PrintShortHeapStatistics();

// Print statistics of freelists of old_space:
// with FLAG_trace_gc_freelists: summary of each FreeListCategory.
// with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
// FreeListCategory of each page.
void PrintFreeListsStats();
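// Sketch of walking the allocation-site list via ForeachAllocationSite
// above (the lambda body is hypothetical):
//
//   heap->ForeachAllocationSite(
//       heap->allocation_sites_list(), [](AllocationSite site) {
//         // Inspect `site`; both top-level sites and their nested sites
//         // are visited.
//       });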
// Dump heap statistics in JSON format.
void DumpJSONHeapStatistics(std::stringstream& stream);

bool write_protect_code_memory() const { return write_protect_code_memory_; }

uintptr_t code_space_memory_modification_scope_depth() {
  return code_space_memory_modification_scope_depth_;
}

void increment_code_space_memory_modification_scope_depth() {
  code_space_memory_modification_scope_depth_++;
}

void decrement_code_space_memory_modification_scope_depth() {
  code_space_memory_modification_scope_depth_--;
}

void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
                                     UnprotectMemoryOrigin origin);
V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(
    HeapObject object, UnprotectMemoryOrigin origin);
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();

void IncrementCodePageCollectionMemoryModificationScopeDepth() {
  code_page_collection_memory_modification_scope_depth_++;
}

void DecrementCodePageCollectionMemoryModificationScopeDepth() {
  code_page_collection_memory_modification_scope_depth_--;
}

uintptr_t code_page_collection_memory_modification_scope_depth() {
  return code_page_collection_memory_modification_scope_depth_;
}

inline HeapState gc_state() const {
  return gc_state_.load(std::memory_order_relaxed);
}
void SetGCState(HeapState state);
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
bool force_oom() const { return force_oom_; }

bool ignore_local_gc_requests() const {
  return ignore_local_gc_requests_depth_ > 0;
}

inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

bool IsGCWithoutStack() const;

// If an object has an AllocationMemento trailing it, return it, otherwise
// return a null AllocationMemento.
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);

// Performs GC after background allocation failure.
void CollectGarbageForBackground(LocalHeap* local_heap);

//
// Support for the API.
//

void CreateApiObjects();

// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);

V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level,
                                                  bool is_isolate_locked);
void CheckMemoryPressure();

V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback,
                                                void* data);
V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback(
    v8::NearHeapLimitCallback callback, size_t heap_limit);
V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit(
    double threshold_percent);

void AppendArrayBufferExtension(JSArrayBuffer object,
                                ArrayBufferExtension* extension);
void DetachArrayBufferExtension(JSArrayBuffer object,
                                ArrayBufferExtension* extension);

IsolateSafepoint* safepoint() { return safepoint_.get(); }

V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;

void VerifyNewSpaceTop();

void RecordStats(HeapStats* stats, bool take_snapshot = false);

bool MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
                   v8::MeasureMemoryExecution execution);

std::unique_ptr<v8::MeasureMemoryDelegate> MeasureMemoryDelegate(
    Handle<NativeContext> context, Handle<JSPromise> promise,
    v8::MeasureMemoryMode mode);

// Check new space expansion criteria and expand semi-spaces if they were
// hit.
void CheckNewSpaceExpansionCriteria();

void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
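// Sketch of the near-heap-limit hook above (the callback is hypothetical;
// the signature follows v8::NearHeapLimitCallback):
//
//   size_t OnNearHeapLimit(void* data, size_t current_limit,
//                          size_t initial_limit) {
//     // Return a raised limit to keep running, or current_limit to let V8
//     // proceed toward its out-of-memory handling.
//     return current_limit + 16 * MB;
//   }
//   heap->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);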
// An object should be promoted if the object has survived a
// scavenge operation.
inline bool ShouldBePromoted(Address old_address);

void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

inline int NextScriptId();
inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();

void SetSerializedObjects(FixedArray objects);
void SetSerializedGlobalProxySizes(FixedArray sizes);

void SetBasicBlockProfilingData(Handle<ArrayList> list);

// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);

int64_t external_memory_hard_limit() { return max_old_generation_size() / 2; }

V8_INLINE int64_t external_memory();
V8_EXPORT_PRIVATE int64_t external_memory_limit();
V8_INLINE int64_t update_external_memory(int64_t delta);

V8_EXPORT_PRIVATE size_t YoungArrayBufferBytes();
V8_EXPORT_PRIVATE size_t OldArrayBufferBytes();

uint64_t backing_store_bytes() const {
  return backing_store_bytes_.load(std::memory_order_relaxed);
}

void CompactWeakArrayLists();

V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
                                      Handle<Map> map);

// This event is triggered after an object is moved to a new place.
void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);

bool deserialization_complete() const { return deserialization_complete_; }

// We can only invoke Safepoint() on the main thread local heap after
// deserialization is complete. Before that, main_thread_local_heap_ might be
// null.
V8_INLINE bool CanSafepoint() const { return deserialization_complete(); }

bool HasLowAllocationRate();
bool HasHighFragmentation();
bool HasHighFragmentation(size_t used, size_t committed);

void ActivateMemoryReducerIfNeeded();

V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();

bool HighMemoryPressure() {
  return memory_pressure_level_.load(std::memory_order_relaxed) !=
         MemoryPressureLevel::kNone;
}

bool CollectionRequested();

void CheckCollectionRequested();

void RestoreHeapLimit(size_t heap_limit) {
  // Do not set the limit lower than the live size + some slack.
  size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
  set_max_old_generation_size(
      std::min(max_old_generation_size(), std::max(heap_limit, min_limit)));
}

// ===========================================================================
// Initialization. ===========================================================
// ===========================================================================

void ConfigureHeap(const v8::ResourceConstraints& constraints);
void ConfigureHeapDefault();

// Prepares the heap, setting up for deserialization.
void SetUp(LocalHeap* main_thread_local_heap);

// Sets read-only heap and space.
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);

void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);

// Sets up the heap memory without creating any objects.
void SetUpSpaces(LinearAllocationArea* new_allocation_info,
                 LinearAllocationArea* old_allocation_info);

// Prepares the heap, setting up for deserialization.
void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);

// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();

// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
bool CreateHeapObjects();

// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();

// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();

// Destroys all memory allocated by the heap.
void TearDown();
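// Worked example for RestoreHeapLimit() above: with SizeOfObjects() at
// 400MB, min_limit is 400MB + 100MB = 500MB, so a requested heap_limit of
// 256MB is clamped up to 500MB, and the result is additionally capped at
// the current max_old_generation_size(). (The numbers are illustrative.)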
// Returns whether SetUp has been called.
bool HasBeenSetUp() const;

// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================

inline Address NewSpaceTop();

NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
inline PagedSpace* space_for_maps();
OldLargeObjectSpace* lo_space() { return lo_space_; }
CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }

inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);

// ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================

GCTracer* tracer() { return tracer_.get(); }

MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
const MemoryAllocator* memory_allocator() const {
  return memory_allocator_.get();
}

inline ConcurrentAllocator* concurrent_allocator_for_maps();

inline Isolate* isolate();

MarkCompactCollector* mark_compact_collector() {
  return mark_compact_collector_.get();
}

MinorMarkCompactCollector* minor_mark_compact_collector() {
  return minor_mark_compact_collector_.get();
}

ArrayBufferSweeper* array_buffer_sweeper() {
  return array_buffer_sweeper_.get();
}

// The potentially overreserved address space region reserved by the code
// range if it exists, or an empty region otherwise.
const base::AddressRegion& code_region();

CodeRange* code_range() { return code_range_.get(); }

// The base of the code range if it exists, or a null address otherwise.
inline Address code_range_base();

LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }

Heap* AsHeap() { return this; }

// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================

// Shortcut to the roots table stored in the Isolate.
V8_INLINE RootsTable& roots_table();

// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
V8_INLINE void SetRootScriptList(Object value);
V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
V8_INLINE void SetMessageListeners(TemplateList value);
V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);

StrongRootsEntry* RegisterStrongRoots(const char* label, FullObjectSlot start,
                                      FullObjectSlot end);
void UnregisterStrongRoots(StrongRootsEntry* entry);
void UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
                       FullObjectSlot end);

void SetBuiltinsConstantsTable(FixedArray cache);
void SetDetachedContexts(WeakArrayList detached_contexts);

// A full copy of the interpreter entry trampoline, used as a template to
// create copies of the builtin at runtime. The copies are used to create
// better profiling information for ticks in bytecode execution. Note that
// this is always a copy of the full builtin, i.e. not the off-heap
// trampoline.
// See also: FLAG_interpreted_frames_native_stack.
void SetInterpreterEntryTrampolineForProfiling(Code code);

void EnqueueDirtyJSFinalizationRegistry(
    JSFinalizationRegistry finalization_registry,
    std::function<void(HeapObject object, ObjectSlot slot, Object target)>
        gc_notify_updated_slot);

MaybeHandle<JSFinalizationRegistry> DequeueDirtyJSFinalizationRegistry();

// Called from Heap::NotifyContextDisposed to remove all
// FinalizationRegistries with {context} from the dirty list when the context
// e.g. navigates away or is detached. If the dirty list is empty afterwards,
// the cleanup task is aborted if needed.
void RemoveDirtyFinalizationRegistriesOnContext(NativeContext context);

inline bool HasDirtyJSFinalizationRegistries();

void PostFinalizationRegistryCleanupTaskIfNeeded();

void set_is_finalization_registry_cleanup_task_posted(bool posted) {
  is_finalization_registry_cleanup_task_posted_ = posted;
}

bool is_finalization_registry_cleanup_task_posted() {
  return is_finalization_registry_cleanup_task_posted_;
}

V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
void ClearKeptObjects();

// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================

// Switch whether inline bump-pointer allocation should be used.
V8_EXPORT_PRIVATE void EnableInlineAllocation();
V8_EXPORT_PRIVATE void DisableInlineAllocation();

// ===========================================================================
// Methods triggering GCs. ===================================================
// ===========================================================================

// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
V8_EXPORT_PRIVATE bool CollectGarbage(
    AllocationSpace space, GarbageCollectionReason gc_reason,
    const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

// Performs a full garbage collection.
V8_EXPORT_PRIVATE void CollectAllGarbage(
    int flags, GarbageCollectionReason gc_reason,
    const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

// Last hope GC, should try to squeeze as much as possible.
V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(
    GarbageCollectionReason gc_reason);

// Precise garbage collection that potentially finalizes already running
// incremental marking before performing an atomic garbage collection.
// Only use if absolutely necessary or in tests to avoid floating garbage!
V8_EXPORT_PRIVATE void PreciseCollectAllGarbage(
    int flags, GarbageCollectionReason gc_reason,
    const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

// Performs garbage collection operation for the shared heap.
V8_EXPORT_PRIVATE void CollectSharedGarbage(
    GarbageCollectionReason gc_reason);

// Reports an external memory pressure event; either performs a major GC or
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();

using GetExternallyAllocatedMemoryInBytesCallback =
    v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback;

void SetGetExternallyAllocatedMemoryInBytesCallback(
    GetExternallyAllocatedMemoryInBytesCallback callback) {
  external_memory_callback_ = callback;
}

// Invoked when GC was requested via the stack guard.
void HandleGCRequest();
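// Sketch of wiring the externally-allocated-memory callback above (the
// counter and function are hypothetical):
//
//   size_t GetMyEmbedderExternalBytes() { return g_my_external_bytes; }
//   heap->SetGetExternallyAllocatedMemoryInBytesCallback(
//       GetMyEmbedderExternalBytes);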
// ===========================================================================
// Iterators. ================================================================
// ===========================================================================

// None of these methods iterate over the read-only roots. To do this use
// ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
// garbage collection and is usually only performed as part of
// (de)serialization or heap verification.

// Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateRootsIncludingClients(RootVisitor* v,
                                  base::EnumSet<SkipRoot> options);

// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
// Iterates over weak string tables.
void IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateWeakGlobalHandles(RootVisitor* v);
void IterateBuiltins(RootVisitor* v);
void IterateStackRoots(RootVisitor* v);

// ===========================================================================
// Remembered set API. =======================================================
// ===========================================================================

// Used to query the incremental marking status in generated code.
Address* IsMarkingFlagAddress() {
  return reinterpret_cast<Address*>(&is_marking_flag_);
}

void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }

void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);

#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
void VerifySlotRangeHasNoRecordedSlots(Address start, Address end);
#endif

// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================

int GCFlagsForIncrementalMarking() {
  return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
                                        : kNoGCFlags;
}

// Start incremental marking and ensure that the idle time handler can
// perform incremental steps.
V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
    GarbageCollectionReason gc_reason,
    GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

// Starts incremental marking assuming incremental marking is currently
// stopped.
V8_EXPORT_PRIVATE void StartIncrementalMarking(
    int gc_flags, GarbageCollectionReason gc_reason,
    GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);

void StartIncrementalMarkingIfAllocationLimitIsReached(
    int gc_flags,
    GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();

void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
// Synchronously finalizes incremental marking.
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(
    GarbageCollectionReason gc_reason);

void CompleteSweepingFull();
void CompleteSweepingYoung(GarbageCollector collector);
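// Sketch of how the incremental marking entry points above compose (the
// reason value and the guard are illustrative, not a prescribed sequence):
//
//   if (heap->incremental_marking()->IsStopped()) {
//     heap->StartIncrementalMarking(Heap::kNoGCFlags,
//                                   GarbageCollectionReason::kTesting);
//   }
//   // ... the mutator runs while marking progresses in steps ...
//   heap->FinalizeIncrementalMarkingAtomically(
//       GarbageCollectionReason::kTesting);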
// Ensures that sweeping is finished for that object's page.
void EnsureSweepingCompleted(HeapObject object);

IncrementalMarking* incremental_marking() const {
  return incremental_marking_.get();
}

MarkingBarrier* marking_barrier() const { return marking_barrier_.get(); }

// ===========================================================================
// Concurrent marking API. ===================================================
// ===========================================================================

ConcurrentMarking* concurrent_marking() const {
  return concurrent_marking_.get();
}

// The runtime uses this function to notify the GC of potentially unsafe
// object layout changes that require special synchronization with the
// concurrent marker. The old size is the size of the object before the
// layout change. By default recorded slots in the object are invalidated.
// Pass InvalidateRecordedSlots::kNo if this is not necessary or to perform
// this manually.
void NotifyObjectLayoutChange(
    HeapObject object, const DisallowGarbageCollection&,
    InvalidateRecordedSlots invalidate_recorded_slots =
        InvalidateRecordedSlots::kYes);

#ifdef VERIFY_HEAP
// This function checks that either
// - the map transition is safe,
// - or it was communicated to GC using NotifyObjectLayoutChange.
V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
                                                Map new_map);
// Checks that this is a safe map transition.
V8_EXPORT_PRIVATE void VerifySafeMapTransition(HeapObject object,
                                               Map new_map);
#endif

// ===========================================================================
// Deoptimization support API. ===============================================
// ===========================================================================

// Setters for code offsets of well-known deoptimization targets.
void SetConstructStubCreateDeoptPCOffset(int pc_offset);
void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
void SetInterpreterEntryReturnPCOffset(int pc_offset);

// Invalidates references in the given {code} object that are referenced
// transitively from the deoptimization data. Mutates write-protected code.
void InvalidateCodeDeoptimizationData(Code code);

void DeoptMarkedAllocationSites();

bool DeoptMaybeTenuredAllocationSites();

// ===========================================================================
// Embedder heap tracer support. =============================================
// ===========================================================================

LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
  return local_embedder_heap_tracer_.get();
}

V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
EmbedderHeapTracer* GetEmbedderHeapTracer() const;

void RegisterExternallyReferencedObject(Address* location);
V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
    EmbedderHeapTracer::EmbedderStackState stack_state);

EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;

// ===========================================================================
// Unified heap (C++) support. ===============================================
// ===========================================================================

V8_EXPORT_PRIVATE void AttachCppHeap(v8::CppHeap* cpp_heap);
V8_EXPORT_PRIVATE void DetachCppHeap();

v8::CppHeap* cpp_heap() const { return cpp_heap_; }

const cppgc::EmbedderStackState* overriden_stack_state() const;
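// Sketch of the layout-change protocol above (the call site and the map
// transition are hypothetical):
//
//   {
//     DisallowGarbageCollection no_gc;
//     heap->NotifyObjectLayoutChange(object, no_gc);  // invalidates slots
//     object.set_map(new_map);                        // actual transition
//   }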
// ===========================================================================
// Embedder roots optimizations. =============================================
// ===========================================================================

V8_EXPORT_PRIVATE void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);

EmbedderRootsHandler* GetEmbedderRootsHandler() const;

// ===========================================================================
// External string table API. ================================================
// ===========================================================================

// Registers an external string.
inline void RegisterExternalString(String string);

// Called when a string's resource is changed. The size of the payload is
// sent as an argument to the method.
V8_EXPORT_PRIVATE void UpdateExternalString(String string, size_t old_payload,
                                            size_t new_payload);

// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String string);

static String UpdateYoungReferenceInExternalStringTableEntry(
    Heap* heap, FullObjectSlot pointer);

// ===========================================================================
// Methods checking/returning the space of a given object/address. ===========
// ===========================================================================

// Returns whether the object resides in new space.
static inline bool InYoungGeneration(Object object);
static inline bool InYoungGeneration(MaybeObject object);
static inline bool InYoungGeneration(HeapObject heap_object);
static inline bool InFromPage(Object object);
static inline bool InFromPage(MaybeObject object);
static inline bool InFromPage(HeapObject heap_object);
static inline bool InToPage(Object object);
static inline bool InToPage(MaybeObject object);
static inline bool InToPage(HeapObject heap_object);

// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);

// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
// heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
// Same as above, but checks whether the object resides in any of the code
// spaces.
V8_EXPORT_PRIVATE bool ContainsCode(HeapObject value) const;
// Checks whether an address/object is in the shared heap.
V8_EXPORT_PRIVATE bool SharedHeapContains(HeapObject value) const;

// Returns whether the object should be in the shared old space.
V8_EXPORT_PRIVATE bool ShouldBeInSharedOldSpace(HeapObject value);

// Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;

// Returns true when this heap is shared.
V8_EXPORT_PRIVATE bool IsShared();

// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;

static inline Heap* FromWritableHeapObject(HeapObject obj);
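// Sketch of the space predicates above (illustrative):
//
//   if (Heap::InYoungGeneration(obj)) {
//     // `obj` may still move during a scavenge; record slots accordingly.
//   }
//   DCHECK(heap->Contains(obj));  // non-read-only heap only; use
//                                 // IsValidHeapObject to cover both heaps.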
// ===========================================================================
// Object statistics tracking. ===============================================
// ===========================================================================

// Returns the number of buckets used by object statistics tracking during a
// major GC. Note that the following methods fail gracefully when the bounds
// are exceeded.
size_t NumberOfTrackedHeapObjectTypes();

// Returns object statistics about count and size at the last major GC.
// Objects are being grouped into buckets that roughly resemble existing
// instance types.
size_t ObjectCountAtLastGC(size_t index);
size_t ObjectSizeAtLastGC(size_t index);

// Retrieves names of buckets used by object statistics tracking.
bool GetObjectTypeName(size_t index, const char** object_type,
                       const char** object_sub_type);

// The total number of native context objects on the heap.
size_t NumberOfNativeContexts();

// The total number of native contexts that were detached but were not
// garbage collected yet.
size_t NumberOfDetachedContexts();

// ===========================================================================
// Code statistics. ==========================================================
// ===========================================================================

// Collect code (Code and BytecodeArray objects) statistics.
void CollectCodeStatistics();

// ===========================================================================
// GC statistics. ============================================================
// ===========================================================================

// Returns the maximum amount of memory reserved for the heap.
V8_EXPORT_PRIVATE size_t MaxReserved();
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size(); }

// Limit on the max old generation size imposed by the underlying allocator.
V8_EXPORT_PRIVATE static size_t AllocatorLimitOnMaxOldGenerationSize();

V8_EXPORT_PRIVATE static size_t HeapSizeFromPhysicalMemory(
    uint64_t physical_memory);
V8_EXPORT_PRIVATE static void GenerationSizesFromHeapSize(
    size_t heap_size, size_t* young_generation_size,
    size_t* old_generation_size);
V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromOldGenerationSize(
    size_t old_generation_size);
V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromSemiSpaceSize(
    size_t semi_space_size);
V8_EXPORT_PRIVATE static size_t SemiSpaceSizeFromYoungGenerationSize(
    size_t young_generation_size);
V8_EXPORT_PRIVATE static size_t MinYoungGenerationSize();
V8_EXPORT_PRIVATE static size_t MinOldGenerationSize();
V8_EXPORT_PRIVATE static size_t MaxOldGenerationSize(
    uint64_t physical_memory);

// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
size_t Capacity();

// Returns the capacity of the old generation.
V8_EXPORT_PRIVATE size_t OldGenerationCapacity();

// Returns the amount of memory currently held alive by the unmapper.
size_t CommittedMemoryOfUnmapper();

// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();

// Returns the amount of memory currently committed for the old space.
size_t CommittedOldGenerationMemory();

// Returns the amount of executable memory currently committed for the heap.
size_t CommittedMemoryExecutable();

// Returns the amount of physical memory currently committed for the heap.
size_t CommittedPhysicalMemory();

// Returns the maximum amount of memory ever committed for the heap.
size_t MaximumCommittedMemory() { return maximum_committed_; }
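// Worked sketch of the size derivations above: the young generation is the
// two semi-spaces plus the new large object space, which is sized relative
// to a semi-space by kNewLargeObjectSpaceToSemiSpaceRatio, i.e.
//
//   YoungGenerationSizeFromSemiSpaceSize(semi_space_size)
//       == semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio)
//
// and GenerationSizesFromHeapSize splits a total heap size such that
// heap_size == young_generation_size + old_generation_size.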
// Updates the maximum committed memory for the heap. Should be called
// whenever a space grows.
void UpdateMaximumCommitted();

// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
size_t Available();

// Returns size of all objects residing in the heap.
V8_EXPORT_PRIVATE size_t SizeOfObjects();

// Returns size of all global handles in the heap.
V8_EXPORT_PRIVATE size_t TotalGlobalHandlesSize();

// Returns size of all allocated/used global handles in the heap.
V8_EXPORT_PRIVATE size_t UsedGlobalHandlesSize();

void UpdateSurvivalStatistics(int start_new_space_size);

inline void IncrementPromotedObjectsSize(size_t object_size) {
  promoted_objects_size_ += object_size;
}
inline size_t promoted_objects_size() { return promoted_objects_size_; }

inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
  semi_space_copied_object_size_ += object_size;
}
inline size_t semi_space_copied_object_size() {
  return semi_space_copied_object_size_;
}

inline size_t SurvivedYoungObjectSize() {
  return promoted_objects_size_ + semi_space_copied_object_size_;
}

inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

inline void IncrementNodesPromoted() { nodes_promoted_++; }

inline void IncrementYoungSurvivorsCounter(size_t survived) {
  survived_last_scavenge_ = survived;
  survived_since_last_expansion_ += survived;
}

void UpdateNewSpaceAllocationCounter();

V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter();

// This should be used only for testing.
void set_new_space_allocation_counter(size_t new_value) {
  new_space_allocation_counter_ = new_value;
}

void UpdateOldGenerationAllocationCounter() {
  old_generation_allocation_counter_at_last_gc_ =
      OldGenerationAllocationCounter();
  old_generation_size_at_last_gc_ = 0;
}

size_t OldGenerationAllocationCounter() {
  return old_generation_allocation_counter_at_last_gc_ +
         PromotedSinceLastGC();
}

size_t EmbedderAllocationCounter() const;

// This should be used only for testing.
void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
  old_generation_allocation_counter_at_last_gc_ = new_value;
}

size_t PromotedSinceLastGC() {
  size_t old_generation_size = OldGenerationSizeOfObjects();
  return old_generation_size > old_generation_size_at_last_gc_
             ? old_generation_size - old_generation_size_at_last_gc_
             : 0;
}

int gc_count() const { return gc_count_; }

bool is_current_gc_forced() const { return is_current_gc_forced_; }

// Returns whether the currently in-progress GC should avoid increasing the
// ages on any objects that live for a set number of collections.
bool ShouldCurrentGCKeepAgesUnchanged() const {
  return is_current_gc_forced_ || is_current_gc_for_heap_profiler_;
}

// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();

// Returns the size of objects held by the EmbedderHeapTracer.
V8_EXPORT_PRIVATE size_t EmbedderSizeOfObjects() const;

// Returns the global size of objects (embedder + V8 non-new spaces).
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();

// We allow incremental marking to overshoot the V8 and global allocation
// limit for performance reasons. If the overshoot is too large then we are
// more eager to finalize incremental marking.
bool AllocationLimitOvershotByLargeMargin();
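// Worked example for the survival counters above: after a scavenge that
// promoted 3MB into old space and copied 1MB within the semi-spaces,
// SurvivedYoungObjectSize() == 3MB + 1MB == 4MB, which
// IncrementYoungSurvivorsCounter() feeds into the new-space growth
// heuristics via survived_since_last_expansion_. (Numbers illustrative.)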
// Return the maximum size objects can be before having to allocate them as
// large objects. This takes into account allocating in the code space for
// which the size of the allocatable space per V8 page may depend on the OS
// page size at runtime. You may use kMaxRegularHeapObjectSize as a constant
// instead if you know the allocation isn't in the code spaces.
inline V8_EXPORT_PRIVATE int MaxRegularHeapObjectSize(
    AllocationType allocation);

// ===========================================================================
// Prologue/epilogue callback methods. =======================================
// ===========================================================================

void AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                           GCType gc_type_filter, void* data);
void RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
                              void* data);
void AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                           GCType gc_type_filter, void* data);
void RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
                              void* data);

void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

// ===========================================================================
// Allocation methods. =======================================================
// ===========================================================================

// Creates a filler object and returns a heap object immediately after it.
V8_EXPORT_PRIVATE HeapObject PrecedeWithFiller(HeapObject object,
                                               int filler_size);

// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over-allocated memory is iterable.
V8_WARN_UNUSED_RESULT HeapObject AlignWithFiller(HeapObject object,
                                                 int object_size,
                                                 int allocation_size,
                                                 AllocationAlignment alignment);

// Allocate an external backing store with the given allocation callback.
// If the callback fails (indicated by a nullptr result) then this function
// will re-try the allocation after performing GCs. This is useful for
// external backing stores that may be retained by (unreachable) V8 objects
// such as ArrayBuffers, ExternalStrings, etc.
//
// The function may also proactively trigger GCs, even if the allocation
// callback does not fail, in order to keep the memory usage low.
V8_EXPORT_PRIVATE void* AllocateExternalBackingStore(
    const std::function<void*(size_t)>& allocate, size_t byte_length);

// ===========================================================================
// Allocation site tracking. =================================================
// ===========================================================================

// Updates the AllocationSite of a given {object}. The entry (including the
// count) is cached on the local pretenuring feedback.
inline void UpdateAllocationSite(
    Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);

// Merges local pretenuring feedback into the global one. Note that this
// method needs to be called after evacuation, as allocation sites may be
// evacuated and this method resolves forward pointers accordingly.
void MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback);
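// Sketch of the retry contract of AllocateExternalBackingStore above (the
// allocator lambda is hypothetical):
//
//   void* data = heap->AllocateExternalBackingStore(
//       [](size_t length) -> void* { return malloc(length); }, byte_length);
//   // A nullptr result from the lambda makes the heap perform GCs and
//   // retry a bounded number of times before nullptr is returned to the
//   // caller.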
// Adds an allocation site to the list of sites to be pretenured during the
// next collection. Added allocation sites are pretenured independent of
// their feedback.
V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
    AllocationSite site);

// ===========================================================================
// Allocation tracking. ======================================================
// ===========================================================================

// Adds {new_space_observer} to new space and {observer} to any other space.
void AddAllocationObserversToAllSpaces(
    AllocationObserver* observer, AllocationObserver* new_space_observer);

// Removes {new_space_observer} from new space and {observer} from any other
// space.
void RemoveAllocationObserversFromAllSpaces(
    AllocationObserver* observer, AllocationObserver* new_space_observer);

// Check if the given object was recently allocated and its fields may appear
// uninitialized to background threads.
// This predicate may be invoked from a background thread.
inline bool IsPendingAllocation(HeapObject object);
inline bool IsPendingAllocation(Object object);

// Notifies that all previously allocated objects are properly initialized
// and ensures that IsPendingAllocation returns false for them. This function
// may be invoked only on the main thread.
V8_EXPORT_PRIVATE void PublishPendingAllocations();

// ===========================================================================
// Heap object allocation tracking. ==========================================
// ===========================================================================

V8_EXPORT_PRIVATE void AddHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker);
V8_EXPORT_PRIVATE void RemoveHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker);
bool has_heap_object_allocation_tracker() const {
  return !allocation_trackers_.empty();
}

// ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================

// Adds the given object to the weak table of retaining path targets.
// On each GC if the marker discovers the object, it will print the retaining
// path. This requires the --track-retaining-path flag.
void AddRetainingPathTarget(Handle<HeapObject> object,
                            RetainingPathOption option);

// ===========================================================================
// Stack frame support. ======================================================
// ===========================================================================

// Returns the Code object for a given interior pointer.
Code GcSafeFindCodeForInnerPointer(Address inner_pointer);

// Returns true if {addr} is contained within {code} and false otherwise.
// Mostly useful for debugging.
bool GcSafeCodeContains(Code code, Address addr);

// Casts a heap object to a code object and checks if the inner_pointer is
// within the object.
Code GcSafeCastToCode(HeapObject object, Address inner_pointer);

// Returns the map of an object. Can be used during garbage collection, i.e.
// it supports a forwarded map. Fails if the map is not the code map.
Map GcSafeMapOfCodeSpaceObject(HeapObject object);

// =============================================================================

#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
V8_EXPORT_PRIVATE void Verify();
// Verify the read-only heap after all read-only heap objects have been
// created.
void VerifyReadOnlyHeap();
void VerifyRememberedSetFor(HeapObject object);
#endif

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
void V8_EXPORT_PRIVATE set_allocation_timeout(int allocation_timeout);
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();

void Print();
void PrintHandles();

// Report code statistics.
void ReportCodeStatistics(const char* title);
#endif
void* GetRandomMmapAddr() {
  void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
#if V8_OS_DARWIN
  // The Darwin kernel [as of macOS 10.12.5] does not clean up page
  // directory entries [PDE] created from mmap or mach_vm_allocate, even
  // after the region is destroyed. Using a virtual address space that is
  // too large causes a leak of about 1 wired [can never be paged out] page
  // per call to mmap(). The page is only reclaimed when the process is
  // killed. Confine the hint to a 32-bit section of the virtual address
  // space. See crbug.com/700928.
  uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
  result = reinterpret_cast<void*>(mmap_region_base_ + offset);
#endif  // V8_OS_DARWIN
#endif  // V8_TARGET_ARCH_X64
  return result;
}

void RegisterCodeObject(Handle<Code> code);

static const char* GarbageCollectionReasonToString(
    GarbageCollectionReason gc_reason);

// Calculates the number of entries for the full-sized number-to-string
// cache.
inline int MaxNumberToStringCacheSize() const;

static Isolate* GetIsolateFromWritableObject(HeapObject object);

// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects.
void MakeHeapIterable();

private:
class AllocationTrackerForDebugging;

using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
                                                      FullObjectSlot pointer);

// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
  ExternalStringTable(const ExternalStringTable&) = delete;
  ExternalStringTable& operator=(const ExternalStringTable&) = delete;

  // Registers an external string.
  inline void AddString(String string);
  bool Contains(String string);

  void IterateAll(RootVisitor* v);
  void IterateYoung(RootVisitor* v);
  void PromoteYoung();

  // Restores internal invariant and gets rid of collected strings. Must be
  // called after each Iterate*() that modified the strings.
  void CleanUpAll();
  void CleanUpYoung();

  // Finalize all registered external strings and clear tables.
  void TearDown();

  void UpdateYoungReferences(
      Heap::ExternalStringTableUpdaterCallback updater_func);
  void UpdateReferences(
      Heap::ExternalStringTableUpdaterCallback updater_func);

 private:
  void Verify();
  void VerifyYoung();

  Heap* const heap_;

  // To speed up scavenge collections, young strings are kept separate from
  // old strings.
  std::vector<Object> young_strings_;
  std::vector<Object> old_strings_;
};

struct StringTypeTable {
  InstanceType type;
  int size;
  RootIndex index;
};

struct ConstantStringTable {
  const char* contents;
  RootIndex index;
};

struct StructTable {
  InstanceType type;
  int size;
  RootIndex index;
};

struct GCCallbackTuple {
  GCCallbackTuple(v8::Isolate::GCCallbackWithData callback, GCType gc_type,
                  void* data)
      : callback(callback), gc_type(gc_type), data(data) {}

  bool operator==(const GCCallbackTuple& other) const;

  v8::Isolate::GCCallbackWithData callback;
  GCType gc_type;
  void* data;
};

static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;

static const int kRememberedUnmappedPages = 128;

static const StringTypeTable string_type_table[];
static const ConstantStringTable constant_string_table[];
static const StructTable struct_table[];

static const int kYoungSurvivalRateHighThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static const int kOldSurvivalRateLowThreshold = 10;

static const int kMaxMarkCompactsInIdleRound = 7;

static const int kInitialFeedbackCapacity = 256;

Heap();
~Heap();

Heap(const Heap&) = delete;
Heap& operator=(const Heap&) = delete;

static bool IsRegularObjectAllocation(AllocationType allocation) {
  return AllocationType::kYoung == allocation ||
         AllocationType::kOld == allocation;
}

static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
  return 0;
}

#define ROOT_ACCESSOR(type, name, CamelName) \
  inline void set_##name(type value);
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }

inline bool ShouldReduceMemory() const {
  return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
}

int NumberOfScavengeTasks();

// Checks whether a global GC is necessary.
GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                        const char** reason);

// Free all LABs in the heap.
void FreeLinearAllocationAreas();

// Free all shared LABs.
void FreeSharedLinearAllocationAreas();

// Free all shared LABs of the main thread.
void FreeMainThreadSharedLinearAllocationAreas();

// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
    GarbageCollector collector, GarbageCollectionReason gc_reason,
    const char* collector_reason,
    const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

// Performs garbage collection in the shared heap.
void PerformSharedGarbageCollection(Isolate* initiator,
                                    GarbageCollectionReason gc_reason);

inline void UpdateOldSpaceLimits();

bool CreateInitialMaps();
void CreateInternalAccessorInfoObjects();
void CreateInitialObjects();

// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();

// Uncommit unused semi space.
V8_EXPORT_PRIVATE bool UncommitFromSpace();

// Fills in bogus values in from space.
void ZapFromSpace();

// Zaps the memory of a code object.
V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                     int size_in_bytes);

// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If the memory after the object header
// of the filler should be cleared, pass in kClearFreedMemory. The default is
// kDontClearFreedMemory.
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
    Address addr, int size,
    ClearFreedMemoryMode clear_memory_mode =
        ClearFreedMemoryMode::kDontClearFreedMemory);

// Range write barrier implementation.
template <int kModeMask, typename TSlot>
V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
                                        HeapObject object, TSlot start_slot,
                                        TSlot end_slot);

// Deopts all code that contains allocation instructions which are tenured
// or not tenured. Moreover, it clears the pretenuring allocation site
// statistics.
void ResetAllAllocationSitesDependentCode(AllocationType allocation);

// Evaluates local pretenuring for the old space and calls
// ResetAllTenuredAllocationSitesDependentCode if too many objects died in
// the old space.
void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

// Record statistics after garbage collection.
void ReportStatisticsAfterGC();

// Flush the number to string cache.
void FlushNumberStringCache();

void ConfigureInitialOldGenerationSize();

double ComputeMutatorUtilization(const char* tag, double mutator_speed,
                                 double gc_speed);

bool HasLowYoungGenerationAllocationRate();
bool HasLowOldGenerationAllocationRate();
bool HasLowEmbedderAllocationRate();

void ReduceNewSpaceSize();

GCIdleTimeHeapState ComputeHeapState();

bool PerformIdleTimeAction(GCIdleTimeAction action,
                           GCIdleTimeHeapState heap_state,
                           double deadline_in_ms);

void IdleNotificationEpilogue(GCIdleTimeAction action,
                              GCIdleTimeHeapState heap_state, double start_ms,
                              double deadline_in_ms);

void PrintMaxMarkingLimitReached();
void PrintMaxNewSpaceSizeReached();

int NextStressMarkingLimit();

void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);

void CompactRetainedMaps(WeakArrayList retained_maps);

void CollectGarbageOnMemoryPressure();

void EagerlyFreeExternalMemory();

bool InvokeNearHeapLimitCallback();

void ComputeFastPromotionMode();

// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
void FinalizeIncrementalMarkingIncrementally(
    GarbageCollectionReason gc_reason);

void InvokeIncrementalMarkingPrologueCallbacks();
void InvokeIncrementalMarkingEpilogueCallbacks();

// ===========================================================================
// Pretenuring. ==============================================================
// ===========================================================================

// Pretenuring decisions are made based on feedback collected during new
// space evacuation. Note that between feedback collection and calling this
// method, objects in old space must not move.
void ProcessPretenuringFeedback();

// Removes an entry from the global pretenuring storage.
void RemoveAllocationSitePretenuringFeedback(AllocationSite site);

// ===========================================================================
// Actual GC. ================================================================
// ===========================================================================

// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
                               const v8::GCCallbackFlags gc_callback_flags);
void GarbageCollectionPrologueInSafepoint();
void GarbageCollectionEpilogue(GarbageCollector collector);
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);

// Performs a major collection in the whole heap.
void MarkCompact();
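// Derivation behind ComputeMutatorUtilization() declared above, per unit of
// allocated bytes (a sketch of the standard mutator-utilization formula):
//   mutator_time = 1 / mutator_speed, gc_time = 1 / gc_speed
//   MU = mutator_time / (mutator_time + gc_time)
//      = gc_speed / (mutator_speed + gc_speed)
// so equal speeds give MU == 0.5, and a much faster GC pushes MU toward 1.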
// Performs a minor collection of just the young generation.
void MinorMarkCompact();

// Code to be run before and after mark-compact.
void MarkCompactPrologue();
void MarkCompactEpilogue();

// Performs a minor collection in the new generation.
void Scavenge();
void EvacuateYoungGeneration();

void UpdateYoungReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func);

void UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func);

void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
void ProcessNativeContexts(WeakObjectRetainer* retainer);
void ProcessAllocationSites(WeakObjectRetainer* retainer);
void ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer);
void ProcessWeakListRoots(WeakObjectRetainer* retainer);

// ===========================================================================
// GC statistics. ============================================================
// ===========================================================================

inline size_t OldGenerationSpaceAvailable() {
  uint64_t bytes =
      OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
  if (old_generation_allocation_limit() <= bytes) return 0;
  return old_generation_allocation_limit() - static_cast<size_t>(bytes);
}

void UpdateTotalGCTime(double duration);

bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

bool IsIneffectiveMarkCompact(size_t old_generation_size,
                              double mutator_utilization);
void CheckIneffectiveMarkCompact(size_t old_generation_size,
                                 double mutator_utilization);

inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount);

inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                               size_t amount);

// ===========================================================================
// Growing strategy. =========================================================
// ===========================================================================

MemoryReducer* memory_reducer() { return memory_reducer_.get(); }

// For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
// This constant limits the effect of load RAIL mode on GC.
// The value is arbitrary and chosen as the largest load time observed in
// v8 browsing benchmarks.
static const int kMaxLoadTimeMs = 7000;

bool ShouldOptimizeForLoadTime();

size_t old_generation_allocation_limit() const {
  return old_generation_allocation_limit_.load(std::memory_order_relaxed);
}

void set_old_generation_allocation_limit(size_t newlimit) {
  old_generation_allocation_limit_.store(newlimit, std::memory_order_relaxed);
}

size_t global_allocation_limit() const { return global_allocation_limit_; }

size_t max_old_generation_size() {
  return max_old_generation_size_.load(std::memory_order_relaxed);
}

void set_max_old_generation_size(size_t value) {
  max_old_generation_size_.store(value, std::memory_order_relaxed);
}

bool always_allocate() { return always_allocate_scope_count_ != 0; }

V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(LocalHeap* local_heap,
                                                        size_t size);
V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);

bool ShouldExpandOldGenerationOnSlowAllocation(
    LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
bool IsMainThreadParked(LocalHeap* local_heap);

HeapGrowingMode CurrentHeapGrowingMode();

double PercentToOldGenerationLimit();
double PercentToGlobalMemoryLimit();
enum class IncrementalMarkingLimit {
  kNoLimit,
  kSoftLimit,
  kHardLimit,
  kFallbackForEmbedderLimit
};
IncrementalMarkingLimit IncrementalMarkingLimitReached();

bool ShouldStressCompaction() const;

bool UseGlobalMemoryScheduling() const {
  return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
}

base::Optional<size_t> GlobalMemoryAvailable();

void RecomputeLimits(GarbageCollector collector);

// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================

bool RecentIdleNotificationHappened();

// ===========================================================================
// GC Tasks. =================================================================
// ===========================================================================

void ScheduleScavengeTaskIfNeeded();

// ===========================================================================
// Allocation methods. =======================================================
// ===========================================================================

HeapAllocator* allocator() { return &heap_allocator_; }

// Allocates a JS Map in the heap.
V8_WARN_UNUSED_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
            ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
            int inobject_properties = 0);

// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationType allocation,
            AllocationOrigin origin = AllocationOrigin::kRuntime,
            AllocationAlignment alignment = kTaggedAligned);

// This method will try to allocate objects quickly (AllocationType::kYoung);
// otherwise it falls back to a slower path indicated by the mode.
enum AllocationRetryMode { kLightRetry, kRetryOrFail }; template <AllocationRetryMode mode> V8_WARN_UNUSED_RESULT V8_INLINE HeapObject AllocateRawWith(int size, AllocationType allocation, AllocationOrigin origin = AllocationOrigin::kRuntime, AllocationAlignment alignment = kTaggedAligned); // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap. V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail( int size, AllocationType allocation, AllocationOrigin origin = AllocationOrigin::kRuntime, AllocationAlignment alignment = kTaggedAligned); // Allocates a heap object based on the map. V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map, AllocationType allocation); // Allocates a partial map for bootstrapping. V8_WARN_UNUSED_RESULT AllocationResult AllocatePartialMap(InstanceType instance_type, int instance_size); void FinalizePartialMap(Map map); void set_force_oom(bool value) { force_oom_ = value; } void set_force_gc_on_next_allocation() { force_gc_on_next_allocation_ = true; } // Helper for IsPendingAllocation. inline bool IsPendingAllocationInternal(HeapObject object); // =========================================================================== // Retaining path tracing ==================================================== // =========================================================================== void AddRetainer(HeapObject retainer, HeapObject object); void AddEphemeronRetainer(HeapObject retainer, HeapObject object); void AddRetainingRoot(Root root, HeapObject object); // Returns true if the given object is a target of retaining path tracking. // Stores the option corresponding to the object in the provided *option. bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option); void PrintRetainingPath(HeapObject object, RetainingPathOption option); void UpdateRetainersAfterScavenge(); #ifdef DEBUG V8_EXPORT_PRIVATE void IncrementObjectCounters(); #endif // DEBUG std::vector<Handle<NativeContext>> FindAllNativeContexts(); std::vector<WeakArrayList> FindAllRetainedMaps(); MemoryMeasurement* memory_measurement() { return memory_measurement_.get(); } AllocationType allocation_type_for_in_place_internalizable_strings() const { return allocation_type_for_in_place_internalizable_strings_; } bool IsStressingScavenge(); ExternalMemoryAccounting external_memory_; // This can be calculated directly from a pointer to the heap; however, it is // more expedient to get at the isolate directly from within Heap methods. Isolate* isolate_ = nullptr; HeapAllocator heap_allocator_; // These limits are initialized in Heap::ConfigureHeap based on the resource // constraints and flags. size_t code_range_size_ = 0; size_t max_semi_space_size_ = 0; size_t initial_semispace_size_ = 0; // Full garbage collections can be skipped if the old generation size // is below this threshold. size_t min_old_generation_size_ = 0; // If the old generation size exceeds this limit, then V8 will // crash with out-of-memory error. std::atomic<size_t> max_old_generation_size_{0}; // TODO(mlippautz): Clarify whether this should take some embedder // configurable limit into account. size_t min_global_memory_size_ = 0; size_t max_global_memory_size_ = 0; size_t initial_max_old_generation_size_ = 0; size_t initial_max_old_generation_size_threshold_ = 0; size_t initial_old_generation_size_ = 0; bool old_generation_size_configured_ = false; size_t maximum_committed_ = 0; size_t old_generation_capacity_after_bootstrap_ = 0; // Backing store bytes (array buffers and external strings). 
// Use uint64_t counter since the counter could overflow the 32-bit range
// temporarily on 32-bit.
std::atomic<uint64_t> backing_store_bytes_{0};

// For keeping track of how much data has survived
// scavenge since last new space expansion.
size_t survived_since_last_expansion_ = 0;

// ... and since the last scavenge.
size_t survived_last_scavenge_ = 0;

// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
std::atomic<size_t> always_allocate_scope_count_{0};

// Stores the memory pressure level that is set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
std::atomic<MemoryPressureLevel> memory_pressure_level_;

std::vector<std::pair<v8::NearHeapLimitCallback, void*>>
    near_heap_limit_callbacks_;

// For keeping track of context disposals.
int contexts_disposed_ = 0;

NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
MapSpace* map_space_ = nullptr;
OldLargeObjectSpace* lo_space_ = nullptr;
CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;

OldSpace* shared_old_space_ = nullptr;
MapSpace* shared_map_space_ = nullptr;

std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;

// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];

LocalHeap* main_thread_local_heap_ = nullptr;

// List for tracking ArrayBufferExtensions
ArrayBufferExtension* old_array_buffer_extensions_ = nullptr;
ArrayBufferExtension* young_array_buffer_extensions_ = nullptr;

// Determines whether code space is write-protected. This is essentially a
// race-free copy of the {FLAG_write_protect_code_memory} flag.
bool write_protect_code_memory_ = false;

// Holds the number of open CodeSpaceMemoryModificationScopes.
uintptr_t code_space_memory_modification_scope_depth_ = 0;

// Holds the number of open CodePageCollectionMemoryModificationScopes.
std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
    0};

std::atomic<HeapState> gc_state_{NOT_IN_GC};

int gc_post_processing_depth_ = 0;

// Returns the amount of external memory registered since the last global GC.
V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();

// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
std::atomic<int> stress_marking_percentage_{0};

// Observer that causes more frequent checks for reached incremental
// marking limit.
AllocationObserver* stress_marking_observer_ = nullptr;

// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_ = nullptr;

// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_ = 0.0;

// How many mark-sweep collections happened.
unsigned int ms_count_ = 0;

// How many GCs happened.
unsigned int gc_count_ = 0;

// The number of Mark-Compact garbage collections that are considered as
// ineffective. See IsIneffectiveMarkCompact() predicate.
int consecutive_ineffective_mark_compacts_ = 0;

static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
uintptr_t mmap_region_base_ = 0;

// For post mortem debugging.
int remembered_unmapped_pages_index_ = 0;
Address remembered_unmapped_pages_[kRememberedUnmappedPages];

// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;

// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
// {native_contexts_list_} is an Address instead of an Object to allow the use
// of atomic accessors.
std::atomic<Address> native_contexts_list_;
Object allocation_sites_list_;
Object dirty_js_finalization_registries_list_;
// Weak list tails.
Object dirty_js_finalization_registries_list_tail_;

std::vector<GCCallbackTuple> gc_epilogue_callbacks_;
std::vector<GCCallbackTuple> gc_prologue_callbacks_;

GetExternallyAllocatedMemoryInBytesCallback external_memory_callback_;

int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

size_t promoted_objects_size_ = 0;
double promotion_ratio_ = 0.0;
double promotion_rate_ = 0.0;
size_t semi_space_copied_object_size_ = 0;
size_t previous_semi_space_copied_object_size_ = 0;
double semi_space_copied_rate_ = 0.0;
int nodes_died_in_new_space_ = 0;
int nodes_copied_in_new_space_ = 0;
int nodes_promoted_ = 0;

// This is the pretenuring trigger for allocation sites that are in maybe
// tenure state. When we switch to the maximum new space size we deoptimize
// the code that belongs to the allocation site and derive the lifetime
// of the allocation site.
unsigned int maximum_size_scavenges_ = 0;

// Total time spent in GC.
double total_gc_time_ms_ = 0.0;

// Last time an idle notification happened.
double last_idle_notification_time_ = 0.0;

// Last time a garbage collection happened.
double last_gc_time_ = 0.0;

std::unique_ptr<GCTracer> tracer_;
std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferSweeper> array_buffer_sweeper_;

std::unique_ptr<MemoryAllocator> memory_allocator_;
std::unique_ptr<IncrementalMarking> incremental_marking_;
std::unique_ptr<ConcurrentMarking> concurrent_marking_;
std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
std::unique_ptr<MemoryMeasurement> memory_measurement_;
std::unique_ptr<MemoryReducer> memory_reducer_;
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
std::unique_ptr<ScavengeJob> scavenge_job_;
std::unique_ptr<AllocationObserver> scavenge_task_observer_;
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
std::unique_ptr<AllocationTrackerForDebugging>
    allocation_tracker_for_debugging_;

// This object controls virtual space reserved for code on the V8 heap. This
// is only valid for 64-bit architectures where kRequiresCodeRange.
//
// Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE, otherwise is
// process-wide.
std::shared_ptr<CodeRange> code_range_;

// The embedder owns the C++ heap.
v8::CppHeap* cpp_heap_ = nullptr;

EmbedderRootsHandler* embedder_roots_handler_ = nullptr;

StrongRootsEntry* strong_roots_head_ = nullptr;
base::Mutex strong_roots_mutex_;

bool need_to_remove_stress_concurrent_allocation_observer_ = false;

// This counter is increased before each GC and never reset.
// To account for the bytes allocated since the last GC, use the
// NewSpaceAllocationCounter() function.
size_t new_space_allocation_counter_ = 0;

// This counter is increased before each GC and never reset. To
// account for the bytes allocated since the last GC, use the
// OldGenerationAllocationCounter() function.
size_t old_generation_allocation_counter_at_last_gc_ = 0;

// The size of objects in old generation after the last MarkCompact GC.
size_t old_generation_size_at_last_gc_{0};

// The size of global memory after the last MarkCompact GC.
size_t global_memory_at_last_gc_ = 0;

// The feedback storage is used to store allocation sites (keys) and how often
// they have been visited (values) by finding a memento behind an object. The
// storage is only alive temporarily during a GC. The invariant is that all
// pointers in this map are already fixed, i.e., they do not point to
// forwarding pointers.
PretenuringFeedbackMap global_pretenuring_feedback_;

std::unique_ptr<GlobalHandleVector<AllocationSite>>
    allocation_sites_to_pretenure_;

char trace_ring_buffer_[kTraceRingBufferSize];

// Used as boolean.
uint8_t is_marking_flag_ = 0;

// If it's not full then the data is from 0 to ring_buffer_end_. If it's
// full then the data is from ring_buffer_end_ to the end of the buffer and
// from 0 to ring_buffer_end_.
bool ring_buffer_full_ = false;
size_t ring_buffer_end_ = 0;

// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is set up.
bool configured_ = false;

// Currently set GC flags that are respected by all GC components.
int current_gc_flags_ = Heap::kNoGCFlags;

// Currently set GC callback flags that are used to pass information between
// the embedder and V8's GC.
GCCallbackFlags current_gc_callback_flags_ =
    GCCallbackFlags::kNoGCCallbackFlags;

std::unique_ptr<IsolateSafepoint> safepoint_;

bool is_current_gc_forced_ = false;
bool is_current_gc_for_heap_profiler_ = false;

ExternalStringTable external_string_table_;

const AllocationType allocation_type_for_in_place_internalizable_strings_;

base::Mutex relocation_mutex_;

std::unique_ptr<CollectionBarrier> collection_barrier_;

int ignore_local_gc_requests_depth_ = 0;

int gc_callbacks_depth_ = 0;

bool deserialization_complete_ = false;

int max_regular_code_object_size_ = 0;

bool fast_promotion_mode_ = false;

// Used for testing purposes.
bool force_oom_ = false;
bool force_gc_on_next_allocation_ = false;
bool delay_sweeper_tasks_for_testing_ = false;

HeapObject pending_layout_change_object_;

base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;

std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
// ephemeron is stored in this map.
std::unordered_map<HeapObject, HeapObject, Object::Hasher>
    ephemeron_retainer_;
// For each index in the retaining_path_targets_ array this map
// stores the option of the corresponding target.
std::unordered_map<int, RetainingPathOption> retaining_path_target_option_;

std::vector<HeapObjectAllocationTracker*> allocation_trackers_;

bool is_finalization_registry_cleanup_task_posted_ = false;

std::unique_ptr<third_party_heap::Heap> tp_heap_;

// Classes in "heap" can be friends.
friend class AlwaysAllocateScope; friend class ArrayBufferCollector; friend class ArrayBufferSweeper; friend class ConcurrentMarking; friend class EvacuateVisitorBase; friend class GCCallbacksScope; friend class GCTracer; friend class HeapAllocator; friend class HeapObjectIterator; friend class ScavengeTaskObserver; friend class IgnoreLocalGCRequests; friend class IncrementalMarking; friend class IncrementalMarkingJob; friend class LargeObjectSpace; friend class LocalHeap; friend class MarkingBarrier; friend class OldLargeObjectSpace; friend class OptionalAlwaysAllocateScope; template <typename ConcreteVisitor, typename MarkingState> friend class MarkingVisitorBase; friend class MarkCompactCollector; friend class MarkCompactCollectorBase; friend class MinorMarkCompactCollector; friend class NewLargeObjectSpace; friend class NewSpace; friend class ObjectStatsCollector; friend class Page; friend class PagedSpace; friend class ReadOnlyRoots; friend class Scavenger; friend class ScavengerCollector; friend class StressConcurrentAllocationObserver; friend class Space; friend class Sweeper; friend class heap::TestMemoryAllocatorScope; friend class third_party_heap::Heap; friend class third_party_heap::Impl; // The allocator interface. friend class Factory; friend class LocalFactory; template <typename IsolateT> friend class Deserializer; // The Isolate constructs us. friend class Isolate; // Used in cctest. friend class heap::HeapTester; }; class HeapStats { public: static const int kStartMarker = 0xDECADE00; static const int kEndMarker = 0xDECADE01; intptr_t* start_marker; // 0 size_t* ro_space_size; // 1 size_t* ro_space_capacity; // 2 size_t* new_space_size; // 3 size_t* new_space_capacity; // 4 size_t* old_space_size; // 5 size_t* old_space_capacity; // 6 size_t* code_space_size; // 7 size_t* code_space_capacity; // 8 size_t* map_space_size; // 9 size_t* map_space_capacity; // 10 size_t* lo_space_size; // 11 size_t* code_lo_space_size; // 12 size_t* global_handle_count; // 13 size_t* weak_global_handle_count; // 14 size_t* pending_global_handle_count; // 15 size_t* near_death_global_handle_count; // 16 size_t* free_global_handle_count; // 17 size_t* memory_allocator_size; // 18 size_t* memory_allocator_capacity; // 19 size_t* malloced_memory; // 20 size_t* malloced_peak_memory; // 21 size_t* objects_per_type; // 22 size_t* size_per_type; // 23 int* os_error; // 24 char* last_few_messages; // 25 char* js_stacktrace; // 26 intptr_t* end_marker; // 27 }; // Disables GC for all allocations. It should not be used // outside heap, deserializer, and isolate bootstrap. // Use AlwaysAllocateScopeForTesting in tests. class V8_NODISCARD AlwaysAllocateScope { public: inline ~AlwaysAllocateScope(); private: friend class AlwaysAllocateScopeForTesting; friend class Evacuator; friend class Heap; friend class HeapAllocator; friend class Isolate; explicit inline AlwaysAllocateScope(Heap* heap); Heap* heap_; }; // Like AlwaysAllocateScope if the heap argument to the constructor is // non-null. No-op otherwise. // // This class exists because AlwaysAllocateScope doesn't compose with // base::Optional, since supporting that composition requires making // base::Optional a friend class, defeating the purpose of hiding its // constructor. 
class V8_NODISCARD OptionalAlwaysAllocateScope {
 public:
  inline ~OptionalAlwaysAllocateScope();

 private:
  friend class Heap;

  explicit inline OptionalAlwaysAllocateScope(Heap* heap);

  Heap* heap_;
};

class V8_NODISCARD AlwaysAllocateScopeForTesting {
 public:
  explicit inline AlwaysAllocateScopeForTesting(Heap* heap);

 private:
  AlwaysAllocateScope scope_;
};

// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class V8_NODISCARD CodeSpaceMemoryModificationScope {
 public:
  explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
  inline ~CodeSpaceMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageCollectionMemoryModificationScope can only be used by the main
// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
// already active.
class V8_NODISCARD CodePageCollectionMemoryModificationScope {
 public:
  explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
  inline ~CodePageCollectionMemoryModificationScope();

 private:
  Heap* heap_;
};

// The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
class V8_NODISCARD CodePageMemoryModificationScope {
 public:
  explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
  explicit inline CodePageMemoryModificationScope(Code object);
  inline ~CodePageMemoryModificationScope();

 private:
  BasicMemoryChunk* chunk_;
  bool scope_active_;

  // Disallow any GCs inside this scope, as a relocation of the underlying
  // object would change the {MemoryChunk} that this scope targets.
  DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};

class V8_NODISCARD IgnoreLocalGCRequests {
 public:
  explicit inline IgnoreLocalGCRequests(Heap* heap);
  inline ~IgnoreLocalGCRequests();

 private:
  Heap* heap_;
};

// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
                              public RootVisitor {
 public:
  V8_INLINE explicit VerifyPointersVisitor(Heap* heap);
  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override;
  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override;
  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
  void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
  void VisitRootPointers(Root root, const char* description,
                         OffHeapObjectSlot start,
                         OffHeapObjectSlot end) override;

 protected:
  V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
  V8_INLINE void VerifyCodeObjectImpl(HeapObject heap_object);

  template <typename TSlot>
  V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end);

  virtual void VerifyPointers(HeapObject host, MaybeObjectSlot start,
                              MaybeObjectSlot end);

  Heap* heap_;
};

// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
 public:
  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
};

// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old space and code space. Returns each space in turn, and null when
// it is done.
class V8_EXPORT_PRIVATE PagedSpaceIterator {
 public:
  explicit PagedSpaceIterator(Heap* heap)
      : heap_(heap), counter_(FIRST_GROWABLE_PAGED_SPACE) {}
  PagedSpace* Next();

 private:
  Heap* heap_;
  int counter_;
};

class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool HasNext();
  Space* Next();

 private:
  Heap* heap_;
  int current_space_;  // from enum AllocationSpace.
};

// A HeapObjectIterator provides iteration over the entire non-read-only heap.
// It aggregates the specific iterators for the different spaces, as each of
// those can only iterate over a single space.
//
// HeapObjectIterator ensures there is no allocation during its lifetime (using
// an embedded DisallowGarbageCollection instance).
//
// HeapObjectIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free-node
// filtering uses GC marks, it can't be used during MS/MC GC phases. Also, it
// is forbidden to interrupt iteration in this mode, as this will leave heap
// objects marked (and thus, unusable).
//
// See ReadOnlyHeapObjectIterator if you need to iterate over read-only space
// objects, or CombinedHeapObjectIterator if you need to iterate over both
// heaps.
class V8_EXPORT_PRIVATE HeapObjectIterator {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapObjectIterator(Heap* heap,
                              HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapObjectIterator();

  HeapObject Next();

 private:
  HeapObject NextObject();

  Heap* heap_;
  std::unique_ptr<SafepointScope> safepoint_scope_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  std::unique_ptr<ObjectIterator> object_iterator_;

  DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};

// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() = default;

  // Return whether this object should be retained. If nullptr is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object RetainAs(Object object) = 0;
};

// -----------------------------------------------------------------------------
// Allows observation of heap object allocations.
class HeapObjectAllocationTracker {
 public:
  virtual void AllocationEvent(Address addr, int size) = 0;
  virtual void MoveEvent(Address from, Address to, int size) {}
  virtual void UpdateObjectSizeEvent(Address addr, int size) {}
  virtual ~HeapObjectAllocationTracker() = default;
};

template <typename T>
inline T ForwardingAddress(T heap_obj);

// Address block allocator compatible with standard containers which registers
// its allocated range as strong roots.
class StrongRootBlockAllocator { public: using pointer = Address*; using const_pointer = const Address*; using reference = Address&; using const_reference = const Address&; using value_type = Address; using size_type = size_t; using difference_type = ptrdiff_t; template <class U> struct rebind; explicit StrongRootBlockAllocator(Heap* heap) : heap_(heap) {} Address* allocate(size_t n); void deallocate(Address* p, size_t n) noexcept; private: Heap* heap_; }; // Rebinding to Address gives another StrongRootBlockAllocator. template <> struct StrongRootBlockAllocator::rebind<Address> { using other = StrongRootBlockAllocator; }; // Rebinding to something other than Address gives a std::allocator that // is copy-constructable from StrongRootBlockAllocator. template <class U> struct StrongRootBlockAllocator::rebind { class other : public std::allocator<U> { public: // NOLINTNEXTLINE other(const StrongRootBlockAllocator&) {} }; }; } // namespace internal } // namespace v8 #endif // V8_HEAP_HEAP_H_
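The growing-strategy bookkeeping declared in the header above reduces to simple arithmetic: committed old-generation bytes plus external memory allocated since the last mark-compact are charged against `old_generation_allocation_limit_`, with the remaining headroom clamped at zero. A minimal Python model of that computation (illustrative only, not V8 API):

```python
def old_generation_space_available(size_of_objects: int,
                                   external_since_mark_compact: int,
                                   allocation_limit: int) -> int:
    """Model of Heap::OldGenerationSpaceAvailable(): headroom under the limit.

    Old-generation bytes plus external memory registered since the last
    mark-compact count against the allocation limit; once the limit is
    reached or exceeded, the available headroom is zero.
    """
    used = size_of_objects + external_since_mark_compact
    if allocation_limit <= used:
        return 0
    return allocation_limit - used

# Example: 96 MB used (incl. external memory) under a 128 MB limit -> 32 MB left.
assert old_generation_space_available(64 << 20, 32 << 20, 128 << 20) == 32 << 20
```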
#include "orientation.cpp" #include "hostcommand.cpp" #ifndef HOST_COMMUNICATOR_H #define HOST_COMMUNICATOR_H class HostCommunicator { public: HostCommunicator(); HostCommand* getCommandQueue(); }; #endif /* HOST_COMMUNICATOR_H */
""" Only the tests from the watching (simulated) to the handling (substituted). Excluded: the watching-streaming routines (see ``tests_streaming.py`` and ``test_watching.py``). Excluded: the causation and handling routines (to be done later). Used for internal control that the event queueing works are intended. If the intentions change, the tests should be rewritten. They are NOT part of the public interface of the framework. """ import asyncio import weakref import pytest from kopf.reactor.queueing import watcher, EOS # Some overhead for the synchronous logic in async tests: it also takes time. CODE_OVERHEAD = 0.05 # 0.01 is too fast, 0.1 is too slow, 0.5 is good enough. @pytest.mark.parametrize('uids, cnts, events', [ pytest.param(['uid1'], [1], [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}}}, ], id='single'), pytest.param(['uid1'], [3], [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'DELETED', 'object': {'metadata': {'uid': 'uid1'}}}, ], id='multiple'), pytest.param(['uid1', 'uid2'], [3, 2], [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid2'}}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid2'}}}, {'type': 'DELETED', 'object': {'metadata': {'uid': 'uid1'}}}, ], id='mixed'), ]) @pytest.mark.usefixtures('watcher_limited') async def test_event_multiplexing(worker_mock, timer, resource, handler, stream, events, uids, cnts): """ Verify that every unique uid goes into its own queue+worker, which are never shared. """ # Inject the events of unique objects - to produce few queues/workers. stream.return_value = iter(events) # Run the watcher (near-instantly and test-blocking). with timer: await watcher( namespace=None, resource=resource, handler=handler, ) # The queues are not cleared by the mocked worker, but the worker exits fast. assert timer.seconds < CODE_OVERHEAD # The handler must not be called by the watcher, only by the worker. # But the worker (even if mocked) must be called & awaited by the watcher. assert not handler.awaited assert not handler.called assert worker_mock.awaited # Are the worker-queues created by the watcher? Populated as expected? # One queue per unique uid? All events are sequential? EOS marker appended? 
assert worker_mock.call_count == len(uids) assert worker_mock.call_count == len(cnts) for uid, cnt, (args, kwargs) in zip(uids, cnts, worker_mock.call_args_list): key = kwargs['key'] queues = kwargs['queues'] assert kwargs['handler'] is handler assert key == (resource, uid) assert key in queues queue_events = [] while not queues[key].empty(): queue_events.append(queues[key].get_nowait()) assert len(queue_events) == cnt + 1 assert queue_events[-1] is EOS assert all(queue_event['object']['metadata']['uid'] == uid for queue_event in queue_events[:-1]) @pytest.mark.parametrize('uids, vals, events', [ pytest.param(['uid1'], ['b'], [ {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'a'}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'b'}}, ], id='the same'), pytest.param(['uid1', 'uid2'], ['a', 'b'], [ {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'a'}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid2'}, 'spec': 'b'}}, ], id='distinct'), pytest.param(['uid1', 'uid2', 'uid3'], ['e', 'd', 'f'], [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'a'}}, {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid2'}, 'spec': 'b'}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'c'}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid2'}, 'spec': 'd'}}, {'type': 'DELETED', 'object': {'metadata': {'uid': 'uid1'}, 'spec': 'e'}}, {'type': 'DELETED', 'object': {'metadata': {'uid': 'uid3'}, 'spec': 'f'}}, ], id='mixed'), ]) @pytest.mark.usefixtures('watcher_limited') async def test_event_batching(mocker, resource, handler, timer, stream, events, uids, vals): """ Verify that only the last event per uid is actually handled. """ # Override the default timeouts to make the tests faster. mocker.patch('kopf.reactor.queueing.WORKER_IDLE_TIMEOUT', 0.5) mocker.patch('kopf.reactor.queueing.WORKER_BATCH_WINDOW', 0.1) mocker.patch('kopf.reactor.queueing.WORKER_EXIT_TIMEOUT', 0.5) # Inject the events of unique objects - to produce few queues/workers. stream.return_value = iter(events) # Run the watcher (near-instantly and test-blocking). with timer: await watcher( namespace=None, resource=resource, handler=handler, ) # Significantly less than the queue getting timeout, but sufficient to run. # 2 <= 1 pull for the event chain + 1 pull for EOS. TODO: 1x must be enough. from kopf.reactor.queueing import WORKER_BATCH_WINDOW assert timer.seconds < WORKER_BATCH_WINDOW + CODE_OVERHEAD # Was the handler called at all? Awaited as needed for async fns? assert handler.awaited # Was it called only once per uid? Only with the latest event? assert handler.call_count == len(uids) assert handler.call_count == len(vals) for uid, val, (args, kwargs) in zip(uids, vals, handler.call_args_list): event = kwargs['event'] assert event['object']['metadata']['uid'] == uid assert event['object']['spec'] == val @pytest.mark.parametrize('unique, events', [ pytest.param(1, [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'MODIFIED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'DELETED', 'object': {'metadata': {'uid': 'uid1'}}}, ], id='the same'), pytest.param(2, [ {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid1'}}}, {'type': 'ADDED', 'object': {'metadata': {'uid': 'uid2'}}}, ], id='distinct'), ]) @pytest.mark.usefixtures('watcher_in_background') async def test_garbage_collection_of_queues(mocker, stream, events, unique, worker_spy): # Override the default timeouts to make the tests faster. 
    mocker.patch('kopf.reactor.queueing.WORKER_IDLE_TIMEOUT', 0.5)
    mocker.patch('kopf.reactor.queueing.WORKER_BATCH_WINDOW', 0.1)
    mocker.patch('kopf.reactor.queueing.WORKER_EXIT_TIMEOUT', 0.5)

    # Inject the events of unique objects - to produce few queues/workers.
    stream.return_value = iter(events)

    # Give it a moment to populate the queues and spawn all the workers.
    # Intercept and remember _any_ seen dict of queues for further checks.
    while worker_spy.call_count < unique:
        await asyncio.sleep(0.001)  # give control to the loop
    queues = worker_spy.call_args_list[-1][1]['queues']

    # The mutable(!) queues dict is now populated with the objects' queues.
    assert len(queues) != 0  # usually 1, but can be 2+ if it is fast enough.

    # Weakly remember the queues to make sure they are gc'ed later.
    refs = [weakref.ref(queue) for queue in queues.values()]
    assert all([ref() is not None for ref in refs])

    # Give the workers some time to finish waiting for the events.
    # Once the idle timeout expires, they will exit and gc their individual queues.
    from kopf.reactor.queueing import WORKER_IDLE_TIMEOUT, WORKER_BATCH_WINDOW
    await asyncio.sleep(WORKER_BATCH_WINDOW)  # depleting the queues.
    await asyncio.sleep(WORKER_IDLE_TIMEOUT)  # idling on empty queues.
    await asyncio.sleep(CODE_OVERHEAD)

    # The mutable(!) queues dict is now empty, i.e. garbage-collected.
    assert len(queues) == 0

    # Truly garbage-collected? Memory freed?
    assert all([ref() is None for ref in refs])

# TODO: also add tests for the depletion of the workers pools on cancellation (+timing)
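The batching tests above pin down one behaviour: within `WORKER_BATCH_WINDOW`, all queued events for a uid are depleted and only the freshest one reaches the handler. A minimal sketch of such a worker, assuming a per-uid `asyncio.Queue` and an `EOS` sentinel as in the tests (an illustration, not kopf's actual worker):

```python
import asyncio

EOS = object()  # end-of-stream marker, standing in for kopf's sentinel

async def batching_worker(queue: asyncio.Queue, handler, batch_window: float = 0.1):
    """Deplete bursts of events for one uid and handle only the freshest one."""
    while True:
        event = await queue.get()
        if event is EOS:
            break
        # Keep draining while new events arrive within the batch window, so a
        # rapid ADDED/MODIFIED/DELETED burst collapses into its last event.
        while True:
            try:
                nxt = await asyncio.wait_for(queue.get(), timeout=batch_window)
            except asyncio.TimeoutError:
                break
            if nxt is EOS:
                await handler(event=event)
                return
            event = nxt
        await handler(event=event)
```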
// Copyright (c) 2014-2020 The Crown developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef CROWN_CACHE_H #define CROWN_CACHE_H #include <flat-database.h> #include <masternode/masternode-budget.h> #include <masternode/masternode-payments.h> #include <masternode/masternode.h> #include <masternode/masternodeman.h> #include <netfulfilledman.h> #include <node/ui_interface.h> #include <systemnode/systemnode-payments.h> #include <systemnode/systemnode.h> #include <systemnode/systemnodeman.h> #include <util/system.h> #include <util/translation.h> void DumpCaches(); bool LoadCaches(); #endif // CROWN_CACHE_H
#pragma once #include "webrtc.VideoFrame.VideoFrame.g.h" namespace winrt::Microsoft::WinRTC::WebRtcWrapper::webrtc::VideoFrame::implementation { struct VideoFrame : VideoFrameT<VideoFrame> { VideoFrame() = default; // VideoFrame(::webrtc::VideoFrame webrtc_videoframe); int32_t Width(); int32_t Height(); uint32_t Size(); //::webrtc::VideoFrame webrtc_videoframe_; }; }
from django.contrib import admin

from .models import Question, Choice

# Register your models here.

# One way of changing things:
# class QuestionAdmin(admin.ModelAdmin):
#     fields = ['pub_date', 'question_text']

# Another way of doing things:
class ChoiceInline(admin.TabularInline):
    model = Choice
    extra = 3

class QuestionAdmin(admin.ModelAdmin):
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]
    list_display = ("question_text", "pub_date", "was_published_recently")
    list_filter = ['pub_date']
    search_fields = ['question_text']

admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice)
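`list_display` above names `was_published_recently`, which must be defined on the `Question` model (the model file is not shown here). A minimal sketch of what that method presumably looks like, following the standard Django tutorial model:

```python
import datetime

from django.db import models
from django.utils import timezone


class Question(models.Model):
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def was_published_recently(self):
        # True only for questions published within the last day,
        # and never for questions dated in the future.
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
```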
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The InverseGamma distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import numpy as np

import tensorflow.compat.v2 as tf

from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import reciprocal as reciprocal_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import gamma as gamma_lib
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util

__all__ = [
    'InverseGamma',
]


class InverseGamma(distribution.Distribution):
  """InverseGamma distribution.

  The `InverseGamma` distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `scale` (aka "beta").

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
  Z = Gamma(alpha) beta**-alpha
  ```

  where:

  * `concentration = alpha`,
  * `scale = beta`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The cumulative distribution function (cdf) is,

  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
  ```

  where `GammaInc` is the [upper incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).

  The parameters can be intuited via their relationship to mean and variance
  when these moments exist,

  ```none
  mean = beta / (alpha - 1)                          when alpha > 1
  variance = beta**2 / (alpha - 1)**2 / (alpha - 2)  when alpha > 2
  ```

  i.e., under the same conditions:

  ```none
  alpha = mean**2 / variance + 2
  beta = mean * (mean**2 / variance + 1)
  ```

  Distribution parameters are automatically broadcast in all functions; see
  examples for details.

  Samples of this distribution are reparameterized (pathwise differentiable).
  The derivatives are computed using the approach described in the paper

  [Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498) #### Examples ```python tfd = tfp.distributions dist = tfd.InverseGamma(concentration=3.0, scale=2.0) dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], scale=[2.0, 3.0]) ``` Compute the gradients of samples w.r.t. the parameters: ```python tfd = tfp.distributions concentration = tf.constant(3.0) scale = tf.constant(2.0) dist = tfd.InverseGamma(concentration, scale) samples = dist.sample(5) # Shape [5] loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function # Unbiased stochastic gradients of the loss function grads = tf.gradients(loss, [concentration, scale]) ``` """ def __init__(self, concentration, scale=None, validate_args=False, allow_nan_stats=True, name='InverseGamma'): """Construct InverseGamma with `concentration` and `scale` parameters. The parameters `concentration` and `scale` must be shaped in a way that supports broadcasting (e.g. `concentration + scale` is a valid operation). Args: concentration: Floating point tensor, the concentration params of the distribution(s). Must contain only positive values. scale: Floating point tensor, the scale params of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `concentration` and `scale` are different dtypes. 
""" parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype( [concentration, scale], dtype_hint=tf.float32) self._concentration = tensor_util.convert_nonref_to_tensor( concentration, dtype=dtype, name='concentration') self._scale = tensor_util.convert_nonref_to_tensor( scale, dtype=dtype, name='scale') super(InverseGamma, self).__init__( dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization.FULLY_REPARAMETERIZED, parameters=parameters, name=name) @classmethod def _parameter_properties(cls, dtype, num_classes=None): # pylint: disable=g-long-lambda return dict( concentration=parameter_properties.ParameterProperties( default_constraining_bijector_fn=( lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))), scale=parameter_properties.ParameterProperties( default_constraining_bijector_fn=( lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype))))) # pylint: enable=g-long-lambda @property def concentration(self): """Concentration parameter.""" return self._concentration @property def scale(self): """Scale parameter.""" return self._scale def _batch_shape_tensor(self): return ps.broadcast_shape( ps.shape(self.concentration), ps.shape(self.scale)) def _batch_shape(self): return tf.broadcast_static_shape(self.concentration.shape, self.scale.shape) def _event_shape_tensor(self): return tf.constant([], dtype=tf.int32) def _event_shape(self): return tf.TensorShape([]) @distribution_util.AppendDocstring( """Note: See `tf.random.gamma` docstring for sampling details and caveats.""") def _sample_n(self, n, seed=None): return tf.math.exp(-gamma_lib.random_gamma( shape=[n], concentration=self.concentration, rate=self.scale, seed=seed, log_space=True)) def _log_prob(self, x): concentration = tf.convert_to_tensor(self.concentration) scale = tf.convert_to_tensor(self.scale) unnormalized_prob = -(1. + concentration) * tf.math.log(x) - scale / x normalization = ( tf.math.lgamma(concentration) - concentration * tf.math.log(scale)) return unnormalized_prob - normalization def _cdf(self, x): # Note that igammac returns the upper regularized incomplete gamma # function Q(a, x), which is what we want for the CDF. return tf.math.igammac(self.concentration, self.scale / x) def _quantile(self, p): return tf.math.reciprocal( tfp_math.igammacinv(self.concentration, p)) * self.scale def _entropy(self): concentration = tf.convert_to_tensor(self.concentration) scale = tf.convert_to_tensor(self.scale) return (concentration + tf.math.log(scale) + tf.math.lgamma(concentration) - ((1. + concentration) * tf.math.digamma(concentration))) @distribution_util.AppendDocstring( """The mean of an inverse gamma distribution is `scale / (concentration - 1)`, when `concentration > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`""") def _mean(self): concentration = tf.convert_to_tensor(self.concentration) scale = tf.convert_to_tensor(self.scale) mean = scale / (concentration - 1.) if self.allow_nan_stats: assertions = [] else: assertions = [assert_util.assert_less( tf.ones([], self.dtype), concentration, message='mean undefined when any concentration <= 1')] with tf.control_dependencies(assertions): return tf.where( concentration > 1., mean, dtype_util.as_numpy_dtype(self.dtype)(np.nan)) @distribution_util.AppendDocstring( """Variance for inverse gamma is defined only for `concentration > 2`. 
If `self.allow_nan_stats` is `False`, an exception will be raised rather than returning `NaN`.""") def _variance(self): concentration = tf.convert_to_tensor(self.concentration) scale = tf.convert_to_tensor(self.scale) var = ( tf.square(scale) / tf.square(concentration - 1.) / (concentration - 2.)) if self.allow_nan_stats: assertions = [] else: assertions = [assert_util.assert_less( tf.constant(2., dtype=self.dtype), concentration, message='variance undefined when any concentration <= 2')] with tf.control_dependencies(assertions): return tf.where( concentration > 2., var, dtype_util.as_numpy_dtype(self.dtype)(np.nan)) @distribution_util.AppendDocstring( """The mode of an inverse gamma distribution is `scale / (concentration + 1)`.""") def _mode(self): return self.scale / (1. + self.concentration) def _default_event_space_bijector(self): return chain_bijector.Chain([ reciprocal_bijector.Reciprocal(validate_args=self.validate_args), softplus_bijector.Softplus(validate_args=self.validate_args) ], validate_args=self.validate_args) def _sample_control_dependencies(self, x): assertions = [] if not self.validate_args: return assertions assertions.append(assert_util.assert_non_negative( x, message='Sample must be non-negative.')) return assertions def _parameter_control_dependencies(self, is_init): if not self.validate_args: return [] assertions = [] if is_init != tensor_util.is_ref(self.concentration): assertions.append(assert_util.assert_positive( self.concentration, message='Argument `concentration` must be positive.')) if is_init != tensor_util.is_ref(self.scale): assertions.append(assert_util.assert_positive( self.scale, message='Argument `scale` must be positive.')) return assertions
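The class docstring gives the inverse relationship `alpha = mean**2 / variance + 2` and `beta = mean * (mean**2 / variance + 1)`. A small moment-matching helper (not part of TFP) that applies those formulas and checks itself against the forward ones:

```python
def inverse_gamma_params_from_moments(mean, variance):
    """Return (concentration, scale) whose InverseGamma has the given moments.

    Valid whenever mean > 0 and variance > 0, which forces concentration > 2.
    """
    concentration = mean**2 / variance + 2.
    scale = mean * (mean**2 / variance + 1.)
    return concentration, scale

alpha, beta = inverse_gamma_params_from_moments(mean=2.0, variance=0.5)
# Forward check against the docstring formulas:
#   mean = beta / (alpha - 1), variance = beta**2 / ((alpha - 1)**2 (alpha - 2))
assert abs(beta / (alpha - 1.) - 2.0) < 1e-12
assert abs(beta**2 / (alpha - 1.)**2 / (alpha - 2.) - 0.5) < 1e-12
```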
/**
 * @fileoverview added by tsickle
 * @suppress {checkTypes} checked by tsc
 */
import { InjectionToken } from '@angular/core';
export const /** @type {?} */ ORIGIN_URL = new InjectionToken('ORIGIN_URL');
export const /** @type {?} */ REQUEST = new InjectionToken('REQUEST');
export const /** @type {?} */ RESPONSE = new InjectionToken('RESPONSE');
/*=========================================================================

   Program: ParaView
   Module:  pqCreateCustomFilterReaction.h

   Copyright (c) 2005,2006 Sandia Corporation, Kitware Inc.
   All rights reserved.

   ParaView is a free software; you can redistribute it and/or modify it
   under the terms of the ParaView license version 1.2.

   See License_v1.2.txt for the full ParaView license.
   A copy of this license can be obtained by contacting
   Kitware Inc.
   28 Corporate Drive
   Clifton Park, NY 12065
   USA

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

========================================================================*/
#ifndef pqCreateCustomFilterReaction_h
#define pqCreateCustomFilterReaction_h

#include "pqReaction.h"

/**
 * pqCreateCustomFilterReaction pops up the create-custom-filter wizard for the
 * active selection.
 */
class PQAPPLICATIONCOMPONENTS_EXPORT pqCreateCustomFilterReaction : public pqReaction
{
  Q_OBJECT
  typedef pqReaction Superclass;

public:
  /**
   * Constructor. Parent cannot be nullptr.
   */
  pqCreateCustomFilterReaction(QAction* parent);

  /**
   * Create custom filter.
   */
  static void createCustomFilter();

public Q_SLOTS: // NOLINT(readability-redundant-access-specifiers)
  /**
   * Updates the enabled state. Applications need not explicitly call this.
   */
  void updateEnableState() override;

protected:
  /**
   * Called when the action is triggered.
   */
  void onTriggered() override { pqCreateCustomFilterReaction::createCustomFilter(); }

private:
  Q_DISABLE_COPY(pqCreateCustomFilterReaction)
};

#endif
#!/usr/bin/env python3 # Copyright (c) 2020 SiFive Inc. # SPDX-License-Identifier: Apache-2.0 """Generate Freedom E SDK settings.mk from devicetree source files""" import argparse import sys import pydevicetree SUPPORTED_TYPES = ["rtl", "arty", "qemu", "hifive", "spike", "vc707", "vcu118"] def parse_arguments(argv): """Parse the arguments into a dictionary with argparse""" arg_parser = argparse.ArgumentParser( description="Generate Freedom E SDK settings.mk from Devicetrees") arg_parser.add_argument("-t", "--type", required=True, help="The type of the target to generate settings.mk for. \ Supported types include: %s" % ", ".join(SUPPORTED_TYPES)) arg_parser.add_argument("-d", "--dts", required=True, help="The path to the Devicetree for the target") arg_parser.add_argument("-o", "--output", type=argparse.FileType('w'), help="The path of the settings.mk file to output") parsed_args = arg_parser.parse_args(argv) if not any([t in parsed_args.type for t in SUPPORTED_TYPES]): print("Type '%s' is not supported, please choose one of: %s" % (parsed_args.type, ', '.join(SUPPORTED_TYPES))) sys.exit(1) return parsed_args def get_boot_hart(tree): """Get the boot hart if one is specified, otherwise just gets the first hart""" metal_boothart = tree.chosen("metal,boothart") if metal_boothart: return tree.get_by_reference(metal_boothart[0]) return tree.get_by_path("/cpus").children[0] def get_all_arch(tree): """Get a list of architecture strings from all harts""" return [cpu.get_field("riscv,isa") for cpu in tree.get_by_path("/cpus").children] def get_greatest_common_arch(archs): """Get the RISC-V ISA string which contains as many extensions as are supported by all harts in the design""" if len(archs) == 1: return archs[0] # Get all ISA extensions implemented by any hart extensions = ''.join(set(''.join([arch[4:] for arch in archs]))) # Get a list of any extensions which aren't supported by all harts disallowed_extensions = "" for extension in extensions: if not all([extension in arch[4:] for arch in archs]): disallowed_extensions += extension # Get the longest arch from the list arch = max(archs, key=len) # Filter out any disallowed extensions for extension in disallowed_extensions: base = arch[:4] extensions = arch[4:].replace(extension, "") arch = base + extensions return arch def arch2arch(arch): """Remap certain arch strings which are known to not be supportable""" # pylint: disable=too-many-return-statements if arch == "rv32ea": return "rv32e" if arch in ["rv32ema", "rv32emc"]: return "rv32em" if arch == "rv32ia": return "rv32i" if arch == "rv32ima": return "rv32im" if arch == "rv64ia": return "rv64i" if arch == "rv64ima": return "rv64im" return arch def arch2abi(arch): """Map arch to abi""" # pylint: disable=too-many-return-statements if "rv32e" in arch: if "d" in arch: return "ilp32ed" if "f" in arch: return "ilp32ef" return "ilp32e" if "rv32i" in arch: if "d" in arch: return "ilp32d" if "f" in arch: return "ilp32f" return "ilp32" if "rv64i" in arch: if "d" in arch: return "lp64d" if "f" in arch: return "lp64f" return "lp64" raise Exception("Unknown arch %s" % arch) def type2tag(target_type): """Given the target type, return the list of TARGET_TAGS to parameterize Freedom E SDK""" if "arty" in target_type or "vc707" in target_type or "vcu118" in target_type: tags = "fpga openocd" elif "hifive1-revb" in target_type: tags = "board jlink" elif "rtl" in target_type: tags = "rtl" elif "spike" in target_type: tags = "spike" elif "qemu" in target_type: tags = "qemu" else: tags = "board openocd" return 
tags def get_port_width(tree): """Get the width of the RTL port, if the entry node specifies it""" metal_entry = tree.chosen("metal,entry") port_width = None if metal_entry: entry_node = tree.get_by_reference(metal_entry[0]) # If the entry node is a testram, the parent node # is the port and has the port width port_width_bytes = entry_node.parent.get_field( "sifive,port-width-bytes") # If the entry node is /memory, the node itself # has the port width if port_width_bytes is None: port_width_bytes = entry_node.get_field( "sifive,port-width-bytes") if port_width_bytes is not None: port_width = 8 * port_width_bytes return port_width def get_series(boot_hart, bitness): """Given the boot hart and the bitness, get the SiFive core series name""" hart_compat = boot_hart.get_field("compatible") series = None if "bullet" in hart_compat: series = "sifive-7-series" elif "caboose" in hart_compat: series = "sifive-2-series" elif "rocket" in hart_compat: series = "sifive-3-series" if bitness == 32 else "sifive-5-series" return series def main(argv): """Parse arguments, extract data, and render the settings.mk to file""" # pylint: disable=too-many-locals parsed_args = parse_arguments(argv) tree = pydevicetree.Devicetree.parseFile( parsed_args.dts, followIncludes=True) boot_hart = get_boot_hart(tree) archs = get_all_arch(tree) arch = arch2arch(get_greatest_common_arch(archs)) bitness = 32 if "32" in arch else 64 abi = arch2abi(arch) codemodel = "medlow" if bitness == 32 else "medany" series = get_series(boot_hart, bitness) tags = type2tag(parsed_args.type) if "rtl" in parsed_args.type: dhry_iters = 2000 core_iters = 5 freertos_wait_ms = 10 else: dhry_iters = 20000000 core_iters = 5000 freertos_wait_ms = 1000 settings = """# Copyright (C) 2020 SiFive Inc # SPDX-License-Identifier: Apache-2.0 RISCV_ARCH = %s RISCV_ABI = %s RISCV_CMODEL = %s RISCV_SERIES = %s TARGET_TAGS = %s TARGET_DHRY_ITERS = %d TARGET_CORE_ITERS = %d TARGET_FREERTOS_WAIT_MS = %d""" % (arch, abi, codemodel, series, tags, dhry_iters, core_iters, freertos_wait_ms) port_width = get_port_width(tree) if port_width is not None: settings += "\n\nCOREIP_MEM_WIDTH = %d" % port_width if parsed_args.output: parsed_args.output.write(settings) parsed_args.output.close() else: print(settings) if __name__ == "__main__": main(sys.argv[1:])
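A quick worked example of the arch reduction above; it assumes `get_greatest_common_arch` and `arch2abi` are imported from this script (the module name below is hypothetical) or copied out of it:

```python
from generate_settings import get_greatest_common_arch, arch2abi  # hypothetical module name

# Hypothetical two-hart design: a big rv32imafdc core next to an rv32imac one.
archs = ["rv32imafdc", "rv32imac"]
common = get_greatest_common_arch(archs)
print(common)            # rv32imac -- 'f' and 'd' are not implemented by both harts
print(arch2abi(common))  # ilp32    -- no float extensions remain, so the soft-float ABI
```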
macDetailCallback("24693e000000/24",[{"d":"2015-01-29","t":"add","a":"5F., No. 237, Sec. 1, Datong Rd., Xizhi Dist.\nNew Taipei City Taiwan 221\n\n","c":"TAIWAN, PROVINCE OF CHINA","o":"innodisk Corporation"},{"d":"2015-08-27","t":"change","a":"5F., No. 237, Sec. 1, Datong Rd., Xizhi Dist. New Taipei City Taiwan TW 221","c":"TW","o":"innodisk Corporation"}]);
from unittest import TestCase

from kata import PaginationHelper


class TestPaginationHelper(TestCase):
    def setUp(self):
        collection = range(1, 25)
        self.helper = PaginationHelper(collection, 10)

    def test_item_count(self):
        self.assertEqual(self.helper.item_count(), 24,
                         'item_count returned incorrect value')

    def test_page_count(self):
        self.assertEqual(self.helper.page_count(), 3,
                         'page_count is returning incorrect value.')

    def test_page_item_count(self):
        self.assertEqual(self.helper.page_item_count(1), 10,
                         'page_item_count is returning incorrect value.')
        self.assertEqual(self.helper.page_item_count(2), 4,
                         'page_item_count is returning incorrect value')
        self.assertEqual(self.helper.page_item_count(3), -1,
                         'page_item_count is returning incorrect value')

    def test_page_index(self):
        self.assertEqual(self.helper.page_index(0), 0,
                         'page_index returned incorrect value')
        self.assertEqual(self.helper.page_index(23), 2,
                         'page_index returned incorrect value')
        self.assertEqual(self.helper.page_index(24), -1,
                         'page_index returned incorrect value when provided an item_index argument that was out of '
                         'range')
        self.assertEqual(self.helper.page_index(40), -1,
                         'page_index returned incorrect value when provided an item_index argument that was out of '
                         'range')
        self.assertEqual(self.helper.page_index(3), 0,
                         'page_index returned incorrect value')
        self.assertEqual(self.helper.page_index(-1), -1,
                         'page_index returned incorrect value when provided an item_index argument that was out of '
                         'range. page_index(-1) should return -1')
        self.assertEqual(self.helper.page_index(-23), -1,
                         'page_index returned incorrect value when provided an item_index argument that was out of '
                         'range. page_index(-23) should return -1')
        self.assertEqual(self.helper.page_index(-15), -1,
                         'page_index returned incorrect value when provided an item_index argument that was out of '
                         'range.')

    def test_empty_array(self):
        helper = PaginationHelper([], 10)
        self.assertEqual(helper.page_index(0), -1,
                         'page_index(0) should return -1 for an empty collection')
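These tests fully pin down the contract of `kata.PaginationHelper`; a minimal sketch of an implementation that satisfies them (out-of-range indices return -1, and the last page may be short):

```python
class PaginationHelper:
    def __init__(self, collection, items_per_page):
        self._collection = list(collection)
        self._per_page = items_per_page

    def item_count(self):
        return len(self._collection)

    def page_count(self):
        # Ceiling division: 24 items at 10 per page -> 3 pages.
        return -(-len(self._collection) // self._per_page)

    def page_item_count(self, page_index):
        if page_index < 0 or page_index >= self.page_count():
            return -1
        if page_index < self.page_count() - 1:
            return self._per_page
        # The last page holds whatever remains after the full pages.
        return len(self._collection) - page_index * self._per_page

    def page_index(self, item_index):
        if item_index < 0 or item_index >= len(self._collection):
            return -1
        return item_index // self._per_page
```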
// Copyright (C) 2016 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/*---
description: Property descriptor for `Number.MAX_SAFE_INTEGER`
esid: sec-number.max_safe_integer
es6id: 20.1.2.6
info: >
  The value of Number.MAX_SAFE_INTEGER is 9007199254740991

  This property has the attributes { [[Writable]]: false, [[Enumerable]]:
  false, [[Configurable]]: false }.
includes: [propertyHelper.js]
---*/

var desc = Object.getOwnPropertyDescriptor(Number, 'MAX_SAFE_INTEGER');

assert.sameValue(desc.set, undefined, 'Does not define a `set` accessor');
assert.sameValue(desc.get, undefined, 'Does not define a `get` accessor');
assert.sameValue(desc.value, 9007199254740991);

verifyNotEnumerable(Number, 'MAX_SAFE_INTEGER');
verifyNotWritable(Number, 'MAX_SAFE_INTEGER');
verifyNotConfigurable(Number, 'MAX_SAFE_INTEGER');
# -*- coding:utf-8 -*-
import oneflow as flow
import oneflow.nn as nn

from models.resnet50 import resnet50


def conv3x3(
    in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1
) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )


class ConvRelu(nn.Module):
    def __init__(self, in_: int, out: int) -> None:
        super(ConvRelu, self).__init__()
        self.conv = conv3x3(in_, out)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.activation(x)
        return x


class DecoderBlockLinkNet(nn.Module):
    def __init__(self, in_channels, n_filters) -> None:
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
        self.norm1 = nn.BatchNorm2d(in_channels // 4)
        self.deconv2 = nn.ConvTranspose2d(
            in_channels // 4,
            in_channels // 4,
            kernel_size=4,
            stride=2,
            padding=1,
            output_padding=0,
        )
        self.norm2 = nn.BatchNorm2d(in_channels // 4)
        self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
        self.norm3 = nn.BatchNorm2d(n_filters)

    def forward(self, x):
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.deconv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.norm3(x)
        x = self.relu(x)
        return x


class LinkNet34(nn.Module):
    def __init__(
        self, num_classes=1, num_channels=3, pretrained=False, pretrained_model_path=""
    ):
        super().__init__()
        assert num_channels == 3
        self.num_classes = num_classes
        filters = [64, 128, 256, 512]
        # Note: these filter counts match resnet34's layer widths; the
        # resnet50 encoder below outputs 256/512/1024/2048 channels, so the
        # skip additions in forward() assume resnet34-style widths.
        resnet = resnet50()

        # If pretrained=True, load the pretrained resnet weights.
        if pretrained:
            resnet.load_state_dict(flow.load(pretrained_model_path))

        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4

        # Decoder
        self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
        self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
        self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
        self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])

        # Final Classifier
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)

    # noinspection PyCallingNonCallable
    def forward(self, x):
        # Encoder
        x = self.firstconv(x)
        x = self.firstbn(x)
        x = self.firstrelu(x)
        x = self.firstmaxpool(x)
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)
        # return e1, e2, e3, e4

        # Decoder with Skip Connections
        d4 = self.decoder4(e4) + e3
        d3 = self.decoder3(d4) + e2
        d2 = self.decoder2(d3) + e1
        d1 = self.decoder1(d2)

        # Final Classification
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)

        if self.num_classes > 1:
            # nn.logsoftmax does not exist; apply the LogSoftmax module
            # over the class dimension instead.
            x_out = nn.LogSoftmax(dim=1)(f5)
        else:
            x_out = f5
        return x_out
import { combineReducers, createStore, applyMiddleware } from 'redux'
import Immutable from 'seamless-immutable'
import createSagaMiddleware from 'redux-saga'
import rootSaga from '../Sagas'

import { reducer as NewsReducer } from './NewsRedux'

export const reducers = combineReducers({
  news: NewsReducer
})

export const sagaMiddleware = createSagaMiddleware()

export default (initialState) => {
  const store = createStore(
    reducers,
    Immutable(initialState),
    applyMiddleware(sagaMiddleware)
  )

  store.runSaga = sagaMiddleware.run
  store.runSaga(rootSaga)

  return store
}
import numpy as np
import time

from bicycle_dynamics import BicycleDynamics
from irs_lqr.all import IrsLqrParameters, IrsLqrZeroOrder

import matplotlib.pyplot as plt
from matplotlib import cm

# 1. Load dynamics.
bicycle = BicycleDynamics(0.1)

# 2. Set up desired trajectory and cost parameters.
timesteps = 100
params = IrsLqrParameters()
params.Q = np.diag([5, 5, 3, 0.1, 0.1])
params.Qd = np.diag([50, 50, 30, 1, 1])
params.R = np.diag([1, 0.1])
params.x0 = np.array([0, 0, 0, 0, 0])
xd = np.array([3.0, 1.0, np.pi/2, 0, 0])
params.xd_trj = np.tile(xd, (timesteps+1, 1))
params.xbound = [
    -np.array([1e4, 1e4, 1e4, 1e4, np.pi/4]),
    np.array([1e4, 1e4, 1e4, 1e4, np.pi/4])
]
params.ubound = np.array([
    -np.array([1e4, 1e4]),
    np.array([1e4, 1e4])
])
params.u_trj_initial = np.tile(np.array([0.1, 0.0]), (timesteps, 1))

# 3. Set up initial guess.
x_initial_var = np.array([2.0, 2.0, 1.0, 2.0, 0.01])
u_initial_var = np.array([2.0, 1.0])
num_samples = 10000

# Sampling function for variance stepping.
def sampling(xbar, ubar, iter):
    dx = np.random.normal(
        0.0, (x_initial_var / (iter ** 0.5)),
        size=(num_samples, bicycle.dim_x))
    du = np.random.normal(
        0.0, (u_initial_var / (iter ** 0.5)),
        size=(num_samples, bicycle.dim_u))
    return dx, du

# 4. Solve.
solver = IrsLqrZeroOrder(bicycle, params, sampling)

time_now = time.time()
solver.iterate(20)
print("Final cost: " + str(solver.cost))
print("Elapsed time: " + str(time.time() - time_now))

plt.figure()
plt.axis('equal')
colormap = cm.get_cmap("jet")
num_iters = len(solver.x_trj_lst)
for i in range(num_iters):
    x_trj = solver.x_trj_lst[i]
    jm = colormap(i / num_iters)
    plt.plot(x_trj[:, 0], x_trj[:, 1],
             color=(jm[0], jm[1], jm[2], (i+1) / num_iters))
plt.show()
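The sampling callback implements the zero-order solver's variance stepping: the standard deviation used at iteration k is the initial value scaled by 1/sqrt(k), so exploration narrows as the trajectory converges. A standalone sketch of that schedule:

# Sketch: how the 1/sqrt(k) schedule shrinks the sampling noise per iteration.
import numpy as np

x_initial_var = np.array([2.0, 2.0, 1.0, 2.0, 0.01])
for k in (1, 4, 9, 16):
    print(k, x_initial_var / (k ** 0.5))  # e.g. k=4 halves the initial std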
from __future__ import unicode_literals from __future__ import absolute_import import copy import datetime import json import time import django import django.utils.timezone as timezone from django.test import TestCase, TransactionTestCase import error.test.utils as error_test_utils import job.test.utils as job_test_utils import storage.test.utils as storage_test_utils import trigger.test.utils as trigger_test_utils from error.models import Error from job.configuration.data.exceptions import InvalidConnection from job.configuration.data.job_data import JobData from job.configuration.interface.job_interface import JobInterface from job.configuration.results.job_results import JobResults from job.error.mapping import create_legacy_error_mapping from job.seed.results.job_results import JobResults as SeedJobResults from job.models import Job, JobExecution, JobExecutionOutput, JobInputFile, JobType, JobTypeRevision, JobTypeTag from node.resources.json.resources import Resources from trigger.models import TriggerRule class TestJobManager(TransactionTestCase): def setUp(self): django.setup() def test_get_details(self): """Tests calling JobManager.get_details() with extra data inputs that should be ignored""" workspace_1 = storage_test_utils.create_workspace() file_1 = storage_test_utils.create_file(workspace=workspace_1) file_2 = storage_test_utils.create_file(workspace=workspace_1) interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Input 1', 'type': 'file', 'media_types': ['text/plain'], }]} job_type = job_test_utils.create_job_type(interface=interface) job = job_test_utils.create_job(job_type=job_type, status='PENDING') orig_data = { 'version': '1.0', 'input_data': [{ 'name': 'Input 1', 'file_id': file_1.id }, { 'name': 'Input 2', 'file_id': file_2.id }, { 'name': 'Input 3', 'value': 'hello' }]} data = copy.deepcopy(orig_data) job_data = JobData(data) Job.objects.populate_job_data(job, job_data) # populate_job_data() strips out extra inputs, so force them back in job.data = orig_data job.save() # No exception means success Job.objects.get_details(job.id) def test_populate_job_data(self): """Tests calling JobManager.populate_job_data()""" workspace_1 = storage_test_utils.create_workspace() workspace_2 = storage_test_utils.create_workspace() workspace_3 = storage_test_utils.create_workspace() file_1 = storage_test_utils.create_file(workspace=workspace_1, file_size=10485760.0) file_2 = storage_test_utils.create_file(workspace=workspace_2, file_size=104857600.0) interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Input 1', 'type': 'file', 'media_types': ['text/plain'], }, { 'name': 'Input 2', 'type': 'file', 'media_types': ['text/plain'], }], 'output_data': [{ 'name': 'Output 1', 'type': 'files', 'media_type': 'image/png', }]} job_type = job_test_utils.create_job_type(interface=interface) job = job_test_utils.create_job(job_type=job_type, status='PENDING', input_file_size=None) data = { 'version': '1.0', 'input_data': [{ 'name': 'Input 1', 'file_id': file_1.id }, { 'name': 'Input 2', 'file_id': file_2.id }], 'output_data': [{ 'name': 'Output 1', 'workspace_id': workspace_3.id }]} job_data = JobData(data) Job.objects.populate_job_data(job, job_data) job = Job.objects.get(id=job.id) # Make sure input file size is calculated and set self.assertEqual(job.input_file_size, 110) # Convert from file bytes to MiB to get 110 value # Make sure job input file models are created 
job_input_files = JobInputFile.objects.filter(job_id=job.id) self.assertEqual(len(job_input_files), 2) for job_input_file in job_input_files: if job_input_file.job_input == 'Input 1': self.assertEqual(job_input_file.input_file_id, file_1.id) elif job_input_file.job_input == 'Input 2': self.assertEqual(job_input_file.input_file_id, file_2.id) else: self.fail('Invalid input name: %s' % job_input_file.job_input) def test_populate_job_data_extra_inputs(self): """Tests calling JobManager.populate_job_data() with extra inputs""" workspace_1 = storage_test_utils.create_workspace() file_1 = storage_test_utils.create_file(workspace=workspace_1) file_2 = storage_test_utils.create_file(workspace=workspace_1) interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Input 1', 'type': 'file', 'media_types': ['text/plain'], }]} job_type = job_test_utils.create_job_type(interface=interface) job = job_test_utils.create_job(job_type=job_type, status='PENDING') data = { 'version': '1.0', 'input_data': [{ 'name': 'Input 1', 'file_id': file_1.id }, { 'name': 'Input 2', 'file_id': file_2.id }, { 'name': 'Input 3', 'value': 'hello' }]} job_data = JobData(data) Job.objects.populate_job_data(job, job_data) # Check that only Input 1 remains in the job_data job = Job.objects.get(id=job.id) data_dict = job.get_job_data().get_dict() self.assertEqual(len(data_dict['input_data']), 1) self.assertEqual(data_dict['input_data'][0]['name'], 'Input 1') def test_process_job_input(self): """Tests calling JobManager.process_job_input()""" date_1 = timezone.now() min_src_started_job_1 = date_1 - datetime.timedelta(days=200) max_src_ended_job_1 = date_1 + datetime.timedelta(days=200) date_2 = date_1 + datetime.timedelta(minutes=30) date_3 = date_1 + datetime.timedelta(minutes=40) date_4 = date_1 + datetime.timedelta(minutes=50) min_src_started_job_2 = date_1 - datetime.timedelta(days=500) max_src_ended_job_2 = date_1 + datetime.timedelta(days=500) workspace = storage_test_utils.create_workspace() file_1 = storage_test_utils.create_file(workspace=workspace, file_size=10485760.0) file_2 = storage_test_utils.create_file(workspace=workspace, file_size=104857600.0, source_started=date_2, source_ended=date_3) file_3 = storage_test_utils.create_file(workspace=workspace, file_size=987654321.0, source_started=min_src_started_job_1, source_ended=date_4) file_4 = storage_test_utils.create_file(workspace=workspace, file_size=46546.0, source_ended=max_src_ended_job_1) file_5 = storage_test_utils.create_file(workspace=workspace, file_size=83457.0, source_started=date_2) file_6 = storage_test_utils.create_file(workspace=workspace, file_size=42126588636633.0, source_ended=date_4) file_7 = storage_test_utils.create_file(workspace=workspace, file_size=76645464662354.0) file_8 = storage_test_utils.create_file(workspace=workspace, file_size=4654.0, source_started=min_src_started_job_2) file_9 = storage_test_utils.create_file(workspace=workspace, file_size=545.0, source_started=date_3, source_ended=max_src_ended_job_2) file_10 = storage_test_utils.create_file(workspace=workspace, file_size=0.154, source_ended=date_4) interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Input 1', 'type': 'file', 'media_types': ['text/plain'], }, { 'name': 'Input 2', 'type': 'files', 'media_types': ['text/plain'], }], 'output_data': [{ 'name': 'Output 1', 'type': 'files', 'media_type': 'image/png', }]} job_type = 
job_test_utils.create_job_type(interface=interface) data_1 = { 'version': '1.0', 'input_data': [{ 'name': 'Input 1', 'file_id': file_1.id }, { 'name': 'Input 2', 'file_ids': [file_2.id, file_3.id, file_4.id, file_5.id] }], 'output_data': [{ 'name': 'Output 1', 'workspace_id': workspace.id }]} data_2 = { 'version': '1.0', 'input_data': [{ 'name': 'Input 1', 'file_id': file_6.id }, { 'name': 'Input 2', 'file_ids': [file_7.id, file_8.id, file_9.id, file_10.id] }], 'output_data': [{ 'name': 'Output 1', 'workspace_id': workspace.id }]} job_1 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING', input_file_size=None, input=data_1) job_2 = job_test_utils.create_job(job_type=job_type, num_exes=0, status='PENDING', input_file_size=None, input=data_2) # Execute method Job.objects.process_job_input(job_1) Job.objects.process_job_input(job_2) # Retrieve updated job models jobs = Job.objects.filter(id__in=[job_1.id, job_2.id]).order_by('id') job_1 = jobs[0] job_2 = jobs[1] # Check jobs for expected fields self.assertEqual(job_1.input_file_size, 1053.0) self.assertEqual(job_1.source_started, min_src_started_job_1) self.assertEqual(job_1.source_ended, max_src_ended_job_1) self.assertEqual(job_2.input_file_size, 113269857.0) self.assertEqual(job_2.source_started, min_src_started_job_2) self.assertEqual(job_2.source_ended, max_src_ended_job_2) # Make sure job input file models are created job_input_files = JobInputFile.objects.filter(job_id=job_1.id) self.assertEqual(len(job_input_files), 5) input_files_dict = {'Input 1': set(), 'Input 2': set()} for job_input_file in job_input_files: input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id) self.assertDictEqual(input_files_dict, {'Input 1': {file_1.id}, 'Input 2': {file_2.id, file_3.id, file_4.id, file_5.id}}) job_input_files = JobInputFile.objects.filter(job_id=job_2.id) self.assertEqual(len(job_input_files), 5) input_files_dict = {'Input 1': set(), 'Input 2': set()} for job_input_file in job_input_files: input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id) self.assertDictEqual(input_files_dict, {'Input 1': {file_6.id}, 'Input 2': {file_7.id, file_8.id, file_9.id, file_10.id}}) def test_process_job_output(self): """Tests calling JobManager.process_job_output()""" output_1 = JobResults() output_1.add_file_parameter('foo', 1) output_2 = JobResults() output_2.add_file_parameter('foo', 2) # These jobs have completed and have their execution results job_exe_1 = job_test_utils.create_job_exe(status='COMPLETED', output=output_1) job_exe_2 = job_test_utils.create_job_exe(status='COMPLETED', output=output_2) # These jobs have their execution results, but have not completed job_exe_3 = job_test_utils.create_job_exe(status='RUNNING') job_exe_4 = job_test_utils.create_job_exe(status='RUNNING') for job_exe in [job_exe_3, job_exe_4]: job_exe_output = JobExecutionOutput() job_exe_output.job_exe_id = job_exe.id job_exe_output.job_id = job_exe.job_id job_exe_output.job_type_id = job_exe.job.job_type_id job_exe_output.exe_num = job_exe.exe_num job_exe_output.output = JobResults().get_dict() job_exe_output.save() # These jobs have completed, but do not have their execution results job_exe_5 = job_test_utils.create_job_exe(status='RUNNING') job_exe_6 = job_test_utils.create_job_exe(status='RUNNING') for job in [job_exe_5.job, job_exe_6.job]: job.status = 'COMPLETED' job.save() # Test method job_ids = [job_exe.job_id for job_exe in [job_exe_1, job_exe_2, job_exe_3, job_exe_4, job_exe_5, job_exe_6]] result_ids 
= Job.objects.process_job_output(job_ids, timezone.now()) self.assertEqual(set(result_ids), {job_exe_1.job_id, job_exe_2.job_id}) # Jobs 1 and 2 should have output populated, jobs 3 through 6 should not jobs = list(Job.objects.filter(id__in=job_ids).order_by('id')) self.assertEqual(len(jobs), 6) self.assertTrue(jobs[0].has_output()) self.assertDictEqual(jobs[0].output, output_1.get_dict()) self.assertTrue(jobs[1].has_output()) self.assertDictEqual(jobs[1].output, output_2.get_dict()) self.assertFalse(jobs[2].has_output()) self.assertFalse(jobs[3].has_output()) self.assertFalse(jobs[4].has_output()) self.assertFalse(jobs[5].has_output()) def test_queue_job_timestamps(self): """Tests that job attributes are updated when a job is queued.""" job = job_test_utils.create_job(num_exes=1, status='CANCELED', input={}, started=timezone.now(), ended=timezone.now()) Job.objects.update_jobs_to_queued([job], timezone.now(), requeue=True) job = Job.objects.get(pk=job.id) self.assertEqual(job.status, 'QUEUED') self.assertIsNotNone(job.queued) self.assertIsNone(job.started) self.assertIsNone(job.ended) def test_queue_superseded_jobs(self): """Tests that JobManager.update_jobs_to_queued() does not queue superseded jobs""" job = job_test_utils.create_job(status='FAILED') Job.objects.supersede_jobs_old([job], timezone.now()) job_ids = Job.objects.update_jobs_to_queued([job], timezone.now()) job = Job.objects.get(pk=job.id) self.assertListEqual(job_ids, []) self.assertEqual(job.status, 'FAILED') self.assertTrue(job.is_superseded) def test_superseded_job(self): """Tests creating a job that supersedes another job""" old_job = job_test_utils.create_job() event = trigger_test_utils.create_trigger_event() new_job = Job.objects.create_job(old_job.job_type, event.id, superseded_job=old_job, delete_superseded=False) new_job.save() when = timezone.now() Job.objects.supersede_jobs_old([old_job], when) new_job = Job.objects.get(pk=new_job.id) self.assertEqual(new_job.status, 'PENDING') self.assertFalse(new_job.is_superseded) self.assertEqual(new_job.root_superseded_job_id, old_job.id) self.assertEqual(new_job.superseded_job_id, old_job.id) self.assertFalse(new_job.delete_superseded) self.assertIsNone(new_job.superseded) old_job = Job.objects.get(pk=old_job.id) self.assertTrue(old_job.is_superseded) self.assertEqual(old_job.superseded, when) def test_update_status_running(self): """Tests that job attributes are updated when a job is running.""" job_1 = job_test_utils.create_job(num_exes=1, started=None, ended=timezone.now()) job_2 = job_test_utils.create_job(num_exes=1, started=None, ended=timezone.now()) when = timezone.now() Job.objects.update_status([job_1, job_2], 'RUNNING', when) jobs = Job.objects.filter(id__in=[job_1.id, job_2.id]) for job in jobs: self.assertEqual(job.status, 'RUNNING') self.assertEqual(job.started, when) self.assertIsNone(job.ended) self.assertEqual(job.last_status_change, when) def test_update_status_pending(self): """Tests that job attributes are updated when a job is pending.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) Job.objects.update_status([job], 'PENDING', timezone.now()) self.assertEqual(job.status, 'PENDING') self.assertIsNone(job.ended) def test_update_status_blocked(self): """Tests that job attributes are updated when a job is blocked.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) Job.objects.update_status([job], 'BLOCKED', timezone.now()) self.assertEqual(job.status, 'BLOCKED') 
self.assertIsNone(job.ended) def test_update_status_queued(self): """Tests that queued status updates are rejected.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) self.assertRaises(Exception, Job.objects.update_status, [job], 'QUEUED', timezone.now()) def test_update_status_failed(self): """Tests that job attributes are updated when a job is failed.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) error = error_test_utils.create_error() self.assertRaises(Exception, Job.objects.update_status, [job], 'FAILED', timezone.now()) self.assertRaises(Exception, Job.objects.update_status, [job], 'RUNNING', timezone.now(), error) Job.objects.update_status([job], 'FAILED', timezone.now(), error) self.assertEqual(job.status, 'FAILED') self.assertIsNotNone(job.ended) def test_update_status_completed(self): """Tests that job attributes are updated when a job is completed.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) Job.objects.update_status([job], 'COMPLETED', timezone.now()) self.assertEqual(job.status, 'COMPLETED') self.assertIsNotNone(job.ended) def test_update_status_canceled(self): """Tests that job attributes are updated when a job is canceled.""" job = job_test_utils.create_job(num_exes=1, started=timezone.now(), ended=timezone.now()) Job.objects.update_status([job], 'CANCELED', timezone.now()) self.assertEqual(job.status, 'CANCELED') self.assertIsNotNone(job.ended) class TestJob(TestCase): def setUp(self): django.setup() def test_is_ready_to_requeue(self): """Tests checking the job status for requeue eligibility.""" self.assertFalse(Job(status='PENDING').is_ready_to_requeue) self.assertFalse(Job(status='BLOCKED').is_ready_to_requeue) self.assertFalse(Job(status='QUEUED').is_ready_to_requeue) self.assertFalse(Job(status='RUNNING').is_ready_to_requeue) self.assertTrue(Job(status='FAILED').is_ready_to_requeue) self.assertFalse(Job(status='COMPLETED').is_ready_to_requeue) self.assertTrue(Job(status='CANCELED').is_ready_to_requeue) def test_get_seed_job_results(self): """Test retrieving job results from a Seed job type""" job_type = job_test_utils.create_seed_job_type() input = { "version": "1.0", "input_data": {}, "output_data": {} } job = job_test_utils.create_job(job_type, input=input) self.assertIsInstance(job.get_job_results(), SeedJobResults) class TestJobExecutionManager(TransactionTestCase): """Tests for the job execution model manager""" fixtures = ['ingest_job_types.json'] def setUp(self): django.setup() self.job_type_1 = job_test_utils.create_job_type() self.job_type_2 = job_test_utils.create_job_type() self.job_1a = job_test_utils.create_job(job_type=self.job_type_1) job_test_utils.create_job_exe(job=self.job_1a, status='FAILED') time.sleep(.01) job_test_utils.create_job_exe(job=self.job_1a, status='FAILED') time.sleep(.01) job_test_utils.create_job_exe(job=self.job_1a, status='COMPLETED') time.sleep(.01) self.last_run_1a = job_test_utils.create_job_exe(job=self.job_1a, status='RUNNING') self.job_1b = job_test_utils.create_job(job_type=self.job_type_1, status='FAILED') self.last_run_1b = job_test_utils.create_job_exe(job=self.job_1b, status='FAILED') self.job_2a = job_test_utils.create_job(job_type=self.job_type_2) job_test_utils.create_job_exe(job=self.job_2a, status='FAILED') time.sleep(.01) job_test_utils.create_job_exe(job=self.job_2a, status='FAILED') time.sleep(.01) job_test_utils.create_job_exe(job=self.job_2a, status='COMPLETED') time.sleep(.01) 
self.last_run_2a = job_test_utils.create_job_exe(job=self.job_2a, status='RUNNING') self.job_2b = job_test_utils.create_job(job_type=self.job_type_2) self.last_run_2b = job_test_utils.create_job_exe(job=self.job_2b, status='COMPLETED') def test_get_latest(self): job_query = Job.objects.all() expected_result = { self.job_1a.id: self.last_run_1a, self.job_1b.id: self.last_run_1b, self.job_2a.id: self.last_run_2a, self.job_2b.id: self.last_run_2b, } latest_job_exes = JobExecution.objects.get_latest(job_query) self.assertDictEqual(latest_job_exes, expected_result, 'latest job executions do not match expected results') def test_get_latest_job_exes_with_a_filter(self): job_query = Job.objects.filter(status='FAILED') expected_result = { self.job_1b.id: self.last_run_1b, } latest_job_exes = JobExecution.objects.get_latest(job_query) self.assertDictEqual(latest_job_exes, expected_result, 'latest job executions do not match expected results') class TestJobType(TransactionTestCase): def setUp(self): django.setup() seed_interface_str = \ """ { "seedVersion": "1.0.0", "job": { "name": "test", "jobVersion": "1.0.0", "packageVersion": "1.0.0", "title": "Test job to exercise Seed functionality", "description": "Reads input file and ", "tags": [ "testing", "seed" ], "maintainer": { "name": "John Doe", "organization": "E-corp", "email": "jdoe@example.com", "url": "http://www.example.com", "phone": "666-555-4321" }, "timeout": 3600, "interface": { "command": "${INPUT_TEXT} ${INPUT_FILES} ${READ_LENGTH}", "inputs": { "files": [ { "name": "INPUT_TEXT", "mediaTypes": [ "text/plain" ], "partial": true }, { "name": "INPUT_FILES", "multiple": true } ], "json": [ { "name": "READ_LENGTH", "type": "integer" }, { "name": "OUTPUT_COUNT", "type": "integer" } ] }, "outputs": { "files": [ { "name": "OUTPUT_FILES", "mediaType": "text/plain", "multiple": true, "pattern": "output_files*.txt" }, { "name": "OUTPUT_TEXT", "mediaType": "text/plain", "pattern": "output_text.txt" } ], "json": [ { "name": "cell_count", "key": "cellCount", "type": "integer" } ] }, "mounts": [ { "name": "MOUNT_PATH", "path": "/the/container/path", "mode": "ro" } ], "settings": [ { "name": "DB_HOST", "secret": false }, { "name": "DB_PASS", "secret": true } ] }, "resources": { "scalar": [ { "name": "cpus", "value": 1.5 }, { "name": "mem", "value": 244.0 }, { "name": "sharedMem", "value": 1.0 }, { "name": "disk", "value": 11.0, "inputMultiplier": 4.0 } ] }, "errors": [ { "code": 1, "name": "data-issue", "title": "Data Issue discovered", "description": "There was a problem with input data", "category": "data" }, { "code": 2, "name": "missing-mount", "title": "Missing mount", "description": "Expected mount point not available at run time", "category": "job" }, { "code": 3, "name": "missing-setting", "title": "Missing setting", "description": "Expected setting not defined in environment variable", "category": "job" }, { "code": 4, "name": "missing-env", "title": "Missing environment", "description": "Expected environment not provided", "category": "job" } ] } } """ self.seed_job_type = job_test_utils.create_job_type(interface=json.loads(seed_interface_str)) self.legacy_job_type = job_test_utils.create_job_type() self.legacy_job_type.cpus_required = 5.0 self.legacy_job_type.mem_const_required = 6.0 self.legacy_job_type.mem_mult_required = 7.0 self.legacy_job_type.shared_mem_required = 8.0 self.legacy_job_type.disk_out_const_required = 9.0 self.legacy_job_type.disk_out_mult_required = 10.0 def test_get_legacy_cpu_resource_from_legacy_interface(self): 
job_type = self.legacy_job_type value = job_type.get_cpus_required() self.assertEqual(job_type.cpus_required, value) def test_get_legacy_mem_resource_from_legacy_interface(self): job_type = self.legacy_job_type value = job_type.get_mem_const_required() self.assertEqual(job_type.mem_const_required, value) def test_get_legacy_mem_resource_multiplier_from_legacy_interface(self): job_type = self.legacy_job_type value = job_type.get_mem_mult_required() self.assertEqual(job_type.mem_mult_required, value) def test_get_legacy_sharedmem_resource_from_legacy_interface(self): job_type = self.legacy_job_type value = job_type.get_shared_mem_required() self.assertEqual(job_type.shared_mem_required, value) def test_get_legacy_disk_resource_from_legacy_interface(self): job_type = self.legacy_job_type value = job_type.get_disk_out_const_required() self.assertEqual(job_type.disk_out_const_required, value) def test_get_legacy_disk_resource_multiplier_from_legacy_interface(self): job_type = self.legacy_job_type value = job_type.get_disk_out_mult_required() self.assertEqual(job_type.disk_out_mult_required, value) def test_get_legacy_cpu_resource_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_cpus_required() self.assertEqual(1.5, value) def test_get_legacy_cpu_resource_multiplier_from_seed_interface(self): job_type = self.seed_job_type value = job_type._get_legacy_resource('cpus', job_type.cpus_required, False) self.assertEqual(0.0, value) def test_get_legacy_mem_resource_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_mem_const_required() self.assertEqual(244.0, value) def test_get_legacy_mem_resource_multiplier_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_mem_mult_required() self.assertEqual(0.0, value) def test_get_legacy_sharedmem_resource_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_shared_mem_required() self.assertEqual(1.0, value) def test_get_legacy_sharedmem_resource_multiplier_from_seed_interface(self): job_type = self.seed_job_type value = job_type._get_legacy_resource('sharedmem', job_type.shared_mem_required, False) self.assertEqual(0.0, value) def test_get_legacy_disk_resource_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_disk_out_const_required() self.assertEqual(11.0, value) def test_get_legacy_disk_resource_multiplier_from_seed_interface(self): job_type = self.seed_job_type value = job_type.get_disk_out_mult_required() self.assertEqual(4.0, value) def test_get_tagged_docker_image_from_tagged_image(self): job_type = self.seed_job_type job_type.docker_image = 'image:tag' # Should pull from packageVersion of Seed Manifest self.assertEqual('image:1.0.0', job_type.get_tagged_docker_image()) def test_get_tagged_docker_image_from_untagged_image(self): job_type = self.seed_job_type job_type.docker_image = 'image' # Should pull from packageVersion of Seed Manifest self.assertEqual('image:1.0.0', job_type.get_tagged_docker_image()) def test_get_tagged_docker_image_from_docker_image_legacy_job_type(self): job_type = self.legacy_job_type job_type.docker_image = 'image:tag' # Should ONLY use docker_image field with legacy job type self.assertEqual('image:tag', job_type.get_tagged_docker_image()) class TestJobTypeManagerCreateJobType(TransactionTestCase): def setUp(self): django.setup() self.workspace = storage_test_utils.create_workspace() self.error = error_test_utils.create_error() interface = { 'version': '1.0', 'command': 'my_command', 
'command_arguments': 'args', 'input_data': [{ 'name': 'Test Input 1', 'type': 'file', 'media_types': ['text/plain'], }], 'output_data': [{ 'name': 'Test Output 1', 'type': 'files', 'media_type': 'image/png', }]} self.job_interface = JobInterface(interface) self.configuration = { 'version': '1.0', 'condition': { 'media_type': 'text/plain' }, 'data': { 'input_data_name': 'Test Input 1', 'workspace_name': self.workspace.name } } self.trigger_config = job_test_utils.MockTriggerRuleConfiguration(job_test_utils.MOCK_TYPE, self.configuration) self.error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '-15': self.error.name, } }) def test_successful_no_trigger_rule(self): """Tests calling JobTypeManager.create_job_type_v5() successfully with no trigger rule or error mapping""" name = 'my-job-type' version = '1.0' # Call test job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertIsNone(job_type.trigger_rule_id) self.assertSetEqual(set(job_type.get_error_mapping()._mapping.keys()), set()) def test_successful_with_trigger_rule(self): """Tests calling JobTypeManager.create_job_type_v5() successfully with a trigger rule and error mapping""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) # Call test job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule, self.error_mapping) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertEqual(job_type.trigger_rule_id, trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertTrue(trigger_rule.is_active) self.assertSetEqual(set(job_type.get_error_mapping()._mapping.keys()), {-15}) def test_invalid_trigger_rule(self): """Tests calling JobTypeManager.create_job_type_v5() with an invalid trigger rule""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_ERROR_TYPE, configuration=self.trigger_config.get_dict()) # Call test self.assertRaises(InvalidConnection, JobType.objects.create_job_type_v5, name, version, self.job_interface, trigger_rule, self.error_mapping) def test_successful_other_fields(self): """Tests calling JobTypeManager.create_job_type_v5() successfully with additional fields""" name = 'my-job-type' version = '1.0' title = 'my title' description = 'my-description' priority = 13 custom_resources = Resources({'resources': {'foo': 10.0}}) docker_params = [["a","1"],["b","2"]] # Call test job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, title=title, description=description, priority=priority, docker_params=docker_params, custom_resources=custom_resources) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertIsNone(job_type.trigger_rule_id) 
self.assertSetEqual(set(job_type.get_error_mapping()._mapping.keys()), set()) self.assertDictEqual(job_type.get_custom_resources().get_dict(), custom_resources.get_dict()) self.assertEqual(job_type.description, description) self.assertEqual(job_type.priority, priority) self.assertIsNone(job_type.deprecated) self.assertIsNone(job_type.paused) self.assertEqual(job_type.docker_params, docker_params) def test_successful_paused(self): """Tests calling JobTypeManager.create_job_type_v5() and pausing it""" name = 'my-job-type' version = '1.0' title = 'my title' description = 'my-description' priority = 13 is_paused = True # Call test job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, title=title, description=description, priority=priority, is_paused=is_paused) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertIsNone(job_type.trigger_rule_id) self.assertSetEqual(set(job_type.get_error_mapping()._mapping.keys()), set()) self.assertEqual(job_type.description, description) self.assertEqual(job_type.priority, priority) self.assertEqual(job_type.is_paused, is_paused) self.assertIsNotNone(job_type.paused) def test_uneditable_field(self): """Tests calling JobTypeManager.create_job_type_v5() with an uneditable field""" name = 'my-job-type' version = '1.0' title = 'my title' description = 'my-description' priority = 13 is_system = True # Call test self.assertRaises(Exception, JobType.objects.create_job_type_v5, name, version, self.job_interface, title=title, description=description, priority=priority, is_system=is_system) def test_invalid_error_mapping(self): """Tests calling JobTypeManager.create_job_type_v5() with an invalid error mapping""" name = 'my-job-type' version = '1.0' title = 'my title' description = 'my-description' priority = 13 is_system = True error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '1': 'test-invalid-error', } }) # Call test self.assertRaises(Exception, JobType.objects.create_job_type_v5, name, version, self.job_interface, error_mapping=error_mapping, title=title, description=description, priority=priority, is_system=is_system) class TestJobTypeManagerEditJobType(TransactionTestCase): def setUp(self): django.setup() self.workspace = storage_test_utils.create_workspace() self.error = error_test_utils.create_error() interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Test Input 1', 'type': 'file', 'media_types': ['text/plain'], }], 'output_data': [{ 'name': 'Test Output 1', 'type': 'files', 'media_type': 'image/png', }]} self.job_interface = JobInterface(interface) new_interface = { 'version': '1.0', 'command': 'my_command', 'command_arguments': 'args', 'input_data': [{ 'name': 'Test Input 2', 'type': 'files', 'media_types': ['image/png', 'image/tiff'], }], 'output_data': [{ 'name': 'Test Output 2', 'type': 'file', }]} self.new_job_interface = JobInterface(new_interface) self.configuration = { 'version': '1.0', 'condition': { 'media_type': 'text/plain' }, 'data': { 'input_data_name': 'Test Input 1', 'workspace_name': self.workspace.name } } self.trigger_config = job_test_utils.MockTriggerRuleConfiguration(job_test_utils.MOCK_TYPE, self.configuration) self.new_configuration = { 'version': '1.0', 'condition': { 'media_type': 'application/json' }, 'data': { 'input_data_name': 
'Test Input 1', 'workspace_name': self.workspace.name } } self.new_trigger_config = job_test_utils.MockTriggerRuleConfiguration(job_test_utils.MOCK_TYPE, self.new_configuration) def test_change_general_fields(self): """Tests calling JobTypeManager.edit_job_type_v5() with a change to some general fields""" name = 'my-job-type' version = '1.0' title = 'my title' priority = 12 error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '-15': self.error.name, } }) custom_resources = Resources({'resources': {'foo': 10.0}}) new_title = 'my new title' new_priority = 13 new_error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '-16': self.error.name, } }) new_custom_resources = Resources({'resources': {'foo': 100.0}}) new_is_paused = True trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule, title=title, priority=priority, error_mapping=error_mapping, custom_resources=custom_resources) # Call test JobType.objects.edit_job_type_v5(job_type.id, title=new_title, priority=new_priority, error_mapping=new_error_mapping, custom_resources=new_custom_resources, is_paused=new_is_paused) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertEqual(job_type.revision_num, 1) self.assertEqual(job_type.trigger_rule_id, trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertTrue(trigger_rule.is_active) self.assertEqual(job_type.title, new_title) self.assertEqual(job_type.priority, new_priority) self.assertSetEqual(set(job_type.get_error_mapping()._mapping.keys()), {-16}) self.assertDictEqual(job_type.get_custom_resources().get_dict(), new_custom_resources.get_dict()) self.assertEqual(job_type.is_paused, new_is_paused) self.assertIsNotNone(job_type.paused) def test_change_to_interface(self): """Tests calling JobTypeManager.edit_job_type_v5() with a change to the interface""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule) # Call test JobType.objects.edit_job_type_v5(job_type.id, self.new_job_interface, None, False) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.new_job_interface.get_dict()) self.assertEqual(job_type.revision_num, 2) self.assertEqual(job_type.trigger_rule_id, trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertTrue(trigger_rule.is_active) # New revision due to interface change num_of_revs = JobTypeRevision.objects.filter(job_type_id=job_type.id).count() self.assertEqual(num_of_revs, 2) def test_change_to_trigger_rule(self): """Tests calling JobTypeManager.edit_job_type_v5() with a change to the trigger rule""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) new_trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.new_trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule) # Call 
test JobType.objects.edit_job_type_v5(job_type.id, None, new_trigger_rule, False) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertEqual(job_type.trigger_rule_id, new_trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertFalse(trigger_rule.is_active) new_trigger_rule = TriggerRule.objects.get(pk=new_trigger_rule.id) self.assertTrue(new_trigger_rule.is_active) num_of_revs = JobTypeRevision.objects.filter(job_type_id=job_type.id).count() self.assertEqual(num_of_revs, 1) def test_remove_trigger_rule(self): """Tests calling JobTypeManager.edit_job_type_v5() that removes the trigger rule""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule) # Call test JobType.objects.edit_job_type_v5(job_type.id, None, None, True) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertIsNone(job_type.trigger_rule) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertFalse(trigger_rule.is_active) num_of_revs = JobTypeRevision.objects.filter(job_type_id=job_type.id).count() self.assertEqual(num_of_revs, 1) def test_change_to_both(self): """Tests calling JobTypeManager.edit_job_type_v5() with a change to both the definition and the trigger rule """ name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) new_trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.new_trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule) # Call test JobType.objects.edit_job_type_v5(job_type.id, self.new_job_interface, new_trigger_rule, False) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.new_job_interface.get_dict()) self.assertEqual(job_type.revision_num, 2) self.assertEqual(job_type.trigger_rule_id, new_trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertFalse(trigger_rule.is_active) new_trigger_rule = TriggerRule.objects.get(pk=new_trigger_rule.id) self.assertTrue(new_trigger_rule.is_active) # New revision due to definition change num_of_revs = JobTypeRevision.objects.filter(job_type_id=job_type.id).count() self.assertEqual(num_of_revs, 2) def test_invalid_trigger_rule(self): """Tests calling JobTypeManager.edit_job_type_v5() with a new invalid trigger rule""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) new_trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_ERROR_TYPE, configuration=self.new_trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule) # Call test 
self.assertRaises(InvalidConnection, JobType.objects.edit_job_type_v5, job_type.id, self.new_job_interface, new_trigger_rule, False) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertDictEqual(job_type.get_job_interface().get_dict(), self.job_interface.get_dict()) self.assertEqual(job_type.revision_num, 1) self.assertEqual(job_type.trigger_rule_id, trigger_rule.id) trigger_rule = TriggerRule.objects.get(pk=trigger_rule.id) self.assertTrue(trigger_rule.is_active) num_of_revs = JobTypeRevision.objects.filter(job_type_id=job_type.id).count() self.assertEqual(num_of_revs, 1) def test_system_job_type(self): """Tests calling JobTypeManager.edit_job_type_v5() for a system job type""" name = 'my-job-type' version = '1.0' title = 'my title' new_title = 'my new title' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule, title=title) job_type.is_system = True job_type.save() # Call test self.assertRaises(Exception, JobType.objects.edit_job_type_v5, job_type.id, title=new_title) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) # No Change self.assertEqual(job_type.title, title) def test_pause_system_job_type(self): """Tests calling JobTypeManager.edit_job_type_v5() and pausing a system job type""" name = 'my-job-type' version = '1.0' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule, is_paused=False) job_type.is_system = True job_type.save() # Call test JobType.objects.edit_job_type_v5(job_type.id, is_paused=True) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) self.assertEqual(job_type.is_paused, True) def test_uneditable_field(self): """Tests calling JobTypeManager.edit_job_type_v5() to change an uneditable field""" name = 'my-job-type' version = '1.0' title = 'my title' new_title = 'my new title' trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) job_type = JobType.objects.create_job_type_v5(name, version, self.job_interface, trigger_rule, title=title) # Call test self.assertRaises(Exception, JobType.objects.edit_job_type_v5, job_type.id, title=new_title, is_system=True) # Check results job_type = JobType.objects.select_related('trigger_rule').get(pk=job_type.id) # No change self.assertEqual(job_type.title, title) def test_invalid_error_mapping(self): """Tests calling JobTypeManager.edit_job_type_v5() with an invalid error mapping""" name = 'my-job-type' version = '1.0' title = 'my title' description = 'my-description' priority = 13 is_system = True error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '1': 'test-invalid-error', } }) # Call test self.assertRaises(Exception, JobType.objects.edit_job_type_v5, name, version, self.job_interface, error_mapping=error_mapping, title=title, description=description, priority=priority, is_system=is_system) class TestJobTypeManagerValidateJobType(TestCase): def setUp(self): django.setup() self.workspace = storage_test_utils.create_workspace() self.error = error_test_utils.create_error() self.interface = { 'version': '1.0', 'command': 'my_command', 
'command_arguments': 'args', 'input_data': [{ 'name': 'Test Input 1', 'type': 'file', 'media_types': ['text/plain'], }], 'output_data': [{ 'name': 'Test Output 1', 'type': 'files', 'media_type': 'image/png', }]} self.job_interface = JobInterface(self.interface) self.error_mapping = create_legacy_error_mapping({ 'version': '1.0', 'exit_codes': { '1': self.error.name, } }) self.configuration = { 'version': '1.0', 'condition': { 'media_type': 'text/plain' }, 'data': { 'input_data_name': 'Test Input 1', 'workspace_name': self.workspace.name } } self.trigger_config = job_test_utils.MockTriggerRuleConfiguration(job_test_utils.MOCK_TYPE, self.configuration) self.trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_TYPE, configuration=self.trigger_config.get_dict()) self.invalid_trigger_config = job_test_utils.MockErrorTriggerRuleConfiguration(job_test_utils.MOCK_ERROR_TYPE, self.configuration) self.invalid_trigger_rule = trigger_test_utils.create_trigger_rule(trigger_type=job_test_utils.MOCK_ERROR_TYPE, configuration=self.trigger_config.get_dict()) def test_successful(self): """Tests calling JobTypeManager.validate_job_type_v5() successfully""" warnings = JobType.objects.validate_job_type_v5('name', '1.0', self.interface, self.error_mapping, self.trigger_config) # Check results self.assertListEqual(warnings, []) def test_invalid(self): """Tests calling JobTypeManager.validate_job_type_v5() with an invalid trigger rule""" self.assertRaises(InvalidConnection, JobType.objects.validate_job_type_v5, 'name', '1.0', self.interface, self.error_mapping, self.invalid_trigger_config) class TestJobTypeRunningStatus(TestCase): def setUp(self): django.setup() self.job_type_1 = job_test_utils.create_job_type(name='Type 1', version='1.0') self.job_type_2 = job_test_utils.create_job_type(name='Type 2', version='2.0') self.job_type_3 = job_test_utils.create_job_type(name='Type 1', version='2.0') self.entry_1_longest = datetime.datetime.utcfromtimestamp(500000).replace(tzinfo=timezone.utc) self.entry_1_shortest = datetime.datetime.utcfromtimestamp(650000).replace(tzinfo=timezone.utc) self.entry_2_longest = datetime.datetime.utcfromtimestamp(600000).replace(tzinfo=timezone.utc) self.entry_2_shortest = datetime.datetime.utcfromtimestamp(750000).replace(tzinfo=timezone.utc) self.entry_3_longest = datetime.datetime.utcfromtimestamp(700000).replace(tzinfo=timezone.utc) self.entry_3_shortest = datetime.datetime.utcfromtimestamp(800000).replace(tzinfo=timezone.utc) job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=self.entry_1_longest) job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=self.entry_1_shortest) job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_shortest) job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_longest) job_test_utils.create_job(job_type=self.job_type_2, status='RUNNING', last_status_change=self.entry_2_shortest) job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_shortest) job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_longest) job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_longest) job_test_utils.create_job(job_type=self.job_type_3, status='RUNNING', last_status_change=self.entry_3_shortest) def test_successful(self): """Tests 
calling the get_running_job_status method on JobExecutionManager.""" status = JobType.objects.get_running_status() self.assertEqual(len(status), 3) # Check entry 1 self.assertEqual(status[0].job_type.id, self.job_type_1.id) self.assertEqual(status[0].job_type.name, 'Type 1') self.assertEqual(status[0].job_type.version, '1.0') self.assertEqual(status[0].count, 2) self.assertEqual(status[0].longest_running, self.entry_1_longest) # Check entry 2 self.assertEqual(status[1].job_type.id, self.job_type_2.id) self.assertEqual(status[1].job_type.name, 'Type 2') self.assertEqual(status[1].job_type.version, '2.0') self.assertEqual(status[1].count, 3) self.assertEqual(status[1].longest_running, self.entry_2_longest) # Check entry 3 self.assertEqual(status[2].job_type.id, self.job_type_3.id) self.assertEqual(status[2].job_type.name, 'Type 1') self.assertEqual(status[2].job_type.version, '2.0') self.assertEqual(status[2].count, 4) self.assertEqual(status[2].longest_running, self.entry_3_longest) class TestJobTypeFailedStatus(TestCase): def setUp(self): django.setup() self.job_type_1 = job_test_utils.create_job_type(name='Type 1', version='1.0') self.job_type_2 = job_test_utils.create_job_type(name='Type 2', version='2.0') self.job_type_3 = job_test_utils.create_job_type(name='Type 1', version='2.0') self.error_1 = Error.objects.create(name='Error 1', description='Test', category='SYSTEM') self.error_2 = Error.objects.create(name='Error 2', description='Test', category='SYSTEM') self.error_3 = Error.objects.create(name='Error 3', description='Test', category='DATA') # Date stamps for errors self.entry_1_last_time = datetime.datetime.utcfromtimestamp(590000).replace(tzinfo=timezone.utc) self.entry_1_first_time = datetime.datetime.utcfromtimestamp(580000).replace(tzinfo=timezone.utc) self.entry_2_time = datetime.datetime.utcfromtimestamp(585000).replace(tzinfo=timezone.utc) self.entry_3_last_time = datetime.datetime.utcfromtimestamp(490000).replace(tzinfo=timezone.utc) self.entry_3_mid_time = datetime.datetime.utcfromtimestamp(480000).replace(tzinfo=timezone.utc) self.entry_3_first_time = datetime.datetime.utcfromtimestamp(470000).replace(tzinfo=timezone.utc) self.entry_4_time = datetime.datetime.utcfromtimestamp(385000).replace(tzinfo=timezone.utc) # Create jobs job_test_utils.create_job(job_type=self.job_type_1, status='RUNNING', last_status_change=timezone.now()) job_test_utils.create_job(job_type=self.job_type_1, error=self.error_1, status='FAILED', last_status_change=self.entry_2_time) job_test_utils.create_job(job_type=self.job_type_2, error=self.error_1, status='FAILED', last_status_change=self.entry_4_time) job_test_utils.create_job(job_type=self.job_type_2, error=self.error_2, status='FAILED', last_status_change=self.entry_1_last_time) job_test_utils.create_job(job_type=self.job_type_2, error=self.error_2, status='FAILED', last_status_change=self.entry_1_first_time) job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED', last_status_change=self.entry_3_mid_time) job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED', last_status_change=self.entry_3_last_time) job_test_utils.create_job(job_type=self.job_type_3, error=self.error_2, status='FAILED', last_status_change=self.entry_3_first_time) job_test_utils.create_job(job_type=self.job_type_3, error=self.error_3, status='FAILED', last_status_change=timezone.now()) def test_successful(self): """Tests calling the get_failed_jobs_with_system_errors method on JobManager.""" status = 
JobType.objects.get_failed_status() self.assertEqual(len(status), 4) # Check entry 1 self.assertEqual(status[0].job_type.id, self.job_type_2.id) self.assertEqual(status[0].job_type.name, 'Type 2') self.assertEqual(status[0].job_type.version, '2.0') self.assertEqual(status[0].error.name, 'Error 2') self.assertEqual(status[0].count, 2) self.assertEqual(status[0].first_error, self.entry_1_first_time) self.assertEqual(status[0].last_error, self.entry_1_last_time) # Check entry 2 self.assertEqual(status[1].job_type.id, self.job_type_1.id) self.assertEqual(status[1].job_type.name, 'Type 1') self.assertEqual(status[1].job_type.version, '1.0') self.assertEqual(status[1].error.name, 'Error 1') self.assertEqual(status[1].count, 1) self.assertEqual(status[1].first_error, self.entry_2_time) self.assertEqual(status[1].last_error, self.entry_2_time) # Check entry 3 self.assertEqual(status[2].job_type.id, self.job_type_3.id) self.assertEqual(status[2].job_type.name, 'Type 1') self.assertEqual(status[2].job_type.version, '2.0') self.assertEqual(status[2].error.name, 'Error 2') self.assertEqual(status[2].count, 3) self.assertEqual(status[2].first_error, self.entry_3_first_time) self.assertEqual(status[2].last_error, self.entry_3_last_time) # Check entry 4 self.assertEqual(status[3].job_type.id, self.job_type_2.id) self.assertEqual(status[3].job_type.name, 'Type 2') self.assertEqual(status[3].job_type.version, '2.0') self.assertEqual(status[3].error.name, 'Error 1') self.assertEqual(status[3].count, 1) self.assertEqual(status[3].first_error, self.entry_4_time) self.assertEqual(status[3].last_error, self.entry_4_time) class TestJobTypeTagManager(TransactionTestCase): def setUp(self): django.setup() self.job_type1 = "test-type1" self.tag_set1 = ["tag1", "tag2", "oneandfour"] self.job_type2 = "test-type2" self.tag_set2 = ["tag3", "tag4"] self.job_type3 = "test-type3" self.tag_set3 = ["tag5", "tag6"] self.job_type4 = "test-type4" self.tag_set4 = ["tag7", "tag8", "oneandfour"] JobTypeTag.objects.create_job_type_tags(self.job_type1, self.tag_set1) JobTypeTag.objects.create_job_type_tags(self.job_type3, self.tag_set3) JobTypeTag.objects.create_job_type_tags(self.job_type4, self.tag_set4) def test_create_job_type_tags(self): """Tests calling JobTypeTagManager.create_job_type_tags()""" result = JobTypeTag.objects.create_job_type_tags(self.job_type2, self.tag_set2) self.assertEqual(len(result), 2) def test_clear_job_type_tags(self): """Tests calling JobTypeTagManager.clear_job_type_tags()""" tags = JobTypeTag.objects.get_tags(self.job_type3) self.assertEqual(tags, self.tag_set3) JobTypeTag.objects.clear_job_type_tags(self.job_type3) tags = JobTypeTag.objects.get_tags(self.job_type3) self.assertEqual(len(tags), 0) def test_get_job_type_tags(self): """Tests calling JobTypeTagManager.get_tags()""" tags = JobTypeTag.objects.get_tags(self.job_type1) self.assertEqual(tags, self.tag_set1) def test_get_tagged_job_types(self): """Tests calling JobTypeTagManager.get_tagged_job_types()""" job_types = JobTypeTag.objects.get_tagged_job_types(["tag1", "tag2"]) self.assertEqual(len(job_types), 1) self.assertEqual(job_types[0], self.job_type1) def test_get_matching_job_types(self): """Tests calling JobTypeTagManager.get_matching_job_types()""" job_types = JobTypeTag.objects.get_matching_job_types("no-match") self.assertEqual(len(job_types), 0) job_types = JobTypeTag.objects.get_matching_job_types("one") self.assertEqual(len(job_types), 2) self.assertEqual(job_types[0], self.job_type1) job_types =
JobTypeTag.objects.get_matching_job_types("tag1") self.assertEqual(len(job_types), 1) self.assertEqual(job_types[0], self.job_type1)
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np


def adjust_hue(img, hue_factor):
    """Adjust hue of an image.

    Args:
        img (ndarray): Image to be adjusted, with BGR order.
        hue_factor (float): The amount of shift in the H channel; must be in
            the interval [-0.5, 0.5]. 0.5 and -0.5 give a complete reversal
            of the hue channel in HSV space in the positive and negative
            directions respectively, and 0 means no shift. Therefore, both
            -0.5 and 0.5 will give an image with complementary colors, while
            0 gives the original image.

    Returns:
        ndarray: The hue-adjusted image.
    """
    if hue_factor is None:
        return img
    assert -0.5 <= hue_factor <= 0.5
    img = mmcv.bgr2hsv(img)
    # OpenCV stores H in [0, 180); shift and wrap around.
    img[:, :, 0] = (img[:, :, 0].astype(int) + int(hue_factor * 180.) + 180) % 180
    img = mmcv.hsv2bgr(img)
    return img


def adjust_gamma(img, gamma=1.0):
    """Use gamma correction to process the image.

    Args:
        img (ndarray): Image to be adjusted, of uint8 dtype.
        gamma (float or int): Gamma value used in gamma correction; must be
            a positive value. Note: a gamma larger than 1 makes the shadows
            darker, while a gamma smaller than 1 makes dark regions lighter.
            Default: 1.0.

    Returns:
        ndarray: The gamma-adjusted image.
    """
    assert isinstance(gamma, (float, int))
    assert gamma > 0
    assert img.dtype == 'uint8'
    table = ((np.arange(256) / 255.) ** gamma * (255 + 1 - 1e-3)).astype('uint8')
    adjusted_img = mmcv.lut_transform(np.array(img, dtype=np.uint8), table)
    return adjusted_img
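# A minimal usage sketch (not part of the original module): exercises the two
# helpers above on a random BGR image. It assumes mmcv and numpy are
# installed; the image size and factor values are illustrative only.
if __name__ == '__main__':
    demo = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
    shifted = adjust_hue(demo, 0.25)   # rotate hue by a quarter turn
    darker = adjust_gamma(demo, 2.0)   # gamma > 1 darkens the shadows
    assert shifted.shape == demo.shape
    assert darker.dtype == np.uint8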
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects for the pages for subtopics, and related models.""" from constants import constants from core.domain import change_domain from core.domain import state_domain from core.platform import models import feconf import utils (topic_models,) = models.Registry.import_models([models.NAMES.topic]) SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html' SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio' SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations' CMD_CREATE_NEW = 'create_new' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property' class SubtopicPageChange(change_domain.BaseChange): """Domain object for changes made to subtopic_page object. The allowed commands, together with the attributes: - 'create_new' (with topic_id, subtopic_id) - 'update_subtopic_page_property' ( with property_name, new_value, old_value, subtopic_id). """ # The allowed list of subtopic page properties which can be used in # update_subtopic_page_property command. SUBTOPIC_PAGE_PROPERTIES = ( SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML, SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO, SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS) ALLOWED_COMMANDS = [{ 'name': CMD_CREATE_NEW, 'required_attribute_names': ['topic_id', 'subtopic_id'], 'optional_attribute_names': [] }, { 'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY, 'required_attribute_names': [ 'property_name', 'new_value', 'old_value', 'subtopic_id'], 'optional_attribute_names': [], 'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES} }] class SubtopicPageContents(object): """Domain object for the contents on a subtopic page.""" def __init__( self, subtitled_html, recorded_voiceovers, written_translations): """Constructs a SubtopicPageContents domain object. Args: subtitled_html: SubtitledHtml. The html data being displayed on the page. recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for the subtopic page content and their translations in different languages. written_translations: WrittenTranslations. The text translations of the subtopic page content. """ self.subtitled_html = subtitled_html self.recorded_voiceovers = recorded_voiceovers self.written_translations = written_translations def validate(self): """Validates the SubtopicPageContentsObject, verifying that all fields are of the correct type. """ self.subtitled_html.validate() content_ids = set([self.subtitled_html.content_id]) self.recorded_voiceovers.validate(content_ids) self.written_translations.validate(content_ids) @classmethod def create_default_subtopic_page_contents(cls): """Creates a default subtopic page contents object. Returns: SubtopicPageContents. A default object. 
""" content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID return cls( state_domain.SubtitledHtml.create_default_subtitled_html( content_id), state_domain.RecordedVoiceovers.from_dict( {'voiceovers_mapping': {content_id: {}}}), state_domain.WrittenTranslations.from_dict( {'translations_mapping': {content_id: {}}})) def to_dict(self): """Returns a dict representing this SubtopicPageContents domain object. Returns: A dict, mapping all fields of SubtopicPageContents instance. """ return { 'subtitled_html': self.subtitled_html.to_dict(), 'recorded_voiceovers': self.recorded_voiceovers.to_dict(), 'written_translations': self.written_translations.to_dict() } @classmethod def from_dict(cls, page_contents_dict): """Creates a subtopic page contents object from a dictionary. Args: page_contents_dict: dict. The dict representation of SubtopicPageContents object. Returns: SubtopicPageContents. The corresponding object. """ return cls( state_domain.SubtitledHtml.from_dict( page_contents_dict['subtitled_html']), state_domain.RecordedVoiceovers.from_dict(page_contents_dict[ 'recorded_voiceovers']), state_domain.WrittenTranslations.from_dict(page_contents_dict[ 'written_translations'])) class SubtopicPage(object): """Domain object for a Subtopic page.""" def __init__( self, subtopic_page_id, topic_id, page_contents, page_contents_schema_version, language_code, version): """Constructs a SubtopicPage domain object. Args: subtopic_page_id: str. The unique ID of the subtopic page. topic_id: str. The ID of the topic that this subtopic is a part of. page_contents: SubtopicPageContents. The html and audio translations to be surfaced to the learner. page_contents_schema_version: int. The schema version for the page contents object. language_code: str. The ISO 639-1 code for the language this subtopic page is written in. version: int. The current version of the subtopic. """ self.id = subtopic_page_id self.topic_id = topic_id self.page_contents = page_contents self.page_contents_schema_version = page_contents_schema_version self.language_code = language_code self.version = version def to_dict(self): """Returns a dict representing this SubtopicPage domain object. Returns: A dict, mapping all fields of SubtopicPage instance. """ return { 'id': self.id, 'topic_id': self.topic_id, 'page_contents': self.page_contents.to_dict(), 'page_contents_schema_version': self.page_contents_schema_version, 'language_code': self.language_code, 'version': self.version } @classmethod def get_subtopic_page_id(cls, topic_id, subtopic_id): """Returns the subtopic page id from the topic_id and subtopic_id. Args: topic_id: str. The id of the topic that the subtopic is a part of. subtopic_id: int. The id of the subtopic. Returns: str. The subtopic_page_id calculated from the given values. """ return '%s-%s' % (topic_id, subtopic_id) @classmethod def create_default_subtopic_page(cls, subtopic_id, topic_id): """Creates a SubtopicPage object with default values. Args: subtopic_id: str. ID of the subtopic. topic_id: str. The Id of the topic to which this page is linked with. Returns: SubtopicPage. A subtopic object with given id, topic_id and default page contents field. 
""" subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id) return cls( subtopic_page_id, topic_id, SubtopicPageContents.create_default_subtopic_page_contents(), feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, 0) @classmethod def update_page_contents_from_model( cls, versioned_page_contents, current_version): """Converts the page_contents blob contained in the given versioned_skill_contents dict from current_version to current_version + 1. Note that the versioned_skill_contents being passed in is modified in-place. Args: versioned_page_contents: dict. A dict with two keys: - schema_version: str. The schema version for the page_contents dict. - page_contents: dict. The dict comprising the subtopic page contents. current_version: int. The current schema version of page_contents. """ versioned_page_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_page_contents['page_contents'] = conversion_fn( versioned_page_contents['page_contents']) def get_subtopic_id_from_subtopic_page_id(self): """Returns the id from the subtopic page id of the object. Returns: int. The subtopic_id of the object. """ return int(self.id[len(self.topic_id) + 1:]) def update_page_contents_html(self, new_page_contents_html): """The new value for the html data field. Args: new_page_contents_html: SubtitledHtml. The new html for the subtopic page. """ self.page_contents.subtitled_html = new_page_contents_html def update_page_contents_audio(self, new_page_contents_audio): """The new value for the recorded_voiceovers data field. Args: new_page_contents_audio: RecordedVoiceovers. The new audio for the subtopic page. """ self.page_contents.recorded_voiceovers = new_page_contents_audio def update_page_contents_written_translations( self, new_page_written_translations_dict): """The new value for the written_translations data field. Args: new_page_written_translations_dict: dict. The new translation for the subtopic page. """ self.page_contents.written_translations = ( state_domain.WrittenTranslations.from_dict( new_page_written_translations_dict)) def validate(self): """Validates various properties of the SubtopicPage object. Raises: ValidationError: One or more attributes of the subtopic page are invalid. """ if not isinstance(self.topic_id, basestring): raise utils.ValidationError( 'Expected topic_id to be a string, received %s' % self.topic_id) if not isinstance(self.version, int): raise utils.ValidationError( 'Expected version number to be an int, received %s' % self.version) self.page_contents.validate() if not isinstance(self.page_contents_schema_version, int): raise utils.ValidationError( 'Expected page contents schema version to be an integer, ' 'received %s' % self.page_contents_schema_version) if ( self.page_contents_schema_version != feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected page contents schema version to be %s, received %s' % ( feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION, self.page_contents_schema_version) ) if not isinstance(self.language_code, basestring): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not any([self.language_code == lc['code'] for lc in constants.ALL_LANGUAGE_CODES]): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code)
# -*- coding: utf-8 -*-
from shopping_cart.config import PRODUCT_MODEL


class CartItem(object):
    """Represents one item in the Cart."""

    def __init__(self, item_pk, quantity):
        """Stores the item's pk and its quantity."""
        self.item_pk = item_pk
        self.quantity = quantity

    def get_object(self):
        try:
            return PRODUCT_MODEL.objects.get(pk=self.item_pk)
        except PRODUCT_MODEL.DoesNotExist:
            return None


class Cart(object):
    """Represents a user's shopping cart."""

    def __init__(self):
        self.items = []

    def get_item(self, item_pk):
        for item in self.items:
            if item.item_pk == item_pk:
                return item

    def get_or_create_item(self, item_pk):
        for item in self.items:
            if item.item_pk == item_pk:
                return item
        new_item = CartItem(item_pk, 0)
        self.items.append(new_item)
        return new_item

    def change_quantity(self, item_pk, quantity):
        self.get_item(item_pk).quantity = quantity

    def increase_quantity(self, item_pk):
        self.get_or_create_item(item_pk).quantity += 1

    def decrease_quantity(self, item_pk):
        if self.get_item(item_pk).quantity > 0:
            self.get_item(item_pk).quantity -= 1
        if self.get_item(item_pk).quantity == 0:
            self.delete_item(item_pk)

    def delete_item(self, item_pk):
        self.items.remove(self.get_item(item_pk))

    def erase_cart(self, request):
        self.items = []
        request.session.pop(u'cart')

    def get_quantity_total_items(self):
        return sum([item.quantity for item in self.items])


def get_cart(request):
    return request.session.get(u'cart', Cart())
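# A minimal usage sketch (not part of the original module): drives the cart
# through a typical add/remove cycle; the pk value 42 is illustrative only.
if __name__ == '__main__':
    cart = Cart()
    cart.increase_quantity(42)   # first call creates the item
    cart.increase_quantity(42)
    assert cart.get_quantity_total_items() == 2
    cart.decrease_quantity(42)
    cart.decrease_quantity(42)   # quantity hits 0, so the item is removed
    assert cart.get_item(42) is None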
/* * Copyright (c) 2017 Spotify AB. * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #pragma once #include <functional> #include <string> namespace nativeformat { namespace decoder { typedef std::function<void(long frame_index, long frame_count, float *samples)> DECODE_CALLBACK; typedef std::function<void(bool success)> LOAD_DECODER_CALLBACK; typedef std::function<void(const std::string &domain, int error_code)> ERROR_DECODER_CALLBACK; extern const long UNKNOWN_FRAMES; extern const std::string DECODER_AUDIOCONVERTER_NAME; extern const std::string version(); class Decoder { public: virtual double sampleRate() = 0; virtual int channels() = 0; virtual long currentFrameIndex() = 0; virtual void seek(long frame_index) = 0; virtual long frames() = 0; virtual void decode(long frames, const DECODE_CALLBACK &decode_callback) = 0; virtual bool eof() = 0; virtual const std::string &path() = 0; virtual const std::string &name() = 0; virtual void flush() = 0; virtual void load(const ERROR_DECODER_CALLBACK &decoder_error_callback, const LOAD_DECODER_CALLBACK &decoder_load_callback) = 0; }; } // namespace decoder } // namespace nativeformat
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Tests binary search of BaseConnection and exception thrown when available is called while disconnected """ import pytest from com_server import Connection, ConnectException def test_bin_srch() -> None: b = Connection(port="test", baud=123) b._rcv_queue = [ (1636911273.8617003, b""), (1636911274.8653774, b""), ] assert b._binary_search_rcv(1636911273.8617003) == 0 def test_available_exception() -> None: """ Tests that calling available while not connected will raise exception """ b = Connection(port="test", baud=123) with pytest.raises(ConnectException): b.available
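# A hedged reference sketch (not part of the test file): a plain bisect over
# (timestamp, data) tuples mirroring what _binary_search_rcv is asserted to
# return above. The queue layout is taken from the test; the helper name is
# illustrative.
import bisect


def _find_rcv_index(queue, ts):
    # queue is sorted by timestamp; return the index of the entry at ts
    return bisect.bisect_left([t for t, _ in queue], ts)


def test_bin_srch_reference() -> None:
    queue = [(1636911273.8617003, b""), (1636911274.8653774, b"")]
    assert _find_rcv_index(queue, 1636911273.8617003) == 0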
#!/usr/bin/env python2 # coding: utf-8 import os import time import unittest from pykit import daemonize from pykit import proc from pykit import ututil dd = ututil.dd this_base = os.path.dirname(__file__) def subproc(script, env=None): if env is None: env = dict(PYTHONPATH=this_base + '/../..',) return proc.shell_script(script, env=env) def read_file(fn): try: with open(fn, 'r') as f: cont = f.read() return cont except EnvironmentError: return None class TestDaemonize(unittest.TestCase): foo_fn = '/tmp/foo' bar_fn = '/tmp/bar' pidfn = '/tmp/test_daemonize.pid' def _clean(self): # kill foo.py and kill bar.py # bar.py might be waiting for foo.py to release lock-file. try: subproc('python2 {b}/foo.py stop'.format(b=this_base)) except Exception as e: dd(repr(e)) time.sleep(0.1) try: subproc('python2 {b}/bar.py stop'.format(b=this_base)) except Exception as e: dd(repr(e)) # remove written file try: os.unlink(self.foo_fn) except EnvironmentError as e: pass try: os.unlink(self.bar_fn) except EnvironmentError as e: pass def setUp(self): self._clean() def tearDown(self): self._clean() def test_start(self): subproc('python2 {b}/foo.py start'.format(b=this_base)) time.sleep(0.2) self.assertEqual('foo-before', read_file(self.foo_fn)) time.sleep(1) self.assertEqual('foo-after', read_file(self.foo_fn)) def test_stop(self): subproc('python2 {b}/foo.py start'.format(b=this_base)) time.sleep(0.2) self.assertEqual('foo-before', read_file(self.foo_fn), 'foo started') subproc('python2 {b}/foo.py stop'.format(b=this_base)) time.sleep(0.2) self.assertEqual('foo-before', read_file(self.foo_fn), 'process has been kill thus no content is updated') def test_restart(self): subproc('python2 {b}/foo.py start'.format(b=this_base)) time.sleep(0.2) self.assertEqual('foo-before', read_file(self.foo_fn)) os.unlink(self.foo_fn) self.assertEqual(None, read_file(self.foo_fn)) subproc('python2 {b}/foo.py restart'.format(b=this_base)) time.sleep(0.2) self.assertEqual('foo-before', read_file(self.foo_fn), 'restarted and rewritten to the file') def test_exclusive_pid(self): subproc('python2 {b}/foo.py start'.format(b=this_base)) time.sleep(0.1) subproc('python2 {b}/bar.py start'.format(b=this_base)) time.sleep(0.1) self.assertEqual(None, read_file(self.bar_fn), 'bar.py not started or run') def test_default_pid_file(self): d = daemonize.Daemon() self.assertEqual('/var/run/__main__', d.pidfile) def test_close_fds(self): env = dict(PYTHONPATH='{path_daemonize}:{path_pykit}'.format( path_daemonize=this_base + '/../..', path_pykit=this_base + '/../../..')) code, out, err = subproc('python2 {b}/close_fds.py close'.format(b=this_base), env=env) dd('close_fds.py close result:') dd(code) dd('out:') for l in out.split('\n'): dd(' ', l) dd('err:') for l in err.split('\n'): dd(' ', l) time.sleep(1) fds = read_file(self.foo_fn) dd('fds:', fds) self.assertNotIn(self.bar_fn, fds) self._clean() code, out, err = subproc('python2 {b}/close_fds.py open'.format(b=this_base), env=env) dd('close_fds.py open result:') dd(code) dd('out:') for l in out.split('\n'): dd(' ', l) dd('err:') for l in err.split('\n'): dd(' ', l) time.sleep(1) fds = read_file(self.foo_fn) dd('fds:', fds) self.assertIn(self.bar_fn, fds)
import React, { useEffect, useState } from 'react'
import CIcon from '@coreui/icons-react'
import { cilSearch } from '@coreui/icons'
import ReactPaginate from 'react-paginate'
import branch from './../../assets/images/avatars/branch.png'
import { Link } from 'react-router-dom'
import axios from 'axios'
import {
  CAvatar,
  CCol,
  CRow,
  CFormInput,
  CInputGroup,
  CInputGroupText,
  CTable,
  CTableBody,
  CTableHead,
  CTableHeaderCell,
  CTableRow,
  CTableDataCell,
  CCard,
  CCardBody,
  CPagination,
  CButton,
} from '@coreui/react'

const Location = () => {
  const [items, setItems] = useState([])
  const [pageCount, setpageCount] = useState(0)
  const [search, setSearch] = useState('')

  let limit = 15

  const deleteLocation = (id) => {
    // Ask for confirmation instead of only showing an alert.
    if (!window.confirm('Are you sure you want to delete this record?')) {
      return
    }
    axios.delete(`http://localhost:5000/deleteLocation/${id}`).then(() => {
      // Drop the deleted row from the rendered list.
      setItems(items.filter((item) => item.Location_ID !== id))
    })
  }

  const getProductData = async () => {
    try {
      const res = await fetch(`http://localhost:5000/api/Location`)
      const data = await res.json()
      console.log(data.data)
      const total = res.headers.get('x-total-count')
      setpageCount(Math.ceil(total / limit))
      setItems(data)
    } catch (e) {
      console.log(e)
    }
  }

  useEffect(() => {
    getProductData()
  }, [limit])

  const fetchComments = async (currentPage) => {
    const res = await fetch(
      `https://jsonplaceholder.typicode.com/comments?_page=${currentPage}&_limit=${limit}`,
    )
    const data = await res.json()
    return data
  }

  const handlePageClick = async (data) => {
    console.log(data.selected)
    let currentPage = data.selected + 1
    const commentsFromServer = await fetchComments(currentPage)
    setItems(commentsFromServer)
  }

  return (
    <div>
      <CCard>
        <CCardBody>
          <CRow>
            <CCol xs={5}>
              <h5>Location Registry</h5>
            </CCol>
            <CCol xs={5} sm={4} lg={5}>
              <CInputGroup className="mb-1 my-0 mx-0" lg={6} xs={6}>
                <CInputGroupText>
                  <CIcon icon={cilSearch} />
                </CInputGroupText>
                <CFormInput
                  placeholder="Search"
                  onChange={(e) => {
                    setSearch(e.target.value)
                  }}
                />
              </CInputGroup>
            </CCol>
            <CCol xs={2}>
              <Link to="/Add_Location">
                {' '}
                <CButton>{<CAvatar src={branch} size="md" />}Add new Location</CButton>
              </Link>
            </CCol>
          </CRow>
        </CCardBody>
      </CCard>
      <br />
      <CCard>
        <CCardBody>
          <div className="table-container">
            <>
              <CCol xs={12}>
                <CTable>
                  <CTableHead>
                    <CTableRow>
                      <CTableHeaderCell scope="col">Location Name</CTableHeaderCell>
                      <CTableHeaderCell scope="col">Address</CTableHeaderCell>
                      <CTableHeaderCell scope="col"></CTableHeaderCell>
                    </CTableRow>
                  </CTableHead>
                  <CTableBody>
                    {items
                      .filter((item) =>
                        search === ''
                          ? true
                          : item.Location_Name.toLowerCase().includes(search.toLowerCase()),
                      )
                      .map((item) => {
                        return (
                          <CTableRow key={item.Location_ID}>
                            <CTableDataCell scope="row">{item.Location_Name}</CTableDataCell>
                            <CTableDataCell scope="row">{item.Location_Name}</CTableDataCell>
                            <CTableDataCell>
                              <CButton>View</CButton>
                              <CButton className="m-1" color="success">
                                Edit
                              </CButton>
                              <CButton
                                onClick={() => {
                                  deleteLocation(item.Location_ID)
                                }}
                                color="danger"
                              >
                                Delete
                              </CButton>
                            </CTableDataCell>
                          </CTableRow>
                        )
                      })}
                  </CTableBody>
                  <CPagination aria-label="Page navigation example">
                    <ReactPaginate
                      previousLabel={'previous'}
                      nextLabel={'next'}
                      breakLabel={'...'}
                      pageCount={pageCount}
                      marginPagesDisplayed={2}
                      pageRangeDisplayed={3}
                      onPageChange={handlePageClick}
                      containerClassName={'pagination justify-content-center'}
                      pageClassName={'page-item'}
                      pageLinkClassName={'page-link'}
                      previousClassName={'page-item'}
                      previousLinkClassName={'page-link'}
                      nextClassName={'page-item'}
                      nextLinkClassName={'page-link'}
                      breakClassName={'page-item'}
                      breakLinkClassName={'page-link'}
                      activeClassName={'active'}
                    />
                  </CPagination>
                </CTable>
              </CCol>
            </>
          </div>
        </CCardBody>
      </CCard>
      <br />
    </div>
  )
}

export default Location
from django.utils.http import urlencode #django <2 compat try: from django.urls import reverse except ImportError: from django.core.urlresolvers import reverse from .exceptions import InvalidActionError from .exceptions import InvalidControllerError class ApplicationHelper(object): """ApplicationHelpers can contain functions useful in a controller. Each controller is assigned a helper. Either the global ApplicationHelper, or a class with the same name as the controller such as foo_helper.py, and being a subclass of ApplicationHelper.""" def __init__(self, controller): self.controller = controller def url_for(self, controller = None, action = None, named_url = None, url_params = None, url_args=None, url_kwargs=None): """ :param controller: a controller name in snake case, without prefix, if you are not redirecting inside the current controller :param action: an action name - if a controller is not specified, the action will be in the current controller :param named_url: A named URL in django :param url_params: The query string params :param url_args: the list arguments for this URL, this will be used to build the URL within django's urlresolvers :param url_kwargs: the dict arguments for this URL, this will be used to build the URL within django's urlresolvers :return: """ if not named_url: from .controller import get_actions, get_controller_name if controller: controllerClassOrInstance = self.controller._site.controllers.get(controller, None) if controllerClassOrInstance is None: raise InvalidControllerError(controller) controller_name = controller else: controllerClassOrInstance = self.controller controller_name = self.controller._controller_name if action: try: action = action.strip('"\'') action_func = get_actions(controllerClassOrInstance,with_prefix=False)[action] except KeyError: raise InvalidActionError(action) named_url = getattr(action_func,'named_url',None) if named_url is None: controller_name = get_controller_name(controllerClassOrInstance, with_prefix=False) named_url = '%s_%s' % (controller_name, action) else: named_url = "%s_index" % controller_name url = reverse(named_url, args=url_args, kwargs=url_kwargs) if url_params is not None: return '%s?%s' % (url, urlencode(url_params)) return url
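# A minimal usage sketch (not part of the original module): how a controller's
# helper might build URLs. 'posts' and 'show' are illustrative names and
# assume a 'posts' controller with a 'show' action is registered on the site.
class PostsHelper(ApplicationHelper):

    def show_url(self, pk):
        # resolves the named URL 'posts_show' with the given kwargs
        return self.url_for(controller='posts', action='show',
                            url_kwargs={'pk': pk})

    def index_url(self, page=1):
        # index URL for the current controller, with a query string appended
        return self.url_for(action='index', url_params={'page': page})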
#!/usr/bin/env python import argparse import sys import socket import random import struct from sendutils import * from headers import * def test_message(dest): init_stk = [ STACK(dest) ] prog = [ LOAD(0), # load destination VARLOADREG(), # load egress port value corresponding to destination SETEGRESS() ] return prog, init_stk def main(): if len(sys.argv) < 3: print "sends a message that is routed based on register values on the switch" print "the result field will be 999" print "arguments: <destination host> <message>" print "example send to host h1: ./ex_routing_table_message.py 1 hello" return dest = int(sys.argv[1]) message = sys.argv[2] addr = socket.gethostbyname("10.0.9.99") # dummy address instrs, stk = test_message(dest) send_pkt(addr, instrs, stk, message) if __name__ == '__main__': main()
from .base import * # noqa from .base import env # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = True # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = env( "DJANGO_SECRET_KEY", default="NzEUafAjLMzrvA6C6i16Bvpf6NjXoO8vBF5tYUwRtwNo8IOMGdcZoFFjTl9uO0pz", ) # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"] # CACHES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "", } } # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405 # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = env( "DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend" ) # https://docs.djangoproject.com/en/dev/ref/settings/#email-host EMAIL_HOST = "localhost" # https://docs.djangoproject.com/en/dev/ref/settings/#email-port EMAIL_PORT = 1025 # django-debug-toolbar # ------------------------------------------------------------------------------ # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites INSTALLED_APPS += ["debug_toolbar"] # noqa F405 # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405 # https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config DEBUG_TOOLBAR_CONFIG = { "DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"], "SHOW_TEMPLATE_CONTEXT": True, } # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"] if env("USE_DOCKER") == "yes": import socket hostname, _, ips = socket.gethostbyname_ex(socket.gethostname()) INTERNAL_IPS += [ip[:-1] + "1" for ip in ips] # django-extensions # ------------------------------------------------------------------------------ # https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration INSTALLED_APPS += ["django_extensions"] # noqa F405 # Celery # ------------------------------------------------------------------------------ # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates CELERY_TASK_EAGER_PROPAGATES = True # Your stuff... # ------------------------------------------------------------------------------ # MEDIA # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = str(APPS_DIR("media")) # https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = "/lexmapr/media/"
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""A word-counting workflow that uses Google Cloud Datastore.

This example shows how to use ``datastoreio`` to read from and write to
Google Cloud Datastore. Note that running this example may incur charges for
Cloud Datastore operations.

See https://developers.google.com/datastore/ for more details on Google Cloud
Datastore.

See https://beam.apache.org/get-started/quickstart for how to run a Beam
pipeline.

Read-only Mode: In this mode, this example reads Cloud Datastore entities
using the ``datastoreio.ReadFromDatastore`` transform, extracts the words,
counts them and writes the output to a set of files.

The following options must be provided to run this pipeline in read-only mode:
``
--project YOUR_PROJECT_ID
--kind YOUR_DATASTORE_KIND
--output [YOUR_LOCAL_FILE *or* gs://YOUR_OUTPUT_PATH]
--read_only
``

Read-write Mode: In this mode, this example reads words from an input file,
converts them to Cloud Datastore ``Entity`` objects and writes them to Cloud
Datastore using the ``datastoreio.Write`` transform. The second pipeline will
then read these Cloud Datastore entities using the
``datastoreio.ReadFromDatastore`` transform, extract the words, count them
and write the output to a set of files.

The following options must be provided to run this pipeline in read-write
mode:
``
--project YOUR_PROJECT_ID
--kind YOUR_DATASTORE_KIND
--output [YOUR_LOCAL_FILE *or* gs://YOUR_OUTPUT_PATH]
``

Note: We are using the Cloud Datastore protobuf objects directly because that
is the interface that ``datastoreio`` exposes. See the following links for
more information about these protobuf messages.
https://cloud.google.com/datastore/docs/reference/rpc/google.datastore.v1 and https://github.com/googleapis/googleapis/tree/master/google/datastore/v1 """ from __future__ import absolute_import import argparse import logging import re import uuid from google.cloud.proto.datastore.v1 import entity_pb2 from google.cloud.proto.datastore.v1 import query_pb2 from googledatastore import helper as datastore_helper, PropertyFilter import apache_beam as beam from apache_beam.io import ReadFromText from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore from apache_beam.metrics import Metrics from apache_beam.utils.pipeline_options import GoogleCloudOptions from apache_beam.utils.pipeline_options import PipelineOptions from apache_beam.utils.pipeline_options import SetupOptions empty_line_counter = Metrics.counter('main', 'empty_lines') word_length_counter = Metrics.counter('main', 'word_lengths') word_counter = Metrics.counter('main', 'total_words') class WordExtractingDoFn(beam.DoFn): """Parse each line of input text into words.""" def process(self, element): """Returns an iterator over words in contents of Cloud Datastore entity. The element is a line of text. If the line is blank, note that, too. Args: element: the input element to be processed Returns: The processed element. """ content_value = element.properties.get('content', None) text_line = '' if content_value: text_line = content_value.string_value if not text_line: empty_line_counter.inc() words = re.findall(r'[A-Za-z\']+', text_line) for w in words: word_length_counter.inc(len(w)) word_counter.inc() return words class EntityWrapper(object): """Create a Cloud Datastore entity from the given string.""" def __init__(self, namespace, kind, ancestor): self._namespace = namespace self._kind = kind self._ancestor = ancestor def make_entity(self, content): entity = entity_pb2.Entity() if self._namespace is not None: entity.key.partition_id.namespace_id = self._namespace # All entities created will have the same ancestor datastore_helper.add_key_path(entity.key, self._kind, self._ancestor, self._kind, str(uuid.uuid4())) datastore_helper.add_properties(entity, {"content": unicode(content)}) return entity def write_to_datastore(project, user_options, pipeline_options): """Creates a pipeline that writes entities to Cloud Datastore.""" p = beam.Pipeline(options=pipeline_options) # pylint: disable=expression-not-assigned (p | 'read' >> ReadFromText(user_options.input) | 'create entity' >> beam.Map( EntityWrapper(user_options.namespace, user_options.kind, user_options.ancestor).make_entity) | 'write to datastore' >> WriteToDatastore(project)) # Actually run the pipeline (all operations above are deferred). p.run().wait_until_finish() def make_ancestor_query(kind, namespace, ancestor): """Creates a Cloud Datastore ancestor query. The returned query will fetch all the entities that have the parent key name set to the given `ancestor`. 
""" ancestor_key = entity_pb2.Key() datastore_helper.add_key_path(ancestor_key, kind, ancestor) if namespace is not None: ancestor_key.partition_id.namespace_id = namespace query = query_pb2.Query() query.kind.add().name = kind datastore_helper.set_property_filter( query.filter, '__key__', PropertyFilter.HAS_ANCESTOR, ancestor_key) return query def read_from_datastore(project, user_options, pipeline_options): """Creates a pipeline that reads entities from Cloud Datastore.""" p = beam.Pipeline(options=pipeline_options) # Create a query to read entities from datastore. query = make_ancestor_query(user_options.kind, user_options.namespace, user_options.ancestor) # Read entities from Cloud Datastore into a PCollection. lines = p | 'read from datastore' >> ReadFromDatastore( project, query, user_options.namespace) # Count the occurrences of each word. counts = (lines | 'split' >> (beam.ParDo(WordExtractingDoFn()) .with_output_types(unicode)) | 'pair_with_one' >> beam.Map(lambda x: (x, 1)) | 'group' >> beam.GroupByKey() | 'count' >> beam.Map(lambda (word, ones): (word, sum(ones)))) # Format the counts into a PCollection of strings. output = counts | 'format' >> beam.Map(lambda (word, c): '%s: %s' % (word, c)) # Write the output using a "Write" transform that has side effects. # pylint: disable=expression-not-assigned output | 'write' >> beam.io.WriteToText(file_path_prefix=user_options.output, num_shards=user_options.num_shards) # Actually run the pipeline (all operations above are deferred). result = p.run() # Wait until completion, main thread would access post-completion job results. result.wait_until_finish() return result def run(argv=None): """Main entry point; defines and runs the wordcount pipeline.""" parser = argparse.ArgumentParser() parser.add_argument('--input', dest='input', default='gs://dataflow-samples/shakespeare/kinglear.txt', help='Input file to process.') parser.add_argument('--kind', dest='kind', required=True, help='Datastore Kind') parser.add_argument('--namespace', dest='namespace', help='Datastore Namespace') parser.add_argument('--ancestor', dest='ancestor', default='root', help='The ancestor key name for all entities.') parser.add_argument('--output', dest='output', required=True, help='Output file to write results to.') parser.add_argument('--read_only', action='store_true', help='Read an existing dataset, do not write first') parser.add_argument('--num_shards', dest='num_shards', type=int, # If the system should choose automatically. default=0, help='Number of output shards') known_args, pipeline_args = parser.parse_known_args(argv) # We use the save_main_session option because one or more DoFn's in this # workflow rely on global context (e.g., a module imported at module level). pipeline_options = PipelineOptions(pipeline_args) pipeline_options.view_as(SetupOptions).save_main_session = True gcloud_options = pipeline_options.view_as(GoogleCloudOptions) # Write to Datastore if `read_only` options is not specified. if not known_args.read_only: write_to_datastore(gcloud_options.project, known_args, pipeline_options) # Read entities from Datastore. result = read_from_datastore(gcloud_options.project, known_args, pipeline_options) result.metrics().query() #TODO(pabloem)(BEAM-1366) Fix these once metrics are 100% queriable. if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) run()
/*! * Angular Directives For Accessible Applications * * Copyright (C) 2015-2017 Deque Systems Inc., All Rights Reserved * * See the project LICENSE file for usage - https://github.com/dequelabs/ngA11y/blob/master/LICENSE */ !function(){"use strict";function a(a){for(var b=a[0].querySelectorAll(c),d=[],e=0,f=b.length;e<f;e++){var g=b[e],h=g.getAttribute("aria-hidden");if(!h||"true"!==h.toLowerCase()){var i=g.getAttribute("tabindex");i&&"-1"===i||d.push(b[e])}}if(d.length>0)return{first:d[0],last:d[d.length-1]}}var b;try{b=angular.module("ngA11y")}catch(a){b=angular.module("ngA11y",[])}var c="a, input, [tabindex], select, button, textarea, area";b.directive("nga11yModal",["$log",function(b){return{link:function(b,c,d,e){c.attr("tabindex","-1");var f=c[0].querySelector("[nga11y-modal-closer]");c.on("keydown",function(b){if(27===b.which)return void(f&&angular.element(f).triggerHandler("click"));if(9===b.which){var d=a(c);d&&(d.first===d.last?(d.first.focus(),b.preventDefault()):b.target===d.first&&b.shiftKey?(d.last.focus(),b.preventDefault()):b.target!==d.last||b.shiftKey||(d.first.focus(),b.preventDefault()))}})}}}])}();
import numpy as np
import pandas as pd
from sklearn.linear_model import LassoLarsCV
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from tpot.builtins import StackingEstimator, ZeroCount

# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'].values, random_state=None)

# Average CV score on the training set was: -6726.261493663938
exported_pipeline = make_pipeline(
    StackingEstimator(estimator=LassoLarsCV(normalize=False)),
    ZeroCount(),
    KNeighborsRegressor(n_neighbors=3, p=1, weights="distance")
)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
import json
import traceback
from typing import IO
from typing import Iterable


class Meta(type):
    def __new__(mcs, name, base, attr):
        # Collect all Field descriptors declared on the class into
        # __mappings__ and strip them from the class attributes.
        mappings = {}
        for k, v in attr.items():
            if isinstance(v, Field):
                mappings[k] = v
        for k in mappings:
            attr.pop(k)
        attr['__mappings__'] = mappings
        return type.__new__(mcs, name, base, attr)


class Field:
    def __init__(self, name=None, value=None):
        self.name = name
        self.value = value

    def __repr__(self):
        return '{}'.format(self.__class__.__name__)


class JsonItemMixin:
    """Implements `dump`, turning an `Item` subclass into JSON-ready data."""

    def __init__(self):
        ...

    def dump(self):
        dumping = {}
        for k, v in self.__mappings__.items():
            print('dumping: ', k, v)
            dumping[k] = v
        return dumping


class FileItemMixin:
    """Implements `save` and `flush`, persisting an `Item` to a file."""

    def __init__(self):
        ...

    def save(self):
        ...


class Item(metaclass=Meta):
    def __init__(self):
        self.obj = {}

    def keys(self) -> Iterable:
        return self.obj.keys()

    def items(self) -> Iterable:
        # Delegate to the backing dict (calling self.items() here would
        # recurse forever).
        return self.obj.items()

    def __len__(self) -> int:
        return self.obj.__len__()

    def __setitem__(self, k, v):
        return self.obj.__setitem__(k, v)

    def __getitem__(self, k):
        return self.obj.__getitem__(k)

    def __delitem__(self, v):
        return self.obj.__delitem__(v)

    def __str__(self) -> str:
        return self.obj.__str__()


class JsonItem(Item, JsonItemMixin):
    def __init__(self):
        super(JsonItem, self).__init__()


class JsonFile(Item):
    def __init__(self, file: IO, obj: dict = None, indent=4):
        """Keeps a JSON object and its backing file in sync.

        Do not call this constructor directly; use the factory methods.

        :param file: a file stream
        :param obj: a dict
        :param indent: indentation width
        """
        super().__init__()
        if not file.writable():
            raise IOError('file is not writable <{}>'.format(file.name))
        if not file.seekable():
            raise IOError('file is not seekable <{}>'.format(file.name))
        if not file.readable():
            raise IOError('file is not readable <{}>'.format(file.name))
        self.f = file
        self.obj = obj
        self.indent = indent

    @classmethod
    def from_newfile(cls, filename, mode='w+', encoding='utf8'):
        # 'w+' (not 'w') so the stream passes the readable() check above.
        f = open(filename, mode=mode, encoding=encoding)
        return cls(f, {})

    @classmethod
    def from_filename(cls, filename, mode='r+', encoding='utf8'):
        f = open(filename, mode=mode, encoding=encoding)
        c = cls(f)
        c.load(f)
        return c

    @classmethod
    def from_streaming(cls, streaming):
        c = cls(streaming, {})
        return c

    def load(self, streaming: IO):
        self.obj = json.load(streaming)

    def dump(self):
        self.f.seek(0)
        json.dump(self.obj, self.f, indent=self.indent, ensure_ascii=False)
        # Drop any leftover bytes if the new content is shorter than the old.
        self.f.truncate()

    def close(self):
        self.dump()
        self.f.close()


# ################################# test Mapping ###########################


class MyItem(Item):
    def __init__(self):
        super().__init__()

    field = Field()


class MyJsonItem(JsonItem):
    field = Field()


class Test:
    def test_json_item(self):
        mji = MyJsonItem()
        mji.dump()

    def test_mapping(self):
        it = MyItem()
        mp = it.__mappings__
        print('mp={}'.format(mp))

    def test_json2(self):
        jf2 = JsonFile.from_filename('resource/testjson2.json')
        jf2['title'] = 'changeadsfsdafdsd testjson2'
        jf2.close()

    # noinspection PyBroadException
    def start_test(self):
        method_names = [method for method in self.__dir__()
                        if method.startswith('test')]
        for method_name in method_names:
            try:
                print('======test: [{}]======'.format(method_name))
                getattr(self, method_name)()
            except Exception:
                print('======Exception=======')
                traceback.print_exc()


if __name__ == '__main__':
    test = Test()
    test.start_test()
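# A minimal usage sketch (not part of the original module): round-trips a
# value through JsonFile; the /tmp path is illustrative only.
def _demo_json_file():
    jf = JsonFile.from_newfile('/tmp/demo.json')
    jf['title'] = 'hello'
    jf.close()                       # dump() and close the stream

    jf2 = JsonFile.from_filename('/tmp/demo.json')
    assert jf2['title'] == 'hello'
    jf2.close()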
from typing import Optional from bxcommon import constants from bxcommon.messages.bloxroute.bloxroute_message_control_flags import BloxrouteMessageControlFlags from bxcommon.messages.bloxroute.bloxroute_message_type import BloxrouteMessageType from bxcommon.messages.validation.abstract_message_validator import AbstractMessageValidator from bxcommon.messages.validation.control_flag_validation_error import ControlFlagValidationError from bxcommon.messages.validation.message_size_validation_settings import \ MessageSizeValidationSettings from bxcommon.messages.validation.message_validation_error import MessageValidationError from bxcommon.utils import convert from bxcommon.utils.buffers.input_buffer import InputBuffer class BloxrouteMessageValidator(AbstractMessageValidator): FIRST_VALIDATING_VERSION = 4 def __init__( self, size_validation_settings: Optional[MessageSizeValidationSettings], connection_protocol_version: int ): self._size_validation_settings: Optional[MessageSizeValidationSettings] = size_validation_settings self._connection_protocol_version: int = connection_protocol_version def validate( self, is_full_msg: bool, msg_type: Optional[bytes], header_len: int, payload_len: Optional[int], input_buffer: InputBuffer ) -> None: """ Validates message payload length. Throws MessageValidationError is message is not valid :param is_full_msg: indicates if the full message is available on input buffer :param msg_type: message type :param header_len: message header length :param payload_len: message payload length :param input_buffer: input buffer """ if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION: self._validate_starting_sequence(input_buffer) if self._size_validation_settings is not None: self._validate_payload_length(msg_type, payload_len) if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION: self._validate_control_flags(is_full_msg, header_len, payload_len, input_buffer) def _validate_starting_sequence(self, input_buffer: InputBuffer) -> None: if input_buffer.length < constants.STARTING_SEQUENCE_BYTES_LEN: return starting_sequence_bytes = input_buffer[:constants.STARTING_SEQUENCE_BYTES_LEN] if starting_sequence_bytes != constants.STARTING_SEQUENCE_BYTES: raise MessageValidationError( f"Expected message to begin with starting sequence " f"but received first bytes " f"'{convert.bytes_to_hex(starting_sequence_bytes)}'" ) def _validate_payload_length(self, msg_type: Optional[bytes], payload_len: Optional[int]) -> None: if msg_type is None or payload_len is None: return if msg_type == BloxrouteMessageType.TRANSACTION: size_validation_settings = self._size_validation_settings assert size_validation_settings is not None if payload_len > size_validation_settings.max_tx_size_bytes: raise MessageValidationError( f"Transaction message size exceeds expected max size. " f"Expected: {size_validation_settings.max_tx_size_bytes}. " f"Actual: {payload_len}." ) elif msg_type in { BloxrouteMessageType.BROADCAST, BloxrouteMessageType.TRANSACTIONS, BloxrouteMessageType.TX_SERVICE_SYNC_BLOCKS_SHORT_IDS, BloxrouteMessageType.TX_SERVICE_SYNC_TXS, BloxrouteMessageType.TRANSACTION_CLEANUP, BloxrouteMessageType.BLOCK_CONFIRMATION, }: size_validation_settings = self._size_validation_settings assert size_validation_settings is not None if payload_len > size_validation_settings.max_block_size_bytes: raise MessageValidationError( f"{msg_type} message size exceeds expected max size. " f"Expected: {size_validation_settings.max_block_size_bytes}. " f"Actual: {payload_len}." 
) elif payload_len > constants.DEFAULT_MAX_PAYLOAD_LEN_BYTES: raise MessageValidationError( f"Message by type '{msg_type}' exceeds expected payload len. " f"Expected: {constants.DEFAULT_MAX_PAYLOAD_LEN_BYTES}. " f"Actual: {payload_len}." ) def _validate_control_flags( self, is_full: bool, header_len: int, payload_len: Optional[int], input_buffer: InputBuffer ) -> None: if not is_full: return assert payload_len is not None if input_buffer.length < header_len + payload_len: raise MessageValidationError( f"Not enough bytes in the input buffer to get control flags. " f"Header length: {header_len}. " f"Payload length: {payload_len}. " f"Input buffer length: {input_buffer.length}" ) control_flag_byte = input_buffer[header_len + payload_len - 1:header_len + payload_len] if BloxrouteMessageControlFlags.VALID not in BloxrouteMessageControlFlags(control_flag_byte[0]): raise ControlFlagValidationError( f"Control flags byte does not have VALID flag set. Value: {control_flag_byte}.", control_flag_byte[0] )
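# A hedged wiring sketch (not part of the original module): constructing the
# validator for a peer at or above the first validating protocol version.
# MessageSizeValidationSettings' constructor arguments are assumed from the
# field names used above; the size limits are illustrative values.
def _build_validator() -> BloxrouteMessageValidator:
    settings = MessageSizeValidationSettings(
        max_tx_size_bytes=1000000,
        max_block_size_bytes=10000000,
    )
    return BloxrouteMessageValidator(settings, connection_protocol_version=4)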
(function( ab, eventTarget ){ "use strict"; ab.threeBase = function(config){ config = config || {}; var aspectRatio = 2.58, scene = (function(){ var scn = new THREE.Scene(); return function(){ return scn; } }()), renderer = (function(){ var rnd = new THREE.WebGLRenderer(); return function(){ return rnd; } }()), camera = (function(){ var cam = new THREE.PerspectiveCamera( 60, aspectRatio, 0.1, 1000 ); return function(){ return cam; } }()), init = function(){ document.body.appendChild( renderer().domElement ); window.addEventListener('resize', onResize); onResize(); }, onResize = function(){ var width = window.innerWidth, height = window.innerWidth / aspectRatio; renderer().setSize( width, height ); camera().aspect = width / height; camera().updateProjectionMatrix(); }; init(); return { scene: scene, camera: camera, renderer: renderer }; } }(window.ab = window.ab || {}, ab.eventTarget));
import streamlit as st import pandas as pd import base64 class DataLoader: def __init__(self): self.is_without_labels = False self.separator = ',' def check_labels(self): if st.checkbox('The file has no labels for columns in the first row'): self.is_without_labels = True def check_separator(self): sep_dict = {'comma': ',', 'semicolon': ';', 'space': ' ','tab':'\t'} sep = st.selectbox('Select the separator used in the file', list(sep_dict.keys())) if sep: self.separator = sep_dict[sep] def load_file(self): return st.file_uploader('Upload csv, tsv or txt file:', type=['csv','tsv','txt']) @st.cache def load_data(self, arquivo): if self.is_without_labels: df = pd.read_csv(arquivo, sep=self.separator, header=None) df = df.select_dtypes(include=['number']) df.dropna(inplace=True) df.columns = ['x' + str(i) for i in range(1, df.shape[1] + 1)] return df else: df = pd.read_csv(arquivo, sep=self.separator) df = df.select_dtypes(include=['number']) df.dropna(inplace=True) lowercase = lambda x: str(x).lower() df.rename(lowercase, axis='columns', inplace=True) return df
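# A minimal usage sketch (not part of the original module): the call order a
# Streamlit page would use; run with `streamlit run app.py`. It relies on the
# `st` import at the top of this module; the widget flow is illustrative.
def main():
    loader = DataLoader()
    loader.check_labels()
    loader.check_separator()
    uploaded = loader.load_file()
    if uploaded is not None:
        df = loader.load_data(uploaded)
        st.dataframe(df)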
#! /usr/bin/env python3
# Method that handles piping to allow communication between processes
import os, sys, time, re, pipe
from redirection import outRedir, inRedir


def piping(args):
    # '|' for split command
    #args = args.split('|')

    '''
    Left and right arguments. lArg holds everything on the pipe's left side;
    rArg holds everything on the pipe's right side.
    '''
    lArg = args[0:args.index("|")]
    rArg = args[len(lArg)+1:]

    pipeRead, pipeWrite = os.pipe()

    rc = os.fork()

    # Forking failed if we get a negative value back
    if rc < 0:
        os.write(2, ("Fork failed, returning %d\n" % rc).encode())
        exit()

    elif rc == 0:
        # redirects child stdout (fd 1)
        os.close(1)
        # duplicates the pipe's write end onto the child's fd 1
        os.dup(pipeWrite)
        os.set_inheritable(1, True)
        for fdno in (pipeRead, pipeWrite):
            # closes all pipe fds
            os.close(fdno)
        # command() is expected to be provided by the calling shell module;
        # it will get the left argument
        command(lArg)

    else:
        # redirects parent stdin (fd 0)
        os.close(0)
        os.dup(pipeRead)
        os.set_inheritable(0, True)
        for fdno in (pipeWrite, pipeRead):
            # closes all pipe fds
            os.close(fdno)
        if "|" in rArg:
            # recurse on the remaining pipeline (calling the imported `pipe`
            # module here was a bug)
            piping(rArg)
            # command will get the right side
            command(rArg)
            if '<' in lArg:
                inRedir("in")
            if '>' in lArg:
                inRedir("out")
        # The right argument will be executed
        else:
            if '<' in rArg:
                outRedir("in")
            if '>' in rArg:
                outRedir("out")


'''
From p5-pipe-fork.py within the demos directory

pid = os.getpid()  # get and remember pid

pr, pw = os.pipe()
for f in (pr, pw):
    os.set_inheritable(f, True)
print("pipe fds: pr=%d, pw=%d" % (pr, pw))
import fileinput

print("About to fork (pid=%d)" % pid)

rc = os.fork()

if rc < 0:
    print("fork failed, returning %d\n" % rc, file=sys.stderr)
    sys.exit(1)

elif rc == 0:  # child - will write to pipe
    print("Child: My pid==%d. Parent's pid=%d" % (os.getpid(), pid), file=sys.stderr)
    args = ["wc", "p3-exec.py"]

    os.close(1)  # redirect child's stdout
    os.dup(pw)
    for fd in (pr, pw):
        os.close(fd)
    print("hello from child")

else:  # parent (forked ok)
    print("Parent: My pid==%d. Child's pid=%d" % (os.getpid(), rc), file=sys.stderr)
    os.close(0)
    os.dup(pr)
    for fd in (pw, pr):
        os.close(fd)
    for line in fileinput.input():
        print("From child: <%s>" % line)
'''
#!/bin/python3 import math import os import random import re import sys if __name__ == '__main__': s = input() the_letters = {} total = len(s) for character in s: if character in the_letters: continue else: the_letters[character] = s.count(character) frequencies = sorted(the_letters.items(), key = lambda kv:(total - kv[1], kv[0])) count = 0 for item in frequencies: count += 1 print(item[0], item[1]) if count >= 3: break
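# A hedged equivalent sketch (not part of the original solution): the same
# "top three most frequent, ties broken alphabetically" query written with
# collections.Counter.
from collections import Counter


def top_three(text):
    counts = Counter(text)
    # sort by descending count, then ascending character
    ranked = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))
    return ranked[:3]


assert top_three('aabbbc') == [('b', 3), ('a', 2), ('c', 1)]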
import React from "react"; // import { Link } from 'gatsby' import icon1 from "../img/img/icon1.svg"; import icon2 from "../img/img/icon2.svg"; // import instagram from '../img/social/instagram.svg' // import twitter from '../img/social/twitter.svg' // import vimeo from '../img/social/vimeo.svg' import contactus from "../img/img/contactus.svg"; const Footer = class extends React.Component { render() { return ( // <footer className="footer has-background-black has-text-white-ter"> // <div className="content has-text-centered"> // <img // src={logo} // alt="Kaldi" // style={{ width: '14em', height: '10em' }} // /> // </div> // <div className="content has-text-centered has-background-black has-text-white-ter"> // <div className="container has-background-black has-text-white-ter"> // <div className="columns"> // <div className="column is-4"> // <section className="menu"> // <ul className="menu-list"> // <li> // <Link to="/" className="navbar-item"> // Home // </Link> // </li> // <li> // <Link className="navbar-item" to="/about"> // About // </Link> // </li> // <li> // <Link className="navbar-item" to="/products"> // Products // </Link> // </li> // <li> // <Link className="navbar-item" to="/contact/examples"> // Form Examples // </Link> // </li> // <li> // <a // className="navbar-item" // href="/admin/" // target="_blank" // rel="noopener noreferrer" // > // Admin // </a> // </li> // </ul> // </section> // </div> // <div className="column is-4"> // <section> // <ul className="menu-list"> // <li> // <Link className="navbar-item" to="/blog"> // Latest Stories // </Link> // </li> // <li> // <Link className="navbar-item" to="/contact"> // Contact // </Link> // </li> // </ul> // </section> // </div> // <div className="column is-4 social"> // <a title="facebook" href="https://facebook.com"> // <img // src={facebook} // alt="Facebook" // style={{ width: '1em', height: '1em' }} // /> // </a> // <a title="twitter" href="https://twitter.com"> // <img // className="fas fa-lg" // src={twitter} // alt="Twitter" // style={{ width: '1em', height: '1em' }} // /> // </a> // <a title="instagram" href="https://instagram.com"> // <img // src={instagram} // alt="Instagram" // style={{ width: '1em', height: '1em' }} // /> // </a> // <a title="vimeo" href="https://vimeo.com"> // <img // src={vimeo} // alt="Vimeo" // style={{ width: '1em', height: '1em' }} // /> // </a> // </div> // </div> // </div> // </div> // </footer> <footer id="contact-us"> <div className="container"> <div className="footer-content"> <h1 className="header-txt scroll-reveal">Contact Us</h1> <div className="divider scroll-reveal"> <img src={contactus} alt="contactus" /> </div> <div className="contact-info scroll-reveal"> <div className="info-address"> <h3>Location</h3> <p>Bagwatibhal,Nepal</p> </div> <div className="reservations"> <h3>Further Info</h3> <p> info@kriticraft.com <br /> 9803871287 </p> </div> </div> <div className="contact-form scroll-reveal" data-origin="bottom" data-distance="20%" > <h3>Contact us</h3> <form data-netlify="true"> <input type="text" name="name" placeholder="Name" /> <input type="email" name="email" placeholder="Email" /> <textarea placeholder="Message" /> <button className="send-form" type="button"> Send </button> </form> </div> <div className="social-icons scroll-reveal" data-duration="1500"> <div className="fb-i"> <img src={icon1} alt="icon" /> </div> <div className="insta-i"> <img src={icon2} alt="icon" /> </div> </div> </div> <p className="copy-info">Copyright 2017 © by Anna Dadej</p> </div> </footer> ); } }; 
export default Footer;
import firebase from 'firebase' require('dotenv').config() let config = { apiKey: process.env.apiKey, authDomain: process.env.authDomain, databaseURL: process.env.databaseURL, projectId: process.env.projectId, storageBucket: process.env.storageBucket, messagingSenderId: process.env.messagingSenderId }; let fire = firebase.initializeApp(config); export default fire;