| content (string, length 1-1.04M) | input_ids (list, length 1-774k) | ratio_char_token (float64, 0.38-22.9) | token_count (int64, 1-774k) |
|---|---|---|---|
from __future__ import absolute_import
from .huffman import HeapNode, HuffmanCoding
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
764,
71,
1648,
805,
1330,
679,
499,
19667,
11,
14721,
805,
34,
7656,
198
] | 3.5 | 24 |
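The four columns above pair each `content` string with its `input_ids` tokenization and two derived statistics. The IDs look like a GPT-2-style BPE vocabulary (198 appears wherever the source has a newline), but the exact tokenizer is not stated in the table, so the sketch below is a hedged guess: it shows how `token_count` and `ratio_char_token` could be recomputed from `content` with the Hugging Face `gpt2` tokenizer.

```python
# Minimal sketch, assuming a GPT-2 BPE tokenizer; the dataset does not name
# the tokenizer it used, so "gpt2" is an assumption.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def derive_row_stats(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "input_ids": input_ids,
        "token_count": token_count,
        # characters per token, as in the ratio_char_token column
        "ratio_char_token": len(content) / token_count if token_count else 0.0,
    }

# Example: the first row's content
stats = derive_row_stats(
    "from __future__ import absolute_import\n"
    "from .huffman import HeapNode, HuffmanCoding\n"
)
print(stats["token_count"], round(stats["ratio_char_token"], 2))
```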
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.express as px
import re
from functions import filterByTokens
# load article csv into panda
articleData = pd.read_csv('fataburen_articles_diva_processed.csv')
articleData['Pages'] = articleData['EndPage']-articleData['StartPage']+1 # add pagecount as separate column in dataframe
articleData = articleData[articleData['NBN'].str.contains('nordiskamuseet')] # clean data by removing duplicate articles added by other institutions
# Load & prepare author data
authorsData = pd.read_csv('fataburen_authors.csv')
unique_authors = authorsData['Name']
authorsDataByArticleCount = authorsData.copy()
authorsDataByPageCount = authorsData.copy()
authorsDataByEarliestArticle = authorsData.copy()
authorsDataByArticleCount.sort_values(by=['ArticlesTotal', 'Name'], ascending=[True, False], inplace=True)
authorsDataByPageCount.sort_values(by=['PagesTotal', 'Name'], ascending=[True, False], inplace=True)
authorsDataByEarliestArticle.sort_values(by=['EarliestArticle', 'Name'], ascending=[False, False], inplace=True)
authorsDataByEarliestArticle['LatestArticleYear'] = authorsDataByEarliestArticle['LatestArticle'].str.slice(0, 4)
authorsDataByEarliestArticle['ArticleMeanRounded'] = round(authorsDataByEarliestArticle['ArticleMean'], 0)
# Load & prepare keyword data
keywordsData = pd.read_csv('fataburen_keywords.csv')
unique_keywords = keywordsData['Keyword'].unique()
keywordsDataByArticleCount = keywordsData.copy()
keywordsDataByPageCount = keywordsData.copy()
keywordsDataByEarliestArticle = keywordsData.copy()
keywordsDataByArticleCount.sort_values(by=['ArticlesTotal', 'Keyword'], ascending=[True, False], inplace=True)
keywordsDataByPageCount.sort_values(by=['PagesTotal', 'Keyword'], ascending=[True, False], inplace=True)
keywordsDataByEarliestArticle.sort_values(by=['EarliestArticle', 'Keyword'], ascending=[False, False], inplace=True)
keywordsDataByEarliestArticle['LatestArticleYear'] = keywordsDataByEarliestArticle['LatestArticle'].str.slice(0, 4)
keywordsDataByEarliestArticle['ArticleMeanRounded'] = round(keywordsDataByEarliestArticle['ArticleMean'], 0)
unique_keywords = keywordsData['Keyword']
# Initiate & configure Dash to display the graphs
app = dash.Dash(__name__, suppress_callback_exceptions=True)
server = app.server
# Layouts
url_bar_and_content_div = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
header = html.Div(children=[
html.H1('Fataburen Articles 1886–2017'),
dcc.Link('Explore Articles', href='/explore'), ' • Article Statistics: ',
dcc.Link('Authors by Article Count', href='/authors-articles'), ' • ',
dcc.Link('Authors by Page Count', href='/authors-pages'), ' • ',
dcc.Link('Authors by Active Period', href='/authors-period'), ' • ',
dcc.Link('Keywords by Author', href='/keywords-author'), ' • ',
dcc.Link('Keywords by Article Count', href='/keywords-articles'), ' • ',
dcc.Link('Keywords by Page Count', href='/keywords-pages'), ' • ',
dcc.Link('Keywords by Active Period', href='/keywords-period'), ' • ',
dcc.Link('About', href='/about'),
],
id='header'
)
layout_explore = html.Div(children=[
header,
html.P([
'Explore the content of Fataburen, the yearbook/journal of Nordiska museet & Skansen. Work in progress by ',
html.A(
children='Aron Ambrosiani',
href='https://twitter.com/AronAmbrosiani/'
),
'. ',
dcc.Link('About this website', href='/about')
]),
html.Div(
className='dropdowns',
children=[
dcc.Dropdown(
id='keyword',
options=[{'label': i, 'value': i} for i in unique_keywords],
value='Keywords',
multi=True,
placeholder='Select keywords (OR)'
),
dcc.Dropdown(
id='author',
options=[{'label': i, 'value': i} for i in unique_authors],
value='Name',
multi=True,
placeholder='Select authors (OR)'
)
]
),
html.P(
id='articleCount',
children=''),
dcc.Graph(
id='articlesByYearFigure'
),
html.Div([
html.H2(
children='Selected Article: '
),
html.P([
html.Span(
id='articleInfo',
children=''
),
html.Br(),
'Link to DiVA page: ',
html.A(
id='outbound-link',
href='',
target='_blank',
children='',
title=''),
html.Br(),
'Direct link to PDF: ',
html.A(
id='pdf-link',
href='',
target='_blank',
children='',
title='')
])], className='one column'
)])
layout_authors_articles = html.Div(children=[
header,
html.Div(
dcc.Graph(
id='authorsByArticles',
figure=px.bar(
authorsDataByArticleCount,
x='ArticlesTotal',
y='Name',
orientation='h',
title='Authors by Article Count',
hover_name='Name',
hover_data={'Name': False}
),
style={'height': len(authorsDataByArticleCount)*15},
),
className='fullheight'
)])
layout_authors_pages = html.Div(children=[
header,
html.Div(
dcc.Graph(
id='authorsByPages',
figure=px.bar(
authorsDataByPageCount,
x='PagesTotal',
y='Name',
orientation='h',
title='Authors by Page Count',
hover_name='Name',
hover_data={'Name': False}
),
style={'height': len(authorsDataByArticleCount)*15},
),
className='fullheight'
)])
layout_authors_period = html.Div(children=[
header,
html.Div([
'Bars show timespan from earliest to latest published article. Mean publishing year displayed on hover.',
dcc.Graph(
id='authorsByPeriod',
figure=px.timeline(
authorsDataByEarliestArticle,
x_start='EarliestArticle',
x_end='LatestArticle',
y='Name',
title='Authors by Active Period',
hover_name='Name',
hover_data={
'EarliestArticle': '|%Y',
'LatestArticle': False,
'LatestArticleYear': True,
'ArticleMeanRounded': True,
'Name': False
}
),
style={'height': len(authorsDataByArticleCount)*15}
)],
className='fullheight'
)])
layout_keywords_author = html.Div(children=[
header,
html.P('Select author to display most used keywords.'),
dcc.Dropdown(
id='authorKeywords',
options=[{'label': i, 'value': i} for i in unique_authors],
value='Hammarstedt, Nils Edvard',
multi=False,
placeholder='Select author',
),
dcc.Graph(
id='keywordsByAuthor'
)])
layout_keywords_articles = html.Div(children=[
header,
html.Div(
dcc.Graph(
id='keywordsByArticles',
figure=px.bar(
keywordsDataByArticleCount,
x='ArticlesTotal',
y='Keyword',
orientation='h',
title='Keywords by Article Count',
hover_name='Keyword',
hover_data={'Keyword': False}
),
style={'height': len(keywordsDataByArticleCount)*15},
),
className='fullheight'
)])
layout_keywords_pages = html.Div(children=[
header,
html.Div(
dcc.Graph(
id='keywordsByPages',
figure=px.bar(
keywordsDataByPageCount,
x='PagesTotal',
y='Keyword',
orientation='h',
title='Keywords by Page Count',
hover_name='Keyword',
hover_data={'Keyword': False}
),
style={'height': len(keywordsDataByArticleCount)*15},
),
className='fullheight'
)])
layout_keywords_period = html.Div(children=[
header,
html.Div([
'Bars show timespan from earliest to latest published article. Mean publishing year displayed on hover.',
dcc.Graph(
id='keywordsByPeriod',
figure=px.timeline(
keywordsDataByEarliestArticle,
x_start='EarliestArticle',
x_end='LatestArticle',
y='Keyword',
title='Keywords by Active Period',
hover_name='Keyword',
hover_data={
'EarliestArticle': '|%Y',
'LatestArticle': False,
'LatestArticleYear': True,
'ArticleMeanRounded': True,
'Keyword': False
}
),
style={'height': len(keywordsDataByArticleCount)*15}
)],
className='fullheight'
)])
layout_about = html.Div(children=[
header,
html.P([
'Explore the content of Fataburen, the yearbook/journal of Nordiska museet & Skansen. Work in progress by ',
html.A(
children='Aron Ambrosiani',
href='https://twitter.com/AronAmbrosiani/'
),
'. Code available at ',
html.A(
children='github.com/ambrosiani/fataburen/',
href='https://github.com/ambrosiani/fataburen/'),
' as open source. Please reuse and adapt if you find it useful!'
]),
html.P([
'This website was made as a final project for the course ',
html.A(
children='4ME501: Programming for Digital Humanities',
href='https://lnu.se/en/course/programming-for-digital-humanities/vaxjo-distance-international-part-time-autumn/'),
' given at Linnaeus University during the Autumn 2020 semester.'
]),
html.P([
'The presented data was exported from ',
html.A(
children='DiVA',
href='http://www.diva-portal.org/'
),
' using the following feed url: ',
html.Br(),
html.A(
children='http://www.diva-portal.org/smash/export.jsf?format=csvall&addFilename=true&aq=[[]]&aqe=[]&aq2=[[{"seriesISSN":"0348-971X","organisationId-Xtra":false},{"publicationTypeCode":["chapter"]}]]&onlyFullText=false&noOfRows=2000&sortOrder=title_sort_asc&sortOrder2=dateIssued_sort_asc',
href='http://www.diva-portal.org/smash/export.jsf?format=csvall&addFilename=true&aq=[[]]&aqe=[]&aq2=[[{"seriesISSN":"0348-971X","organisationId-Xtra":false},{"publicationTypeCode":["chapter"]}]]&onlyFullText=false&noOfRows=2000&sortOrder=title_sort_asc&sortOrder2=dateIssued_sort_asc')
]
)])
# Add layouts to app
app.layout = url_bar_and_content_div
app.validation_layout = html.Div([
url_bar_and_content_div,
layout_explore,
layout_authors_articles,
layout_authors_pages,
layout_authors_period,
layout_keywords_author,
layout_keywords_articles,
layout_keywords_pages,
layout_keywords_period,
layout_about])
# Callback for layout switching
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
# Callback for graph clicks on Explore view
@app.callback(
Output('outbound-link', 'children'),
Output('outbound-link', 'href'),
Output('outbound-link', 'title'),
Output('articleInfo', 'children'),
Output('pdf-link', 'children'),
Output('pdf-link', 'href'),
Output('pdf-link', 'title'),
Input('articlesByYearFigure', 'clickData'))
# Callback for keyword & author dropdowns on Explore view
@app.callback(
Output('articlesByYearFigure', 'figure'),
Output('articleCount', 'children'),
Input('keyword', 'value'),
Input('author', 'value'))
# Callback for Author dropdown on Keywords by Author view
@app.callback(
Output('keywordsByAuthor', 'figure'),
Output('keywordsByAuthor', 'style'),
Input('authorKeywords', 'value'))
if __name__ == '__main__':
app.run_server(debug=False)
| [
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
198,
11748,
19798,
292,
355,
279,
67,
1... | 2.202375 | 5,727 |
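The Dash app in the row above registers several `@app.callback` decorators whose function bodies are missing from this dump (a decorator must be followed by a `def`, so the code as shown would not run). The sketch below shows what the URL-routing callback typically looks like in a Dash multi-page app; it reuses the layouts defined in the row, but the function name and the fallback behaviour are illustrative assumptions, not the original author's code.

```python
# Hedged sketch of a Dash multi-page routing callback; `display_page` and the
# fallback to the explore view are assumptions, since the original bodies are
# not present in the row above.
@app.callback(Output('page-content', 'children'),
              Input('url', 'pathname'))
def display_page(pathname):
    routes = {
        '/explore': layout_explore,
        '/authors-articles': layout_authors_articles,
        '/authors-pages': layout_authors_pages,
        '/authors-period': layout_authors_period,
        '/keywords-author': layout_keywords_author,
        '/keywords-articles': layout_keywords_articles,
        '/keywords-pages': layout_keywords_pages,
        '/keywords-period': layout_keywords_period,
        '/about': layout_about,
    }
    # Fall back to the explore view for the root path or unknown URLs.
    return routes.get(pathname, layout_explore)
```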
# Generated by Django 2.2 on 2020-04-26 15:19
from django.db import migrations, models
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
12131,
12,
3023,
12,
2075,
1315,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
628
] | 3.025641 | 39 |
description = 'BOA Table 4'
pvprefix = 'SQ:BOA:mcu1:'
devices = dict(
t4tx = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Table 4 X Translation',
motorpv = pvprefix + 'T4TX',
errormsgpv = pvprefix + 'T4TX-MsgTxt',
precision = 0.05,
),
t4ty = device('nicos_ess.devices.epics.motor.EpicsMotor',
epicstimeout = 3.0,
description = 'Table 4 Y Translation',
motorpv = pvprefix + 'T4TY',
errormsgpv = pvprefix + 'T4TY-MsgTxt',
precision = 0.05,
),
Table4 = device('nicos_sinq.boa.devices.boatable.BoaTable',
description = 'Table 4',
standard_devices = ['t4tx', 't4ty']
),
)
| [
11213,
796,
705,
8202,
32,
8655,
604,
6,
198,
198,
79,
85,
40290,
796,
705,
50,
48,
25,
8202,
32,
25,
23209,
84,
16,
32105,
198,
198,
42034,
796,
8633,
7,
198,
220,
220,
220,
256,
19,
17602,
796,
3335,
10786,
6988,
418,
62,
408,... | 2.056338 | 355 |
from __future__ import print_function
import os
import sys
import json
import urllib
import itertools
from tqdm import tqdm
from utils import LogUtil
from parser import Parser
from ioutils import read_lines
from ads_parser import AdsParser
from pycorenlp import StanfordCoreNLP
# The following two lines make CoreNLP happy
reload(sys)
sys.setdefaultencoding('UTF8')
class CoreNLPParser(Parser):
""" The CoreNLPParser class builds upon Stanford CoreNLP package """
CORENLP_PARSER = "edu.stanford.nlp.pipeline.CoreNLPServer"
def parse(self, text):
""" Named entity recognition (NER) using stanford CoreNLP package
Args:
text (str): A string (can be a long string) in which Named Entity
Recognition will run.
Return:
this function returns a dictionary contains the NERs identified,
sentences extracted, and name of the source parser
"""
if type(text) != str:
text = text.encode('utf8')
if text[0].isspace(): # don't strip white spaces
text = '.' + text[1:]
# Quote (with percent-encoding) reserved characters in URL for CoreNLP
text = urllib.quote(text)
output = self.corenlp.annotate(text, properties=self.props)
# flatten sentences and tokens
tokenlists = [s['tokens'] for s in output['sentences']]
tokens = itertools.chain.from_iterable(tokenlists)
names = []
for token in tokens:
if token['ner'] != 'O':
name = {
'label': token['ner'],
'begin': token['characterOffsetBegin'],
'end': token['characterOffsetEnd'],
'text': token['originalText'],
'source': 'corenlp'
}
names.append(name)
# Handle multi-word tokens:
# Merge any adjacent Target tokens, if of the same type and
# separated by a space, into one span.
names.sort(key=lambda x: int(x['begin']))
new_names = []
skip_names = []
for n in names:
if n in skip_names:
continue
next_name = [n2 for n2 in names if
n['label'] == 'Target' and
n2['label'] == 'Target' and
int(n2['begin']) == int(n['end']) + 1]
if len(next_name) > 0:
n['text'] += ' ' + next_name[0]['text']
n['end'] = next_name[0]['end']
skip_names.append(next_name[0])
# Either way, save this one
new_names.append(n)
return {
'ner': new_names,
'X-Parsed-By': CoreNLPParser.CORENLP_PARSER,
'sentences': output['sentences']
}
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
input_parser = parser.add_mutually_exclusive_group(required=True)
input_parser.add_argument('-i', '--in_file', help='Path to input file')
input_parser.add_argument('-li', '--in_list', help='Path to input list')
parser.add_argument('-o', '--out_file', required=True,
help='Path to output JSON file')
parser.add_argument('-l', '--log_file', default='./corenlp-parser-log.txt',
help='Log file that contains processing information. '
'It is default to ./corenlp-parser-log.txt unless '
'otherwise specified.')
parser.add_argument('-p', '--tika_server_url', required=False,
help='Tika server URL')
parser.add_argument('-c', '--corenlp_server_url',
default='http://localhost:9000',
help='CoreNLP Server URL')
parser.add_argument('-n', '--ner_model', required=False,
help='Path to a Named Entity Recognition (NER) model ')
parser.add_argument('-a', '--ads_url',
default='https://api.adsabs.harvard.edu/v1/search/query',
help='ADS RESTful API. The ADS RESTful API should not '
'need to be changed frequently unless someting at '
'the ADS is changed.')
parser.add_argument('-t', '--ads_token',
default='jON4eu4X43ENUI5ugKYc6GZtoywF376KkKXWzV8U',
help='The ADS token, which is required to use the ADS '
'RESTful API. The token was obtained using the '
'instructions at '
'https://github.com/adsabs/adsabs-dev-api#access. '
'The ADS token should not need to be changed '
'frequently unless something at the ADS is '
'changed.')
args = parser.parse_args()
process(**vars(args))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
340,
861,
10141,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
3384,
4487... | 2.062006 | 2,403 |
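The `parse()` method above merges adjacent `Target` entities whose character offsets are one apart into a single span. The snippet below is a standalone illustration of that merging step on plain dictionaries, independent of CoreNLP; the sample tokens are invented for the example.

```python
# Standalone sketch of the adjacent-span merging done in parse(): two 'Target'
# names whose character offsets are one apart get joined into a single span.
def merge_adjacent_targets(names):
    names = sorted(names, key=lambda n: int(n['begin']))
    merged, skip = [], []
    for n in names:
        if n in skip:
            continue
        for n2 in names:
            if (n['label'] == 'Target' and n2['label'] == 'Target'
                    and int(n2['begin']) == int(n['end']) + 1):
                n['text'] += ' ' + n2['text']
                n['end'] = n2['end']
                skip.append(n2)
        merged.append(n)
    return merged

# Invented example: "Jezero Crater" recognised as two adjacent Target tokens.
tokens = [
    {'label': 'Target', 'begin': 10, 'end': 16, 'text': 'Jezero'},
    {'label': 'Target', 'begin': 17, 'end': 23, 'text': 'Crater'},
]
print(merge_adjacent_targets(tokens))
# [{'label': 'Target', 'begin': 10, 'end': 23, 'text': 'Jezero Crater'}]
```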
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@FileName : Agent.py
@Author : citang
@Date : 2021/7/27 5:46 PM
@Description : describes the purpose of the file
"""
import sys
from framework import Model, Db, Log, Config, Common
class __Agent__:
"""模块功能"""
def Data(self, name):
"""创建数据模型对象"""
return Model.__ModelData__(name)
def Db(self):
"""创建数据库对象"""
return Db.__MysqlDb__()
def Log(self):
"""创建日志对象"""
return Log.__Module__(self.__MODULENAME, self.__HANDLERNAME)
def __Cache(self):
"""创建缓存对象"""
pass
def GetAppConfig(self, group, name):
"""获取应用程序配置"""
return Config.GetAppConfig(group, name)
def GetSysConfig(self, group, name):
"""获取系统配置"""
return Config.GetSysConfig(group, name)
def SetApiCode(self, code):
"""设置API错误代码"""
self.__APICODE = str(code)
def GetApiCode(self):
"""获取API错误代码"""
return self.__APICODE
def GetRemoteIp(self):
"""获取请求IP"""
return self.__REMOTE_IP
def SetResult(self, data):
"""设置返回内容"""
# 若没有设置RESULTYPE则不允许设置返回值
if self.__RESULTYPE == '':
raise Exception('resultype is empty, cant set result')
# Check the data format
if data is None:
raise Exception('must not none of data')
if data.GetName() != self.__RESULTYPE:
raise Exception('router resultype different!')
self.__DATA = data.DumpDict()
def SetDictData(self, data):
"""设置返回内容"""
if not isinstance(data, dict):
raise Exception('data type must be dict')
# Check the data format
if data is None:
raise Exception('must not none of data')
self.__DATA = data
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
2488,
8979,
5376,
220,
220,
220,
1058,
15906,
13,
9078,
198,
220,
220,
220,
2488,
13838,
220,... | 1.740385 | 1,040 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DEV notes:
uses a trained network to predict the class for an input image
pass in a single image /path/to/image and
return the flower name and class probability
python predict.py /path/to/image checkpoint
python predict.py input checkpoint --top_k 3
python predict.py input checkpoint --category_names cat_to_name.json
python predict.py input checkpoint --gpu
"""
import numpy as np
import matplotlib.pyplot as plt # used to show() test image flower_files[3000]
import torch
import torchvision.transforms as transforms #used in transformations
import json #used to decode json file cat_to_name.json
import torchvision.models as models #imports models, pretrained VGG16 is used
#import is for training to be robust on truncated images
from PIL import ImageFile #work with JPEG images
ImageFile.LOAD_TRUNCATED_IMAGES = True
from PIL import Image
import torch.nn.functional as F #used for softmax function
import argparse #used to get arguments from user
def get_input_args():
"""
Retrieves and parses command line arguments provided by the user when
they run the program from a terminal. This function uses Python's
argparse module to create and define these command line arguments
If the user fails to provide some or all of the 5 arguments, the
defaults below are used for the missing arguments.
1. path to image --path
2. checkpoint name --check_point
3. amount of classes to return (top k) --top_k
4. file containing target label names --category_names
5. specify run on gpu --gpu
"""
#Create Argument Parser object named parser
parser = argparse.ArgumentParser()
#Argument 1:
parser.add_argument('--path', type=str, default='flowers/test/69/image_05971.jpg', \
help='path/to/image')
#Argument 2:
parser.add_argument('--check_point', type=str, default='model_transfer.pt', \
help='checkpoint name')
#Argument 3:
parser.add_argument('--top_k', type=int, default='1', \
help='amount of classes to return')
#Argument 4:
parser.add_argument('--category_names', type=str, default='cat_to_name.json', \
help='file with target labels')
#Argument 5:
parser.add_argument('--gpu', type=str, default='cpu', \
help='hardware to run on CPU or GPU')
#Assign parse_args() result to the in_args variable
in_args = parser.parse_args()
#access values of Arguments by printing it
#print("Arguments: ", in_args) #DEBUG
return in_args
def load_model_transfer_checkpoint(checkpoint_path):
"""Load a checkpoint and rebuild the model.
Parameters
----------
checkpoint_path : str
Check point path
"""
#load the saved checkpoint
checkpoint = torch.load(checkpoint_path)
#print(checkpoint) #DEBUG
#Model architecture: pre-trained VGG16
model_transfer = models.vgg16(pretrained=True)
#print(model_transfer) #DEBUG
#Freeze feature parameters
for param in model_transfer.parameters():
param.requires_grad = False
model_transfer.architecture = checkpoint['architecture']
model_transfer.classifier = checkpoint['classifier']
model_transfer.load_state_dict(checkpoint['state_dict']) # call the method; assigning a dict to it would overwrite it and never load the weights
#optimizer_transfer.state_dict = checkpoint['optimizer_dict']
model_transfer.class_to_idx = checkpoint['class_to_idx']
#print(model_transfer) #DEBUG
return model_transfer
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
Parameters
----------
path : str
image path
'''
# TODO: Process a PIL image for use in a PyTorch model
#normalize the means and standard deviations
#of the images to what the network expects
standard_normalization = transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
img = Image.open(image_path)
im_transforms = transforms.Compose([transforms.Resize(size=(224, 224)),
transforms.CenterCrop(224),
transforms.ToTensor(),
standard_normalization])
processed_image = im_transforms(img)
return processed_image
def imshow(image, ax=None, title=None):
"""Imshow for Tensor.
Parameters
----------
path : str
image path
"""
#print(title)
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.numpy().transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
Parameters
----------
image_path : str
image path
model :
trained model from checkpoint model
topk : int
predict top-K most probable classes
'''
#print(image_path, model, topk)
# TODO: Implement the code to predict the class from an image file
img_processed = process_image(image_path)
#add a dimension with a length of one expanding the rank
img_squeezed = img_processed.unsqueeze_(0)
#convert to floating point notation
img_squeezed_fp = img_squeezed.float()
with torch.no_grad():
output = model(img_squeezed_fp)
probability = torch.exp(output)
#print('output1',probability.topk(topk))
#probability = F.softmax(output.data,dim=1)
#return probability.topk(topk)
return probability.topk(topk)
def check_sanity(image_path, cat_to_name, model_transfer):
''' Plot the probabilities for the top 5 classes as a bar graph, along with the input image
Parameters
----------
image_path : str
image path
cat_to_name : str
mapping from category label to category name
model :
trained model from checkpoint model
'''
with open(cat_to_name, 'r') as f:
cat_to_name = json.load(f)
flower_num = image_path.split('/')[2]#title
fig_title = cat_to_name[flower_num]#get name of jpg
axs = imshow(process_image(image_path), ax=plt) #plot fig 1
axs.suptitle(fig_title)
#axs.show()
probs_classes = predict(image_path, model_transfer)
#print('probs_classes',probs_classes)
probs = np.array(probs_classes[0][0])
#print('probs',probs)
classes = [cat_to_name[str(index + 1)] for index in np.array(probs_classes[1][0])]
#print('classes',classes)
classes_len = float(len(classes))
fig,ax = plt.subplots(figsize=(7,4))
width = 0.8
tick_locs = np.arange(classes_len)
ax.bar(tick_locs, probs, width, linewidth=4.0, align = 'center')
ax.set_xticks(ticks = tick_locs)
ax.set_xticklabels(classes)
ax.set_xlim(min(tick_locs)-0.6,max(tick_locs)+0.6)
ax.set_yticks([0.2,0.4,0.6,0.8,1,1.2])
ax.set_ylim((0,1))
ax.yaxis.grid(True)
#plt.subplot(2,2,1)
plt.show()
def main():
"""
Main
"""
#print('python predict.py -h')
in_args = get_input_args()
print(in_args)
device = torch.device("cuda:0" if in_args.gpu and torch.cuda.is_available() else "cpu")
print('Device available on this machine: ', device)
checkpoint_model_transfer = load_model_transfer_checkpoint(in_args.check_point)
#print(checkpoint_model_transfer.type)
#processed_img = process_image(in_args.path)
#print(processed_img.shape)
#imshow(processed_img)
probs_classes = predict(in_args.path, checkpoint_model_transfer, in_args.top_k)
print(probs_classes)
check_sanity(in_args.path, in_args.category_names, checkpoint_model_transfer)
if __name__ == '__main__':
main()
#called if script is executed on its own
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
39345,
4710,
25,
198,
2664,
257,
8776,
3127,
284,
4331,
262,
1398,
329,
281,
5128,
2939,
198,
66... | 2.547899 | 3,236 |
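`predict()` above returns raw `topk` values and indices, and `check_sanity()` maps an index to a name with `cat_to_name[str(index + 1)]`, which only works if the class folders happen to be numbered in index order. A more defensive pattern is to invert the `class_to_idx` mapping stored in the checkpoint; the helper below is a hedged sketch of that idea, not the author's code.

```python
# Hedged sketch: map top-k output indices back to category names through the
# checkpoint's class_to_idx dict instead of assuming index + 1 == folder name.
def topk_class_names(probs_classes, class_to_idx, cat_to_name, topk=5):
    probs, indices = probs_classes                  # as returned by predict()
    idx_to_class = {v: k for k, v in class_to_idx.items()}
    names = []
    for idx in indices[0][:topk].tolist():
        folder = idx_to_class[idx]                  # e.g. '69'
        names.append(cat_to_name.get(folder, folder))
    return probs[0][:topk].tolist(), names
```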
if __name__ == "__main__":
read_data()
| [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
1100,
62,
7890,
3419,
198
] | 2.333333 | 18 |
#!/usr/bin/env python
#coding=utf-8
# vlan parse
def parse_cisco(req):
'''phy_slot/phy_subslot/phy_port:XPI.XCI'''
nasportid = req.get('NAS-Port-Id')
if not nasportid:return
nasportid = nasportid.lower()
parse_vlanid()
parse_vlanid2()
return req
def parse_std(req):
''''''
nasportid = req.get('NAS-Port-Id')
if not nasportid:return
nasportid = nasportid.lower()
parse_vlanid()
parse_vlanid2()
return req
def parse_ros(req):
''''''
nasportid = req.get('NAS-Port-Id')
if not nasportid:return
nasportid = nasportid.lower()
parse_vlanid()
parse_vlanid2()
return req
parse_radback = parse_ros
parse_zte = parse_ros
_parses = {
'0' : parse_std,
'9' : parse_cisco,
'3041' : parse_cisco,
'2352' : parse_radback,
'2011' : parse_std,
'25506' : parse_std,
'3902' : parse_zte,
'14988' : parse_ros
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
28,
40477,
12,
23,
198,
198,
2,
220,
410,
9620,
21136,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
4299,
21136,
62,
66,
4861,
7,
42180,
2599,
198,
220,
22... | 1.977459 | 488 |
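The `parse_cisco` docstring above documents the Cisco NAS-Port-Id layout as `phy_slot/phy_subslot/phy_port:XPI.XCI`, but the `parse_vlanid()` / `parse_vlanid2()` helpers it calls are not defined in this excerpt. The snippet below is a hedged sketch of how such a string could be split with a regular expression; the field names come from the docstring, everything else is an assumption.

```python
# Hedged sketch: split a Cisco-style NAS-Port-Id of the form
# "phy_slot/phy_subslot/phy_port:XPI.XCI" into its numeric parts.
import re

_CISCO_PORT_RE = re.compile(
    r'^(?P<slot>\d+)/(?P<subslot>\d+)/(?P<port>\d+):(?P<xpi>\d+)\.(?P<xci>\d+)$'
)

def parse_cisco_nas_port_id(nasportid):
    match = _CISCO_PORT_RE.match(nasportid.lower())
    if not match:
        return None
    return {key: int(value) for key, value in match.groupdict().items()}

print(parse_cisco_nas_port_id('3/0/1:100.200'))
# {'slot': 3, 'subslot': 0, 'port': 1, 'xpi': 100, 'xci': 200}
```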
import boto3
| [
11748,
275,
2069,
18,
198
] | 2.6 | 5 |
"""Errors for Google Assistant."""
class SmartHomeError(Exception):
"""Google Assistant Smart Home errors.
https://developers.google.com/actions/smarthome/create-app#error_responses
"""
def __init__(self, code, msg):
"""Log error code."""
super().__init__(msg)
self.code = code
| [
37811,
9139,
5965,
329,
3012,
15286,
526,
15931,
628,
198,
4871,
10880,
16060,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
11708,
15286,
10880,
5995,
8563,
13,
628,
220,
220,
220,
3740,
1378,
16244,
364,
13,
13297,
13,
785,
14,
... | 2.683333 | 120 |
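A short usage sketch for the class above follows; the error code string is only an example, check Google's documentation for the codes accepted by the Smart Home error response.

```python
# Minimal usage sketch for SmartHomeError; 'deviceOffline' is used here purely
# as an example code string.
try:
    raise SmartHomeError('deviceOffline', 'Light bulb did not respond')
except SmartHomeError as err:
    # err.code can be forwarded in the Smart Home error response payload
    print(err.code, str(err))
```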
# Generated by Django 3.1.13 on 2021-09-05 16:30
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1485,
319,
33448,
12,
2931,
12,
2713,
1467,
25,
1270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,... | 2.840909 | 44 |
import smart_imports
smart_imports.all()
| [
198,
11748,
4451,
62,
320,
3742,
198,
198,
27004,
62,
320,
3742,
13,
439,
3419,
628
] | 2.75 | 16 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, ValidationError, BooleanField
from wtforms.validators import Email, EqualTo, Required
from ..models import User
import email_validator
| [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
30275,
15878,
11,
39900,
15878,
11,
3254,
24765,
12331,
11,
41146,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
9570,
11,
2870... | 4 | 57 |
#!/usr/bin/env python3
# -*- encoding: utf-8
# SPDX-License-Identifier: CC0-1.0 & Additional T&Cs
# Copyright (c) 12021 - 12021 HE, Emporia.AI Pte Ltd
# See LICENSE.md for Additional Terms and Conditions
from quart import Blueprint, abort
from quart_trio import QuartTrio
from quart_schema import validate_request, validate_response
from routes import *
from common import *
from objects.auth import *
from objects.manage import singleton_manage
blueprint = Blueprint('MANAGE', __name__)
@blueprint.route("/api/engine/v1/MANAGE", methods=["POST"])
@validate_request(Manage_DATA)
@validate_response(Manage, 200)
async def engine_MANAGE(data: Manage_DATA) -> Manage:
"""
"""
if 1: #try:
result = await singleton_manage.route_MANAGE(data=data)
return result
#except:
# abort(400)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
12624,
15,
12,
16,
13,
15,
1222,
15891,
309,
5,
32274,
198,
2,
15069,
357,
66... | 2.783784 | 296 |
import pandas as pd
import os
import numpy as np
line_end = " \\\\\n"
files = ["china", "desharnais", "isbsg10", "osp", "osp2", "ground", "flight"]
percentage_metrics = ["SA+", "SD+", "MMRE-", "MdMRE-", "Pred25+", "Pred40+", "Delta+"]
to_show = ["MAR-", "SA+", "MdMRE-", "Pred40+", "Effort-" ,"Months-","Defects-", "Risks-"]
opt_map = {'nsga-iii' : "NSGA-III", 'random' : "Random64", 'sway' : "SWAY", 'moead' : "MOEA/D", 'tpe' : "TPE", 'nsga-ii' : "NSGA-II", 'default' : "Default",
"Random1024" : "Random1024", "Random300" : "Random300", "random300" : "Random300", "random150" : "random150", "random60" : "random60"}
for file in files:
path = os.path.join( 'combo-three', "result-" + file + "-summary+rank.csv" )
df = pd.read_csv( path )
df = df[[x in to_show for x in df["m"]]]
print(df)
mean = df.pivot( index="pt", columns="m", values="mean" )
mean = mean.drop( [x for x in mean.columns if x[-1] == "?"], axis = 1 ).reset_index()
median = df.pivot( index="pt", columns="m", values="median" )
median = median.drop( [x for x in median.columns if x[-1] == "?"], axis = 1 ).reset_index()
std = df.pivot( index="pt", columns="m", values="std" )
std = std.drop( [x for x in std.columns if x[-1] == "?"], axis = 1 ).reset_index()
iqr = df.pivot( index="pt", columns="m", values="iqr" )
iqr = iqr.drop( [x for x in iqr.columns if x[-1] == "?"], axis = 1 ).reset_index()
rank = df.pivot( index="pt", columns="m", values="rank" ).reset_index()
rank = rank.drop( [x for x in rank.columns if x[-1] == "?"], axis = 1 )
metrics = [c for c in mean.columns if c[-1] in ["+", "-"] and c in to_show]
optimizers = list( mean["pt"].unique() )
wins = dict( (k, df[(df["pt"] == k) & (df["rank"] == 1)]["rank"].sum() ) for k in optimizers )
optimizers = sorted( optimizers, key = lambda x : wins[x], reverse=True )
s = ""
s += "\\begin{table*}[t]" + "\n"
s += "\\caption{Results for the " + file + " model. Cells read as `mean [median iqr]'.}" + "\n"
s += "\\label{tab-res-"+ file +"}" + "\n"
s += "\\begin{tabular}{@{}lr" + "r"*len(metrics) + "@{}}" + "\n"
s += "\\toprule" + "\n"
s += " & ".join(["Search", "Wins"] + metrics) + line_end
last_win = -1
for o in optimizers:
win = wins[o]
if last_win != win:
s += "\\midrule" + "\n"
last_win = win
row = [opt_map[o], f"{win:.0f}"]
for m in metrics:
me = mean[mean["pt"] == o][m].values[0]
md = median[median["pt"] == o][m].values[0]
iq = iqr[iqr["pt"] == o][m].values[0]
if m in percentage_metrics:
me *= 100
md *= 100
iq *= 100
w = rank[rank["pt"] == o][m].values[0]
x = "\\cellcolor[HTML]{C0C0C0}" if w == 1 else ""
row += [ x + f"{me:.1f} [{md:.1f} {iq:.1f}]" ]
s += " & ".join( row ) + line_end
s += "\\bottomrule" + "\n"
s += "\\end{tabular}" + "\n"
s += "\\end{table*}" + "\n"
with open(os.path.join("combo-three", "latex-tab-"+file+".txt"), "w") as out:
out.write(s)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
1370,
62,
437,
796,
366,
3467,
13426,
77,
1,
198,
198,
16624,
796,
14631,
354,
1437,
1600,
366,
67,
5069,
1501,
15152,
1600,
366,
271,
... | 2.092166 | 1,519 |
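The script above leans on `DataFrame.pivot` to turn the long-format summary (one row per optimizer/metric pair) into a wide table with one column per metric before emitting LaTeX. A tiny illustration of that reshaping, with made-up numbers, is below.

```python
# Small illustration of the pivot used above: long format -> wide format,
# one row per optimizer and one column per metric (values are made up).
import pandas as pd

summary = pd.DataFrame({
    'pt':   ['sway', 'sway', 'tpe', 'tpe'],
    'm':    ['MAR-', 'SA+',  'MAR-', 'SA+'],
    'mean': [240.0,  0.61,   250.0,  0.58],
})

wide = summary.pivot(index='pt', columns='m', values='mean').reset_index()
print(wide)   # columns: pt, MAR-, SA+
```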
# -*- coding: utf-8 -*-
"""Series transformations as slotting and resampling."""
from .time import dt_from_s, s_from_dt
from .datastructures import DataTimeSlot, DataTimeSlotSeries, TimePoint, DataTimePointSeries, DataTimePoint
from .utilities import compute_data_loss
from .operations import avg
from .units import TimeUnit
# Setup logging
import logging
logger = logging.getLogger(__name__)
#==========================
# Base Transformation
#==========================
class Transformation(object):
"""Base transformation class."""
@classmethod
#==========================
# Resampler Transformation
#==========================
class Resampler(Transformation):
"""Resampler transformation."""
def process(self, data_time_point_series, from_t=None, to_t=None, from_dt=None, to_dt=None,
validity=None, force_close_last=True, include_extremes=False, fill_with=None,
force_data_loss=None, fill_gaps=True, force=False):
"""Start the resampling process. If start and/or end are not set, they are set automatically
based on first and last points of the series"""
if not isinstance(data_time_point_series, DataTimePointSeries):
raise TypeError('Can process only DataTimePointSeries, got "{}"'.format(data_time_point_series.__class__.__name__))
if not data_time_point_series:
raise ValueError('Cannot process empty data_time_point_series')
if include_extremes:
if from_t is not None or to_t is not None:
raise ValueError('Setting "include_extremes" is not compatible with giving a from_t or a to_t')
from_rounding_method = 'floor'
to_rounding_method = 'ceil'
else:
from_rounding_method = 'ceil'
to_rounding_method = 'floor'
# Move from_dt and to_dt to epoch to simplify the following
if from_dt is not None:
from_t = s_from_dt(from_dt)
if to_dt is not None:
to_t = s_from_dt(to_dt)
# Also force close if we have explicitly set an end
force_close_last = True
# Set "from" if not set, otherwise check for consistency # TODO: move to steaming
if from_t is None:
from_t = data_time_point_series[0].t
from_dt = dt_from_s(from_t, tz=data_time_point_series.tz)
# Is the point already rounded to the time unit or do we have to round it ourselves?
if not from_dt == self.time_unit.round_dt(from_dt):
from_dt = self.time_unit.round_dt(from_dt, how=from_rounding_method)
from_t = s_from_dt(from_dt)
else:
from_dt = dt_from_s(from_t, tz=data_time_point_series.tz)
if from_dt != self.time_unit.round_dt(from_dt):
raise ValueError('Sorry, provided from_t is not consistent with the self.time_unit of "{}" (Got "{}")'.format(self.time_unit, from_t))
# Set "to" if not set, otherwise check for consistency # TODO: move to streaming
if to_t is None:
to_t = data_time_point_series[-1].t
to_dt = dt_from_s(to_t, tz=data_time_point_series.tz)
# Is the point already rounded to the time unit or do we have to round it ourselves?
if not to_dt == self.time_unit.round_dt(to_dt):
to_dt = self.time_unit.round_dt(to_dt, how=to_rounding_method)
to_t = s_from_dt(to_dt)
else:
to_dt = dt_from_s(to_t, tz=data_time_point_series.tz)
if to_dt != self.time_unit.round_dt(to_dt):
raise ValueError('Sorry, provided to_t is not consistent with the self.time_unit of "{}" (Got "{}")'.format(self.time_unit, to_t))
# Move the start back by half a unit and the end forward by half
# a unit as well, as the point will be in the center
from_t = from_t - (self.time_unit.duration_s() /2)
from_dt = dt_from_s(from_t, data_time_point_series.tz)
to_t = to_t + (self.time_unit.duration_s() /2)
to_dt = dt_from_s(to_t, data_time_point_series.tz)
# Automatically detect validity if not set
if validity is None:
validity = data_time_point_series.autodetected_sampling_interval
logger.info('Using auto-detected sampling interval: %ss', validity)
# Check that we are not upsampling (with some tolerance):
if not force:
if validity > (self.time_unit.duration_s() * 1.10):
raise ValueError('Upsampling not supported yet (resampler unit: {}; detected time series sampling interval: {})'.format(self.time_unit, validity))
logger.debug('Started slotter from "%s" (%s) to "%s" (%s)', from_dt, from_t, to_dt, to_t)
if from_dt >= to_dt:
raise ValueError('Sorry, from is >= to! (from_t={}, to_t={})'.format(from_t, to_t))
# Set some support vars
slot_start_t = None
slot_end_t = None
prev_data_time_point = None
working_serie = DataTimePointSeries()
process_ended = False
resampled_data_time_point_series = DataTimePointSeries()
# Set timezone
timezone = data_time_point_series.tz
logger.debug('Using timezone "%s"', timezone)
# Counters
count = 0
first = True
# Indexes
series_indexes = data_time_point_series.indexes
series_resolution = data_time_point_series.resolution
# Now go through all the data in the time series
for data_time_point in data_time_point_series:
logger.debug('Processing %s', data_time_point)
# Increase counter
count += 1
# Set start_dt if not already done TODO: implement it correctly
#if not from_dt:
# from_dt = self.time_unit.timeInterval.round_dt(data_time_point.dt) if rounded else data_time_point.dt
# Pretend there was a slot before if we are at the beginning. TODO: improve me.
if slot_end_t is None:
slot_end_t = from_t
# First, check if we have some points to discard at the beginning
if data_time_point.t < from_t:
# If we are here it means we are handling data belonging to a previous slot
# (probably just spare data loaded to have access to the prev_datapoint)
prev_data_time_point = data_time_point
continue
# Similar concept for the end
# TODO: what if we are in streaming mode? add if to_t is not None?
if data_time_point.t >= to_t:
if process_ended:
continue
# The following procedure works in general for slots at the beginning and in the middle.
# The approach is to detect if the current slot is "outdated" and spin a new one if so.
if data_time_point.t > slot_end_t:
# If the current slot is outdated:
# 1) Add this last point to the data_time_point_series:
working_serie.append(data_time_point)
#2) keep spinning new slots until the current data point falls in one of them.
# NOTE: Read the following "while" more as an "if" which can also lead to spin multiple
# slot if there are empty slots between the one being closed and the data_time_point.dt.
# TODO: leave or remove the above if for code readability?
while slot_end_t < data_time_point.t:
logger.debug('Checking for end {} with point {}'.format(slot_end_t, data_time_point.t))
# If we are in the pre-first slot, just silently spin a new slot:
if slot_start_t is not None:
# Append last point. Can be appended to multiple slots, this is normal since
# the empty slots in the middle will have only a far prev and a far next.
# can also be appended several times if working_serie is not reset (looping in the while)
if data_time_point not in working_serie:
working_serie.append(data_time_point)
logger.debug('This slot (start={}, end={}) is closed, now aggregating it..'.format(slot_start_t, slot_end_t))
logger.debug('working_serie len: %s', len(working_serie))
logger.debug('working_serie first point dt: %s', working_serie[0].dt)
logger.debug('working_serie last point dt: %s', working_serie[-1].dt)
# Compute slot...
dataTimePoint = self._compute_resampled_point(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = first)
# Set first to false
if first:
first = False
# .. and append results
if dataTimePoint:
logger.debug('Computed datapoint: %s',dataTimePoint )
resampled_data_time_point_series.append(dataTimePoint)
# Create a new slot. This is where all the "conventional" time logic kicks-in, and where the time zone is required.
slot_start_t = slot_end_t
slot_end_t = s_from_dt(dt_from_s(slot_start_t, tz=timezone) + self.time_unit)
# Create a new working_serie as part of the "create a new slot" procedure
working_serie = DataTimePointSeries()
# Append the previous prev_data_time_point to the new DataTimeSeries
if prev_data_time_point:
working_serie.append(prev_data_time_point)
logger.debug('Spinned a new slot (start={}, end={})'.format(slot_start_t, slot_end_t))
# If last slot mark process as completed (and aggregate last slot if necessary)
if data_time_point.dt >= to_dt:
# Edge case where we would otherwise miss the last slot
if data_time_point.dt == to_dt:
# Compute slot...
dataTimePoint = self._compute_resampled_point(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = True)
# .. and append results
if dataTimePoint:
resampled_data_time_point_series.append(dataTimePoint)
process_ended = True
# Append this point to the working serie
working_serie.append(data_time_point)
# ..and save as previous point
prev_data_time_point = data_time_point
# Last slots
if force_close_last:
# 1) Close the last slot and aggregate it. You should never do this unless you know what you are doing
if working_serie:
logger.debug('This resampled point (start={}, end={}) is done, now computing it..'.format(slot_start_t, slot_end_t))
# Compute slot...
dataTimePoint = self._compute_resampled_point(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = True)
# .. and append results
if dataTimePoint:
resampled_data_time_point_series.append(dataTimePoint)
# 2) Handle missing slots until the requested end (end_dt)
# TODO: Implement it. Sure?
logger.info('Resampled %s DataTimePoints in %s DataTimePoints', count, len(resampled_data_time_point_series))
return resampled_data_time_point_series
#==========================
# Slotter Transformation
#==========================
class Slotter(Transformation):
"""Slotter transformation."""
def process(self, data_time_point_series, from_t=None, from_dt=None, to_t=None, to_dt=None, validity=None, force_close_last=False,
include_extremes=False, fill_with=None, force_data_loss=None, fill_gaps=True, force=False):
"""Start the slotting process. If start and/or end are not set, they are set automatically based on first and last points of the series."""
if not isinstance(data_time_point_series, DataTimePointSeries):
raise TypeError('Can process only DataTimePointSeries, got "{}"'.format(data_time_point_series.__class__.__name__))
if not data_time_point_series:
raise ValueError('Cannot process empty data_time_point_series')
if include_extremes:
if from_t is not None or to_t is not None:
raise ValueError('Setting "include_extremes" is not compatible with giving a from_t or a to_t')
from_rounding_method = 'floor'
to_rounding_method = 'ceil'
force_close_last = True
else:
from_rounding_method = 'ceil'
to_rounding_method = 'floor'
# Move from_dt and to_dt to epoch to simplify the following
if from_dt is not None:
from_t = s_from_dt(from_dt)
if to_dt is not None:
to_t = s_from_dt(to_dt)
# Also force close if we have explicitly set an end
force_close_last = True
# Set "from" if not set, otherwise check for consistency # TODO: move to steaming
if from_t is None:
from_t = data_time_point_series[0].t
from_dt = dt_from_s(from_t, data_time_point_series.tz)
# Is the point already rounded to the time unit or do we have to round it ourselves?
if not from_dt == self.time_unit.round_dt(from_dt):
from_dt = self.time_unit.round_dt(from_dt, how=from_rounding_method)
from_t = s_from_dt(from_dt)
else:
from_dt = dt_from_s(from_t, data_time_point_series.tz)
if from_dt != self.time_unit.round_dt(from_dt):
raise ValueError('Sorry, provided from_t is not consistent with the self.time_unit of "{}" (Got "{}")'.format(self.time_unit, from_t))
# Set "to" if not set, otherwise check for consistency # TODO: move to streaming
if to_t is None:
to_t = data_time_point_series[-1].t
to_dt = dt_from_s(to_t, data_time_point_series.tz)
# Is the point already rounded to the time unit or do we have to round it ourselves?
if not to_dt == self.time_unit.round_dt(to_dt):
to_dt = self.time_unit.round_dt(to_dt, how=to_rounding_method)
to_t = s_from_dt(to_dt)
else:
to_dt = dt_from_s(to_t, data_time_point_series.tz)
if to_dt != self.time_unit.round_dt(to_dt):
raise ValueError('Sorry, provided to_t is not consistent with the self.time_unit of "{}" (Got "{}")'.format(self.time_unit, to_t))
# Also force close if we have explicitly set an end
force_close_last = True
# Automatically detect validity if not set
if validity is None:
validity = data_time_point_series.autodetected_sampling_interval
logger.info('Using auto-detected sampling interval: %ss', validity)
# Check if not upslotting (with some tolerance)
if not force:
# TODO: this check is super-weak. Will fail in loads of edge cases, i.e. months slotted in 30 days.
unit_duration_s = self.time_unit.duration_s(data_time_point_series[0].dt)
if validity > (unit_duration_s * 1.1):
raise ValueError('Upslotting not supported yet (slotter unit: {}; detected time series sampling interval: {})'.format(unit_duration_s, validity))
# Log
logger.debug('Started slotter from "%s" (%s) to "%s" (%s)', from_dt, from_t, to_dt, to_t)
if from_dt >= to_dt:
raise ValueError('Sorry, from is >= to! (from_t={}, to_t={})'.format(from_t, to_t))
# Set some support vars
slot_start_t = None
slot_end_t = None
prev_data_time_point = None
working_serie = DataTimePointSeries()
process_ended = False
data_time_slot_series = DataTimeSlotSeries()
slots_to_be_interpolated = []
last_no_full_data_loss_slot = None
# Set timezone
timezone = data_time_point_series.tz
logger.debug('Using timezone "%s"', timezone)
# Counters
count = 0
first = True
# Indexes
series_indexes = data_time_point_series.indexes
series_resolution = data_time_point_series.resolution
# Now go through all the data in the time series
for data_time_point in data_time_point_series:
logger.debug('Processing %s', data_time_point)
# Increase counter
count += 1
# Set start_dt if not already done TODO: implement it correctly
#if not from_dt:
# from_dt = self.time_unit.timeInterval.round_dt(data_time_point.dt) if rounded else data_time_point.dt
# Pretend there was a slot before if we are at the beginning. TODO: improve me.
if slot_end_t is None:
slot_end_t = from_t
# First, check if we have some points to discard at the beginning
if data_time_point.t < from_t:
# If we are here it means we are handling data belonging to a previous slot
# (probably just spare data loaded to have access to the prev_datapoint)
prev_data_time_point = data_time_point
continue
# Similar concept for the end
# TODO: what if we are in streaming mode? add if to_t is not None?
if data_time_point.t >= to_t:
if process_ended:
break
# The following procedure works in general for slots at the beginning and in the middle.
# The approach is to detect if the current slot is "outdated" and spin a new one if so.
if data_time_point.t > slot_end_t:
# If the current slot is outdated:
# 1) Add this last point to the data_time_point_series:
working_serie.append(data_time_point)
#2) keep spinning new slots until the current data point falls in one of them.
# NOTE: Read the following "while" more as an "if" which can also lead to spin multiple
# slot if there are empty slots between the one being closed and the data_time_point.dt.
# TODO: leave or remove the above if for code readability?
while slot_end_t < data_time_point.t:
logger.debug('Checking for end {} with point {}'.format(slot_end_t, data_time_point.t))
# If we are in the pre-first slot, just silently spin a new slot:
if slot_start_t is not None:
# Append last point. Can be appended to multiple slots, this is normal since
# the empty slots in the middle will have only a far prev and a far next.
# can also be appended several times if working_serie is not reset (looping in the while)
if data_time_point not in working_serie:
working_serie.append(data_time_point)
logger.debug('This slot (start={}, end={}) is closed, now aggregating it..'.format(slot_start_t, slot_end_t))
logger.debug('working_serie first point dt: %s', working_serie[0].dt)
logger.debug('working_serie last point dt: %s', working_serie[-1].dt)
# Compute slot...
data_time_slot = self._compute_slot(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = first)
# Set first to false
if first:
first = False
# .. and append results (unless we are before the first timeseries start point)
if slot_end_t > data_time_point_series[0].t:
if data_time_slot.data_loss == 1.0:
# if data loss is full, append to slot to the slots to be interpolated
slots_to_be_interpolated.append(data_time_slot)
else:
# If we have slots to be interpolated
if slots_to_be_interpolated:
for i, slot_to_be_interpolated in enumerate(slots_to_be_interpolated):
# Prepare for interpolated data
interpolated_data = {}
# Computed interpolated data
if self.interpolation_method == 'linear':
for data_key in data_time_slot_series.data_keys():
interpolated_data[data_key] = ((((data_time_slot.data[data_key] - last_no_full_data_loss_slot.data[data_key]) /
(len(slots_to_be_interpolated) + 1) ) * (i+1)) + last_no_full_data_loss_slot.data[data_key])
elif self.interpolation_method == 'uniform':
for data_key in data_time_slot_series.data_keys():
interpolated_data[data_key] = (((data_time_slot.data[data_key] - last_no_full_data_loss_slot.data[data_key]) / 2)
+ last_no_full_data_loss_slot.data[data_key])
else:
raise Exception('Unknown interpolation method "{}"'.format(self.interpolation_method))
# Add interpolated data
slot_to_be_interpolated._data = interpolated_data
data_time_slot_series.append(slot_to_be_interpolated)
# Reset the "buffer"
slots_to_be_interpolated = []
# Append this slot to the time series
data_time_slot_series.append(data_time_slot)
# ... and set this slot as the last with no full data loss
last_no_full_data_loss_slot = data_time_slot
# Create a new slot. This is where all the "calendar" time unit logic kicks-in, and where the time zone is required.
slot_start_t = slot_end_t
slot_start_dt = dt_from_s(slot_start_t, tz=timezone)
slot_end_t = s_from_dt(dt_from_s(slot_start_t, tz=timezone) + self.time_unit)
slot_end_dt = dt_from_s(slot_end_t, tz=timezone)
# Create a new working_serie as part of the "create a new slot" procedure
working_serie = DataTimePointSeries()
# Append the previous prev_data_time_point to the new DataTimeSeries
if prev_data_time_point:
working_serie.append(prev_data_time_point)
logger.debug('Spinned a new slot (start={}, end={})'.format(slot_start_dt, slot_end_dt))
# If last slot mark process as completed (and aggregate last slot if necessary)
if data_time_point.dt >= to_dt:
# Edge case where we would otherwise miss the last slot
if data_time_point.dt == to_dt:
# Compute slot...
data_time_slot = self._compute_slot(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = True)
# .. and append results
data_time_slot_series.append(data_time_slot)
process_ended = True
# Append this point to the working serie
working_serie.append(data_time_point)
# ..and save as previous point
prev_data_time_point = data_time_point
# Last slots
if force_close_last:
# 1) Close the last slot and aggregate it. You should never do this unless you know what you are doing
if working_serie:
logger.debug('This slot (start={}, end={}) is closed, now aggregating it..'.format(slot_start_t, slot_end_t))
# Compute slot...
data_time_slot = self._compute_slot(working_serie,
unit = self.time_unit,
start_t = slot_start_t,
end_t = slot_end_t,
validity = validity,
timezone = timezone,
fill_with = fill_with,
force_data_loss = force_data_loss,
fill_gaps = fill_gaps,
series_indexes = series_indexes,
series_resolution = series_resolution,
first_last = True)
# .. and append results
data_time_slot_series.append(data_time_slot)
# 2) Handle missing slots until the requested end (end_dt)
# TODO: Implement it. Sure? Clashes with the idea of reconstructors..
logger.info('Slotted %s DataTimePoints in %s DataTimeSlots', count, len(data_time_slot_series))
return data_time_slot_series
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
27996,
38226,
355,
10852,
889,
290,
581,
321,
11347,
526,
15931,
198,
198,
6738,
764,
2435,
1330,
288,
83,
62,
6738,
62,
82,
11,
264,
62,
6738,
62,
28664,
198,
... | 1.804808 | 17,721 |
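Both the Resampler and the Slotter above follow the same pattern: walk the points in time order and, whenever a point falls past the end of the current slot, keep spinning new (possibly empty) slots until the point fits, aggregating each slot as it closes. The function below is a drastically simplified, standalone sketch of that idea, with fixed-width slots, plain averages and no data-loss or timezone handling; it is not the timeseria API.

```python
# Simplified standalone sketch of the slot-spinning approach used by
# Resampler/Slotter above: fixed-width slots, averaged values, no data-loss
# or timezone handling. Not the timeseria API.
def resample(points, slot_width, start_t):
    """points: iterable of (t, value) pairs sorted by t."""
    slots = []
    slot_start, slot_end = start_t, start_t + slot_width
    bucket = []
    for t, value in points:
        # Spin new slots until the current point falls inside one.
        while t >= slot_end:
            avg = sum(bucket) / len(bucket) if bucket else None   # None marks an empty slot
            slots.append((slot_start, slot_end, avg))
            slot_start, slot_end = slot_end, slot_end + slot_width
            bucket = []
        bucket.append(value)
    if bucket:                                   # force-close the last slot
        slots.append((slot_start, slot_end, sum(bucket) / len(bucket)))
    return slots

print(resample([(0, 10), (5, 20), (12, 30), (31, 40)], slot_width=10, start_t=0))
# [(0, 10, 15.0), (10, 20, 30.0), (20, 30, None), (30, 40, 40.0)]
```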
from pygelib.CG_routines import _raw_cg_product, _compute_output_shape
from pygelib.Layers import CGProduct, SO3Linear, L1DifferenceLayer, ManyEdgeMPLayer
from pygelib.utils import _convert_to_SO3part_view
from pygelib.SO3VecArray import SO3VecArray
from pygelib.transforms import radial_gaussian
from pygelib.rotations import EulerRot
import pytest
import numpy as np
import torch
| [
6738,
12972,
25280,
571,
13,
39816,
62,
81,
448,
1127,
1330,
4808,
1831,
62,
66,
70,
62,
11167,
11,
4808,
5589,
1133,
62,
22915,
62,
43358,
198,
6738,
12972,
25280,
571,
13,
43,
6962,
1330,
29925,
15667,
11,
12809,
18,
14993,
451,
1... | 2.954198 | 131 |
import torch
from torch import nn, einsum
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
304,
1040,
388,
198
] | 3.230769 | 13 |
# -*- coding: utf-8 -*-
"""
@Project :
@FileName:
@Author :penghr
@Time :202x/xx/xx xx:xx
@Desc :
"""
import os
import h5py
import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
16775,
1058,
220,
198,
31,
8979,
5376,
25,
220,
198,
31,
13838,
220,
1058,
3617,
456,
81,
220,
198,
31,
7575,
220,
220,
220,
1058,
19004,
87,
14,
532... | 2.175258 | 97 |
import socket
import struct
import RF24
# print("%d %d %d %d %d %d" % (self._heartbeat, self._id, self._lefta, self._leftb, self._righta, self._rightb))
# sock.sendall(struct.pack("!BBBBBB", self._heartbeat, self._id, self._lefta, self._leftb, self._righta, self._rightb))
| [
11748,
17802,
220,
198,
11748,
2878,
198,
11748,
20445,
1731,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
1303,
3601,
7203,
4,
67,
4064,
67,
4064,
67,
4064,
67,
4064,
67,
4064,
67,
1,
4064,
357,... | 2.365079 | 126 |
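The commented-out lines above describe a fixed six-byte control message: `struct.pack("!BBBBBB", ...)` packs a heartbeat flag, an id and four motor values as unsigned bytes in network byte order. Below is a small round-trip sketch with made-up values.

```python
# Round-trip sketch for the 6-byte message hinted at above: six unsigned
# bytes ('B') in network byte order ('!'). The values are made up.
import struct

heartbeat, node_id = 1, 7
lefta, leftb, righta, rightb = 128, 0, 127, 255

payload = struct.pack("!BBBBBB", heartbeat, node_id, lefta, leftb, righta, rightb)
assert len(payload) == 6

# The receiving side recovers the fields with the same format string.
print(struct.unpack("!BBBBBB", payload))   # (1, 7, 128, 0, 127, 255)
```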
# Copyright 2020 AstroLab Software
# Author: Biswajit Biswas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from astropy.table import Table
| [
2,
15069,
12131,
35167,
17822,
10442,
198,
2,
6434,
25,
38045,
86,
1228,
270,
38045,
9776,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.787356 | 174 |
from gym_mdptetris.envs.tetris import Tetris, TetrisFlat, TetrisHeuristic, MelaxTetris, MelaxTetrisFlat, MelaxTetrisHeuristic
from gym_mdptetris.envs.board import Board
from gym_mdptetris.envs.piece import Piece | [
6738,
11550,
62,
9132,
457,
316,
2442,
13,
268,
14259,
13,
83,
316,
2442,
1330,
27351,
2442,
11,
27351,
2442,
7414,
265,
11,
27351,
2442,
1544,
27915,
11,
5616,
897,
51,
316,
2442,
11,
5616,
897,
51,
316,
2442,
7414,
265,
11,
5616,
... | 2.65 | 80 |
import argparse
import re
from abc import ABC, abstractmethod
from enum import Enum
import PySimpleGUI as sg
import yaml
from pathlib2 import Path
"""
Simple tool to simplify the creation of hints for the learning platform.
The tool preserves the existing IDs and adds a given prefix to new entries.
These prefixes can be removed later by another script to separate different
groups of topic.
Uses the YAML file format for input and output.
Since the tool was developed with time constraints, not all functions and
edge cases were tested.
"""
"""
DEFAULT_PREFIX defines the prefix used for IDs
DEFAULT_LENGTH defines the length of the numeric ID after DEFAULT_PREFIX
AUTO_SAVE makes the tool save after each change to the hints,
which is recommended
"""
DEFAULT_PATH = ""
DEFAULT_PREFIX = "prefix"
DEFAULT_LENGTH = 4
AUTO_SAVE = False
class State:
"""
Handles the state for the tool. This includes the management of hints,
their creation, removal and editing,
selecting the following entries and IO operations.
"""
def load_from_file(self, path=None):
"""
Load the hints from the file with the given path into the state data.
:param path: Path to the file to load
"""
if path is not None:
self.path = Path(path)
content = {}
with self.path.open(encoding="utf-8") as stream:
file_content = yaml.safe_load(stream)
if file_content is None or file_content == [None]:
return
for entry in file_content:
for key in entry:
content[key] = entry[key]
for key in content:
if "question_id" in content[key]:
question = YAMLParser.create_question_from_yaml(item_id=key,
**content[key])
self.entries[EntryType.QUESTION].add_entry(question)
elif "answer_id" in content[key]:
answer = YAMLParser.create_answer_from_yaml(item_id=key,
**content[key])
self.entries[EntryType.ANSWER].add_entry(answer)
else:
print("Found incorrectly formatted entry.")
self.selected_entry = None
def save_to_file(self, path=None):
"""
Saves the hints to the file with the given path.
If no path is given, then they are saved to the opened file.
If no file is opened or the contents have not been saved yet,
the hints are saved to "backup.yml" in the working directory.
:param path: Path to the file to save to
"""
if path is not None and path != "":
self.path = Path(path)
serialize_format = self._serialize_format()
serialize_format.sort(key=lambda x: x[0])
yml_dict = [{entry[0]: format_entry(entry[1])} for entry in
serialize_format]
yaml.add_representer(str, str_representer)
yaml.add_representer(list, list_representer)
yaml.add_representer(FormattedList, formatted_list_representer)
try:
with self.path.open("w", encoding="utf-8") as file:
yaml.dump(yml_dict, file, encoding="utf-8", allow_unicode=True,
sort_keys=False)
except FileNotFoundError:
with Path("crashBackup.yml").open("w", encoding="utf-8") as file:
yaml.dump(yml_dict, file, encoding="utf-8", allow_unicode=True,
sort_keys=False)
def set_entry(self, idx, entry_type):
"""
Sets the currently selected entry of the state.
:param idx: Index of the entry to select
:param entry_type: Type of the entry to select
"""
self.selected_entry = self.entries[entry_type].get_object_by_index(idx)
def get_content(self, entry_type=EntryType.QUESTION):
"""
Returns the objects for the selected type of hints.
:param entry_type: Type of entry to return
:return: List of hints for type
"""
return self.entries[entry_type].get_data()
def swap_next(self, index_1, index_2):
"""
Swaps the order of the following entries of the selected entry
"""
if self.selected_entry_type() == EntryType.ANSWER:
self.selected_entry.swap_next(index_1, index_2)
def update_next(self, indices):
"""
Updates the following hints with the given order.
Fully replaces with indices.
:param indices: List of indices for next entries
"""
other_entry_type = self.get_unselected_entry_type()
next_entries = filter(lambda x: x[0] != -1, self.get_next())
next_entries_indices = set(map(lambda x: x[0], next_entries))
next_entry_index = next_entries_indices.symmetric_difference(indices)
if len(next_entry_index) == 0:
return
index = list(next_entry_index)[0]
entry = self.entries[other_entry_type].get_object_by_index(index)
entry_id = entry.entry_id
if index in next_entries_indices:
self.selected_entry.remove_next_entry(entry_id)
else:
if self.selected_entry_type() == EntryType.QUESTION:
self.selected_entry.pop_next_entry(0)
self.add_next_entry(entry_id)
def get_next(self):
"""
Get the next entries for the selected entry
:return: Next entries
"""
other_hint_type = self.get_unselected_entry_type()
other_hints_manager = self.entries[other_hint_type]
return other_hints_manager.next_hints(self.selected_entry.next_entries)
def add_next_entry(self, text):
"""
Adds a following entry to the selected one.
Allows for cross-file IDs when given a manual ID.
:param text: Next entry ID
"""
self.selected_entry.add_next_entry(text)
def remove_next_entry(self, index):
"""
Remove entry at the index from the following hints
:param index: Index of entry to remove
"""
self.selected_entry.pop_next_entry(index[0])
def create_entry(self, prefix, prefix_length,
entry_type=EntryType.QUESTION):
"""
Creates a hint with a given prefix, prefix length and hint type.
:param prefix: Prefix for the ID
:param prefix_length: Length of the numeric part of the ID
:param entry_type: Hint type to create
:return: The newly created entry
"""
item_ids = [entry.item_id for entry in
self.entries[EntryType.QUESTION].get_data()] + \
[entry.item_id for entry in
self.entries[EntryType.ANSWER].get_data()]
collection_ids = self.entries[EntryType.QUESTION].order + self.entries[
EntryType.ANSWER].order
item_id = self._get_next_id(item_ids, ("item" + prefix), prefix_length)
collection_id = self._get_next_id(collection_ids, prefix, prefix_length)
entry = self.entries[entry_type].create_new_entry(item_id,
collection_id)
return entry
def remove_entry(self, idx=-1):
"""
Removes entry from the hints list.
If not given an index, the selected entry is removed.
:param idx: Either -1 (remove the currently selected entry) or an index into
the entry list of the selected entry's type
"""
if idx == -1:
idx = self.entries[self.selected_entry_type()].order.index(
self.selected_entry.entry_id)
other_entry_type = self.get_unselected_entry_type()
self.entries[self.selected_entry_type()].remove_entry(idx, self.entries[
other_entry_type])
self.selected_entry = None
def _get_next_id(self, ids, prefix, prefix_length):
"""
Finds the smallest ID that is unique in a given id list.
Fills the numeric ID with preceding 0s
until the given prefix length is reached.
:param ids: List of existing ids
:param prefix: Prefix for id
:param prefix_length: Length of numeric id
:return: The next unique ID
"""
ids = [pref_id for pref_id in ids if prefix in pref_id]
ids.sort(reverse=True)
highest_id = 0
if len(ids) > 0:
prefix_matches = re.search(r"(\d+)", ids[0])
highest_id = prefix_matches[0] if len(
prefix_matches.regs) > 0 else 0
next_id = "{}{}".format(prefix,
str(int(highest_id) + 1).zfill(prefix_length))
return next_id
def _serialize_format(self):
"""
Maps the hints to the correct YAML format.
:return: Formatted entries
"""
serial_entries = []
serial_entries += self.entries[EntryType.QUESTION].serialize()
serial_entries += self.entries[EntryType.ANSWER].serialize()
return serial_entries
# Required to parse into appropriate YAML format
# Window and event logic
def make_window(prefix=DEFAULT_PREFIX, prefix_len=DEFAULT_LENGTH):
"""
Defines and returns the view model for the tool.
:return: Created window
:rtype: PySimpleGUI.PySimpleGUI.Window
"""
sg.theme("Topanga")
# Overview and list of the questions on the upper left part,
# including buttons to add and remove them
questions_frame = sg.Frame(layout=[
[sg.Button(button_text="Add", size=(8, 1), key="add_question"),
sg.Button(button_text="Remove", size=(8, 1),
key="remove_entry_question")],
[sg.Listbox(values=[], select_mode=sg.LISTBOX_SELECT_MODE_SINGLE,
size=(50, 20), bind_return_key=True,
key='question_list', expand_x=True, expand_y=True,
horizontal_scroll=True, enable_events=True)]
], title="Questions", expand_x=True, expand_y=True)
# Overview and list of the questions on the lower left part,
# including buttons to add and remove them
answers_frame = sg.Frame(layout=[
[sg.Button(button_text="Add", size=(8, 1), key="add_answer"),
sg.Button(button_text="Remove", size=(8, 1),
key="remove_entry_answer")],
[sg.Listbox(values=[], select_mode=sg.LISTBOX_SELECT_MODE_SINGLE,
size=(50, 20), bind_return_key=True,
key='answer_list', expand_x=True, expand_y=True,
horizontal_scroll=True, enable_events=True)]
], title="Answers", expand_x=True, expand_y=True)
# Left half of the window
left_col = [
[questions_frame],
[answers_frame],
]
# Inputs for individual prefixes and prefix lengths
pref_row = sg.Frame(layout=
[[sg.Text("Prefix:"),
sg.InputText(prefix, key="prefix"),
sg.Text("Prefix num. length:"),
sg.InputText(str(prefix_len), key="prefix_length")]],
title="editor")
# Editor for the hints
textbox = sg.Multiline(size=(30, 15), key="textbox", expand_x=True,
expand_y=True, enable_events=True,
default_text="", disabled=True)
# Part of the window responsible for editing
# the following entries for the selected entry
next_frame = sg.Frame(layout=[
[sg.Listbox(values=[], select_mode=sg.LISTBOX_SELECT_MODE_SINGLE,
size=(20, 20), bind_return_key=True,
key='follow_order',
expand_x=True, expand_y=True, horizontal_scroll=True)],
[sg.Button(button_text="Up", size=(8, 1), key="item_up"),
sg.Button(button_text="Down", size=(8, 1), key="item_down"),
sg.Button(button_text="Remove", size=(8, 1), key="item_remove"),
sg.InputText("prefexample001", size=(20, 1), key="other_id"),
sg.Button(button_text="Add", size=(8, 1), key="item_add")
],
[sg.Listbox(values=[], select_mode=sg.LISTBOX_SELECT_MODE_SINGLE,
size=(30, 20), bind_return_key=True,
key='follow',
expand_x=True, expand_y=True, horizontal_scroll=True,
enable_events=True)]
], title="Next entries", expand_x=True, expand_y=True)
# Right half of the window
right_col = [
[pref_row],
[textbox],
[next_frame]
]
# Overall layout of the window, including the menu options for the tool
layout = [[sg.Menu(
[["File", ["New", "Open Crtl+o", "Save Ctrl+s", "Save As"]]])],
[sg.Text('Hints editor', font='Any 20')],
[sg.Column(left_col, element_justification='l', expand_x=True,
expand_y=True),
sg.Column(right_col, element_justification='c', expand_x=True,
expand_y=True)],
]
window = sg.Window('Hints editor', layout, finalize=True, resizable=True,
use_default_focus=False)
window.set_min_size(window.size)
# Specifically bind control keys
window.bind("<Control-s>", "Save")
window.bind("<Control-o>", "Open")
# Allow undo for the hints text editor
textbox.Widget.configure(undo=True)
return window
def update_window(state, window, components):
"""
Handles the update of the window after the hints data has been modified.
:param state: State with the hints data
:param window: The window to update
:param components: List of components to update
"""
if "answer_list" in components:
indices = []
if state.selected_entry is not None and state.selected_entry_type() == EntryType.ANSWER:
indices = window["answer_list"].get_indexes()
scroll_to_index = max(0, indices[0]) - 10 if len(indices) > 0 else 0
window["answer_list"].update(
state.get_content(entry_type=EntryType.ANSWER),
set_to_index=indices, scroll_to_index=scroll_to_index)
if "question_list" in components:
indices = []
if state.selected_entry is not None and state.selected_entry_type() == EntryType.QUESTION:
indices = window["question_list"].get_indexes()
scroll_to_index = max(0, indices[0] - 10) if len(indices) > 0 else 0
window["question_list"].update(
state.get_content(entry_type=EntryType.QUESTION),
set_to_index=indices, scroll_to_index=scroll_to_index)
if "textbox" in components:
# Only show text in the hints text editor
# when an entry is actually selected
if state.selected_entry is not None:
window["textbox"].update(state.selected_entry.content,
disabled=False)
else:
window["textbox"].update("", disabled=True)
if "follow" in components:
if state.selected_entry is not None:
other_hint_type = state.get_unselected_entry_type()
if other_hint_type == EntryType.QUESTION:
select_mode = sg.LISTBOX_SELECT_MODE_MULTIPLE
else:
select_mode = sg.LISTBOX_SELECT_MODE_SINGLE
selected = filter(lambda entry: entry[0] != -1, state.get_next())
selected = list(map(lambda x: x[0], selected))
window["item_up"].update(
disabled=state.selected_entry_type() == EntryType.QUESTION)
window["item_down"].update(
disabled=state.selected_entry_type() == EntryType.QUESTION)
window["follow"].update(
state.get_content(entry_type=other_hint_type),
select_mode=select_mode, set_to_index=selected)
else:
window["follow"].update([])
if "follow_order" in components:
if state.selected_entry is not None:
next_items = state.get_next()
next_items = map(lambda x: x[1], next_items)
window["follow_order"].update(next_items,
select_mode=sg.LISTBOX_SELECT_MODE_SINGLE)
else:
window["follow_order"].update([])
def event_loop(state, window):
"""
Processes events in the tool window.
:param state: State object with hints to change
:param window: Window to listen to
"""
while True:
if state.auto_save:
state.save_to_file()
event, values = window.read()
if event is None:
break
event_type = event_helper(event, False)
menu_events(event, state, window)
if event_type != "":
if "_list" in event:
index = window[event].get_indexes()
if len(index) == 0:
continue
state.set_entry(index[0], EntryType.from_str(event_type))
update_window(state, window,
["question_list", "answer_list", "textbox",
"follow", "follow_order"])
elif "add_" in event:
selected_list = event_type + "_list"
entry = state.create_entry(window["prefix"].get(),
int(window["prefix_length"].get()),
entry_type=EntryType.from_str(
event_type))
state.selected_entry = entry
update_window(state, window,
[selected_list, "textbox", "follow",
"follow_order"])
if state.selected_entry is not None:
selected_entry_events(event, state, window)
window.finalize()
def menu_events(event, state, window):
"""
Handles all events related to menu options
"""
if event == sg.WIN_CLOSED or event == 'Exit':
return
elif "Open" in event:
path = sg.popup_get_file("Hints file", no_window=True)
if path == "" or path == ():
return
state.load_from_file(path)
update_window(state, window,
["answer_list", "question_list", "textbox", "follow",
"follow_order"])
elif "Save" in event:
if state.path == "" or event == "Save As":
path = sg.popup_get_file("Hints file", no_window=True, save_as=True)
if path == "" or path == ():
return
state.save_to_file(path=path)
else:
state.save_to_file()
elif "New" == event:
if state.auto_save:
state.save_to_file()
state.reset()
update_window(state, window,
["answer_list", "question_list", "textbox", "follow",
"follow_order"])
def selected_entry_events(event, state, window):
"""
Handles all events related to the currently selected entry, especially
editing the content and the manipulation of the following entries.
"""
event_type = event_helper(event, False)
if event_type != "" and "remove_entry" in event:
if EntryType.from_str(
event.split("_")[2]) != state.selected_entry_type():
return
selected_list = event_type + "_list"
remove_index = window[selected_list].get_indexes()[0]
state.remove_entry(remove_index)
update_window(state, window, [selected_list])
if event == "textbox":
state.selected_entry.content = window["textbox"].get()
update_window(state, window, ["question_list", "answer_list"])
elif event == "follow":
update_window(state, window, ["follow_order"])
window.refresh()
indices = window["follow"].get_indexes()
state.update_next(indices)
update_window(state, window, ["follow_order"])
elif event == "item_up":
index = window["follow_order"].get_indexes()
if len(index) != 0:
state.swap_next(index[0], index[0] - 1)
update_window(state, window, ["follow_order"])
elif event == "item_down":
index = window["follow_order"].get_indexes()
if len(index) != 0:
state.swap_next(index[0], index[0] + 1)
update_window(state, window, ["follow_order"])
elif event == "item_remove":
index = window["follow_order"].get_indexes()
if len(index) != 0:
state.remove_next_entry(index)
update_window(state, window, ["follow", "follow_order"])
elif event == "item_add":
state.add_next_entry(window["other_id"].get())
update_window(state, window, ["follow", "follow_order"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Parse config values for the tool")
parser.add_argument("--path", type=str, default="",
help="Path to file to open at start")
parser.add_argument("--prefix", type=str, default=DEFAULT_PREFIX,
help="Prefix for the IDs")
parser.add_argument("--default-len", type=int, default=DEFAULT_LENGTH,
help="Length of the numeric part of the ID")
parser.add_argument("--auto-save", type=bool, default=AUTO_SAVE,
help="Automatically save to file when exiting")
args = parser.parse_args()
window = make_window(prefix=args.prefix, prefix_len=args.default_len)
state = State(auto_save=args.auto_save)
# Only load from file when a valid path is given
if Path(args.path).is_file():
state.load_from_file(args.path)
elif DEFAULT_PATH is not None and Path(DEFAULT_PATH).is_file():
state.load_from_file(DEFAULT_PATH)
update_window(state, window,
["answer_list", "question_list", "textbox", "follow",
"follow_order"])
event_loop(state, window)
window.close()
if state.auto_save:
state.save_to_file()
| [
11748,
1822,
29572,
198,
11748,
302,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
9485,
26437,
40156,
355,
264,
70,
198,
11748,
331,
43695,
198,
6738,
3108,
8019,
17,
1330,
10644,... | 2.19998 | 10,026 |
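For context, load_from_file in the row above expects a YAML list of single-key mappings whose values carry either a question_id or an answer_id. A minimal sketch of that shape (field names beyond the two IDs are illustrative assumptions, not the platform's real schema):
import yaml

sample = """
- prefix0001:
    question_id: itemprefix0001
    content: "Which line raises the exception?"
- prefix0002:
    answer_id: itemprefix0002
    content: "Look at the first frame of the traceback."
"""

for entry in yaml.safe_load(sample):
    for key, value in entry.items():
        kind = "question" if "question_id" in value else "answer"
        print(key, kind)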
from django import urls
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from ..permalinks import PermalinkError, expand, resolve
| [
6738,
42625,
14208,
1330,
2956,
7278,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
... | 3.727273 | 55 |
#!/usr/bin/env python3
"""
Script to provide convex tool functionality
"""
from convex_api.tool.convex_tool import convex_tool
if __name__ == "__main__":
convex_tool()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
628,
220,
220,
220,
12327,
284,
2148,
24748,
87,
2891,
11244,
198,
198,
37811,
628,
198,
6738,
24748,
87,
62,
15042,
13,
25981,
13,
1102,
303,
87,
62,
25981,
1330,
24... | 2.676471 | 68 |
from tqdm import tqdm
from module.expansion.ppr import PPR
from module.import_options import Options
from module.lfr.community_writer import WriteCommunities
from module.lfr.lfr_reader import LFRReader
from module.lfr.nmi_plot import PlotNMI
from module.seeding.seeder.spreadhub import Spreadhub
if __name__ == '__main__':
from sys import argv
import os
import datetime
option_import = Options(argv, parameters="smoc")
seeders = option_import.select_seeders()
directory = os.getcwd()
date = datetime.datetime.now().strftime("%y-%m-%H%S")
save_name = f"{directory}"
nmi_manager = NMIManager(seeders)
reader = option_import.generate_reader()
if reader is not None:
nmi_manager.plot(reader, save_name)
else:
graph, communities = option_import.import_real(directory, need_truth=True)
print("Graph and community imported")
seeders.append(Spreadhub(int(float(len(graph)) * 0.2)))
nmi_manager.read_real(graph, communities)
| [
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
8265,
13,
11201,
5487,
13,
381,
81,
1330,
350,
4805,
198,
6738,
8265,
13,
11748,
62,
25811,
1330,
18634,
198,
6738,
8265,
13,
1652,
81,
13,
28158,
62,
16002,
1330,
19430,
3... | 2.708556 | 374 |
from collections import deque
n, q = map(int, input().split())
links = [list(map(int, input().split())) for _ in range(n - 1)]
nodes = []
for i in range(n + 1):
nodes.append(Node(i))
for j in range(n - 1):
edge_start, edge_end = links[j]
nodes[edge_start].nears.append(edge_end)
nodes[edge_end].nears.append(edge_start)
queue = deque()
queue.append(nodes[1])
while queue:
node = queue.popleft()
nears = node.nears
for near in nears:
if nodes[near].sign == False:
queue.append(nodes[near])
nodes[near].sign = True
if node.color == "r":
nodes[near].color = "b"
else:
nodes[near].color = "r"
for k in range(q):
c, d = map(int, input().split())
if nodes[c].color == nodes[d].color:
print("Town")
else:
print("Road")
| [
6738,
17268,
1330,
390,
4188,
628,
198,
198,
77,
11,
10662,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
28751,
796,
685,
4868,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
329,
4808,
287,
2837,
7,
77,
532,
352,
... | 2.133005 | 406 |
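The breadth-first two-colouring above relies on a Node class that is not included in the row; a minimal sketch consistent with how it is used (constructed from an index, with nears, sign and color attributes) could be:
class Node:
    def __init__(self, index):
        self.index = index   # vertex number
        self.nears = []      # adjacent vertex numbers
        self.sign = False    # visited flag used by the BFS
        self.color = "r"     # parity colour, "r" or "b"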
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class ScrapySplashWrapperError(Exception):
"""Base class for other exceptions"""
pass
class InvalidProxy(ScrapySplashWrapperError):
"""Raised when the proxy is not valid"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
4871,
1446,
2416,
88,
26568,
1077,
36918,
2848,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,... | 2.843373 | 83 |
import json
import requests
from flask import Flask, jsonify, render_template, redirect, request
from form_formatter.append_snapshot_formatter import AppendSnapshotFormatter
from form_formatter.update_frequency_formatter import UpdateFrequencyFormatter
from form_formatter.update_open_date_formatter import UpdateOpenDateFormatter
from portfolio_creator.data_source import DataSource
from portfolio_creator.portfolio_creator import PortfolioCreator
from report.balance_sheet import BalanceSheet
from report.line_graph import LineGraph
from utilities.constants import Constants
from utilities.epoch_date_converter import EpochDateConverter
from valid_options.account_type import AccountType
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
portfolio = PortfolioCreator().create(DataSource())
@app.route("/")
@app.route("/accounts")
@app.route("/accounts/<account_uuid>")
@app.route("/append_snapshot", methods=['POST'])
@app.route("/update_frequency", methods=['POST'])
@app.route("/update_open_date", methods=['POST'])
@app.route("/balance_sheet")
@app.route("/balance_sheet_rows")
@app.route("/net_worth")
@app.route("/net_worth_vs_time")
if __name__ == "__main__":
app.run()
| [
11748,
33918,
198,
198,
11748,
7007,
198,
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
8543,
62,
28243,
11,
18941,
11,
2581,
198,
198,
6738,
1296,
62,
687,
1436,
13,
33295,
62,
45380,
9442,
62,
687,
1436,
1330,
2034,
437,
43826,
944... | 3.183727 | 381 |
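The Flask route bodies are stripped from the row above. As a hedged illustration of the pattern the decorators imply (the handler name and the returned payload are assumptions, not the project's actual API), a JSON endpoint would look like:
from flask import Flask, jsonify

example_app = Flask(__name__)

@example_app.route("/net_worth")
def example_net_worth():
    # Placeholder figure; the real handler would query the portfolio object built above.
    return jsonify({"net_worth": 0.0})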
from http import HTTPStatus
from io import StringIO
from django.core.exceptions import ObjectDoesNotExist
from django.core.management import call_command
from django.test import TestCase
from django.urls import reverse
from faker import Faker
from rest_framework.test import APIClient
from clients.models import Client
from .factories import ClientFactory
CLIENT_ENDPOINT = reverse('client-detail')
| [
6738,
2638,
1330,
14626,
19580,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
6... | 3.633929 | 112 |
from setuptools import setup
setup(
name="ycd",
version="v0.1.0",
description="Youtube comment downloader",
url="https://github.com/koomook/youtube-comment-downloader",
author="Goobong",
author_email="bongbonggg97@gmail.com",
license="Unlicense",
packages=["ycd"],
install_requires=[],
dependency_links=[],
include_package_data=True,
zip_safe=False,
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
88,
10210,
1600,
198,
220,
220,
220,
2196,
2625,
85,
15,
13,
16,
13,
15,
1600,
198,
220,
220,
220,
6764,
2625,
56,
9762,
2912,
4321,
263,
160... | 2.537975 | 158 |
"""
A module with base components for rendering the scene
"""
from __future__ import annotations
import pygame
from abc import ABC, abstractmethod
from .simulation import EnvironmentComponent, SimObjectComponent, RenderEvent
from .event_system import EventSystem
from .event import Event
from typing import Optional, List, Union, Tuple
from .helper_functions import validate_positive_number
number = Union[int, float]
class Color:
"""
A class containing constants for colors
"""
TRANSPARENT = (0, 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
BLACK = (0, 0, 0)
class Camera(EnvironmentComponent):
"""
Manages all the renderers.
Provides methods for converting from World Coordinates into screenspace coordinates and vice versa
"""
def __init__(self, display: pygame.Surface,
position: Union[Tuple[number, number], List[number]] = (0, 0),
units_per_pixel: float = 1 / 80,
background_color = Color.BLACK,
n_layers: int = 32):
"""
:param display: surface on which the image will be drawn
:param units_per_pixel: number of world space units 1 pixel represents
:param background_color: color of the background
:param position: how much the camera is offset from the center of the screen (in pixels)
:param n_layers: amount of layers. More layers give more flexibility, but take up more memory.
"""
# initializing to then call the setters, which would check if the values are valid
self._units_per_pixel: Optional[float] = None
self._position = pygame.Vector2(position)
self._display = display
self.units_per_pixel = units_per_pixel
self.background_color = background_color
# layers are a list of surfaces. Each renderer has a layer that it draws on, to ensure that certain
# elements are drawn on top of each other
# after all drawing is done, the layers are blitted onto the display
# before the drawing all layers are cleared (i.e. filled with the transparent color)
self._layers: List[pygame.Surface] = []
self.__create_layer_surfaces(n_layers)
# tells the program which layers have been modified, unmodified layers will not be blitted onto the display
# and the will not be cleared (since it is assumed that they're transparent)
self.__layer_modified: List[bool] = [False] * n_layers
super().__init__()
def render_scene(self):
"""
Render the scene
"""
# Clear the screen
self.display.fill(self.background_color)
self.__clear_layer_surfaces()
# render onto the layers
self.environment.event_system.raise_event(CameraRenderEvent(self))
# blit the layers onto the display
for i, layer in enumerate(self._layers):
if(self.__layer_modified[i]):
self._display.blit(layer, (0, 0))
def __clear_layer_surfaces(self):
"""
Fills the layers with the transparent color
"""
for i, layer in enumerate(self._layers):
# only clear the surfaces that are modified
# unmodified surfaces should already be transparent
if not self.__layer_modified[i]:
continue
layer.fill(Color.TRANSPARENT)
self.__layer_modified[i] = False
def get_layer_for_rendering(self, index: int):
"""
Gets the layer surface with the specified index and marks that layer as modified
"""
self.__layer_modified[index] = True
return self._layers[index]
@property
def position(self) -> pygame.Vector2:
"""
The vector that describes how much the camera is offset from the center of the screen (in pixels).
The x direction is right
The y direction is down
"""
return self._position
@position.setter
def position(self, value):
"""
:param value: should be a valid argument for pygame.Vector2 constructor.
"""
self._position = pygame.Vector2(value)
def get_screen_center(self) -> pygame.Vector2:
"""
Returns the screenspace coordinates of the screen center
"""
return self.display.get_rect().center
@property
@display.setter
@property
def units_per_pixel(self) -> float:
"""
Worldspace units per pixel on the screen
"""
return self._units_per_pixel
@units_per_pixel.setter
@property
def pixels_per_unit(self) -> float:
"""
pixels on the screen per worldspace unit
"""
return 1 / self._units_per_pixel
@pixels_per_unit.setter
def world_to_screen(self, world_coords: Union[pygame.Vector2, Tuple[number, number]]) -> Tuple[float, float]:
"""
Converts a worldspace position into a position on the screen in pixels
"""
world_x, world_y = world_coords
surface_rect = self.display.get_rect()
screen_x = world_x * self.pixels_per_unit + surface_rect.centerx - self.position.x
screen_y = -(world_y * self.pixels_per_unit) + surface_rect.centery - self.position.y
return (screen_x, screen_y)
def screen_to_world(self, screen_coords: Union[pygame.Vector2, Tuple[number, number]]) -> Tuple[float, float]:
"""
Converts from a position on the screen into a position in the world
"""
screen_x, screen_y = screen_coords
surface_rect = self.display.get_rect()
world_x = (screen_x - surface_rect.centerx + self.position.x) * self.units_per_pixel
# this might cause world_y to be -0.0 in some cases, but it doesn't really matter.
world_y = -(screen_y - surface_rect.centery + self.position.y) * self.units_per_pixel
return (world_x, world_y)
class Renderer(SimObjectComponent, ABC):
"""
A base class for renderers
Handles the rendering of an object
"""
def __init__(self, color = Color.WHITE, layer: int = 0):
"""
:param layer: objects on lower layers will be drawn first and may be occluded by objects on higher layers.
"""
self.__is_active = True
self._layer = None
self.layer = layer
self.color = color
super().__init__()
@property
@is_active.setter
@abstractmethod
def render(self, surface: pygame.Surface, camera: Camera):
"""
Render the object on a specific surface
"""
pass
@property
@layer.setter
class CameraRenderEvent(Event):
"""
An event raised by the camera when rendering the scene.
Has a reference to the camera.
"""
@property
| [
37811,
198,
32,
8265,
351,
2779,
6805,
329,
14837,
262,
3715,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
12972,
6057,
198,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
764,
14323,
1741,
... | 2.562823 | 2,706 |
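As a self-contained check of the coordinate conversion the Camera above implements (an 800x600 surface, zero camera offset and 80 pixels per worldspace unit are assumed example values):
def world_to_screen(world_xy, center=(400, 300), pixels_per_unit=80):
    # Mirrors Camera.world_to_screen: x grows to the right, worldspace y is flipped.
    wx, wy = world_xy
    return (wx * pixels_per_unit + center[0], -wy * pixels_per_unit + center[1])

print(world_to_screen((1.0, 2.0)))  # -> (480.0, 140.0)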
import click
from paperspace import client, config
from paperspace.cli import common
from paperspace.cli.cli import cli
from paperspace.commands import models as models_commands
@cli.group("models", help="Manage models", cls=common.ClickGroup)
@models_group.command("list", help="List models with optional filtering")
@click.option(
"--experimentId",
"experimentId",
help="Use to filter by experiment ID",
)
@click.option(
"--projectId",
"projectId",
help="Use to filter by project ID",
)
@common.api_key_option
| [
11748,
3904,
198,
198,
6738,
9473,
10223,
1330,
5456,
11,
4566,
198,
6738,
9473,
10223,
13,
44506,
1330,
2219,
198,
6738,
9473,
10223,
13,
44506,
13,
44506,
1330,
537,
72,
198,
6738,
9473,
10223,
13,
9503,
1746,
1330,
4981,
355,
4981,
... | 3.109195 | 174 |
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import joint_transforms
from config import duts_train_path
from datasets import ImageFolder
from misc import AvgMeter, check_mkdir
from Densenet_attention import AADFNet
from torch.backends import cudnn
cudnn.benchmark = True
torch.manual_seed(2018)
os.environ["CUDA_VISIBLE_DEVICES"] = "1,0"
# torch.cuda.set_device(0)
ckpt_path = './ckpt'
args = {
'iter_num': 30000,
'train_batch_size': 10,
'last_iter': 0,
'lr': 1e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': ''
}
joint_transform = joint_transforms.Compose([
joint_transforms.RandomCrop(400),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
train_set = ImageFolder(duts_train_path, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=12, shuffle=True, drop_last=True)
criterion = nn.BCEWithLogitsLoss().cuda()
save_points = range(8000, 30002, 1000)
if __name__ == '__main__':
main()
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
1... | 2.664783 | 531 |
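main() is not included in the row above; the 'lr', 'lr_decay' and 'iter_num' arguments are typically combined into a polynomial learning-rate schedule. A hedged sketch, not necessarily the author's exact schedule:
def poly_lr(base_lr, curr_iter, max_iter, power):
    return base_lr * (1.0 - float(curr_iter) / max_iter) ** power

print(poly_lr(1e-3, 15000, 30000, 0.9))  # roughly 5.4e-4 at the halfway point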
# Copyright (c), Tiziano Müller
# SPDX-License-Identifier: MIT
import itertools
import re
import warnings
from dataclasses import dataclass
from typing import (
IO,
Any,
Callable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
from typing_extensions import Protocol
SYM2NUM = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
"Na": 11,
"Mg": 12,
"Al": 13,
"Si": 14,
"P": 15,
"S": 16,
"Cl": 17,
"Ar": 18,
"K": 19,
"Ca": 20,
"Sc": 21,
"Ti": 22,
"V": 23,
"Cr": 24,
"Mn": 25,
"Fe": 26,
"Co": 27,
"Ni": 28,
"Cu": 29,
"Zn": 30,
"Ga": 31,
"Ge": 32,
"As": 33,
"Se": 34,
"Br": 35,
"Kr": 36,
"Rb": 37,
"Sr": 38,
"Y": 39,
"Zr": 40,
"Nb": 41,
"Mo": 42,
"Tc": 43,
"Ru": 44,
"Rh": 45,
"Pd": 46,
"Ag": 47,
"Cd": 48,
"In": 49,
"Sn": 50,
"Sb": 51,
"Te": 52,
"I": 53,
"Xe": 54,
"Cs": 55,
"Ba": 56,
"La": 57,
"Ce": 58,
"Pr": 59,
"Nd": 60,
"Pm": 61,
"Sm": 62,
"Eu": 63,
"Gd": 64,
"Tb": 65,
"Dy": 66,
"Ho": 67,
"Er": 68,
"Tm": 69,
"Yb": 70,
"Lu": 71,
"Hf": 72,
"Ta": 73,
"W": 74,
"Re": 75,
"Os": 76,
"Ir": 77,
"Pt": 78,
"Au": 79,
"Hg": 80,
"Tl": 81,
"Pb": 82,
"Bi": 83,
"Po": 84,
"At": 85,
"Rn": 86,
"Fr": 87,
"Ra": 88,
"Ac": 89,
"Th": 90,
"Pa": 91,
"U": 92,
"Np": 93,
"Pu": 94,
"Am": 95,
"Cm": 96,
"Bk": 97,
"Cf": 98,
"Es": 99,
"Fm": 100,
"Md": 101,
"No": 102,
"Lr": 103,
"Rf": 104,
"Db": 105,
"Sg": 106,
"Bh": 107,
"Hs": 108,
"Mt": 109,
"Ds": 110,
"Rg": 111,
"Cn": 112,
"Fl": 114,
"Lv": 116,
}
EOF_MARKER_LINE = "Eof marker"
EMPTY_LINE_MATCH = re.compile(r"^(\s*|\s*#.*)$")
BLOCK_MATCH = re.compile(r"^\s*(?P<element>[a-zA-Z]{1,3})\s+(?P<family>\S+).*")
def chained_exception(cls: type, msg: str, prev: Exception):
"""Create a chained exception"""
exc = cls(msg)
exc.__cause__ = prev
return exc
class MulitpleValueErrorsException(ValueError):
"""An exception which contains multiple value error exceptions"""
_T = TypeVar("_T")
@dataclass
def dformat(val, ndigits, slen):
"""
Right-pads a decimal with spaces such that there are ndigits characters after the dot
and the complete string is at least slen characters in width.
"""
digits = ndigits + val.as_tuple().exponent if val.as_tuple().exponent < 0 else ndigits + 1
return f"{format(val, 'f') + ' '*(digits):>{slen}}"
| [
2,
15069,
357,
66,
828,
309,
528,
10115,
40790,
6051,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
340,
861,
10141,
198,
11748,
302,
198,
11748,
14601,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,... | 1.800259 | 1,542 |
import torch
from torch import nn
from torch.nn.parameter import Parameter
class TimeNormalization(nn.Module):
r"""
Time Normalization layer. Normalizes the input using its mean and variance over time, as defined in the following equations:
.. math::
\hat{h}_n=\frac{h_n-\mu_n}{\sqrt{\sigma_n^2+\epsilon}}\\
\mu_n=\alpha h_n +(1-\alpha)\mu_{n-1}
:param alpha: smoothing factor for the running mean and variance (see the equation above)
:param num_features: number of features (channels) in the input
:param epsilon: small constant added to the variance for numerical stability
:param affine: whether to apply a learnable affine transform after normalization
"""
def init_state(self, working_device, batch_size: int = 1) -> torch.Tensor:
"""
This function generate the initial state of the Module. This include only Time Normalization state
:param working_device: str that state the current working device.
:param batch_size: int represent the batch size.
:return: A Tensor, that hold the initial state.
"""
state = torch.stack(
[torch.zeros(batch_size, self.num_features, device=working_device),
torch.ones(batch_size, self.num_features, device=working_device)],
dim=0)
return state
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
13,
17143,
2357,
1330,
25139,
2357,
628,
198,
4871,
3862,
26447,
1634,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
374,
37811,
198,
220,
220,
220,
3862,
14... | 2.475336 | 446 |
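The forward pass is not part of the row above; a hypothetical per-step update matching the docstring equations and the (mean, variance) state layout from init_state could look like this (the variance update mirrors the mean update by analogy and is an assumption):
import torch

def time_norm_step(h, state, alpha, epsilon=1e-5):
    mu, var = state[0], state[1]
    mu = alpha * h + (1 - alpha) * mu                  # running mean, as in the docstring
    var = alpha * (h - mu) ** 2 + (1 - alpha) * var    # assumed running variance update
    h_hat = (h - mu) / torch.sqrt(var + epsilon)
    return h_hat, torch.stack([mu, var], dim=0)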
# =============================================
from django.conf import settings
from django.conf.urls.static import static
# =============================================
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from users import views as user_views
# from news import views
urlpatterns = [
path('admin/', admin.site.urls),
# path('notes/', include("notepad.urls")),
path('', include("news.urls")),
path('newsapi/', include('news.api.urls')),
path('userapi/', include('users.userapi.urls')),
path('register/', user_views.register, name='register'),
path('category_list/', user_views.category_list, name='category_list'),
path('profile/', user_views.profile, name='profile'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('password-reset/',
auth_views.PasswordResetView.as_view(
template_name='users/password_reset.html'
),
name='password_reset'),
path('password-reset/done/',
auth_views.PasswordResetDoneView.as_view(
template_name='users/password_reset_done.html'
),
name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='users/password_reset_confirm.html'
),
name='password_reset_confirm'),
path('password-reset-complete/',
auth_views.PasswordResetCompleteView.as_view(
template_name='users/password_reset_complete.html'
),
name='password_reset_complete'),
]
# path('cate/', user_views.cate, name='cate'),
# path('categorypicker/', user_views.categorypicker, name='categorypicker'),
# path('categorytracker/', user_views.categorytracker, name='categorytracker'),
# path('scrape/', views.scrape, name="scrape"),
# path('newslist/', views.news_list, name="home2"),
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| [
2,
46111,
25609,
201,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
201,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
201,
198,
2,
46111,
25609,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
2... | 2.538462 | 1,001 |
from __future__ import absolute_import
import os
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D
from keras.layers.normalization import BatchNormalization
from keras.layers import add
from keras.layers import Conv2D, Conv2DTranspose
from keras import backend as K
from keras.regularizers import l2
import numpy as np
K.set_image_dim_ordering('tf') # Tensorflow dimension ordering
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
28686,
198,
198,
11748,
41927,
292,
13,
27530,
355,
4981,
198,
6738,
41927,
292,
13,
75,
6962,
13,
7295,
1330,
34398,
11,
360,
1072,
11,
14258,
448,
11,
13144,
341,
11,
1610,... | 3.188235 | 170 |
#
# @lc app=leetcode id=683 lang=python3
#
# [683] K Empty Slots
#
# https://leetcode.com/problems/k-empty-slots/description/
#
# algorithms
# Hard (35.43%)
# Likes: 573
# Dislikes: 560
# Total Accepted: 44.4K
# Total Submissions: 125.4K
# Testcase Example: '[1,3,2]\n1'
#
# You have N bulbs in a row numbered from 1 to N. Initially, all the bulbs are
# turned off. We turn on exactly one bulb everyday until all bulbs are on after
# N days.
#
# You are given an array bulbs of length N where bulbs[i] = x means that on the
# (i+1)th day, we will turn on the bulb at position x where i is 0-indexed and
# x is 1-indexed.
#
# Given an integer K, find out the minimum day number such that there exists
# two turned on bulbs that have exactly K bulbs between them that are all
# turned off.
#
# If there isn't such day, return -1.
#
#
#
# Example 1:
#
#
# Input:
# bulbs: [1,3,2]
# K: 1
# Output: 2
# Explanation:
# On the first day: bulbs[0] = 1, first bulb is turned on: [1,0,0]
# On the second day: bulbs[1] = 3, third bulb is turned on: [1,0,1]
# On the third day: bulbs[2] = 2, second bulb is turned on: [1,1,1]
# We return 2 because on the second day, there were two on bulbs with one off
# bulb between them.
#
#
# Example 2:
#
#
# Input:
# bulbs: [1,2,3]
# K: 1
# Output: -1
#
#
#
#
# Note:
#
#
# 1 <= N <= 20000
# 1 <= bulbs[i] <= N
# bulbs is a permutation of numbers from 1 to N.
# 0 <= K <= 20000
#
#
#
# @lc code=start
# @lc code=end
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
47521,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
47521,
60,
509,
33523,
3454,
1747,
198,
2,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
74,
12,
... | 2.488255 | 596 |
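The solution body between the @lc markers is empty in the row above. One hedged sketch of the standard sliding-window approach (convert bulbs to a days array, then keep windows of width K+1 whose interior bulbs turn on later than both endpoints):
def k_empty_slots(bulbs, K):
    n = len(bulbs)
    days = [0] * n                      # days[p] = day on which the bulb at position p+1 turns on
    for day, position in enumerate(bulbs, 1):
        days[position - 1] = day

    ans = float("inf")
    left, right = 0, K + 1
    while right < n:
        for i in range(left + 1, right):
            if days[i] < days[left] or days[i] < days[right]:
                left, right = i, i + K + 1   # an interior bulb lights up too early, restart the window here
                break
        else:
            ans = min(ans, max(days[left], days[right]))
            left, right = right, right + K + 1
    return ans if ans != float("inf") else -1

print(k_empty_slots([1, 3, 2], 1))  # 2
print(k_empty_slots([1, 2, 3], 1))  # -1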
from pytest import raises
from remoto import log
from remoto.exc import TimeoutError
from mock import Mock
| [
6738,
12972,
9288,
1330,
12073,
198,
6738,
816,
2069,
1330,
2604,
198,
6738,
816,
2069,
13,
41194,
1330,
3862,
448,
12331,
198,
6738,
15290,
1330,
44123,
628
] | 4 | 27 |
# -*- coding: utf-8 -*-
"""
VTK pipeline to represent a set of points, as sphere glyphs.
"""
import numpy as np
import vtk
from vtk.util import numpy_support
import sksurgeryvtk.models.vtk_base_model as vbm
#pylint:disable=super-with-arguments
class VTKSphereModel(vbm.VTKBaseModel):
"""
Class to represent a set of points as sphere glyphs (one sphere per point).
"""
def __init__(self, points, radius, colour=(1.0, 1.0, 1.0),
visibility=True, opacity=1.0,
pickable=True, resolution = 12):
"""
Creates a new sphere model.
:param points: numpy N x 3 array containing x, y, z as float
:param colour: (R,G,B) where each are floats [0-1]
:param radius: sphere radius in millimetres
:param visibility: boolean, True|False
:param opacity: float [0,1]
:param pickable: boolean, True|False
:param resolution: the resolution (theta and phy)
"""
super(VTKSphereModel, self).__init__(colour, visibility, opacity,
pickable)
# Validate as much as possible, up front.
if points is None:
raise ValueError('points is None.')
if not isinstance(points, np.ndarray):
raise TypeError('points is not a numpy array.')
if points.shape[1] != 3:
raise ValueError('points should have 3 columns.')
if points.shape[0] == 0:
raise ValueError('points should have > 0 rows.')
if points.dtype != np.float:
raise TypeError('points should be float type.')
if radius <= 0:
raise ValueError('sphere radius should be > 0.')
self.points = points
self.vtk_point_array = numpy_support.numpy_to_vtk(
num_array=self.points, deep=True, array_type=vtk.VTK_FLOAT)
self.vtk_points = vtk.vtkPoints()
self.vtk_points.SetData(self.vtk_point_array)
number_of_points = points.shape[0]
cells = np.hstack((np.ones((number_of_points, 1), dtype=np.int64),
np.arange(number_of_points).reshape(-1, 1)))
cells = np.ascontiguousarray(cells, dtype=np.int64)
cell_array = numpy_support.numpy_to_vtk(
num_array=cells, deep=True, array_type=vtk.VTK_ID_TYPE)
self.vtk_cells = vtk.vtkCellArray()
self.vtk_cells.SetCells(number_of_points, cell_array)
self.vtk_poly = vtk.vtkPolyData()
self.vtk_poly.SetPoints(self.vtk_points)
self.vtk_poly.SetVerts(self.vtk_cells)
self.vtk_sphere = vtk.vtkSphereSource()
self.vtk_sphere.SetRadius(radius)
self.vtk_sphere.SetPhiResolution(resolution)
self.vtk_sphere.SetThetaResolution(resolution)
self.vtk_glyph = vtk.vtkGlyph3D()
self.vtk_glyph.SetSourceConnection(self.vtk_sphere.GetOutputPort())
self.vtk_glyph.SetInputData(self.vtk_poly)
self.vtk_glyph.Update()
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputConnection(self.vtk_glyph.GetOutputPort())
self.mapper.Update()
self.actor.SetMapper(self.mapper)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
36392,
42,
11523,
284,
2380,
257,
900,
286,
2173,
11,
355,
16558,
25874,
82,
13,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
410,
... | 2.159044 | 1,465 |
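A hedged usage sketch for the class above (it assumes VTKBaseModel sets up self.actor, as the final SetMapper call implies; render window and interactor wiring is omitted):
import numpy as np
import vtk

points = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 10.0, 0.0]], dtype=float)
sphere_model = VTKSphereModel(points, radius=2.0, colour=(1.0, 0.0, 0.0))

renderer = vtk.vtkRenderer()
renderer.AddActor(sphere_model.actor)   # one glyph actor drawing all three spheres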
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Kumagai group.
import fire
from pymatgen import Element, Structure
from pymatgen.analysis.defects.utils import ChargeDensityAnalyzer
from pymatgen.io.vasp import Chgcar, VolumetricData
from vise.util.structure_symmetrizer import StructureSymmetrizer
if __name__ == '__main__':
fire.Fire(interstitials_from_charge_density)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
15069,
357,
66,
8,
12131,
19162,
363,
1872,
1448,
13,
198,
198,
11748,
2046,
198,
6738,
279,
4948,
265,
5235,
1330,
11703,
11,
32522,
198,
6738,
279,
4948,
265... | 2.834586 | 133 |
from github import Github
import boto3
import json
import os
import requests
def get_params():
"""
Retrieve environment variables
"""
pat_name = os.getenv("PAT_SECRET_NAME")
pat = get_pat(pat_name)
return pat
def get_user_repos(gh):
"""
Get all repos for an organization
"""
repos = []
user_repos = gh.get_user().get_repos(type="owner")
repos = [r for r in user_repos if not r.fork and not r.archived]
return repos
def get_workflows(repos):
"""
Collect all workflows that are currently active.
"""
disabled_workflows = []
for repo in repos:
workflows_to_enable = [
w for w in repo.get_workflows() if w.state == "active"
]
disabled_workflows += workflows_to_enable
return disabled_workflows
def disable_enable_workflows(pat, workflows):
"""
Disable and then re-enable each of the given workflows.
"""
for workflow in workflows:
disable_url = f"{workflow.url}/disable"
enable_url = f"{workflow.url}/enable"
header = {"Authorization": f"Bearer {pat}"}
requests.put(disable_url, headers=header)
requests.put(enable_url, headers=header)
def lambda_handler(event, context):
"""
Toggle (disable, then re-enable) every active workflow.
"""
pat = get_params()
gh = Github(login_or_token=pat)
repos = get_user_repos(gh)
workflows = get_workflows(repos)
disable_enable_workflows(pat, workflows)
return {"statusCode": 200}
lambda_handler(1,1)
| [
6738,
33084,
1330,
38994,
198,
11748,
275,
2069,
18,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
7007,
628,
198,
4299,
651,
62,
37266,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4990,
30227,
2858,
9633,
198,
220,
220,
... | 2.484349 | 607 |
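get_pat is referenced but not defined in the row above; a plausible sketch consistent with the boto3 and json imports, assuming the token sits in AWS Secrets Manager as a JSON blob with a "pat" key (both the storage location and the key name are assumptions):
import json
import boto3

def get_pat(secret_name):
    client = boto3.client("secretsmanager")
    secret = client.get_secret_value(SecretId=secret_name)
    return json.loads(secret["SecretString"])["pat"]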
MAX_BUCKETS = 33
BATCH_SIZE = 20 | [
22921,
62,
33,
16696,
32716,
796,
4747,
198,
33,
11417,
62,
33489,
796,
1160
] | 2.285714 | 14 |
# Copyright 2019 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils to manage range images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
__all__ = [
'encode_lidar_features', 'decode_lidar_features', 'scatter_nd_with_pool',
'compute_range_image_polar', 'compute_range_image_cartesian',
'build_range_image_from_point_cloud', 'build_camera_depth_image',
'extract_point_cloud_from_range_image', 'crop_range_image',
'compute_inclination'
]
def _combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(input=tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
# A magic number that provides a good resolution we need for lidar range after
# quantization from float to uint16.
_RANGE_TO_METERS = 0.00585532144
def _encode_range(r):
"""Encodes lidar range from float to uint16.
Args:
r: A float tensor represents lidar range.
Returns:
Encoded range with type as uint16.
"""
encoded_r = r / _RANGE_TO_METERS
with tf.control_dependencies([
tf.compat.v1.assert_non_negative(encoded_r),
tf.compat.v1.assert_less(encoded_r,
math.pow(2, 16) - 1.001)
]):
return tf.cast(encoded_r, dtype=tf.uint16)
def _decode_range(r):
"""Decodes lidar range from integers to float32.
Args:
r: A integer tensor.
Returns:
Decoded range.
"""
return tf.cast(r, dtype=tf.float32) * _RANGE_TO_METERS
def _encode_intensity(intensity):
"""Encodes lidar intensity from float to uint16.
The integer value stored here is the upper 16 bits of a float. This
preserves the exponent and truncates the mantissa to 7bits, which gives
plenty of dynamic range and preserves about 3 decimal places of
precision.
Args:
intensity: A float tensor represents lidar intensity.
Returns:
Encoded intensity with type as uint32.
"""
if intensity.dtype != tf.float32:
raise TypeError('intensity must be of type float32')
intensity_uint32 = tf.bitcast(intensity, tf.uint32)
intensity_uint32_shifted = tf.bitwise.right_shift(intensity_uint32, 16)
return tf.cast(intensity_uint32_shifted, dtype=tf.uint16)
def _decode_intensity(intensity):
"""Decodes lidar intensity from uint16 to float32.
The given intensity is encoded with _encode_intensity.
Args:
intensity: A uint16 tensor represents lidar intensity.
Returns:
Decoded intensity with type as float32.
"""
if intensity.dtype != tf.uint16:
raise TypeError('intensity must be of type uint16')
intensity_uint32 = tf.cast(intensity, dtype=tf.uint32)
intensity_uint32_shifted = tf.bitwise.left_shift(intensity_uint32, 16)
return tf.bitcast(intensity_uint32_shifted, tf.float32)
def _encode_elongation(elongation):
"""Encodes lidar elongation from float to uint8.
Args:
elongation: A float tensor represents lidar elongation.
Returns:
Encoded lidar elongation.
"""
encoded_elongation = elongation / _RANGE_TO_METERS
with tf.control_dependencies([
tf.compat.v1.assert_non_negative(encoded_elongation),
tf.compat.v1.assert_less(encoded_elongation,
math.pow(2, 8) - 1.001)
]):
return tf.cast(encoded_elongation, dtype=tf.uint8)
def _decode_elongation(elongation):
"""Decodes lidar elongation from uint8 to float.
Args:
elongation: A uint8 tensor represents lidar elongation.
Returns:
Decoded lidar elongation.
"""
return tf.cast(elongation, dtype=tf.float32) * _RANGE_TO_METERS
def encode_lidar_features(lidar_point_feature):
"""Encodes lidar features (range, intensity, enlongation).
This function encodes lidar point features such that all features have the
same ordering as lidar range.
Args:
lidar_point_feature: [N, 3] float32 tensor.
Returns:
[N, 3] int64 tensors that encodes lidar_point_feature.
"""
if lidar_point_feature.dtype != tf.float32:
raise TypeError('lidar_point_feature must be of type float32.')
r, intensity, elongation = tf.unstack(lidar_point_feature, axis=-1)
encoded_r = tf.cast(_encode_range(r), dtype=tf.uint32)
encoded_intensity = tf.cast(_encode_intensity(intensity), dtype=tf.uint32)
encoded_elongation = tf.cast(_encode_elongation(elongation), dtype=tf.uint32)
encoded_r_shifted = tf.bitwise.left_shift(encoded_r, 16)
encoded_intensity = tf.cast(
tf.bitwise.bitwise_or(encoded_r_shifted, encoded_intensity),
dtype=tf.int64)
encoded_elongation = tf.cast(
tf.bitwise.bitwise_or(encoded_r_shifted, encoded_elongation),
dtype=tf.int64)
encoded_r = tf.cast(encoded_r, dtype=tf.int64)
return tf.stack([encoded_r, encoded_intensity, encoded_elongation], axis=-1)
def decode_lidar_features(lidar_point_feature):
"""Decodes lidar features (range, intensity, enlongation).
This function decodes lidar point features encoded by 'encode_lidar_features'.
Args:
lidar_point_feature: [N, 3] int64 tensor.
Returns:
[N, 3] float tensors that encodes lidar_point_feature.
"""
r, intensity, elongation = tf.unstack(lidar_point_feature, axis=-1)
decoded_r = _decode_range(r)
intensity = tf.bitwise.bitwise_and(intensity, int(0xFFFF))
decoded_intensity = _decode_intensity(tf.cast(intensity, dtype=tf.uint16))
elongation = tf.bitwise.bitwise_and(elongation, int(0xFF))
decoded_elongation = _decode_elongation(tf.cast(elongation, dtype=tf.uint8))
return tf.stack([decoded_r, decoded_intensity, decoded_elongation], axis=-1)
def scatter_nd_with_pool(index,
value,
shape,
pool_method=tf.math.unsorted_segment_max):
"""Similar as tf.scatter_nd but allows custom pool method.
tf.scatter_nd accumulates (sums) values if there are duplicate indices.
Args:
index: [N, 2] tensor. Inner dims are coordinates along height (row) and then
width (col).
value: [N, ...] tensor. Values to be scattered.
shape: (height,width) list that specifies the shape of the output tensor.
pool_method: pool method when there are multiple points scattered to one
location.
Returns:
image: tensor of shape with value scattered. Missing pixels are set to 0.
"""
if len(shape) != 2:
raise ValueError('shape must be of size 2')
height = shape[0]
width = shape[1]
# idx: [N]
index_encoded, idx = tf.unique(index[:, 0] * width + index[:, 1])
value_pooled = pool_method(value, idx, tf.size(input=index_encoded))
index_unique = tf.stack(
[index_encoded // width,
tf.math.mod(index_encoded, width)], axis=-1)
shape = [height, width]
value_shape = _combined_static_and_dynamic_shape(value)
if len(value_shape) > 1:
shape = shape + value_shape[1:]
image = tf.scatter_nd(index_unique, value_pooled, shape)
return image
def compute_range_image_polar(range_image,
extrinsic,
inclination,
dtype=tf.float32,
scope=None):
"""Computes range image polar coordinates.
Args:
range_image: [B, H, W] tensor. Lidar range images.
extrinsic: [B, 4, 4] tensor. Lidar extrinsic.
inclination: [B, H] tensor. Inclination for each row of the range image.
0-th entry corresponds to the 0-th row of the range image.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_polar: [B, H, W, 3] polar coordinates.
"""
# pylint: disable=unbalanced-tuple-unpacking
_, height, width = _combined_static_and_dynamic_shape(range_image)
range_image_dtype = range_image.dtype
range_image = tf.cast(range_image, dtype=dtype)
extrinsic = tf.cast(extrinsic, dtype=dtype)
inclination = tf.cast(inclination, dtype=dtype)
with tf.compat.v1.name_scope(scope, 'ComputeRangeImagePolar',
[range_image, extrinsic, inclination]):
with tf.compat.v1.name_scope('Azimuth'):
# [B].
az_correction = tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0])
# [W].
ratios = (tf.cast(tf.range(width, 0, -1), dtype=dtype) - .5) / tf.cast(
width, dtype=dtype)
# [B, W].
azimuth = (ratios * 2. - 1.) * np.pi - tf.expand_dims(az_correction, -1)
# [B, H, W]
azimuth_tile = tf.tile(azimuth[:, tf.newaxis, :], [1, height, 1])
# [B, H, W]
inclination_tile = tf.tile(inclination[:, :, tf.newaxis], [1, 1, width])
range_image_polar = tf.stack([azimuth_tile, inclination_tile, range_image],
axis=-1)
return tf.cast(range_image_polar, dtype=range_image_dtype)
def compute_range_image_cartesian(range_image_polar,
extrinsic,
pixel_pose=None,
frame_pose=None,
dtype=tf.float32,
scope=None):
"""Computes range image cartesian coordinates from polar ones.
Args:
range_image_polar: [B, H, W, 3] float tensor. Lidar range image in polar
coordinate in sensor frame.
extrinsic: [B, 4, 4] float tensor. Lidar extrinsic.
pixel_pose: [B, H, W, 4, 4] float tensor. If not None, it sets pose for each
range image pixel.
frame_pose: [B, 4, 4] float tensor. This must be set when pixel_pose is set.
It decides the vehicle frame at which the cartesian points are computed.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_cartesian: [B, H, W, 3] cartesian coordinates.
"""
range_image_polar_dtype = range_image_polar.dtype
range_image_polar = tf.cast(range_image_polar, dtype=dtype)
extrinsic = tf.cast(extrinsic, dtype=dtype)
if pixel_pose is not None:
pixel_pose = tf.cast(pixel_pose, dtype=dtype)
if frame_pose is not None:
frame_pose = tf.cast(frame_pose, dtype=dtype)
with tf.compat.v1.name_scope(
scope, 'ComputeRangeImageCartesian',
[range_image_polar, extrinsic, pixel_pose, frame_pose]):
azimuth, inclination, range_image_range = tf.unstack(
range_image_polar, axis=-1)
cos_azimuth = tf.cos(azimuth)
sin_azimuth = tf.sin(azimuth)
cos_incl = tf.cos(inclination)
sin_incl = tf.sin(inclination)
# [B, H, W].
x = cos_azimuth * cos_incl * range_image_range
y = sin_azimuth * cos_incl * range_image_range
z = sin_incl * range_image_range
# [B, H, W, 3]
range_image_points = tf.stack([x, y, z], -1)
# [B, 3, 3]
rotation = extrinsic[..., 0:3, 0:3]
# translation [B, 1, 3]
translation = tf.expand_dims(tf.expand_dims(extrinsic[..., 0:3, 3], 1), 1)
# To vehicle frame.
# [B, H, W, 3]
range_image_points = tf.einsum('bkr,bijr->bijk', rotation,
range_image_points) + translation
if pixel_pose is not None:
# To global frame.
# [B, H, W, 3, 3]
pixel_pose_rotation = pixel_pose[..., 0:3, 0:3]
# [B, H, W, 3]
pixel_pose_translation = pixel_pose[..., 0:3, 3]
# [B, H, W, 3]
range_image_points = tf.einsum(
'bhwij,bhwj->bhwi', pixel_pose_rotation,
range_image_points) + pixel_pose_translation
if frame_pose is None:
raise ValueError('frame_pose must be set when pixel_pose is set.')
# To vehicle frame corresponding to the given frame_pose
# [B, 4, 4]
world_to_vehicle = tf.linalg.inv(frame_pose)
world_to_vehicle_rotation = world_to_vehicle[:, 0:3, 0:3]
world_to_vehicle_translation = world_to_vehicle[:, 0:3, 3]
# [B, H, W, 3]
range_image_points = tf.einsum(
'bij,bhwj->bhwi', world_to_vehicle_rotation,
range_image_points) + world_to_vehicle_translation[:, tf.newaxis,
tf.newaxis, :]
range_image_points = tf.cast(
range_image_points, dtype=range_image_polar_dtype)
return range_image_points
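# Illustrative usage sketch (a minimal example, not from the original module).
# It shows the pixel_pose / frame_pose contract: whenever a per-pixel pose is
# supplied, a frame pose must be supplied as well. All tensors below are
# placeholder identity transforms.
def _example_compute_range_image_cartesian_with_pose():
  """Converts a dummy polar range image using per-pixel poses."""
  batch, height, width = 1, 4, 8
  range_image_polar = tf.zeros([batch, height, width, 3], dtype=tf.float32)
  extrinsic = tf.eye(4, batch_shape=[batch])
  pixel_pose = tf.eye(4, batch_shape=[batch, height, width])
  frame_pose = tf.eye(4, batch_shape=[batch])  # Required because pixel_pose is set.
  return compute_range_image_cartesian(
      range_image_polar, extrinsic, pixel_pose=pixel_pose, frame_pose=frame_pose)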
def build_camera_depth_image(range_image_cartesian,
extrinsic,
camera_projection,
camera_image_size,
camera_name,
pool_method=tf.math.unsorted_segment_min,
scope=None):
"""Builds camera depth image given camera projections.
The depth value is the distance between a lidar point and camera frame origin.
  It is computed from the cartesian coordinates in the vehicle frame and the
  camera extrinsic. Optionally, the cartesian coordinates can be set in the
  vehicle frame corresponding to each pixel pose, so that the generated depth
  takes vehicle motion into account.
Args:
range_image_cartesian: [B, H, W, 3] tensor. Range image points in vehicle
frame. Note that if the range image is provided by pixel_pose, then you
can optionally pass in the cartesian coordinates in each pixel frame.
extrinsic: [B, 4, 4] tensor. Camera extrinsic.
camera_projection: [B, H, W, 6] tensor. Each range image pixel is associated
with at most two camera projections. See dataset.proto for more details.
camera_image_size: a list of [width, height] integers.
camera_name: an integer that identifies a camera. See dataset.proto.
pool_method: pooling method when multiple lidar points are projected to one
image pixel.
scope: the name scope.
Returns:
image: [B, width, height] depth image generated.
"""
with tf.compat.v1.name_scope(
scope, 'BuildCameraDepthImage',
[range_image_cartesian, extrinsic, camera_projection]):
# [B, 4, 4]
vehicle_to_camera = tf.linalg.inv(extrinsic)
# [B, 3, 3]
vehicle_to_camera_rotation = vehicle_to_camera[:, 0:3, 0:3]
# [B, 3]
vehicle_to_camera_translation = vehicle_to_camera[:, 0:3, 3]
# [B, H, W, 3]
range_image_camera = tf.einsum(
'bij,bhwj->bhwi', vehicle_to_camera_rotation,
range_image_cartesian) + vehicle_to_camera_translation[:, tf.newaxis,
tf.newaxis, :]
# [B, H, W]
range_image_camera_norm = tf.norm(tensor=range_image_camera, axis=-1)
camera_projection_mask_1 = tf.tile(
tf.equal(camera_projection[..., 0:1], camera_name), [1, 1, 1, 2])
camera_projection_mask_2 = tf.tile(
tf.equal(camera_projection[..., 3:4], camera_name), [1, 1, 1, 2])
camera_projection_selected = tf.ones_like(
camera_projection[..., 1:3], dtype=camera_projection.dtype) * -1
camera_projection_selected = tf.compat.v1.where(camera_projection_mask_2,
camera_projection[..., 4:6],
camera_projection_selected)
# [B, H, W, 2]
camera_projection_selected = tf.compat.v1.where(camera_projection_mask_1,
camera_projection[..., 1:3],
camera_projection_selected)
# [B, H, W]
camera_projection_mask = tf.logical_or(camera_projection_mask_1,
camera_projection_mask_2)[..., 0]
def fn(args):
"""Builds depth image for a single frame."""
      # NOTE: Do not use ri_range > 0 as the mask, since missing range image
      # pixels are not necessarily populated with range = 0.
mask, ri_range, cp = args
mask_ids = tf.compat.v1.where(mask)
index = tf.gather_nd(
tf.stack([cp[..., 1], cp[..., 0]], axis=-1), mask_ids)
value = tf.gather_nd(ri_range, mask_ids)
return scatter_nd_with_pool(index, value, camera_image_size, pool_method)
images = tf.map_fn(
fn,
elems=[
camera_projection_mask, range_image_camera_norm,
camera_projection_selected
],
dtype=range_image_camera_norm.dtype,
back_prop=False)
return images
def build_range_image_from_point_cloud(points_vehicle_frame,
num_points,
extrinsic,
inclination,
range_image_size,
point_features=None,
dtype=tf.float32,
scope=None):
"""Build virtual range image from point cloud assuming uniform azimuth.
Args:
points_vehicle_frame: tf tensor with shape [B, N, 3] in the vehicle frame.
num_points: [B] int32 tensor indicating the number of points for each frame.
extrinsic: tf tensor with shape [B, 4, 4].
    inclination: tf tensor of shape [B, H] that is the inclination angle per
      row, sorted from highest value to lowest.
range_image_size: a size 2 [height, width] list that configures the size of
the range image.
point_features: If not None, it is a tf tensor with shape [B, N, 2] that
represents lidar 'intensity' and 'elongation'.
dtype: the data type to use.
scope: tf name scope.
Returns:
range_images : [B, H, W, 3] or [B, H, W] tensor. Range images built from the
given points. Data type is the same as that of points_vehicle_frame. 0.0
is populated when a pixel is missing.
ri_indices: tf int32 tensor [B, N, 2]. It represents the range image index
for each point.
    ri_ranges: [B, N] tensor. It represents the distance between each point and
      the sensor frame origin.
"""
with tf.compat.v1.name_scope(
scope,
'BuildRangeImageFromPointCloud',
values=[points_vehicle_frame, extrinsic, inclination]):
points_vehicle_frame_dtype = points_vehicle_frame.dtype
points_vehicle_frame = tf.cast(points_vehicle_frame, dtype)
extrinsic = tf.cast(extrinsic, dtype)
inclination = tf.cast(inclination, dtype)
height, width = range_image_size
# [B, 4, 4]
vehicle_to_laser = tf.linalg.inv(extrinsic)
# [B, 3, 3]
rotation = vehicle_to_laser[:, 0:3, 0:3]
# [B, 1, 3]
    translation = tf.expand_dims(vehicle_to_laser[:, 0:3, 3], 1)
# Points in sensor frame
# [B, N, 3]
points = tf.einsum('bij,bkj->bik', points_vehicle_frame,
rotation) + translation
# [B, N]
xy_norm = tf.norm(tensor=points[..., 0:2], axis=-1)
# [B, N]
point_inclination = tf.atan2(points[..., 2], xy_norm)
# [B, N, H]
point_inclination_diff = tf.abs(
tf.expand_dims(point_inclination, axis=-1) -
tf.expand_dims(inclination, axis=1))
# [B, N]
point_ri_row_indices = tf.argmin(
input=point_inclination_diff, axis=-1, output_type=tf.int32)
# [B, 1], within [-pi, pi]
az_correction = tf.expand_dims(
tf.atan2(extrinsic[..., 1, 0], extrinsic[..., 0, 0]), -1)
# [B, N], within [-2pi, 2pi]
point_azimuth = tf.atan2(points[..., 1], points[..., 0]) + az_correction
point_azimuth_gt_pi_mask = point_azimuth > np.pi
point_azimuth_lt_minus_pi_mask = point_azimuth < -np.pi
point_azimuth = point_azimuth - tf.cast(
point_azimuth_gt_pi_mask, dtype=dtype) * 2 * np.pi
point_azimuth = point_azimuth + tf.cast(
point_azimuth_lt_minus_pi_mask, dtype=dtype) * 2 * np.pi
# [B, N].
point_ri_col_indices = width - 1.0 + 0.5 - (point_azimuth +
np.pi) / (2.0 * np.pi) * width
point_ri_col_indices = tf.cast(
tf.round(point_ri_col_indices), dtype=tf.int32)
with tf.control_dependencies([
tf.compat.v1.assert_non_negative(point_ri_col_indices),
tf.compat.v1.assert_less(point_ri_col_indices, tf.cast(width, tf.int32))
]):
# [B, N, 2]
ri_indices = tf.stack([point_ri_row_indices, point_ri_col_indices], -1)
# [B, N]
ri_ranges = tf.cast(
tf.norm(tensor=points, axis=-1), dtype=points_vehicle_frame_dtype)
def fn(args):
"""Builds a range image for each frame.
Args:
args: a tuple containing:
- ri_index: [N, 2] int tensor.
- ri_value: [N] float tensor.
- num_point: scalar tensor
- point_feature: [N, 2] float tensor.
Returns:
range_image: [H, W]
"""
if len(args) == 3:
ri_index, ri_value, num_point = args
else:
ri_index, ri_value, num_point, point_feature = args
ri_value = tf.concat([ri_value[..., tf.newaxis], point_feature],
axis=-1)
ri_value = encode_lidar_features(ri_value)
# pylint: disable=unbalanced-tuple-unpacking
ri_index = ri_index[0:num_point, :]
ri_value = ri_value[0:num_point, ...]
range_image = scatter_nd_with_pool(ri_index, ri_value, [height, width],
tf.math.unsorted_segment_min)
if len(args) != 3:
range_image = decode_lidar_features(range_image)
return range_image
elems = [ri_indices, ri_ranges, num_points]
if point_features is not None:
elems.append(point_features)
range_images = tf.map_fn(
fn, elems=elems, dtype=points_vehicle_frame_dtype, back_prop=False)
return range_images, ri_indices, ri_ranges
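# Illustrative usage sketch (a minimal example, not from the original module).
# Two hand-picked vehicle-frame points are scattered into a small 8 x 16
# virtual range image; every shape and value here is a placeholder assumption.
def _example_build_range_image_from_point_cloud():
  """Builds a tiny range image from two points in the vehicle frame."""
  points = tf.constant([[[5.0, 1.0, 1.0], [10.0, -2.0, 0.5]]])  # [B=1, N=2, 3]
  num_points = tf.constant([2], dtype=tf.int32)                 # [B]
  extrinsic = tf.eye(4, batch_shape=[1])                        # [B, 4, 4]
  inclination = tf.linspace(0.3, -0.3, 8)[tf.newaxis, :]        # [B, H], descending
  range_images, ri_indices, ri_ranges = build_range_image_from_point_cloud(
      points, num_points, extrinsic, inclination, range_image_size=[8, 16])
  return range_images, ri_indices, ri_ranges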
def extract_point_cloud_from_range_image(range_image,
extrinsic,
inclination,
pixel_pose=None,
frame_pose=None,
dtype=tf.float32,
scope=None):
"""Extracts point cloud from range image.
Args:
range_image: [B, H, W] tensor. Lidar range images.
extrinsic: [B, 4, 4] tensor. Lidar extrinsic.
inclination: [B, H] tensor. Inclination for each row of the range image.
0-th entry corresponds to the 0-th row of the range image.
pixel_pose: [B, H, W, 4, 4] tensor. If not None, it sets pose for each range
image pixel.
frame_pose: [B, 4, 4] tensor. This must be set when pixel_pose is set. It
decides the vehicle frame at which the cartesian points are computed.
dtype: float type to use internally. This is needed as extrinsic and
inclination sometimes have higher resolution than range_image.
scope: the name scope.
Returns:
range_image_cartesian: [B, H, W, 3] with {x, y, z} as inner dims in vehicle
frame.
"""
with tf.compat.v1.name_scope(
scope, 'ExtractPointCloudFromRangeImage',
[range_image, extrinsic, inclination, pixel_pose, frame_pose]):
range_image_polar = compute_range_image_polar(
range_image, extrinsic, inclination, dtype=dtype)
range_image_cartesian = compute_range_image_cartesian(
range_image_polar,
extrinsic,
pixel_pose=pixel_pose,
frame_pose=frame_pose,
dtype=dtype)
return range_image_cartesian
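# Illustrative usage sketch (a minimal example, not from the original module).
# It converts a constant dummy range image back into vehicle-frame cartesian
# points; shapes are placeholder assumptions.
def _example_extract_point_cloud_from_range_image():
  """Extracts a [B, H, W, 3] point image from a dummy range image."""
  batch, height, width = 1, 4, 8
  range_image = tf.ones([batch, height, width], dtype=tf.float32)
  extrinsic = tf.eye(4, batch_shape=[batch])
  inclination = tf.tile(
      tf.linspace(0.2, -0.2, height)[tf.newaxis, :], [batch, 1])
  return extract_point_cloud_from_range_image(range_image, extrinsic, inclination)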
def crop_range_image(range_images, new_width, shift=None, scope=None):
"""Crops range image by shrinking the width.
Requires: new_width is smaller than the existing width.
Args:
range_images: [B, H, W, ...]
new_width: an integer.
    shift: a list of integers, one per batch entry, that shifts the crop window.
Positive is right shift. Negative is left shift. We assume the shift keeps
the window inside the image (i.e. no wrap).
scope: the name scope.
Returns:
range_image_crops: [B, H, new_width, ...]
"""
# pylint: disable=unbalanced-tuple-unpacking
shape = _combined_static_and_dynamic_shape(range_images)
batch = shape[0]
width = shape[2]
if width == new_width:
return range_images
if new_width < 1:
raise ValueError('new_width must be positive.')
if width is not None and new_width >= width:
raise ValueError('new_width {} should be < the old width {}.'.format(
new_width, width))
if shift is None:
shift = [0] * batch
diff = width - new_width
left = [diff // 2 + i for i in shift]
right = [i + new_width for i in left]
for l, r in zip(left, right):
if l < 0 or r > width:
raise ValueError(
'shift {} is invalid given new_width {} and width {}.'.format(
shift, new_width, width))
range_image_crops = []
with tf.compat.v1.name_scope(scope, 'CropRangeImage', [range_images]):
for i in range(batch):
range_image_crop = range_images[i, :, left[i]:right[i], ...]
range_image_crops.append(range_image_crop)
return tf.stack(range_image_crops, axis=0)
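# Illustrative usage sketch (a minimal example, not from the original module).
# A [2, 4, 10] range image is centre-cropped to width 6, with the second
# frame's crop window shifted one pixel to the right.
def _example_crop_range_image():
  """Crops a dummy batch of range images to a narrower width."""
  range_images = tf.reshape(tf.range(2 * 4 * 10, dtype=tf.float32), [2, 4, 10])
  return crop_range_image(range_images, new_width=6, shift=[0, 1])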
def compute_inclination(inclination_range, height, scope=None):
"""Computes uniform inclination range based the given range and height.
Args:
inclination_range: [..., 2] tensor. Inner dims are [min inclination, max
inclination].
height: an integer indicates height of the range image.
scope: the name scope.
Returns:
inclination: [..., height] tensor. Inclinations computed.
"""
with tf.compat.v1.name_scope(scope, 'ComputeInclination',
[inclination_range]):
diff = inclination_range[..., 1] - inclination_range[..., 0]
inclination = (
(.5 + tf.cast(tf.range(0, height), dtype=inclination_range.dtype)) /
tf.cast(height, dtype=inclination_range.dtype) *
tf.expand_dims(diff, axis=-1) + inclination_range[..., 0:1])
return inclination
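# Illustrative usage sketch (a minimal example, not from the original module).
# The [min, max] inclination pair below is an arbitrary placeholder.
def _example_compute_inclination():
  """Computes four evenly spaced beam inclinations between -0.30 and 0.45 rad."""
  inclination_range = tf.constant([-0.30, 0.45])
  return compute_inclination(inclination_range, height=4)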
| [
2,
15069,
13130,
383,
6378,
5908,
4946,
16092,
292,
316,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.332036 | 11,565 |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
| [
11748,
28686,
198,
198,
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2025,
82,
856,
49493,
7,
198,
220,
220,
... | 2.459459 | 74 |
# pylint:disable=redefined-outer-name
import json
import pytest
from tinvest import CandleStreamingResponse, Streaming
pytestmark = pytest.mark.asyncio
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.mark.usefixtures('_candle_message')
| [
2,
279,
2645,
600,
25,
40223,
28,
445,
18156,
12,
39605,
12,
3672,
198,
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
19783,
4223,
1330,
44973,
12124,
278,
31077,
11,
43124,
198,
198,
9078,
9288,
4102,
796,
12972,
9288,
... | 2.806452 | 93 |
#!/usr/bin/python3.2
# Illustrate how command-line arguments work:
import sys
print("There are "+str(len(sys.argv))+" element(s) passed in from the command line.")
i=0
for x in sys.argv:
print("Argv("+str(i)+") is: ",x)
i=i+1
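# Example session (the file name and arguments below are assumptions; actual
# output depends on how the script is invoked):
#   $ python3 argv_demo.py alpha beta
#   There are 3 element(s) passed in from the command line.
#   Argv(0) is:  argv_demo.py
#   Argv(1) is:  alpha
#   Argv(2) is:  beta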
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
17,
198,
2,
19418,
703,
3141,
1627,
7159,
670,
25,
198,
198,
11748,
25064,
198,
198,
4798,
7203,
1858,
389,
43825,
2536,
7,
11925,
7,
17597,
13,
853,
85,
4008,
10,
1,
5002,
7,
82,
8,... | 2.549451 | 91 |
#!/usr/bin/env python
# coding: utf-8
"""
xopt driver using Processes
Basic usage:
python -m xopt.run xopt.yaml
positional arguments:
input_file Xopt YAML input file
optional arguments:
-h, --help show this help message and exit
-n MAX_WORKERS Maximum workers
-logfile LOGFILE log file
-v, --verbose Verbosity level -v (INFO), -vv (DEBUG)
"""
from xopt import Xopt, xopt_logo
import logging
from concurrent.futures import ProcessPoolExecutor
import argparse
import os
import sys
from psutil import cpu_count
from pprint import pprint
#ARGS = sys.argv[1:]
#ARGS = 'xopt.in'.split()
parser = argparse.ArgumentParser(description='Configure xopt')
# Main input file
parser.add_argument('input_file', help='Xopt YAML input file')
# Max workers for processes.
parser.add_argument('-n', dest='max_workers', default=None, help='Maximum workers')
# Logging arguments
parser.add_argument('-logfile', dest='logfile', default=None, help='log file')
# Logging Verbosity levels
parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbosity level -v (INFO), -vv (DEBUG) ')
# Parse args
args = parser.parse_args()
# handle -v, -vv logging levels
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
level = levels[min(len(levels)-1, args.verbose)] # capped to number of levels
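# Example mapping: no -v -> WARNING, -v -> INFO, -vv (or more) -> DEBUG.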
# Setup logging
logging.basicConfig(filename=args.logfile,
format='%(asctime)s %(name)s %(levelname)s:%(message)s',
level=level
)
logger = logging.getLogger(__name__)
logger.debug("a debug message")
logger.info("a info message")
logger.warning("a warning message")
infile = args.input_file
assert os.path.exists(infile), f'Input file does not exist: {infile}'
if __name__ == "__main__":
print(xopt_logo)
print('_________________________________')
print('Parallel execution with processes')
if args.max_workers:
print('max_workers', args.max_workers)
else:
print('Automatic max workers')
logger.info(f'CPU count: {cpu_count()}')
logger.debug('this is a debug message')
X = Xopt(infile)
print(X)
sys.stdout.flush()
with ProcessPoolExecutor() as executor:
X.run(executor=executor)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
628,
198,
37811,
198,
87,
8738,
4639,
1262,
10854,
274,
198,
198,
26416,
8748,
25,
198,
198,
29412,
532,
76,
2124,
8738,
13,
5143,
2124,
8738,
13,
8... | 2.585393 | 890 |
import io
import os
import time
import cv2
import numpy as np
from flask import Flask, Response, json, request, g, send_from_directory
from flask_cors import CORS
from detector import Detector
from recoer import Recoer
detector = Detector('./data/models/ctpn.pb')
recoer = Recoer('./tf_crnn/data/chars/chn.txt', './data/models/crnn.pb')
app = Flask(__name__, static_folder='web/build')
CORS(app)
@app.before_request
@app.after_request
@app.route("/ocr", methods=['POST'])
# Serve React App
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
if __name__ == "__main__":
app.run(host='0.0.0.0')
| [
11748,
33245,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
42903,
1330,
46947,
11,
18261,
11,
33918,
11,
2581,
11,
308,
11,
3758,
62,
6738,
62,
34945,
198,
6738,
4290... | 2.595041 | 242 |
import unittest
from operator import truediv
import sys
sys.path.append('./')
solutions = __import__('solutions.034_search_for_a_range', fromlist='*')
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
10088,
1330,
491,
1739,
452,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
11537,
198,
198,
82,
14191,
796,
11593,
11748,
834,
10786,
82,
14191,
13,
49841,
62,
12947,
62,
1640,
62,
... | 2.54878 | 82 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2017 - Anil Lakhman - MIT License
# -----------------------------------------------------------------------------
from docutils import nodes
from docutils.parsers.rst import roles
import re
node_map = {
'tooltip-top': tooltip_top_node,
'tooltip-right': tooltip_right_node,
'tooltip-bottom': tooltip_bottom_node,
'tooltip-left': tooltip_left_node,
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
15069,
357,
66,
8,
2177,
532,
1052,
346,
406,
11322,
805,
532,
17168,
13789,
198,
2,
16529,
... | 3.541096 | 146 |
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import os
# Center coordinates
cx = 160
cy = 120
os.system( "echo 0=150 > /dev/servoblaster" )
os.system( "echo 1=150 > /dev/servoblaster" )
xdeg = 150
ydeg = 150
# Set up the camera
camera = PiCamera()
camera.resolution = ( 320, 240 )
camera.framerate = 60
rawCapture = PiRGBArray( camera, size=( 320, 240 ) )
# Load a cascade file for detecting faces
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
t_start = time.time()
fps = 0
### Main
######################################################################
# Capture frames from the camera
for frame in camera.capture_continuous( rawCapture, format="bgr",use_video_port=True ):
image = frame.array
# Use the cascade file we loaded to detect faces
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
faces = face_cascade.detectMultiScale( gray )
print("Found " + str( len( faces ) ) + " face(s)")
# Draw a rectangle around every face and move the motor towards the face
for ( x, y, w, h ) in faces:
cv2.rectangle( image, ( x, y ), ( x + w, y + h ), ( 100, 255, 100 ), 2 )
cv2.putText( image, "Face No." + str( len( faces ) ), ( x, y ),cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )
tx = x + w/2
ty = y + h/2
if ( cx - tx > 10 and xdeg <= 190 ):
xdeg += 3
os.system( "echo 0=" + str( xdeg ) + " > /dev/servoblaster" )
elif ( cx - tx < -10 and xdeg >= 110 ):
xdeg -= 3
os.system( "echo 0=" + str( xdeg ) + " > /dev/servoblaster" )
if ( cy - ty > 10 and ydeg >= 110 ):
ydeg -= 3
os.system( "echo 1=" + str( ydeg ) + " > /dev/servoblaster" )
elif ( cy - ty < -10 and ydeg <= 190 ):
ydeg += 3
os.system( "echo 1=" + str( ydeg ) + " > /dev/servoblaster" )
# Calculate and show the FPS
fps = fps + 1
sfps = fps / ( time.time() - t_start )
cv2.putText( image, "FPS : " + str( int( sfps ) ), ( 10, 10 ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )
# Show the frame
cv2.imshow( "Frame", image )
cv2.waitKey( 1 )
# Clear the stream in preparation for the next frame
rawCapture.truncate( 0 ) | [
6738,
8301,
18144,
13,
18747,
1330,
13993,
36982,
19182,
198,
6738,
8301,
18144,
1330,
13993,
35632,
198,
11748,
640,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
198,
2,
3337,
22715,
198,
66,
87,
796,
13454,
198,
948,
796,
7982,
... | 2.331355 | 1,011 |
'''subsector.py - subsector'''
from traveller_utils.util import Die
from traveller_utils.ct.planet import Planet
D6 = Die(6)
class Subsector():
''' Subsector - collection of hexes, possibly containing systems
'''
def generate(self, dm: int=0):
''' Generate subsector contents
- dm: modifier to D6 roll for contents
'''
for hex_id in self.hexes.keys():
roll = D6.roll(1, int(dm))
if roll >= 4:
self.hexes[hex_id] = Planet()
| [
7061,
6,
7266,
34914,
13,
9078,
532,
850,
34914,
7061,
6,
198,
198,
6738,
49130,
62,
26791,
13,
22602,
1330,
6733,
198,
6738,
49130,
62,
26791,
13,
310,
13,
47427,
1330,
11397,
198,
198,
35,
21,
796,
6733,
7,
21,
8,
628,
198,
4871... | 2.293333 | 225 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'PriorityEnum' : _MetaInfoEnum('PriorityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper',
{
'critical':'critical',
'high':'high',
'medium':'medium',
'low':'low',
'very-low':'very_low',
}, 'Cisco-IOS-XR-infra-rsi-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper']),
'SourceEnum' : _MetaInfoEnum('SourceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper',
{
'configured':'configured',
'from-group':'from_group',
'inherited':'inherited',
'from-optical':'from_optical',
'configured-and-notified':'configured_and_notified',
'from-group-and-notified':'from_group_and_notified',
'inherited-and-notified':'inherited_and_notified',
'from-optical-and-notified':'from_optical_and_notified',
}, 'Cisco-IOS-XR-infra-rsi-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper']),
'VrfGroup.Nodes.Node.Groups.Group.Vrf' : {
'meta_info' : _MetaInfoClass('VrfGroup.Nodes.Node.Groups.Group.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'VrfGroup.Nodes.Node.Groups.Group' : {
'meta_info' : _MetaInfoClass('VrfGroup.Nodes.Node.Groups.Group',
False,
[
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Group name
''',
'group_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('forward-reference', ATTRIBUTE, 'bool' , None, None,
[], [],
''' VRF group not present but used
''',
'forward_reference',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('vr-fs', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of VRFs in this VRF group
''',
'vr_fs',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'VrfGroup.Nodes.Node.Groups.Group.Vrf',
[], [],
''' VRF group's VRF
''',
'vrf',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'VrfGroup.Nodes.Node.Groups' : {
'meta_info' : _MetaInfoClass('VrfGroup.Nodes.Node.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'VrfGroup.Nodes.Node.Groups.Group',
[], [],
''' Group details
''',
'group',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'VrfGroup.Nodes.Node' : {
'meta_info' : _MetaInfoClass('VrfGroup.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node
''',
'node_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'VrfGroup.Nodes.Node.Groups',
[], [],
''' Group operational data
''',
'groups',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'VrfGroup.Nodes' : {
'meta_info' : _MetaInfoClass('VrfGroup.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'VrfGroup.Nodes.Node',
[], [],
''' Node details
''',
'node',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'VrfGroup' : {
'meta_info' : _MetaInfoClass('VrfGroup',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'VrfGroup.Nodes',
[], [],
''' Node operational data
''',
'nodes',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'vrf-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.SrlgMaps.SrlgMap' : {
'meta_info' : _MetaInfoClass('Srlg.SrlgMaps.SrlgMap',
False,
[
_MetaInfoClassMember('srlg-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' SRLG name
''',
'srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('srlg-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' SRLG name
''',
'srlg_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-map',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.SrlgMaps' : {
'meta_info' : _MetaInfoClass('Srlg.SrlgMaps',
False,
[
_MetaInfoClassMember('srlg-map', REFERENCE_LIST, 'SrlgMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.SrlgMaps.SrlgMap',
[], [],
''' Configured SRLG name details
''',
'srlg_map',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-maps',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.SrlgMaps.SrlgMap' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.SrlgMaps.SrlgMap',
False,
[
_MetaInfoClassMember('srlg-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' SRLG name
''',
'srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('srlg-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' SRLG name
''',
'srlg_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-map',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.SrlgMaps' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.SrlgMaps',
False,
[
_MetaInfoClassMember('srlg-map', REFERENCE_LIST, 'SrlgMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.SrlgMaps.SrlgMap',
[], [],
''' Configured SRLG name details
''',
'srlg_map',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-maps',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.Groups.Group.SrlgAttribute' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.Groups.Group.SrlgAttribute',
False,
[
_MetaInfoClassMember('priority', REFERENCE_ENUM_CLASS, 'PriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'PriorityEnum',
[], [],
''' Priority
''',
'priority',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Index
''',
'srlg_index',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-attribute',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.Groups.Group' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.Groups.Group',
False,
[
_MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Group name
''',
'group_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('group-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Group name
''',
'group_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('group-values', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Group values
''',
'group_values',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-attribute', REFERENCE_LIST, 'SrlgAttribute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.Groups.Group.SrlgAttribute',
[], [],
''' SRLG attribute
''',
'srlg_attribute',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.Groups' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.Groups.Group',
[], [],
''' SRLG group details
''',
'group',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InheritNodes.InheritNode.SrlgAttribute' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InheritNodes.InheritNode.SrlgAttribute',
False,
[
_MetaInfoClassMember('priority', REFERENCE_ENUM_CLASS, 'PriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'PriorityEnum',
[], [],
''' Priority
''',
'priority',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Index
''',
'srlg_index',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-attribute',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InheritNodes.InheritNode' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InheritNodes.InheritNode',
False,
[
_MetaInfoClassMember('inherit-node-name', ATTRIBUTE, 'str' , None, None,
[], ['((([a-zA-Z0-9_]*\\d+)|(\\*))/){2}(([a-zA-Z0-9_]*\\d+)|(\\*))'],
''' Inherit node
''',
'inherit_node_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Inherit node name
''',
'node_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('node-values', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Node values
''',
'node_values',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-attribute', REFERENCE_LIST, 'SrlgAttribute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InheritNodes.InheritNode.SrlgAttribute',
[], [],
''' SRLG attribute
''',
'srlg_attribute',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'inherit-node',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InheritNodes' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InheritNodes',
False,
[
_MetaInfoClassMember('inherit-node', REFERENCE_LIST, 'InheritNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InheritNodes.InheritNode',
[], [],
''' SRLG inherit location details
''',
'inherit_node',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'inherit-nodes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('interface-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('registrations', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Registrations
''',
'registrations',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', REFERENCE_LEAFLIST, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG values
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('value-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Values
''',
'value_count',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.Interfaces' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.Interfaces.Interface',
[], [],
''' SRLG interface summary
''',
'interface',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail.SrlgAttribute' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail.SrlgAttribute',
False,
[
_MetaInfoClassMember('priority', REFERENCE_ENUM_CLASS, 'PriorityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'PriorityEnum',
[], [],
''' Priority
''',
'priority',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('source', REFERENCE_ENUM_CLASS, 'SourceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'SourceEnum',
[], [],
''' Source
''',
'source',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('source-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source name
''',
'source_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Index
''',
'srlg_index',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-attribute',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('groups', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Groups
''',
'groups',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('nodes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Nodes
''',
'nodes',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-attribute', REFERENCE_LIST, 'SrlgAttribute' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail.SrlgAttribute',
[], [],
''' SRLG attributes
''',
'srlg_attribute',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-detail',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceDetails' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceDetails',
False,
[
_MetaInfoClassMember('interface-detail', REFERENCE_LIST, 'InterfaceDetail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail',
[], [],
''' SRLG interface details
''',
'interface_detail',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-details',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.SrlgValues.SrlgValue' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.SrlgValues.SrlgValue',
False,
[
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' SRLG value
''',
'value',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('interface-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-value',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.SrlgValues' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.SrlgValues',
False,
[
_MetaInfoClassMember('srlg-value', REFERENCE_LIST, 'SrlgValue' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.SrlgValues.SrlgValue',
[], [],
''' Configured SRLG value details
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg-values',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName.Interfaces' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName.Interfaces',
False,
[
_MetaInfoClassMember('interface-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName',
False,
[
_MetaInfoClassMember('srlg-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' SRLG name
''',
'srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName.Interfaces',
[], [],
''' Interfaces information
''',
'interfaces',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' SRLG name
''',
'srlg_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-srlg-name',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node.InterfaceSrlgNames' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node.InterfaceSrlgNames',
False,
[
_MetaInfoClassMember('interface-srlg-name', REFERENCE_LIST, 'InterfaceSrlgName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName',
[], [],
''' Configured SRLG name details
''',
'interface_srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-srlg-names',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node
''',
'node_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.Groups',
[], [],
''' Set of Groups configured for SRLG
''',
'groups',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('inherit-nodes', REFERENCE_CLASS, 'InheritNodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InheritNodes',
[], [],
''' Set of inherit locations configured for SRLG
''',
'inherit_nodes',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('interface-details', REFERENCE_CLASS, 'InterfaceDetails' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceDetails',
[], [],
''' Set of interfaces configured for SRLG
''',
'interface_details',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('interface-srlg-names', REFERENCE_CLASS, 'InterfaceSrlgNames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.InterfaceSrlgNames',
[], [],
''' Set of SRLG names configured
''',
'interface_srlg_names',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.Interfaces',
[], [],
''' Set of interfaces configured for SRLG
''',
'interfaces',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-maps', REFERENCE_CLASS, 'SrlgMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.SrlgMaps',
[], [],
''' Set of SRLG name, value maps configured
''',
'srlg_maps',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-values', REFERENCE_CLASS, 'SrlgValues' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node.SrlgValues',
[], [],
''' Set of SRLG values configured
''',
'srlg_values',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.Nodes' : {
'meta_info' : _MetaInfoClass('Srlg.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes.Node',
[], [],
''' RSI SRLG operational data
''',
'node',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.InterfaceSrlgNames.InterfaceSrlgName.Interfaces' : {
'meta_info' : _MetaInfoClass('Srlg.InterfaceSrlgNames.InterfaceSrlgName.Interfaces',
False,
[
_MetaInfoClassMember('interface-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.InterfaceSrlgNames.InterfaceSrlgName' : {
'meta_info' : _MetaInfoClass('Srlg.InterfaceSrlgNames.InterfaceSrlgName',
False,
[
_MetaInfoClassMember('srlg-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' SRLG name
''',
'srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', True),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.InterfaceSrlgNames.InterfaceSrlgName.Interfaces',
[], [],
''' Interfaces information
''',
'interfaces',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' SRLG name
''',
'srlg_name_xr',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SRLG value
''',
'srlg_value',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-srlg-name',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg.InterfaceSrlgNames' : {
'meta_info' : _MetaInfoClass('Srlg.InterfaceSrlgNames',
False,
[
_MetaInfoClassMember('interface-srlg-name', REFERENCE_LIST, 'InterfaceSrlgName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.InterfaceSrlgNames.InterfaceSrlgName',
[], [],
''' Configured SRLG name details
''',
'interface_srlg_name',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'interface-srlg-names',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'Srlg' : {
'meta_info' : _MetaInfoClass('Srlg',
False,
[
_MetaInfoClassMember('interface-srlg-names', REFERENCE_CLASS, 'InterfaceSrlgNames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.InterfaceSrlgNames',
[], [],
''' Set of SRLG names configured
''',
'interface_srlg_names',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.Nodes',
[], [],
''' RSI SRLG operational data
''',
'nodes',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('srlg-maps', REFERENCE_CLASS, 'SrlgMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'Srlg.SrlgMaps',
[], [],
''' Set of SRLG name, value maps configured
''',
'srlg_maps',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'srlg',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'SelectiveVrfDownload.State' : {
'meta_info' : _MetaInfoClass('SelectiveVrfDownload.State',
False,
[
_MetaInfoClassMember('is-svd-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is SVD Enabled Operational
''',
'is_svd_enabled',
'Cisco-IOS-XR-infra-rsi-oper', False),
_MetaInfoClassMember('is-svd-enabled-cfg', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is SVD Enabled Config
''',
'is_svd_enabled_cfg',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'state',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
'SelectiveVrfDownload' : {
'meta_info' : _MetaInfoClass('SelectiveVrfDownload',
False,
[
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper', 'SelectiveVrfDownload.State',
[], [],
''' Selective VRF Download feature state details
''',
'state',
'Cisco-IOS-XR-infra-rsi-oper', False),
],
'Cisco-IOS-XR-infra-rsi-oper',
'selective-vrf-download',
_yang_ns._namespaces['Cisco-IOS-XR-infra-rsi-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_rsi_oper'
),
},
}
_meta_table['VrfGroup.Nodes.Node.Groups.Group.Vrf']['meta_info'].parent =_meta_table['VrfGroup.Nodes.Node.Groups.Group']['meta_info']
_meta_table['VrfGroup.Nodes.Node.Groups.Group']['meta_info'].parent =_meta_table['VrfGroup.Nodes.Node.Groups']['meta_info']
_meta_table['VrfGroup.Nodes.Node.Groups']['meta_info'].parent =_meta_table['VrfGroup.Nodes.Node']['meta_info']
_meta_table['VrfGroup.Nodes.Node']['meta_info'].parent =_meta_table['VrfGroup.Nodes']['meta_info']
_meta_table['VrfGroup.Nodes']['meta_info'].parent =_meta_table['VrfGroup']['meta_info']
_meta_table['Srlg.SrlgMaps.SrlgMap']['meta_info'].parent =_meta_table['Srlg.SrlgMaps']['meta_info']
_meta_table['Srlg.Nodes.Node.SrlgMaps.SrlgMap']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.SrlgMaps']['meta_info']
_meta_table['Srlg.Nodes.Node.Groups.Group.SrlgAttribute']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.Groups.Group']['meta_info']
_meta_table['Srlg.Nodes.Node.Groups.Group']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.Groups']['meta_info']
_meta_table['Srlg.Nodes.Node.InheritNodes.InheritNode.SrlgAttribute']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InheritNodes.InheritNode']['meta_info']
_meta_table['Srlg.Nodes.Node.InheritNodes.InheritNode']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InheritNodes']['meta_info']
_meta_table['Srlg.Nodes.Node.Interfaces.Interface']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.Interfaces']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail.SrlgAttribute']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceDetails.InterfaceDetail']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InterfaceDetails']['meta_info']
_meta_table['Srlg.Nodes.Node.SrlgValues.SrlgValue']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.SrlgValues']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName.Interfaces']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceSrlgNames.InterfaceSrlgName']['meta_info'].parent =_meta_table['Srlg.Nodes.Node.InterfaceSrlgNames']['meta_info']
_meta_table['Srlg.Nodes.Node.SrlgMaps']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.Groups']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.InheritNodes']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.Interfaces']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceDetails']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.SrlgValues']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node.InterfaceSrlgNames']['meta_info'].parent =_meta_table['Srlg.Nodes.Node']['meta_info']
_meta_table['Srlg.Nodes.Node']['meta_info'].parent =_meta_table['Srlg.Nodes']['meta_info']
_meta_table['Srlg.InterfaceSrlgNames.InterfaceSrlgName.Interfaces']['meta_info'].parent =_meta_table['Srlg.InterfaceSrlgNames.InterfaceSrlgName']['meta_info']
_meta_table['Srlg.InterfaceSrlgNames.InterfaceSrlgName']['meta_info'].parent =_meta_table['Srlg.InterfaceSrlgNames']['meta_info']
_meta_table['Srlg.SrlgMaps']['meta_info'].parent =_meta_table['Srlg']['meta_info']
_meta_table['Srlg.Nodes']['meta_info'].parent =_meta_table['Srlg']['meta_info']
_meta_table['Srlg.InterfaceSrlgNames']['meta_info'].parent =_meta_table['Srlg']['meta_info']
_meta_table['SelectiveVrfDownload.State']['meta_info'].parent =_meta_table['SelectiveVrfDownload']['meta_info']
| [
628,
198,
11748,
302,
198,
11748,
17268,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
331,
34388,
13557,
7295,
13557,
36020,
62,
28961,
62,
10951,
1330,
4808,
48526,
12360,
9487,
27608,
11,
4808,
48526,
12360,
9487,
11,
4808,
... | 1.654113 | 26,234 |
import machine
import dht
if __name__ == "__main__":
sensor = Sensor(13)
| [
11748,
4572,
198,
11748,
288,
4352,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
12694,
796,
35367,
7,
1485,
8,
198
] | 2.724138 | 29 |
from SmellDetector import AbsSmellDectector, ModSmellDectector, HieSmellDectector, DepSmellDectector, EncSmellDectector
| [
6738,
2439,
695,
11242,
9250,
1330,
13051,
7556,
695,
35,
478,
9250,
11,
3401,
7556,
695,
35,
478,
9250,
11,
367,
494,
7556,
695,
35,
478,
9250,
11,
2129,
7556,
695,
35,
478,
9250,
11,
14711,
7556,
695,
35,
478,
9250,
628
] | 2.880952 | 42 |
from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import desaturate, iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid
class _StripPlotter(_CategoricalPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
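# Illustrative sketch (added for exposition; not part of seaborn). The jitterer
# built in _StripPlotter.__init__ is just a frozen uniform sampler centred on
# zero; the helper below reproduces it for a given half-width.
def _example_strip_jitter(n_points=5, jlim=.1):
    """Draw `n_points` jitter offsets in [-jlim, jlim], as the strip plot does."""
    jitterer = stats.uniform(-jlim, jlim * 2).rvs
    return jitterer(n_points)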
class _BarPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with bars."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation, errcolor):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, saturation)
self.estimate_statistic(estimator, ci, n_boot)
self.errcolor = errcolor
def draw_bars(self, ax, kws):
"""Draw the bars onto `ax`."""
# Get the right matplotlib function depending on the orientation
barfunc = ax.bar if self.orient == "v" else ax.barh
barpos = np.arange(len(self.statistic))
if self.plot_hues is None:
# Draw the bars
barfunc(barpos, self.statistic, self.width,
color=self.colors, align="center", **kws)
# Draw the confidence intervals
errcolors = [self.errcolor] * len(barpos)
self.draw_confints(ax, barpos, self.confint, errcolors)
else:
for j, hue_level in enumerate(self.hue_names):
# Draw the bars
offpos = barpos + self.hue_offsets[j]
barfunc(offpos, self.statistic[:, j], self.nested_width,
color=self.colors[j], align="center",
label=hue_level, **kws)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.errcolor] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors)
def plot(self, ax, bar_kws):
"""Make the plot."""
self.draw_bars(ax, bar_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with (joined) points."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, 1)
self.estimate_statistic(estimator, ci, n_boot)
# Override the default palette for single-color plots
if hue is None and color is None and palette is None:
self.colors = [color_palette()[0]] * len(self.colors)
# Don't join single-layer plots with different colors
if hue is None and palette is not None:
join = False
# Use a good default for `dodge=True`
if dodge is True and self.hue_names is not None:
dodge = .025 * len(self.hue_names)
# Make sure we have a marker for each hue level
if isinstance(markers, string_types):
markers = [markers] * len(self.colors)
self.markers = markers
# Make sure we have a line style for each hue level
if isinstance(linestyles, string_types):
linestyles = [linestyles] * len(self.colors)
self.linestyles = linestyles
# Set the other plot components
self.dodge = dodge
self.join = join
self.scale = scale
@property
def hue_offsets(self):
"""Offsets relative to the center position for each hue level."""
offset = np.linspace(0, self.dodge, len(self.hue_names))
offset -= offset.mean()
return offset
def draw_points(self, ax):
"""Draw the main data components of the plot."""
# Get the center positions on the categorical axis
pointpos = np.arange(len(self.statistic))
# Get the size of the plot elements
lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
mew = lw * .75
markersize = np.pi * np.square(lw) * 2
if self.plot_hues is None:
# Draw lines joining each estimate point
if self.join:
color = self.colors[0]
ls = self.linestyles[0]
if self.orient == "h":
ax.plot(self.statistic, pointpos,
color=color, ls=ls, lw=lw)
else:
ax.plot(pointpos, self.statistic,
color=color, ls=ls, lw=lw)
# Draw the confidence intervals
self.draw_confints(ax, pointpos, self.confint, self.colors, lw=lw)
# Draw the estimate points
marker = self.markers[0]
if self.orient == "h":
ax.scatter(self.statistic, pointpos,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
ax.scatter(pointpos, self.statistic,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
# Determine the values to plot for this level
statistic = self.statistic[:, j]
# Determine the position on the categorical and z axes
offpos = pointpos + offsets[j]
z = j + 1
# Draw lines joining each estimate point
if self.join:
color = self.colors[j]
ls = self.linestyles[j]
if self.orient == "h":
ax.plot(statistic, offpos, color=color,
zorder=z, ls=ls, lw=lw)
else:
ax.plot(offpos, statistic, color=color,
zorder=z, ls=ls, lw=lw)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.colors[j]] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors,
zorder=z, lw=lw)
# Draw the estimate points
marker = self.markers[j]
if self.orient == "h":
ax.scatter(statistic, offpos, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
else:
ax.scatter(offpos, statistic, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
def plot(self, ax):
"""Make the plot."""
self.draw_points(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
_categorical_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- Vectors of data represented as lists, numpy arrays, or pandas Series
objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DataFrame, such that each numeric column will be plotted.
- Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors)
In most cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
input_params=dedent("""\
x, y, hue : names of variables in ``data`` or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.\
"""),
string_input_params=dedent("""\
x, y, hue : names of variables in ``data``
Inputs for plotting long-form data. See examples for interpretation.\
"""),
data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.\
"""),
long_form_data=dedent("""\
data : DataFrame
Long-form (tidy) dataset for plotting. Each column should correspond
to a variable, and each row should correspond to an observation.\
"""),
order_vars=dedent("""\
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
stat_api_params=dedent("""\
estimator : callable that maps vector -> scalar, optional
Statistical function to estimate within each categorical bin.
ci : float or None, optional
Size of confidence intervals to draw around estimated values. If
``None``, no bootstrapping will be performed, and error bars will
not be drawn.
n_boot : int, optional
Number of bootstrap iterations to use when computing confidence
intervals.
units : name of variable in ``data`` or vector data, optional
Identifier of sampling units, which will be used to perform a
multilevel bootstrap and account for repeated measures design.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This is usually
inferred from the dtype of the input variables, but can be used to
specify when the "categorical" variable is a numeric or when plotting
wide-form data.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable. If the palette is a dictionary, keys should be names of
levels and values should be matplotlib colors.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the boxplot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
        in conjunction with other plots to show each observation.\
"""),
barplot=dedent("""\
barplot : Show point estimates and confidence intervals using bars.\
"""),
countplot=dedent("""\
countplot : Show the counts of observations in each categorical bin.\
"""),
pointplot=dedent("""\
pointplot : Show point estimates and confidence intervals using scatterplot
glyphs.\
"""),
factorplot=dedent("""\
    factorplot : Combine categorical plots and a :class:`FacetGrid`.\
"""),
)
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips.sort("size"))
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a box plot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Control violin order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
Control violin order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins:
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violin plot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True)
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Control strip order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips.sort("size"))
Control strip order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
>>> ax = sns.stripplot(x="tip", y="day", data=tips, jitter=True)
Draw strips of observations on top of a violin plot:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_categorical_docs)
barplot.__doc__ = dedent("""\
Show point estimates and confidence intervals as rectangular bars.
A bar plot represents an estimate of central tendency for a numeric
variable with the height of each rectangle and provides some indication of
the uncertainty around that estimate using error bars. Bar plots include 0
in the quantitative axis range, and they are a good choice when 0 is a
meaningful value for the quantitative variable, and you want to make
comparisons against it.
For datasets where 0 is not a meaningful value, a point plot will allow you
to focus on differences between levels of one or more categorical
variables.
It is also important to keep in mind that a bar plot shows only the mean
(or other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
{stat_api_params}
{orient}
{color}
{palette}
{saturation}
errcolor : matplotlib color
Color for the lines that represent the confidence interval.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.bar`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{countplot}
{pointplot}
{factorplot}
Examples
--------
Draw a set of vertical bar plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
Draw a set of horizontal bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="tip", y="day", data=tips)
Control bar order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips.sort("size"))
Control bar order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
Use a different color palette for the bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... palette="Blues_d")
Plot all bars in a single color:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... color="salmon", saturation=.5)
Use ``plt.bar`` keyword arguments to further change the aesthetic:
.. plot::
:context: close-figs
>>> ax = sns.barplot("day", "total_bill", data=tips,
... linewidth=2.5, facecolor=(1, 1, 1, 0),
... errcolor=".2", edgecolor=".2")
""").format(**_categorical_docs)
pointplot.__doc__ = dedent("""\
Show point estimates and confidence intervals using scatter plot glyphs.
A point plot represents an estimate of central tendency for a numeric
variable by the position of scatter plot points and provides some
indication of the uncertainty around that estimate using error bars.
Point plots can be more useful than bar plots for focusing comparisons
between different levels of one or more categorical variables. They are
particularly adept at showing interactions: how the relationship between
levels of one categorical variable changes across levels of a second
categorical variable. The lines that join each point from the same ``hue``
level allow interactions to be judged by differences in slope, which is
easier for the eyes than comparing the heights of several groups of points
or bars.
It is important to keep in mind that a point plot shows only the mean (or
other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
{stat_api_params}
markers : string or list of strings, optional
Markers to use for each of the ``hue`` levels.
linestyles : string or list of strings, optional
Line styles to use for each of the ``hue`` levels.
dodge : bool or float, optional
Amount to separate the points for each level of the ``hue`` variable
along the categorical axis.
join : bool, optional
If ``True``, lines will be drawn between point estimates at the same
``hue`` level.
scale : float, optional
Scale factor for the plot elements.
{orient}
{color}
{palette}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Draw a set of vertical point plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips)
Separate the points for different hue levels along the categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, dodge=True)
Use a different marker and line style for the hue levels:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips,
... markers=["o", "x"],
... linestyles=["-", "--"])
Draw a set of horizontal points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips)
Don't draw a line connecting each point:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
Use a different color for a single-layer plot:
.. plot::
:context: close-figs
>>> ax = sns.pointplot("time", y="total_bill", data=tips,
... color="#bb3f3f")
Use a different color palette for the points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, palette="Set2")
Control point order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips.sort("size"))
Control point order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
""").format(**_categorical_docs)
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
    cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
col_wrap : int, optional
"Wrap" the column facets at this number so that they occupy multiple
rows. Can be useful when using a variable with a large number of
levels. Cannot be used with a ``row`` variable.
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
size : float, optional
The size (height) of each facet, in inches.
aspect : float, optional
The aspect ratio of the plot, ``size * aspect`` gives the width of each
facet, in inches.
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
legend_out : bool, optional
        If ``True``, draw the legend outside of the plot axes.
sharex, sharey : bool, optional
        If ``True``, the axes are shared across the rows and columns of the
grid.
margin_titles : bool, optional
If ``True``, the titles for the row variable are drawn to the right of
the last column. This option is experimental and may not work in all
cases.
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : FacetGrid
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
11748,
7577,
893,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
... | 2.392268 | 19,943 |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import importlib
from pyhocon import ConfigFactory, ConfigTree
from typing import Any, Dict, Iterator, Optional
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.models.dashboard.dashboard_last_modified import DashboardLastModifiedTimestamp
from databuilder.models.dashboard.dashboard_owner import DashboardOwner
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.models.dashboard.dashboard_table import DashboardTable
from databuilder.models.dashboard.dashboard_chart import DashboardChart
from databuilder.models.table_metadata import TableMetadata
from databuilder.extractor.base_extractor import Extractor
from databuilder.rest_api.rest_api_query import RestApiQuery
from databuilder.rest_api.base_rest_api_query import EmptyRestApiQuerySeed
from databuilder.extractor.restapi.rest_api_extractor import RestAPIExtractor, REST_API_QUERY
from databuilder.extractor.dashboard.redash.redash_dashboard_utils import \
get_auth_headers, get_text_widgets, get_visualization_widgets, sort_widgets, \
generate_dashboard_description, RedashPaginatedRestApiQuery
from databuilder.transformer.base_transformer import ChainedTransformer
from databuilder.transformer.timestamp_string_to_epoch import TimestampStringToEpoch, FIELD_NAME as TS_FIELD_NAME
class TableRelationData:
"""
This is sort of like a stripped down version of `TableMetadata`.
It is used as the type returned by the (optional) table parser.
"""
@property
class RedashDashboardExtractor(Extractor):
"""
An extractor for retrieving dashboards and associated queries
(and possibly tables) from Redash.
There are five configuration values:
- `redash_base_url`: (e.g., `https://redash.example.com`) Base URL for the user-facing
Redash application
- `api_base_url`: (e.g., `https://redash.example.com/api`) Base URL for the API
- `api_key`: Redash API key
- (optional) `cluster`: A cluster name for this Redash instance (defaults to `prod`)
- (optional) `table_parser`: A function `(RedashVisualizationWidget) -> List[TableRelationData]`.
Given a `RedashVisualizationWidget`, this should return a list of potentially related tables
in Amundsen. Any table returned that exists in Amundsen will be linked to the dashboard.
Any table that does not exist will be ignored.
"""
REDASH_BASE_URL_KEY = 'redash_base_url'
API_BASE_URL_KEY = 'api_base_url'
API_KEY_KEY = 'api_key'
CLUSTER_KEY = 'cluster' # optional config
TABLE_PARSER_KEY = 'table_parser' # optional config
DEFAULT_CLUSTER = 'prod'
PRODUCT = 'redash'
DASHBOARD_GROUP_ID = 'redash'
DASHBOARD_GROUP_NAME = 'Redash'
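# --- Illustrative configuration sketch (not part of the original module) ---
# The docstring above lists five configuration values; a minimal, assumed
# example is shown below. The key names come from the class constants, but the
# config scope used when wiring the extractor into a databuilder job, and the
# TableRelationData constructor arguments, are assumptions.
#
#   def sample_table_parser(viz_widget):
#       # hypothetical: map a RedashVisualizationWidget to candidate Amundsen tables
#       return [TableRelationData(database='postgres', cluster='prod',
#                                 schema='public', name='orders')]
#
#   redash_conf = ConfigFactory.from_dict({
#       RedashDashboardExtractor.REDASH_BASE_URL_KEY: 'https://redash.example.com',
#       RedashDashboardExtractor.API_BASE_URL_KEY: 'https://redash.example.com/api',
#       RedashDashboardExtractor.API_KEY_KEY: '<redash-api-key>',
#       RedashDashboardExtractor.CLUSTER_KEY: 'prod',                    # optional
#       RedashDashboardExtractor.TABLE_PARSER_KEY: sample_table_parser,  # optional
#   })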
| [
2,
15069,
25767,
669,
284,
262,
1703,
917,
6248,
1628,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
1330,
8019,
198,
198,
6738,
12972,
71,
36221,
1330,
17056,
22810,
11,
17056,
27660... | 3.060606 | 924 |
# DIT-NR-NTT & DIF-RN-INTT & precompute
import random
import math
from scipy import poly1d
q = 12289
kesai = 7
inv_kesai = pow(kesai,2047) % q
kesai4_1 = pow(inv_kesai,1536) % q
print("kesai4_1 = ",kesai4_1)
print(pow(inv_kesai,1024) % q)
w = 49
inv_w = 1254
n_inv = 12277
w4 = [[],[],[],[]]
for j in range(4):
for k in range(4):
w4[j].insert(k,int(pow(w,k*256*j % 1024)%q))
print("w4_1 = ",w4[1][1])
inv_w4 = [[],[],[],[]]
for j in range(4):
for k in range(4):
inv_w4[j].insert(k,int(pow(inv_w,k*256*j % 1024)%q))
print("inv_w4_1 = ",inv_w4[1][1])
warray1 = [7143, 3542, 10643, 9744, 3195, 2319, 9088, 11334, 5086, 3091, 9326, 7969, 9238, 4846, 4805, 9154, 11227, 11112, 12149, 10654, 7678, 2401, 390, 7188, 8456, 1017, 27, 1632, 8526, 354, 5012, 9377, 2859, 1537, 9611, 4714, 5019, 2166, 12129, 12176, 12286, 3364, 9442, 4057, 2174, 3636, 10863, 5291, 1663, 7247, 5195, 4053, 7394, 1002, 7313, 5088, 8509, 11224, 1168, 11885, 11082, 9852, 9723, 6022, 6250, 493, 7952, 6845, 1378, 9042, 9919, 8311, 5332, 9890, 9289, 7098, 3016, 1630, 11136, 5407, 10040, 6730, 3985, 10111, 3531, 7, 3154, 845, 3285, 3120, 8348, 6203, 3536, 216, 767, 6763, 10076, 3229, 1282, 10583, 2021, 8820, 4693, 7846, 9996, 11009, 11385, 12265, 6742, 1802, 7878, 5103, 1223, 881, 5461, 1015, 2637, 3944, 2171, 5604, 11024, 9348, 3837, 6627, 3221, 9344, 9057, 2633, 4855, 4050, 11309, 844, 4590, 4684, 7302, 7154, 3670, 5618, 5043, 5789, 3090, 578, 7628, 11839, 9667, 3065, 6389, 6586, 7570, 343, 7078, 4538, 1208, 5412, 3515, 9011, 1218, 10584, 716, 11873, 2164, 10753, 1373, 2429, 717, 2065, 8755, 3495, 10533, 11014, 4860, 11113, 10844, 2275, 5063, 4267, 10771, 6302, 9520, 579, 6323, 8921, 8067, 4238, 11749, 3359, 3678, 5209, 10361, 3163, 1389, 6127, 4404, 1826, 1136, 4489, 3708, 8314, 1417, 6454, 7784, 4924, 1327, 1014, 3942, 3744, 5102, 2528, 6701, 2717, 5836, 3200, 2260, 4518, 2730, 1160, 10036, 7119, 189, 11424, 10526, 2478, 10506, 4194, 7724, 10759, 5832, 8420, 10555, 2873, 11169, 11498, 12268, 11259, 4649, 3821, 2929, 874, 2307, 170, 11641, 1573, 11787, 3793, 2602, 7014, 2035, 11038, 10407, 4834, 8176, 9461, 3840, 7519, 6616, 5287, 6883, 3451, 6508, 11048, 9646, 1849, 7988, 9021, 457, 7785, 3578, 530, 8823, 11410, 4218, 982, 8835, 10243, 3317, 9332, 139, 180, 10880, 7684, 204, 4739, 9261, 6771, 11925, 10821, 10945, 8882, 9806, 11053, 3121, 7043, 1057, 5598, 6565, 10397, 11260, 10975, 6599, 2894, 8342, 5959, 2442, 8330, 5115, 3343, 12269, 1522, 4608, 11883, 1403, 146, 6094, 3375, 7376, 8896, 3825, 12050, 4670, 994, 5464, 9342, 11667, 636, 5672, 4578, 10453, 11914, 10104, 506, 3276, 1392, 2212, 6085, 10058, 11251, 2800, 10347, 2776, 2575, 6811]
warray2 = [10810, 10984, 5736, 722, 8155, 7468, 9664, 2639, 11340, 5728, 5023, 7698, 5828, 11726, 9283, 9314, 9545, 8961, 7311, 6512, 1351, 1260, 4632, 4388, 6534, 2013, 729, 9000, 3241, 2426, 1428, 334, 1696, 2881, 7197, 3284, 10200, 9447, 1022, 480, 9, 10616, 6958, 4278, 7300, 9821, 5791, 339, 544, 8112, 1381, 8705, 9764, 8595, 10530, 7110, 8582, 3637, 145, 3459, 6747, 3382, 9741, 11934, 8058, 9558, 7399, 8357, 6378, 11336, 827, 8541, 5767, 3949, 4452, 8993, 2396, 2476, 2197, 118, 7222, 7935, 2837, 130, 6915, 49, 5915, 1263, 1483, 1512, 10474, 350, 5383, 9789, 10706, 10800, 6347, 5369, 9087, 10232, 4493, 3030, 2361, 4115, 10446, 3963, 6142, 576, 9842, 2908, 3434, 218, 8760, 1954, 9407, 10238, 10484, 9551, 6554, 6421, 2655, 10314, 347, 8532, 2925, 9280, 174, 1693, 723, 8974, 1858, 11863, 4754, 3991, 9522, 8320, 156, 3772, 5908, 418, 11836, 2281, 10258, 5876, 5333, 5429, 7552, 7515, 1293, 7048, 8120, 9369, 9162, 5057, 4780, 4698, 8844, 6821, 8807, 1010, 787, 12097, 4912, 1321, 10240, 12231, 3532, 12048, 11286, 3477, 142, 6608, 11184, 1956, 11404, 7280, 6281, 9445, 11314, 3438, 4212, 677, 6234, 6415, 8953, 1579, 9784, 11858, 5906, 1323, 12237, 9523, 3174, 3957, 151, 9450, 10162, 9260, 4782, 6695, 5886, 11868, 3602, 8209, 6068, 8076, 2302, 504, 11684, 8689, 6077, 3263, 7665, 295, 5766, 6099, 652, 325, 11143, 10885, 11341, 8273, 8527, 4077, 9370, 5990, 8561, 1159, 8240, 8210, 922, 11231, 441, 4046, 9139, 709, 1319, 1958, 1112, 4322, 2078, 4240, 6224, 8719, 11454, 3329, 12121, 4298, 2692, 6167, 7105, 9734, 11089, 5961, 10327, 7183, 1594, 1360, 6170, 3956, 5297, 2459, 3656, 683, 12225, 9166, 9235, 10542, 6803, 10723, 9341, 5782, 9786, 7856, 3834, 6370, 7032, 7822, 6752, 7500, 4749, 6118, 1190, 8471, 9606, 4449, 12142, 6833, 8500, 3860, 7753, 5445, 11239, 654, 1702, 3565, 1987, 6136, 6874, 6427, 8646, 6760, 3199, 5206, 12233, 4948, 400, 6152, 10561, 5079, 2169, 9027, 11767, 11011, 1973, 9945, 6715, 7965, 8214, 4916, 5315, 8775, 5925, 11248, 11271, 5339, 3710, 5446, 6093, 10256, 3879, 8291, 1922, 468, 316, 8301, 11907, 10930, 973, 6854, 11035]
warray3 = [4043, 10643, 8785, 5860, 2545, 3091, 9238, 11289, 2963, 9088, 11119, 10963, 955, 12149, 8034, 11563, 1635, 9154, 8736, 7443, 1062, 2166, 12286, 7370, 160, 7247, 7394, 2645, 7094, 10863, 4938, 10512, 6998, 4057, 7875, 8925, 10115, 1017, 8526, 7205, 12262, 390, 442, 3778, 5101, 9611, 242, 11744, 7575, 9377, 9808, 11935, 9430, 9890, 3016, 9153, 3000, 9919, 9603, 3510, 3978, 3985, 420, 476, 2178, 5407, 9405, 10659, 2249, 9852, 6250, 2987, 2566, 1168, 2143, 3248, 404, 5088, 10682, 11287, 3780, 6845, 11854, 11796, 10911, 343, 1208, 10381, 5211, 10753, 717, 8186, 10916, 716, 2450, 6873, 416, 9011, 11851, 6877, 11071, 8314, 7784, 3087, 10872, 2717, 2260, 10754, 6453, 5102, 4963, 6444, 9761, 1014, 3607, 7365, 8347, 3359, 10361, 1092, 8611, 8067, 4227, 12164, 8051, 1136, 2926, 9051, 7800, 6127, 10221, 9126, 7885, 2275, 10771, 5653, 7226, 4860, 5508, 11158, 1176, 3495, 3961, 10224, 1756, 579, 3114, 5987, 5966, 8820, 9996, 8871, 7596, 881, 2637, 10362, 6828, 7878, 1555, 9955, 7186, 12265, 9804, 1280, 5547, 3120, 3536, 5646, 3941, 3154, 1936, 7929, 11444, 1282, 4730, 9457, 1706, 6763, 8484, 12073, 2213, 5618, 3090, 3502, 7246, 7302, 3360, 3808, 5135, 6389, 1506, 11538, 5703, 11839, 11779, 11711, 2622, 9344, 4855, 1406, 3232, 3837, 11722, 4273, 5662, 5604, 8809, 8345, 1265, 844, 11607, 8239, 7699, 5598, 11260, 8665, 5724, 3343, 4608, 10138, 20, 2442, 10141, 4939, 3959, 2894, 9834, 1314, 3947, 4739, 11925, 1226, 3028, 10880, 4138, 5509, 4605, 3121, 9272, 9689, 5246, 8882, 9247, 1468, 2483, 506, 2212, 5784, 9013, 10453, 377, 11897, 375, 2776, 8881, 3511, 9714, 11251, 6197, 6204, 9489, 12050, 5464, 4554, 7619, 7376, 9998, 8054, 3393, 146, 1804, 406, 6195, 636, 10552, 2947, 6617, 7014, 10407, 6879, 10254, 3451, 9646, 4378, 5781, 6616, 944, 7624, 7002, 9461, 72, 7455, 8449, 11259, 2929, 1681, 7640, 11169, 2827, 6481, 791, 11787, 8443, 10388, 8496, 170, 4289, 11415, 648, 2478, 7724, 3019, 1783, 189, 2672, 2209, 865, 1160, 5411, 7771, 2253, 8420, 1350, 1530, 1734, 11410, 8835, 10013, 8071, 3578, 778, 1701, 11759, 9021, 7766, 10440, 11832, 9332, 9757, 2046, 12150]
a = []
for i in range(1024):
a.append(i)
b = [0]*1024
b[0] = 1
#a = reverse(a)
#b = reverse(b)
ffta = DIT_NTT(a,kesai)
print("ffta = ",ffta)
#iffta = re_DIF_INTT(ffta,inv_kesai)
#print("iffta = ",iffta)
fftb = DIT_NTT(b,kesai)
print("fftb = ",fftb)
c = pwm(ffta,fftb)
ifftc = DIF_INTT(c,kesai)
#ifft = []
#for i in ifftc:
# ifft.append((i * n_inv) % 12289)
print("ifftc = ",ifftc)
| [
2,
360,
2043,
12,
24723,
12,
11251,
51,
1222,
360,
5064,
12,
42336,
12,
12394,
51,
1222,
662,
5589,
1133,
201,
198,
11748,
4738,
201,
198,
11748,
10688,
201,
198,
6738,
629,
541,
88,
1330,
7514,
16,
67,
201,
198,
201,
198,
80,
796... | 1.992659 | 3,678 |
# Generated by Django 2.2.13 on 2020-11-23 18:09
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1485,
319,
12131,
12,
1157,
12,
1954,
1248,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
from download_data import get_outdoor_seatings_for_country
country_name = 'Ukraine'
get_outdoor_seatings_for_country(country_name)
| [
6738,
4321,
62,
7890,
1330,
651,
62,
448,
9424,
62,
24073,
654,
62,
1640,
62,
19315,
198,
198,
19315,
62,
3672,
796,
705,
44814,
6,
198,
1136,
62,
448,
9424,
62,
24073,
654,
62,
1640,
62,
19315,
7,
19315,
62,
3672,
8,
198
] | 3.069767 | 43 |
import logging
from flask_babel import lazy_gettext
from .jsontools import dict_to_json
from .widgets import ChartWidget, DirectChartWidget
from ..baseviews import BaseModelView, expose
from ..models.group import DirectProcessData, GroupByProcessData
from ..security.decorators import has_access
from ..urltools import get_filter_args
from ..widgets import SearchWidget
log = logging.getLogger(__name__)
class BaseChartView(BaseModelView):
"""
This is the base class for all chart views.
Use DirectByChartView or GroupByChartView, override their properties
and their base classes
(BaseView, BaseModelView, BaseChartView) to customise your charts
"""
chart_template = "appbuilder/general/charts/chart.html"
""" The chart template, override to implement your own """
chart_widget = ChartWidget
""" Chart widget override to implement your own """
search_widget = SearchWidget
""" Search widget override to implement your own """
chart_title = "Chart"
""" A title to be displayed on the chart """
title = "Title"
group_by_label = lazy_gettext("Group by")
""" The label that is displayed for the chart selection """
default_view = "chart"
chart_type = "PieChart"
""" The chart type PieChart, ColumnChart, LineChart """
chart_3d = "true"
""" Will display in 3D? """
width = 400
""" The width """
height = "400px"
group_bys = {}
""" New for 0.6.4, on test, don't use yet """
def _get_view_widget(self, **kwargs):
"""
:return:
Returns a widget
"""
return self._get_chart_widget(**kwargs).get("chart")
class DirectByChartView(GroupByChartView):
"""
Use this class to display charts with multiple series,
based on columns or methods defined on models.
You can display multiple charts on the same view.
Default routing point is '/chart'
Setup definitions property to configure the chart
:label: (optional) String label to display on chart selection.
:group: String with the column name or method from model.
:formatter: (optional) function that formats the output of 'group' key
:series: A list of tuples with the aggregation function and the column name
to apply the aggregation
The **definitions** property respects the following grammar::
definitions = [
{
'label': 'label for chart definition',
'group': '<COLNAME>'|'<MODEL FUNCNAME>',
'formatter': <FUNC FORMATTER FOR GROUP COL>,
'series': ['<COLNAME>'|'<MODEL FUNCNAME>',...]
}, ...
]
example::
class CountryDirectChartView(DirectByChartView):
datamodel = SQLAInterface(CountryStats)
chart_title = 'Direct Data Example'
definitions = [
{
'label': 'Unemployment',
'group': 'stat_date',
'series': ['unemployed_perc',
'college_perc']
}
]
"""
ProcessClass = DirectProcessData
# -------------------------------------------------------
# DEPRECATED SECTION
# -------------------------------------------------------
class ChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple (and hopefully nice) way to draw charts on your application.
This will show Google Charts based on group by of your tables.
"""
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
class TimeChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple way to draw some time charts on your application.
This will show Google Charts based on count and group
by month and year for your tables.
"""
chart_template = "appbuilder/general/charts/chart_time.html"
chart_type = "ColumnChart"
@expose("/chart/<group_by>/<period>")
@expose("/chart/")
@has_access
class DirectChartView(BaseSimpleDirectChartView): # pragma: no cover
"""
**DEPRECATED**
This class is responsible for displaying a Google chart with
direct model values. Chart widget uses json.
No group by is processed, example::
class StatsChartView(DirectChartView):
datamodel = SQLAInterface(Stats)
chart_title = lazy_gettext('Statistics')
direct_columns = {'Some Stats': ('X_col_1', 'stat_col_1', 'stat_col_2'),
'Other Stats': ('X_col2', 'stat_col_3')}
"""
chart_type = "ColumnChart"
chart_widget = DirectChartWidget
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
| [
11748,
18931,
198,
198,
6738,
42903,
62,
65,
9608,
1330,
16931,
62,
1136,
5239,
198,
198,
6738,
764,
8457,
756,
10141,
1330,
8633,
62,
1462,
62,
17752,
198,
6738,
764,
28029,
11407,
1330,
22086,
38300,
11,
4128,
45488,
38300,
198,
6738,... | 2.51634 | 1,989 |
import pandas as pd
import numpy as np
from IPython.display import Markdown, display
class MissingDataImputer_Numerical:
'''
Various imputation methods available in this module are:
Mean, Median, Mode, User define value, Random Sample distribution
Parameters:
-----------
Allowed values for
method : 'mean', 'median', 'mode', 'custom_value', 'random'
value : if method ='custom_value' then user can pass on the imputation value in this parameter
add_indicator : True / False. If True then a new binary variable will be created of the name "var_nan"
which will take value 1 if there's a missing value in var or
0 if there's no missing value in var
'''
def __init__ (self, method, add_indicator = True, value=None, random_state =1):
'''
Parameters:
-----------
method : strategy for imputation like 'mean', 'median', 'mode', 'custom_value', 'random'
value : if method ='custom_value' then user can pass on the imputation value in this parameter
add_indicator : True / False. If True then a new binary variable will be created of the name "var_nan"
which will take value 1 if there's a missing value in var or
'''
self.method = method
self.value = value
self.random_state = random_state
self.add_indicator = add_indicator
        self.variables = None  # populated later in fit()
def fit (self, df, variables):
'''
The fit function imputes the dataset for the missing values based on the strateghy or method used and stores it into a
dictionary 'param_dict_' variable
Parameters:
----------
df : df defines the dataset
variables : set of categorical columns to be imputed
Returns :
--------
returns all the imputed values
'''
self.variables = variables
if self.method =='mean':
self.param_dict_ = df[self.variables].mean().to_dict()
if self.method =='median':
self.param_dict_ = df[self.variables].median().to_dict()
if self.method =='mode':
            self.param_dict_ = df[self.variables].mode().iloc[0].to_dict()  # iloc[0]: one modal value per column
if self.method =='UB' or self.method =='ub' or self.method =='upper_bound':
            self.param_dict_ = df[self.variables].mode().iloc[0].to_dict()  # placeholder: a true upper-bound rule is not implemented (cf. __get_upper_bound__)
if self.method =='custom_value':
            if self.value is None:
raise ValueError("for 'custom_value' method provide a valid value in the 'value' parameter")
else:
self.param_dict_ = {var:self.value for var in variables}
if self.method =='random':
None
return self
def transform(self, df):
'''
The transform function applies the changes to the data after the fit function imputation is made in a dictionary variable
Parameters:
-----------
df : df defines the dataset
Return :
-------
returns the dataset after apply of imputed values
'''
        if self.add_indicator == True:
            # add one missing-value indicator column per variable before imputing
            for var in self.variables:
                df[var + '_nan'] = np.where(df[var].isnull(), 1, 0)
if self.method == 'random':
df = self.__random_imputer__(df)
else:
for var in self.param_dict_:
df[var].fillna(self.param_dict_[var] , inplace=True)
return df
def __random_imputer__(self, df):
'''
This function is for random imputation of the missing values
Parameters:
----------
df : df defines the dataset
Return:
------
This function returns the dataset after the ramdom imputation.
'''
for var in self.variables:
if df[var].isnull().sum()>0:
# number of data point to extract at random
n_samples = df[var].isnull().sum()
#extract values
random_sample = df[var].dropna().sample(n_samples, random_state=self.random_state)
# re-index for pandas so that missing values are filled in the correct observations
random_sample.index = df[df[var].isnull()].index
# replace na
df.loc[df[var].isnull(), var] = random_sample
return df
def __get_upper_bound__(self, df):
'''
This function is to obtain the upper bound or threshold values
Parameters:
----------
df : df defines the dataset
Return:
------
This function does not return any value
'''
for var in self.variables:
            pass  # placeholder: upper-bound computation is not implemented
return None
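# --- Illustrative usage sketch (not part of the original module) ---
# Assumed example of the fit/transform flow described in the class docstring;
# the DataFrame and column names are invented for illustration.
#
#   df = pd.DataFrame({'age': [25, np.nan, 40, 31], 'income': [50, 60, np.nan, 55]})
#   num_imputer = MissingDataImputer_Numerical(method='mean', add_indicator=True)
#   num_imputer.fit(df, variables=['age', 'income'])
#   df = num_imputer.transform(df)  # NaNs replaced by column means; adds age_nan / income_nan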
class MissingDataImputer_Categorical:
'''
Various imputation methods available in this module are:
Mean, Median, Mode, User define value, Random Sample distribution
Parameters:
----------
Allowed values for
method : 'frequent', 'custom_value', 'random'
value : if method ='custom_value' then user can pass on the imputation value in this parameter
add_indicator : True / False. If True then a new binary variable will be created of the name "var_nan"
which will take value 1 if there's a missing value in var or
0 if there's no missing value in var
    '''
    def __init__ (self, method, add_indicator = True, value=None, random_state =1):
        # assumed constructor (not present in the original snippet), mirroring
        # MissingDataImputer_Numerical so the class is usable on its own
        self.method = method
        self.value = value
        self.random_state = random_state
        self.add_indicator = add_indicator
        self.variables = None

    def fit (self, df, variables):
'''
The fit function imputes the dataset for the missing values based on the strategy or method used and stores it into a
dictionary 'param_dict_' variable
Parameters:
-----------
df : df defines the dataset
variables : list of variables to be imputed
Return :
-------
returns the self variables after imputing the values
'''
        self.variables = variables
        if self.method =='frequent':
self.param_dict_ = {}
for var in variables:
value = df[var].mode()
# Careful : because some variable can have multiple modes
if len(value) ==1:
self.param_dict_[var] = value[0]
else:
raise ValueError(f'Variable {var} contains multiple frequent categories')
if self.method =='custom_value':
            if self.value is None:
                raise ValueError("for 'custom_value' method provide a valid value in the 'value' parameter")
            else:
                self.param_dict_ = {var:self.value for var in variables}
if self.method =='random':
None
return self
def transform(self, df):
'''
The transform function applies the changes to the data after the fit function imputation is made in a dictionary variable
Parameters:
-----------
df : df defines the dataset
Return :
-------
returns the dataset after apply of imputed values
'''
if self.method == 'random':
df = self.__random_imputer__(df)
else:
for var in self.param_dict_:
# Add indicator
if self.add_indicator == True:
df[var + '_nan'] = np.where(df[var].isnull(), 1, 0)
# impute missing values
df[var].fillna(self.param_dict_[var] , inplace=True)
return df
def __random_imputer__(self, df):
'''
This function is for random imputation of the missing values
Parameters:
----------
df : df defines the dataset
Return:
------
This function returns the dataset after the ramdom imputation.
'''
for var in self.variables:
if df[var].isnull().sum()>0:
# number of data point to extract at random
n_samples = df[var].isnull().sum()
#extract values
random_sample = df[var].dropna().sample(n_samples, random_state=self.random_state)
# re-index for pandas so that missing values are filled in the correct observations
random_sample.index = df[df[var].isnull()].index
# add missing indicator
if self.add_indicator == True:
df[var + '_nan'] = np.where(df[var].isnull(), 1, 0)
# replace na
df.loc[df[var].isnull(), var] = random_sample
return df | [
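# --- Illustrative usage sketch (not part of the original module) ---
# Assumed example for the categorical imputer; column names are invented.
#
#   df = pd.DataFrame({'city': ['Oslo', None, 'Oslo', 'Bergen']})
#   cat_imputer = MissingDataImputer_Categorical(method='frequent', add_indicator=True)
#   cat_imputer.fit(df, variables=['city'])
#   df = cat_imputer.transform(df)  # missing city filled with the most frequent value; adds city_nan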
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6101,
7535,
13,
13812,
1330,
2940,
2902,
11,
3359,
628,
628,
198,
4871,
25639,
6601,
3546,
10549,
62,
45,
6975,
605,
25,
198,
220,
220,
220,
705,
7... | 2.022971 | 4,658 |
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2,
the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the
even-valued terms.
Answer: 4613732
"""
if __name__ == '__main__':
from .evaluate import Watchdog
with Watchdog() as wd:
result = wd.evaluate_range(sum_even_fibonacci_numbers, answers={10: 10, 100: 44, 4000000: 4613732})
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
5450,
1378,
16302,
68,
18173,
13,
3262,
14,
45573,
28,
17,
198,
10871,
649,
3381,
287,
262,
... | 2.866359 | 217 |
#!/usr/bin/env python3
import logging
import os
from glob import glob
from pathlib import Path
import pandas as pd
import yaml
from tensorflow.keras.preprocessing.image import (DirectoryIterator,
ImageDataGenerator)
from src.data import load_params
# set tf warning options
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def create(
input_dir,
output_dir,
output_filename="mapfile_df.csv",
na_rep="nan",
segmentation=False,
save_format=".png",
):
"""Runs data processing scripts to accept a directory INPUT_DIR containing
sub-folders of images with one sub-folder per class and create a CSV file
mapping the image filepath to integer class labels, which is saved in
OUTPUT_DIR.
"""
    # resolve relative paths and ensure directories are uniformly of type Path()
input_dir = Path(input_dir).resolve()
output_dir = Path(output_dir).resolve()
logger = logging.getLogger(__name__)
logger.info("Creating mapfile_df from directory structure")
if segmentation:
filepaths, labels = segmentation_dir(input_dir, save_format=save_format)
else:
filepaths, labels = classification_dir(input_dir, output_dir)
# create mapfile_df with a list of 0: relative filepath and 1:class labels
if filepaths:
mapfile_df = pd.DataFrame({"filename": filepaths, "class": labels})
# save output mapfile_df
mapfile_df.to_csv(
output_dir.joinpath(output_filename),
sep=",",
na_rep=na_rep,
)
return mapfile_df
else:
# raise error if dir_iterator is empty
logging.error(f"{len(filepaths)} files found in directory:\n\t{str(input_dir)}")
return None
# raise ValueError(f"No subdirectories with images identified in:\t{input_dir}")
def segmentation_dir(input_dir, save_format=".png"):
"""Subfunction to glob image data and masks from input_dir and return
lists of the filepaths to images and masks."""
# set vars
image_prefix = "data"
mask_prefix = "mask"
input_dir = Path(input_dir)
for subdir in [image_prefix, mask_prefix]:
assert input_dir.joinpath(subdir).exists(), NotADirectoryError(
f"Expected subdirectory name: {subdir}"
)
# glob image filepaths in folder 'data'
image_filepaths = []
mask_filepaths = []
# set save_format to list
if type(save_format) is str:
save_format = [save_format]
# glob using multiple file types, if necessary
for file_ext in save_format:
image_dir = input_dir.joinpath(image_prefix, "*" + file_ext)
image_filepaths += sorted(glob(str(image_dir)))
# glob image filepaths in folder 'data'
mask_dir = input_dir.joinpath(mask_prefix, "*" + file_ext)
mask_filepaths += sorted(glob(str(mask_dir)))
# TODO - check EXIF to ensure all are grayscale
# sort
image_filepaths = sorted(image_filepaths)
mask_filepaths = sorted(mask_filepaths)
assert len(image_filepaths) == len(mask_filepaths), ValueError(
f"List of filepaths for images {len(image_filepaths)} and "
f"masks {len(mask_filepaths)} must have the same length"
)
return image_filepaths, mask_filepaths
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
15095,
1330,
15095,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
331,
43695,
198,
6738,... | 2.532667 | 1,301 |
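create() above delegates the non-segmentation case to classification_dir(), which is not included in this row. Based on the docstring (one sub-folder per class, mapped to integer labels), a hypothetical helper with that behaviour could look like the following; the real function also receives output_dir and may differ in its details:

from glob import glob
from pathlib import Path

def classification_dir_sketch(input_dir, save_format=(".png", ".jpg")):
    """Return (filepaths, labels) for a directory holding one sub-folder per class."""
    input_dir = Path(input_dir)
    class_dirs = sorted(p for p in input_dir.iterdir() if p.is_dir())
    filepaths, labels = [], []
    for label, class_dir in enumerate(class_dirs):
        for file_ext in save_format:
            for fname in sorted(glob(str(class_dir.joinpath("*" + file_ext)))):
                filepaths.append(fname)
                labels.append(label)
    return filepaths, labels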
# Distance
km=int(input())
print('{} minutos'.format(km*2)) | [
2,
4307,
22940,
10782,
544,
198,
198,
13276,
28,
600,
7,
15414,
28955,
198,
198,
4798,
10786,
90,
92,
949,
315,
418,
4458,
18982,
7,
13276,
9,
17,
4008
] | 2.137931 | 29 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of nautilus-pdf-tools
#
# Copyright (c) 2012 Lorenzo Carbonell Cerezo <a.k.a. atareao>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gi
try:
gi.require_version('Gtk', '3.0')
except ValueError as e:
print(e)
exit(1)
from gi.repository import Gtk
from basicdialog import BasicDialog
from comun import _
if __name__ == '__main__':
dialog = ReduceDialog('Test', None)
dialog.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
299,
2306,
35815,
12,
12315,
12,
31391,
198,
2,
198,
2,
15069,
357,
66,
... | 3.334081 | 446 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from heat.engine import template
PARAMETER_KEYS = (
TYPE, DEFAULT, NO_ECHO, VALUES, PATTERN,
MAX_LENGTH, MIN_LENGTH, MAX_VALUE, MIN_VALUE,
DESCRIPTION, CONSTRAINT_DESCRIPTION
) = (
'Type', 'Default', 'NoEcho', 'AllowedValues', 'AllowedPattern',
'MaxLength', 'MinLength', 'MaxValue', 'MinValue',
'Description', 'ConstraintDescription'
)
PARAMETER_TYPES = (
STRING, NUMBER, COMMA_DELIMITED_LIST
) = (
'String', 'Number', 'CommaDelimitedList'
)
PSEUDO_PARAMETERS = (
PARAM_STACK_ID, PARAM_STACK_NAME, PARAM_REGION
) = (
'AWS::StackId', 'AWS::StackName', 'AWS::Region'
)
class Parameter(object):
'''A template parameter.'''
def __new__(cls, name, schema, value=None):
'''Create a new Parameter of the appropriate type.'''
if cls is not Parameter:
return super(Parameter, cls).__new__(cls)
param_type = schema[TYPE]
if param_type == STRING:
ParamClass = StringParam
elif param_type == NUMBER:
ParamClass = NumberParam
elif param_type == COMMA_DELIMITED_LIST:
ParamClass = CommaDelimitedListParam
else:
raise ValueError('Invalid Parameter type "%s"' % param_type)
return ParamClass(name, schema, value)
def __init__(self, name, schema, value=None):
'''
Initialise the Parameter with a name, schema and optional user-supplied
value.
'''
self.name = name
self.schema = schema
self.user_value = value
self._constraint_error = self.schema.get(CONSTRAINT_DESCRIPTION)
if self.has_default():
self._validate(self.default())
if self.user_value is not None:
self._validate(self.user_value)
def value(self):
'''Get the parameter value, optionally sanitising it for output.'''
if self.user_value is not None:
return self.user_value
if self.has_default():
return self.default()
raise KeyError('Missing parameter %s' % self.name)
def no_echo(self):
'''
Return whether the parameter should be sanitised in any output to
the user.
'''
return self.schema.get(NO_ECHO, 'false').lower() == 'true'
def description(self):
'''Return the description of the parameter.'''
return self.schema.get(DESCRIPTION, '')
def has_default(self):
'''Return whether the parameter has a default value.'''
return DEFAULT in self.schema
def default(self):
'''Return the default value of the parameter.'''
return self.schema.get(DEFAULT)
def __str__(self):
'''Return a string representation of the parameter'''
value = self.value()
if self.no_echo():
return '******'
else:
return value
class NumberParam(Parameter):
'''A template parameter of type "Number".'''
@staticmethod
def str_to_num(s):
'''Convert a string to an integer (if possible) or float.'''
try:
return int(s)
except ValueError:
return float(s)
def _validate(self, value):
'''Check that the supplied value is compatible with the constraints.'''
num = self.str_to_num(value)
minn = self.str_to_num(self.schema.get(MIN_VALUE, value))
maxn = self.str_to_num(self.schema.get(MAX_VALUE, value))
if num > maxn or num < minn:
raise ValueError(self._error_msg('%s is out of range' % value))
Parameter._validate(self, value)
def __int__(self):
'''Return an integer representation of the parameter'''
return int(self.value())
def __float__(self):
'''Return a float representation of the parameter'''
return float(self.value())
class StringParam(Parameter):
'''A template parameter of type "String".'''
def _validate(self, value):
'''Check that the supplied value is compatible with the constraints'''
if not isinstance(value, basestring):
raise ValueError(self._error_msg('value must be a string'))
length = len(value)
if MAX_LENGTH in self.schema:
max_length = int(self.schema[MAX_LENGTH])
if length > max_length:
message = 'length (%d) overflows %s %s' % (length,
MAX_LENGTH,
max_length)
raise ValueError(self._error_msg(message))
if MIN_LENGTH in self.schema:
min_length = int(self.schema[MIN_LENGTH])
if length < min_length:
message = 'length (%d) underflows %s %d' % (length,
MIN_LENGTH,
min_length)
raise ValueError(self._error_msg(message))
if PATTERN in self.schema:
pattern = self.schema[PATTERN]
match = re.match(pattern, value)
if match is None or match.end() != length:
message = '"%s" does not match %s "%s"' % (value,
PATTERN,
pattern)
raise ValueError(self._error_msg(message))
Parameter._validate(self, value)
class CommaDelimitedListParam(Parameter, collections.Sequence):
'''A template parameter of type "CommaDelimitedList".'''
def _validate(self, value):
'''Check that the supplied value is compatible with the constraints'''
try:
sp = value.split(',')
except AttributeError:
raise ValueError('Value must be a comma-delimited list string')
for li in self:
Parameter._validate(self, li)
def __len__(self):
'''Return the length of the list'''
return len(self.value().split(','))
def __getitem__(self, index):
'''Return an item from the list'''
return self.value().split(',')[index]
class Parameters(collections.Mapping):
'''
The parameters of a stack, with type checking, defaults &c. specified by
the stack's template.
'''
def __init__(self, stack_name, tmpl, user_params={}, stack_id=None):
'''
Create the parameter container for a stack from the stack name and
template, optionally setting the user-supplied parameter values.
'''
self.params = dict((p.name, p) for p in parameters())
def __contains__(self, key):
'''Return whether the specified parameter exists'''
return key in self.params
def __iter__(self):
'''Return an iterator over the parameter names.'''
return iter(self.params)
def __len__(self):
'''Return the number of parameters defined'''
return len(self.params)
def __getitem__(self, key):
'''Get a parameter value.'''
return self.params[key].value()
def map(self, func, filter_func=lambda p: True):
'''
Map the supplied filter function onto each Parameter (with an
optional filter function) and return the resulting dictionary.
'''
return dict((n, func(p))
for n, p in self.params.iteritems() if filter_func(p))
def user_parameters(self):
'''
Return a dictionary of all the parameters passed in by the user
'''
return self.map(lambda p: p.user_value,
lambda p: p.user_value is not None)
def set_stack_id(self, stack_id):
'''
Set the AWS::StackId pseudo parameter value
'''
self.params[PARAM_STACK_ID].schema[DEFAULT] = stack_id
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
2... | 2.283787 | 3,707 |
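The Parameter hierarchy above is easiest to read with a concrete schema. The example below runs against the classes defined above; the values are invented, and it assumes the base-class Parameter._validate method (part of the full heat source, but not included in this row) is available:

schema = {'Type': 'Number', 'Default': '42', 'MinValue': '0', 'MaxValue': '100'}
p = Parameter('InstanceCount', schema, value='7')   # __new__ dispatches on the 'Type' key
assert isinstance(p, NumberParam)
assert int(p) == 7                                  # the user-supplied value wins over the default
assert str(Parameter('InstanceCount', schema)) == '42'   # with no user value, the default is used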
"""Class for making calls to the Archiver Appliance Rest API."""
import requests
import aa
class AaRestClient(object):
"""Class used for making calls to the AA Rest API."""
def _construct_url(self, command, **kwargs):
"""Construct the appropriate URL for the AA Rest API.
Args:
command: AA Rest API command
kwargs: any parameters used in the URL
"""
url = 'http://{}/mgmt/bpl/{}'.format(self._hostname, command)
if kwargs:
k, v = kwargs.popitem()
url += '?{}={}'.format(k, str(v))
for k, v in kwargs.items():
url += '&{}={}'.format(k, str(v))
return url
def _rest_get(self, command, **kwargs):
"""Construct appropriate URL and call GET.
Args:
command: AA Rest API command
kwargs: any parameters used in the URL
Returns:
parsed JSON objects
"""
url = self._construct_url(command, **kwargs)
response = requests.get(url)
response.raise_for_status()
return response.json()
def _rest_post(self, command, payload, headers, **kwargs):
"""Construct and POST payload to appropriate URL.
Args:
command: AA Rest API command
payload: appropriate payload for POST
headers: HTTP headers including appropriate MIME-TYPE
kwargs: any parameters used in the URL
Returns:
parsed JSON objects
"""
url = self._construct_url(command, **kwargs)
response = requests.post(url, payload, headers=headers)
return response.json()
| [
37811,
9487,
329,
1642,
3848,
284,
262,
5579,
1428,
39100,
3610,
8324,
7824,
526,
15931,
198,
11748,
7007,
198,
198,
11748,
257,
64,
628,
198,
4871,
317,
64,
19452,
11792,
7,
15252,
2599,
198,
220,
220,
220,
37227,
9487,
973,
329,
164... | 2.322222 | 720 |
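A short illustration of how _construct_url assembles a management URL. The class __init__ is not shown in this row, so the instance is assembled by hand, and the hostname and endpoint below are examples only:

client = AaRestClient.__new__(AaRestClient)          # bypass the __init__ that is not shown here
client._hostname = "archappl.example.org:17665"
url = client._construct_url("getAllPVs", pv="SR*")
assert url == "http://archappl.example.org:17665/mgmt/bpl/getAllPVs?pv=SR*"

With several keyword arguments the parameter order depends on dict.popitem(), and values are not URL-encoded; urllib.parse.urlencode would handle both concerns if that ever matters.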
from django.core.management.base import BaseCommand, CommandError
from signup.models import create_or_update_signup
from signup.models import add_user_to_global_list
from sequence import models as sequence_model
from signup import db
import datetime
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
1051,
929,
13,
27530,
1330,
2251,
62,
273,
62,
19119,
62,
12683,
929,
198,
6738,
1051,
929,
13,
27530,
1330,
751,
62,
7220,
62,
1... | 3.705882 | 68 |
# Numerical features that are marked as continuous
INT_FEATURES = ['user_id', 'timestamp']
# Feature that can be grouped into buckets
FLOAT_FEATURES = ['x-acc', 'y-acc', 'z-acc']
# Feature that the model will predict
LABEL_KEY = 'activity'
# Utility function for renaming the feature
| [
198,
2,
399,
6975,
605,
3033,
326,
389,
7498,
355,
12948,
198,
12394,
62,
15112,
47471,
796,
37250,
7220,
62,
312,
3256,
705,
16514,
27823,
20520,
198,
198,
2,
27018,
326,
460,
307,
32824,
656,
38674,
198,
3697,
46,
1404,
62,
15112,
... | 3.272727 | 88 |
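The row above ends right after the comment announcing a renaming utility, so the function body is not visible. In TFX preprocessing modules that utility is conventionally a one-liner that appends a suffix to a transformed feature's name; the sketch below follows that convention and is an assumption, not the project's actual code:

def transformed_name(key):
    # Conventional TFX-style helper: mark features produced by the Transform component
    return key + '_xf'

assert transformed_name('x-acc') == 'x-acc_xf'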
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trinoadmin.util.exception import ExceptionWithCause, \
ConfigFileNotFoundError
import pickle
import re
from unittest import TestCase
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.646154 | 195 |
import RPi.GPIO as GPIO
import time
import os
# Set up the GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
print("l : left")
print("r : right")
print("m : middle")
print("t : test")
print("q : exit")
while True:
Servo = GPIO.PWM(11, 50)
Servo.start(2.5)
    selection = input("Selection: ")
    if(selection == "t"):
Servo.ChangeDutyCycle(7.5)
time.sleep(1)
Servo.ChangeDutyCycle(12.5)
time.sleep(1)
Servo.ChangeDutyCycle(2.5)
time.sleep(1)
Servo.stop()
# right
    if(selection == "r"):
        steps = input("steps (1 - 10): ")
stepslength = 12.5 / int(steps)
for Counter in range(int(steps)):
Servo.ChangeDutyCycle(stepslength * (Counter + 1))
            print(stepslength * (Counter + 1))
time.sleep(0.5)
time.sleep(1)
Servo.stop()
    elif(selection == "m"):
Servo.start(7.5)
time.sleep(1)
Servo.stop()
# move to the left
    elif(selection == "l"):
        Servo.start(12.5)
        # how many steps...
        steps = input("steps (1 - 10): ")
stepslength = 12.5 / int(steps)
for Counter in range(int(steps)):
Servo.ChangeDutyCycle(12.5 - (stepslength * (Counter + 1)))
time.sleep(0.5)
time.sleep(1)
Servo.stop()
    elif(selection == "q"):
os._exit(1)
Servo.stop()
GPIO.cleanup()
else:
        print("Input not recognized")
| [
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
11748,
28686,
198,
198,
2,
5345,
510,
262,
50143,
198,
16960,
9399,
13,
2617,
40539,
654,
7,
25101,
8,
198,
16960,
9399,
13,
2617,
14171,
7,
16960,
9399,
13,
8202,
... | 2.09772 | 614 |
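The script above drives the servo with hard-coded duty cycles (roughly 2.5 % at one end stop, 7.5 % at centre and 12.5 % at the other end stop, at 50 Hz). The general angle-to-duty-cycle conversion behind those numbers can be written as a small helper; the end-stop values below are typical but vary between servos:

def angle_to_duty_cycle(angle_deg, min_duty=2.5, max_duty=12.5, max_angle=180.0):
    """Map a servo angle in [0, max_angle] degrees onto a PWM duty cycle in percent."""
    angle_deg = max(0.0, min(max_angle, angle_deg))
    return min_duty + (max_duty - min_duty) * angle_deg / max_angle

assert angle_to_duty_cycle(0) == 2.5
assert angle_to_duty_cycle(90) == 7.5
assert angle_to_duty_cycle(180) == 12.5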
"""
Treemap builder using pylab.
Uses algorithm straight from http://hcil.cs.umd.edu/trs/91-03/91-03.html
James Casbon 29/7/2006
"""
import pylab
from matplotlib.patches import Rectangle
from functools import reduce
if __name__ == '__main__':
# example using nested lists, iter to walk and random colors
size_cache = {}
import random
tree= ((5,(3,5)), 4, (5,2,(2,3,(3,2,2)),(3,3)), (3,2) )
Treemap(tree, iter, size, random_color)
pylab.show()
| [
37811,
198,
31055,
368,
499,
27098,
1262,
279,
2645,
397,
13,
198,
198,
5842,
274,
11862,
3892,
422,
2638,
1378,
71,
2856,
13,
6359,
13,
388,
67,
13,
15532,
14,
2213,
82,
14,
6420,
12,
3070,
14,
6420,
12,
3070,
13,
6494,
198,
198,... | 2.453608 | 194 |
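The __main__ example above refers to size and random_color helpers (and a Treemap class) whose definitions are not visible in this row. Sketches consistent with how they are used, nested tuples of numbers and one colour per rectangle, might look like this; they are assumptions, not the original recipe's code:

import random

size_cache = {}

def size(thing):
    """A number is its own size; a nested tuple's size is the sum of its children."""
    if isinstance(thing, (int, float)):
        return thing
    if id(thing) not in size_cache:
        size_cache[id(thing)] = sum(size(child) for child in thing)
    return size_cache[id(thing)]

def random_color(thing):
    """A random RGB tuple for each rectangle."""
    return (random.random(), random.random(), random.random())

assert size((5, (3, 5))) == 13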
from scapy.all import *
from ccsds_base import CCSDSPacket
class HS_HK_TLM_PKT_TlmPkt(Packet):
"""Housekeeping Packet Structure
app = HS
command = HK_TLM_PKT
msg_id = HS_HK_TLM_MID = 0x08ad = 0x0800 + 0x0ad
"""
name = "HS_HK_TLM_PKT_TlmPkt"
fields_desc = [
# APPEND_ITEM CMD_VALID_COUNT 8 UINT "HS Application Command Counter."
ByteField("CMD_VALID_COUNT", 0),
# APPEND_ITEM CMD_ERROR_COUNT 8 UINT "HS Application Command Error Counter."
ByteField("CMD_ERROR_COUNT", 0),
# APPEND_ITEM CURRENTAPPMONSTATE 8 UINT "Status of HS Critical Application Monitor."
ByteField("CURRENTAPPMONSTATE", 0),
# APPEND_ITEM CURRENTEVENTMONSTATE 8 UINT "Status of HS Critical Events Monitor."
ByteField("CURRENTEVENTMONSTATE", 0),
# APPEND_ITEM CURRENTALIVENESSSTATE 8 UINT "Status of HS Aliveness Indicator."
ByteField("CURRENTALIVENESSSTATE", 0),
# APPEND_ITEM CURRENTCPUHOGSTATE 8 UINT "Status of HS Hogging Indicator."
ByteField("CURRENTCPUHOGSTATE", 0),
# APPEND_ITEM STATUSFLAGS 8 UINT "Internal HS Error States."
ByteField("STATUSFLAGS", 0),
# APPEND_ITEM SPAREBYTES 8 UINT "Alignment Spares."
ByteField("SPAREBYTES", 0),
# APPEND_ITEM RESETSPERFORMED 16 UINT "HS Performed Processor Reset Count."
ShortField("RESETSPERFORMED", 0),
# APPEND_ITEM MAXRESETS 16 UINT "HS Maximum Processor Reset Count."
ShortField("MAXRESETS", 0),
# APPEND_ITEM EVENTSMONITOREDCOUNT 32 UINT "Total count of Event Messages Monitored by the Critical Events Monitor."
IntField("EVENTSMONITOREDCOUNT", 0),
# APPEND_ITEM INVALIDEVENTMONCOUNT 32 UINT "Total count of Invalid Event Monitors Monitored by the Critical Events Monitor."
IntField("INVALIDEVENTMONCOUNT", 0),
# APPEND_ARRAY_ITEM APPMONENABLES 32 UINT 32 "Enable states of App Monitor Entries."
StrFixedLenField("APPMONENABLES__0", b"", 4), # FIXME: XNBytesField should be better, if supported
# APPEND_ITEM MSGACTEXEC 32 UINT "Number of Software Bus Message Actions Executed."
IntField("MSGACTEXEC", 0),
# APPEND_ITEM UTILCPUAVG 32 UINT "Current CPU Utilization Average."
IntField("UTILCPUAVG", 0),
# APPEND_ITEM UTILCPUPEAK 32 UINT "Current CPU Utilization Peak."
IntField("UTILCPUPEAK", 0),
# APPEND_ARRAY_ITEM EXECCOUNTS 32 UINT 1024 "Execution Counters"
StrFixedLenField("EXECCOUNTS__0", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__1", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__2", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__3", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__4", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__5", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__6", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__7", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__8", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__9", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__10", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__11", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__12", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__13", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__14", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__15", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__16", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__17", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__18", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__19", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__20", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__21", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__22", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__23", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__24", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__25", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__26", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__27", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__28", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__29", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__30", b"", 4), # FIXME: XNBytesField should be better, if supported
StrFixedLenField("EXECCOUNTS__31", b"", 4), # FIXME: XNBytesField should be better, if supported
]
bind_layers(CCSDSPacket, HS_HK_TLM_PKT_TlmPkt, pkttype=0, apid=173)
| [
6738,
629,
12826,
13,
439,
1330,
1635,
198,
6738,
269,
6359,
9310,
62,
8692,
1330,
327,
7902,
35,
4303,
8317,
628,
198,
4871,
18070,
62,
38730,
62,
14990,
44,
62,
40492,
51,
62,
51,
75,
76,
47,
21841,
7,
47,
8317,
2599,
198,
220,
... | 2.494746 | 2,379 |
import time
from pyhdx.batch_processing import yaml_to_hdxm
from pyhdx.fileIO import csv_to_protein, save_fitresult, dataframe_to_file
from pyhdx.fitting import fit_gibbs_global_batch_aligned, fit_gibbs_global
from pyhdx.models import HDXMeasurementSet
from pyhdx.support import pprint_df_to_file
from pyhdx.fitting_torch import CheckPoint
from functions.align import alignments
from functions.base import *
from functions.logging import write_log
write_log(__file__)
fit_kwargs = settings_dict['ecsecb_mtsecb']
output_base_dir = fitresults_dir / 'batch_fits' / 'ecSecB_mtSecB'
output_base_dir.mkdir(parents=True, exist_ok=True)
output_base_dir_single = fitresults_dir / 'ecSecB_mtSecB_single'
output_base_dir_single.mkdir(parents=True, exist_ok=True)
# Load the HDX-MS data and initial guesses
states = ['ecSecB', 'mtSecB']
hdxm_list = [yaml_to_hdxm(data_dict[state], data_dir=input_data_dir) for state in states]
hdx_set = HDXMeasurementSet(hdxm_list)
guesses = [csv_to_protein(f'guesses/{state}_initial_guess.csv')['rate'] for state in states]
gibbs_guess = hdx_set.guess_deltaG(guesses)
for output_folder, alignment in alignments.items():
output_dir = output_base_dir / output_folder
output_dir.mkdir(parents=True, exist_ok=True)
hdx_set.add_alignment(list(alignment.values()))
sequence_alignment = hdx_set.aligned_dataframes['sequence']
pprint_df_to_file(sequence_alignment, output_dir / 'sequence_alignment.txt')
t0 = time.time()
checkpoint = CheckPoint(epoch_step=1000)
fr = fit_gibbs_global_batch_aligned(hdx_set, gibbs_guess, callbacks=[checkpoint], **fit_kwargs)
t1 = time.time()
log_lines = [f"Time elapsed: {(t1 - t0):.2f} s"]
save_fitresult(output_dir, fr, log_lines=log_lines)
history_df = checkpoint.to_dataframe(names=hdx_set.names)
dataframe_to_file(output_dir / 'model_history.csv', history_df)
dataframe_to_file(output_dir / 'model_history.txt', history_df, fmt='pprint')
# individual fits
fit_kwargs.pop('r2')
for state, hdxm, guess in zip(states, hdxm_list, guesses):
gibbs_guess = hdxm.guess_deltaG(guess)
t0 = time.time()
checkpoint = CheckPoint(epoch_step=1000)
fr = fit_gibbs_global(hdxm, gibbs_guess, callbacks=[checkpoint], **fit_kwargs)
t1 = time.time()
log_lines = [f"Time elapsed: {(t1 - t0):.2f} s"]
output_dir = output_base_dir_single / state
save_fitresult(output_dir, fr, log_lines=log_lines)
history_df = checkpoint.to_dataframe(names=hdx_set.names)
dataframe_to_file(output_dir / 'model_history.csv', history_df)
dataframe_to_file(output_dir / 'model_history.txt', history_df, fmt='pprint') | [
11748,
640,
198,
198,
6738,
12972,
31298,
87,
13,
43501,
62,
36948,
1330,
331,
43695,
62,
1462,
62,
31298,
87,
76,
198,
6738,
12972,
31298,
87,
13,
7753,
9399,
1330,
269,
21370,
62,
1462,
62,
48693,
11,
3613,
62,
11147,
20274,
11,
1... | 2.56741 | 1,031 |
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter support
"""
from oslo_log import log as logging
from cinder.scheduler import base_handler
LOG = logging.getLogger(__name__)
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
"""Yield objects that pass the filter.
Can be overridden in a subclass, if you need to base filtering
decisions on all objects. Otherwise, one can just override
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
yield obj
# Set to true in a subclass if a filter only needs to be run once
# for each request rather than for each instance
run_filter_once_per_request = False
def run_filter_for_index(self, index):
"""Return True if the filter needs to be run for n-th instances.
Only need to override this if a filter needs anything other than
"first only" or "all" behaviour.
"""
return not (self.run_filter_once_per_request and index > 0)
class BaseFilterHandler(base_handler.BaseHandler):
"""Base class to handle loading filter classes.
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filter_classes, objs,
filter_properties, index=0):
"""Get objects after filter
:param filter_classes: filters that will be used to filter the
objects
:param objs: objects that will be filtered
:param filter_properties: client filter properties
:param index: This value needs to be increased in the caller
function of get_filtered_objects when handling
each resource.
"""
list_objs = list(objs)
LOG.debug("Starting with %d host(s)", len(list_objs))
# The 'part_filter_results' list just tracks the number of hosts
# before and after the filter, unless the filter returns zero
# hosts, in which it records the host/nodename for the last batch
# that was removed. Since the full_filter_results can be very large,
# it is only recorded if the LOG level is set to debug.
part_filter_results = []
full_filter_results = []
for filter_cls in filter_classes:
cls_name = filter_cls.__name__
start_count = len(list_objs)
filter_class = filter_cls()
if filter_class.run_filter_for_index(index):
objs = filter_class.filter_all(list_objs, filter_properties)
if objs is None:
LOG.info("Filter %s returned 0 hosts", cls_name)
full_filter_results.append((cls_name, None))
list_objs = None
break
list_objs = list(objs)
end_count = len(list_objs)
part_filter_results.append((cls_name, start_count, end_count))
remaining = [getattr(obj, "host", obj)
for obj in list_objs]
full_filter_results.append((cls_name, remaining))
LOG.debug("Filter %(cls_name)s returned "
"%(obj_len)d host(s)",
{'cls_name': cls_name, 'obj_len': len(list_objs)})
if not list_objs:
self._log_filtration(full_filter_results,
part_filter_results, filter_properties)
return list_objs
| [
2,
15069,
357,
66,
8,
2813,
12,
6999,
4946,
25896,
5693,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
22... | 2.414262 | 1,837 |
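A concrete subclass makes the BaseFilter contract clearer. The toy filter below is hypothetical (it is not part of cinder) and keeps only hosts whose reported free capacity covers the requested size:

class EnoughFreeSpaceFilter(BaseFilter):
    """Toy filter: pass hosts with at least the requested number of GiB free."""

    def _filter_one(self, obj, filter_properties):
        requested = filter_properties.get('size', 0)
        return getattr(obj, 'free_capacity_gb', 0) >= requested

class FakeHost(object):
    def __init__(self, host, free_capacity_gb):
        self.host = host
        self.free_capacity_gb = free_capacity_gb

hosts = [FakeHost('a', 10), FakeHost('b', 500)]
survivors = list(EnoughFreeSpaceFilter().filter_all(hosts, {'size': 100}))
assert [h.host for h in survivors] == ['b']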
from numpy import sqrt, ceil, sin, cos, arctan2, diff, minimum, maximum
from numpy import array, ones, zeros, full, linspace, arange, int64, concatenate
from numpy import in1d, unique, isclose, nan, atleast_1d, intersect1d
from warnings import warn
from tokamesh.geometry import build_edge_map
from tokamesh.triangle import run_triangle
def equilateral_mesh(R_range=(0, 1), z_range=(0, 1), resolution=0.1, rotation=None, pivot=(0, 0)):
"""
Construct a mesh from equilateral triangles which fills a rectangular region.
:param R_range: \
A tuple in the form ``(R_min, R_max)`` specifying the range of major radius
values to cover with triangles.
:param z_range: \
A tuple in the form ``(z_min, z_max)`` specifying the range of z-height values
to cover with triangles.
:param float resolution: \
The side-length of the triangles.
:param rotation: \
Angle (in radians) by which the mesh will be rotated.
:param pivot: \
Pivot point around which the rotation is applied.
:return: \
A tuple containing ``R_vert``, ``z_vert`` and ``triangles``.
``R_vert`` is the major radius of the vertices as a 1D array. ``z_vert`` the is
z-height of the vertices as a 1D array. ``triangles`` is a 2D array of integers
of shape ``(N,3)`` specifying the indices of the vertices which form each
triangle in the mesh, where ``N`` is the total number of triangles.
"""
# determine how many rows / columns of triangles to create
N = int(ceil((R_range[1] - R_range[0]) / resolution))
M = int(ceil((z_range[1] - z_range[0]) / (resolution * 0.5 * sqrt(3))))
# create the vertices by producing a rectangular grid
# and shifting every other row
x_ax = linspace(0, N-1, N) * resolution
y_ax = linspace(0, M-1, M) * resolution * 0.5 * sqrt(3)
x = zeros([N,M])
y = zeros([N,M])
y[:,:] = y_ax[None,:] + z_range[0]
x[:,:] = x_ax[:,None] + R_range[0]
x[:,1::2] += 0.5 * resolution
# rotate the vertices around a point if requested
if rotation is not None:
x, y = rotate(x, y, rotation, pivot)
# divide up the grid into triangles
triangle_inds = []
for i in range(N-1):
for j in range(M-1):
v1 = M*i + j
v2 = M*(i+1) + j
v3 = M*i + j + 1
v4 = M*(i+1) + j + 1
if j % 2 == 0:
triangle_inds.append([v1, v2, v3])
triangle_inds.append([v2, v3, v4])
else:
triangle_inds.append([v1, v3, v4])
triangle_inds.append([v1, v2, v4])
return x.flatten(), y.flatten(), array(triangle_inds)
def trim_vertices(R, z, triangles, trim_bools):
"""
Removes chosen vertices (and any triangles containing those vertices) from a mesh.
:param R: \
The major radius of each mesh vertex as a 1D numpy array.
:param z: \
The z-height of each mesh vertex as a 1D numpy array.
:param triangles: \
A 2D numpy array of integers specifying the indices of the vertices which form
each of the triangles in the mesh. The array must have shape ``(N,3)`` where ``N``
is the total number of triangles.
:param trim_bools: \
A 1D array of boolean values corresponding to the vertices, which is ``True`` for
any vertices which are to be removed from the mesh.
:return R, z, triangles: \
The ``R``, ``z`` and ``triangles`` arrays (defined as described above) with the
specified vertices removed.
"""
vert_inds = (~trim_bools).nonzero()[0]
tri_bools = in1d(triangles[:,0],vert_inds)
tri_bools &= in1d(triangles[:,1],vert_inds)
tri_bools &= in1d(triangles[:,2],vert_inds)
tri_inds = tri_bools.nonzero()[0]
index_converter = zeros(R.size, dtype=int64)
index_converter[vert_inds] = arange(vert_inds.size)
trim_triangles = index_converter[triangles[tri_inds,:]]
trim_triangles.sort(axis=1)
return R[vert_inds], z[vert_inds], trim_triangles
class Polygon(object):
"""
Class for evaluating whether a given point is inside a polygon,
or the distance between it and the nearest point on the polygon.
:param x: \
The x-values of the polygon vertices as a 1D numpy array.
:param y: \
The y-values of the polygon vertices as a 1D numpy array.
"""
def find_boundaries(triangles):
"""
Find all the boundaries of a given mesh.
:param triangles: \
A 2D numpy array of integers specifying the indices of the vertices which form
each of the triangles in the mesh. The array must have shape ``(N,3)`` where ``N`` is
the total number of triangles.
:return: \
A list of 1D numpy arrays containing the indices of the vertices in each boundary.
"""
# Construct a mapping from triangles to edges, and edges to vertices
triangle_edges, edge_vertices, _ = build_edge_map(triangles)
# identify edges on the boundary by finding edges which are only part of one triangle
unique_vals, counts = unique(triangle_edges, return_counts=True)
boundary_edges_indices = (counts == 1).nonzero()[0]
boundary_edges = edge_vertices[boundary_edges_indices, :]
# now create a map between an edge, and the other edges to which it's connected
boundary_connections = {}
for i in range(boundary_edges.shape[0]):
edges = ((boundary_edges[i, 0] == boundary_edges) | (boundary_edges[i, 1] == boundary_edges)).nonzero()[0]
boundary_connections[i] = [e for e in edges if e != i]
# we use a set to keep track of which edges have already been used as part of a boundary
unused_edges = {i for i in range(boundary_edges.shape[0])}
# now follow the connections map to build the boundaries
boundaries = []
while len(unused_edges) > 0:
current_boundary = [unused_edges.pop()] # start at an arbitrary unused edge
while True:
connected_edges = boundary_connections[current_boundary[-1]]
for edge in connected_edges:
if edge in unused_edges:
current_boundary.append(edge)
unused_edges.remove(edge)
break
else:
break
boundaries.append(boundary_edges_indices[current_boundary])
_, edges_per_vertex = unique(boundary_edges, return_counts=True)
if edges_per_vertex.max() > 2:
warn(
"""
[ find_boundaries warning ]
>> The given mesh contains at least two sub-meshes which
>> are connected by only one vertex. Currently, it is not
>> guaranteed that this function will draw separate boundaries
>> for each sub-mesh - this will be addressed in future update.
"""
)
# Now we need to convert the boundaries from edge indices to vertex indices
vertex_boundaries = []
for boundary in boundaries:
# the order of the first two vertex indices needs to match the direction
# in which the boundary is being traced.
v1, v2 = edge_vertices[boundary[0],:]
if v1 in edge_vertices[boundary[1],:]:
vertex_boundary = [v2, v1]
else:
vertex_boundary = [v1, v2]
# now loop over all the other edges and add the new vertex that appears
for edge in boundary[1:]:
v1, v2 = edge_vertices[edge,:]
next_vertex = v1 if (v1 not in vertex_boundary) else v2
vertex_boundary.append(next_vertex)
vertex_boundaries.append(array(vertex_boundary))
return vertex_boundaries
def build_central_mesh(R_boundary, z_boundary, resolution, padding_factor=1., rotation=None):
"""
Generate an equilateral mesh which fills the space inside a given boundary,
up to a chosen distance to the boundary edge.
:param R_boundary: \
The major-radius values of the boundary as a 1D numpy array.
:param z_boundary: \
The z-height values of the boundary as a 1D numpy array.
:param resolution: \
The side-length of the equilateral triangles.
:param padding_factor: \
A multiplicative factor which defines the minimum distance to the boundary
such that ``min_distance = padding_factor*scale``. No vertices in the returned
mesh will be closer to the boundary than ``min_distance``.
:param rotation: \
Angle (in radians) by which the orientations of mesh triangles are rotated,
relative to their default orientation.
:return: \
A tuple containing ``R_vert``, ``z_vert`` and ``triangles``.
``R_vert`` is the major-radius of the vertices as a 1D array. ``z_vert`` the is
z-height of the vertices as a 1D array. ``triangles`` is a 2D array of integers
of shape ``(N,3)`` specifying the indices of the vertices which form each triangle
in the mesh, where ``N`` is the total number of triangles.
"""
poly = Polygon(R_boundary, z_boundary)
pad = 2*0.5*sqrt(3)
if rotation is None:
R_range = (R_boundary.min() - pad, R_boundary.max() + pad)
z_range = (z_boundary.min() - pad, z_boundary.max() + pad)
R, z, triangles = equilateral_mesh(R_range=R_range, z_range=z_range, resolution=resolution)
else:
rot_R, rot_z = rotate(R_boundary, z_boundary, -rotation, [0., 0.])
R_range = (rot_R.min() - pad, rot_R.max() + pad)
z_range = (rot_z.min() - pad, rot_z.max() + pad)
R, z, triangles = equilateral_mesh(R_range=R_range, z_range=z_range, resolution=resolution)
R, z = rotate(R, z, rotation, [0., 0.])
# remove all triangles which are too close too or inside walls
bools = array([poly.is_inside(p) * poly.distance(p) < resolution * padding_factor for p in zip(R, z)])
return trim_vertices(R, z, triangles, bools)
def refine_mesh(R, z, triangles, refinement_bools):
"""
Refine a mesh by partitioning specified triangles into 4 sub-triangles.
Triangles sharing one or more edges with those being refined will also
be partitioned in such a way to ensure the resulting mesh is valid.
:param R: \
The major radius of each mesh vertex as a 1D numpy array.
:param z: \
The z-height of each mesh vertex as a 1D numpy array.
:param triangles: \
A 2D numpy array of integers specifying the indices of the vertices which form
each of the triangles in the mesh. The array must have shape ``(N,3)`` where ``N``
is the total number of triangles.
:param refinement_bools: \
A numpy array of bools specifying which triangles will be refined. Triangles with
indices corresponding to ``True`` values in the bool array will be refined.
:return R, z, triangles: \
The ``R``, ``z`` and ``triangles`` arrays (defined as described above) for the
refined mesh.
"""
# convert the bools to indices
refinement_inds = refinement_bools.nonzero()[0]
# build a set as we'll be performing membership checks
refine_set = {i for i in refinement_inds}
# get the edge mapping data
triangle_edges, edge_vertices, edge_to_triangles = build_edge_map(triangles)
new_mesh_triangles = []
for t in range(triangles.shape[0]):
# for the current triangle, find which of its neighbours are being refined
refined_neighbours = []
for edge in triangle_edges[t,:]:
refined_neighbours.extend([i for i in edge_to_triangles[edge] if i != t and i in refine_set])
vertices = [(R[i], z[i]) for i in triangles[t,:]]
# if either the triangle itself, or all of its neighbours, are being refined, it must be quadrisected
if t in refine_set or len(refined_neighbours) == 3:
v1, v2, v3 = vertices
# get the mid-point of each side
m12 = (0.5*(v1[0] + v2[0]), 0.5*(v1[1] + v2[1]))
m23 = (0.5*(v2[0] + v3[0]), 0.5*(v2[1] + v3[1]))
m31 = (0.5*(v3[0] + v1[0]), 0.5*(v3[1] + v1[1]))
# add the new triangles
new_mesh_triangles.extend([[v1, m12, m31], [v2, m12, m23], [v3, m23, m31], [m12, m23, m31]])
# if no neighbours are being refined, the triangle remains unchanged
elif len(refined_neighbours) == 0:
new_mesh_triangles.append(vertices)
# if the triangle has two refined neighbours, it must be trisected
elif len(refined_neighbours) == 2:
# first we need to find the two edges shared with the neighbours
shared_edges = [intersect1d(triangle_edges[k,:], triangle_edges[t,:]) for k in refined_neighbours]
# now find the point that these two edges share
shared_vertex_index = intersect1d(*[edge_vertices[k,:] for k in shared_edges])
shared_vertex = (R[shared_vertex_index[0]], z[shared_vertex_index[0]])
# get the two points which are not shared
v1, v2 = [v for v in vertices if v != shared_vertex]
# get the mid points of the shared sides
midpoint_1 = (0.5*(v1[0]+shared_vertex[0]), 0.5*(v1[1]+shared_vertex[1]))
midpoint_2 = (0.5*(v2[0]+shared_vertex[0]), 0.5*(v2[1]+shared_vertex[1]))
# add the new triangles
new_mesh_triangles.append([midpoint_1, midpoint_2, shared_vertex])
new_mesh_triangles.append([midpoint_1, v1, v2])
new_mesh_triangles.append([midpoint_1, midpoint_2, v2])
# if the triangle has one refined neighbour, it must be bisected
elif len(refined_neighbours) == 1:
# find the shared edge
shared_edge = intersect1d(triangle_edges[refined_neighbours[0],:], triangle_edges[t,:])
# get the vertices of the shared edge
v1, v2 = [(R[i], z[i]) for i in edge_vertices[shared_edge,:].squeeze()]
# get the remaining vertex
v3 = [v for v in vertices if v not in [v1, v2]][0]
# get the midpoint of the shared edge
midpoint = (0.5*(v1[0]+v2[0]), 0.5*(v1[1]+v2[1]))
new_mesh_triangles.extend([[midpoint, v3, v1], [midpoint, v3, v2]])
else:
raise ValueError('more than 3 refined neighbours detected')
# number all the vertices
vertex_map = {}
for vertices in new_mesh_triangles:
for vertex in vertices:
if vertex not in vertex_map:
vertex_map[vertex] = len(vertex_map)
# build the mesh data arrays
new_R = array([v[0] for v in vertex_map.keys()])
new_z = array([v[1] for v in vertex_map.keys()])
new_triangles = array([[vertex_map[v] for v in verts] for verts in new_mesh_triangles], dtype=int64)
return new_R, new_z, new_triangles
def mesh_generator(R_boundary, z_boundary, resolution=0.03, edge_resolution=None, edge_padding=0.75,
edge_max_area=1.1, rotation=None):
"""
Generate a triangular mesh which fills the space inside a given boundary using a 2-stage
process. First, a mesh of equilateral triangles is created which fills the space up to a
chosen minimum distance from the boundary. An irregular mesh is then generated which fills
the space between the central equilateral mesh and the boundary. The two meshes are then
merged, and the resulting mesh is returned.
:param R_boundary: \
The major-radius values of the boundary as a 1D numpy array.
:param z_boundary: \
The z-height values of the boundary as a 1D numpy array.
:param resolution: \
The side-length of triangles in the central equilateral mesh.
:param edge_resolution: \
Sets the target area of triangles in the irregular edge mesh, which fills the space between
the central equilateral mesh and the boundary. The `Triangle` C-code, which is used to
generate the irregular mesh, will attempt to construct triangles with areas equal to that
of an equilateral triangle with side length ``edge_resolution``. If not specified, the value
passed as the ``resolution`` argument is used instead.
:param edge_padding: \
A multiplicative factor which defines the minimum allowed distance between a
vertex in the central equilateral mesh and the boundary such that
``min_distance = edge_padding * resolution``. No vertices in the central equilateral
mesh will be closer to the boundary than ``min_distance``.
:param edge_max_area: \
A multiplicative factor which sets the maximum allowed area of triangles in the
irregular edge mesh, such that no triangle will have an area larger than
``edge_max_area`` times the target area set by the ``edge_resolution`` argument.
:param rotation: \
Angle (in radians) by which the orientations of triangles in the central
equilateral mesh are rotated, relative to their default orientation.
:return: \
A tuple containing ``R_vert``, ``z_vert`` and ``triangles``.
``R_vert`` is the major-radius of the vertices as a 1D array. ``z_vert`` the is
z-height of the vertices as a 1D array. ``triangles`` is a 2D array of integers
of shape ``(N,3)`` specifying the indices of the vertices which form each triangle
in the mesh, where ``N`` is the total number of triangles.
"""
# build the central mesh
central_R, central_z, central_triangles = build_central_mesh(
R_boundary=R_boundary,
z_boundary=z_boundary,
resolution=resolution,
padding_factor=edge_padding,
rotation=rotation
)
# now construct the boundary for the central mesh
boundaries = find_boundaries(central_triangles)
# if there are multiple boundaries, sort them by length
if len(boundaries) > 1:
boundaries = sorted(boundaries, key=lambda x: len(x))
central_boundary = boundaries[-1]
central_boundary = concatenate([central_boundary, atleast_1d(central_boundary[0])])
# now we have the boundary, we can build the edge mesh using triangle.
# prepare triangle inputs:
if edge_resolution is None:
edge_resolution = resolution
eq_area = (edge_resolution**2) * 0.25 * sqrt(3)
area_multiplier = edge_max_area
outer = (R_boundary, z_boundary)
inner = (central_R[central_boundary], central_z[central_boundary])
voids = [[inner[0].mean()], [inner[1].mean()]]
# run triangle using the python wrapper
edge_R, edge_z, edge_triangles = run_triangle(
outer_boundary=outer,
inner_boundary=inner,
void_markers=voids,
max_area=eq_area*area_multiplier)
# combine the central and edge meshes
R = concatenate([central_R, edge_R])
z = concatenate([central_z, edge_z])
triangles = concatenate([central_triangles, edge_triangles + central_R.size], axis=0)
R, z, triangles = remove_duplicate_vertices(R, z, triangles)
return R, z, triangles
| [
198,
6738,
299,
32152,
1330,
19862,
17034,
11,
2906,
346,
11,
7813,
11,
8615,
11,
610,
310,
272,
17,
11,
814,
11,
5288,
11,
5415,
198,
6738,
299,
32152,
1330,
7177,
11,
3392,
11,
1976,
27498,
11,
1336,
11,
300,
1040,
10223,
11,
61... | 2.529693 | 7,527 |
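A minimal usage sketch of the equilateral mesh builder defined above, run in the context of this module; the ranges and resolution are arbitrary:

R, z, triangles = equilateral_mesh(R_range=(0.0, 1.0), z_range=(0.0, 1.0), resolution=0.1)
assert triangles.shape[1] == 3      # each row indexes the three vertices of one triangle
assert R.size == z.size             # one (R, z) pair per vertex
assert triangles.max() < R.size     # every index refers to an existing vertex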
from Acquisition import aq_inner
from Acquisition import aq_parent
from Products.CMFCore.utils import getToolByName
| [
6738,
44564,
1330,
257,
80,
62,
5083,
198,
6738,
44564,
1330,
257,
80,
62,
8000,
198,
6738,
18675,
13,
24187,
4851,
382,
13,
26791,
1330,
651,
25391,
3886,
5376,
628
] | 3.9 | 30 |
import os
import time
import cv2
import numpy as np
| [
11748,
28686,
198,
11748,
640,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198
] | 2.85 | 20 |
import pytest
from bank import Bank
from atm_controller import AtmController, ControlType
from cash_bin import CashBin
from card_reader import CardReader
@pytest.fixture(scope="module")
def cards():
    """ Create the card data fixture """
    # Card numbers and accounts are kept as strings so they are easy to read
card_list = {
"card1": [
"1234",
{
"account1-card1": 100000,
"account2-card1": 100000,
},
],
"card2": [
"4321",
{
"account1-card2": 100000,
},
],
"card3": [
"1515",
{
"account1-card3": 100000,
},
],
}
return card_list
@pytest.fixture(scope="module")
def one_bank(cards):
    """ Create the bank fixture """
bank1 = Bank("은행1")
for k, v in cards.items():
for account, balance in v[1].items():
bank1.add_account(k, v[0], account, balance)
return bank1
@pytest.fixture(scope="module")
def one_cash_bin():
    """ Create the cash bin fixture """
cash = CashBin(1000000)
return cash
@pytest.fixture(scope="module")
def one_card_reader(cards):
    """ Create the card reader fixture """
    # Arbitrarily select just one card
card = CardReader([list(cards.keys())[1]])
return card
@pytest.fixture(scope="module")
def atm(one_bank, one_cash_bin, one_card_reader):
    """ Create the ATM controller fixture """
controller = AtmController(one_bank, one_cash_bin, one_card_reader)
return controller
def test_test():
    """ Sanity check that the test suite runs """
    assert True
# Class creation tests
def test_init_bank(one_bank):
    """ Verify the virtual bank object is created """
card_list = ["card1", "card2", "card3"]
for card in card_list:
if card not in one_bank.card_numbers:
assert False
assert one_bank is not None
def test_init_cash_bin(one_cash_bin):
    """ Verify the cash bin is created """
assert one_cash_bin is not None
def test_init_card_reader(one_card_reader):
    """ Verify the card reader is created """
assert one_card_reader is not None
def test_init_atmcontroller(atm):
    """ Verify the ATM controller object is created """
assert atm is not None
# bank test
def test_bank_add_account(one_bank):
    """ Verify an account can be created """
one_bank.add_account("card3", "1515", "account2-card3", 1)
assert one_bank.get_balance("card3", "account2-card3") == 1
def test_bank_duplicate_account(one_bank):
    """ Verify a duplicate account is rejected """
assert one_bank.add_account("card1", "1234", "account1-card1", 1) == False
def test_bank_pin_error(one_bank):
    """ Verify a PIN error is detected """
assert one_bank.add_account("card1", "4321", "account3-card1", 1) == False
def test_bank_control_balance_withdraw(one_bank):
    """ Verify the account balance is adjusted on withdrawal """
assert one_bank.control_balance("card1", "account2-card1", -100000) == 0
def test_bank_control_balance_deposit(one_bank):
    """ Verify the account balance is adjusted on deposit """
assert one_bank.control_balance("card1", "account2-card1", 100000) == 100000
def test_bank_get_balance_withdraw(one_bank):
    """ Verify the account balance """
assert one_bank.get_balance("card1", "account2-card1") == 100000
# Cash bin tests
def test_cash_bin_get_balance(one_cash_bin):
    """ Verify the cash bin balance """
assert one_cash_bin.get_balance() == 1000000
def test_cash_bin_deposit(one_cash_bin):
    """ Verify a cash bin deposit """
assert one_cash_bin.deposit(1) == 1000001
def test_cash_bin_withdraw(one_cash_bin):
    """ Verify a cash bin withdrawal """
assert one_cash_bin.withdraw(1) == 1000000
# Card reader tests
def test_card_reader_get_card_number(cards, one_card_reader):
    """ Verify the card reader selects a card number """
assert one_card_reader.get_card_number() in cards
# ATM controller tests
def test_atm_input_pin(atm):
    """ Enter a PIN """
atm.input_pin("1234")
assert atm.pin == "1234"
def test_atm_authentication_fail(atm):
    """ Test entering an invalid PIN """
assert not atm.authentication()[0]
def test_atm_authentication_success(atm):
    """ Test entering a valid PIN """
atm.input_pin("4321")
assert atm.authentication()[0]
def test_atm_select_account_fail(atm):
    """ Select an invalid account """
    # card2 is the card selected in the fixture
assert not atm.select_account("account10-card2")[0]
def test_atm_select_account_success(atm):
    """ Select a valid account """
    # Re-initialise state, since the previous test selected an invalid account
atm.input_pin("4321")
atm.authentication()
assert atm.select_account("account1-card2")[0]
def test_atm_control_account_balance(atm):
    """ Show the account balance """
atm.input_pin("4321")
atm.authentication()
atm.select_account("account1-card2")
rst = atm.control_account(ControlType.SeeBalance)
assert rst[0] and rst[2] == 100000
def test_atm_control_account_deposit(atm):
    """ Deposit into the account """
atm.input_pin("4321")
atm.authentication()
atm.select_account("account1-card2")
rst = atm.control_account(ControlType.Deposit, 1)
assert rst[0] and rst[2] == 100001
def test_atm_control_account_withdraw_fail(atm):
    """ Withdrawal from the account fails (insufficient balance) """
atm.input_pin("4321")
atm.authentication()
atm.select_account("account1-card2")
rst = atm.control_account(ControlType.Withdraw, 100002)
assert not rst[0] and rst[1] == "계좌에 잔고가 부족합니다."
def test_atm_control_account_withdraw_success(atm):
    """ Withdrawal from the account succeeds """
atm.input_pin("4321")
atm.authentication()
atm.select_account("account1-card2")
rst = atm.control_account(ControlType.Withdraw, 100001)
assert rst[0] and rst[1] == "성공" and rst[2] == 0
| [
11748,
12972,
9288,
198,
198,
6738,
3331,
1330,
5018,
198,
6738,
379,
76,
62,
36500,
1330,
1629,
76,
22130,
11,
6779,
6030,
198,
6738,
5003,
62,
8800,
1330,
16210,
33,
259,
198,
6738,
2657,
62,
46862,
1330,
5172,
33634,
628,
198,
31,
... | 1.793677 | 3,005 |
from dataclasses import dataclass
from typing import Tuple, Union
import numpy as np
from ksc.type import Type
# make_dims(1) = (1,)
# make_dims([1, 2, 3]) = (1,2,3)
# make_dims((1,2,3)) = (1,2,3)
# The shape of a scalar is just the empty tuple
ScalarShape = ()
# Shape class hierarchy
class Shape:
"""
Shape classes.
Shapes in the abstract interpreter follow the algebra described in "make_edef"
"""
@staticmethod
def from_ks_shape(val, type):
"""
Translate from ks_value as returned by shape_def to Shape class
"""
if type.is_scalar:
assert val == ()
return ScalarShape
if type.is_tensor:
dims, el_shape = val
return TensorShape(
dims, Shape.from_ks_shape(el_shape, type.tensor_elem_type)
)
if type.is_tuple:
assert isinstance(val, tuple)
assert len(val) == type.tuple_len
return tuple(
Shape.from_ks_shape(val[i], type.tuple_elem(i)) for i in range(len(val))
)
assert False
@staticmethod
def of_Index_of_Tensor_of_rank(rank: int):
"""
Make the shape of the return of the ks builtin "size" function, i.e. a Tuple of ScalarShapes
"""
if rank == 1:
return ScalarShape # Returns an int
else:
return tuple(ScalarShape for _ in range(rank))
@dataclass(frozen=True)
@dataclass(frozen=True)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
309,
29291,
11,
4479,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
479,
1416,
13,
4906,
1330,
5994,
198,
198,
2,
787,
62,
67,
12078,
7,
16,
8,
796,
357,
... | 2.118644 | 708 |
import sys
import time
from ifsApprover import db
from ifsApprover.Log import get_logger
from ifsApprover.Utils import make_dirs_if_needed, rnd_string
logger = get_logger("MailActions")
def get_first_matching_content_type(mail_mime_part, content_type_to_find):
"""
Recursively searches all mime parts of the mail_mime_part until a jpeg attachment is found.
:param mail_mime_part: part to start with
:return: the part that represents the attachment or None
"""
if not mail_mime_part.is_multipart():
content_type = mail_mime_part.get_content_type()
if content_type == content_type_to_find:
return mail_mime_part
return None
for payload in mail_mime_part.get_payload():
result = get_first_matching_content_type(payload, content_type_to_find)
if result is not None:
return result
return None
def receive_and_store_mail(filename):
"""
    Receives the email content from stdin and stores it in the given file. Also returns the content.
:param filename:
:return: the mail content
"""
make_dirs_if_needed(filename)
mail_data = []
with open(filename, "wb") as fo:
for line in sys.stdin:
fo.write(line.encode())
mail_data.append(line)
logger.debug(f"Mail stored: {filename}")
# the lines have already a line break
return "".join(mail_data)
def extract_and_store_image(mail, image_filename_full):
"""
Finds the (first) image from the parsed mail and stores it to image_filename.
If the image is missing, None will be returned.
:param mail:
:param image_filename_full: the filename with full path
:return: True for an image or False if missing
"""
make_dirs_if_needed(image_filename_full)
image_mime_part = get_first_matching_content_type(mail, "image/jpeg")
if image_mime_part is None:
return False
with open(image_filename_full, "wb") as fo:
fo.write(image_mime_part.get_payload(decode=True))
logger.debug("Image stored: %s" % image_filename_full)
return True
| [
11748,
25064,
198,
11748,
640,
198,
198,
6738,
611,
82,
4677,
305,
332,
1330,
20613,
198,
6738,
611,
82,
4677,
305,
332,
13,
11187,
1330,
651,
62,
6404,
1362,
198,
6738,
611,
82,
4677,
305,
332,
13,
18274,
4487,
1330,
787,
62,
15908... | 2.614622 | 807 |
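get_first_matching_content_type walks the MIME tree recursively. A self-contained check of that behaviour using only the standard library; the message below is synthetic and its JPEG payload is a placeholder:

from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText("hello"))
msg.attach(MIMEImage(b"\xff\xd8\xff\xd9", _subtype="jpeg"))  # minimal fake JPEG bytes

part = get_first_matching_content_type(msg, "image/jpeg")
assert part is not None and part.get_content_type() == "image/jpeg"
assert get_first_matching_content_type(msg, "application/pdf") is None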
import readline
import textwrap
import os
import pygments.token
import logging
from colorama import Fore, Back
from typing import List, Iterable
from privex.helpers import empty, DictObject
from prompt_toolkit import PromptSession, print_formatted_text, ANSI
from prompt_toolkit.completion import Completer, WordCompleter
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit.history import FileHistory
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style, merge_styles, style_from_pygments_cls
from pygments.lexers.python import Python3Lexer
from pygments.styles.fruity import FruityStyle
from privex.pyrewall import VERSION, PyreParser
from privex.pyrewall.core import columnize, find_file
from privex.pyrewall.PyreLexer import PyreLexer
log = logging.getLogger(__name__)
BLUE, GREEN, RED, YELLOW = Fore.BLUE, Fore.GREEN, Fore.RED, Fore.YELLOW
RESET = Fore.RESET
colorcodes = [getattr(Fore, c) for c in dir(Fore) if c[0] != '_']
fmt_v4 = ['ipt', 'iptables', 'ip4', 'ipt4', 'iptables4', 'v4']
fmt_v6 = ['ipt6', 'ip6tables', 'ip6', 'iptables6', 'v6']
header = textwrap.dedent(f'''
{YELLOW}PyreWall Version v{VERSION}
(C) 2019 Privex Inc. ( https://wwww.privex.io )
Official Repo: https://github.com/Privex/pyrewall{RESET}
''')
pyre_repl = PyreRepl()
| [ 11748, 1100, 1370, 198, 11748, 2420, 37150, 198, 11748, 28686, 198, 11748, 12972, 11726, 13, 30001, 198, 11748, 18931, 198, 6738, 3124, 1689, 1330, 4558, 11, 5157, 198, 6738, 19720, 1330, 7343, 11, 40806, 540, 198, 6738, 1293, 303, 87,
... | 2.843011 | 465 |
import torch
| [ 11748, 28034, 201, 198
] | 3.5 | 4 |
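Each row of this dump pairs the raw file content with its token ids, the character-to-token ratio, and the token count. The tiny `import torch` row makes the relationship easy to verify by hand; below is a sketch of how the three derived columns could be reproduced, assuming a GPT-2-style BPE tokenizer from Hugging Face transformers (the dump itself does not state which tokenizer was used):

from transformers import GPT2TokenizerFast  # assumption: a GPT-2-style BPE vocabulary

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

content = "import torch\r\n"
input_ids = tokenizer(content)["input_ids"]
token_count = len(input_ids)
ratio_char_token = len(content) / token_count

print(input_ids)         # the row above lists [11748, 28034, 201, 198]
print(token_count)       # 4
print(ratio_char_token)  # 14 characters / 4 tokens = 3.5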
import cgi
import httplib
import webapp2
from . import channel
from . import compute
from . import error
from . import middleware
from . import model
from . import settings
from . import shared
from . import wsgi_config
from error import Abort
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
# From https://cloud.google.com/console/project/apps~little-black-box/apiui/credential
CLIENT_ID = '638064416490.apps.googleusercontent.com'
# TODO: create redirect URI dynamically
if shared.IsDevMode():
redirect_uri = 'http://localhost:8080/'
else:
redirect_uri = 'https://{}/'.format(app_identity.get_default_version_hostname())
class AppHandler(shared.AccessCheckHandler):
"""Convenience request handler with app specific functionality."""
@webapp2.cached_property
@webapp2.cached_property
def handle_exception(self, exception, debug_mode):
"""Called if this handler throws an exception during execution.
Args:
exception: the exception that was thrown
debug_mode: True if the web application is running in debug mode
"""
status, headers, body = error.MakeErrorResponse(exception, debug_mode)
self.response.clear()
self.error(status)
self.response.headers.extend(headers)
if self.response.headers.get('X-App-Error'):
self.response.write(body)
else:
self.response.write('{}'.format(cgi.escape(body, quote=True)))
APPLICATION = webapp2.WSGIApplication([
('/api/config', ConfigHandler),
('/api/oauth2', Oauth2Handler),
('/api/instance', InstanceHandler),
], debug=settings.DEBUG)
APPLICATION = middleware.Session(APPLICATION, wsgi_config.WSGI_CONFIG)
APPLICATION = middleware.ErrorHandler(APPLICATION, debug=settings.DEBUG)
| [ 11748, 269, 12397, 198, 11748, 1841, 489, 571, 198, 11748, 3992, 1324, 17, 198, 198, 6738, 764, 1330, 6518, 198, 6738, 764, 1330, 24061, 198, 6738, 764, 1330, 4049, 198, 6738, 764, 1330, 3504, 1574, 198, 6738, 764, 1330, 2746, 198,
67... | 3.053913 | 575 |
YELLOW = '\x1b[33m'
GREEN = '\x1b[32m'
RED = '\x1b[31m'
MAGENTA = '\x1b[35m'
RESET_COLOR = '\x1b[0m'
| [ 56, 23304, 3913, 796, 705, 59, 87, 16, 65, 58, 2091, 76, 6, 198, 43016, 796, 705, 59, 87, 16, 65, 58, 2624, 76, 6, 198, 22083, 796, 705, 59, 87, 16, 65, 58, 3132, 76, 6, 198, 45820, 3525, 32, 796, 705, 59, 87, 16, 65, 58,
... | 1.507463 | 67 |
from mrp import process_def
import pytest
| [ 6738, 285, 81, 79, 1330, 1429, 62, 4299, 198, 11748, 12972, 9288, 628
] | 3.307692 | 13 |
import testlib
fee = 20
initialsend = 200000
capacity = 1000000
| [ 11748, 1332, 8019, 198, 198, 39071, 796, 1160, 198, 36733, 21280, 796, 939, 830, 198, 42404, 796, 1802, 2388, 198
] | 3.25 | 20 |
from django.test import TestCase
from django.contrib.auth import get_user_model
| [ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 628
] | 3.375 | 24 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
from object_detection.utils import tf_version
# pylint: disable=g-import-not-at-top
if tf_version.is_tf2():
from object_detection.core import freezable_sync_batch_norm
# pylint: enable=g-import-not-at-top
class KerasLayerHyperparams(object):
"""
A hyperparameter configuration object for Keras layers used in
Object Detection models.
"""
def __init__(self, hyperparams_config):
"""Builds keras hyperparameter config for layers based on the proto config.
It automatically converts from Slim layer hyperparameter configs to
Keras layer hyperparameters. Namely, it:
- Builds Keras initializers/regularizers instead of Slim ones
- sets weights_regularizer/initializer to kernel_regularizer/initializer
- converts batchnorm decay to momentum
- converts Slim l2 regularizer weights to the equivalent Keras l2 weights
Contains a hyperparameter configuration for ops that specifies kernel
initializer, kernel regularizer, activation. Also contains parameters for
batch norm operators based on the configuration.
Note that if the batch_norm parameters are not specified in the config
(i.e. left to default) then batch norm is excluded from the config.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
self._batch_norm_params = None
self._use_sync_batch_norm = False
if hyperparams_config.HasField('batch_norm'):
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.batch_norm)
elif hyperparams_config.HasField('sync_batch_norm'):
self._use_sync_batch_norm = True
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.sync_batch_norm)
self._force_use_bias = hyperparams_config.force_use_bias
self._activation_fn = _build_activation_fn(hyperparams_config.activation)
# TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
# (Those might use depthwise_* instead of kernel_*)
# We should probably switch to using build_conv2d_layer and
# build_depthwise_conv2d_layer methods instead.
self._op_params = {
'kernel_regularizer': _build_keras_regularizer(
hyperparams_config.regularizer),
'kernel_initializer': _build_initializer(
hyperparams_config.initializer, build_for_keras=True),
'activation': _build_activation_fn(hyperparams_config.activation)
}
def batch_norm_params(self, **overrides):
"""Returns a dict containing batchnorm layer construction hyperparameters.
Optionally overrides values in the batchnorm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
**overrides: keyword arguments to override in the hyperparams dictionary
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
if self._batch_norm_params is None:
new_batch_norm_params = dict()
else:
new_batch_norm_params = self._batch_norm_params.copy()
new_batch_norm_params.update(overrides)
return new_batch_norm_params
def build_batch_norm(self, training=None, **overrides):
"""Returns a Batch Normalization layer with the appropriate hyperparams.
If the hyperparams are configured to not use batch normalization,
this will return a Keras Lambda layer that only applies tf.Identity,
without doing any normalization.
Optionally overrides values in the batch_norm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
training: if True, the normalization layer will normalize using the batch
statistics. If False, the normalization layer will be frozen and will
act as if it is being used for inference. If None, the layer
will look up the Keras learning phase at `call` time to decide what to
do.
**overrides: batch normalization construction args to override from the
batch_norm hyperparams dictionary.
Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
or a Keras Lambda layer that applies the identity (if use_batch_norm()
is False)
"""
if self.use_batch_norm():
if self._use_sync_batch_norm:
return freezable_sync_batch_norm.FreezableSyncBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return freezable_batch_norm.FreezableBatchNorm(
training=training, **self.batch_norm_params(**overrides))
else:
return tf.keras.layers.Lambda(tf.identity)
def build_activation_layer(self, name='activation'):
"""Returns a Keras layer that applies the desired activation function.
Args:
name: The name to assign the Keras layer.
Returns: A Keras lambda layer that applies the activation function
specified in the hyperparam config, or applies the identity if the
activation function is None.
"""
if self._activation_fn:
return tf.keras.layers.Lambda(self._activation_fn, name=name)
else:
return tf.keras.layers.Lambda(tf.identity, name=name)
def get_regularizer_weight(self):
"""Returns the l1 or l2 regularizer weight.
Returns: A float value corresponding to the l1 or l2 regularization weight,
or None if neither l1 or l2 regularization is defined.
"""
regularizer = self._op_params['kernel_regularizer']
if hasattr(regularizer, 'l1'):
return regularizer.l1
elif hasattr(regularizer, 'l2'):
return regularizer.l2
else:
return None
def params(self, include_activation=False, **overrides):
"""Returns a dict containing the layer construction hyperparameters to use.
Optionally overrides values in the returned dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
include_activation: If False, activation in the returned dictionary will
be set to `None`, and the activation must be applied via a separate
layer created by `build_activation_layer`. If True, `activation` in the
output param dictionary will be set to the activation function
specified in the hyperparams config.
**overrides: keyword arguments to override in the hyperparams dictionary.
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
new_params = self._op_params.copy()
new_params['activation'] = None
if include_activation:
new_params['activation'] = self._activation_fn
new_params['use_bias'] = self.use_bias()
new_params.update(**overrides)
return new_params
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
Note that if no normalization parameters are specified in the config,
(i.e. left to default) then both batch norm and group norm are excluded
from the arg_scope.
The batch norm parameters are set for updates based on `is_training` argument
and conv_hyperparams_config.batch_norm.train parameter. During training, they
are updated only if batch_norm.train parameter is true. However, during eval,
no updates are made to the batch norm variables. In both cases, their current
values are used during forward pass.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
if hyperparams_config.force_use_bias:
raise ValueError('Hyperparams force_use_bias only supported by '
'KerasLayerHyperparams.')
if hyperparams_config.HasField('sync_batch_norm'):
raise ValueError('Hyperparams sync_batch_norm only supported by '
'KerasLayerHyperparams.')
normalizer_fn = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
normalizer_fn = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
if hyperparams_config.HasField('group_norm'):
normalizer_fn = slim.group_norm
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
return scope_fn
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
if activation_fn == hyperparams_pb2.Hyperparams.SWISH:
return tf.nn.swish
raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_slim_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_keras_regularizer(regularizer):
"""Builds a keras regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
Keras regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
# The Keras L2 regularizer weight differs from the Slim L2 regularizer
# weight by a factor of 2
return tf.keras.regularizers.l2(
float(regularizer.l2_regularizer.weight * 0.5))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_initializer(initializer, build_for_keras=False):
"""Build a tf initializer from config.
Args:
initializer: hyperparams_pb2.Hyperparams.regularizer proto.
build_for_keras: Whether the initializers should be built for Keras
operators. If false builds for Slim.
Returns:
tf initializer.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
if build_for_keras:
if initializer.variance_scaling_initializer.uniform:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='uniform')
else:
# In TF 1.9 release and earlier, the truncated_normal distribution was
# not supported correctly. So, in these earlier versions of tensorflow,
# the ValueError will be raised, and we manually truncate the
# distribution scale.
#
# It is insufficient to just set distribution to `normal` from the
# start, because the `normal` distribution in newer Tensorflow versions
# creates a truncated distribution, whereas it created untruncated
# distributions in older versions.
try:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='truncated_normal')
except ValueError:
truncate_constant = 0.87962566103423978
truncated_scale = initializer.variance_scaling_initializer.factor / (
truncate_constant * truncate_constant
)
return tf.variance_scaling_initializer(
scale=truncated_scale,
mode=mode.lower(),
distribution='normal')
else:
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
if initializer_oneof is None:
return None
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
def _build_keras_batch_norm_params(batch_norm):
"""Build a dictionary of Keras BatchNormalization params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
Returns:
A dictionary containing Keras BatchNormalization parameters.
"""
# Note: Although decay is defined to be 1 - momentum in batch_norm,
# decay in the slim batch_norm layers was erroneously defined and is
# actually the same as momentum in the Keras batch_norm layers.
# For context, see: github.com/keras-team/keras/issues/6839
batch_norm_params = {
'momentum': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
}
return batch_norm_params
| [ 2, 15069, 2177, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846,
... | 2.920559 | 5,866 |
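Everything in the builder above is driven by a hyperparams_pb2.Hyperparams proto. The following sketch shows a text-format config that exercises the fields the code reads (regularizer, initializer, activation, batch_norm); the numeric values are illustrative only, and the import path of this builder module is assumed:

from google.protobuf import text_format
from object_detection.builders import hyperparams_builder  # assumed location of the file above
from object_detection.protos import hyperparams_pb2

# Illustrative values; the field names follow what build() and KerasLayerHyperparams read.
config_text = """
regularizer { l2_regularizer { weight: 0.0004 } }
initializer { truncated_normal_initializer { mean: 0.0 stddev: 0.03 } }
activation: RELU_6
batch_norm { decay: 0.997 center: true scale: true epsilon: 0.001 train: true }
"""
config = text_format.Parse(config_text, hyperparams_pb2.Hyperparams())

keras_hparams = hyperparams_builder.KerasLayerHyperparams(config)
print(keras_hparams.batch_norm_params())          # decay shows up as 'momentum' for Keras
print(keras_hparams.params(include_activation=True))  # kernel_* names instead of weights_*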
from collections import defaultdict
import json
# Utility to convert raw coefficient files to more useful formats.
# Outputs both a json file and a python file (mapping represented as a python dict)
# fmt: off
files = [
# format is (input_name, output_name)
("C2110H2R", "2018_v21"),
("C2214O5P", "2018_v22"),
("C2318P1Q", "2019_v23")
]
# fmt: on
output_dir = "../../pyriskadjust/coefficients/"
for (inname, outname) in files:
with open(inname + ".csv") as f:
header = f.readline().strip().replace('"', "").split(",")
vals = map(float, f.readline().strip().split(","))
coefficient_mapping = dict(zip([coeff.lower() for coeff in header], vals))
# uncomment to generate json as well
# with open(output_dir + "coefficients_" + outname + ".json", "w") as fo:
# json.dump(coefficient_mapping, fo, indent=4)
with open(output_dir + "coefficients_" + outname + ".py", "w") as fo:
fo.write('"""Model coefficients, take from {}"""\n\n'.format(inname))
fo.write("COEFFICIENTS = ")
json.dump(coefficient_mapping, fo, indent=4)
| [ 6738, 17268, 1330, 4277, 11600, 198, 11748, 33918, 198, 198, 2, 7273, 359, 414, 284, 10385, 8246, 35381, 3696, 284, 517, 4465, 17519, 13, 198, 2, 25235, 82, 1111, 33918, 2393, 290, 21015, 2393, 357, 76, 5912, 7997, 355, 21015, 8633,
8... | 2.474836 | 457 |
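For reference, the generated Python module is just a docstring plus a literal dict assignment (written by json.dump, so keys keep their JSON-style double quotes). A sketch of what coefficients_2018_v22.py would roughly look like; the coefficient names and values here are invented, not taken from the real input files:

"""Model coefficients, taken from C2214O5P"""

COEFFICIENTS = {
    "cna_hcc19": 0.302,
    "cna_hcc22": 0.535
}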
"""Reader application to read log entries in real time from server"""
import time
import json
import logging
import argparse
import sys
import socketio
from socketio.exceptions import ConnectionError as SocketConnectionError
from decentralized_logger import setup_logging, disable_loggers, parse_logging_format
DISABLE_LOGGERS = [
'urllib3.connectionpool'
]
OUTPUT_STRING = '%(asctime)s | %(name)-20s | %(levelname)-9s | %(message)s'
sio = socketio.Client() # pylint: disable=invalid-name
setup_logging(
log_format=parse_logging_format("%(message)s"),
enable_stream_handler=True,
enable_server_handler=False
)
log = logging.getLogger('Reader') # pylint: disable=invalid-name
@sio.event
def log_stream(message):
"""Socket endpoint for reading log entries"""
log.info(OUTPUT_STRING, json.loads(message))
def main():
"""Main function"""
disable_loggers(DISABLE_LOGGERS)
try:
parser = argparse.ArgumentParser(usage="""
Usage: log_reader [OPTIONS] SERVER_IP_ADDRESS
Tool to read log event stream based from the decentralized logger eco system.
Read more about the decentralized logger at https://github.com/rikpet/decentralized-logger.
SERVER_IP_ADDRESS should be set to the ip address of the device where the log server application is running
Options:
-p, --port Port for the log server application if changed from default
"""
)
parser.add_argument('server_ip')
parser.add_argument('--port', type=int, default=9020)
args = parser.parse_args(sys.argv[1:])
while True:
try:
url = f'http://{args.server_ip}:{args.port}'
sio.connect(url)
except SocketConnectionError:
log.warning('Could not connect to log server')
time.sleep(1)
else:
log.info('Connected to server. Listening to messages')
sio.wait()
except KeyboardInterrupt:
pass
except Exception as ex: # pylint: disable=broad-except
print(str(ex))
input('')
if __name__ == '__main__':
main()
| [ 37811, 33634, 3586, 284, 1100, 2604, 12784, 287, 1103, 640, 422, 4382, 37811, 198, 198, 11748, 640, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 1822, 29572, 198, 11748, 25064, 198, 11748, 17802, 952, 198, 6738, 17802, 952, 13,
1069,... | 2.562954 | 826 |
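The log_stream handler above relies on %-style mapping formatting: logging substitutes the keys of a single dict argument into OUTPUT_STRING, so the server is expected to send a JSON object carrying at least asctime, name, levelname and message. A small sketch of that formatting step with a hypothetical payload:

import json

OUTPUT_STRING = '%(asctime)s | %(name)-20s | %(levelname)-9s | %(message)s'

# Hypothetical payload; the real one is produced by the decentralized logger server.
message = json.dumps({
    "asctime": "2021-05-01 12:00:00",
    "name": "sensor.temperature",
    "levelname": "INFO",
    "message": "reading ok",
})

# The same substitution logging performs for log.info(OUTPUT_STRING, json.loads(message)):
print(OUTPUT_STRING % json.loads(message))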
'''
author : https://github.com/Harnek'''
a = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,11,12],
[13,14,15,16],]
r = 4
c = 4
#Diagonally Up
'''
1
5 2
9 6 3
13 10 7 4
14 11 8
15 12
16
'''
for i in range(r + c - 1):
k1 = 0 if i < c else i - c + 1
k2 = 0 if i < r else i - r + 1
for j in range(i - k2, k1-1, -1):
print(a[j][i-j], end=" ")
print()
print()
#Diagonally Down
'''
13
9 14
5 10 15
1 6 11 16
2 7 12
3 8
4
'''
for i in range(r + c - 1):
k1 = 0 if i < c else i - c + 1
k2 = 0 if i < r else i - r + 1
for j in range(i - k2, k1-1, -1):
print(a[r-j-1][i-j], end=" ")
print()
'''Also try:
It is easier to print the diagonals once you observe the pattern below:
+-------------------------+---------------------------+
| 1 2 3 4 | 1 2 3 4 |
| 5 6 7 8 | 5 6 7 8 |
| 9 10 11 12 | 9 10 11 12 |
| 13 14 15 16 | 13 14 15 16 |
+-------------------------|---------------------------+
Just add blank spaces and print vertically
'''
| [ 7061, 6, 198, 9800, 1058, 3740, 1378, 12567, 13, 785, 14, 13587, 710, 74, 7061, 6, 198, 198, 64, 796, 16410, 16, 11, 362, 11, 513, 11, 604, 4357, 198, 220, 220, 220, 220, 685, 20, 11, 718, 11, 767, 11, 807, 4357, 198, 220,
220... | 1.893401 | 591 |
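The closing comment of this snippet suggests a second way to get the same diagonals: shift row i right by i positions and read the columns. A short sketch of that padding idea for the same 4x4 matrix (the padding helper is not part of the original code):

a = [[1, 2, 3, 4],
     [5, 6, 7, 8],
     [9, 10, 11, 12],
     [13, 14, 15, 16]]
r, c = len(a), len(a[0])

# Row i gets i blanks in front; reading each column bottom-to-top then
# reproduces the "Diagonally Up" traversal printed above.
padded = [[None] * i + row + [None] * (r - 1 - i) for i, row in enumerate(a)]
for col in range(r + c - 1):
    vals = [padded[row][col] for row in range(r - 1, -1, -1) if padded[row][col] is not None]
    print(*vals)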
from importlib import import_module
import os
import sys
import click
from .core import (
generate_url,
create_xml_url
)
XML_HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:image="http://www.google.com/schemas/sitemap-image/1.1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
"""
XML_FOOTER = "</urlset>"
@click.option('--output', default='sitemap.xml')
@click.argument('config')
@click.command()
def create_sitemap(config, output):
"""
Create final sitemap
"""
# import config
# add current dir to sys.path to find module in your directory
current_dir = os.getcwd()
sys.path.insert(0, current_dir)
obj = import_module(config)
configs = {
key: item
for key, item in obj.__dict__.items()
if key.isupper()
}
domain = configs['DOMAIN']
routes = configs['ROUTES']
queries = configs.get('QUERIES', {})
with open(output, mode='w') as f:
f.write(XML_HEADER)
for url in generate_url(domain, routes, queries):
f.write(create_xml_url(url))
f.write(XML_FOOTER)
print(output, 'created.')
if __name__ == "__main__":
create_sitemap()
| [ 6738, 1330, 8019, 1330, 1330, 62, 21412, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 3904, 198, 198, 6738, 764, 7295, 1330, 357, 198, 220, 220, 220, 7716, 62, 6371, 11, 198, 220, 220, 220, 2251, 62, 19875, 62, 6371, 198,
... | 2.247191 | 623 |
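create_sitemap imports a plain Python module by name and keeps only its upper-case attributes, so a config module needs little more than DOMAIN, ROUTES and an optional QUERIES mapping. A minimal sketch of such a module; the exact shape generate_url() expects for ROUTES and QUERIES is defined in the .core module, which is not shown here, so these values are purely illustrative:

# sitemap_config.py -- hypothetical config module, e.g. invoked as: create_sitemap sitemap_config
DOMAIN = "https://example.com"
ROUTES = ["/", "/about", "/blog"]
QUERIES = {}  # optional; create_sitemap falls back to {} when the attribute is missing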
from zzcore import StdAns, mysakuya
import requests
from config import LOLIKEY
| [ 6738, 1976, 89, 7295, 1330, 520, 67, 2025, 82, 11, 616, 82, 29863, 198, 11748, 7007, 198, 198, 6738, 4566, 1330, 35513, 40, 20373, 628, 198
] | 3.153846 | 26 |
"""
Locus-based Genetic Algorithm (LGA)
@auth: Yu-Hsiang Fu
@date: 2016/04/28
@update: 2018/03/26
"""
# --------------------------------------------------------------------------------
# 1. Import modules
# --------------------------------------------------------------------------------
# import modules
import copy as c
import networkx as nx
import numpy as np
import pickle
import random as r
# import custom modules
import util.data_structure.disjoint_set as djs
import util.handler.edgelist_handler as eh
import util.handler.pickle_handler as ph
import util.handler.pairvalue_handler as pvh
# import folder-constant
from util.constant.constant_folder import FOLDER_EDGELIST
from util.constant.constant_folder import FOLDER_FILE
# import graph-constant
from util.constant.constant_graph import NODE_COMMUNITY
from util.constant.constant_graph import NODE_DEGREE
from util.constant.constant_graph import NODE_LAYOUT_XY
# --------------------------------------------------------------------------------
# 2. Define variables
# --------------------------------------------------------------------------------
# program variable
GENE_TO_NODE = {}
NODE_TO_GENE = {}
NEIGHBOR_LIST = {}
# GA variable
IDV_LENGTH = 0
IDV_FITNESS = 'fitness'
IDV_GENOTYPE = 'genotype'
IDV_PHENOTYPE = 'phenotype'
# --------------------------------------------------------------------------------
# 3. Define functions
# --------------------------------------------------------------------------------
# --------------------------------------------------
# generate function
# --------------------------------------------------
# --------------------------------------------------
# GA operator
# --------------------------------------------------
# --------------------------------------------------
# --------------------------------------------------------------------------------
# 4.Main function
# --------------------------------------------------------------------------------
if __name__ == "__main__":
main_function()
| [ 37811, 201, 198, 43, 10901, 12, 3106, 42295, 978, 42289, 357, 43, 9273, 8, 201, 198, 201, 198, 31, 18439, 25, 10605, 12, 39, 13396, 648, 13333, 201, 198, 31, 4475, 25, 1584, 14, 3023, 14, 2078, 201, 198, 31, 19119, 25, 2864, 14,
... | 3.830018 | 553 |
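The imports and module-level constants above point at the usual locus-based representation: each gene holds one neighbour of its node, and the phenotype (a community partition) is obtained by treating the gene-to-neighbour links as edges and taking connected components, which is what a disjoint-set structure is typically used for. A small decoding sketch under that assumption; the author's own generate/decode functions are elided in this row, and networkx connected components stand in for the imported disjoint-set helper:

import networkx as nx

def decode_genotype(genotype, node_order):
    # Locus-based decoding: gene i links node_order[i] to the neighbour it stores;
    # the communities are the connected components of those links.
    links = nx.Graph()
    links.add_nodes_from(node_order)
    for i, node in enumerate(node_order):
        links.add_edge(node, genotype[i])  # genotype[i] is assumed to be a neighbour of node
    return [set(c) for c in nx.connected_components(links)]

# Tiny example: two triangles joined by a single bridge edge.
g = nx.Graph([(0, 1), (1, 2), (0, 2), (2, 3), (3, 4), (4, 5), (3, 5)])
genotype = [1, 2, 0, 4, 5, 3]  # every node points at a neighbour inside its own triangle
print(decode_genotype(genotype, sorted(g.nodes())))  # [{0, 1, 2}, {3, 4, 5}]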
from tilapia.lib.provider.chains.bch.provider import BCHProvider
from tilapia.lib.provider.chains.btc.clients.blockbook import BlockBook # noqa
| [ 6738, 21502, 499, 544, 13, 8019, 13, 15234, 1304, 13, 38861, 13, 65, 354, 13, 15234, 1304, 1330, 347, 3398, 29495, 198, 6738, 21502, 499, 544, 13, 8019, 13, 15234, 1304, 13, 38861, 13, 18347, 66, 13, 565, 2334, 13, 9967, 2070,
1330,... | 2.9 | 50 |