content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import argparse
import torch
import onnx
from metro.modeling.bert import BertConfig, METRO
from metro.modeling._smpl import SMPL, Mesh
from metro.modeling.hrnet.hrnet_cls_net_featmaps import get_cls_net
from metro.modeling.hrnet.config import config as hrnet_config
from metro.modeling.hrnet.config import update_config as hrnet_update_config
import metro.modeling.data.config as cfg
from metro.utils.renderer import Renderer, visualize_reconstruction, visualize_reconstruction_test, visualize_reconstruction_no_text, visualize_reconstruction_and_att_local
from metro.utils.geometric_layers import orthographic_projection
from metro.utils.logger import setup_logger
from metro.utils.miscellaneous import mkdir, set_seed
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
319,
77,
87,
198,
6738,
24536,
13,
4666,
10809,
13,
4835,
1330,
22108,
16934,
11,
31243,
13252,
198,
6738,
24536,
13,
4666,
10809,
13557,
5796,
489,
1330,
9447,
6489,
... | 3.285106 | 235 |
from random import randint
print(' === Vamos jogar par ou ímpar? === \n\n')
venceu = 0
jog = 0
while True:
num = int(input('\n\nDigite um número para jogar: '))
jog += 1
escolha = str(input('\n\nDigite P ou I para Par ou Ímpar: ')).upper().strip()[0]
compu = randint(0, 10)
if escolha == 'P':
if (num + compu) % 2 == 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu += 1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
if escolha == 'I':
if (num + compu) % 2 != 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu +=1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
print('\n\nVocê jogou {} veze(s) e ganhou {} partida(s)'.format(jog, venceu))
| [
6738,
4738,
1330,
43720,
600,
198,
4798,
10786,
24844,
569,
321,
418,
48342,
283,
1582,
267,
84,
6184,
255,
76,
1845,
30,
24844,
3467,
77,
59,
77,
11537,
198,
574,
344,
84,
796,
657,
198,
73,
519,
796,
657,
198,
4514,
6407,
25,
19... | 1.905138 | 506 |
#!/usr/bin/python3
import argparse
import collections
import pandas as pd
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate the results table of the social instances")
parser.add_argument("csv", help="The CSV input file")
parser.add_argument("gurobi_csv", help="The Gurobi CSV input file")
parser.add_argument("gurobi_fpt_comparison_csv",
help="The FPT results to compare to Gurobi")
parser.add_argument(
'--time-limit',
type=int,
help="The maximum running time to use in seconds, default: 1000",
default=1000)
args = parser.parse_args()
df = pd.read_csv(args.csv)
filtered_df = df[df['Total Time [s]'] <= args.time_limit]
gurobi_df = pd.read_csv(args.gurobi_csv)
filtered_gurobi_df = gurobi_df[~gurobi_df.Algorithm.str.contains('Heuristic')]
gurobi_fpt_df = pd.read_csv(args.gurobi_fpt_comparison_csv)
filtered_gurobi_fpt_df = gurobi_fpt_df[gurobi_fpt_df['Total Time [s]'] <= args.time_limit]
fpt_algo = 'FPT-LS-MP'
ilp_algo = 'ILP-S-R-C4'
all_solutions = False
fpt_data = filtered_gurobi_fpt_df[(filtered_gurobi_fpt_df.Algorithm == fpt_algo) & (filtered_gurobi_fpt_df['All Solutions'] == all_solutions)]
ilp_data = filtered_gurobi_df[filtered_gurobi_df.Algorithm == ilp_algo]
general_data = fpt_data.groupby('Graph')[['n', 'm']].first()
solved_data = ilp_data.groupby('Graph')['Solved'].any()
fpt_st_data = get_max_k_time(fpt_data[~fpt_data.MT])
fpt_mt_data = get_max_k_time(fpt_data[fpt_data.MT])
ilp_st_data = get_max_k_time(ilp_data[~ilp_data.MT])
ilp_mt_data = get_max_k_time(ilp_data[ilp_data.MT])
df = pd.DataFrame(collections.OrderedDict([
(('', '', 'Graph'), general_data.index),
(('', '', 'n'), general_data.n),
(('', '', 'm'), general_data.m),
#(('', '', 'Solved'), solved_data),
(('FPT', '1 core', 'k'), fpt_st_data.k),
(('FPT', '1 core', 'Time [s]'), fpt_st_data['Total Time [s]']),
(('FPT', '16 cores', 'k'), fpt_mt_data.k),
(('FPT', '16 cores', 'Time [s]'), fpt_mt_data['Total Time [s]']),
# subtract one for unsolved graphs
(('ILP', '1 core', 'k'), ilp_st_data.k - (~ilp_st_data.Solved)),
(('ILP', '1 core', 'Time [s]'), ilp_st_data['Total Time [s]']),
(('ILP', '16 cores', 'k'), ilp_mt_data.k - (~ilp_mt_data.Solved)),
(('ILP', '16 cores', 'Time [s]'), ilp_mt_data['Total Time [s]']),
]))
df.sort_values(by=('FPT', '1 core', 'Time [s]'), inplace=True)
print(df.to_latex(index=False, formatters=
{('', '', 'Solved') : lambda x : 'Yes' if x else 'No'},
float_format=lambda x : "{:.2f}".format(x), na_rep=" "))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
19... | 2.106287 | 1,336 |
#!/usr/bin/env python
# a bar plot with errorbars
import matplotlib.pyplot as plt
import numpy as np
import string
graph_dir = "charts/"
N = 1
width = 0.01 # the width of the bars
ind = np.arange(N) # the x locations for the groups
fig, ax = plt.subplots()
# initial run
(tdbMeans, tdbStd) = readFile("scaling_tdb.txt")
tdbMeans = [1.557289895]
rects1 = ax.bar(ind, tdbMeans, width, color='#3c78d8')
# update 10
#oneMeans= [0.3277027027]
#oneMeans = [0]
#rects2 = ax.bar(ind+width, oneMeans, width, color='#6aa84f')
# update 100
twoMeans = [0.9172152797]
#twoMeans = [0]
rects3 = ax.bar(ind+width, twoMeans, width, color='#e69138')
# update 1000
threeMeans = [1.136780069]
#threeMeans = [0]
rects4 = ax.bar(ind+width*2, threeMeans, width, color='#6aa84f')
#f1c232')
fontsize = '20'
# add some text for labels, title and axes ticks
ax.set_xlabel('Machines', fontsize=fontsize)
ax.set_xlim([-width, (N - 1) + 4 * width])
ax.set_ylabel('Speedup', fontsize=fontsize)
ax.set_title('Distributed', fontsize=fontsize)
ax.set_xticks(ind+width * 1.5)
ax.set_xticklabels( ('2'))
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=fontsize)
#ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('Initial Run', 'Update 10', 'Update 100', 'Update 1000'), loc='best' )
ax.legend( (rects1[0], rects3[0], rects4[0]), ('Initial Run', 'Update 100', 'Update 1000'), loc='best' )
#ax.legend( (rects1[0], rects2[0]), ('Initial Run', 'Update 10'), loc='best' )
#ax.legend( (rects1[0],), ('Initial Run',), loc='best' )
#plt.show()
plt.gcf().set_size_inches(7, 10)
plt.savefig(graph_dir + 'distributed_scaling.png')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
257,
2318,
7110,
351,
4049,
34046,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4731,
198,
198,
34960,
62,
15908,
79... | 2.371469 | 708 |
# -*- coding: utf-8 -*-
"""
Description:
EPython's custom exceptions
Author:
Ray Gomez
Date:
3/20/21
"""
from epython.errors import filters, poke, network, ssh, util
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
11828,
25,
198,
220,
220,
220,
14724,
7535,
338,
2183,
13269,
198,
198,
13838,
25,
198,
220,
220,
220,
7760,
33231,
198,
198,
10430,
25,
198,
220,
220,
220... | 2.585714 | 70 |
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from .mask_rcnn_fpn import model
[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
model.roi_heads.update(
num_classes=1,
keypoint_in_features=["p2", "p3", "p4", "p5"],
keypoint_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_keypoints=17,
conv_dims=[512] * 8,
loss_normalizer="visible",
),
)
# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
# 1000 proposals per-image is found to hurt box AP.
# Therefore we increase it to 1500 per-image.
model.proposal_generator.post_nms_topk = (1500, 1000)
# Keypoint AP degrades (though box AP improves) when using plain L1 loss
model.roi_heads.box_predictor.smooth_l1_beta = 0.5
| [
6738,
4886,
1313,
17,
13,
11250,
1330,
406,
12582,
14134,
355,
406,
198,
6738,
4886,
1313,
17,
13,
75,
6962,
1330,
25959,
22882,
198,
6738,
4886,
1313,
17,
13,
4666,
10809,
13,
7742,
364,
1330,
15107,
4061,
970,
263,
198,
6738,
4886,
... | 2.389452 | 493 |
import chainer
from chainer import functions
import numpy as cp
# import cupy as cp
import json
'''
whole framework: a->(p->A->s)
'''
if __name__ == '__main__':
import chainer
from chainer import serializers
from chainer.iterators import SerialIterator
from chainer_chemistry.dataset.converters import concat_mols
from dataset import uspto_dataset
from models.nn import ggnngwm_stop_step, ggnngwm_atom, ggnngwm_pair_step, ggnngwn_action_step
import logging
import argparse
from distutils.util import strtobool
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='')
parser.add_argument('--hdim', type=int, default=100)
parser.add_argument('--n_layers', type=int, default=3)
parser.add_argument('--nn_hidden_dim', type=int, default=50)
parser.add_argument('--concat_hidden', type=strtobool, default='false')
parser.add_argument('--weight_tying', type=strtobool, default='false')
parser.add_argument('--gwm', type=strtobool, default='true')
parser.add_argument('--topK', type=int, default=10)
parser.add_argument('--g_stop', default='inference/snapshot_stop')
parser.add_argument('--g_atom', default='inference/snapshot_atom')
parser.add_argument('--g_pair', default='inference/snapshot_pair')
parser.add_argument('--g_action', default='inference/snapshot_action')
parser.add_argument('--test_path', default='dataset/test.txt.proc')
parser.add_argument('--out', default='result_all/inference1')
args = parser.parse_args()
g_stop = ggnngwm_stop_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm)
chainer.serializers.load_npz(args.g_stop, g_stop)
g_atom = ggnngwm_atom(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm,
topK=args.topK)
chainer.serializers.load_npz(args.g_atom, g_atom)
g_pair = ggnngwm_pair_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm,
topK=args.topK)
chainer.serializers.load_npz(args.g_pair, g_pair)
g_action = ggnngwn_action_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm)
chainer.serializers.load_npz(args.g_action, g_action)
# chainer.cuda.get_device_from_id(0).use()
# g_stop.to_gpu()
valid_raw = uspto_dataset.read_data(args.test_path)
valid_dataset = uspto_dataset.USPTO_dataset(valid_raw)
valid_iter = SerialIterator(valid_dataset, 20, repeat=False, shuffle=False)
one_part_acc = []
for batch in valid_iter:
# get one batch of test data
f_atoms, f_bonds, super_node_x, \
atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select, \
action, step_num, \
stop_idx, \
sample_index = concat_mols(batch, device=-1)
atom_label -= 1
mask_reagents -= 2
mask_reactants_reagents -= 2
action -= 1
with chainer.using_config('train', False):
inference(g_stop, g_atom, g_pair, g_action,
f_atoms, f_bonds, super_node_x,
atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select,
action, step_num,
stop_idx,
sample_index, valid_raw, args.out)
| [
11748,
6333,
263,
198,
6738,
6333,
263,
1330,
5499,
198,
198,
11748,
299,
32152,
355,
31396,
198,
2,
1330,
6508,
88,
355,
31396,
198,
198,
11748,
33918,
628,
198,
198,
7061,
6,
198,
1929,
2305,
9355,
25,
257,
3784,
7,
79,
3784,
32,
... | 2.053458 | 2,039 |
input_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/project_twitter_data.csv","r")
output_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/resulting_data.csv","w")
punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@']
# lists of words to use
positive_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/positive_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
positive_words.append(lin.strip())
negative_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/negative_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
negative_words.append(lin.strip())
write_file(output_file)
input_file.close()
output_file.close()
| [
15414,
62,
7753,
796,
1280,
7203,
35,
14079,
3666,
13283,
14,
16775,
82,
14,
37906,
30297,
14,
43220,
2655,
64,
62,
17319,
62,
17,
14,
16624,
14,
16302,
62,
6956,
62,
7890,
13,
40664,
2430,
81,
4943,
201,
198,
22915,
62,
7753,
796,
... | 2.378016 | 373 |
import os
import time
import math
import random
import numpy as np
import argparse
import torch
import torch.nn as nn
from yaml import parse
from gnn_data import GNN_DATA
from gnn_model import GIN_Net2, GIN_Net3
from utils import Metrictor_PPI, print_file
from tensorboardX import SummaryWriter
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
parser = argparse.ArgumentParser(description='Train Model')
parser.add_argument('--description', default=None, type=str,
help='train description')
parser.add_argument('--ppi_path', default=None, type=str,
help="ppi path")
parser.add_argument('--pseq_path', default=None, type=str,
help="protein sequence path")
parser.add_argument('--vec_path', default=None, type=str,
help='protein sequence vector path')
parser.add_argument('--split_new', default=None, type=boolean_string,
help='split new index file or not')
parser.add_argument('--split_mode', default=None, type=str,
help='split method, random, bfs or dfs')
parser.add_argument('--train_valid_index_path', default=None, type=str,
help='cnn_rnn and gnn unified train and valid ppi index')
parser.add_argument('--use_lr_scheduler', default=None, type=boolean_string,
help="train use learning rate scheduler or not")
parser.add_argument('--save_path', default=None, type=str,
help='model save path')
parser.add_argument('--graph_only_train', default=None, type=boolean_string,
help='train ppi graph conctruct by train or all(train with test)')
parser.add_argument('--batch_size', default=None, type=int,
help="gnn train batch size, edge batch size")
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--epochs', default=None, type=int,
help='train epoch number')
if __name__ == "__main__":
main() | [
11748,
28686,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
331,
43695,
1330,
21136,
198,
198,
... | 2.47875 | 800 |
import random
import sys
textfile = "wordlist.txt"
userinput = True
wordcounts = dict()
words = dict()
#Print an error and exit gracefully if wordlist is not found
try:
open(textfile)
except FileNotFoundError:
print(textfile + " not find. Please make sure it exists in current directory.")
sys.exit()
#Open the wordlist.txt and read each line into the dictionary wordcounts in a with the word length as the key
with open(textfile) as infile:
for line in infile:
for word in line.split():
wordcounts.setdefault(len(word),[]).append(word)
print("Welcome to Hangman")
#Ask for user input, check if it's an int bigger than 2 and smaller than the maximum length
while userinput:
wordlength = input("What length word would you like me to choose? ==> ")
try:
int(wordlength)
except ValueError:
print("Input is invalid, please try again")
continue
wordlength = int(wordlength)
words = wordcounts.get(wordlength)
if not words or wordlength < 2:
print("Input is invalid, please try again")
else:
userinput = False
#choose a random word with the specified length
word = random.choice(words)
#create a list of *s with the same length as the chosen word
filled_word = ['*'] * len(word)
guesses_left = 7
#loop: ask for user input for a letter or a word, give and error and try again if not
while guesses_left > 0:
print("\nWord: " + ''.join(filled_word))
print("You have " + str(guesses_left) + " guesses remaining.")
guess = input("Type a letter or a word guess: ")
if not guess.isalpha():
print("Wrong input.\n")
continue
elif len(guess) == 1: #letter guess, check the occurence of the letter, 'reveal' the letter in the word, or if it doesn't exist, decrement guesses_left and restart loop
guess = guess.lower()
index = 0
found = []
while index < len(word):
index = word.find(guess, index)
if index >= 0:
found.append(index)
index += 1
elif len(found) > 0:
print("There is " + str(len(found)) + " " + guess + "!")
break
else:
print("Sorry, there are no " + guess + "'s.")
guesses_left -= 1
break
for x in found:
filled_word[x] = guess
if ''.join(filled_word) == word:
print("\nCongratulations, you guessed it!")
break
else: #word guess, check if the guess matches the word. End the game if guessed correctly, otherwise decrement guesses_left and restart loop
guess = guess.lower()
if guess == word:
print("\nCongratulations, you guessed it!")
break
else:
guesses_left -= 1
print("Sorry, the word is not '" + guess + "'")
if guesses_left == 0: #if the user runs out of guesses, reveal the word
print("\nYou are out of guesses. The word was: " + word)
print("Game Over") | [
11748,
4738,
198,
11748,
25064,
198,
198,
5239,
7753,
796,
366,
4775,
4868,
13,
14116,
1,
198,
7220,
15414,
796,
6407,
198,
4775,
9127,
82,
796,
8633,
3419,
198,
10879,
796,
8633,
3419,
198,
198,
2,
18557,
281,
4049,
290,
8420,
11542,... | 2.51405 | 1,210 |
from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
if __name__ == "__main__":
"""Command line entry point for importer."""
args = importer.handle_args()
dataset = Dataset("se-ship", "sv", SeShipSv)
dataset.data_files = {
"functions": "se-ship_(sv)_functions.json"}
importer.main(args, dataset)
| [
6738,
33075,
1330,
33075,
11,
16092,
292,
316,
198,
11748,
848,
4337,
62,
26791,
355,
3384,
4487,
198,
11748,
848,
4337,
355,
848,
4337,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
37227... | 2.735294 | 136 |
from tensorflow.keras import backend as K
abs_definitions = [
{'name': 'add_class',
'nargs': '+',
'type': int,
'help': 'flag to add abstention (per task)'},
{'name': 'alpha',
'nargs': '+',
'type': float,
'help': 'abstention penalty coefficient (per task)'},
{'name': 'min_acc',
'nargs': '+',
'type': float,
'help': 'minimum accuracy required (per task)'},
{'name': 'max_abs',
'nargs': '+',
'type': float,
'help': 'maximum abstention fraction allowed (per task)'},
{'name': 'alpha_scale_factor',
'nargs': '+',
'type': float,
'help': 'scaling factor for modifying alpha (per task)'},
{'name': 'init_abs_epoch',
'action': 'store',
'type': int,
'help': 'number of epochs to skip before modifying alpha'},
{'name': 'n_iters',
'action': 'store',
'type': int,
'help': 'number of iterations to iterate alpha'},
{'name': 'acc_gain',
'type': float,
'default': 5.0,
'help': 'factor to weight accuracy when determining new alpha scale'},
{'name': 'abs_gain',
'type': float,
'default': 1.0,
'help': 'factor to weight abstention fraction when determining new alpha scale'},
{'name': 'task_list',
'nargs': '+',
'type': int,
'help': 'list of task indices to use'},
{'name': 'task_names',
'nargs': '+',
'type': int,
'help': 'list of names corresponding to each task to use'},
]
| [
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
30203,
355,
509,
198,
198,
8937,
62,
4299,
50101,
796,
685,
198,
220,
220,
220,
1391,
6,
3672,
10354,
705,
2860,
62,
4871,
3256,
198,
220,
220,
220,
220,
705,
77,
22046,
10354,
705,
10,
... | 2.371383 | 622 |
# -*- coding: utf-8 -*-
'''
Authored by: Tim Keefer
Licensed under CDDL 1.0
'''
from datetime import datetime, timedelta
from ebaysdk.trading import Connection as Trading
from ebaysdk.poller import parse_args, file_lock
from ebaysdk import log
if __name__ == '__main__':
(opts, args) = parse_args("usage: python -m ebaysdk.poller.orders [options]")
poller = Poller(opts, Storage())
poller.run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
30515,
1850,
416,
25,
5045,
3873,
41027,
198,
26656,
15385,
739,
6458,
19260,
352,
13,
15,
198,
7061,
6,
198,
198,
6738,
4818,
8079,
1330,
4818,
807... | 2.694805 | 154 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
CONF = config.CONF
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.418605 | 215 |
from collections import Counter, deque
from random import choice, randint, shuffle
from typing import Dict, Iterable, List, Tuple
import numpy as np
from utils import helper_functions
import pathfinding
import cProfile
import functools
import pstats
import tempfile
| [
6738,
17268,
1330,
15034,
11,
390,
4188,
201,
198,
6738,
4738,
1330,
3572,
11,
43720,
600,
11,
36273,
201,
198,
6738,
19720,
1330,
360,
713,
11,
40806,
540,
11,
7343,
11,
309,
29291,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
... | 3.25 | 88 |
import open3d as o3d
import numpy as np
# def draw_geometries(pcds):
# """
# Draw Geometries
# Args:
# - pcds (): [pcd1,pcd2,...]
# """
# o3d.visualization.draw_geometries(pcds)
# def get_o3d_FOR(origin=[0, 0, 0], size=10):
# """
# Create a FOR that can be added to the open3d point cloud
# """
# mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=size)
# mesh_frame.translate(origin)
# return mesh_frame
def vector_magnitude(vec):
"""
Calculates a vector's magnitude.
Args:
- vec ():
"""
magnitude = np.sqrt(np.sum(vec ** 2))
return magnitude
def calculate_zy_rotation_for_arrow(vec):
"""
Calculates the rotations required to go from the vector vec to the
z axis vector of the original FOR. The first rotation that is
calculated is over the z axis. This will leave the vector vec on the
XZ plane. Then, the rotation over the y axis.
Returns the angles of rotation over axis z and y required to
get the vector vec into the same orientation as axis z
of the original FOR
Args:
- vec ():
"""
# Rotation over z axis of the FOR
gamma = np.arctan(vec[1] / vec[0])
Rz = np.array(
[
[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1],
]
)
# Rotate vec to calculate next rotation
vec = Rz.T @ vec.reshape(-1, 1)
vec = vec.reshape(-1)
# Rotation over y axis of the FOR
beta = np.arctan(vec[0] / (vec[2] + 1e-8))
Ry = np.array(
[[np.cos(beta), 0, np.sin(beta)], [0, 1, 0], [-np.sin(beta), 0, np.cos(beta)]]
)
return (Rz, Ry)
def get_arrow(scale=10):
"""
Create an arrow in for Open3D
"""
cone_height = scale * 0.2
cylinder_height = scale * 0.8
cone_radius = scale / 10
cylinder_radius = scale / 20
mesh_frame = o3d.geometry.TriangleMesh.create_arrow(
cone_radius=0.5,
cone_height=cone_height,
cylinder_radius=0.25,
cylinder_height=cylinder_height,
)
return mesh_frame
def create_arrow(origin=[0, 0, 0], end=None, color=[1, 0, 0], vec=None):
"""
Creates an arrow from an origin point to an end point,
or create an arrow from a vector vec starting from origin.
Args:
- end (): End point. [x,y,z]
- vec (): Vector. [i,j,k]
"""
scale = 10
Ry = Rz = np.eye(3)
T = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
T[:3, -1] = origin
if end is not None:
vec = np.array(end) - np.array(origin)
elif vec is not None:
vec = np.array(vec)
if end is not None or vec is not None:
scale = vector_magnitude(vec)
Rz, Ry = calculate_zy_rotation_for_arrow(vec)
mesh = get_arrow(scale)
# Create the arrow
mesh.rotate(Ry, center=np.array([0, 0, 0]))
mesh.rotate(Rz, center=np.array([0, 0, 0]))
mesh.translate(origin)
mesh.paint_uniform_color(color)
return mesh
# # Create a Cartesian Frame of Reference
# FOR = get_o3d_FOR()
# # Create an arrow from point (5,5,5) to point (10,10,10)
# arrow = get_arrow([5,5,5],[10,10,10])
# # Create an arrow representing vector vec, starting at (5,5,5)
# # arrow = get_arrow([5,5,5],vec=[5,5,5])
# # Create an arrow in the same place as the z axis
# # arrow = get_arrow()
# # Draw everything
# draw_geometries([FOR,arrow])
| [
11748,
1280,
18,
67,
355,
267,
18,
67,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
825,
3197,
62,
469,
908,
1678,
7,
14751,
9310,
2599,
198,
2,
220,
220,
220,
220,
37227,
198,
2,
220,
220,
220,
220,
15315,
2269,
908,
1678,
... | 2.263261 | 1,527 |
"""
Modules that can consolidate inputs from different sources
and produce combined output file (typically JSON).
"""
import os
import json
from pyveg.src.file_utils import save_json, get_tag
from pyveg.src.date_utils import get_date_strings_for_time_period
from pyveg.src.pyveg_pipeline import BaseModule, logger
class VegAndWeatherJsonCombiner(CombinerModule):
"""
Expect directory structures like:
<something>/<input_veg_location>/<date>/network_centralities.json
<something>/<input_weather_location>/RESULTS/weather_data.json
"""
def set_default_parameters(self):
"""
See if we can set our input directories from the output directories
of previous Sequences in the pipeline.
The pipeline (if there is one) will be a grandparent,
i.e. self.parent.parent
and the names of the Sequences we will want to combine should be
in the variable self.depends_on.
"""
super().set_default_parameters()
# get the parent Sequence and Pipeline
if self.parent and self.parent.parent:
# we're running in a Pipeline
for seq_name in self.parent.depends_on:
seq = self.parent.parent.get(seq_name)
if seq.data_type == "vegetation":
self.input_veg_sequence = seq_name
elif seq.data_type == "weather":
self.input_weather_sequence = seq_name
if not (
"input_veg_sequence" in vars(self)
and "input_weather_sequence" in vars(self)
):
raise RuntimeError(
"{}: Unable to find vegetation and weather sequences in depends_on".format(
self.name, self.depends_on
)
)
# now get other details from the input sequences
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
self.input_veg_location = veg_sequence.output_location
self.input_veg_location_type = veg_sequence.output_location_type
self.veg_collection = veg_sequence.collection_name
weather_sequence = self.parent.parent.get(self.input_weather_sequence)
self.input_weather_location = weather_sequence.output_location
self.input_weather_location_type = weather_sequence.output_location_type
self.weather_collection = weather_sequence.collection_name
else:
# No parent Sequence or Pipeline - perhaps running standalone
self.weather_collection = "ECMWF/ERA5/MONTHLY"
self.veg_collection = "COPERNICUS/S2"
self.input_veg_location_type = "local"
self.input_weather_location_type = "local"
self.output_location_type = "local"
if not "output_filename" in vars(self):
self.output_filename = "results_summary.json"
def combine_json_lists(self, json_lists):
"""
If for example we have json files from the NetworkCentrality
and NDVI calculators, all containing lists of dicts for sub-images,
combine them here by matching by coordinate.
"""
if len(json_lists) == 0:
return None
elif len(json_lists) == 1:
return json_lists[0]
## any way to do this without a huge nested loop?
# loop over all the lists apart from the first, which we will add to
for jlist in json_lists[1:]:
# loop through all items (sub-images) in each list
for p in jlist:
match_found = False
# loop through all items (sub-images) in the first/output list
for p0 in json_lists[0]:
# match by latitude, longitude.
if (p["latitude"], p["longitude"], p["date"]) == (
p0["latitude"],
p0["longitude"],
p0["date"],
):
match_found = True
for k, v in p.items():
if not k in p0.keys():
p0[k] = v
break
if not match_found:
json_lists[0].append(p)
return json_lists[0]
def get_veg_time_series(self):
"""
Combine contents of JSON files written by the NetworkCentrality
and NDVI calculator Modules.
If we are running in a Pipeline, get the expected set of date strings
from the vegetation sequence we depend on, and if there is no data
for a particular date, make a null entry in the output.
"""
dates_with_data = self.list_directory(
self.input_veg_location, self.input_veg_location_type
)
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
start_date, end_date = veg_sequence.date_range
time_per_point = veg_sequence.time_per_point
date_strings = get_date_strings_for_time_period(
start_date, end_date, time_per_point
)
else:
date_strings = dates_with_data
date_strings.sort()
veg_time_series = {}
for date_string in date_strings:
if not date_string in dates_with_data:
veg_time_series[date_string] = None
# if there is no JSON directory for this date, add a null entry
if "JSON" not in self.list_directory(
self.join_path(self.input_veg_location, date_string),
self.input_veg_location_type,
):
veg_time_series[date_string] = None
continue
# find the subdirs of the JSON directory
subdirs = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON"),
self.input_veg_location_type,
)
veg_lists = []
for subdir in subdirs:
logger.debug(
"{}: getting vegetation time series for {}".format(
self.name,
self.join_path(
self.input_veg_location, date_string, "JSON", subdir
),
)
)
# list the JSON subdirectories and find any .json files in them
dir_contents = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON", subdir),
self.input_veg_location_type,
)
json_files = [
filename for filename in dir_contents if filename.endswith(".json")
]
for filename in json_files:
j = self.get_json(
self.join_path(
self.input_veg_location,
date_string,
"JSON",
subdir,
filename,
),
self.input_veg_location_type,
)
veg_lists.append(j)
# combine the lists from the different subdirectories
veg_time_point = self.combine_json_lists(veg_lists)
# update the final veg_time_series dictionary
veg_time_series[date_string] = veg_time_point
return veg_time_series
def check_output_dict(self, output_dict):
"""
For all the keys (i.e. dates) in the vegetation time-series,
count how many have data for both veg and weather
"""
veg_dates = output_dict[self.veg_collection]["time-series-data"].keys()
weather_dates = output_dict[self.weather_collection]["time-series-data"].keys()
for date in veg_dates:
if output_dict[self.veg_collection]["time-series-data"][date] \
and date in weather_dates \
and output_dict[self.weather_collection]["time-series-data"][date]:
self.run_status["succeeded"] += 1
return
def get_metadata(self):
"""
Fill a dictionary with info about this job - coords, date range etc.
"""
metadata = {}
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
metadata["start_date"], metadata["end_date"] = veg_sequence.date_range
metadata["time_per_point"] = veg_sequence.time_per_point
metadata["longitude"] = veg_sequence.coords[0]
metadata["latitude"] = veg_sequence.coords[1]
metadata["collection"] = veg_sequence.collection_name
metadata["num_data_points"] = self.run_status["succeeded"]
if "config_filename" in vars(self.parent.parent):
metadata["config_filename"] = self.parent.parent.config_filename
if "coords_id" in vars(self.parent.parent):
metadata["coords_id"] = self.parent.parent.coords_id
if "pattern_type" in vars(self.parent.parent):
metadata["pattern_type"] = self.parent.parent.pattern_type
metadata["tag"] = get_tag()
return metadata
| [
37811,
198,
5841,
5028,
326,
460,
38562,
17311,
422,
1180,
4237,
198,
392,
4439,
5929,
5072,
2393,
357,
48126,
19449,
737,
198,
37811,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
6738,
12972,
303,
70,
13,
10677,
13,
7753,
62,
26791,... | 2.063534 | 4,596 |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 5.66814e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202693,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.369616,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.64004,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.367081,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.37674,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.365347,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.59398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0133989,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0968933,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0990927,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0968971,
'Execution Unit/Register Files/Runtime Dynamic': 0.112492,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.234135,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.601487,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.69879,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00365895,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00142708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00142347,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0134395,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0393685,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0952604,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.05938,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.360214,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.323547,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.57647,
'Instruction Fetch Unit/Runtime Dynamic': 0.831829,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0734183,
'L2/Runtime Dynamic': 0.0164294,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.12483,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.41181,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.5678,
'Load Store Unit/Runtime Dynamic': 1.96598,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.230368,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.460737,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0817585,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.082562,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.37675,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0599378,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.676593,
'Memory Management Unit/Runtime Dynamic': 0.1425,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0499,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.38203e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0189003,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.191359,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.210273,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.86579,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0495887,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241638,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.265616,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115524,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.186336,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0940563,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.395917,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0914043,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47088,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0501805,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0048456,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536937,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0358362,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.103874,
'Execution Unit/Register Files/Runtime Dynamic': 0.0406818,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125518,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311915,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39553,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000322059,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00013364,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00051479,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00153872,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00277904,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0344503,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.19133,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0797619,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.117009,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.5162,
'Instruction Fetch Unit/Runtime Dynamic': 0.235539,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0462159,
'L2/Runtime Dynamic': 0.00399916,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.59016,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.654742,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.043774,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.043774,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.79687,
'Load Store Unit/Runtime Dynamic': 0.914394,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.107939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.215879,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0383079,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0390002,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136249,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0130814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.358164,
'Memory Management Unit/Runtime Dynamic': 0.0520816,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7778,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.132002,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00681857,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0567221,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.195543,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.79709,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0980777,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.279723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.621299,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.189316,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.30536,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.154135,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.648812,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.121269,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.10233,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.117377,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00794077,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.090414,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0587269,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.207791,
'Execution Unit/Register Files/Runtime Dynamic': 0.0666677,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.215002,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.561929,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.96251,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.8525e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.18413e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000843616,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000904625,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000202829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0564557,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.59106,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.138931,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.191749,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.98386,
'Instruction Fetch Unit/Runtime Dynamic': 0.388242,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0398899,
'L2/Runtime Dynamic': 0.0110088,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.61893,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.15718,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0770573,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0770572,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.98281,
'Load Store Unit/Runtime Dynamic': 1.61425,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.19001,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.38002,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0674352,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0680181,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.223279,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0228233,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.49523,
'Memory Management Unit/Runtime Dynamic': 0.0908414,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.1936,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.308764,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.012299,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0906719,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.411735,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.47859,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.144,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.315792,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.828175,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.300609,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.484871,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.244747,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.03023,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.216839,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.67756,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.15646,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0126089,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.143035,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0932505,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.299495,
'Execution Unit/Register Files/Runtime Dynamic': 0.105859,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.337343,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.759655,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.61691,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000492564,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192656,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133955,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00295485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00525326,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0896441,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.70213,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.185252,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304472,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19738,
'Instruction Fetch Unit/Runtime Dynamic': 0.587575,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0331415,
'L2/Runtime Dynamic': 0.00690054,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74011,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.20486,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1225,
'Load Store Unit/Runtime Dynamic': 1.6852,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.199677,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.399355,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0708661,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0713609,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354538,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0303777,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.632382,
'Memory Management Unit/Runtime Dynamic': 0.101739,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.2524,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.411575,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0185714,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.145597,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.575743,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.57406,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.8525058798762615,
'Runtime Dynamic': 3.8525058798762615,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.210089,
'Runtime Dynamic': 0.065201,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.4839,
'Peak Power': 114.596,
'Runtime Dynamic': 18.7807,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.2738,
'Total Cores/Runtime Dynamic': 18.7155,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.210089,
'Total L3s/Runtime Dynamic': 0.065201,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | [
6477,
796,
1391,
6,
45346,
1546,
10354,
1391,
6,
30547,
10354,
352,
13,
2091,
18742,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
16286,
14,
30547,
10354,
352,
13,
2091,
18742,
11,
198,
220,
220,
220,
220,
220,
... | 2.341135 | 29,311 |
from setuptools import setup

# Distribution metadata gathered in one mapping so it is easy to scan and
# extend; setup() receives it unpacked as keyword arguments.
_METADATA = dict(
    name='imgTransformer',
    version='0.1',
    description='Apply Affine transformations to images and to their corresponding box annotations(optional).',
    url='https://bitbucket.org/aganitha/image-transformer',
    author='Daksh Varshneya',
    author_email='daksh@aganitha.ai',
    license='MIT',
    # Single top-level package; no compiled/zip-unsafe payload expected,
    # but zip_safe=False keeps the install as a plain directory.
    packages=['imgTransformer'],
    zip_safe=False,
)

setup(**_METADATA)
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
9600,
8291,
16354,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
220,
220,
6764,
11639,
44836,
6708,
500,
38226,
284,
4263,... | 2.718121 | 149 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from genomon_post_analysis import __version__
setup(name='genomon_post_analysis',
version=__version__,
description="parser result files created by genomon",
long_description="""\n
parser result files created by genomon (SV, mutaion-call and so on)""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='genomon post analysis',
author='ai okada',
author_email='genomon_team@gamil.com',
url='https://github.com/Genomon-Project/Genomon.git',
license='License of GenomonPipeline',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
scripts=['genomon_pa'],
data_files=[('config', ['genomon_post_analysis.cfg'])],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
2429,
16698,
62,
7353,
62,
20930,
1330,
11593,
9641,
834,
198,
198,
40406,
7,
3672,
11639,
5235... | 2.493827 | 405 |
import random

# Write a test fixture 'in' containing 1000 random positive integers, one per
# line. The digit count is itself randomized: x is drawn from [1, 100] and the
# value from [1, 10**x], so the file mixes tiny and astronomically large ints.
with open('in', 'w') as f:
    for _ in range(1000):  # range(): xrange() was removed in Python 3
        x = random.randint(1, 100)
        f.write(str(random.randint(1, 10**x)) + '\n')
| [
11748,
4738,
198,
198,
4480,
1280,
10786,
259,
3256,
705,
86,
11537,
355,
277,
25,
198,
220,
220,
220,
329,
4808,
287,
2124,
9521,
7,
12825,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
2124,
796,
4738,
13,
25192,
600,
7,
16,
11,... | 2.051948 | 77 |
"""
drain.types.py
~~~~~~~~~~~~~~
Contains custom types definitions and utilities
"""
from .record import Record
from typing import (
Awaitable,
AsyncIterable,
Callable,
Union,
TypeVar,
)
# Type variable for any Record subclass flowing through the pipeline.
RecordT = TypeVar("RecordT", bound=Record)
# An asynchronous stream of records feeding the pipeline.
Source = AsyncIterable[RecordT]
# A per-record transform: either a plain function or a coroutine function
# returning the (possibly modified) record.
Processor = Union[
    Callable[[RecordT], RecordT], Callable[[RecordT], Awaitable[RecordT]]
]
# A filter deciding whether a record passes through.
Predicate = Callable[[RecordT], bool]
| [
37811,
198,
67,
3201,
13,
19199,
13,
9078,
198,
15116,
8728,
4907,
198,
198,
4264,
1299,
2183,
3858,
17336,
290,
20081,
198,
37811,
198,
6738,
764,
22105,
1330,
13266,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
5851,
4548,
540,
... | 2.85034 | 147 |
"""
Functions for evaluating the performance of the model on the
squad dataset
Modified the official squad dataset evaluation script
"""
import string
import re
from collections import Counter
def normalize_answer(s):
    """Lower text and remove punctuation, articles, and extra whitespace.

    Mirrors the normalization of the official SQuAD evaluation script so
    that predictions and gold answers are compared on equal footing.

    Args:
        s: raw answer string.

    Returns:
        The normalized answer string.
    """
    # These helpers were referenced but never defined in the original,
    # which made every call raise NameError; they are restored here as
    # nested functions per the official evaluation script.
    def remove_articles(text):
        # Drop the English articles "a", "an", "the" as whole words.
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        # Collapse any whitespace run to a single space and strip ends.
        return ' '.join(text.split())

    def remove_punc(text):
        # Delete ASCII punctuation characters outright (no replacement).
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    # Lowercase first so article matching is case-insensitive.
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def evaluate(predictions, answerss):
"""
Returns a tuple of (F1 score, EM score, sentence score)
The sentence score is our evaluation method for determining the
effectiveness of finding the correct sentence within the context
paragraph that may contain the answer. This metric is much softer
than the F1 or EM score as it does not consider the difficulty in
finding the span within the sentence with the answer. The SQUAD
leaderboard and evaluation scripts only consider the F1 and EM score.
"""
f1 = sscore = total = 0
for prediction, answers in zip(predictions, answerss):
total += 1
f1 += metric_max_over_ground_truths(f1_score, prediction, answers)
sscore += metric_max_over_ground_truths(sentence_score, prediction, answers)
sscore = 100.0 * sscore / total
f1 = 100.0 * f1 / total
return {'sscore':sscore, 'f1':f1}
| [
37811,
198,
24629,
2733,
329,
22232,
262,
2854,
286,
262,
2746,
319,
262,
220,
198,
16485,
324,
27039,
198,
198,
5841,
1431,
262,
1743,
8244,
27039,
12660,
4226,
198,
37811,
198,
11748,
4731,
198,
11748,
302,
198,
6738,
17268,
1330,
150... | 3.530086 | 349 |
"""Policies
This script provides policies for RL algorithms.
Class:
* Model - (arbitrary) neural network architecture
* BetaActor - actor with beta policy
* GaussianActor - actor with gaussian policy
* Critic - state value function
* ActorCritic - actor and critic combined
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Beta
| [
37811,
47,
4160,
444,
198,
198,
1212,
4226,
3769,
4788,
329,
45715,
16113,
13,
198,
198,
9487,
25,
198,
220,
220,
220,
1635,
9104,
532,
357,
283,
2545,
11619,
8,
17019,
3127,
10959,
198,
220,
220,
220,
1635,
17993,
40277,
532,
8674,
... | 3.527559 | 127 |
from django.db import models
from django.contrib.auth.models import User
from django_countries.fields import CountryField
# Imports so it can receive data from signals.py
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create or update user profile
@receiver(post_save, sender=User)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
62,
9127,
1678,
13,
25747,
1330,
12946,
15878,
198,
198,
2,
1846,
3742,
523,
340,
460,
3328,
... | 3.494624 | 93 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qrvt_dialog_about.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
80,
81,
36540,
62,
38969,
519,
62,
10755,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
... | 2.965517 | 116 |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests usage of i2c interface for beaglebone devices."""
import mox
import unittest
import bbi2c
DEFAULT_BUS_NUM = 3
SLAVE_ADDRESS = 0x20
DATA_ADDRESS = 0x0
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
357,
66,
8,
2211,
383,
18255,
1505,
7294,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
... | 2.885496 | 131 |
#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A deep MNIST classifier using convolutional layers."""
import logging
import math
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
'''
MnistNetwork is for initlizing and building basic network for mnist.
'''
def build_network(self):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
try:
input_dim = int(math.sqrt(self.x_dim))
except:
print(
'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
logger.debug(
'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
raise
x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
w_conv1 = weight_variable(
[self.conv_size, self.conv_size, 1, self.channel_1_num])
b_conv1 = bias_variable([self.channel_1_num])
"""@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
"""@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
h_pool1 = max_pool(h_conv1, self.pool_size)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
w_conv2 = weight_variable([self.conv_size, self.conv_size,
self.channel_1_num, self.channel_2_num])
b_conv2 = bias_variable([self.channel_2_num])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool(h_conv2, self.pool_size)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim = int(input_dim / (self.pool_size * self.pool_size))
with tf.name_scope('fc1'):
w_fc1 = weight_variable(
[last_dim * last_dim * self.channel_2_num, self.hidden_size])
b_fc1 = bias_variable([self.hidden_size])
h_pool2_flat = tf.reshape(
h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
w_fc2 = weight_variable([self.hidden_size, self.y_dim])
b_fc2 = bias_variable([self.y_dim])
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
with tf.name_scope('loss'):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
with tf.name_scope('adam_optimizer'):
self.train_step = tf.train.AdamOptimizer(
self.learning_rate).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(
tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x_input, pool_size):
"""max_pool downsamples a feature map by 2X."""
return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def download_mnist_retry(data_dir, max_num_retries=20):
"""Try to download mnist dataset and avoid errors"""
for _ in range(max_num_retries):
try:
return input_data.read_data_sets(data_dir, one_hot=True)
except tf.errors.AlreadyExistsError:
time.sleep(1)
raise Exception("Failed to download MNIST.")
def main(params):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist = download_mnist_retry(params['data_dir'])
print('Mnist download data done.')
logger.debug('Mnist download data done.')
# Create the model
# Build the graph for the deep net
mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
channel_2_num=params['channel_2_num'],
conv_size=params['conv_size'],
hidden_size=params['hidden_size'],
pool_size=params['pool_size'],
learning_rate=params['learning_rate'])
mnist_network.build_network()
logger.debug('Mnist build network done.')
# Write log
graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
test_acc = 0.0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
"""@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
batch_num = params['batch_num']
for i in range(batch_num):
batch = mnist.train.next_batch(batch_num)
"""@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
dropout_rate = params['dropout_rate']
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: dropout_rate}
)
if i % 100 == 0:
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
"""@nni.report_intermediate_result(test_acc)"""
logger.debug('test accuracy %g', test_acc)
logger.debug('Pipe send intermediate result done.')
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
"""@nni.report_final_result(test_acc)"""
logger.debug('Final result is %g', test_acc)
logger.debug('Send final result done.')
def generate_defualt_params():
'''
Generate default parameters for mnist network.
'''
params = {
'data_dir': '/tmp/tensorflow/mnist/input_data',
'dropout_rate': 0.5,
'channel_1_num': 32,
'channel_2_num': 64,
'conv_size': 5,
'pool_size': 2,
'hidden_size': 1024,
'learning_rate': 1e-4,
'batch_num': 200}
return params
if __name__ == '__main__':
"""@nni.get_next_parameter()"""
try:
main(generate_defualt_params())
except Exception as exception:
logger.exception(exception)
raise
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
357,
66,
8,
5413,
10501,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16... | 2.187755 | 4,410 |
from __future__ import absolute_import
import unittest
import six
import dicttools
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
555,
715,
395,
198,
198,
11748,
2237,
198,
198,
11748,
8633,
31391,
628,
628
] | 3.56 | 25 |
# Inheritance
class Checking( Account ): # We pass the base class as an argument for Checking class to inherit
"""This class generates checking account objects""" # doc strings to describe a class
type = "checking" # is a class variable. declared outside the methods of a class. shared by all instances of a class
#---------------------
account = Account( "importedFiles/balance.txt" ) # <account.bankapp.Account > Package, Module, Class
print( account )
print( "Current Balance: %s" %( account.balance ) )
# account.withdraw( 100 )
# account.deposit( 100 )
# print( "New Balance: %s" %( account.balance ) )
# account.commit()
#---------------------
# Inheritance
checking = Checking( "importedFiles/balance.txt" , 1 ) # `checking` is an object # has an atribute `fee`
print( checking )
print( "Current Balance: %s" %( checking.balance ) )
# checking.deposit(10)
# checking.transfer(100)
# checking.commit()
# print( "New Balance: %s" %( checking.balance ) )
#---------------------
# Inheritance class variable
# jacks_checking = Checking( "importedFiles/jack.txt" , 1 )
# jacks_checking.transfer(100)
# print( jacks_checking )
# print( "Current Balance for Jack: %s" %( jacks_checking.balance ) )
# jacks_checking.commit()
# print( jacks_checking.type )
# johns_checking = Checking( "importedFiles/john.txt" , 1 )
# johns_checking.transfer(100)
# print( johns_checking )
# print( "Current Balance for Jack: %s" %( johns_checking.balance ) )
# johns_checking.commit()
# print( johns_checking.type )
#---------------------
# Doc String
# print( johns_checking.__doc__) # doc strings to describe a class
#---------------------
# Data Memebers
# are instance variables or class variables
#---------------------
# Constructors
# are the __init__ functions or methods in a class and constructs the class
#---------------------
# Class Methods
# applied to the objects instance, eg transfer, deposit
#---------------------
# Instantiation
# is the process of creating object instances or instances of a class
# eg: johns_checking = Checking( "importedFiles/john.txt" , 1 )
#---------------------
# Inheritance
# is the process of creating a subclass. has methods of inherited class plus its own methods
#---------------------
# Attributes
# class and instance variables that can be accessed
# eg: # print( johns_checking.type ) where .type is an arttribute
# eg: # print( johns_checking.balance ) where .balance is an arttribute
| [
198,
2,
47025,
42942,
220,
198,
4871,
39432,
7,
10781,
15179,
220,
220,
220,
220,
220,
1303,
775,
1208,
262,
2779,
1398,
355,
281,
4578,
329,
39432,
1398,
284,
16955,
198,
220,
220,
220,
37227,
1212,
1398,
18616,
10627,
1848,
5563,
37... | 3.39213 | 737 |
import socket
from struct import pack
ip='192.168.8.211'
port='9999'
commands = {'on' : '{"system":{"set_relay_state":{"state":1}}}',
'off' : '{"system":{"set_relay_state":{"state":0}}}',
'info' : '{"system":{"get_sysinfo":{}}}'}
| [
11748,
17802,
198,
6738,
2878,
1330,
2353,
198,
198,
541,
11639,
17477,
13,
14656,
13,
23,
13,
21895,
6,
198,
634,
11639,
24214,
6,
198,
198,
9503,
1746,
796,
1391,
6,
261,
6,
220,
220,
220,
220,
220,
220,
1058,
705,
4895,
10057,
... | 2.133858 | 127 |
# Copyright 2018 JanusGraph Python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from janusgraph_python.core.datatypes.GeoShape import GeoShape
class TestPoint(unittest.TestCase):
"""
This method is used to unit test when Invalid coordinates are passed to Point class.
"""
"""
This method is used to unit test when Valid coordinates are passed to Point class.
"""
"""
This method is used to unit test equality and non equality of 2 Point classes defined by __eq__ and __ne__ methods.
"""
"""
This method is used to unit test, once Point objects are created, it can return valid and correct coordinates.
"""
"""
This method is used to unit test the Shape of Object being created.
"""
| [
2,
15069,
2864,
2365,
385,
37065,
11361,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.582633 | 357 |
"""Note: this code was modified from:
https://github.com/lpigou/Theano-3D-ConvNet/blob/master/LICENSE
by @lpigou and collaborators
"""
import numpy as np
import theano.tensor as T
import keras.backend as K
from keras.layers.core import Layer
class NormLayer(Layer):
""" Normalization layer """
def __init__(self, method="lcn", kernel_size=9, threshold=1e-4,
nb_channels=3,
use_divisor=True, **kwargs):
"""
method: "lcn", "gcn", "mean"
LCN: local contrast normalization
kwargs:
kernel_size=9, threshold=1e-4, use_divisor=True
GCN: global contrast normalization
kwargs:
scale=1., subtract_mean=True, use_std=False, sqrt_bias=0.,
min_divisor=1e-8
MEAN: local mean subtraction
kwargs:
kernel_size=5
"""
super(NormLayer, self).__init__(**kwargs)
self.method = method
self.kernel_size = kernel_size
self.threshold = threshold
self.use_divisor = use_divisor
self.nb_channels = nb_channels
self.input = K.placeholder(ndim=4)
def lecun_lcn(self, X, kernel_size=7, threshold=1e-4, use_divisor=True):
"""
Yann LeCun's local contrast normalization
Orginal code in Theano by: Guillaume Desjardins
"""
filter_shape = (1, 1, kernel_size, kernel_size)
filters = self.gaussian_filter(
kernel_size).reshape(filter_shape)
# filters = shared(_asarray(filters, dtype=floatX), borrow=True)
filters = K.variable(filters)
convout = K.conv2d(X, filters, filter_shape=filter_shape,
border_mode='same')
# For each pixel, remove mean of kernel_sizexkernel_size neighborhood
new_X = X - convout
if use_divisor:
# Scale down norm of kernel_sizexkernel_size patch
sum_sqr_XX = K.conv2d(K.pow(K.abs(new_X), 2), filters,
filter_shape=filter_shape, border_mode='same')
denom = T.sqrt(sum_sqr_XX)
per_img_mean = denom.mean(axis=[2, 3])
divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
divisor = T.maximum(divisor, threshold)
new_X /= divisor
return new_X
| [
37811,
6425,
25,
428,
2438,
373,
9518,
422,
25,
198,
198,
5450,
1378,
12567,
13,
785,
14,
34431,
328,
280,
14,
464,
5733,
12,
18,
35,
12,
3103,
85,
7934,
14,
2436,
672,
14,
9866,
14,
43,
2149,
24290,
198,
1525,
2488,
34431,
328,
... | 2.021386 | 1,169 |
import copy
import datetime
import logging
from dateutil import rrule
from datetime import timedelta, datetime
from spaceone.core.service import *
from spaceone.core import utils, config
from spaceone.cost_analysis.error import *
from spaceone.cost_analysis.model.job_task_model import JobTask
from spaceone.cost_analysis.model.job_model import Job
from spaceone.cost_analysis.model.data_source_model import DataSource
from spaceone.cost_analysis.model.cost_model import CostQueryHistory
from spaceone.cost_analysis.manager.cost_manager import CostManager
from spaceone.cost_analysis.manager.job_manager import JobManager
from spaceone.cost_analysis.manager.job_task_manager import JobTaskManager
from spaceone.cost_analysis.manager.data_source_plugin_manager import DataSourcePluginManager
from spaceone.cost_analysis.manager.data_source_manager import DataSourceManager
from spaceone.cost_analysis.manager.secret_manager import SecretManager
from spaceone.cost_analysis.manager.budget_manager import BudgetManager
from spaceone.cost_analysis.manager.budget_usage_manager import BudgetUsageManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
| [
11748,
4866,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
3128,
22602,
1330,
374,
25135,
198,
6738,
4818,
8079,
1330,
28805,
12514,
11,
4818,
8079,
198,
198,
6738,
2272,
505,
13,
7295,
13,
15271,
1330,
1635,
198,
6738,
2272,
50... | 3.768519 | 324 |
# TODO: временный файл - удалить
s = "программа"
s2 = 'продукт'
print(s)
print(s2)
# Использование служебных спецсимволов.
print("Программа 1\nПрограмма 2\nПрограмма 3\n\tЧасть 1\n\tЧасть 2")
print(len("12345\n"))
print("""Программа 1
Программа 2
Программа 3
Часть 1
Часть 2""")
print(r"Программа 1\nПрограмма 2") | [
2,
16926,
46,
25,
12466,
110,
21169,
16843,
43108,
16843,
22177,
22177,
45035,
140,
117,
220,
141,
226,
16142,
140,
117,
30143,
532,
220,
35072,
43666,
16142,
30143,
18849,
20375,
45367,
198,
82,
796,
366,
140,
123,
21169,
25443,
111,
2... | 1.127946 | 297 |
# 2. (Função sem retorno sem parâmetro) Faça uma função/método que leia dois valores positivos e apresente a soma dos N números existentes entre eles (inclusive).
main()
| [
2,
362,
13,
357,
24629,
16175,
28749,
5026,
1005,
46447,
5026,
1582,
22940,
4164,
305,
8,
18350,
50041,
334,
2611,
1257,
16175,
28749,
14,
76,
25125,
24313,
8358,
443,
544,
466,
271,
1188,
2850,
46436,
452,
418,
304,
2471,
2028,
68,
2... | 2.514286 | 70 |
import os
from gym import spaces
import numpy as np
import gym
class Point(gym.Env):
"""Superclass for all MuJoCo environments.
"""
@property
@property
| [
11748,
28686,
198,
6738,
11550,
1330,
9029,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
628,
198,
4871,
6252,
7,
1360,
76,
13,
4834,
85,
2599,
198,
220,
220,
220,
37227,
12442,
4871,
329,
477,
8252,
9908,
7222,
12493,
13,
1... | 2.915254 | 59 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-01-31 18:13
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
13130,
12,
486,
12,
3132,
1248,
25,
1485,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cstr, flt, getdate, comma_and, cint, nowdate, add_days
import datetime
from frappe import sendmail
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
def resolve_work_order(docname):
# update without checking permissions
""" Called from client side on Stop/Unstop event"""
status = 'Resolved'
if not frappe.has_permission("Work Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.db.sql("update `tabWork Order` set status = 'Resolved', modified='%s',modified_by='%s' , skip_transfer=1"
" where name = '%s'" % (datetime.datetime.now(), frappe.session.user,docname))
frappe.msgprint(_("Work Order has been {0}").format(status))
return True
@frappe.whitelist()
@frappe.whitelist()
| [
2,
15069,
357,
66,
8,
2177,
11,
39313,
27768,
21852,
18367,
83,
13,
12052,
13,
290,
25767,
669,
198,
2,
13789,
25,
22961,
3611,
5094,
13789,
410,
18,
13,
4091,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
6... | 2.785714 | 406 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="HC-05-ConfigTool",
version="0.1.1",
author="Joseph Lam",
author_email="mhlamaf@connect.ust.hk",
description="A terminal tool for configuring HC-05 with AT mode.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Doma1204/HC-05_Bluetooth_Tool",
packages=setuptools.find_packages(),
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
],
python_requires='>=3',
keywords="bluetooth hc-05",
install_requires=["pyserial"]
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.576923 | 442 |
"""Tests for backref schemas processing."""
import pytest
from open_alchemy.schemas import helpers
@pytest.mark.parametrize(
"schema, schemas, expected_backref",
[
pytest.param({}, {}, None, id="no items, allOf nor $ref"),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {}},
None,
id="$ref no backref",
),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="$ref backref",
),
pytest.param({"allOf": []}, {}, None, id="allOf empty"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf single $ref",
),
pytest.param(
{"allOf": [{"x-backref": "schema"}]},
{},
"schema",
id="allOf single x-backref",
),
pytest.param({"allOf": [{}]}, {}, None, id="allOf single no backref"),
pytest.param({"allOf": [{}, {}]}, {}, None, id="allOf multiple no backref"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple first",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {}},
"schema",
id="allOf multiple second",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple all",
),
pytest.param(
{"items": {"$ref": "#/components/schemas/RefSchema"}},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items $ref backref",
),
pytest.param(
{"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items allOf $ref backref",
),
],
)
@pytest.mark.schemas
def test_get(schema, schemas, expected_backref):
"""
GIVEN schema, schemas and expected backref
WHEN get is called with the schema and schemas
THEN the expected backref is returned.
"""
returned_backref = helpers.backref.get(schemas, schema)
assert returned_backref == expected_backref
@pytest.mark.parametrize(
"schema, schemas, expected_result",
[
pytest.param({}, {}, False, id="no items, allOf nor $ref"),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {}},
False,
id="$ref no backref",
),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {"x-backref": "schema"}},
True,
id="$ref backref",
),
pytest.param({"allOf": []}, {}, False, id="allOf empty"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf single $ref",
),
pytest.param(
{"allOf": [{"x-backref": "schema"}]},
{},
True,
id="allOf single x-backref",
),
pytest.param({"allOf": [{}]}, {}, False, id="allOf single no backref"),
pytest.param({"allOf": [{}, {}]}, {}, False, id="allOf multiple no backref"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf multiple first",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {}},
True,
id="allOf multiple second",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf multiple all",
),
pytest.param(
{"items": {"$ref": "#/components/schemas/RefSchema"}},
{"RefSchema": {"x-backref": "schema"}},
True,
id="items $ref backref",
),
pytest.param(
{"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="items allOf $ref backref",
),
],
)
@pytest.mark.schemas
def test_defined(schema, schemas, expected_result):
"""
GIVEN schema, schemas and expected result
WHEN defined is called with the schema and schemas
THEN the expected result is returned.
"""
returned_result = helpers.backref.defined(schemas, schema)
assert returned_result == expected_result
| [
37811,
51,
3558,
329,
736,
5420,
3897,
5356,
7587,
526,
15931,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1280,
62,
282,
26599,
13,
1416,
4411,
292,
1330,
49385,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
1... | 1.833498 | 3,045 |
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from wechat_client import weclient
import os
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
356,
17006,
62,
16366,
1330,
356,
16366,
... | 3 | 48 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.12.0
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''.venv'': poetry)'
# name: python3
# ---
# %% [markdown]
# # Interactive plots to explore line of sight / shadowing tables
#
# ## Setup
# %%
import matplotlib.pyplot as plt
import ipywidgets as ipyw
from ipywidgets import interact
from roughness import plotting as rp
from roughness import roughness as rn
from roughness import make_los_table as mlt
# Load lookups (losfrac, Num los facets, Num facets total)
lookup = rn.load_los_lookup(mlt.FLOOKUP)
# Get coord arrays and interactive plot sliders for rms, inc, az
rmss = lookup.rms.values
incs = lookup.inc.values
azs = lookup.az.values
slopes = lookup.theta.values
rms_slider = ipyw.IntSlider(20, min=rmss.min(), max=rmss.max(), step=1)
inc_slider = ipyw.IntSlider(30, min=incs.min(), max=incs.max(), step=1)
az_slider = ipyw.IntSlider(270, min=azs.min(), max=azs.max(), step=15)
# %% [markdown]
# ## Shadow table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
shadow_table = rn.get_shadow_table(rms, inc, az, lookup)
clabel = "P(shadowed)"
ax = rp.plot_slope_az_table(shadow_table, True, clabel)
ax.set_title("Fraction of facets shadowed in slope / az bin")
# %% [markdown]
# ## View table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
view_table = rn.get_view_table(rms, inc, az, lookup)
clabel = "P(visible)"
ax = rp.plot_slope_az_table(view_table, True, clabel)
ax.set_title("Fraction of facets visible in slope / az bin")
# %% [markdown]
# ## Total facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
total_facet_table = rn.get_los_table(rms, inc, az, lookup, "prob")
clabel = "Total facets"
ax = rp.plot_slope_az_table(total_facet_table, True, clabel)
ax.set_title("Total facet count in slope / az bin")
# %% [markdown]
# ## Line of sight facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
los_facet_table = rn.get_los_table(rms, inc, az, lookup, "los")
clabel = "LOS facets"
ax = rp.plot_slope_az_table(los_facet_table, True, clabel)
ax.set_title("Line of sight facet count in slope / az bin")
# %%
titles = [
"Fraction of facets shadowed in slope / az bin",
"Norm prob of visible facets in slope / az bin",
"Line of sight facet count in slope / az bin",
"Total facet count in slope / az bin",
]
clabels = [
"P(shadowed)",
"P(visible)/sum(visible)",
"N(lineofsight)",
"N(total)",
]
@interact
def plot_all_tables(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
_, axs = plt.subplots(2, 2, figsize=(12, 10))
tables = [
rn.get_shadow_table(rms, inc, az, lookup),
rn.get_view_table(rms, inc, az, lookup),
rn.get_los_table(rms, inc, az, lookup, "los"),
rn.get_los_table(rms, inc, az, lookup, "total"),
]
for i, ax in enumerate(axs.flatten()):
cmap_r = i == 0
ax = rp.plot_slope_az_table(tables[i], cmap_r, clabels[i], ax)
ax.set_title(titles[i])
# %%
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220,
220,
220,
220,
220,
220,
7552,
25,
764,
9078,
198,
2,
220,
220,
220,
220,
... | 2.36509 | 1,501 |
import os
from pickle import dump
from time import time
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from src.lib.libic import init, set_opener
from src.lib.model_lib import feature_extractor
if __name__ == '__main__':
parameters = initialize()
total, start = process_image(parameters)
print("Total Features Extracted :", total)
print("Processing Time :", time() - start, "sec")
| [
11748,
28686,
198,
6738,
2298,
293,
1330,
10285,
198,
6738,
640,
1330,
640,
198,
6738,
41927,
292,
13,
1324,
677,
602,
13,
924,
1159,
62,
85,
18,
1330,
554,
4516,
53,
18,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
198,
6738,... | 3.040268 | 149 |
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import popupcad
import numpy
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
25354,
416,
7806,
337,
13,
317,
31469,
290,
27342,
9865,
3843,
20673,
198,
15333,
25,
46078,
559,
5209,
27,
265,
29,
27345,
13,
15532,
13,
198,
5492,
766,
... | 2.714286 | 63 |
import dgl
import numpy as np
from pathlib import Path
import torch
from deepstochlog.term import Term, List
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.dataset import ContextualizedTermDataset
root_path = Path(__file__).parent
dataset = dgl.data.CiteseerGraphDataset()
g = dataset[0]
# get node feature
documents = g.ndata['feat']
# get data split
train_ids = np.where(g.ndata['train_mask'].numpy())[0]
val_ids = np.where(g.ndata['val_mask'].numpy())[0]
test_ids = np.where(g.ndata['test_mask'].numpy())[0]
# get labels
labels = g.ndata['label'].numpy()
edges = []
pretraining_data = documents[train_ids], torch.tensor(labels[train_ids])
citations = []
for eid in range(g.num_edges()):
a, b = g.find_edges(eid)
a, b = a.numpy().tolist()[0], b.numpy().tolist()[0],
edges.append((a,b))
citations.append("cite(%d, %d)." % (a,b))
citations = "\n".join(citations)
train_dataset = CiteseerDataset(split="train", documents=documents, labels=labels)
valid_dataset = CiteseerDataset(split="valid", documents=documents, labels=labels)
test_dataset = CiteseerDataset(split="test", documents=documents, labels=labels)
queries_for_model = train_dataset.queries_for_model + valid_dataset.queries_for_model + test_dataset.queries_for_model
| [
11748,
288,
4743,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28034,
198,
6738,
2769,
301,
5374,
6404,
13,
4354,
1330,
35118,
11,
7343,
198,
6738,
2769,
301,
5374,
6404,
13,
22866,
1330,
30532,
72... | 2.568047 | 507 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs
n_samples = 200
random_state = 10
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering")
plt.show()
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
X_not_balanced = np.vstack((X[y == 0][:500], X[y == 1][:200], X[y == 2][:10]))
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_not_balanced)
plt.scatter(X_not_balanced[:, 0], X_not_balanced[:, 1], c=y_predicted)
plt.title("Blobs having differnt number of elements")
plt.show() | [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
1341,
35720,
13,
565,
5819,
1330,
19015,
75,
12057,
876,
2601,
436,
1586,
201,
198,
6738,
1341,
35720,
13,
19608,
292,
1... | 2.21466 | 573 |
import arrow
from test.base import BaseTest
from test.functional.base_functional import BaseServerRestApi
| [
11748,
15452,
198,
198,
6738,
1332,
13,
8692,
1330,
7308,
14402,
198,
6738,
1332,
13,
45124,
13,
8692,
62,
45124,
1330,
7308,
10697,
19452,
32,
14415,
628
] | 4 | 27 |
'''Models: base class definitions.'''
###############################################################################
## For reference:
## Throughout this library, we work with the tacit assumption
## that the "parameters" (i.e., values of "paras" dicts) are
## such that paras[key].ndim >= 2, even if they are in essence
## just a single scalar rather than a vector/mtx/array.
## Default general-purpose random parameter initialization function(s).
init_range = 0.05
def random_init(shape, rg, range_low=-init_range, range_high=init_range):
'''
A simple initial randomizer using uniformly generated values.
'''
return rg.uniform(low=range_low,
high=range_high,
size=shape)
## Definition of base model class.
class Model:
'''
Model objects represent collections of parametrized
functions. Each function takes some "inputs" (denoted X),
and is determined by a dictionary of "parameters" (denoted paras).
These parameters are the "state" of the Model object, and
represent a particular choice of candidate from the
hypothesis class implicitly represented by the Model object.
Handy references (property, getter/setter):
https://docs.python.org/3/library/functions.html#property
https://stackoverflow.com/a/15930977
'''
@property
def paras(self):
'''
Get the current parameter dict.
'''
return self._paras
@paras.setter
def paras(self, paras_new):
'''
Set new parameters.
Can do the entire dictionary all at once,
or one can do it one element at a time,
e.g., something like
>> model.paras["key"] = value
can be done as desired.
'''
self._paras = paras_new
def __str__(self):
'''
For printing out the relevant model name.
'''
out = "Model name: {}".format(self.name)
return out
def __call__(self, X=None):
'''
Lets us compute model outputs as model(X).
'''
return self.func(paras=self._paras, X=X)
def func(self, paras=None, X=None):
'''
Execute the model on given inputs.
(implemented in child classes)
'''
raise NotImplementedError
def grad(self, paras=None, X=None):
'''
When applicable, compute the gradient with
respect to the relevant parameters.
(implemented in child classes)
'''
raise NotImplementedError
def hess(self, paras=None, X=None):
'''
When applicable, compute the Hessian with
respect to the relevant parameters.
(implemented in child classes)
'''
raise NotImplementedError
###############################################################################
| [
7061,
6,
5841,
1424,
25,
2779,
1398,
17336,
2637,
7061,
628,
198,
29113,
29113,
7804,
4242,
21017,
628,
198,
2235,
1114,
4941,
25,
198,
2235,
24581,
428,
5888,
11,
356,
670,
351,
262,
40787,
13196,
198,
2235,
326,
262,
366,
17143,
730... | 2.604714 | 1,103 |
#/usr/bin/python3
import os
| [
2,
14,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
28686,
198
] | 2.333333 | 12 |
# This code fills superpixels by scribbling over the image with a given labeled color.
# It requires all jpg faces storaged in the same folder and the .dat super-pixels in the same LFW format.
# R. Redondo, Eurecat 2019 (c).
import numpy as np
import operator
import cv2
import os
import sys
resize = 3
pointer = (-1,-1)
super_scribbles = []
isDrawing = False
radius = 10
category = 1
label_colors = [
( 0, 0, 0),
( 0,255, 0),
( 0, 0,255),
(255,255, 0),
(255, 0, 0),
(255, 0,255)]
label_names = [
"eraser",
"skin",
"hair",
"beard-mustache",
"sunglasses",
"wearable"]
# ---------------------------------------------------------------------------------------
if len(sys.argv) != 4:
print("Usage: $ elfw-scribbleMe <faces_folder> <superpixels_folder> <output_folder>")
exit(0)
faces_folder = sys.argv[1]
sp_folder = sys.argv[2]
output_folder = sys.argv[3]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# faces_folder = '../Datasets/lfw-deepfunneled/'
# sp_folder = '../Datasets/lfw-deepfunneled-sp/'
# output_folder = '../Datasets/lfw-deepfunneled-sp-overlay/'
for face_file in sorted(os.listdir(faces_folder)):
if not face_file.endswith(".jpg"):
continue
file_name = os.path.splitext(face_file)[0]
super_scribbles_file = os.path.join(output_folder, file_name + '.png')
if os.path.exists(super_scribbles_file):
continue
face = cv2.imread(os.path.join(faces_folder, face_file))
person_name = file_name[:-5]
sp_file = os.path.join(os.path.join(sp_folder, person_name), file_name + '.dat')
if not os.path.exists( sp_file ):
print('\033[1m' + 'Superpixels not found in ' + sp_file + '\033[0m')
exit(0)
print('Editing ' + '\033[1m' + file_name + '\033[0m' + "...")
# Superpixels: watch out, SP do not have univoque numbering
sp = np.fromfile(sp_file, dtype=int, count=-1, sep=' ')
sp = np.array(sp, dtype=np.uint8)
sp = np.reshape(sp, (250, -1))
h, w = sp.shape
# Superpixels bounds
bounds = np.zeros(sp.shape)
for y in range(0, h):
for x in range(0, w):
if y > 0:
if sp[y, x] != sp[y-1, x ]:
bounds[y,x] = 255;
continue
if y < h-1:
if sp[y, x] != sp[y+1, x ]:
bounds[y,x] = 255;
continue
if y < h-1 and x > 0:
if sp[y, x] != sp[y+1, x-1]:
bounds[y,x] = 255;
continue
if y < h-1 and x < w-1:
if sp[y, x] != sp[y+1, x+1]:
bounds[y,x] = 255;
continue
if y > 0 and x > 0:
if sp[y, x] != sp[y-1, x-1]:
bounds[y,x] = 255;
continue
if y > 0 and x < w-1:
if sp[y, x] != sp[y-1, x+1]:
bounds[y,x] = 255;
continue
if x > 0:
if sp[y, x] != sp[y , x-1]:
bounds[y,x] = 255;
continue
if x < w-1:
if sp[y, x] != sp[y , x+1]:
bounds[y,x] = 255;
continue
# Erode
kernel = np.ones((2,2),np.uint8)
bounds = cv2.erode(bounds, kernel, iterations = 1)
# Boundaries visualization
b,g,r = cv2.split(face)
r[bounds > 0] = r[bounds > 0] * 0.2 + 255 * 0.8;
bounds = cv2.merge((b,g,r))
## SP re-indexing: there could be several superpixels for each SP index label
index = 0
sp_reindex = np.zeros(sp.shape, dtype='uint32')
for s in range(0,np.amax(sp)+1):
mask = np.zeros(sp.shape, dtype='uint8')
mask[sp == s] = 255
_, components = cv2.connectedComponents(mask, connectivity=4)
if np.amax(components):
for c in range(1,np.amax(components)+1):
index = index + 1
sp_reindex[components == c] = index
# Scribbles
scribbles = np.zeros(face.shape)
super_scribbles = scribbles.copy()
face_canvas = face.copy()
# Mouse events callback
cv2.namedWindow(file_name)
cv2.setMouseCallback(file_name, onClick)
# Defaults
radius = 2
category = 1
while True:
# Key handlers
k = cv2.waitKey(1) & 0xFF
if k >= 48 and k <= 53:
category = k - 48
elif k == ord('e'):
category = 0
elif k == ord('q'):
radius = min(radius + 2, 16)
elif k == ord('a'):
radius = max(radius - 2, 2)
elif k == 32:
if radius < 10:
radius = 16
else:
radius = 2
elif k == 13:
break
elif k == 27:
exit(0)
# Compositing
alpha = 0.12
face_canvas = face.copy()
face_canvas[super_scribbles != 0] = face_canvas[super_scribbles != 0] * alpha + super_scribbles[super_scribbles != 0] * (1-alpha)
alpha = 0.12
bounds_canvas = bounds.copy()
bounds_canvas[scribbles != 0] = bounds_canvas[scribbles != 0] * alpha + scribbles[scribbles != 0] * (1-alpha)
alpha = 0.5
overlay = bounds_canvas.copy()
cv2.circle(overlay, pointer, radius, label_colors[category], -1)
bounds_canvas = cv2.addWeighted(bounds_canvas, alpha, overlay, 1 - alpha, 0)
vis = np.concatenate((bounds_canvas, face_canvas), axis=1)
vis = cv2.resize(vis, (vis.shape[1] * resize, vis.shape[0] * resize), cv2.INTER_NEAREST)
# Info
font_size = 0.6
font_thickness = 2
hstep = 25
info = "Label (0-5,e): "
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = " " + label_names[category]
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, label_colors[category], font_thickness)
info = "Stroke (q-a,space): " + str(radius)
cv2.putText(vis, info, (10, hstep * 2), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Save and give me more (enter)"
cv2.putText(vis, info, (10, hstep * 3), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Exit (esc)"
cv2.putText(vis, info, (10, hstep * 4), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
cv2.imshow(file_name, vis)
cv2.destroyWindow(file_name)
# Save output
cv2.imwrite(super_scribbles_file, super_scribbles)
print("Labels saved in " + super_scribbles_file)
cv2.destroyAllWindows() | [
2,
770,
2438,
23816,
2208,
79,
14810,
416,
44661,
11108,
625,
262,
2939,
351,
257,
1813,
15494,
3124,
13,
198,
2,
632,
4433,
477,
474,
6024,
6698,
336,
273,
1886,
287,
262,
976,
9483,
290,
262,
764,
19608,
2208,
12,
79,
14810,
287,
... | 2.185922 | 2,614 |
import numpy as np
from benchmark.analyzer.analyzer import Analyzer
from pymoo.indicators.igd import IGD
from pymoo.util.misc import from_dict
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
18335,
13,
38200,
9107,
13,
38200,
9107,
1330,
16213,
9107,
198,
6738,
279,
4948,
2238,
13,
521,
44549,
13,
328,
67,
1330,
35336,
35,
198,
6738,
279,
4948,
2238,
13,
22602,
13,
44374,
133... | 3.104167 | 48 |
import datetime
from .base import BaseReader
| [
11748,
4818,
8079,
198,
198,
6738,
764,
8692,
1330,
7308,
33634,
628
] | 3.916667 | 12 |
import json
import os
import constants
import accounts
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
38491,
198,
11748,
5504,
628
] | 4.666667 | 12 |
from __future__ import print_function, division
import numpy as np
from numpy import diff, concatenate
import gc
from .goodsectionsresults import GoodSectionsResults
from ..timeframe import TimeFrame
from ..utils import timedelta64_to_secs
from ..node import Node
from ..timeframe import list_of_timeframes_from_list_of_dicts, timeframe_from_dict
class GoodSections(Node):
"""Locate sections of data where the sample period is <= max_sample_period.
Attributes
----------
previous_chunk_ended_with_open_ended_good_section : bool
"""
requirements = {'device': {'max_sample_period': 'ANY VALUE'}}
postconditions = {'statistics': {'good_sections': []}}
results_class = GoodSectionsResults
def _process_chunk(self, df, metadata):
"""
Parameters
----------
df : pd.DataFrame
with attributes:
- look_ahead : pd.DataFrame
- timeframe : nilmtk.TimeFrame
metadata : dict
with ['device']['max_sample_period'] attribute
Returns
-------
None
Notes
-----
Updates `self.results`
Each good section in `df` is marked with a TimeFrame.
If this df ends with an open-ended good section (assessed by
examining df.look_ahead) then the last TimeFrame will have
`end=None`. If this df starts with an open-ended good section
then the first TimeFrame will have `start=None`.
"""
# Retrieve relevant metadata
max_sample_period = metadata['device']['max_sample_period']
look_ahead = getattr(df, 'look_ahead', None)
timeframe = df.timeframe
# Process dataframe
good_sections = get_good_sections(
df, max_sample_period, look_ahead,
self.previous_chunk_ended_with_open_ended_good_section)
# Set self.previous_chunk_ended_with_open_ended_good_section
if good_sections:
self.previous_chunk_ended_with_open_ended_good_section = (
good_sections[-1].end is None)
# Update self.results
self.results.append(timeframe, {'sections': [good_sections]})
def get_good_sections(df, max_sample_period, look_ahead=None,
previous_chunk_ended_with_open_ended_good_section=False):
"""
Parameters
----------
df : pd.DataFrame
look_ahead : pd.DataFrame
max_sample_period : number
Returns
-------
sections : list of TimeFrame objects
Each good section in `df` is marked with a TimeFrame.
If this df ends with an open-ended good section (assessed by
examining `look_ahead`) then the last TimeFrame will have
`end=None`. If this df starts with an open-ended good section
then the first TimeFrame will have `start=None`.
"""
index = df.dropna().sort_index().index
del df
if len(index) < 2:
return []
timedeltas_sec = timedelta64_to_secs(diff(index.values))
timedeltas_check = timedeltas_sec <= max_sample_period
# Memory management
del timedeltas_sec
gc.collect()
timedeltas_check = concatenate(
[[previous_chunk_ended_with_open_ended_good_section],
timedeltas_check])
transitions = diff(timedeltas_check.astype(np.int))
# Memory management
last_timedeltas_check = timedeltas_check[-1]
del timedeltas_check
gc.collect()
good_sect_starts = list(index[:-1][transitions == 1])
good_sect_ends = list(index[:-1][transitions == -1])
# Memory management
last_index = index[-1]
del index
gc.collect()
# Use look_ahead to see if we need to append a
# good sect start or good sect end.
look_ahead_valid = look_ahead is not None and not look_ahead.empty
if look_ahead_valid:
look_ahead_timedelta = look_ahead.dropna().index[0] - last_index
look_ahead_gap = look_ahead_timedelta.total_seconds()
if last_timedeltas_check: # current chunk ends with a good section
if not look_ahead_valid or look_ahead_gap > max_sample_period:
# current chunk ends with a good section which needs to
# be closed because next chunk either does not exist
# or starts with a sample which is more than max_sample_period
# away from df.index[-1]
good_sect_ends += [last_index]
elif look_ahead_valid and look_ahead_gap <= max_sample_period:
# Current chunk appears to end with a bad section
# but last sample is the start of a good section
good_sect_starts += [last_index]
# Work out if this chunk ends with an open ended good section
if len(good_sect_ends) == 0:
ends_with_open_ended_good_section = (
len(good_sect_starts) > 0 or
previous_chunk_ended_with_open_ended_good_section)
elif len(good_sect_starts) > 0:
# We have good_sect_ends and good_sect_starts
ends_with_open_ended_good_section = (
good_sect_ends[-1] < good_sect_starts[-1])
else:
# We have good_sect_ends but no good_sect_starts
ends_with_open_ended_good_section = False
# If this chunk starts or ends with an open-ended
# good section then the relevant TimeFrame needs to have
# a None as the start or end.
if previous_chunk_ended_with_open_ended_good_section:
good_sect_starts = [None] + good_sect_starts
if ends_with_open_ended_good_section:
good_sect_ends += [None]
assert len(good_sect_starts) == len(good_sect_ends)
sections = [TimeFrame(start, end)
for start, end in zip(good_sect_starts, good_sect_ends)
if not (start == end and start is not None)]
# Memory management
del good_sect_starts
del good_sect_ends
gc.collect()
return sections
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
814,
11,
1673,
36686,
378,
198,
11748,
308,
66,
198,
6738,
764,
11274,
23946,
43420,
1330,
4599,
50,
478,
507,
2... | 2.479695 | 2,364 |
from core import colors
version = "pre-1.0"
apiversion = "pre-1.0"
update_date = "2021-07-11"
codename = "phoenix"
about = ("Hakku Framework "+version+" "+codename+
"\nauthor: Noa-Emil Nissinen (4shadoww)"
"\nemail: 4shadoww0@gmail.com"
"\ngithub: 4shadoww")
| [
6738,
4755,
1330,
7577,
198,
198,
9641,
796,
366,
3866,
12,
16,
13,
15,
1,
198,
499,
1191,
295,
796,
366,
3866,
12,
16,
13,
15,
1,
198,
19119,
62,
4475,
796,
366,
1238,
2481,
12,
2998,
12,
1157,
1,
198,
19815,
12453,
796,
366,
... | 2.372727 | 110 |
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import math as math
import os
import time
from sklearn.svm import SVC
from models.cpuutils import *
from sklearn.externals import joblib
import tensorflow as tf
import time
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
355,
10688,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
1341,
35720,
13,
82,
14761,... | 3.105263 | 76 |
# -*- coding: utf-8 -*-
# standard library
from enum import Enum
from typing import Dict, Optional
# scip plugin
from eureka.client.app_info.lease_info import LeaseInfo
from eureka.utils.timestamp import current_timestamp
__author__ = "Haribo (haribo1558599@gmail.com)"
__license__ = "Apache 2.0"
class InstanceInfo:
"""
The class that holds information required for registration with
Eureka Server and to be discovered by other components.
See com.netflix.appinfo.InstanceInfo.
"""
__slots__ = (
"_instance_id",
"_app_name",
"_app_group_name",
"_ip_address",
"_vip_address",
"_secure_vip_address",
"_lease_info",
"_metadata",
"_last_updated_timestamp",
"_last_dirty_timestamp",
"_action_type",
"_host_name",
"_is_coordinating_discovery_server",
"_is_secure_port_enabled",
"_is_unsecure_port_enabled",
"_port",
"_secure_port",
"_status",
"_overridden_status",
"_is_instance_info_dirty",
)
DEFAULT_PORT = 7001
DEFAULT_SECURE_PORT = 7002
class ActionType(Enum):
"""
Eureka server will set the action type on the instance to let
Eureka client know what action to perform on this instance in
its local registry.
"""
ADD = "ADD"
MODIFIED = "MODIFIED"
DELETED = "DELETED"
def __init__(
self,
instance_id: str,
app_name: str,
ip_address: str,
vip_address: str,
secure_vip_address: str,
lease_info: LeaseInfo,
host_name: str,
app_group_name: str = None,
metadata: Dict[str, str] = None,
last_updated_timestamp: int = None,
last_dirty_timestamp: int = None,
action_type: ActionType = None,
is_coordinating_discovery_server: bool = False,
is_secure_port_enabled: bool = False,
is_unsecure_port_enabled: bool = True,
port: int = DEFAULT_PORT,
secure_port: int = DEFAULT_SECURE_PORT,
status: Status = Status.UP,
overridden_status: Status = Status.UNKNOWN,
is_instance_info_dirty: bool = False,
):
"""
Args:
instance_id: the unique id of the instance.
app_name: the application name of the instance.This is mostly used in querying of instances.
ip_address: the ip address, in AWS scenario it is a private IP.
vip_address: the Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
secure_vip_address: the Secure Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
lease_info: the lease information regarding when it expires.
host_name: the default network address to connect to this instance. Typically this would be the fully qualified public hostname.
metadata: all application specific metadata set on the instance.
last_updated_timestamp: last time when the instance was updated.
last_dirty_timestamp: the last time when this instance was touched.
port: the unsecure port number that is used for servicing requests.
secure_port: the secure port that is used for servicing requests.
status: the status indicating whether the instance can handle requests.
overridden_status:the status indicating whether an external process has changed the status.
"""
self._instance_id = instance_id
self._app_name = app_name
self._app_group_name = app_group_name
self._ip_address = ip_address
self._vip_address = vip_address
self._secure_vip_address = secure_vip_address
self._lease_info = lease_info
self._metadata = metadata
self._last_updated_timestamp = last_updated_timestamp
self._last_dirty_timestamp = last_dirty_timestamp
self._action_type = action_type
self._host_name = host_name
self._is_coordinating_discovery_server = is_coordinating_discovery_server
self._is_secure_port_enabled = is_secure_port_enabled
self._is_unsecure_port_enabled = is_unsecure_port_enabled
self._port = port
self._secure_port = secure_port
self._status = status
self._overridden_status = overridden_status
self._is_instance_info_dirty = is_instance_info_dirty
@property
@instance_id.setter
@property
@app_name.setter
@property
@app_group_name.setter
@property
@ip_address.setter
@property
@vip_address.setter
@property
@secure_vip_address.setter
@property
@lease_info.setter
@property
@metadata.setter
@property
@last_updated_timestamp.setter
@property
@last_dirty_timestamp.setter
@property
@port.setter
@property
@secure_port.setter
@property
@action_type.setter
@property
@host_name.setter
@property
@is_secure_port_enabled.setter
@property
@is_unsecure_port_enabled.setter
@property
@status.setter
@property
@overridden_status.setter
@property
@is_instance_info_dirty.setter
@property
@is_coordinating_discovery_server.setter
def is_port_enabled(self, port_type: PortType) -> bool:
"""
Checks whether a port is enabled for traffic or not.
Args:
port_type: indicates whether it is secure or unsecure port.
Returns: true if the port is enabled, false otherwise.
"""
return {
InstanceInfo.PortType.UNSECURE: self._is_unsecure_port_enabled,
InstanceInfo.PortType.SECURE: self._is_secure_port_enabled,
}.get(port_type, False)
def is_dirty(self) -> bool:
"""
Return whether any state changed so that EurekaClient can
check whether to retransmit info or not on the next heartbeat.
Returns: true if the instance is dirty, false otherwise.
"""
return self._is_instance_info_dirty
def set_is_dirty(self):
"""
Set the dirty flag so that the instance information can be carried to
the eureka server on the next heartbeat.
"""
self._is_instance_info_dirty = True
self._last_dirty_timestamp = current_timestamp()
def set_is_dirty_with_time(self) -> int:
"""
Set the dirty flag, and also return the timestamp of the is_dirty event.
Returns: the timestamp when the isDirty flag is set.
"""
self.set_is_dirty()
return self._last_dirty_timestamp
def unset_is_dirty(self, unset_dirty_timestamp: int):
"""
Unset the dirty flag iff the unset_dirty_timestamp matches the last_dirty_timestamp. No-op if
last_dirty_timestamp > unset_dirty_timestamp
Args:
unset_dirty_timestamp: the expected last_dirty_timestamp to unset.
"""
if self._last_dirty_timestamp <= unset_dirty_timestamp:
self._is_instance_info_dirty = False
def set_is_coordinating_discovery_server(self):
"""
Set the flag if this instance is the same as the eureka discovery server that is
return the instances. This flag is used by the discovery clients to
identify the discovery server which is coordinating/returning the
information.
"""
self._is_coordinating_discovery_server = True
def set_status(self, status: Status) -> Optional[Status]:
"""
Set the status for this instance.
Args:
status: status to be set for this instance.
Returns: the previous status if a different status from the current was set, none otherwise.
"""
if self._status != status:
previous_status = self._status
self._status = status
self.set_is_dirty()
return previous_status
return None
def set_status_without_dirty(self, status: Status):
"""
Set the status for this instance without updating the dirty timestamp.
Args:
status: status to be set for this instance.
"""
if self._status != status:
self._status = status
def set_overridden_status(self, status: Status):
"""
Set the overridden status for this instance. Normally set by an external
process to disable instance from taking traffic.
Args:
status: overridden status to be for this instance.
"""
if self._overridden_status != status:
self._overridden_status = status
def register_runtime_metadata(self, metadata: Dict[str, str]):
"""
Register application specific metadata to be sent to the eureka
server.
Args:
metadata: Dictionary containing key/value pairs.
"""
self._metadata.update(metadata)
self.set_is_dirty()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
3210,
5888,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
198,
198,
2,
629,
541,
13877,
198,
6738,
304,
495,
4914,
13,
16366,... | 2.46547 | 3,678 |
import functools
import torch.nn as nn
import torch
import numpy as np
from edflow.util import retrieve
_norm_options = {
"in": nn.InstanceNorm2d,
"bn": nn.BatchNorm2d,
"an": ActNorm}
| [
11748,
1257,
310,
10141,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1225,
11125,
13,
22602,
1330,
19818,
628,
198,
198,
62,
27237,
62,
25811,
796,
1391,
198,
220,
220,
... | 2.291667 | 96 |
# -*- coding: utf-8 -*-
"""
Created on Sun May 1 15:02:38 2016
@author: eman
"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.metrics import accuracy_score, precision_score, f1_score, \
fbeta_score, recall_score
from skll.metrics import kappa
import numpy as np
import pandas as pd
# this is my classification experiment function which basically delegates
# which classification method I want to use on the data.
# my simple naive LDA function to classify my data. Since I have
# multiple datasets, I loop through each dataset in my list and
# and perform classificaiton on that
#---------
# LDA Prediction
#---------------
def lda_pred(Xtrain, Xtest, Ytrain, Ytest):
""" Simple Naive Implementation of the the LDA
"""
# empty list for the predictions
Ypred = []
# loop through and perform classification
for xtrain, xtest, ytrain, ytest in zip(Xtrain,Xtest,
Ytrain, Ytest):
# initialize the model
lda_model = LDA()
# fit the model to the training data
lda_model.fit(xtrain, ytrain.ravel())
# save the results of the model predicting the testing data
Ypred.append(lda_model.predict(xtest))
# return this list
return Ypred
# the same function as before except with list comprehension
# (trying to practice that pythonic-ism a bit)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
1737,
220,
352,
1315,
25,
2999,
25,
2548,
1584,
198,
198,
31,
9800,
25,
31184,
198,
37811,
198,
6738,
1341,
35720,
13,
15410,
3036,
42483,
... | 2.505691 | 615 |
# Generated by Django 3.2.7 on 2021-10-31 22:24
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
940,
12,
3132,
2534,
25,
1731,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import numpy as np
from time import time
| [
11748,
299,
32152,
355,
45941,
198,
6738,
640,
1330,
640,
628
] | 3.818182 | 11 |
"""
`dumpdbfields` demonstrates how to enumerate tables and records.
"""
import os
import os.path
from Database import Database
if __name__ == "__main__":
main()
| [
37811,
198,
63,
39455,
9945,
25747,
63,
15687,
703,
284,
27056,
378,
8893,
290,
4406,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
6738,
24047,
1330,
24047,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 3.313725 | 51 |
"""Support for winrm commands to turn a switch on/off."""
import logging
import winrm
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchDevice,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COMMAND_STATE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_FRIENDLY_NAME,
CONF_SWITCHES,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
from .const import DOMAIN
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COMMAND_OFF, default="true"): cv.string,
vol.Optional(CONF_COMMAND_ON, default="true"): cv.string,
vol.Optional(CONF_COMMAND_STATE): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return switches controlled by shell commands."""
devices = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
CommandSwitch(
hass,
object_id,
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config.get(CONF_COMMAND_ON),
device_config.get(CONF_COMMAND_OFF),
device_config.get(CONF_COMMAND_STATE),
device_config.get(CONF_HOST),
device_config.get(CONF_PASSWORD),
device_config.get(CONF_USERNAME),
value_template,
)
)
if not switches:
_LOGGER.error("No switches added")
return False
add_entities(switches)
class CommandSwitch(SwitchDevice):
"""Representation a switch that can be toggled using shell commands."""
def __init__(
self,
hass,
object_id,
friendly_name,
command_on,
command_off,
command_state,
host,
password,
username,
value_template,
):
"""Initialize the switch."""
self._hass = hass
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = friendly_name
self._state = False
self._command_on = command_on
self._command_off = command_off
self._command_state = command_state
self._host = host
self._password = password
self._username = username
self._value_template = value_template
@staticmethod
def _switch(command, host, password, username):
"""Execute the actual commands."""
_LOGGER.info("Running command: %s on host: %s" % (command, host))
try:
session = winrm.Session(host, auth=(username, password))
response_object = session.run_ps(command)
success = response_object.status_code == 0
except Exception as e:
_LOGGER.error("Command failed: %s on host: %s. %s" % (command, host, e))
_LOGGER.error(" %s" % (e))
if not success:
_LOGGER.error("Command failed: %s on host: %s" % (command, host))
return success
@staticmethod
def _query_state_value(command, host, password, username):
"""Execute state command for return value."""
_LOGGER.info("Running state command: %s on host: %s" % (command, host))
try:
session = winrm.Session(host, auth=(username, password))
response_object = session.run_ps(command)
std_out = response_object.std_out.strip().decode("utf-8")
std_err = response_object.std_err.strip().decode("utf-8")
status_code = response_object.status_code
return std_out
except Exception as e:
_LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
@staticmethod
def _query_state_code(command, host, password, username):
"""Execute state command for return code."""
_LOGGER.info("Running state command: %s on host: %s" % (command, host))
try:
session = winrm.Session(host, auth=(username, password))
response_object = session.run_ps(command)
status_code = response_object.status_code
return status_code
except Exception as e:
_LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
@property
def should_poll(self):
"""Only poll if we have state command."""
return self._command_state is not None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._command_state is None
def _query_state(self):
"""Query for state."""
if not self._command_state:
_LOGGER.error("No state command specified")
return
if self._value_template:
return CommandSwitch._query_state_value(
self._command_state, self._host, self._password, self._username,
)
return CommandSwitch._query_state_code(
self._command_state, self._host, self._password, self._username,
)
def update(self):
"""Update device state."""
if self._command_state:
payload = str(self._query_state())
if self._value_template:
payload = self._value_template.render_with_possible_json_value(payload)
self._state = payload.lower() == "true"
def turn_on(self, **kwargs):
"""Turn the device on."""
if (
CommandSwitch._switch(
self._command_on, self._host, self._password, self._username
)
and not self._command_state
):
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
if (
CommandSwitch._switch(
self._command_off, self._host, self._password, self._username
)
and not self._command_state
):
self._state = False
self.schedule_update_ha_state()
| [
37811,
15514,
329,
1592,
26224,
9729,
284,
1210,
257,
5078,
319,
14,
2364,
526,
15931,
198,
11748,
18931,
198,
198,
11748,
1592,
26224,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,... | 2.201214 | 3,131 |
import toolz as tz
from call_map.core import UserScopeSettings, ScopeSettings, OrganizerNode
from call_map.jedi_dump import make_scope_settings
from call_map import project_settings_module
from call_map.project_settings_module import Project
from pathlib import Path
from sys import path as runtime_sys_path
test_modules_dir = Path(__file__).parent.joinpath('test_modules')
user_scope_settings = UserScopeSettings(
module_names=[],
file_names=test_modules_dir.glob('*.py'),
include_runtime_sys_path=True,
add_to_sys_path=([str(test_modules_dir)] + runtime_sys_path),
)
scope_settings = make_scope_settings(is_new_project=True,
saved_scope_settings=ScopeSettings([], [], []),
user_scope_settings=user_scope_settings) # type: ScopeSettings
project = Project(None)
project.settings.update(
{project_settings_module.modules: scope_settings.module_names,
project_settings_module.scripts: scope_settings.scripts,
project_settings_module.sys_path: scope_settings.effective_sys_path})
project.make_platform_specific_nodes('python')
root_node = OrganizerNode('Root', [],
list(tz.concatv(project.module_nodes['python'].values(),
project.script_nodes['python'].values())))
| [
11748,
2891,
89,
355,
256,
89,
198,
198,
6738,
869,
62,
8899,
13,
7295,
1330,
11787,
43642,
26232,
11,
41063,
26232,
11,
7221,
7509,
19667,
198,
6738,
869,
62,
8899,
13,
73,
13740,
62,
39455,
1330,
787,
62,
29982,
62,
33692,
198,
67... | 2.543561 | 528 |
# Copyright 2020 Aeris Communications Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
| [
2,
15069,
12131,
15781,
271,
14620,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.875 | 160 |
# Exercício 031 - Custo da Viagem
km = float(input('Qual é a distância da sua viagem? '))
if km <= 200:
preco = 0.50 * km
else:
preco = 0.45 * km
print(f'Você está prestes a começar uma viagem de {km:.1f}Km.')
print(f'E o preço da sua passagem será de R${preco:.2f}')
| [
2,
1475,
2798,
8836,
66,
952,
657,
3132,
532,
40619,
78,
12379,
16049,
363,
368,
198,
198,
13276,
796,
12178,
7,
15414,
10786,
46181,
38251,
257,
1233,
22940,
10782,
544,
12379,
424,
64,
25357,
363,
368,
30,
705,
4008,
198,
198,
361,
... | 2.036496 | 137 |
# File: ds_abstract_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from abc import ABCMeta
from ..httplib2 import Http, ProxyInfo, socks, proxy_info_from_environment
from dsapi.config.ds_proxy_config import DSProxyConfig
class DSAbstractService(object, metaclass=ABCMeta):
"""
Abstract Service that provides http methods to implementing services.
Proxy Settings - By default this class will use proxy settings from the environment.
For more control, pass a DSProxyConfig object as the keyword argument 'proxy' to
this class. The keyword argument will take precedence.
"""
def _prepare_proxy(self, ds_proxy_config):
"""
Transform a DSProxyConfig object to httplib ProxyInfo object
:type ds_proxy_config: DSProxyConfig
:return: ProxyInfo
"""
proxy_type_map = {
DSProxyConfig.Type.HTTP: socks.PROXY_TYPE_HTTP,
DSProxyConfig.Type.HTTP_NO_TUNNEL: socks.PROXY_TYPE_HTTP_NO_TUNNEL,
DSProxyConfig.Type.SOCKS4: socks.PROXY_TYPE_SOCKS4,
DSProxyConfig.Type.SOCKS5: socks.PROXY_TYPE_SOCKS5
}
return ProxyInfo(
proxy_type=proxy_type_map[ds_proxy_config.proxy_type],
proxy_host=ds_proxy_config.proxy_host,
proxy_port=ds_proxy_config.proxy_port,
proxy_rdns=ds_proxy_config.proxy_reverse_dns,
proxy_user=ds_proxy_config.proxy_user,
proxy_pass=ds_proxy_config.proxy_pass
)
| [
2,
9220,
25,
288,
82,
62,
397,
8709,
62,
15271,
13,
9078,
198,
2,
198,
2,
49962,
739,
24843,
362,
13,
15,
357,
5450,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
13,
14116,
8,
198,
2,
... | 2.404088 | 636 |
import re
texto = 'Testando, grupos especias!'
texto2 = 'supermercado superacao hiperMERCADO'
# lookahead
print(re.findall(r'\w+(?=,|!)', texto))
# lookbehind
# positive
print(re.findall(r'(?<=super)\w+', texto2))
# negative
print(re.findall(r'(?<!super)mercado', texto2, re.IGNORECASE))
| [
11748,
302,
198,
198,
5239,
78,
796,
705,
14402,
25440,
11,
22848,
1930,
1658,
431,
979,
292,
13679,
198,
198,
5239,
78,
17,
796,
705,
16668,
647,
66,
4533,
2208,
330,
5488,
289,
9346,
29296,
34,
2885,
46,
6,
198,
198,
2,
804,
382... | 2.212121 | 132 |
import pytest
import os
SERVICE_CONFIG_NAME = "service_manifest.yml"
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
28686,
198,
198,
35009,
27389,
62,
10943,
16254,
62,
20608,
796,
366,
15271,
62,
805,
8409,
13,
88,
4029,
1,
198,
51,
39494,
62,
35009,
27389,
62,
10943,
16254,
62,
34219,
796,
28686,
13,
6978,
13,
2217... | 2.457831 | 83 |
import os
import env
import tweepy
from tweepy import OAuthHandler
### Twitter Authentication
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
OAUTH_TOKEN = os.environ.get("OAUTH_TOKEN")
OAUTH_TOKEN_SECRET = os.environ.get("OAUTH_TOKEN_SECRET")
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter_api = tweepy.API(auth)
| [
11748,
28686,
198,
11748,
17365,
198,
11748,
4184,
538,
88,
198,
6738,
4184,
538,
88,
1330,
440,
30515,
25060,
198,
198,
21017,
3009,
48191,
198,
198,
10943,
50,
5883,
1137,
62,
20373,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
10943,
... | 2.430168 | 179 |
# coding=utf-8
import collections
import pandas as pd
import tensorflow as tf
import _pickle as pickle
from absl import logging
from transformers import BertTokenizer
LABELS = []
if __name__ == "__main__":
main()
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
17268,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
4808,
27729,
293,
355,
2298,
293,
198,
6738,
2352,
75,
1330,
18931,
198,
6738,
6121,
364... | 3.066667 | 75 |
from __future__ import annotations
import datetime
import math
import enum
import time
from abc import abstractmethod
DAYS_CCSDS_TO_UNIX = -4383
SECONDS_PER_DAY = 86400
UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
def convert_unix_days_to_ccsds_days(unix_days: int) -> int:
"""Convert Unix days to CCSDS days
CCSDS epoch: 1958 Januar 1
Unix epoch: 1970 January 1
"""
return unix_days - DAYS_CCSDS_TO_UNIX
def convert_ccsds_days_to_unix_days(ccsds_days: int) -> int:
"""Convert CCSDS days to Unix days
CCSDS epoch: 1958 Januar 1
Unix epoch: 1970 January 1
"""
return ccsds_days + DAYS_CCSDS_TO_UNIX
def read_p_field(p_field: int) -> CcsdsTimeCodeId:
"""Read the p field and return the CCSDS Time Code ID
:param p_field:
:return:
:raise IndexError: P field has invalid value
"""
return CcsdsTimeCodeId((p_field & 0x70) >> 4)
class CdsShortTimestamp(CcsdsTimeCode):
"""Unpacks the time datafield of the TM packet. Right now, CDS Short timeformat is used,
and the size of the time stamp is expected to be seven bytes.
"""
CDS_SHORT_ID = 0b100
TIMESTAMP_SIZE = 7
@classmethod
@classmethod
@classmethod
@staticmethod
def init_from_current_time() -> CdsShortTimestamp:
"""Returns a seven byte CDS short timestamp with the current time"""
unix_days = (datetime.datetime.utcnow() - UNIX_EPOCH).days
seconds = time.time()
fraction_ms = seconds - math.floor(seconds)
days_ms = int((seconds % SECONDS_PER_DAY) * 1000 + fraction_ms)
time_packet = CdsShortTimestamp.init_from_unix_days(
unix_days=unix_days, ms_of_day=days_ms
)
return time_packet
@abstractmethod
@abstractmethod
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
11748,
4818,
8079,
198,
11748,
10688,
198,
11748,
33829,
198,
11748,
640,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
198,
26442,
50,
62,
4093,
50,
5258,
62,
10468,
62,
4944,
10426,
796,
532,
... | 2.463989 | 722 |
# File: exp.py
# Author: raycp
# Date: 2019-06-06
# Description: exp for three, uaf to brute force to overwrite stdout to leak libc
from pwn_debug import *
pdbg=pwn_debug("./three")
pdbg.context.terminal=['tmux', 'splitw', '-h']
#pdbg.local()
pdbg.debug("2.27")
#pdbg.remote('127.0.0.1', 22)
#p=pdbg.run("local")
#p=pdbg.run("remote")
p=pdbg.run("debug")
membp=pdbg.membp
#print hex(membp.elf_base),hex(membp.libc_base)
elf=pdbg.elf
libc=pdbg.libc
#io_file=IO_FILE_plus()
#io_file.show()
if __name__ == '__main__':
pwn()
| [
2,
9220,
25,
1033,
13,
9078,
198,
2,
6434,
25,
26842,
13155,
198,
2,
7536,
25,
13130,
12,
3312,
12,
3312,
198,
2,
12489,
25,
1033,
329,
1115,
11,
334,
1878,
284,
33908,
2700,
284,
49312,
14367,
448,
284,
13044,
9195,
66,
198,
198,... | 2.106299 | 254 |
#!/usr/bin/env python
# encoding: utf-8
import eventlet
import pytun
import os
import sys
eventlet.monkey_patch(all=True)
tap = pytun.open('tap')
os.system("ip link set %s up" % tap.name)
os.system("ip link set dev %s mtu 520" % tap.name)
os.system("ip addr add 192.167.100.1/24 dev %s" % tap.name)
eventlet.spawn_n(handletap)
server = eventlet.listen(('0.0.0.0', 25702))
while True:
try:
new_sock, address = server.accept()
eventlet.spawn_n(handlenet, new_sock)
except (SystemExit, KeyboardInterrupt):
tap.close()
break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
1785,
1616,
198,
11748,
12972,
28286,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
15596,
1616,
13,
49572,
62,
17147,
7,
439,
28,
... | 2.358333 | 240 |
import sys;
import abc;
import math;
import multiprocessing;
import psutil;
import numpy as np;
from scipy.stats import t, f;
import DataHelper;
'''
d
f(x) = Σ βj * x^d
j=0
degree of freedom = d + 1
'''
'''
K
f(x) = β0 + Σβk * I(Ck < x <= Ck+1)
k=1
degree of freedom = K + 1
'''
'''
M-1 K
f(x) = Σ βj * x^(j-1) + Σ θk * (x-ξk)+^(M-1)
j=0 k=1
f, f', f'', ... d^(M-2)f is continuous at ξk, k = 1, 2, ..., K
degree of freedom = K + M
the default is cubic spline with M = 4.
'''
'''
K-2
f = β0 + β1x + Σ θj * (ξK - ξj) * [d(j, x) - d(K-1, x)]
j=1
d(j, x) = [(x - ξj)+^3 - (x - ξK)+^3] / (ξK - ξj)
f''(x) = 0, when x ∈ (-∞, ξ1] ∪ [ξK, ∞)
degree of freedom = K
when K = 1 and 2, f(x) = β0 + β1x.
'''
| [
11748,
25064,
26,
198,
11748,
450,
66,
26,
198,
11748,
10688,
26,
198,
11748,
18540,
305,
919,
278,
26,
198,
11748,
26692,
22602,
26,
198,
11748,
299,
32152,
355,
45941,
26,
198,
6738,
629,
541,
88,
13,
34242,
1330,
256,
11,
277,
26... | 1.646465 | 495 |
# -*- coding: utf-8 -*-
#
# Django components
#
# :copyright: 2020 Sonu Kumar
# :license: BSD-3-Clause
#
from .utils import DefaultDjangoContextBuilder, DjangoNotification, DefaultDjangoViewPermission
from .settings import *
from .utils import DjangoNotification, DefaultDjangoContextBuilder
from error_tracker.libs.utils import Masking, get_class_from_path, get_class_instance
from error_tracker import ModelMixin, MaskingMixin, TicketingMixin, NotificationMixin, ContextBuilderMixin, \
ViewPermissionMixin
from django.apps import apps as django_apps
import warnings
def get_exception_model():
"""
Return the APP error model that is active in this project.
"""
from .models import ErrorModel
model_path = APP_ERROR_DB_MODEL
if model_path is None:
warnings.warn("APP_ERROR_DB_MODEL is not set using default model")
return ErrorModel
try:
return django_apps.get_model(model_path, require_ready=False)
except ValueError:
model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
warning_message="Model " + model_path + " is not importable")
if model is not None:
return model
warnings.warn("APP_ERROR_DB_MODEL must be of the form 'app_label.model_name'")
except LookupError:
model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
warning_message="Model " + model_path + " is not importable")
if model is not None:
return model
warnings.warn(
"APP_ERROR_DB_MODEL refers to model '%s' that has not been installed" % model_path
)
raise LookupError("APP_ERROR_DB_MODEL is set to '%s' but it's not importable" % model_path)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
220,
220,
37770,
6805,
198,
2,
198,
2,
220,
220,
220,
1058,
22163,
4766,
25,
12131,
6295,
84,
26105,
198,
2,
220,
220,
220,
1058,
43085,
25,
347,
1... | 2.553041 | 707 |
sandwich('Jamon','Queso','Lechuga','Toamte')
sandwich('Queso','Mantequilla')
sandwich('Tocino','Carne','Salsa BBQ') | [
198,
38142,
11451,
10786,
41,
16487,
41707,
48,
947,
78,
41707,
43,
3055,
30302,
41707,
2514,
321,
660,
11537,
198,
38142,
11451,
10786,
48,
947,
78,
41707,
44,
415,
4853,
5049,
11537,
198,
38142,
11451,
10786,
51,
420,
2879,
41707,
991... | 2.468085 | 47 |
"""
@Time : 2021/6/23 17:08
@File : convnet.py
@Software: PyCharm
@Desc :
"""
from typing import Union, List
import torch.nn as nn
class ResidualBlock1D(nn.Module):
"""
The basic block of the 1d residual convolutional network
"""
def __init__(self, in_channel, out_channel, kernel_size=7, stride=1):
"""
Args:
in_channel ():
out_channel ():
kernel_size ():
stride ():
"""
super(ResidualBlock1D, self).__init__()
# assert kernel_size % 2 == 1
self.layers = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2,
bias=False),
nn.BatchNorm1d(out_channel),
nn.ReLU(inplace=True),
nn.Conv1d(out_channel, out_channel, kernel_size=kernel_size, stride=1, padding=kernel_size // 2,
bias=False),
nn.BatchNorm1d(out_channel)
)
self.downsample = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(out_channel)
)
self.relu = nn.ReLU(inplace=True)
def resnet_1d(in_channel: int, classes: int):
"""
Args:
in_channel ():
classes ():
Returns:
"""
return ConvNet1D(ResidualBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
def convnet_1d(in_channel: int, classes: int):
"""
Args:
in_channel ():
classes ():
Returns:
"""
return ConvNet1D(BasicConvBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
| [
37811,
198,
31,
7575,
220,
220,
220,
1058,
33448,
14,
21,
14,
1954,
1596,
25,
2919,
198,
31,
8979,
220,
220,
220,
1058,
3063,
3262,
13,
9078,
198,
31,
25423,
25,
9485,
1925,
1670,
198,
31,
24564,
220,
220,
220,
1058,
220,
198,
378... | 1.995789 | 950 |
from os.path import basename
from enum import IntEnum
from re import search
from dataclasses import dataclass
from cw_map import Map
SECTION_REGEX = r"^\s*.section\s+(?P<Name>.[a-zA-Z0-9_$]+)"
@dataclass
@dataclass | [
6738,
28686,
13,
6978,
1330,
1615,
12453,
198,
6738,
33829,
1330,
2558,
4834,
388,
198,
6738,
302,
1330,
2989,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
269,
86,
62,
8899,
1330,
9347,
198,
198,
50,
24565,
62,
315... | 2.552941 | 85 |
from .helper import *
| [
6738,
764,
2978,
525,
1330,
1635,
628
] | 3.285714 | 7 |
import moeda
p = float(input("Digite um preço: R$"))
aum = int(input("Aumento de quantos %: "))
red = int(input('Reduzindo de quantos %: '))
moeda.resumo(p, aum, red)
| [
11748,
6941,
18082,
198,
198,
79,
796,
12178,
7,
15414,
7203,
19511,
578,
23781,
662,
16175,
78,
25,
371,
3,
48774,
198,
26043,
796,
493,
7,
15414,
7203,
32,
1713,
78,
390,
5554,
418,
4064,
25,
366,
4008,
198,
445,
796,
493,
7,
15... | 2.366197 | 71 |
from . import compat
have_greenlet = False
if compat.py3k:
try:
import greenlet # noqa F401
except ImportError:
pass
else:
have_greenlet = True
from ._concurrency_py3k import await_only
from ._concurrency_py3k import await_fallback
from ._concurrency_py3k import greenlet_spawn
from ._concurrency_py3k import is_exit_exception
from ._concurrency_py3k import AsyncAdaptedLock
from ._concurrency_py3k import _util_async_run # noqa F401
from ._concurrency_py3k import (
_util_async_run_coroutine_function,
) # noqa F401, E501
from ._concurrency_py3k import asyncio # noqa F401
from ._concurrency_py3k import asynccontextmanager
if not have_greenlet:
asyncio = None # noqa F811
| [
6738,
764,
1330,
8330,
198,
198,
14150,
62,
14809,
1616,
796,
10352,
198,
198,
361,
8330,
13,
9078,
18,
74,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1330,
4077,
1616,
220,
1303,
645,
20402,
376,
21... | 2.315341 | 352 |
from tkPDFViewer import tkPDFViewer as pdf
from tkinter import *
from PySide2 import QtCore
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtWebEngineWidgets import *
from PySide2 import QtCore
from functools import partial
import json
import validators
import sys
import os
_web_actions = [QWebEnginePage.Back, QWebEnginePage.Forward,
QWebEnginePage.Reload,
QWebEnginePage.Undo, QWebEnginePage.Redo,
QWebEnginePage.Cut, QWebEnginePage.Copy,
QWebEnginePage.Paste, QWebEnginePage.SelectAll]
# A Find tool bar (bottom area)
class DownloadWidget(QProgressBar):
"""Lets you track progress of a QWebEngineDownloadItem."""
finished = QtCore.Signal()
remove_requested = QtCore.Signal()
@staticmethod
@staticmethod
class BrowserTabWidget(QTabWidget):
"""Enables having several tabs with QWebEngineView."""
url_changed = QtCore.Signal(QUrl)
enabled_changed = QtCore.Signal(QWebEnginePage.WebAction, bool)
download_requested = QtCore.Signal(QWebEngineDownloadItem)
_url_role = Qt.UserRole + 1
# Default bookmarks as an array of arrays which is the form
# used to read from/write to a .json bookmarks file
_default_bookmarks = [
['Tool Bar'],
]
_bookmark_file = 'bookmarks.json'
# Create the model from an array of arrays
# Serialize model into an array of arrays, writing out the icons
# into .png files under directory in the process
# Bookmarks as a tree view to be used in a dock widget with
# functionality to persist and populate tool bars and menus.
class BookmarkWidget(QTreeView):
"""Provides a tree view to manage the bookmarks."""
open_bookmark = QtCore.Signal(QUrl)
open_bookmark_in_new_tab = QtCore.Signal(QUrl)
changed = QtCore.Signal()
# Synchronize the bookmarks under parent_item to a target_object
# like QMenu/QToolBar, which has a list of actions. Update
# the existing actions, append new ones if needed or hide
# superfluous ones
# Return a short title for a bookmark action,
# "Qt | Cross Platform.." -> "Qt"
@staticmethod
main_windows = []
cwd = os.getcwd()
DYNOBITEFILES = "Dynobite Files"
DynobiteCreateFolder = os.path.join(cwd, DYNOBITEFILES)
DynobiteHistoryFolder = os.path.join(DYNOBITEFILES, "History.txt")
print("Dynobite\nVersion 94.0.992.31")
try:
os.mkdir(DynobiteCreateFolder)
except FileExistsError:
pass
picturefolder = "Images"
completedir = os.path.join(DynobiteCreateFolder, picturefolder)
try:
os.mkdir(completedir)
url="https://raw.githubusercontent.com/abhinavsatheesh/dynfiles/main/Dynobite/Images/1.png"
downloadpath=completedir
download(url,f'{downloadpath}/1.png')
except FileExistsError:
pass
def create_main_window():
"""Creates a MainWindow using 75% of the available screen resolution."""
main_win = MainWindow()
main_windows.append(main_win)
available_geometry = app.desktop().availableGeometry(main_win)
main_win.resize(available_geometry.width() * 2 / 3,
available_geometry.height() * 2 / 3)
main_win.show()
return main_win
def create_main_window_with_browser():
"""Creates a MainWindow with a BrowserTabWidget."""
main_win = create_main_window()
return main_win.add_browser_tab()
class MainWindow(QMainWindow):
"""Provides the parent window that includes the BookmarkWidget,
BrowserTabWidget, and a DownloadWidget, to offer the complete
web browsing experience."""
if __name__ == '__main__':
app = QApplication(sys.argv)
main_win = create_main_window()
initial_urls = sys.argv[1:]
if not initial_urls:
initial_urls.append('https://www.google.com')
for url in initial_urls:
main_win.load_url_in_new_tab(QUrl.fromUserInput(url))
exit_code = app.exec_()
main_win.write_bookmarks()
sys.exit(exit_code)
| [
6738,
256,
74,
20456,
7680,
263,
1330,
256,
74,
20456,
7680,
263,
355,
37124,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
9485,
24819,
17,
1330,
33734,
14055,
198,
6738,
9485,
24819,
17,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
... | 2.72545 | 1,446 |
#!/usr/bin/env python3
import click
import os
import sys
import toml
lu = { 'text': 'TEXT', 'real': 'DOUBLE PRECISION', 'time': 'TIMESTAMPTZ NOT NULL' }
@click.command()
@click.option('--no-geo-join', default=False, is_flag=True, show_default=True,
help="Don't produce the joined view to lat/lon, for example if this is the lat/lon data")
@click.argument('input', type=click.Path(exists=True), nargs=1)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
3904,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
284,
4029,
198,
198,
2290,
796,
1391,
705,
5239,
10354,
705,
32541,
3256,
705,
5305,
10354,
705,
35,
2606,
19146,
22814... | 2.690476 | 168 |
"""Features to allow run restart from a given step."""
from argparse import ArgumentTypeError
from functools import partial
from haddock.libs.libutil import non_negative_int, remove_folder
_help_cli = """Restart the run from a given step. Previous folders from
the selected step onward will be deleted."""
_arg_non_neg_int = partial(
non_negative_int,
exception=ArgumentTypeError,
emsg="Minimum value is 0, {!r} given.",
)
def add_restart_arg(parser):
"""Adds `--restart` option to argument parser."""
parser.add_argument(
"--restart",
type=_arg_non_neg_int,
default=None,
help=_help_cli,
)
def remove_folders_after_number(run_dir, num):
"""
Remove calculation folder after (included) a given number.
Example
-------
If the following step folders exist:
00_topoaa
01_rigidbody
02_mdref
03_flexref
and the number `2` is given, folders `02_` and `03_` will be
deleted.
Parameters
----------
run_dir : pathlib.Path
The run directory.
num : int
The number of the folder from which to delete calculation step
folders. `num` must be non-negative integer, or equivalent
representation.
"""
num = _arg_non_neg_int(num)
previous = sorted(list(run_dir.resolve().glob('[0-9][0-9]*/')))
for folder in previous[num:]:
remove_folder(folder)
return
| [
37811,
23595,
284,
1249,
1057,
15765,
422,
257,
1813,
2239,
526,
15931,
198,
6738,
1822,
29572,
1330,
45751,
6030,
12331,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
550,
67,
735,
13,
8019,
82,
13,
8019,
22602,
1330,
1729,... | 2.571936 | 563 |
# -*- coding: utf-8 -*-
"""Parses for MacOS Wifi log (wifi.log) files."""
import re
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacWifiLogEventData(events.EventData):
"""Mac Wifi log event data.
Attributes:
action (str): known WiFI action, for example connected to an AP,
configured, etc. If the action is not known, the value is
the message of the log (text variable).
agent (str): name and identifier of process that generated the log message.
function (str): name of function that generated the log message.
text (str): log message
"""
DATA_TYPE = 'mac:wifilog:line'
def __init__(self):
"""Initializes event data."""
super(MacWifiLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.agent = None
self.function = None
self.text = None
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parses MacOS Wifi log (wifi.log) files."""
NAME = 'macwifi'
DATA_FORMAT = 'MacOS Wifi log (wifi.log) file'
_ENCODING = 'utf-8'
THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
THREE_LETTERS = text_parser.PyparsingConstants.THREE_LETTERS
# Regular expressions for known actions.
_CONNECTED_RE = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
_WIFI_PARAMETERS_RE = re.compile(
r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
_KNOWN_FUNCTIONS = [
'airportdProcessDLILEvent',
'_doAutoJoin',
'_processSystemPSKAssoc']
_AGENT = (
pyparsing.Literal('<') +
pyparsing.Combine(
pyparsing.Literal('airportd') + pyparsing.CharsNotIn('>'),
joinString='', adjacent=True).setResultsName('agent') +
pyparsing.Literal('>'))
_DATE_TIME = pyparsing.Group(
THREE_LETTERS.setResultsName('day_of_week') +
THREE_LETTERS.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
THREE_DIGITS.setResultsName('milliseconds'))
# Log line with a known function name.
_MAC_WIFI_KNOWN_FUNCTION_LINE = (
_DATE_TIME.setResultsName('date_time') + _AGENT +
pyparsing.oneOf(_KNOWN_FUNCTIONS).setResultsName('function') +
pyparsing.Literal(':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
# Log line with an unknown function name.
_MAC_WIFI_LINE = (
_DATE_TIME.setResultsName('date_time') + pyparsing.NotAny(
_AGENT +
pyparsing.oneOf(_KNOWN_FUNCTIONS) +
pyparsing.Literal(':')) +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
_MAC_WIFI_HEADER = (
_DATE_TIME.setResultsName('date_time') +
pyparsing.Literal('***Starting Up***').setResultsName('text'))
_DATE_TIME_TURNED_OVER_HEADER = pyparsing.Group(
text_parser.PyparsingConstants.MONTH.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS)
_MAC_WIFI_TURNED_OVER_HEADER = (
_DATE_TIME_TURNED_OVER_HEADER.setResultsName('date_time') +
pyparsing.Combine(
pyparsing.Word(pyparsing.printables) +
pyparsing.Word(pyparsing.printables) +
pyparsing.Literal('logfile turned over') +
pyparsing.LineEnd(),
joinString=' ', adjacent=False).setResultsName('text'))
# Define the available log line structures.
LINE_STRUCTURES = [
('header', _MAC_WIFI_HEADER),
('turned_over_header', _MAC_WIFI_TURNED_OVER_HEADER),
('known_function_logline', _MAC_WIFI_KNOWN_FUNCTION_LINE),
('logline', _MAC_WIFI_LINE)]
_SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
def __init__(self):
"""Initializes a parser."""
super(MacWifiLogParser, self).__init__()
self._last_month = 0
self._year_use = 0
def _GetAction(self, action, text):
"""Parse the well known actions for easy reading.
Args:
action (str): the function or action called by the agent.
text (str): mac Wifi log text.
Returns:
str: a formatted string representing the known (or common) action.
If the action is not known the original log text is returned.
"""
# TODO: replace "x in y" checks by startswith if possible.
if 'airportdProcessDLILEvent' in action:
interface = text.split()[0]
return 'Interface {0:s} turn up.'.format(interface)
if 'doAutoJoin' in action:
match = self._CONNECTED_RE.match(text)
if match:
ssid = match.group(1)[1:-1]
else:
ssid = 'Unknown'
return 'Wifi connected to SSID {0:s}'.format(ssid)
if 'processSystemPSKAssoc' in action:
wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)
if wifi_parameters:
ssid = wifi_parameters.group(1)
bssid = wifi_parameters.group(2)
security = wifi_parameters.group(3)
if not ssid:
ssid = 'Unknown'
if not bssid:
bssid = 'Unknown'
if not security:
security = 'Unknown'
return (
'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
'Security: {2:s}.').format(bssid, ssid, security)
return text
def _GetTimeElementsTuple(self, key, structure):
"""Retrieves a time elements tuple from the structure.
Args:
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
milliseconds (int): milliseconds.
"""
time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
# TODO: what if time_elements_tuple is None.
if key == 'turned_over_header':
month, day, hours, minutes, seconds = time_elements_tuple
milliseconds = 0
else:
_, month, day, hours, minutes, seconds, milliseconds = time_elements_tuple
# Note that dfdatetime_time_elements.TimeElements will raise ValueError
# for an invalid month.
month = timelib.MONTH_DICT.get(month.lower(), 0)
if month != 0 and month < self._last_month:
# Gap detected between years.
self._year_use += 1
return self._year_use, month, day, hours, minutes, seconds, milliseconds
def _ParseLogLine(self, parser_mediator, key, structure):
"""Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
self._last_month = time_elements_tuple[1]
function = self._GetValueFromStructure(structure, 'function')
text = self._GetValueFromStructure(structure, 'text')
if text:
text = text.strip()
event_data = MacWifiLogEventData()
event_data.agent = self._GetValueFromStructure(structure, 'agent')
event_data.function = function
event_data.text = text
if key == 'known_function_logline':
event_data.action = self._GetAction(
event_data.function, event_data.text)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
self._ParseLogLine(parser_mediator, key, structure)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac Wifi log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
key = 'header'
try:
structure = self._MAC_WIFI_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
key = 'turned_over_header'
try:
structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
except pyparsing.ParseException:
structure = None
if not structure:
logger.debug('Not a Mac Wifi log file')
return False
time_elements_tuple = self._GetTimeElementsTuple(key, structure)
try:
dfdatetime_time_elements.TimeElementsInMilliseconds(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug(
'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
time_elements_tuple))
return False
self._last_month = time_elements_tuple[1]
return True
manager.ParsersManager.RegisterParser(MacWifiLogParser)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
47,
945,
274,
329,
4100,
2640,
370,
22238,
2604,
357,
86,
22238,
13,
6404,
8,
3696,
526,
15931,
198,
198,
11748,
302,
198,
198,
11748,
279,
4464,
945,
278,
198,... | 2.496782 | 4,195 |
MAX_NUM_PROCESSES = 4
REPO_URL = 'https://api.github.com/repos/srp33/WishBuilder/'
WB_DIRECTORY = '/app/'
SQLITE_FILE = WB_DIRECTORY + 'WishBuilderCI/history.sql'
TESTING_LOCATION = WB_DIRECTORY + 'WishBuilderCI/testing/'
RAW_DATA_STORAGE = WB_DIRECTORY + 'RawDatasets/'
GENEY_DATA_LOCATION = WB_DIRECTORY + 'GeneyDatasets/'
GENEY_CONVERTER = WB_DIRECTORY + 'GeneyTypeConverter/typeconverter.py'
MIN_TEST_CASES = 8
MIN_FEATURES = 2
MIN_SAMPLES = 2
MAX_TITLE_SIZE = 300
NUM_SAMPLE_ROWS = 5
NUM_SAMPLE_COLUMNS = 5
CHECK_MARK = '✅'
RED_X = '❌'
WARNING_SYMBOL = "<p><font color=\"orange\" size=\"+2\">⚠\t</font>"
KEY_DATA_NAME = 'test_data.tsv'
KEY_META_DATA_NAME = 'test_metadata.tsv'
TEST_DATA_NAME = 'data.tsv.gz'
TEST_META_DATA_NAME = 'metadata.tsv.gz'
DOWNLOAD_FILE_NAME = 'download.sh'
INSTALL_FILE_NAME = 'install.sh'
PARSE_FILE_NAME = 'parse.sh'
CLEANUP_FILE_NAME = 'cleanup.sh'
DESCRIPTION_FILE_NAME = 'description.md'
CONFIG_FILE_NAME = 'config.yaml'
REQUIRED_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME, DOWNLOAD_FILE_NAME, INSTALL_FILE_NAME, PARSE_FILE_NAME,
CLEANUP_FILE_NAME, DESCRIPTION_FILE_NAME, CONFIG_FILE_NAME]
REQUIRED_CONFIGS = ['title', 'featureDescription', 'featureDescriptionPlural']
# These are the executables that will be ran to produce the data and metadata files (They are executed in this order)
USER_SCRIPTS = [INSTALL_FILE_NAME, DOWNLOAD_FILE_NAME, PARSE_FILE_NAME]
KEY_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME]
| [
22921,
62,
41359,
62,
4805,
4503,
7597,
1546,
796,
604,
198,
2200,
16402,
62,
21886,
796,
705,
5450,
1378,
15042,
13,
12567,
13,
785,
14,
260,
1930,
14,
27891,
79,
2091,
14,
54,
680,
32875,
14,
6,
198,
45607,
62,
17931,
23988,
15513... | 2.422259 | 611 |
import subprocess
| [
11748,
850,
14681,
628,
628
] | 4.2 | 5 |
import numpy as np
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.samplers.rollout_functions import (
create_rollout_function,
multitask_rollout,
)
from rlkit.torch.her.her import HER
from rlkit.torch.sac.sac import TwinSAC
| [
11748,
299,
32152,
355,
45941,
198,
6738,
374,
75,
15813,
13,
7890,
62,
27604,
13,
8158,
62,
11600,
62,
260,
1759,
62,
22252,
1330,
3467,
198,
220,
220,
220,
11086,
35,
713,
6892,
9608,
278,
28632,
198,
6738,
374,
75,
15813,
13,
376... | 2.654206 | 107 |
from pyqode.core import modes
from qtpy import QtGui
| [
6738,
12972,
80,
1098,
13,
7295,
1330,
12881,
198,
6738,
10662,
83,
9078,
1330,
33734,
8205,
72,
628,
628,
198
] | 2.85 | 20 |
import numpy as np
from sklearn import metrics
def reformat_train_ratings(train_data):
"""
@param train_data: data ratings for train
"""
train_data = train_data.tolist()
train_data = sorted(train_data, key=lambda key: key[1], reverse=False)
return np.array(train_data)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
20731,
628,
198,
198,
4299,
4975,
265,
62,
27432,
62,
10366,
654,
7,
27432,
62,
7890,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
2488,
17143,
... | 2.672566 | 113 |
from .config import configure
| [
6738,
764,
11250,
1330,
17425,
628
] | 5.166667 | 6 |
<warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6 do not have type long. Use int instead.">long("abc")</warning> | [
27,
43917,
1715,
81,
2625,
37906,
2196,
513,
13,
15,
11,
513,
13,
16,
11,
513,
13,
17,
11,
513,
13,
18,
11,
513,
13,
19,
11,
513,
13,
20,
11,
513,
13,
21,
466,
407,
423,
2099,
890,
13,
5765,
493,
2427,
526,
29,
6511,
7203,
... | 2.442308 | 52 |
# For testing that invalid plugin raising error doesn't break the app
raise BaseException('Must be caught in plugin module')
| [
2,
1114,
4856,
326,
12515,
13877,
8620,
4049,
1595,
470,
2270,
262,
598,
198,
40225,
7308,
16922,
10786,
34320,
307,
4978,
287,
13877,
8265,
11537,
198
] | 4.807692 | 26 |
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from .views import SearchPriceView, SearchPlacesView, ContactView, FavouriteView
urlpatterns = [
url(r'prices/(?P<type>1|2)/$', SearchPriceView.as_view(), name='search_prices'),
url(r'places/(?P<name>\w+)/$', SearchPlacesView.as_view(), name='search_places'),
url(r'contact/(?P<type>sale|letting)/$', csrf_exempt(ContactView.as_view()), name='contact'),
url(r'favourite/(?P<type>sale|letting)/(?P<slug>[-\w]+)/$', csrf_exempt(FavouriteView.as_view()), name='favourite')
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
33571,
13,
12501,
273,
2024,
13,
6359,
41871,
1330,
269,
27891,
69,
62,
42679,
198,
198,
6738,
764,
33571,
1330,
11140,
18124,
7680,
11,
11140,
3646,... | 2.482759 | 232 |
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, Tunable, TunableTuple, TunableVariant
| [
6738,
985,
82,
19,
13,
28286,
278,
13,
28286,
540,
1330,
7875,
51,
403,
540,
22810,
11,
11160,
22810,
31768,
11,
13932,
540,
11,
13932,
540,
51,
29291,
11,
13932,
540,
23907,
415,
198
] | 3.147059 | 34 |
import setuptools
setuptools.setup(
name="vrpy",
version="0.5.0",
description="A python framework for solving vehicle routing problems",
license="MIT",
author="Romain Montagne, David Torres",
author_email="r.montagne@hotmail.fr",
keywords=["vehicle routing problem", "vrp", "column generation"],
long_description=open("README.md", "r").read(),
long_description_content_type="text/x-rst",
url="https://github.com/Kuifje02/vrpy",
packages=setuptools.find_packages(),
install_requires=["cspy", "networkx", "numpy", "pulp"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| [
11748,
900,
37623,
10141,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,
1438,
2625,
37020,
9078,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
20,
13,
15,
1600,
198,
220,
220,
220,
6764,
2625,
32,
21015,
9355,
329,... | 2.683706 | 313 |
from auxiliaries import datetime_from_epoch
from auxiliaries import set_verbosity
from globalvalues import ANSI_RESET, ANSI_YEL, ANSI_GR, ANSI_RED
from globalvalues import NETWORK_LED_BLINK_PERIOD_S
from globalvalues import DEFAULT_DATA_BACKLOG_FILE
from globalvalues import CPM_DISPLAY_TEXT
from globalvalues import strf, FLUSH_PAUSE_S
from collections import deque
import socket
import time
import ast
import os
import errno
class Data_Handler(object):
"""
Object for sending data to server.
Also handles writing to datalog and
storing to memory.
"""
def test_send(self, cpm, cpm_err):
"""
Test Mode
"""
self.vprint(
1, ANSI_RED + " * Test mode, not sending to server * " +
ANSI_RESET)
def no_config_send(self, cpm, cpm_err):
"""
Configuration file not present
"""
self.vprint(1, "Missing config file, not sending to server")
def no_publickey_send(self, cpm, cpm_err):
"""
Publickey not present
"""
self.vprint(1, "Missing public key, not sending to server")
def send_to_memory(self, cpm, cpm_err):
"""
Network is not up
"""
if self.led:
self.led.start_blink(interval=self.blink_period_s)
self.send_to_queue(cpm, cpm_err)
self.vprint(1, "Network down, saving to queue in memory")
def regular_send(self, this_end, cpm, cpm_err):
"""
Normal send. Socket errors are handled in the main method.
"""
if self.led:
if self.led.blinker:
self.led.stop_blink()
self.led.on()
self.manager.sender.send_cpm_new(this_end, cpm, cpm_err)
if self.queue:
self.vprint(1, "Flushing memory queue to server")
no_error_yet = True
while self.queue and no_error_yet:
time.sleep(FLUSH_PAUSE_S)
trash = self.queue.popleft()
try:
self.manager.sender.send_cpm_new(
trash[0], trash[1], trash[2])
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1, 'Failed to send packet! ' +
'Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! ' +
'Network is unreachable')
else:
# consider handling errno.ECONNABORTED,
# errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(trash[0], trash[1], trash[2])
no_error_yet = False
else:
self.manager.sender.send_cpm(cpm, cpm_err)
def send_to_queue(self, cpm, cpm_err):
"""
Adds the time, cpm, and cpm_err to the deque object.
"""
time_string = time.time()
self.queue.append([time_string, cpm, cpm_err])
def backlog_to_queue(self, path=DEFAULT_DATA_BACKLOG_FILE):
"""
Sends data in backlog to queue and deletes the backlog
"""
if os.path.isfile(path):
self.vprint(2, "Flushing backlog file to memory queue")
with open(path, 'r') as f:
data = f.read()
data = ast.literal_eval(data)
for i in data:
self.queue.append([i[0], i[1], i[2]])
print(self.queue)
os.remove(path)
def main(self, datalog, cpm, cpm_err, this_start, this_end, counts):
"""
Determines how to handle the cpm data.
"""
start_text = datetime_from_epoch(this_start).strftime(strf)
end_text = datetime_from_epoch(this_end).strftime(strf)
self.vprint(
1, CPM_DISPLAY_TEXT.format(
time=datetime_from_epoch(time.time()),
counts=counts,
cpm=cpm,
cpm_err=cpm_err,
start_time=start_text,
end_time=end_text))
self.manager.data_log(datalog, cpm=cpm, cpm_err=cpm_err)
if self.manager.test:
# for testing the memory queue
self.send_to_memory(cpm, cpm_err)
elif not self.manager.config:
self.no_config_send(cpm, cpm_err)
elif not self.manager.publickey:
self.no_publickey_send(cpm, cpm_err)
else:
try:
self.regular_send(this_end, cpm, cpm_err)
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1,
'Failed to send packet! Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! Network is unreachable')
else:
# consider handling errno.ECONNABORTED errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(cpm, cpm_err)
| [
6738,
27506,
2403,
3166,
1330,
4818,
8079,
62,
6738,
62,
538,
5374,
198,
6738,
27506,
2403,
3166,
1330,
900,
62,
19011,
16579,
198,
6738,
3298,
27160,
1330,
3537,
11584,
62,
19535,
2767,
11,
3537,
11584,
62,
56,
3698,
11,
3537,
11584,
... | 1.77783 | 4,276 |
from typing import Type
from linum.excel_renderer.base.date_cell import DateCell
from linum.excel_renderer.base.date_row import DateRow
from linum.excel_renderer.calendar.space.space_cell import SpaceCell
| [
6738,
19720,
1330,
5994,
198,
198,
6738,
9493,
388,
13,
1069,
5276,
62,
10920,
11882,
13,
8692,
13,
4475,
62,
3846,
1330,
7536,
28780,
198,
6738,
9493,
388,
13,
1069,
5276,
62,
10920,
11882,
13,
8692,
13,
4475,
62,
808,
1330,
7536,
... | 3.136364 | 66 |