blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e3688f37181081a87ea6b287d0b2aa3a016ab020 | 378393b9afbc81f2844268f00c63add0abc45467 | /OpenNE/L3Hope.py | 0a5a7d622eec716c84c9f565b764c7ca06fceeb8 | [] | no_license | mayunlong89/SiGraC | 90a517fea9a451c304107581ae2f1e40bf9e7d35 | 0f7bb5283d14c328af86d2b51446b6b8f02d20af | refs/heads/main | 2023-04-28T02:06:06.766543 | 2021-05-20T17:43:34 | 2021-05-20T17:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 20:26:32 2020
@author: Secil
"""
# -*- coding: utf-8 -*-
import networkx as nx
import numpy as np
import scipy.sparse.linalg as lg
__author__ = "Alan WANG"
__email__ = "alan1995wang@outlook.com"
import scipy.sparse as sp
class HOPE(object):
    """HOPE-style graph embedding.

    Builds a matrix S from the input graph, factorizes it with a truncated
    SVD, and concatenates the scaled left/right singular vectors into one
    embedding of dimension d per node.
    """
    def __init__(self, graph, d):
        '''
        d: representation vector dimension

        graph is expected to expose .G (a networkx graph), .node_size and
        .look_back_list (index -> node id).  Embeddings are computed eagerly
        in the constructor.
        '''
        self._d = d
        self._graph = graph.G
        self.g = graph
        self._node_num = graph.node_size
        self.learn_embedding()
    # NOTE(review): this method is missing `self` (or a @staticmethod
    # decorator), so calling it on an instance would fail; it is not called
    # anywhere in this class — confirm whether it is dead code.
    def calc_A_hat(adj_matrix):
        # Symmetrically normalized adjacency, blended with the identity:
        # mu * D^{-1/2} A D^{-1/2} + (1-mu) I + eta I
        nnodes = adj_matrix.shape[0]
        mu = 0.95
        eta = 1e-6
        A = adj_matrix# + sp.eye(nnodes)
        D_vec = np.sum(A, axis=1)
        D_vec_invsqrt_corr = 1 / np.sqrt(D_vec)
        D_invsqrt_corr = sp.diags(D_vec_invsqrt_corr)
        return mu*D_invsqrt_corr @ A @ D_invsqrt_corr + (1-mu)*sp.eye(nnodes) + eta*sp.eye(nnodes)
    def learn_embedding(self):
        """Compute self._X, the (n_nodes, d) embedding matrix."""
        #graph = self.g.G
        graph = self.g.G.to_undirected()
        A = nx.to_numpy_matrix(graph)
        mu = 0.1;
        eta = 1e-6
        # A is rebuilt from the graph Laplacian blended with the identity.
        # NOTE(review): the "PPR" print below does not match this
        # Laplacian-based construction — confirm which variant is intended.
        norm_lap_mat = nx.laplacian_matrix(graph)
        A = mu*norm_lap_mat + (1-mu)*np.eye(graph.number_of_nodes()) + eta*np.eye(graph.number_of_nodes())
        #A = norm_lap_mat
        # self._beta = 0.0728
        # M_g = np.eye(graph.number_of_nodes()) - self._beta * A
        # M_l = self._beta * A
        print("dimension = ", self._d)
        print("PPR")
        # M_g is the identity here, so inv(M_g) @ M_l is just M_l = A @ A.
        M_g = np.eye(graph.number_of_nodes())
        M_l = np.dot(A, A)
        S = np.dot(np.linalg.inv(M_g), M_l)
        # s: \sigma_k
        # Truncated SVD keeps d/2 singular triplets; each side is scaled by
        # sqrt(sigma) and the two halves are concatenated to give d dims.
        u, s, vt = lg.svds(S, k=self._d // 2)
        sigma = np.diagflat(np.sqrt(s))
        X1 = np.dot(u, sigma)
        X2 = np.dot(vt.T, sigma)
        # self._X = X2
        self._X = np.concatenate((X1, X2), axis=1)
    @property
    def vectors(self):
        """Return a {node_id: embedding row} dict built via look_back_list."""
        vectors = {}
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self._X):
            vectors[look_back[i]] = embedding
        return vectors
    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: header line with
        "<n_nodes> <dim>", then one "<node> <v1> <v2> ..." line per node."""
        fout = open(filename, 'w')
        node_num = len(self.vectors.keys())
        fout.write("{} {}\n".format(node_num, self._d))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node,
                                        ' '.join([str(x) for x in vec])))
        fout.close()
| [
"noreply@github.com"
] | noreply@github.com |
7d18c8912f2bc161a91f7ac5b3d9b07057aa99c7 | 62e6a618dad3a6750cbd53dad185b799a6727a43 | /max_subarrays.py | f69a3d315913fce505c8ddd78050fc37ef0019a0 | [] | no_license | petr-tik/misc | 2319dbec8dc961056411319688fcaa38ae4cc419 | e32dce8a30f1a1f456a9e8086dc5470d95f4745e | refs/heads/master | 2020-04-12T09:37:36.163879 | 2017-03-10T14:12:26 | 2017-03-11T00:22:14 | 49,777,002 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | #! /usr/bin/env python
# https://www.hackerrank.com/challenges/maxsubarray
import unittest
MAX_INT_VALUE = 1000
"""
Given an array of ints between -MAX_VALUE < int < MAX_VALUE
return the sum of maximum contiguous and non-contiguous subarrays
"""
def max_cont(arr, res):
    """
    Given an array and a starting value, return the sum of the maximum contiguous array
    Solve dynamically (Kadane's algorithm)

    res seeds the running sum, so callers pass 0 for a plain query.

    [2, 6, -9] returns 8
    [-3, -1, -5] returns -1
    [2, -1, 2, 3, 4, -5] returns 10
    [6, -7, 2, 3, 4, -5] returns 9
    [6, -7, 2, 3, -5] returns 6
    """
    # BUG FIX: the original version discarded the recursive call's result
    # (so it always returned None), printed debug output, and destructively
    # popped elements off the caller's list.  This iterative Kadane scan
    # returns the correct maximum and leaves `arr` untouched.
    best = None
    running = res
    for value in arr:
        # Either extend the current run or start a fresh run at `value`.
        running = max(running + value, value)
        best = running if best is None else max(best, running)
    # Empty input: nothing to scan, the seed is all we have.
    return res if best is None else best
def max_non_cont(arr):
    """Return the maximum non-contiguous subarray sum of @arr.

    Every positive element always helps, so the answer is the sum of all
    positive entries; if there are none, it is the single largest
    (least negative) element, floored at -MAX_INT_VALUE.
    """
    positives_total = sum(x for x in arr if x > 0)
    if positives_total:
        return positives_total
    # No positive entries: fall back to the largest non-positive element,
    # seeded with -MAX_INT_VALUE exactly like the original scan.
    candidates = [x for x in arr if x <= 0]
    return max([-MAX_INT_VALUE] + candidates)
def solve(arr):
    """Return both answers for @arr as a tuple.

    First element: maximum contiguous subarray sum (via max_cont).
    Second element: maximum non-contiguous subarray sum (via max_non_cont).
    """
    contiguous_sum = max_cont(arr, 0)
    non_contiguous_sum = max_non_cont(arr)
    return contiguous_sum, non_contiguous_sum
# print solve([2, -1, 2, 3, 4, -5])
class TestSolve(unittest.TestCase):
    """Pins the expected maximum-contiguous-subarray sum for five fixtures."""
    def setUp(self):
        # One fixture per test, stored as instance attributes.
        self.arr1, self.arr2, self.arr3, self.arr4, self.arr5 = (
            [2, 6, -9],
            [-3, -1, -5],
            [2, -1, 2, 3, 4, -5],
            [6, -7, 2, 3, 4, -5],
            [6, -7, 2, 3, -5],
        )
    def test_max_cont1(self):
        self.assertEqual(max_cont(self.arr1, 0), 8)
    def test_max_cont2(self):
        self.assertEqual(max_cont(self.arr2, 0), -1)
    def test_max_cont3(self):
        self.assertEqual(max_cont(self.arr3, 0), 10)
    def test_max_cont4(self):
        self.assertEqual(max_cont(self.arr4, 0), 9)
    def test_max_cont5(self):
        self.assertEqual(max_cont(self.arr5, 0), 6)
if __name__ == '__main__':
    # Ad-hoc manual checks (Python 2 print statements); the unittest
    # suite above is left disabled while max_cont is being debugged.
    print max_cont([-1, -5], -3)
    print max_cont([-3, -1, -5], -6)
    # unittest.main()
| [
"petr-tik@users.noreply.github.com"
] | petr-tik@users.noreply.github.com |
2d89cfafb7a661d6b1f99a976e6ca4e33e734ba8 | 5446820fb20aaf93c440bdf63b97348ea110a583 | /src/samples/glut/hello_glut.py | 234cc5da1ef114af9e5397390d85dec3b4c9cb18 | [
"BSD-3-Clause"
] | permissive | duchowski/pyopenvr | e529ebe9b1d048e359f579dd9d39ed39a581baa7 | 179c61cdedd5e60661927c08a46b8a904c5c5cae | refs/heads/master | 2022-12-19T18:01:59.229103 | 2020-09-24T19:19:38 | 2020-09-24T19:19:38 | 298,374,471 | 0 | 0 | BSD-3-Clause | 2020-09-24T19:20:08 | 2020-09-24T19:20:07 | null | UTF-8 | Python | false | false | 491 | py | #!/bin/env python
# file hello_glfw.py
from openvr.glframework.glut_app import GlutApp
from openvr.gl_renderer import OpenVrGlRenderer
from openvr.color_cube_actor import ColorCubeActor
"""
Minimal glfw programming example which colored OpenGL cube scene that can be closed by pressing ESCAPE.
"""
if __name__ == "__main__":
    # Render a color cube through OpenVR in a GLUT window; the context
    # manager tears the GLUT app down (and closes the window) on exit.
    actor = ColorCubeActor()
    renderer = OpenVrGlRenderer(actor)
    with GlutApp(renderer, b"glut OpenVR color cube") as glutApp:
        glutApp.run_loop()
| [
"cmbruns@rotatingpenguin.com"
] | cmbruns@rotatingpenguin.com |
a417056bb5f191941267d717c2371b027a5dbed7 | 4824918f12954e76642054a4abffd2af25a5fab4 | /day02/kill.py | 40190465744162cb1048ff3adf3bb1fdbbd8471a | [] | no_license | karta059488/project01 | 987fcdb68e13454120083327233404fd7cb69b6a | d7a5e08c00ab186117c761e2f26a9d8fcd4b2c72 | refs/heads/master | 2020-07-09T08:24:51.041464 | 2019-08-23T05:10:19 | 2019-08-23T05:10:19 | 203,926,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import os
import signal
# Send the SIGKILL signal to process 4935 (hard-coded PID).
os.kill(4935, signal.SIGKILL)
| [
"karta@gmail.com"
] | karta@gmail.com |
580af19f0e613446007763b6a1d2e69f914ef882 | ec7f558786ef15bf6793553ed4fa90b225f7ecd4 | /thesis-work/Userdriven_composition/serializers.py | ec165973f24ba60cd98250d02304856d9b05dec3 | [] | no_license | AmbientIntelligenceLab/User-driven-composition-of-collaborative-services-middleware | 08999e525ddacfde265bc5b06be6980dafc28ebd | cbe2d070d5b2bf52412f9d890dafec0b3ee47d13 | refs/heads/master | 2020-04-17T01:18:47.476912 | 2019-01-24T14:22:45 | 2019-01-24T14:22:45 | 166,086,836 | 3 | 5 | null | 2019-03-31T17:28:50 | 2019-01-16T18:04:58 | Python | UTF-8 | Python | false | false | 330 | py | from rest_framework import serializers
from .models import *
class ActuatorSerializer(serializers.ModelSerializer):
    """REST serializer exposing an Actuator's topic/value/time/name fields."""
    class Meta:
        model = Actuator
        fields = ('topic','value', 'time', 'name')
class LowerSensorSerializer(serializers.ModelSerializer):
    """Serializer for lower-sensor records.

    NOTE(review): despite the name, this is bound to the Actuator model,
    same as ActuatorSerializer — confirm whether a LowerSensor model was
    intended here.
    """
    class Meta:
        model = Actuator
        fields = ('topic','value', 'time', 'name')
| [
"aveechakra@gmail.com"
] | aveechakra@gmail.com |
8d01a6b1d7a2563ad35c253619a115d22ec24cef | e45ee37561d6676689d3a98cf01f0169fd61d64e | /function_arguments.py | 80d7397b96e273073c0f52c7455900541ec34f3f | [] | no_license | vara0257/python-practise | 3708b2556b099a173945c64ba244e9dfc46ad200 | e234f3b09ada9d84732b79ac70b458966f4b9d43 | refs/heads/main | 2023-06-04T11:42:09.740608 | 2021-06-21T02:29:12 | 2021-06-21T02:29:12 | 378,663,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | def my_function(value):
print(value)
my_function(input("Enter a value:"))
==========================================
def my_function(fname, lname):
print ("Full name is", fname, lname)
a, b = input("Enter firstname and lastname:").split()
my_function(a,b)
==========================================
def my_function(*students):
print("Students names are: " + students[0], students[1])
my_function("1. Vara", "2. Mahesh", "3. Krishna")
==========================================
def my_function(**students):
print("His last name is " + kid["fname"])
my_function(fname = "Vara", lname = "prasad")
==========================================
def my_function(students):
for x in students:
print(x)
my_function(["vara", "prasad", "mahesh"])
| [
"noreply@github.com"
] | noreply@github.com |
942765fc04bee69382b6e468382c5c2aba6690e0 | aa9bf50d047205ce15c198272bed05ed17108d55 | /test/clear.py | 816a600e68d66042aaffccf4ff5f2cf10fae270c | [] | no_license | sgpthomas/py-moodlight | d03bf3d0f5158770424ba5b541b480ab4ddaa662 | de7798e8c5234eb444484c4333f4ba6fbb9b75bd | refs/heads/master | 2020-04-04T19:54:53.596598 | 2018-11-05T13:57:40 | 2018-11-05T13:57:40 | 156,225,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | #!/usr/bin/env python3
import rpi_ws281x as ws
if __name__ == "__main__":
    # Blank the whole 450-LED strip driven from GPIO pin 18.
    strip = ws.PixelStrip(450, 18, brightness=100)
    strip.begin()
    for pixel in range(450):
        strip.setPixelColorRGB(pixel, 0, 0, 0)
    strip.show()
| [
"sgpthomas@gmail.com"
] | sgpthomas@gmail.com |
06df909280fc43d59b472c7be2fb0157f2ebb761 | 9f45b1ee33d27e97307b4c72e1514b6fb0cbeae6 | /final_project/wizard/orders.py | a1dd4187bc70fd6bfc535dd376f01dcf31e75250 | [] | no_license | DSrijon01/FinalProjecct_Bjit01 | 1f54b47eff36c1dcb3e7ff7cc52ae97f6a7f3815 | a8d7fd1c25c58b1f95e65b6853c6c37950c64177 | refs/heads/main | 2023-08-26T21:08:47.041226 | 2021-10-28T06:10:02 | 2021-10-28T06:10:02 | 420,889,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from datetime import timedelta
from odoo import models, fields, api
class orderList(models.TransientModel):
    """Odoo wizard model carrying the input for an order-details report."""
    _name = 'order.list.wizard'
    _description = 'Order Details'
    # Free-text label for the report; mandatory in the wizard form.
    oname = fields.Char(string='Order Details',required=True)
    # ordered_product = fields.Many2many('product.list', string='Available Products')
    customer_id = fields.Many2many('customer.estate', string='Related Customer')
    def action_print_report(self):
        """Render the my_hotel order-overview QWeb report for this wizard.

        `data` is currently empty; the commented keys show the payload shape
        that was originally planned.
        """
        data = {
            # 'model': 'order.list.wizard',
            # 'form': self.read()[0],
        }
        return self.env.ref('my_hotel.action_report_order_overview').report_action(self, data=data)
"srijonbiswas17@gmail.com"
] | srijonbiswas17@gmail.com |
7f4166cc6e3bfcd55f7567fb8041ca3a0408af41 | 717f82034cb6134ca7dba02bc18c2d0c6cc71295 | /apply.py | 9d7a33b41b7fd127bba6351c8798ed002dc80130 | [] | no_license | amseram/GQMS | 803db168b118b5233465d2897ab67c75606663c2 | 5a82b1b5f6b04aa4ca64a44fb11cb91ce5cfa89b | refs/heads/master | 2021-09-15T09:37:02.533788 | 2018-05-30T02:29:52 | 2018-05-30T02:29:52 | 125,800,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | #!/usr/bin/env python2
#coding=utf8
"""
# Author: amsera
# Created Time : 2018-05-29 14:03:14
# File Name: apply.py
# Description:
# Apply an account on GQMS (console version)
"""
import sys,socket
class create_connect:
    """Thin wrapper around a blocking TCP client socket (Python 2 code).

    The socket is never closed explicitly; it lives as long as the instance.
    """
    def __init__(self):
        # One IPv4/TCP socket per instance.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def initial(self,add,port=80):
        # connect() returns None, so self.conn is always None; it is kept
        # only as a marker that initial() was called.
        self.conn = self.socket.connect((add,port))
    def send(self,query):
        """Send @query as ASCII and store up to 64 bytes of reply in self.data."""
        self.mess = query
        self.sent = self.socket.sendall(self.mess.encode('ascii'))
        # recv(64) may return fewer bytes than the full server reply.
        self.data = self.socket.recv(64).decode('utf8')
class show_option:
    """Console menu for the GQMS account service (Python 2 code).

    The constructor opens a TCP connection to the server, prints the menu,
    and then loops forever reading a selection and dispatching it; it only
    returns via sys.exit() when option 4 is chosen.
    """
    def __init__(self):
        self.sock = create_connect()
        self.sock.initial("gqms.shu.edu.cn",8080)
        # Menu text keyed by the option number as a string.
        self.options = {}
        self.options['0'] = "GQMS application options:\n"
        self.options['1'] = "[1] Apply an account \n"
        self.options['2'] = "[2] Check acc querys \n"
        self.options['3'] = "[3] Set ur passwords\n"
        self.options['4'] = "[4] Exit"
        for i in range(5):
            print self.options.get(str(i)).strip()
        # Outer loop: one menu interaction per iteration, forever.
        while True:
            # Inner loop: re-prompt until the user types an integer.
            while True:
                try :
                    targ = int(raw_input("Select : "))
                    break
                except ValueError:
                    print "Plz input an integer"
            self.select(targ)
    def select(self,targ=0):
        """Dispatch one menu choice: 0 reprints the menu, 1 applies for an
        account, 2 checks a query, 3 changes a password, 4 exits."""
        self.targ = targ
        if targ == 0 :
            for i in range(5):
                print self.options.get(str(i)).strip()
        if targ == 4 :
            sys.exit(0)
        if targ == 1 :
            # Re-prompt until all three fields are non-empty, then send a
            # comma-separated "apply" request and print the server reply.
            mailaddr = ""
            username = ""
            supervisor = ""
            while mailaddr == "" or username == "" or supervisor == "":
                username = str(raw_input("username : "))
                mailaddr = str(raw_input("mailaddr : "))
                supervisor = str(raw_input("supervisor : "))
            self.sock.send("gqms,apply,"+str(supervisor)+","+str(username)+","+str(mailaddr))
            print self.sock.data
        if targ == 2 :
            username = ""
            while username == "":
                username = str(raw_input("username : "))
            self.sock.send("gqms,check,"+str(username))
            print self.sock.data
        if targ == 3 :
            # Loop until username/current/new password are all provided and
            # the new password was retyped identically.
            # NOTE(review): passwords are echoed to the terminal and sent in
            # plaintext over the socket — confirm this is acceptable.
            username = ""
            passwd = ""
            curpass = ""
            while passwd == "" or username == "" or curpass == "":
                username = str(raw_input("username : "))
                curpass = str(raw_input("current passwd : "))
                passwd = str(raw_input("neopasswd : "))
                tmppss = str(raw_input("retype passwd : "))
                if tmppss != passwd :
                    passwd = ""
            self.sock.send("gqms,passwd,"+str(curpass)+","+str(passwd)+","+str(username))
            print self.sock.data
if __name__ == "__main__":
    # Print a banner (Python 2 print statements), then start the
    # interactive menu loop; show_option() blocks until the user exits.
    print "#"*50
    print " "*12+"GQMS application system bate 01"
    print "#"*50
    test = show_option()
| [
"noreply@github.com"
] | noreply@github.com |
e026f543ddc3da673b0f4a95a1b989ab0692e6d9 | 36739fb573c1a99d9301f2140680d0d7cc4a3833 | /assignment3/q2_rnn.py | a6838f45f13bffd4c72b7c5ae53eca7f70d172af | [] | no_license | jingshuangliu22/cs224n | 21db34c91846ea0dea095e2032b2016cc11a7f84 | 1951a4b7a25c142c860d456f3f7a3afa32171a51 | refs/heads/master | 2020-12-30T11:28:20.855167 | 2017-05-17T12:25:02 | 2017-05-17T12:25:02 | 91,565,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,674 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q2: Recurrent neural nets for NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import sys
import time
from datetime import datetime
import tensorflow as tf
import numpy as np
from util import print_sentence, write_conll, read_conll
from data_util import load_and_preprocess_data, load_embeddings, ModelHelper
from ner_model import NERModel
from defs import LBLS
from q2_rnn_cell import RNNCell
from q3_gru_cell import GRUCell
logger = logging.getLogger("hw3.q2")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class Config:
    """Holds model hyperparams and data information.

    The config class is used to store various hyperparameters and dataset
    information parameters. Model objects are passed a Config() object at
    instantiation.
    """
    n_word_features = 2 # Number of features for every word in the input.
    window_size = 1
    n_features = (2 * window_size + 1) * n_word_features # Number of features for every word in the input.
    max_length = 120 # longest sequence to parse
    n_classes = 5               # number of NER tags (incl. the null 'O' tag)
    dropout = 0.5               # keep/drop rate fed to the dropout placeholder
    embed_size = 50             # overwritten from the loaded embeddings' width
    hidden_size = 300           # RNN hidden state size
    batch_size = 32
    n_epochs = 10
    max_grad_norm = 10.
    lr = 0.001                  # Adam learning rate
    def __init__(self, args):
        # `cell` selects between the "rnn" and "gru" implementations.
        self.cell = args.cell
        if "model_path" in args:
            # Where to save things.
            self.output_path = args.model_path
        else:
            # Default: timestamped results directory per cell type.
            self.output_path = "results/{}/{:%Y%m%d_%H%M%S}/".format(self.cell, datetime.now())
        self.model_output = self.output_path + "model.weights"
        self.eval_output = self.output_path + "results.txt"
        self.conll_output = self.output_path + "{}_predictions.conll".format(self.cell)
        self.log_output = self.output_path + "log"
def pad_sequences(data, max_length):
    """Pad or truncate every (sentence, labels) pair in @data to @max_length.

    Each returned item is a list [sentence', labels', mask]:
      - sentence' is the sentence truncated to @max_length and padded with
        zero feature vectors;
      - labels' is padded with the null 'O' tag id (4);
      - mask is True exactly on the positions that came from the original
        sequence and False on padding.

    Example: ([[4,1], [6,0], [7,0]], [1, 0, 0]) with max_length = 5 yields
    ([[4,1], [6,0], [7,0], [0,0], [0,0]], [1, 0, 0, 4, 4],
     [True, True, True, False, False]).

    Args:
        data: list of (sentence, labels) tuples; each word in a sentence is a
            list of Config.n_features features.
        max_length: the desired common length.
    Returns:
        list of [sentence', labels', mask] triples, each of length @max_length.
    """
    padded = []
    zero_vector = [0] * Config.n_features  # feature vector used as padding
    zero_label = 4                         # id of the 'O' (null) tag
    for sentence, labels in data:
        kept = min(len(sentence), max_length)
        n_pad = max_length - kept
        padded.append([
            list(sentence[:kept]) + [zero_vector] * n_pad,
            list(labels[:kept]) + [zero_label] * n_pad,
            [True] * kept + [False] * n_pad,
        ])
    return padded
class RNNModel(NERModel):
    """
    Implements a recursive neural network with an embedding layer and
    single hidden layer.

    This network will predict a sequence of labels (e.g. PER) for a
    given token (e.g. Henry) using a featurized window around the token.
    """
    def add_placeholders(self):
        """Generates placeholder variables to represent the input tensors.

        These placeholders are used as inputs by the rest of the model
        building and will be fed data during training. Note that when "None"
        is in a placeholder's shape, it's flexible (so we can use different
        batch sizes without rebuilding the model).

        Adds the following nodes to the computational graph:
            input_placeholder: shape (None, self.max_length, n_features), tf.int32
            labels_placeholder: shape (None, self.max_length), tf.int32
            mask_placeholder: shape (None, self.max_length), tf.bool
            dropout_placeholder: scalar, tf.float32
        """
        ### YOUR CODE HERE (~4-6 lines)
        # self.max_length (not Config.max_length) sizes the time dimension.
        self.input_placeholder = tf.placeholder(tf.int32,(None, self.config.max_length, self.config.n_features))
        self.labels_placeholder = tf.placeholder(tf.int32,(None, self.config.max_length))
        self.mask_placeholder = tf.placeholder(tf.bool,(None, self.config.max_length))
        self.dropout_placeholder = tf.placeholder(tf.float32)
        ### END YOUR CODE
    def create_feed_dict(self, inputs_batch, mask_batch, labels_batch=None, dropout=1):
        """Creates the feed_dict for the model.

        Only placeholders with a provided value are added; @labels_batch is
        optional so this can be reused at prediction time.

        Args:
            inputs_batch: A batch of input data.
            mask_batch: A batch of mask data.
            labels_batch: A batch of label data (optional).
            dropout: The dropout (keep) rate fed to the dropout placeholder.
        Returns:
            feed_dict: mapping from placeholders to values.
        """
        ### YOUR CODE (~6-10 lines)
        feed_dict = {}
        feed_dict[self.input_placeholder] = inputs_batch
        feed_dict[self.mask_placeholder] = mask_batch
        feed_dict[self.dropout_placeholder] = dropout
        if labels_batch is not None:
            feed_dict[self.labels_placeholder] = labels_batch
        ### END YOUR CODE
        return feed_dict
        # NOTE(review): the two lines below are unreachable leftovers from
        # the assignment template (duplicated END-marker and return).
        ### END YOUR CODE
        return feed_dict
    def add_embedding(self):
        """Adds an embedding layer that maps from input tokens (integers) to
        vectors and then concatenates those vectors.

        Looks up each of the n_features token ids per position in the
        pretrained embedding matrix and flattens the per-position feature
        embeddings into one vector.

        Returns:
            embeddings: tf.Tensor of shape (None, max_length, n_features*embed_size)
        """
        ### YOUR CODE HERE (~4-6 lines)
        # Wrapping in tf.Variable makes the embeddings trainable.
        pretrained_embeddings = tf.Variable(self.pretrained_embeddings)
        embeddings = tf.nn.embedding_lookup(pretrained_embeddings,self.input_placeholder)
        # Flatten (n_features, embed_size) per position into one axis.
        embeddings = tf.reshape(embeddings,(-1, self.config.max_length, self.config.n_features*self.config.embed_size))
        ### END YOUR CODE
        return embeddings
    def add_prediction_op(self):
        """Adds the unrolled RNN:
            h_0 = 0
            for t in 1 to T:
                o_t, h_t = cell(x_t, h_{t-1})
                o_drop_t = Dropout(o_t, dropout_rate)
                y_t = o_drop_t U + b_2

        The loop is unrolled statically over self.max_length steps; variables
        inside the "RNN" scope are shared across timesteps via
        reuse_variables(). U and b_2 use Xavier initialization, and
        tf.nn.dropout takes the keep probability from the dropout
        placeholder.

        Returns:
            pred: tf.Tensor of shape (batch_size, max_length, n_classes)
        """
        x = self.add_embedding()
        dropout_rate = self.dropout_placeholder
        preds = [] # Predicted output at each timestep should go here!
        # Use the cell defined below. For Q2, we will just be using the
        # RNNCell you defined, but for Q3, we will run this code again
        # with a GRU cell!
        if self.config.cell == "rnn":
            cell = RNNCell(Config.n_features * Config.embed_size, Config.hidden_size)
        elif self.config.cell == "gru":
            cell = GRUCell(Config.n_features * Config.embed_size, Config.hidden_size)
        else:
            raise ValueError("Unsuppported cell type: " + self.config.cell)
        # Define U and b2 as variables.
        # Initialize state as vector of zeros.
        ### YOUR CODE HERE (~4-6 lines)
        U = tf.get_variable("U",[self.config.hidden_size, self.config.n_classes],
                            initializer=tf.contrib.layers.xavier_initializer())
        b2 = tf.get_variable("b2",[self.config.n_classes,],
                             initializer=tf.contrib.layers.xavier_initializer())
        #h.shape?
        # Initial hidden state: one zero row per example in the batch.
        h = tf.zeros(shape=(tf.shape(x)[0], self.config.hidden_size))
        ### END YOUR CODE
        with tf.variable_scope("RNN"):
            for time_step in range(self.max_length):
                ### YOUR CODE HERE (~6-10 lines)
                # Reuse the cell's weights after the first unrolled step.
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                o_t, h = cell(x[:,time_step,:],h)
                o_dropout_t = tf.nn.dropout(o_t,self.dropout_placeholder)
                y_t = tf.matmul(o_dropout_t, U) + b2
                preds.append(y_t)
                ### END YOUR CODE
        # Make sure to reshape @preds here.
        ### YOUR CODE HERE (~2-4 lines)
        # Stack T tensors of shape (batch, n_classes) into (batch, T, n_classes).
        preds = tf.stack(preds,axis=1)
        ### END YOUR CODE
        assert preds.get_shape().as_list() == [None, self.max_length, self.config.n_classes], "predictions are not of the right shape. Expected {}, got {}".format([None, self.max_length, self.config.n_classes], preds.get_shape().as_list())
        return preds
    def add_loss_op(self, preds):
        """Adds Ops for the loss function to the computational graph.

        Averaged cross-entropy loss over all positions.

        NOTE(review): the assignment asks to ignore the loss on masked
        (padding) tokens, but this implementation averages over every
        position without applying tf.boolean_mask with
        self.mask_placeholder — confirm whether this is intended.

        Args:
            pred: tensor of shape (batch_size, max_length, n_classes) with
                pre-softmax scores.
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE (~2-4 lines)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder,logits=preds)
        loss = tf.reduce_mean(loss)
        ### END YOUR CODE
        return loss
    def add_training_op(self, loss):
        """Sets up the training Ops.

        Creates an Adam optimizer (learning rate Config.lr) and applies the
        gradients to all trainable variables. The returned Op is what must be
        passed to sess.run() to train the model.

        Args:
            loss: Loss tensor, from cross_entropy_loss.
        Returns:
            train_op: The Op for training.
        """
        ### YOUR CODE HERE (~1-2 lines)
        optimizer = tf.train.AdamOptimizer(Config.lr)
        train_op = optimizer.minimize(loss)
        ### END YOUR CODE
        return train_op
    def preprocess_sequence_data(self, examples):
        """Featurize each sentence into windows, then pad to self.max_length."""
        def featurize_windows(data, start, end, window_size = 1):
            """Uses the input sequences in @data to construct new windowed data points.
            """
            ret = []
            for sentence, labels in data:
                from util import window_iterator
                sentence_ = []
                for window in window_iterator(sentence, window_size, beg=start, end=end):
                    sentence_.append(sum(window, []))
                ret.append((sentence_, labels))
            return ret
        examples = featurize_windows(examples, self.helper.START, self.helper.END)
        return pad_sequences(examples, self.max_length)
    def consolidate_predictions(self, examples_raw, examples, preds):
        """Batch the predictions into groups of sentence length.

        Drops the predictions made on padding positions (mask == False) so
        each returned label sequence matches its raw sentence length.
        """
        assert len(examples_raw) == len(examples)
        assert len(examples_raw) == len(preds)
        ret = []
        for i, (sentence, labels) in enumerate(examples_raw):
            _, _, mask = examples[i]
            labels_ = [l for l, m in zip(preds[i], mask) if m] # only select elements of mask.
            assert len(labels_) == len(labels)
            ret.append([sentence, labels, labels_])
        return ret
    def predict_on_batch(self, sess, inputs_batch, mask_batch):
        """Run the model and return the argmax class id per position."""
        feed = self.create_feed_dict(inputs_batch=inputs_batch, mask_batch=mask_batch)
        predictions = sess.run(tf.argmax(self.pred, axis=2), feed_dict=feed)
        return predictions
    def train_on_batch(self, sess, inputs_batch, labels_batch, mask_batch):
        """Run one optimization step on a batch and return the batch loss."""
        feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch, mask_batch=mask_batch,
                                     dropout=Config.dropout)
        _, loss = sess.run([self.train_op, self.loss], feed_dict=feed)
        return loss
    def __init__(self, helper, config, pretrained_embeddings, report=None):
        super(RNNModel, self).__init__(helper, config, report)
        # Clamp the unroll length to the longest sentence actually seen.
        self.max_length = min(Config.max_length, helper.max_length)
        Config.max_length = self.max_length # Just in case people make a mistake.
        self.pretrained_embeddings = pretrained_embeddings
        # Defining placeholders.
        self.input_placeholder = None
        self.labels_placeholder = None
        self.mask_placeholder = None
        self.dropout_placeholder = None
        self.build()
def test_pad_sequences():
    """Check pad_sequences on one padded and one truncated example."""
    Config.n_features = 2
    data = [
        ([[4,1], [6,0], [7,0]], [1, 0, 0]),
        ([[3,0], [3,4], [4,5], [5,3], [3,4]], [0, 1, 0, 2, 3]),
        ]
    ret = [
        ([[4,1], [6,0], [7,0], [0,0]], [1, 0, 0, 4], [True, True, True, False]),
        ([[3,0], [3,4], [4,5], [5,3]], [0, 1, 0, 2], [True, True, True, True])
        ]
    ret_ = pad_sequences(data, 4)
    assert len(ret_) == 2, "Did not process all examples: expected {} results, but got {}.".format(2, len(ret_))
    for i in range(2):
        got, want = ret_[i], ret[i]
        assert len(got) == 3, "Did not populate return values corrected: expected {} items, but got {}.".format(3, len(got))
        for j in range(3):
            assert got[j] == want[j], "Expected {}, but got {} for {}-th entry of {}-th example".format(want[j], got[j], j, i)
def do_test1(_):
    """CLI entry point: run the pad_sequences unit test (args unused)."""
    logger.info("Testing pad_sequences")
    test_pad_sequences()
    logger.info("Passed!")
def do_test2(args):
    """CLI entry point: smoke-test RNNModel by building it on real data and
    running fit() without saving checkpoints (saver is None)."""
    logger.info("Testing implementation of RNNModel")
    config = Config(args)
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    # Embedding width comes from the loaded vectors, not the Config default.
    config.embed_size = embeddings.shape[1]
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = RNNModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = None
        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)
    logger.info("Model did not crash!")
    logger.info("Passed!")
def do_train(args):
    """CLI entry point: train the model, checkpoint it, and write the dev-set
    predictions to CoNLL and human-readable output files."""
    # Set up some parameters.
    config = Config(args)
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    helper.save(config.output_path)
    # Mirror all log records into the run's output directory.
    handler = logging.FileHandler(config.log_output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)
    report = None #Report(Config.eval_output)
    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = RNNModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)
            if report:
                report.log_output(model.output(session, dev_raw))
                report.save()
            else:
                # Save predictions in a text file.
                output = model.output(session, dev_raw)
                sentences, labels, predictions = zip(*output)
                predictions = [[LBLS[l] for l in preds] for preds in predictions]
                # NOTE(review): on Python 3 zip() returns a one-shot iterator;
                # write_conll consumes it, leaving the loop below nothing to
                # write — confirm (wrap in list(...) if py3 is the target).
                output = zip(sentences, labels, predictions)
                with open(model.config.conll_output, 'w') as f:
                    write_conll(f, output)
                with open(model.config.eval_output, 'w') as f:
                    for sentence, labels, predictions in output:
                        print_sentence(f, sentence, labels, predictions)
def do_evaluate(args):
    """Restore a trained model and print predictions for a CoNLL-format data file."""
    config = Config(args)
    helper = ModelHelper.load(args.model_path)
    input_data = read_conll(args.data)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]

    with tf.Graph().as_default():
        logger.info("Building model...",)
        build_start = time.time()
        model = RNNModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - build_start)

        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init_op)
            # Load the weights written out by training.
            saver.restore(session, model.config.model_output)
            for sentence, labels, predictions in model.output(session, input_data):
                label_names = [LBLS[l] for l in predictions]
                print_sentence(args.output, sentence, labels, label_names)
def do_shell(args):
    """Interactive REPL: read a sentence from stdin, print its predicted NER labels."""
    config = Config(args)
    helper = ModelHelper.load(args.model_path)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]

    # BUG FIX: raw_input() only exists on Python 2; fall back to input()
    # so the shell also works on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input

    with tf.Graph().as_default():
        logger.info("Building model...",)
        start = time.time()
        model = RNNModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        with tf.Session() as session:
            session.run(init)
            saver.restore(session, model.config.model_output)

            print("""Welcome!
You can use this shell to explore the behavior of your model.
Please enter sentences with spaces between tokens, e.g.,
input> Germany 's representative to the European Union 's veterinary committee .
""")
            while True:
                # Create simple REPL
                try:
                    sentence = read_line("input> ")
                    tokens = sentence.strip().split(" ")
                    for sentence, _, predictions in model.output(session, [(tokens, ["O"] * len(tokens))]):
                        predictions = [LBLS[l] for l in predictions]
                        print_sentence(sys.stdout, sentence, [""] * len(tokens), predictions)
                except EOFError:
                    print("Closing session.")
                    break
if __name__ == "__main__":
    # Command-line front end: one subcommand per workflow stage.
    parser = argparse.ArgumentParser(description='Trains and tests an NER model')
    subparsers = parser.add_subparsers()

    command_parser = subparsers.add_parser('test1', help='')
    command_parser.set_defaults(func=do_test1)

    command_parser = subparsers.add_parser('test2', help='')
    command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="data/tiny.conll", help="Training data")
    command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="data/tiny.conll", help="Dev data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
    command_parser.set_defaults(func=do_test2)

    command_parser = subparsers.add_parser('train', help='')
    command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="data/train.conll", help="Training data")
    command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="data/dev.conll", help="Dev data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
    command_parser.set_defaults(func=do_train)

    command_parser = subparsers.add_parser('evaluate', help='')
    command_parser.add_argument('-d', '--data', type=argparse.FileType('r'), default="data/dev.conll", help="Training data")
    command_parser.add_argument('-m', '--model-path', help="Training data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
    command_parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help="Training data")
    command_parser.set_defaults(func=do_evaluate)

    command_parser = subparsers.add_parser('shell', help='')
    command_parser.add_argument('-m', '--model-path', help="Training data")
    command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
    command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
    command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
    command_parser.set_defaults(func=do_shell)

    ARGS = parser.parse_args()
    # BUG FIX: under Python 3, argparse omits the 'func' attribute entirely
    # when no subcommand is given, so ARGS.func raised AttributeError instead
    # of printing usage. getattr() handles both the missing and None cases.
    if getattr(ARGS, 'func', None) is None:
        parser.print_help()
        sys.exit(1)
    else:
        ARGS.func(ARGS)
| [
"6044475200@qq.com"
] | 6044475200@qq.com |
d5d59aeea65b312759873f6b56b88848914a251a | 59ad573ea792adc4ec2e99eb1b695aabb340b43a | /7.reverse-integer.python3.py | 378643549bff976542f738ebe26672a6076d4246 | [] | no_license | ClarkChen26/leetcode-solution | cf1e3548ae998462c42094fe87dce0a9aa1ed8c2 | 8df918851eb5dcbc2cd8d1053fcc5da81849baa9 | refs/heads/master | 2020-03-07T03:19:58.640619 | 2018-03-29T03:45:24 | 2018-03-29T03:45:24 | 127,232,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | #
# [7] Reverse Integer
#
# https://leetcode.com/problems/reverse-integer/description/
#
# algorithms
# Easy (24.36%)
# Total Accepted: 386.3K
# Total Submissions: 1.6M
# Testcase Example: '123'
#
# Given a 32-bit signed integer, reverse digits of an integer.
#
# Example 1:
#
# Input: 123
# Output: 321
# Example 2:
#
# Input: -123
# Output: -321
#
#
#
# Example 3:
#
# Input: 120
# Output: 21
#
#
#
# Note:
# Assume we are dealing with an environment which could only hold integers
# within the 32-bit signed integer range. For the purpose of this problem,
# assume that your function returns 0 when the reversed integer overflows.
#
#
class Solution:
    def reverse(self, x):
        """Reverse the digits of a 32-bit signed integer.

        :type x: int
        :rtype: int (0 when the reversed value overflows 32-bit range)
        """
        # Work on the magnitude, then restore the sign afterwards.
        sign = -1 if x < 0 else 1
        result = sign * int(str(abs(x))[::-1])
        # Clamp to 0 on 32-bit signed overflow, as the problem requires.
        if not (-2147483648 <= result <= 2147483647):
            return 0
        return result
#数学方法
# curr = x
# result = 0
# if x < 0:
# curr *= -1
# while curr != 0:
# a = curr % 10
# curr = curr // 10
# result = result * 10 + a
# if x < 0:
# result *= -1
# if result > 2147483647 or result < -2147483648:
# return 0
# return result
# s = Solution()
# print(s.reverse(-123))
| [
"clarkczj@hotmail.com"
] | clarkczj@hotmail.com |
a5543229643b05381eaae5f8d31659482f0fd807 | 2562d3a102af3a7e590b28b0d388aaf980edcd97 | /HackerRank/graph_bfs.py | 90dc9d09c49bff9abfad7566fa4534c2c4535c70 | [] | no_license | svidyart/PracticeCode | 16bc3565d4b0cfc383aad144b6fe70310b55840d | 8b03977d606fa4c82039003a3e3e1affd83fb75e | refs/heads/master | 2021-01-19T06:46:45.673755 | 2016-08-09T17:46:48 | 2016-08-09T17:46:48 | 65,315,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
def update(lvl, edges, node):
    """Return (lvl + 1, neighbour) pairs for every neighbour of *node*
    in the adjacency list *edges*."""
    next_level = lvl + 1
    return [(next_level, neighbour) for neighbour in edges[node]]
def main():
    """Read T BFS test cases from stdin and print, for each case, the
    distance (6 * edge count) from the start node to every other node,
    or -1 when a node is unreachable.
    """
    T = int(raw_input())
    for case in range(T):
        # BUG FIX: edges/visited were module-loop state shared across test
        # cases, so every case after the first used stale adjacency lists.
        N, M = [int(i) for i in raw_input().strip().split()]
        edges = [[] for _ in range(N)]
        visited = {}
        for _ in range(M):
            a, b = [int(i) for i in raw_input().strip().split()]
            edges[a].append(b)
            edges[b].append(a)
        S = int(raw_input())

        que = [(0, S)]
        while len(que) > 0:
            lvl, current = que.pop(0)
            if current in visited:
                continue
            visited[current] = lvl * 6
            # BUG FIX: expand from the node just dequeued -- the original
            # called update(lvl, edges, S), re-expanding the start node forever.
            que.extend(update(lvl, edges, current))

        # BUG FIX: ' '.join() requires strings, and distances are reported
        # once per node (excluding the start), not once per edge endpoint.
        # Debug echoes of the input were also removed from the output stream.
        ret = [visited.get(i, -1) for i in range(N) if i != S]
        print(' '.join(str(x) for x in ret))
if __name__ == '__main__':
main()
| [
"shreyasvidyarthi@gmail.com"
] | shreyasvidyarthi@gmail.com |
66ed8285b75dc0ca0f1a5a553377198a23782b48 | 26df2d494d2626a9b0ed5d91127308b865529d76 | /timebudget/__init__.py | 542aeaaf417fe755b68a1fae7dd121eb966843c9 | [
"Apache-2.0"
] | permissive | leopd/timebudget | e34fee7e4314066577b5654e67ca209d8c243302 | e58b7121aa5846db784fb80ab6b8dfffdcc8fae5 | refs/heads/master | 2022-06-11T05:13:45.263419 | 2020-05-29T20:17:32 | 2020-05-29T20:17:32 | 216,866,218 | 156 | 14 | Apache-2.0 | 2022-05-21T03:11:37 | 2019-10-22T17:01:42 | Python | UTF-8 | Python | false | false | 26 | py | from .timebudget import *
| [
"deepembedding@gmail.com"
] | deepembedding@gmail.com |
7533ca90907b697d0dc23a74d914beb543005ff5 | 03f6ad21c4332b9b26dfb11ed04e63bdb9236b3c | /codegen/funcs2_testgen.py | edada136932bf16c484a72dc13c4369ce9f380ad | [
"Apache-2.0"
] | permissive | m1griffin/arrayfunc | ddf9ea9c8fa363f79babd788c8d0428ede8dfc60 | c04561c5d565ae8d3ee776783bfb34b242deca93 | refs/heads/master | 2023-08-05T00:09:27.530893 | 2023-07-19T12:46:37 | 2023-07-19T12:46:37 | 40,577,669 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,190 | py | #!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Purpose: Generate the unit tests for math functions which use two
# input parameters.
# Language: Python 3.5
# Date: 08-Dec-2017
#
###############################################################################
#
# Copyright 2014 - 2017 Michael Griffin <m12.griffin@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# ==============================================================================
import itertools
import codegen_common
# ==============================================================================
# ==============================================================================
# This template is for operators which use a second numeric parameter.
test_template = '''
##############################################################################
class %(funclabel)s_general_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using numeric
data %(test_op_y)s.
test_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
########################################################
def test_%(funclabel)s_basic_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a3(self):
"""Test %(funclabel)s as *array-num-none* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, testval, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_none_a4(self):
"""Test %(funclabel)s as *array-num-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, testval, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, testval) for x in data1]
arrayfunc.%(funcname)s(data1, testval, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b3(self):
"""Test %(funclabel)s as *array-num-array* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, testval, dataout, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_num_array_b4(self):
"""Test %(funclabel)s as *array-num-array* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, testval) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, testval, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c3(self):
"""Test %(funclabel)s as *num-array-none* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(testval, data1, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_none_c4(self):
"""Test %(funclabel)s as *num-array-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(testval, data1, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for basic function - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for basic function with matherrors=True - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(testval, x) for x in data1]
arrayfunc.%(funcname)s(testval, data1, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d3(self):
"""Test %(funclabel)s as *num-array-array* for basic function with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(testval, data1, dataout, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_num_array_array_d4(self):
"""Test %(funclabel)s as *num-array-array* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
data1 = array.array('%(typecode)s', [%(test_op_x)s])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(testval, x) for x in data1]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(testval, data1, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for basic function with matherrors=True - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, matherrors=True)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e3(self):
"""Test %(funclabel)s as *array-array-none* for basic function with array limit - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, data2, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_none_e4(self):
"""Test %(funclabel)s as *array-array-none* for basic function with matherrors=True and with array limit - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(data1)[limited:]
arrayfunc.%(funcname)s(data1, data2, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(data1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e5(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, dataout)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e6(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
expected = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
arrayfunc.%(funcname)s(data1, data2, dataout, matherrors=True)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_basic_array_array_array_e7(self):
"""Test %(funclabel)s as *array-array-array* for basic function - Array code %(typelabel)s.
"""
data1 = array.array('%(typecode)s', [%(test_op_x)s])
data2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), data1)])
dataout = array.array('%(typecode)s', [0]*len(data1))
limited = len(data1) // 2
pydataout = [%(pyoperator)s(x, y) for (x, y) in zip(data1, data2)]
expected = pydataout[0:limited] + list(dataout)[limited:]
arrayfunc.%(funcname)s(data1, data2, dataout, matherrors=True, maxlen=limited)
for dataoutitem, expecteditem in zip(dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# The template used to generate the tests for testing invalid parameter types.
param_invalid_template = '''
##############################################################################
class %(funclabel)s_param_errors_%(typelabel)s(unittest.TestCase):
"""Test for invalid parameters.
param_invalid_template
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.floatarray1 = array.array('%(typecode)s', [%(test_op_x)s])
self.floatarray2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.floatarray1)])
arraysize = len(self.floatarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
# Create some integer array equivalents.
self.intarray1 = array.array('i', [int(x) for x in self.floatarray1])
self.intarray2 = array.array('i', [int(x) for x in self.floatarray2])
self.intdataout = array.array('i', [int(x) for x in self.dataout])
########################################################
def test_%(funclabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testfloat)
########################################################
def test_%(funclabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testint)
########################################################
def test_%(funclabel)s_array_num_none_a3(self):
"""Test %(funclabel)s as *array-num-none* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat)
intarray1 = copy.copy(self.intarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testint)
########################################################
def test_%(funclabel)s_array_num_none_a4(self):
"""Test %(funclabel)s as *array-num-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, matherrors='a')
########################################################
def test_%(funclabel)s_array_num_none_a5(self):
"""Test %(funclabel)s as *array-num-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, maxlen='a')
########################################################
def test_%(funclabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(intarray1, testfloat, self.dataout)
########################################################
def test_%(funclabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, testint, self.dataout)
########################################################
def test_%(funclabel)s_array_num_array_b3(self):
"""Test %(funclabel)s as *array-num-array* for integer output array - Array code %(typelabel)s.
"""
for testfloat in self.floatarray2:
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.intdataout)
########################################################
def test_%(funclabel)s_array_num_array_b4(self):
"""Test %(funclabel)s as *array-num-array* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, testfloat, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, testint, self.intdataout)
########################################################
def test_%(funclabel)s_array_num_array_b5(self):
"""Test %(funclabel)s as *array-num-array* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_array_num_array_b6(self):
"""Test %(funclabel)s as *array-num-array* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, testfloat, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, intarray1)
########################################################
def test_%(funclabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, floatarray1)
########################################################
def test_%(funclabel)s_num_array_none_c3(self):
"""Test %(funclabel)s as *num-array-none* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
intarray1 = copy.copy(self.intarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, intarray1)
########################################################
def test_%(funclabel)s_num_array_none_c4(self):
"""Test %(funclabel)s as *num-array-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1, matherrors=True)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, floatarray1, matherrors='a')
########################################################
def test_%(funclabel)s_num_array_none_c5(self):
"""Test %(funclabel)s as *num-array-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testfloat = self.floatarray2[0]
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, floatarray1, maxlen=testmaxlen)
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, floatarray1, maxlen='a')
########################################################
def test_%(funclabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for integer array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout)
########################################################
def test_%(funclabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for integer number - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout)
########################################################
def test_%(funclabel)s_num_array_array_d3(self):
"""Test %(funclabel)s as *num-array-array* for integer output array - Array code %(typelabel)s.
"""
for testfloat in self.floatarray2:
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.intdataout)
########################################################
def test_%(funclabel)s_num_array_array_d4(self):
"""Test %(funclabel)s as *num-array-array* for integer number and array - Array code %(typelabel)s.
"""
for testfloat, testint in zip(self.floatarray2, self.intarray2):
with self.subTest(msg='Failed with parameter', testfloat = testfloat):
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testint, self.intarray1, self.intdataout)
########################################################
def test_%(funclabel)s_num_array_array_d5(self):
"""Test %(funclabel)s as *num-array-array* for matherrors='a' - Array code %(typelabel)s.
"""
testfloat = self.floatarray2[0]
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_num_array_array_d6(self):
"""Test %(funclabel)s as *num-array-array* for maxlen='a' - Array code %(typelabel)s.
"""
testfloat = self.floatarray2[0]
testmaxlen = len(self.floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(testfloat, self.floatarray1, self.dataout, maxlen=testmaxlen)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(testfloat, self.intarray1, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for integer array - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.intarray2)
########################################################
def test_%(funclabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.floatarray2)
########################################################
def test_%(funclabel)s_array_array_none_e3(self):
"""Test %(funclabel)s as *array-array-none* for all integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.intarray2)
########################################################
def test_%(funclabel)s_array_array_none_e4(self):
"""Test %(funclabel)s as *array-array-none* for matherrors='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, matherrors=True)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, matherrors='a')
########################################################
def test_%(funclabel)s_array_array_none_e5(self):
"""Test %(funclabel)s as *array-array-none* for maxlen='a' - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
testmaxlen = len(floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, maxlen=testmaxlen)
# Copy the array so we don't change the original data.
floatarray1 = copy.copy(self.floatarray1)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(floatarray1, self.floatarray2, maxlen='a')
########################################################
def test_%(funclabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.intarray2, self.dataout)
########################################################
def test_%(funclabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.floatarray2, self.dataout)
########################################################
def test_%(funclabel)s_array_array_array_f3(self):
"""Test %(funclabel)s as *array-array-array* for integer output array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.intdataout)
########################################################
def test_%(funclabel)s_array_array_array_f4(self):
"""Test %(funclabel)s as *array-array-array* for all integer array - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.intarray1, self.intarray2, self.intdataout)
########################################################
def test_%(funclabel)s_array_array_array_f5(self):
"""Test %(funclabel)s as *array-array-array* for matherrors='a' - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, matherrors=True)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, matherrors='a')
########################################################
def test_%(funclabel)s_array_array_array_f6(self):
"""Test %(funclabel)s as *array-array-array* for maxlen='a' - Array code %(typelabel)s.
"""
testmaxlen = len(self.floatarray1) // 2
# This version is expected to pass.
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, maxlen=testmaxlen)
# This is the actual test.
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s(self.floatarray1, self.floatarray2, self.dataout, maxlen='a')
########################################################
def test_%(funclabel)s_no_params_g1(self):
"""Test %(funclabel)s with no parameters - Array code %(typelabel)s.
"""
with self.assertRaises(TypeError):
arrayfunc.%(funcname)s()
##############################################################################
'''
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# when exceptions are expected.
# Code-generation template (filled in via %-formatting, hence the %(name)s
# placeholders and the doubled %% inside format strings that must survive
# into the generated file). It expands to one unittest.TestCase class per
# function/type combination, covering every calling form of
# arrayfunc.%(funcname)s (array-num, num-array, array-array, with and
# without an output array).
#
# The generated class asserts two things about nan/inf/-inf input data:
#   1. with error checking on (the default), the call raises ArithmeticError;
#   2. with matherrors=True, the call succeeds and each result matches the
#      Python-level fallback computed by PyOp (which substitutes
#      %(test_nan_default)s when the Python operator itself raises).
#
# NOTE(review): the template text below is emitted verbatim into generated
# test files — do not edit it for style/typos here without regenerating and
# re-validating the output (e.g. the repeated "behavour" spelling is part of
# the emitted bytes).
nan_data_error_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using parameter %(errordata)s.
nan_data_error_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.dataok1, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
dataok2 = copy.copy(self.dataok2)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, dataok2)
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(dataok1, self.errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.dataok1, self.dataok2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# when exceptions are not expected.
nan_data_noerror_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_noerrors_%(typelabel)s(unittest.TestCase):
"""Test for basic general function operation using parameter %(errordata)s.
nan_data_noerror_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(errordata, testval)
for dataoutitem, expecteditem in zip(errordata, self.expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, self.expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(testval, errordata)
for dataoutitem, expecteditem in zip(errordata, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# The template used to generate the tests for inf, -inf in data arrays
# when exceptions are expected. This is a special version for fmod.
#
# NOTE: the value below is ONE large printf-style template string. The
# %(name)s markers (funclabel, errorlabel, typelabel, typecode, funcname,
# pyoperator, test_op_x, test_op_y, errordata) are substituted by the code
# generator, and %% is an escaped literal percent sign in the generated
# source. Everything between the triple quotes is generator OUTPUT, not live
# code in this file — editing any of it changes the generated unit tests.
nan_data_fmod_inf_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_noerrors_%(typelabel)s(unittest.TestCase):
"""Test for fmod(x, y) operation using parameter %(errordata)s.
For math.fmod:
if x=nan, the result is always nan
if y=nan, the result is always nan
if x=inf or -inf, the result is always err
if y=inf or -inf, the result is OK
For our purposes here, we treat a "NaN" output as an error even if
"math.fmod" does not.
nan_data_fmod_inf_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
# A "1" suffix means the data is meant for the first parameter.
# A "2" suffix means the data is meant for the second parameter.
self.okarray1 = array.array('%(typecode)s', [%(test_op_x)s])
self.okarray2 = array.array('%(typecode)s', [x for x,y in zip(itertools.cycle([%(test_op_y)s]), self.okarray1)])
# This is how long the test arrays should be.
testarraysize = len(self.okarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, testarraysize))
self.errorarray = array.array('%(typecode)s', [float('%(errordata)s')] * testarraysize)
self.errorparam = float('%(errordata)s')
# When error data is calculated with error checking off, the result is
# always NaN.
self.nanresult = [math.nan] * testarraysize
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errorarray = copy.copy(self.errorarray)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errorarray, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
# The output goes into the first array.
arrayfunc.%(funcname)s(errorarray, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errorarray, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a3(self):
"""Test %(funclabel)s as *array-num-array* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errorarray = copy.copy(self.errorarray)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errorarray, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a4(self):
"""Test %(funclabel)s as *array-num-array* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
arrayfunc.%(funcname)s(self.errorarray, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a5(self):
"""Test %(funclabel)s as *array-num-none* for error number with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorparam)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a6(self):
"""Test %(funclabel)s as *array-num-none* for error number with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a7(self):
"""Test %(funclabel)s as *array-num-array* for error number with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_a8(self):
"""Test %(funclabel)s as *array-num-array* for error number with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, self.errorparam) for x in self.okarray1]
arrayfunc.%(funcname)s(self.okarray1, self.errorparam, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b1(self):
"""Test %(funclabel)s as *num-array-none* for error number with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray2 = copy.copy(self.okarray2)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, okarray2)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorparam, okarray2)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b2(self):
"""Test %(funclabel)s as *num-array-none* for error number with error check off - Array code %(typelabel)s.
"""
# The output goes into the first array.
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray2, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b3(self):
"""Test %(funclabel)s as *num-array-array* for error number with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.okarray2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b4(self):
"""Test %(funclabel)s as *num-array-array* for error number with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.errorparam, self.okarray2, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b5(self):
"""Test %(funclabel)s as *num-array-none* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
# The output goes into the first array.
arrayfunc.%(funcname)s(testval, errorarray)
for dataoutitem, expecteditem in zip(errorarray, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_b6(self):
"""Test %(funclabel)s as *num-array-none* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errorarray = copy.copy(self.errorarray)
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
# The output goes into the first array.
arrayfunc.%(funcname)s(testval, errorarray, matherrors=True)
for dataoutitem, expecteditem in zip(errorarray, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b7(self):
"""Test %(funclabel)s as *num-array-array* for error array with error check on - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
arrayfunc.%(funcname)s(testval, self.errorarray, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_b8(self):
"""Test %(funclabel)s as *num-array-array* for error array with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [%(pyoperator)s(testval, x) for x in self.errorarray]
arrayfunc.%(funcname)s(testval, self.errorarray, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c1(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check on - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.okarray1, self.okarray2)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorarray, self.okarray2)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c2(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check off - Array code %(typelabel)s.
"""
# The output goes into the first array.
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, matherrors=True)
for dataoutitem, expecteditem in zip(self.errorarray, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c3(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check on - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.okarray1, self.okarray2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c4(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.errorarray, self.okarray2, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.nanresult):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c5(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorarray)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_c6(self):
"""Test %(funclabel)s as *array-array-none* for error array with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
# The output goes into the first array.
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c7(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check on - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_c8(self):
"""Test %(funclabel)s as *array-array-array* for error array with error check off - Array code %(typelabel)s.
"""
expected = [%(pyoperator)s(x, y) for x,y in zip(self.okarray1, self.errorarray)]
arrayfunc.%(funcname)s(self.okarray1, self.errorarray, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# Used for pow only.
#
# NOTE: like the other templates in this file, this is ONE printf-style
# template string; %(name)s markers are substituted by the code generator
# and %% escapes a literal percent sign. The contents are generator OUTPUT —
# changing any token changes the generated unit tests.
#
# NOTE(review): tests c1/d1 iterate over %(test_op_y)s while c2/d2 iterate
# over %(test_op_x)s for the same (first, numeric) parameter position. The
# 1.0 / -1.0 guards in c1/d1 appear tuned to the y values, so this may be
# deliberate — confirm against the generator's substitution data before
# "fixing" it.
nan_data_powerror_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for pow using parameter %(errordata)s.
nan_data_powerror_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def PyOp(self, x, y, default):
"""Handle exceptions due to math domain errors when calling the math
library function. If an exception occurs, return the default value
instead.
"""
try:
return %(pyoperator)s(x, y)
except:
return default
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.dataok1 = array.array('%(typecode)s', [%(test_op_x)s])
self.dataok2 = array.array('%(typecode)s', [x for (x,y) in zip(itertools.cycle([%(test_op_y)s]), self.dataok1)])
arraysize = len(self.dataok1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
self.expectedep = [self.PyOp(x, y, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok2)]
self.expectedpe = [self.PyOp(y, x, float('%(test_nan_default)s')) for x,y in zip(self.errordata, self.dataok1)]
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval)
# This is the actual test. When the test value parameter is 0,
# no error is expected. Any other value should raise an error.
if testval != 0.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
else:
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, testval, self.dataout)
# This is the actual test. When the test value parameter is 0,
# no error is expected. Any other value should raise an error.
if testval != 0.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
else:
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedep = [self.PyOp(x, testval, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedep):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, dataok1)
# This is the actual test. When testing for errors, the result
# will depend upon whether the test is for nan or inf, and
# what numeric values are involved.
# The template auto-generating this unit test is re-used for
# different test values, so we need a conditional test for this.
if '%(errordata)s' == 'nan' and testval != 1.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
elif '%(errordata)s' == 'inf' and ((testval < -1.0) or (testval > 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
elif '%(errordata)s' == '-inf' and ((testval > -1.0) and (testval < 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, errordata)
else:
arrayfunc.%(funcname)s(testval, errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in [%(test_op_y)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
# This version is expected to pass.
arrayfunc.%(funcname)s(testval, self.dataok1, self.dataout)
# This is the actual test. When testing for errors, the result
# will depend upon whether the test is for nan or inf, and
# what numeric values are involved.
# The template auto-generating this unit test is re-used for
# different test values, so we need a conditional test for this.
if '%(errordata)s' == 'nan' and testval != 1.0:
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
elif '%(errordata)s' == 'inf' and ((testval < -1.0) or (testval > 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
elif '%(errordata)s' == '-inf' and ((testval > -1.0) and (testval < 1.0)):
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
else:
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in [%(test_op_x)s]:
with self.subTest(msg='Failed with parameter', testval = testval):
expectedpe = [self.PyOp(testval, x, float('%(test_nan_default)s')) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
dataok2 = copy.copy(self.dataok2)
# This version is expected to pass.
arrayfunc.%(funcname)s(dataok1, dataok2)
# Copy the array so we don't change the original data.
dataok1 = copy.copy(self.dataok1)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(dataok1, self.errordata)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataok1, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
# This version is expected to pass.
arrayfunc.%(funcname)s(self.dataok1, self.dataok2, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
arrayfunc.%(funcname)s(self.dataok1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, self.expectedpe):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
'''
##############################################################################
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf in data arrays
# specifically for copysign.
nan_data_copysign_template = '''
##############################################################################
class %(funclabel)s_%(errorlabel)s_errors_%(typelabel)s(unittest.TestCase):
"""Test for copysign function operation using parameter %(errordata)s.
nan_data_copysign_template
"""
##############################################################################
def FloatassertEqual(self, expecteditem, dataoutitem, msg=None):
"""This function is patched into assertEqual to allow testing for
the floating point special values NaN, Inf, and -Inf.
"""
# NaN cannot be compared using normal means.
if math.isnan(dataoutitem) and math.isnan(expecteditem):
pass
# Anything else can be compared normally.
else:
if not math.isclose(expecteditem, dataoutitem, rel_tol=0.01, abs_tol=0.0):
raise self.failureException('%%0.3f != %%0.3f' %% (expecteditem, dataoutitem))
########################################################
def setUp(self):
"""Initialise.
"""
self.addTypeEqualityFunc(float, self.FloatassertEqual)
self.okarray1 = array.array('%(typecode)s', [%(test_op_y)s])
# This is the same data, but with signs reversed.
self.okarray2 = array.array('%(typecode)s', [-x for x in [%(test_op_y)s]])
arraysize = len(self.okarray1)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, arraysize))
self.errordata = array.array('%(typecode)s', [float('%(errordata)s')] * arraysize)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a1(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_none_a2(self):
"""Test %(funclabel)s as *array-num-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(x, testval) for x in errordata]
arrayfunc.%(funcname)s(errordata, testval, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b1(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
# This version is expected to pass.
arrayfunc.%(funcname)s(okarray1, testval, self.dataout)
# This is the actual test.
with self.assertRaises(ArithmeticError):
arrayfunc.%(funcname)s(errordata, testval, self.dataout)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_num_array_b2(self):
"""Test %(funclabel)s as *array-num-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(x, testval) for x in self.errordata]
arrayfunc.%(funcname)s(self.errordata, testval, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c1(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
okarray1 = copy.copy(self.okarray1)
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(testval, x) for x in errordata]
arrayfunc.%(funcname)s(testval, errordata)
for dataoutitem, expecteditem in zip(errordata, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_none_c2(self):
"""Test %(funclabel)s as *num-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
# Copy the array so we don't change the original data.
errordata = copy.copy(self.errordata)
expected = [math.%(funcname)s(testval, x) for x in errordata]
arrayfunc.%(funcname)s(testval, errordata, matherrors=True)
for dataoutitem, expecteditem in zip(errordata, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d1(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_num_array_array_d2(self):
"""Test %(funclabel)s as *num-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
for testval in self.okarray2:
with self.subTest(msg='Failed with parameter', testval = testval):
expected = [math.%(funcname)s(testval, x) for x in self.errordata]
arrayfunc.%(funcname)s(testval, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e1(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_none_e2(self):
"""Test %(funclabel)s as *array-array-none* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, matherrors=True)
for dataoutitem, expecteditem in zip(self.okarray1, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f1(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, self.dataout)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_%(funclabel)s_%(errorlabel)s_array_array_array_f2(self):
"""Test %(funclabel)s as *array-array-array* for %(errordata)s with error check off - Array code %(typelabel)s.
"""
expected = [math.%(funcname)s(x, y) for x,y in zip(self.okarray1, self.errordata)]
arrayfunc.%(funcname)s(self.okarray1, self.errordata, self.dataout, matherrors=True)
for dataoutitem, expecteditem in zip(self.dataout, expected):
# The behavour of assertEqual is modified by addTypeEqualityFunc.
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================

# ==============================================================================

# These are all the test code templates, keyed by the names used in the
# "*_template" fields of the configuration file.
test_templates = {'test_template' : test_template,
                  'nan_data_error_template' : nan_data_error_template,
                  'nan_data_noerror_template' : nan_data_noerror_template,
                  'nan_data_powerror_template' : nan_data_powerror_template,
                  'nan_data_fmod_inf_template' : nan_data_fmod_inf_template,
                  'nan_data_copysign_template' : nan_data_copysign_template,
                  }

# ==============================================================================

# Read in the op codes.
opdata = codegen_common.ReadINI('affuncdata.ini')

# Filter out the desired math functions: only entries configured to use
# the generic two-argument 'test_template'.
funclist = [(x, dict(y)) for x, y in opdata.items() if y.get('test_op_templ') == 'test_template']

# ==============================================================================

# This defines the module name.
modulename = 'arrayfunc'
# Import the array module for testing.
arrayimport = 'import array'

for funcname, func in funclist:
    filenamebase = 'test_' + funcname
    filename = filenamebase + '.py'
    headerdate = codegen_common.FormatHeaderData(filenamebase, '09-Dec-2017', funcname)

    # Add additional header data.
    headerdate['modulename'] = modulename
    headerdate['arrayimport'] = arrayimport

    # One function (one output file).
    with open(filename, 'w') as f:
        # The copyright header.
        f.write(codegen_common.HeaderTemplate % headerdate)

        # Check each array type.
        for functype in codegen_common.floatarrays:
            testtemplate = test_templates[func['test_op_templ']]

            # Basic tests.
            funcdata = {'funclabel' : funcname, 'funcname' : funcname, 'pyoperator' : func['pyoperator'],
                        'typelabel' : functype, 'typecode' : functype, 'test_op_x' : func['test_op_x'],
                        'test_op_y' : func['test_op_y']}
            f.write(testtemplate % funcdata)

            # Test for invalid parameters. One template should work for all
            # functions of this style.
            f.write(param_invalid_template % funcdata)

            # NaN, inf, -inf tests.
            funcdata = {'funclabel' : funcname, 'funcname' : funcname, 'pyoperator' : func['pyoperator'],
                        'typelabel' : functype, 'typecode' : functype, 'test_op_x' : func['test_op_x'],
                        'test_op_y' : func['test_op_y'],
                        'test_nan_default' : func['test_nan_default']
                        }

            # NaN
            testtemplate = test_templates[func['test_nan_data_template']]
            funcdata['errorlabel'] = 'NaN'
            funcdata['errordata'] = 'nan'
            f.write(testtemplate % funcdata)

            # inf
            testtemplate = test_templates[func['test_inf_data_template']]
            funcdata['errorlabel'] = 'inf'
            funcdata['errordata'] = 'inf'
            f.write(testtemplate % funcdata)

            # -inf
            testtemplate = test_templates[func['test_ninf_data_template']]
            funcdata['errorlabel'] = 'ninf'
            funcdata['errordata'] = '-inf'
            f.write(testtemplate % funcdata)

        # Closing boilerplate written once per generated file.
        # NOTE(review): indentation reconstructed from whitespace-stripped
        # source -- confirm this write sits after the per-type loop.
        f.write(codegen_common.testendtemplate % {'funcname' : funcname, 'testprefix' : 'af'})
# ==============================================================================
| [
"m12.griffin@gmail.com"
] | m12.griffin@gmail.com |
4e3f97da1c3c17d1e97d0f810018ec929253705d | d42ad62322d6bb655ec54f184b6023d5dc9a5ff5 | /examples/SOAPHelloApplication.py | 38b0a78200121a8d9d2c570c1bf9d8c7ddf30633 | [] | no_license | jmkogut/framework | 76efc3ead9353f8b9f5e4ab14e682a1ed8390e35 | 5e891767830c1b2bd440fede4c74fc87c8ccc1ed | refs/heads/master | 2021-01-16T18:39:38.464522 | 2010-03-03T16:20:03 | 2010-03-03T16:20:03 | 26,396 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from framework.Applications import SOAPApplication, SOAPMethod
class HelloSOAP(SOAPApplication):
    # Example SOAP application. Each @SOAPMethod is exposed as an endpoint;
    # per the HelloTest docstring below, a method without a "##..." line
    # defaults to a string return type with no arguments, so the
    # "##Name:ReturnType, arg:type" docstring lines are runtime data parsed
    # by the framework -- do not reword them.
    @SOAPMethod
    def HelloTest(self):
        '''
        No SOAP docstring here, will default ReturnType to string
        and not allow any arguments. (This is desired behaviour).
        '''
        return "Hello, World!"

    @SOAPMethod
    def AddArgumentsAsString(self, num1, num2):
        '''
        Adds num1 to num2 and returns the value as a string phrase.
        ##AddArguments:string, num1:long, num2:long
        '''
        # NOTE(review): the label says "AddArguments" but the method is
        # "AddArgumentsAsString" -- possibly a copy/paste slip; confirm
        # which name the framework keys on.
        return "The sum of %s and %s is %s" % (num1, num2, (num1 + num2))

    @SOAPMethod
    def AddArguments(self, num1, num2):
        '''
        Adds num1 to num2 and returns the value.
        ##AddArguments:long, num1:long, num2:long
        '''
        return num1+num2

    @SOAPMethod
    def DescribePerson(self, name, age, male):
        '''
        ##DescribePerson:string, name:string, age:int, male:boolean
        '''
        # Pick the objective pronoun matching the boolean gender flag.
        if (male):
            gpronoun = "him"
        else:
            gpronoun = "her"
        return "%s, age %s, has a lot going for %s!" % \
               (name, age, gpronoun)
| [
"joshua.kogut@gmail.com"
] | joshua.kogut@gmail.com |
9407a3410c3adf54c911ab96278515594e083f7c | 8cd15fba24b6dfa431f3764932101969f5fb524f | /JAMediaVideo/gtk2/Globales.py | 814c31044263af9b38ece76b1e5a3998450b5472 | [] | no_license | srevinsaju/JAMediaSuite | c872b4781657bf1bcf63908f71abeca799b8c666 | 1813d1205cf31f89be3c4512eb495baed427494f | refs/heads/master | 2020-12-04T12:14:53.794749 | 2019-01-05T12:52:13 | 2019-01-05T12:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Globals.py por:
# Flavio Danesse <fdanesse@gmail.com>
# Uruguay
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
def get_ip():
    """
    Devuelve ip rango de difusión en la red.

    Determine this machine's outbound IP address by "connecting" a UDP
    socket to an external host (no packet is sent for a UDP connect) and
    reading the local address chosen by the OS. Returns "" on any
    network/DNS failure, preserving the original contract.
    """
    import socket
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("google.com", 80))
        return s.getsockname()[0]
    except socket.error:
        # Narrowed from a bare `except:` so programming errors are no
        # longer silently swallowed; network failures still return "".
        return ""
    finally:
        # Always release the socket; the original leaked it whenever
        # connect() raised before s.close() was reached.
        if s is not None:
            s.close()
def get_color(color):
    """
    Return a predefined gtk.gdk.Color for a known color name
    ("GRIS", "ROJO", ...); unknown names yield None.
    """
    from gtk import gdk
    rgb_by_name = {
        "GRIS": (60156, 60156, 60156),
        "AMARILLO": (65000, 65000, 40275),
        "NARANJA": (65000, 26000, 0),
        "BLANCO": (65535, 65535, 65535),
        "NEGRO": (0, 0, 0),
        "ROJO": (65000, 0, 0),
        "VERDE": (0, 65000, 0),
        "AZUL": (0, 0, 65000),
    }
    rgb = rgb_by_name.get(color)
    if rgb is None:
        return None
    return gdk.Color(*rgb)
def get_colors(key):
    """
    Return the gtk.gdk.Color configured for a named UI area
    ("window", "toolbars", ...); unknown keys fall back to white.
    """
    from gtk import gdk
    palette = {
        "window": "#ffffff",
        "barradeprogreso": "#778899",
        "toolbars": "#f0e6aa",
        "drawingplayer": "#000000",
    }
    hex_value = palette.get(key, "#ffffff")
    return gdk.color_parse(hex_value)
def describe_archivo(archivo):
    """
    Return the type description of a file as reported by `file -ik`
    (mime type + charset). Note from the original author: add
    -z / --uncompress to look inside zip files.
    """
    import commands
    salida = commands.getoutput('file -ik "%s"' % archivo)
    # Drop the leading "<filename>:" field and keep everything after it,
    # re-joined with the same " <field>" spacing as before.
    campos = salida.split(":")[1:]
    return "".join(" %s" % campo for campo in campos)
def make_base_directory():
"""
Crea toda la estructura de Directorios de JAMedia.
"""
import os
import commands
if not os.path.exists(os.path.join(
os.environ["HOME"], "JAMediaDatos")):
os.mkdir(os.path.join(os.environ["HOME"], "JAMediaDatos"))
os.chmod(os.path.join(os.environ["HOME"], "JAMediaDatos"), 0755)
# unificar directorios de JAMedia, JAMediaVideo y JAMediaImagenes
directorio_viejo = os.path.join(os.environ["HOME"], ".JAMediaDatos")
directorio_nuevo = os.path.join(os.environ["HOME"], "JAMediaDatos")
if os.path.exists(directorio_viejo):
for elemento in os.listdir(directorio_viejo):
commands.getoutput('mv %s %s' % (os.path.join(directorio_viejo,
elemento), directorio_nuevo))
commands.getoutput('rm -r %s' % (directorio_viejo))
# Directorios JAMedia
DIRECTORIO_MIS_ARCHIVOS = os.path.join(
os.environ["HOME"], "JAMediaDatos", "MisArchivos")
DIRECTORIO_DATOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Datos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
os.mkdir(DIRECTORIO_MIS_ARCHIVOS)
os.chmod(DIRECTORIO_MIS_ARCHIVOS, 0755)
if not os.path.exists(DIRECTORIO_DATOS):
os.mkdir(DIRECTORIO_DATOS)
os.chmod(DIRECTORIO_DATOS, 0755)
# Directorio JAMediaTube
DIRECTORIO_YOUTUBE = os.path.join(os.environ["HOME"],
"JAMediaDatos", "YoutubeVideos")
if not os.path.exists(DIRECTORIO_YOUTUBE):
os.mkdir(DIRECTORIO_YOUTUBE)
os.chmod(DIRECTORIO_YOUTUBE, 0755)
# Directorios JAMediaVideo
AUDIO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Audio")
if not os.path.exists(AUDIO_JAMEDIA_VIDEO):
os.mkdir(AUDIO_JAMEDIA_VIDEO)
os.chmod(AUDIO_JAMEDIA_VIDEO, 0755)
VIDEO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Videos")
if not os.path.exists(VIDEO_JAMEDIA_VIDEO):
os.mkdir(VIDEO_JAMEDIA_VIDEO)
os.chmod(VIDEO_JAMEDIA_VIDEO, 0755)
IMAGENES_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Fotos")
if not os.path.exists(IMAGENES_JAMEDIA_VIDEO):
os.mkdir(IMAGENES_JAMEDIA_VIDEO)
os.chmod(IMAGENES_JAMEDIA_VIDEO, 0755)
def get_data_directory():
    """
    Return the JAMedia/JAMediaTube data directory, building the whole
    JAMediaDatos tree first if it is missing.
    """
    import os
    ruta = os.path.join(os.environ["HOME"], "JAMediaDatos", "Datos")
    if not os.path.exists(ruta):
        make_base_directory()
    return ruta
def get_tube_directory():
    """
    Return the JAMediaTube video download directory, building the whole
    JAMediaDatos tree first if it is missing.
    """
    import os
    ruta = os.path.join(os.environ["HOME"], "JAMediaDatos", "YoutubeVideos")
    if not os.path.exists(ruta):
        make_base_directory()
    return ruta
def get_audio_directory():
    """
    Return the audio directory shared by JAMedia and JAMediaTube,
    building the whole JAMediaDatos tree first if it is missing.
    """
    import os
    ruta = os.path.join(os.environ["HOME"], "JAMediaDatos", "Audio")
    if not os.path.exists(ruta):
        make_base_directory()
    return ruta
def get_imagenes_directory():
    """
    Return the image directory used by JAMediaVideo and JAMediaImagenes,
    building the whole JAMediaDatos tree first if it is missing.
    """
    import os
    ruta = os.path.join(os.environ["HOME"], "JAMediaDatos", "Fotos")
    if not os.path.exists(ruta):
        make_base_directory()
    return ruta
def get_video_directory():
    """
    Return the JAMediaVideo recordings directory, building the whole
    JAMediaDatos tree first if it is missing.
    """
    import os
    ruta = os.path.join(os.environ["HOME"], "JAMediaDatos", "Videos")
    if not os.path.exists(ruta):
        make_base_directory()
    return ruta
'''
def get_my_files_directory():
"""
Devuelve el Directorio de Archivos del usuario en JAMedia.
"""
import os
DIRECTORIO_MIS_ARCHIVOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "MisArchivos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
make_base_directory()
return DIRECTORIO_MIS_ARCHIVOS
'''
def get_separador(draw=False, ancho=0, expand=False):
    """
    Build a generic gtk.SeparatorToolItem.

    draw: whether the separator line is painted.
    ancho: requested width in pixels (0 keeps the default).
    expand: whether the item absorbs extra toolbar space.
    """
    import gtk
    item = gtk.SeparatorToolItem()
    item.props.draw = draw
    item.set_size_request(ancho, -1)
    item.set_expand(expand)
    return item
'''
def get_togle_boton(archivo, flip=False,
color=get_color("GRIS"), pixels=24):
# Gdk.Color(65000, 65000, 65000)
"""
Devuelve un toggletoolbutton generico.
"""
import gtk
boton = gtk.ToggleToolButton()
imagen = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
archivo, pixels, pixels)
if flip:
pixbuf = pixbuf.flip(True)
imagen.set_from_pixbuf(pixbuf)
boton.set_icon_widget(imagen)
imagen.show()
boton.show()
return boton
'''
def get_boton(archivo, flip=False, rotacion=None,
              pixels=24, tooltip_text=None):
    """
    Build a generic gtk.ToolButton whose icon is loaded from the image
    file *archivo* at *pixels* x *pixels*, optionally flipped
    horizontally and/or rotated by *rotacion*.
    """
    import gtk
    pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(archivo, pixels, pixels)
    if flip:
        pixbuf = pixbuf.flip(True)
    if rotacion:
        pixbuf = pixbuf.rotate_simple(rotacion)
    icono = gtk.Image()
    icono.set_from_pixbuf(pixbuf)
    boton = gtk.ToolButton()
    boton.set_icon_widget(icono)
    icono.show()
    boton.show()
    if tooltip_text:
        boton.set_tooltip_text(tooltip_text)
        # Callers read this attribute back to identify the button's role.
        boton.TOOLTIP = tooltip_text
    return boton
| [
"fdanesse@gmail.com"
] | fdanesse@gmail.com |
c7a9a6e5b9e1cf1f3e8dcf31b1f6ee0aaf7480ce | 0b95518353f172a0d3f53c3afb0608ab975974d2 | /fishc/sms/sms.py | c2294aad0d7fe5037d3968c200d0118203763f70 | [] | no_license | uba888/uba_python | 54b19e6483f5daacec6d2e0e5a4d9cf02ca2d7b5 | 1b63378ab86cda8221c6f7f9bad68c364874ccb6 | refs/heads/master | 2020-01-23T22:00:05.122331 | 2016-12-30T10:31:15 | 2016-12-30T10:31:15 | 74,717,680 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | #!/usr/bin/env python
#encoding=utf-8
import time
import base64
import hashlib
import httplib
import uuid
import hmac
class SMSClient:
    # Minimal Python 2 client for the Aliyun market "singleSendSms" HTTP
    # API: each GET request is authenticated with X-Ca-* headers and an
    # HMAC-SHA256 signature over a canonical request string.

    def __init__(self, app_key, app_secret):
        # Credentials used to identify and sign every request.
        self.__app_key, self.__app_secret = app_key, app_secret

    def send(self, receiver, sign, template_code, parameters=''):
        """Send one templated SMS and print the HTTP response.

        receiver: destination phone number.
        sign: SMS signature name registered with the provider.
        template_code: id of the approved message template.
        parameters: JSON string with the template substitutions.
        """
        print receiver, sign, template_code, parameters
        self.__host = 'sms.market.alicloudapi.com'
        # NOTE(review): the values are interpolated into the query string
        # without URL-encoding -- confirm callers only pass URL-safe data.
        self.__str_uri = '/singleSendSms?ParamString=%s&RecNum=%s&SignName=%s&TemplateCode=%s' % (parameters, receiver, sign, template_code)
        print self.__str_uri
        # The signature covers the URI, so headers must be built after
        # self.__str_uri is set.
        self.build_headers()
        self.__connection = httplib.HTTPConnection(self.__host, 80)
        self.__connection.connect()
        self.__connection.request('GET', self.__str_uri, headers=self.__headers)
        response = self.__connection.getresponse()
        print response.status, response.getheaders(), response.read()

    def build_headers(self):
        """Build the X-Ca-* auth headers, including the request signature."""
        headers = dict()
        headers['X-Ca-Key'] = self.__app_key
        # Fresh nonce + millisecond timestamp for replay protection.
        headers['X-Ca-Nonce'] = str(uuid.uuid4())
        headers['X-Ca-Timestamp'] = str(int(time.time() * 1000))
        headers['X-Ca-Signature-Headers'] = 'X-Ca-Key,X-Ca-Nonce,X-Ca-Timestamp'
        # Canonical string: method, four empty lines (Accept/Content-MD5/
        # Content-Type/Date), the signed headers in declared order, then
        # the request URI. Byte-exact layout is required by the gateway.
        str_header = '\n'.join('%s:%s' % (k, headers[k]) for k in ['X-Ca-Key', 'X-Ca-Nonce', 'X-Ca-Timestamp'])
        str_to_sign = '%s\n\n\n\n\n%s\n%s' % ('GET', str_header, self.__str_uri)
        headers['X-Ca-Signature'] = self.__get_sign(str_to_sign, self.__app_secret)
        self.__headers = headers

    def __get_sign(self, source, secret):
        """Return the base64-encoded HMAC-SHA256 of *source* keyed by *secret*."""
        h = hmac.new(secret, source, hashlib.sha256)
        signature = base64.encodestring(h.digest()).strip()
        return signature
cli.send('18657173220', 'EC82B6214DAFD1BB58F5BDF8B7623F7A', 'SMS_16820035', '{"content":"test"}')
| [
"lsqtyihui@163.com"
] | lsqtyihui@163.com |
4ec65bf797fd519390932f21927af6966f94336b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/168/usersdata/276/70781/submittedfiles/exercicio24.py | 65d4c1fabd7be393ddb9b1c8b13fb77cc8cf2feb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # -*- coding: utf-8 -*-
import math  # NOTE(review): imported but never used in this script

x = int (input('Digite o valor de x: '))
y = int (input('Digite o valor de y: '))

# Print every common divisor of x and y (from 1 up to y), keeping the
# largest one seen so far in mdc.
i = 1
mdc = 0
while (i<=y):
    if (x%i==0) and (y%i==0):
        mdc = i
        print (i)
    i = i + 1
# NOTE(review): mdc ends up holding the GCD but is never printed --
# confirm whether the exercise expects a final `print(mdc)` instead of
# printing each divisor.
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
47aea685f056902f32f25bcb32efe16019a031da | c74c932136a80207e34d56d8775cabc2509eb571 | /pyjld.system/trunk/make.py | 44f7fe909069cad5fb85d39b3c3dab2ecf84deeb | [] | no_license | jldupont/pyjld | 1d73824ce9e11b80a34c5a3baeda81cfecdc771e | 1308b7e0ad3a3dbaa3e05faf4485ebeefc5ae2ba | refs/heads/master | 2020-12-24T13:36:22.392002 | 2011-11-17T19:35:29 | 2011-11-17T19:35:29 | 2,742,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | #!/usr/bin/env python
"""
@author: Jean-Lou Dupont
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
import os
import sys
cdir=os.getcwd()
while True:
print cdir
if cdir.endswith("/pyjld"):
break
cdir=os.path.dirname(cdir)
sys.path.insert(0, os.path.join(cdir, "pyjld.builder", "trunk", "src"))
from pyjld.builder.make import make
make()
| [
"jl@jldupont.com"
] | jl@jldupont.com |
39defe150001c2805ae5c7822c51642555a4b3dc | 2bd8fbe6e2ee2511d00479440aa589249234c2d8 | /01-Supervised/11-16/day17/day17-01-integrate-2-RandomForest-2-parallelized.py | 0698a4110a7311f17677f8802c4b7f25c36c8f54 | [] | no_license | LeenonGo/sklearn-learn | 71d21f9b26cfb5cc6d65a22883127db873a31091 | 460d6e75e82943c802f7c025a03c821d02b5d232 | refs/heads/master | 2023-07-13T18:42:17.510938 | 2021-08-18T11:34:06 | 2021-08-18T11:34:06 | 371,628,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # -*- coding: utf-8 -*-
# @Author : Lee
# @Time : 2021/7/20 15:09
# @Function: 并行化: https://www.scikitlearn.com.cn/0.21.3/12/#11124
# n_jobs = k ,则计算被划分为 k 个作业,并运行在机器的 k 个核上
# 如果设置 n_jobs = -1 ,则使用机器的所有核。
#
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
"""
这个例子展示了在图像分类任务(faces)中使用树的森林来评估基于杂质的像素重要性。像素越热,越重要。
"""
n_jobs = 1 # 调整
data = fetch_olivetti_faces()
X, y = data.data, data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| [
"yaa.lee@hotmail.com"
] | yaa.lee@hotmail.com |
aab4df250c592385d82820219aa267e2404e3028 | ff47883b2dbf55022dd01bc4b22a79cb6323fe61 | /mysite/bookmark/views.py | a88c59504a4459d152d124594af4b831c9f0f9af | [] | no_license | miloking7/git_portfolio_DjangoBlog | 3af0deaebdbddace84fec3d459c0b68722e97416 | 8aa5e7b0525f546aa8ca063b5147904761853662 | refs/heads/master | 2023-04-04T01:53:54.152792 | 2021-04-07T03:21:10 | 2021-04-07T03:21:10 | 355,391,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | from django.views.generic import ListView, DetailView
from bookmark.models import Bookmark
from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from mysite.views import OwnerOnlyMixin
class BookmarkLV(ListView):
    """List all Bookmark objects."""
    model = Bookmark
class BookmarkDV(DetailView):
    """Show a single Bookmark."""
    model = Bookmark
# Create your views here.
class BookmarkCreateView(LoginRequiredMixin, CreateView):
    """Create a bookmark; the logged-in user becomes its owner."""
    model = Bookmark
    fields = ['title', 'url']
    success_url = reverse_lazy('bookmark:index')

    def form_valid(self, form):
        # Stamp ownership server-side so users cannot set it themselves.
        form.instance.owner = self.request.user
        return super().form_valid(form)
class BookmarkChangeLV(LoginRequiredMixin, ListView):
    """List only the requesting user's bookmarks (change/delete page)."""
    template_name = 'bookmark/bookmark_change_list.html'

    def get_queryset(self):
        # Restrict the listing to bookmarks owned by the current user.
        return Bookmark.objects.filter(owner=self.request.user)
class BookmarkUpdateView(OwnerOnlyMixin, UpdateView):
    """Edit a bookmark.

    OwnerOnlyMixin (from mysite.views) presumably restricts access to
    the object's owner -- confirm against its implementation.
    """
    model = Bookmark
    fields = ['title', 'url']
    success_url = reverse_lazy('bookmark:index')
class BookmarkDeleteView(OwnerOnlyMixin, DeleteView):
    """Delete a bookmark (access limited via OwnerOnlyMixin)."""
    model = Bookmark
    success_url = reverse_lazy('bookmark:index')
| [
"miloking@hanmail.net"
] | miloking@hanmail.net |
0d178fa066c1f4c5d384bfd333819d9ac8351337 | fd5edffed3c69a4d749880e18189c391a0a92562 | /blog/migrations/0002_auto_20181026_1956.py | b69729dbed11c9f0b6dd0221836d82990b3583f9 | [] | no_license | bgarcial/hostayni_platform | 4e9768bc1a13f006167d16b6d33bce88a029c524 | 2cf136b24b27db1a907ccc1274d32c1523abe1a2 | refs/heads/master | 2021-10-14T07:42:30.095351 | 2018-11-14T16:11:54 | 2018-11-14T16:11:54 | 103,794,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-10-26 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='category',
),
migrations.AlterField(
model_name='article',
name='draft',
field=models.BooleanField(default=False, help_text='Si seleccionas esta opción tu artículo no será publicado por el momento', verbose_name='Guardar publicación'),
),
]
| [
"botibagl@gmail.com"
] | botibagl@gmail.com |
c19857a7a626e2502e6afa93cf570f5173046049 | 602c982d619fd49a0105dbe53b1b5fdedc4e708f | /products/urls.py | 02f078b4378bb2cce4281ca09d974794e8f2f633 | [] | no_license | rafelmm/ecommerce | 3e5b2f86e42bc262ed5780e0e8195a434ba39795 | 2414105ac171bd00e7bf59e52a99051dcc216c8e | refs/heads/master | 2023-06-11T09:01:12.274122 | 2021-07-07T22:14:50 | 2021-07-07T22:14:50 | 383,597,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py |
from django.contrib import admin
from django.urls import path
from .views import product_list
urlpatterns = [
path('', product_list, name='product_list'),
]
| [
"rafelmm@gmail.com"
] | rafelmm@gmail.com |
8e76debaea8ecc60552b1c5384895640a9b54d55 | bbc8fbbdd40665af61fedf69962b38c1d5939683 | /deploy/pinax.wsgi | 702a6744a1f0adf054e2dce91b62c0c1158c1580 | [] | no_license | braskin/pd | 64b299ad8058e8d3939bc9778fd1576522f786b0 | df32f96b432c2f07e1a20bcbd84df3eccad5e29a | refs/heads/master | 2021-01-10T22:10:34.318229 | 2013-01-23T11:50:37 | 2013-01-23T11:50:37 | 7,773,119 | 0 | 1 | null | 2020-07-25T19:53:06 | 2013-01-23T11:09:43 | Python | UTF-8 | Python | false | false | 454 | wsgi | # pinax.wsgi is configured to live in projects/playdation/deploy.
import os
import sys
from os.path import abspath, dirname, join
from site import addsitedir
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
from django.conf import settings
os.environ["DJANGO_SETTINGS_MODULE"] = "playdation.settings"
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler() | [
"boris.raskin@gmail.com"
] | boris.raskin@gmail.com |
ccd07e782ba302eaba43b3b517b58b8b67f736ae | 62758b6067133b1a4c75da979197d21a5691c34e | /ichnaea/cache.py | 8cc5409f92eecfc5ca30f27177bcc16e2e11344f | [
"Apache-2.0"
] | permissive | mate1983/ichnaea | 903450705f9a83fd74aeb16e5b6fd9644de04065 | ac3ed0640ee8cc7f142ba21cb6976dbf2bd488cb | refs/heads/master | 2020-12-03T10:44:19.124756 | 2016-03-11T11:17:57 | 2016-03-11T11:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | """
Functionality related to using Redis as a cache and a queue.
"""
from contextlib import contextmanager
import redis
from redis.exceptions import RedisError
from six.moves.urllib.parse import urlparse
def configure_redis(cache_url, _client=None):
    """
    Configure and return a :class:`~ichnaea.cache.RedisClient` instance.

    Returns the pre-configured ``_client`` when given, or ``None`` when
    no ``cache_url`` is provided.

    :param _client: Test-only hook to provide a pre-configured client.
    """
    if _client is not None or cache_url is None:
        return _client

    parsed = urlparse(cache_url)
    host_and_port = parsed.netloc.split(':')
    host = host_and_port[0]
    port = int(host_and_port[1]) if len(host_and_port) > 1 else 6379
    # A path like "/2" selects Redis database 2; default to db 0.
    db = int(parsed.path[1:]) if len(parsed.path) > 1 else 0

    pool = redis.ConnectionPool(
        max_connections=20,
        host=host,
        port=port,
        db=db,
        socket_timeout=30.0,
        socket_connect_timeout=60.0,
        socket_keepalive=True,
    )
    return RedisClient(connection_pool=pool)
@contextmanager
def redis_pipeline(redis_client, execute=True):
    """
    Yield a Redis pipeline for use in a ``with`` block.

    :param execute: Should the pipeline be executed or aborted at the end?
    :type execute: bool
    """
    with redis_client.pipeline() as batch:
        yield batch
        if not execute:
            return
        batch.execute()
class RedisClient(redis.StrictRedis):
    """A strict pingable RedisClient."""

    # The last part of these keys is a counter than can be incremented
    # whenever the contents/structure of the cache changes. This allows
    # for easy `cache-busting'.
    # NOTE: these values are runtime cache keys; changing one invalidates
    # the corresponding cached data.
    cache_keys = {
        'downloads': b'cache:downloads:3',
        'fallback_blue': b'cache:fallback:blue:',
        'fallback_cell': b'cache:fallback:cell:',
        'fallback_wifi': b'cache:fallback:wifi:',
        'leaders': b'cache:leaders:2',
        'leaders_weekly': b'cache:leaders_weekly:2',
        'stats': b'cache:stats:3',
        'stats_regions': b'cache:stats_regions:4',
        'stats_blue_json': b'cache:stats_blue_json:2',
        'stats_cell_json': b'cache:stats_cell_json:2',
        'stats_wifi_json': b'cache:stats_wifi_json:2',
    }

    def ping(self):
        """
        Ping the Redis server. On success return `True`, otherwise `False`.
        """
        try:
            self.execute_command('PING')
        except RedisError:
            # Any Redis-level failure (connection, timeout, ...) counts
            # as "not reachable" rather than propagating.
            return False
        return True
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
43be27267c5cc3db18a1f19e8b35979f63b27ec0 | a310626af804bfe9cda37f664ee3efff4b976f4a | /heartFEM/lcleeHeart/vtk_py/addMappingFromPointsToCells.py | 68eea9658c01814637642585cfd25fa852806f79 | [
"MIT"
] | permissive | SamanehNia/heartFEM | 4ea17940e5cb594ee80048bdef3e99fba5640180 | 61b0fabb171a857ffb703e96161f7e3e9ac1f0d1 | refs/heads/main | 2023-07-18T06:34:47.355930 | 2021-09-03T09:47:11 | 2021-09-03T09:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | ########################################################################
import sys
import math
import numpy
import vtk
from heartFEM.lcleeHeart.vtk_py.createIntArray import *
########################################################################
def addMappingFromPointsToCells(ugrid_points, ugrid_cells, verbose=True):
    """Attach to every point of *ugrid_points* the id of the closest cell of
    *ugrid_cells*, stored as a point-data int array named "num_cell".

    :param ugrid_points: grid whose points are mapped (gets the new array).
    :param ugrid_cells: grid providing the target cells.
    :param verbose: print progress information. Fix: the point/cell count
        prints previously ignored this flag; they now honour it.
    """
    if (verbose): print ('*** addMappingFromPointsToCells ***')

    nb_points = ugrid_points.GetNumberOfPoints()
    nb_cells = ugrid_cells.GetNumberOfCells()
    if (verbose):
        print ("nb_points = " + str(nb_points))
        print ("nb_cells = " + str(nb_cells))

    # Build a locator for fast closest-cell queries against ugrid_cells.
    cell_locator = vtk.vtkCellLocator()
    cell_locator.SetDataSet(ugrid_cells)
    cell_locator.Update()

    # Output parameters for FindClosestPoint (filled in-place by VTK).
    closest_point = [0.]*3
    generic_cell = vtk.vtkGenericCell()
    num_cell = vtk.mutable(0)
    subId = vtk.mutable(0)
    dist = vtk.mutable(0.)

    iarray_num_cell = createIntArray("num_cell", 1, nb_points)

    for num_point in range(nb_points):
        point = ugrid_points.GetPoint(num_point)
        # num_cell is updated in place with the id of the closest cell.
        cell_locator.FindClosestPoint(point, closest_point, generic_cell, num_cell, subId, dist)
        iarray_num_cell.InsertTuple(num_point, [num_cell])

    ugrid_points.GetPointData().AddArray(iarray_num_cell)
| [
"noreply@github.com"
] | noreply@github.com |
46b72eb23e247a42eb015c5232e10f72713cee02 | 288dddd9be0fa09101d760e0ede4ae7f2097b638 | /node_modules/restify/node_modules/dtrace-provider/build/config.gypi | 2a6796f82494231438204416813bee011dee3e6f | [
"MIT",
"BSD-2-Clause"
] | permissive | gregmercer/express-mongo-rest | 2a49973f14ec8aa33389e30a68437eb788b1f7ce | 62b02ba4cc689e4559760f3906c4a9832e3e4dca | refs/heads/master | 2021-01-21T13:11:40.614408 | 2013-08-25T15:42:18 | 2013-08-25T15:42:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_no_strict_aliasing": 1,
"node_prefix": "out/dist-osx/usr/local",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/macgmercer/.node-gyp/0.8.2",
"copy_dev_lib": "true"
}
}
| [
"gmercer@stanford.edu"
] | gmercer@stanford.edu |
81f7e1a32b448da0c2743974e650e6d2b9659d73 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/v-1.py | 3c7d5e588805d8f24918006b23a615c626a88574 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code. Expects a token list delimited by standalone
    # double-quote tokens: ['"', word, ..., '"'].
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: the tokens between the quote delimiters
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # nothing between the quotes -> emit an empty line
            print
def main(fileName):
    # Python 2 code. Interpret the file line by line: each line must start
    # with the opcode token 'v-1'; its remaining tokens are handed to
    # printFunction. Any other opcode aborts with 'ERROR'.
    # NOTE(review): a blank line makes data[0] raise IndexError -- confirm
    # the input format never contains empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'v-1':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
dc603f741ec7a6cad7e7ee1a466f129c31103982 | 3f8ba7918eff5e6b9d04621ec59e63d728ea204c | /Jeju_Coding_Base_Code_Festival_Python/Jeju_Coding_Q26.py | 4eac082adf768032d204913f71c67a3ef4e44bbb | [] | no_license | Tasty-Programmer/Python_2021_start | d8f5bd51faa021a5fea28e037e24220539242227 | 2d623576a64a2fe81a979f8e8fb4137b036f49ac | refs/heads/master | 2023-08-28T15:06:25.281567 | 2021-10-09T17:21:05 | 2021-10-09T17:21:05 | 379,224,492 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #문제 26 : 행성 문제2
'''
우리 태양계를 이루는 행성은 수성, 금성 , 지구, 화성, 목성, 토성, 천왕성 , 해왕성 이 있습니다.
이ㅅ행성들의 영어 이름은 Mecury, Venus, Earth, Mars, Jupiter, Saturn,Uranus,Neptune 입니다.
행성의 한글 이름을 입력하면 영어 이름을 반환하는 프로그램을 만들어주세요.
>> 입력
지구
>> 출력
Earth
'''
# Korean planet name -> English name. Fix: the original dict used '지구'
# twice, so '지구':'Mercury' was silently overwritten by '지구':'Earth' and
# the Mercury entry was lost; the first key must be '수성' (Mercury).
planet = {'수성':'Mercury', '금성':'Venus','지구':'Earth','화성':'Mars',
          '목성':'Jupiter','토성':'Saturn','천왕성':'Uranus','해왕성':'Neptune'}
key = input()
print(planet.get(key)) | [
"kimhunsup159@gmail.com"
] | kimhunsup159@gmail.com |
a4b3899d13db603e9377255638f695f7a6233743 | 6d7add9e5009d88a3a1d33f37b646bf033351a4a | /views.py | bd4b4716e392da21d9a0a40701fe863042f5ee14 | [] | no_license | RahabNderitu/EventsApp | f3485c35f8732f99b81b703c712684fd4aa0711a | b6c230ab3808bc2c37ea47a129d6ab969acdec95 | refs/heads/master | 2020-08-08T22:10:08.544232 | 2019-10-14T14:23:38 | 2019-10-14T14:23:38 | 213,931,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,056 | py | from django.shortcuts import render
from django.template import loader
# Create your views here.
def login(request):
    # Static page: login form.
    # NOTE(review): django.contrib.auth.login is not imported in the visible
    # part of this module, so later calls to ``login(request, user)`` would
    # resolve to this view -- confirm the auth import exists.
    return render(request, 'events/login.html')
def register(request):
    # Static page: registration form.
    # NOTE(review): an AJAX view of the same name is defined later in this
    # module; at import time only that later definition survives.
    return render(request, 'events/register.html')
def forgotpassword(request):
    # Static page: password-recovery form.
    return render(request, 'events/forgotpassword.html')
def tables(request):
    # Static dashboard page: tables demo.
    return render(request, 'events/tables.html')
def flot(request):
    # Static dashboard page: flot charts demo.
    return render(request, 'events/flot.html')
def morris(request):
    # Static dashboard page: morris charts demo.
    return render(request, 'events/morris.html')
def forms(request):
    # Static dashboard page: forms demo.
    return render(request, 'events/forms.html')
def panels_wells(request):
    # Static dashboard page: panels and wells demo.
    return render(request, 'events/panels_wells.html')
def buttons(request):
    # Static dashboard page: buttons demo.
    return render(request, 'events/buttons.html')
def notifications(request):
    # Static dashboard page: notifications demo.
    return render(request, 'events/notifications.html')
def typography(request):
    # Static dashboard page: typography demo.
    return render(request, 'events/typography.html')
def icons(request):
    # Static dashboard page: icons demo.
    return render(request, 'events/icons.html')
def grid(request):
    # Static dashboard page: grid demo.
    return render(request, 'events/grid.html')
def blank(request):
    # Static dashboard page: blank template.
    return render(request, 'events/blank.html')
def index(request):
    # Landing page with a search heading.
    # NOTE(review): this renders 'events/editEvents.html', which looks
    # accidental for an index page -- confirm the intended template.
    context = {
        'page': 'index',
        'coverHeading': 'Search Events'
    }
    return render(request, 'events/editEvents.html', context)
def signin(request):
    # Sign-in page, backed by the shared register template.
    context = {
        'page': 'signin'
    }
    return render(request, 'events/register.html', context)
def createEventPage(request):
    # Page containing the create-event form.
    context = {
        'page': 'createEvent',
        'coverHeading': 'Create Event'
    }
    return render(request, 'events/createEvent.html', context)
def allEvents(request):
    # List every event.
    # NOTE(review): ``Event`` is not imported in the visible part of this
    # module -- confirm the model import exists.
    events = Event.objects.all()
    context = {
        'page': 'allEvents',
        'coverHeading': 'All Events',
        'events': events
    }
    return render(request, 'events/allEvents.html', context)
def myEvents(request):
    # List the events created by the current user.
    user = request.user
    # redirect to signin page if user not found
    # NOTE(review): this relies on the filter raising TypeError for an
    # anonymous user, and on ``redirect`` being imported -- confirm both.
    try:
        events = Event.objects.filter(creator=user)
    except TypeError:
        return redirect('register')
    context = {
        'page': 'myEvents',
        'coverHeading': 'My Events',
        'events': events
    }
    return render(request, 'events/myEvents.html', context)
def editEvent(request, event_id):
    # Render the edit form for one event; 404 when the id is unknown.
    event = get_object_or_404(Event, pk=event_id)
    context = {
        'page': 'editEvent',
        'coverHeading': 'Edit Event',
        'event': event
    }
    return render(request, 'events/editEvent.html', context)
# AJAX
def register(request):
    # AJAX endpoint: validate the posted credentials, create the account,
    # log it in, and answer with JSON {'status': ..., ['error_msg': ...]}.
    # NOTE(review): ``User``, ``JsonResponse`` and django.contrib.auth's
    # ``login`` are not imported in the visible part of this module; as
    # written, ``login(request, user)`` would hit the page view of the same
    # name above -- confirm the imports.
    username = str(request.POST['register-username']).lower()
    email = str(request.POST['register-email']).lower()
    password = str(request.POST['register-password'])
    # reject usernames/emails that are already taken
    username_check = User.objects.filter(username=username)
    email_check = User.objects.filter(email=email)
    if username_check:
        response = {
            'status': 'fail',
            'error_msg': 'username already in use'
        }
    elif email_check:
        response = {
            'status': 'fail',
            'error_msg': 'email already in use'
        }
    elif len(password) < 8:
        response = {
            'status': 'fail',
            'error_msg': 'password must be atleast 8 characters long'
        }
    else:
        # create user
        user = User.objects.create_user(username, email, password)
        # log the fresh account in immediately
        login(request, user)
        response = {
            'status': 'success',
        }
    # send response JSON
    return JsonResponse(response)
def loginUser(request):
    # AJAX endpoint: authenticate the posted credentials and open a session.
    username = request.POST['signin-username']
    password = request.POST['signin-password']
    # authenticate() returns None on bad credentials
    user = authenticate(request, username=username, password=password)
    if user:
        login(request, user)
        response = {
            'status': 'success'
        }
    else:
        response = {
            'status': 'fail'
        }
    # send response JSON
    return JsonResponse(response)
def logoutUser(request):
    # Close the session and bounce back to the home page.
    logout(request)
    # send to home page
    return redirect('index')
def searchEvents(request):
    # AJAX endpoint: case-insensitive substring search over event names,
    # returning a JSON-serializable list of matching event summaries.
    # NOTE(review): ``json``, ``Event`` and ``JsonResponse`` are not
    # imported in the visible part of this module -- confirm.
    event_search = json.loads(request.body)['event_search']
    # filter for matching events and serialize for json
    event_search_results = list(Event.objects.filter(
        name__icontains=event_search
    ).values(
        'id',
        'name',
        'event_type',
        'start_date',
        'attendees'
    ))
    response = {
        'status': 'success',
        'event_search_results': event_search_results
    }
    # send response JSON
    return JsonResponse(response)
def eventDetails(request):
    # AJAX endpoint: full serialized record of a single event (404 on a
    # bad id).
    event_id = json.loads(request.body)['event_id']
    event = get_object_or_404(Event, pk=event_id)
    # serializers.serialize expects an iterable, hence the one-element list
    serialized_event = serializers.serialize('json', [event])
    response = {
        'status': 'success',
        'event': serialized_event
    }
    # send response JSON
    return JsonResponse(response)
def eventJoin(request):
    # AJAX endpoint: add a user to an event's attendee list and report the
    # updated attendance count.
    user_id = int(request.POST['user-id'])
    event_id = int(request.POST['event-id'])
    user = User.objects.get(pk=user_id)
    event = Event.objects.get(pk=event_id)
    # M2M add is idempotent: joining twice keeps a single membership row
    event.attendees.add(user)
    # get updated attendance count
    attendance = event.attendees.all().count()
    response = {
        'status': 'success',
        'attendance': attendance
    }
    # send response JSON
    return JsonResponse(response)
def createEvent(request):
    # AJAX endpoint: create an event from the posted form fields; the
    # current user becomes the creator.
    # NOTE(review): no validation -- empty dates/times go straight into the
    # model; confirm the form guarantees them.
    event_title = str(request.POST['event-title']).title()
    event_type = str(request.POST['event-type'])
    event_location = str(request.POST['event-location'])
    event_description = str(request.POST['event-description'])
    event_start_date = str(request.POST['event-start-date'])
    event_start_time = str(request.POST['event-start-time'])
    event_end_date = str(request.POST['event-end-date'])
    event_end_time = str(request.POST['event-end-time'])
    creator = request.user
    # create event
    Event.objects.create(
        name=event_title,
        event_type=event_type,
        creator=creator,
        location=event_location,
        description=event_description,
        start_date=event_start_date,
        start_time=event_start_time,
        end_date=event_end_date,
        end_time=event_end_time
    )
    response = {
        'status': 'success',
    }
    # send response JSON
    return JsonResponse(response)
def updateEvent(request, event_id):
    # AJAX endpoint: overwrite an event's text fields and, when supplied,
    # its dates/times (empty strings leave the stored values untouched).
    event_title = str(request.POST['event-title']).title()
    event_type = str(request.POST['event-type'])
    event_location = str(request.POST['event-location'])
    event_description = str(request.POST['event-description'])
    event_start_date = str(request.POST['edit-event-start-date'])
    event_start_time = str(request.POST['edit-event-start-time'])
    event_end_date = str(request.POST['edit-event-end-date'])
    event_end_time = str(request.POST['edit-event-end-time'])
    event = get_object_or_404(Event, pk=event_id)
    # Update Event
    event.name = event_title
    event.event_type = event_type
    event.location = event_location
    event.description = event_description
    # only update new dates/times
    if event_start_date:
        event.start_date = event_start_date
    if event_end_date:
        event.end_date = event_end_date
    if event_start_time:
        event.start_time = event_start_time
    if event_end_time:
        event.end_time = event_end_time
    # Save updated event
    event.save()
    response = {
        'status': 'success',
    }
    # send response JSON
    return JsonResponse(response)
def removeEvent(request):
    # AJAX endpoint: delete an event by id (404 on a bad id).
    event_id = json.loads(request.body)['event_id']
    event = get_object_or_404(Event, pk=event_id)
    # delete event
    event.delete()
    response = {
        'status': 'success',
    }
    # send response JSON
    return JsonResponse(response)
def searchSystems(request):
    # AJAX endpoint: autocomplete over solar-system names, capped at the
    # first five case-insensitive substring matches.
    # NOTE(review): ``SolarSystem`` is not imported in the visible part of
    # this module -- confirm the model import exists.
    system_query = json.loads(request.body)['system_query']
    results = list(SolarSystem.objects.filter(name__icontains=system_query).values('name')[:5])
    response = {
        'status': 'success',
        'results': results
    }
    # send response JSON
    return JsonResponse(response)
| [
"noreply@github.com"
] | noreply@github.com |
3499a0b759535d1f9435736d748c883e415186d6 | 20b7522ea6de49416e8ec0be8aa2049bc9d561fc | /Dice_roll_GUI.py | 7e2e4e92d9b03717d4ceba580007082ca31b1803 | [] | no_license | Saksham-10/diceroll | 9b2e004a7f2c26ae193a40bb2a95960a6384855d | 31a0b99a99c7d682e04b3f11c6718b575ea2a307 | refs/heads/main | 2023-08-11T08:49:18.478678 | 2021-09-10T08:20:22 | 2021-09-10T08:20:22 | 404,998,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import random
from tkinter import *
# Tiny tkinter app: one big label showing two dice, one button to roll them.
root=Tk()
root.geometry("700x450")
l1=Label(root,font=("times",200))
def roll():
    # Unicode die-face glyphs U+2680..U+2685; show two random faces.
    number=['\u2680','\u2681','\u2682','\u2683','\u2684','\u2685']
    l1.config(text=f'{random.choice(number)}{random.choice(number)}')
    l1.pack()  # re-packing an already packed widget is harmless
b1=Button(root,text="ROll It...",command=roll)
b1.place(x=330,y=0)
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
79a6bd1bd4c4a106b21e7d931958f60298534be3 | 0ba9f66cd4db73e49a0beb644a893a2ef7040486 | /objdet/modelloader/ssd.py | e5a159c1de99c234af427f4f2eb54897f32fd2af | [] | no_license | NMADALI97/objdet | 7009d8d2d05190b9aa108575c5eec8441883c524 | f67834b252e0bf1938b794af33a226241fa6899a | refs/heads/master | 2021-09-25T05:21:14.578893 | 2018-10-18T14:37:27 | 2018-10-18T14:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,838 | py | # -*- coding: utf-8 -*-
import torch
from torch import nn
import torch.nn.functional as F
import itertools
import math
from . import utils
class StrideConv(nn.Module):
    """'Same'-padded 2D convolution: spatial size shrinks only by ``stride``
    (H*W -> (H/stride)*(W/stride))."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        """Build the underlying ``nn.Conv2d`` with half-kernel padding so
        that an odd ``kernel_size`` preserves the spatial extent."""
        super(StrideConv, self).__init__()
        half_pad = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=half_pad,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )

    def forward(self, x):
        """Apply the padded convolution."""
        out = self.conv(x)
        return out
class StridePool(nn.Module):
    """'Same'-padded max pooling: spatial size shrinks only by ``stride``
    (H*W -> (H/stride)*(W/stride))."""

    def __init__(self, kernel_size, stride=None, ceil_mode=False):
        """Build an ``nn.MaxPool2d`` with half-kernel padding; ``stride``
        defaults to ``kernel_size`` (MaxPool2d's own default)."""
        super(StridePool, self).__init__()
        half_pad = (kernel_size - 1) // 2
        self.pool = nn.MaxPool2d(
            kernel_size=kernel_size,
            stride=stride,
            padding=half_pad,
            ceil_mode=ceil_mode,
        )

    def forward(self, x):
        """Apply the padded pooling."""
        out = self.pool(x)
        return out
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale
    (used on SSD's conv4_3 feature map)."""

    def __init__(self, in_features, scale):
        """
        :param in_features: number of channels to normalize over.
        :param scale: initial value of every channel's scale weight.
        """
        super(L2Norm, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(in_features))
        self.reset_parameters(scale)

    def reset_parameters(self, scale):
        """Fill the scale weights with *scale*.

        Fix: use ``nn.init.constant_`` -- the un-suffixed
        ``nn.init.constant`` is deprecated (slated for removal).
        """
        nn.init.constant_(self.weight, scale)

    def forward(self, x):
        # L2-normalize across the channel dimension, then rescale each
        # channel by its learned weight (broadcast over N, H, W).
        x = F.normalize(x, dim=1)
        scale = self.weight[None, :, None, None]
        return scale * x
class SSDBoxCoder:
    """Encode ground-truth boxes into SSD anchor offsets and decode
    predictions back into boxes.

    Relies on the project-local ``utils`` module for box format conversion,
    IoU and NMS.
    """

    def __init__(self, ssd_model):
        """
        :type ssd_model: SSD300
        """
        # Anchor geometry is taken straight from the model's class attributes.
        self.steps = ssd_model.steps
        self.fm_sizes = ssd_model.fm_sizes
        self.fm_num = len(self.fm_sizes)
        self.aspect_ratios = ssd_model.aspect_ratios
        self.box_sizes = ssd_model.box_sizes
        # Standard SSD variances for the (xy, wh) offset terms.
        self.variances = (0.1, 0.2)
        self.default_boxes = self._get_default_boxes()

    def _get_default_boxes(self):
        """
        :return: boxes: (#boxes, 4), 4 is for (cx, cy, h, w) box format
        """
        boxes = []
        for fm_id, fm_size in enumerate(self.fm_sizes):
            for h, w in itertools.product(range(fm_size), repeat=2):
                # print('(h,w):({},{})'.format(h, w))
                cx = (w + 0.5) * self.steps[fm_id] # steps recover the center to the origin map
                cy = (h + 0.5) * self.steps[fm_id] # steps recover the center to the origin map
                # print('(cx,cy):({},{})'.format(cx, cy))
                s = self.box_sizes[fm_id]
                boxes.append((cx, cy, s, s)) # boxes append (cx, cy, h, w)
                s_prime = math.sqrt(self.box_sizes[fm_id] * self.box_sizes[fm_id + 1]) # append large box
                boxes.append((cx, cy, s_prime, s_prime)) # boxes append (cx, cy, h, w)
                # aspect_ratio just save 2, 3 and append 1/2, 1/3
                for aspect_ratio in self.aspect_ratios[fm_id]:
                    boxes.append((cx, cy, s / math.sqrt(aspect_ratio),
                                  s * math.sqrt(aspect_ratio))) # boxes append (cx, cy, h, w)
                    boxes.append((cx, cy, s * math.sqrt(aspect_ratio),
                                  s / math.sqrt(aspect_ratio))) # boxes append (cx, cy, h, w)
        return torch.Tensor(boxes)

    def encode(self, boxes, labels):
        """
        SSD encoding rule:
        tx = (x-anchor_x) / (variance[0]*anchor_w)
        ty = (y-anchor_y) / (variance[0]*anchor_h)
        tw = log(w/anchor_w) / variance[1]
        th = log(h/anchor_h) / variance[1]
        :param boxes: input bounding boxes in (x_lt, y_lt, x_rb, y_rb) format, size [#obj, 4]
        :param labels: input object class labels, size [#obj, ]
        :return: (loc_targets [#anchors, 4], cls_targets [#anchors, ])
        """
        def argmax(x):
            # Return the (row, col) of the global maximum of a 2-D tensor.
            # NOTE(review): the `[1][0]` indexing assumes an older PyTorch
            # where 0-dim results are indexable -- confirm on current torch.
            x_v, x_i = x.max(0)
            x_j = x_v.max(0)[1][0]
            return x_i[x_j], x_j
        default_boxes = self.default_boxes # xywh
        default_boxes_xyxy = utils.change_box_format(default_boxes, 'xywh2xyxy')
        ious = utils.box_iou(default_boxes_xyxy, boxes) # IoU between the gt boxes and every default box
        index = torch.LongTensor(len(default_boxes)).fill_(-1)
        masked_ious = ious.clone()
        # Greedy bipartite matching: repeatedly take the best remaining
        # (anchor, gt) pair until the best IoU left is negligible.
        while True:
            i, j = argmax(masked_ious)
            # print('(i,j):({},{})'.format(i, j))
            if masked_ious[i, j] < 1e-6:
                break
            index[i] = j
            masked_ious[i, :] = 0
            masked_ious[:, j] = 0
        # masked_ious_np = masked_ious.numpy()
        # ious_np = ious.numpy()
        # index_np = index.numpy()
        # print(masked_ious)
        # Any still-unmatched anchor with IoU >= 0.5 also gets its best gt.
        mask = (index < 0) & (ious.max(1)[0] >= 0.5)
        if mask.any():
            index[mask] = ious[mask.nonzero().squeeze()].max(1)[1]
        boxes = boxes[index.clamp(min=0)]
        boxes_xywh = utils.change_box_format(boxes, 'xyxy2xywh')
        # SSD tx ty tw th encoding
        loc_xy = (boxes_xywh[:, :2] - default_boxes[:, :2]) / default_boxes[:, 2:] / self.variances[0]
        loc_wh = torch.log(boxes_xywh[:, 2:] / default_boxes[:, 2:]) / self.variances[1]
        loc_targets = torch.cat([loc_xy, loc_wh], 1)
        # Labels are shifted by one so that 0 means "background".
        cls_targets = 1 + labels[index.clamp(min=0)]
        cls_targets[index < 0] = 0
        return loc_targets, cls_targets

    def decode(self, loc_preds, cls_preds, score_thresh=0.6, nms_thresh=0.45):
        # Invert the encoding above, then keep per-class detections above
        # score_thresh and suppress overlaps with NMS.
        xy = loc_preds[:, :2] * self.variances[0] * self.default_boxes[:, 2:] + self.default_boxes[:, :2]
        wh = torch.exp(loc_preds[:, 2:] * self.variances[1]) * self.default_boxes[:, 2:]
        box_preds = torch.cat([xy - wh / 2, xy + wh / 2], 1)
        boxes = []
        labels = []
        scores = []
        num_classes = cls_preds.size(1)
        for i in range(num_classes - 1):
            score = cls_preds[:, i + 1] # class i corresponds to (i+1) column
            mask = score > score_thresh
            if not mask.any():
                continue
            box = box_preds[mask.nonzero().squeeze()]
            score = score[mask]
            keep = utils.box_nms(box, score, nms_thresh)
            boxes.append(box[keep])
            labels.append(torch.LongTensor(len(box[keep])).fill_(i))
            scores.append(score[keep])
        # NOTE(review): torch.cat raises on an empty list when nothing
        # passes score_thresh -- confirm callers handle that case.
        boxes = torch.cat(boxes, 0)
        labels = torch.cat(labels, 0)
        scores = torch.cat(scores, 0)
        return boxes, labels, scores
class VGG16Extractor300(nn.Module):
    """VGG16-based feature extractor for SSD300: returns six feature maps
    of decreasing spatial size (38, 19, 10, 5, 3, 1)."""

    def __init__(self):
        super(VGG16Extractor300, self).__init__()
        self.conv1_1 = StrideConv(in_channels=3, out_channels=64, kernel_size=3, stride=1)
        self.conv1_2 = StrideConv(in_channels=64, out_channels=64, kernel_size=3, stride=1)
        self.pool1 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
        self.conv2_1 = StrideConv(in_channels=64, out_channels=128, kernel_size=3, stride=1)
        self.conv2_2 = StrideConv(in_channels=128, out_channels=128, kernel_size=3, stride=1)
        self.pool2 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
        self.conv3_1 = StrideConv(in_channels=128, out_channels=256, kernel_size=3, stride=1)
        self.conv3_2 = StrideConv(in_channels=256, out_channels=256, kernel_size=3, stride=1)
        self.conv3_3 = StrideConv(in_channels=256, out_channels=256, kernel_size=3, stride=1)
        self.pool3 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
        self.conv4_1 = StrideConv(in_channels=256, out_channels=512, kernel_size=3, stride=1)
        self.conv4_2 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.conv4_3 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.norm4 = L2Norm(512, 20)  # L2-normalize conv4_3 before it is used as an output
        self.pool4 = StridePool(kernel_size=2, stride=2, ceil_mode=True)
        self.conv5_1 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.conv5_2 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.conv5_3 = StrideConv(in_channels=512, out_channels=512, kernel_size=3, stride=1)
        self.pool5 = StridePool(kernel_size=3, stride=1, ceil_mode=True)
        self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6)  # dilated conv: note the matching padding=6/dilation=6
        self.conv7 = StrideConv(in_channels=1024, out_channels=1024, kernel_size=1, stride=1)
        self.conv8_1 = StrideConv(in_channels=1024, out_channels=256, kernel_size=1, stride=1)
        self.conv8_2 = StrideConv(in_channels=256, out_channels=512, kernel_size=3, stride=2)
        self.conv9_1 = StrideConv(in_channels=512, out_channels=128, kernel_size=1, stride=1)
        self.conv9_2 = StrideConv(in_channels=128, out_channels=256, kernel_size=3, stride=2)
        self.conv10_1 = StrideConv(in_channels=256, out_channels=128, kernel_size=1, stride=1)
        self.conv10_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1)
        self.conv11_1 = StrideConv(in_channels=256, out_channels=128, kernel_size=1, stride=1)
        self.conv11_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1)

    def forward(self, x):
        """Run the backbone and collect the six SSD feature maps in ``xs``."""
        xs = []
        x = self.conv1_1(x)
        x = F.relu(x)
        x = self.conv1_2(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = self.conv2_1(x)
        x = F.relu(x)
        x = self.conv2_2(x)
        x = F.relu(x)
        x = self.pool2(x)
        x = self.conv3_1(x)
        x = F.relu(x)
        x = self.conv3_2(x)
        x = F.relu(x)
        x = self.conv3_3(x)
        x = F.relu(x)
        x = self.pool3(x)
        x = self.conv4_1(x)
        x = F.relu(x)
        x = self.conv4_2(x)
        x = F.relu(x)
        x = self.conv4_3(x)
        x = F.relu(x)
        x1 = self.norm4(x)
        # print('x1.size():{}'.format(x1.size()))
        xs.append(x1)  # conv4_3 38*38*512
        x = self.pool4(x)
        x = self.conv5_1(x)
        x = F.relu(x)
        x = self.conv5_2(x)
        x = F.relu(x)
        x = self.conv5_3(x)
        x = F.relu(x)
        x = self.pool5(x)
        x = self.conv6(x)
        x = F.relu(x)
        x = self.conv7(x)
        x = F.relu(x)
        x2 = x
        # print('x2.size():{}'.format(x2.size()))
        xs.append(x2)  # conv7 19*19*1024
        x = self.conv8_1(x)
        x = F.relu(x)
        x = self.conv8_2(x)
        x = F.relu(x)
        x3 = x
        # print('x3.size():{}'.format(x3.size()))
        xs.append(x3)  # conv8_2 10*10*512
        x = self.conv9_1(x)
        x = F.relu(x)
        x = self.conv9_2(x)
        x = F.relu(x)
        x4 = x
        # print('x4.size():{}'.format(x4.size()))
        xs.append(x4)  # conv9_2 5*5*256
        x = self.conv10_1(x)
        x = F.relu(x)
        x = self.conv10_2(x)
        x = F.relu(x)
        x5 = x
        # print('x5.size():{}'.format(x5.size()))
        xs.append(x5)  # conv10_2 3*3*256
        x = self.conv11_1(x)
        x = F.relu(x)
        x = self.conv11_2(x)
        x = F.relu(x)
        x6 = x
        # print('x6.size():{}'.format(x6.size()))
        xs.append(x6)  # conv11_2 1*1*256
        # print('x.size():{}'.format(x.size()))
        return xs
class SSD300(nn.Module):
    """SSD300 detector: VGG16 feature extractor plus per-feature-map
    location and classification convolution heads."""

    steps = (8, 16, 32, 64, 100, 300)  # stride of each feature map w.r.t. the 300x300 input
    fm_sizes = (38, 19, 10, 5, 3, 1)  # spatial size of each feature map
    aspect_ratios = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,))  # anchor aspect ratios per map
    box_sizes = (30, 60, 111, 162, 213, 264, 315)  # anchor box sizes

    def __init__(self, num_classes=21):
        """:param num_classes: number of classes, background included."""
        super(SSD300, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = (4, 6, 6, 6, 4, 4)
        self.in_channels = (512, 1024, 512, 256, 256, 256)
        self.extractor = VGG16Extractor300()
        self.loc_layers = nn.ModuleList()
        self.cls_layers = nn.ModuleList()
        # One 3x3 head pair per feature map: 4 box offsets and num_classes
        # scores for every anchor position.
        for channels, anchors in zip(self.in_channels, self.num_anchors):
            self.loc_layers.append(nn.Conv2d(channels, anchors * 4, kernel_size=3, padding=1))
            self.cls_layers.append(nn.Conv2d(channels, anchors * self.num_classes, kernel_size=3, padding=1))

    def forward(self, x):
        """Return ``(loc_preds [N,#anchors,4], cls_preds [N,#anchors,#classes])``."""
        loc_preds = []
        cls_preds = []
        feature_maps = self.extractor(x)
        for fm, loc_layer, cls_layer in zip(feature_maps, self.loc_layers, self.cls_layers):
            loc = loc_layer(fm).permute(0, 2, 3, 1).contiguous()
            loc_preds.append(loc.view(loc.size(0), -1, 4))
            cls = cls_layer(fm).permute(0, 2, 3, 1).contiguous()
            cls_preds.append(cls.view(cls.size(0), -1, self.num_classes))
        return torch.cat(loc_preds, 1), torch.cat(cls_preds, 1)
class SSDLoss(nn.Module):
    """SSD MultiBox loss.

    Combines a SmoothL1 localization loss over positive anchors with a
    cross-entropy classification loss over positives plus hard-mined
    negatives (3 negatives per positive), both normalized by the number
    of positive anchors.
    """

    def __init__(self, num_classes):
        """:param num_classes: number of classes, background included."""
        super(SSDLoss, self).__init__()
        self.num_classes = num_classes

    def _hard_negative_mining(self, cls_loss, pos):
        """Return a negative mask selecting 3x as many anchors as positives.

        Args:
            cls_loss: (tensor) cross entropy loss between cls_preds and cls_targets, sized [N,#anchors].
            pos: (tensor) positive class mask, sized [N,#anchors].

        Returns:
            (tensor) boolean negative mask, sized [N,#anchors].
        """
        # Negate the negatives' losses (and zero positives) so an ascending
        # sort ranks the hardest negatives first.
        cls_loss = cls_loss * (pos.float() - 1)
        _, idx = cls_loss.sort(1)      # sort by negative losses
        _, rank = idx.sort(1)          # rank of each anchor, [N,#anchors]
        num_neg = 3 * pos.sum(1)       # [N,]
        neg = rank < num_neg[:, None]  # [N,#anchors]
        return neg

    def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
        """Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).

        Args:
            loc_preds: (tensor) predicted locations, sized [N, #anchors, 4].
            loc_targets: (tensor) encoded target locations, sized [N, #anchors, 4].
            cls_preds: (tensor) predicted class confidences, sized [N, #anchors, #classes].
            cls_targets: (tensor) encoded target labels, sized [N, #anchors].

        Returns:
            (tensor) scalar SmoothL1Loss(loc) + CrossEntropyLoss(cls),
            divided by the number of positive anchors.
        """
        pos = cls_targets > 0  # [N,#anchors]
        batch_size = pos.size(0)
        num_pos = pos.sum().item()
        if num_pos == 0:
            # Fix: the original divided by zero when a batch had no positive
            # anchor. Return a zero loss that stays attached to the graph.
            return (loc_preds.sum() + cls_preds.sum()) * 0

        # ===============================================================
        # loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
        # ===============================================================
        mask = pos.unsqueeze(2).expand_as(loc_preds)  # [N,#anchors,4]
        # reduction='sum' replaces the deprecated size_average=False.
        loc_loss = F.smooth_l1_loss(loc_preds[mask], loc_targets[mask], reduction='sum')

        # ===============================================================
        # cls_loss = CrossEntropyLoss(cls_preds, cls_targets)
        # ===============================================================
        # reduction='none' replaces the deprecated reduce=False.
        cls_loss = F.cross_entropy(cls_preds.view(-1, self.num_classes), cls_targets.view(-1), reduction='none')  # [N*#anchors,]
        cls_loss = cls_loss.view(batch_size, -1)
        cls_loss[cls_targets < 0] = 0  # ignored anchors (label < 0) contribute nothing
        neg = self._hard_negative_mining(cls_loss, pos)  # [N,#anchors]
        cls_loss = cls_loss[pos | neg].sum()

        print('loc_loss: {} | cls_loss: {}'.format(loc_loss.item() / num_pos, cls_loss.item() / num_pos))
        loss = (loc_loss + cls_loss) / num_pos
        return loss
| [
"guanfuchen@zju.edu.cn"
] | guanfuchen@zju.edu.cn |
0ca462dfcd05822d27fc73a3b8faea125b88322a | 71290e9c34a99cdcbe68e7136723d18b408f94b0 | /081_to_083.py | 2d2ff3a4ba9ff89f1ae8467446e00141f5a60f5a | [] | no_license | julenka/euler | 8558380f6b121c7b591d38a47919d8e0213a7b96 | e176403a2fb0aa3c937dac480d2c96e9a1c89df4 | refs/heads/master | 2023-07-07T15:29:25.292033 | 2015-09-14T10:33:25 | 2015-09-14T10:33:25 | 38,571,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,999 | py | #!/usr/bin/env python
import sys
import os
FILE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(FILE_DIR, '..'))
# Uses python-graph library: https://code.google.com/p/python-graph/
# easy_install python-graph-core
import pygraph.classes.graph
import pygraph.algorithms.minmax
class Node:
    """Wrap a matrix cell value so each cell is a distinct graph vertex."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "node(%s)" % self.value
def problem81(matrix,graph,n_rows, n_cols):
    """Project Euler 81 (Python 2): minimal path sum moving only right/down.

    Edge weight = destination node value, so the shortest path cost from the
    dummy top-left node equals the matrix path sum. The matrix has a dummy
    0-valued column prepended, hence the ``c > 0`` guards.
    """
    for r in xrange(n_rows):
        for c in xrange(n_cols):
            cur = matrix[r][c]
            # right
            if(c < n_cols - 1):
                right = matrix[r][c+1]
                graph.add_edge((cur, right), wt=right.value)
            # bottom, don't allow leftmost column to go down
            if(r < n_rows - 1 and c > 0):
                bottom = matrix[r+1][c]
                graph.add_edge((cur, bottom), wt=bottom.value)
    # Dijkstra from the top-left dummy node; report the bottom-right cost.
    _, node_to_cost = pygraph.algorithms.minmax.shortest_path(graph, matrix[0][0])
    goal = matrix[n_rows - 1][n_cols - 1]
    print goal, node_to_cost[goal]
def problem82(matrix, graph,n_rows, n_cols):
    """Project Euler 82 (Python 2): minimal path sum moving right/up/down,
    starting anywhere in the left column and ending anywhere in the right
    column. Tries every starting row and keeps the cheapest result.
    """
    for r in xrange(n_rows):
        for c in xrange(n_cols):
            cur = matrix[r][c]
            # right
            if(c < n_cols - 1):
                right = matrix[r][c+1]
                graph.add_edge((cur, right), wt=right.value)
            # bottom, don't allow leftmost column to go down
            if(r < n_rows - 1 and c > 0):
                bottom = matrix[r+1][c]
                graph.add_edge((cur, bottom), wt=bottom.value)
            # top, don't allow leftmost column to go down
            if(r > 0 and c > 0):
                top = matrix[r-1][c]
                graph.add_edge((cur, top), wt=top.value)
    # One shortest-path run per starting row; track the cheapest exit cost.
    min_cost = 10e100
    for r in xrange(n_rows):
        print "row", r
        path, distances = pygraph.algorithms.minmax.shortest_path(graph, matrix[r][0])
        for r2 in xrange(n_rows):
            cur = matrix[r2][n_cols - 1]
            if cur in distances and distances[cur] < min_cost:
                min_cost = distances[cur]
                min_start = matrix[r][1]
                min_end = cur
                min_path = path
    print min_cost, min_start, min_end
def problem83(matrix,graph,n_rows, n_cols):
    """Project Euler 83 (Python 2): minimal path sum with moves in all four
    directions, from top-left to bottom-right.
    """
    for r in xrange(n_rows):
        for c in xrange(n_cols):
            cur = matrix[r][c]
            # right
            if(c < n_cols - 1):
                right = matrix[r][c+1]
                graph.add_edge((cur, right), wt=right.value)
            # bottom, don't allow leftmost column to go down
            if(r < n_rows - 1 and c > 0):
                bottom = matrix[r+1][c]
                graph.add_edge((cur, bottom), wt=bottom.value)
            # top, don't allow leftmost column to go down
            if(r > 0 and c > 0):
                top = matrix[r-1][c]
                graph.add_edge((cur, top), wt=top.value)
            # left
            # NOTE(review): the other guards use ``c > 0``; ``c > 2`` here
            # forbids moving left into column 1 (the first real column) --
            # confirm this asymmetry is intentional.
            if(c > 2):
                left = matrix[r][c-1]
                graph.add_edge((cur,left), wt=left.value)
    _, node_to_cost = pygraph.algorithms.minmax.shortest_path(graph, matrix[0][0])
    goal = matrix[n_rows - 1][n_cols - 1]
    print goal, node_to_cost[goal]
def main():
    """Read the comma-separated matrix file, wrap each value in a Node,
    build a directed graph and solve problem 83 (Python 2 script).
    """
    matrix = []
    # input_file = "p081_matrix.txt"
    # input_file = "p082_matrix.txt"
    # input_file = "small.txt"
    input_file = "p083_matrix.txt"
    for line in open (input_file):
        # add a column to the left since edge weight = node value
        row = [Node(0)]
        for x in line.split(','):
            row.append(Node(int(x)))
        matrix.append(row)
    n_rows = len(matrix)
    n_cols = len(matrix[0])
    # NOTE(review): only ``pygraph.classes.graph`` is imported at the top of
    # this file; ``pygraph.classes.digraph`` may need its own import for
    # this attribute access to work -- confirm.
    graph = pygraph.classes.digraph.digraph()
    for r in xrange(n_rows):
        for c in xrange(n_cols):
            cur = matrix[r][c]
            graph.add_node(cur)
    problem83(matrix, graph, n_rows, n_cols)
if __name__ == '__main__':
main()
| [
"julenka@Julias-MacBook-Pro.local"
] | julenka@Julias-MacBook-Pro.local |
4adb56b19f422e4b95744f384d76d14ff2d0e9c6 | e6ede210d500b8f0772ff09f6a91578297ad6395 | /tests/database/database_perf_load01.py | 5d0fed1c12eb4b69890a20306a01f56a6878d493 | [
"BSD-3-Clause"
] | permissive | pnarvor/nephelae_base | 392d70e001c49d03e7027989d75adaf065f968ee | d5f1abeae0b0473b895b4735f182ddae0516a1bd | refs/heads/master | 2020-06-23T14:23:41.294273 | 2020-02-28T17:01:26 | 2020-02-28T17:01:26 | 198,648,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | #! /usr/bin/python3
import sys
sys.path.append('../../')
import os
import signal
import time
from ivy.std_api import *
import logging
from nephelae_mapping.database import NephelaeDataServer
from helpers.helpers import *
# Benchmark script: load a previously recorded NephelaeDataServer snapshot
# and time 10 repetitions of the same spatio-temporal query for several tag
# combinations. (The "ellapsed" typo lives in printed output and is kept.)
print("loading database... ", end='', flush=True)
t0 = time.time()
# dtbase = NephelaeDataServer.load('output/database_perf01.neph')
# dtbase = NephelaeDataServer.load('output/database_perf02.neph')
dtbase = NephelaeDataServer.load('output/database_perf03.neph')
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)

# Query 1: tags ['GPS','101'] over the same 4-D slice each time.
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
    output = [entry.data for entry in dtbase.find_entries(['GPS','101'],
        Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
#     print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)

# Query 2: tags ['101','var_0'].
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
    output = [entry.data for entry in dtbase.find_entries(['101','var_0'],
        Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
#     print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)

# Query 3: same tags as query 2 in reverse order -- presumably measuring
# whether tag order affects lookup time (TODO confirm).
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
    output = [entry.data for entry in dtbase.find_entries(['var_0','101'],
        Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
#     print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
| [
"pnarvor@laas.fr"
] | pnarvor@laas.fr |
70afc5d6bfdce3b2c1c6fffff51d2bdf3431cb4a | 2cc2c73afee23674c2bd2bb918fc2f7824fc8818 | /api/migrations/0005_alter_ringkasan_sekolah.py | 8396a47977fdce288243b9ec76830c7e7dae5f7b | [] | no_license | developbyarya/simaknilai | c9e11d59fad25017419dce9ad187160a68287f16 | 34e2d808a59999bb6d80b853f1911e46c33eac81 | refs/heads/master | 2023-07-08T16:44:14.871961 | 2021-08-14T12:33:47 | 2021-08-14T12:33:47 | 396,003,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # Generated by Django 3.2.5 on 2021-08-03 08:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: redeclares Ringkasan.sekolah as a
    # ForeignKey to api.Sekolah with CASCADE deletion.
    dependencies = [
        ('api', '0004_ringkasan'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ringkasan',
            name='sekolah',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.sekolah'),
        ),
    ]
| [
"aryain3rd@gmail.com"
] | aryain3rd@gmail.com |
7917b5e21e2f33ac3e02015db815a15624786a70 | 874cfdb173c10f2bd1a28b8fbae8f5c53b6e7e9b | /Ayushi Gupta/facedetect-master/Face-Recognition-Login-System/djangoproject/mysite/pages/models.py | ba2ae7b6ce5045295fbcd04aecc07a660853e155 | [] | permissive | gargarchit/UdacityOpenSource | 8040a454f97149e7eb12349828653b9c091110b7 | 8a9bfafacad4009835c0610a24b8c87e5a15c602 | refs/heads/master | 2020-07-10T15:30:05.441031 | 2019-10-27T10:57:10 | 2019-10-27T10:57:10 | 203,367,451 | 0 | 0 | Apache-2.0 | 2019-08-20T12:01:45 | 2019-08-20T12:01:44 | null | UTF-8 | Python | false | false | 980 | py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from vote.models import VoteModel
# Create your models here.
class UserProfile(models.Model):
    # Profile data attached one-to-one to an auth User; the User reference is
    # also this table's primary key.
    # NOTE(review): on_delete=None is not a valid deletion handler in modern
    # Django (a callable such as models.CASCADE is expected) — confirm the
    # installed Django version accepts this.
    user=models.OneToOneField(User,on_delete=None,primary_key=True)
    firstname=models.CharField(max_length=100,default=' ')
    Aadharno=models.CharField(max_length=12,default=' ')
    Voteridno=models.CharField(max_length=12,default=' ')
    city=models.CharField(max_length=20,default="")
    phone=models.IntegerField(default=0)
    # Optional profile picture stored under MEDIA_ROOT/profil_images.
    head_shot=models.ImageField(upload_to='profil_images',blank=True)
    class Meta:
        ordering = ["user"]
    def __str__(self):
        return self.user.username
def create_profile(sender,**kwargs):
    """post_save hook for User: ensure a matching UserProfile row exists.

    Runs only when the User was just created (kwargs['created'] is True),
    not on subsequent updates.
    """
    if kwargs['created']:
        # get_or_create returns a (profile, created) tuple; the original bound
        # it to an unused local — the call alone is all that is needed.
        UserProfile.objects.get_or_create(user=kwargs['instance'])
# Register the hook so every User save triggers create_profile.
post_save.connect(create_profile,sender=User)
class ArticleReview(VoteModel, models.Model):
    # Placeholder model: the VoteModel mixin supplies the voting fields and
    # behaviour; no extra columns are declared yet.
    ...
"noreply@github.com"
] | noreply@github.com |
c9f61caccaa7549d1995595eef86bc32bfba3d49 | ed6bea0a68dfa7a9777662f63b2a01e7fbdfacef | /kidswithmaxcandies.py | 13c2e3fdf0a6e2e7b79d69cd37d53a039ab7476c | [] | no_license | pooja-k-swamy/leetcode | 0a00c4ce2797d5b75f96ec10b6168404a83c41a9 | 048b087b0eda00c9ebdfb81e663f8720bbe3fdd6 | refs/heads/master | 2022-12-09T01:24:28.145043 | 2020-09-02T03:57:53 | 2020-09-02T03:57:53 | 262,882,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
greatest = max(candies)
bool_output = [False] * len(candies)
for i in range(len(candies)):
if ((candies[i] + extraCandies) >= greatest):
bool_output[i] = True
else:
pass
return bool_output
| [
"noreply@github.com"
] | noreply@github.com |
be7d5c13550bebdce4d54671a1263edd8dafe493 | 52808af8f8a23f7f48dbc484473d6b2303ec9c89 | /pypoll/main.py | 20a83cc7de5725b03e3d651244f387fab486743e | [] | no_license | mghumman/python-challenge | 1f57cb7b88089c37f29132a38e7d23762228bac5 | 8c3bb75d8af5e451aa0ee26c7289a17da99d1dc2 | refs/heads/main | 2023-03-04T02:29:23.018679 | 2021-02-08T05:52:10 | 2021-02-08T05:52:10 | 336,645,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | import csv
from math import * #from zybooks

# Tally votes per candidate from pypoll.csv (candidate name in column 2).
with open('pypoll.csv') as r:
    string = csv.reader(r,delimiter=',')
    next(string)  # skip the header row
    totalvoters = 0 # voter count
    candidates = {} # dictionary for the candidates
    for row in string:
        totalvoters += 1 # add a voter to total count all rows
        if row[2] in candidates:
            candidates[row[2]] += 1 # adds a vote
        else: # or else
            candidates[row[2]] = 1 # adds to dict

# BUG FIX: the original wrapped the dict views in one-element lists
# ([candidates.keys()] / [candidates.values()]), so max() and .index()
# operated on the whole view object instead of the individual vote counts
# and the reported winner was the raw view, not a candidate name.
candidatelist = list(candidates.keys()) # list of names
candidatevotes = list(candidates.values()) # list of values
maxvotes = max(candidatevotes)
winner = candidatelist[candidatevotes.index(maxvotes)]

print("Election Results:")
print("_____________________________")
print("Total Votes: {}".format(totalvoters))
print("_____________________________")
for candidate in candidates:
    print("{}: {}% ({})".format(candidate,round(candidates[candidate]/totalvoters*100,2),candidates[candidate]))
print("______________________________")
print("Winner: {}".format(winner))
print("______________________________")
| [
"noreply@github.com"
] | noreply@github.com |
b38cc83718ba67b213d350be50f5983e023c5b64 | 838a0c32eb0ab8fa513cfdc698a09ab1eaaef00a | /codes/275. H-Index II.py | e0705b2c4b55f09ebc65aec748c8f9f6ec607acd | [] | no_license | zcgu/leetcode | ff270db9deb000e63dc9f338131c746ce7d24dfb | a041962eeab9192799ad7f74b4bbd3e4f74933d0 | refs/heads/master | 2021-01-11T20:02:49.126449 | 2016-12-31T23:51:38 | 2016-12-31T23:51:38 | 68,346,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
if not citations:
return 0
h = 0
i = len(citations) - 1
while citations[i] > h and i >= 0: # this citations[i] > h not citations[i] >= h
h += 1
i -= 1
return h | [
"patron@loan-rds-490-x.local"
] | patron@loan-rds-490-x.local |
cfeaa1123dc61f9dae1362dfeefc5ff39fdc5b35 | d84506ed7b2d35edf4850b11f86a77b91fae8c95 | /DATA_STRUCTURE/jianzhi_offer/其他/孩子们的游戏(圆圈中最后剩下的数).py | a2e9fd91f3cb8c3b8ff681af0b3eca271744b2f1 | [] | no_license | li2ui2/Python_Personal_DEMO | b5a5cfd0523596f4553fc065e1e71bdefb8b3580 | 57ce8c2d4e4636e774a9acf138ca5c77aa018cd4 | refs/heads/master | 2022-04-22T05:50:08.022777 | 2020-04-24T13:51:57 | 2020-04-24T13:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | """
每年六一儿童节,牛客都会准备一些小礼物去看望孤儿院的小朋友,今年亦是如此。
HF作为牛客的资深元老,自然也准备了一些小游戏。
其中,有个游戏是这样的:首先,让小朋友们围成一个大圈。
然后,他随机指定一个数m,让编号为0的小朋友开始报数。每
次喊到m-1的那个小朋友要出列唱首歌,然后可以在礼品箱中任意的挑选礼物,
并且不再回到圈中,从他的下一个小朋友开始,继续0...m-1报数....这样下去....直到剩下最后一个小朋友,
可以不用表演,并且拿到牛客名贵的“名侦探柯南”典藏版(名额有限哦!!^_^)。
请你试着想下,哪个小朋友会得到这份礼品呢?(注:小朋友的编号是从0到n-1)
如果没有小朋友,请返回-1
"""
class Solution:
    def LastRemaining_Solution(self, n, m):
        """Josephus problem: n children (0..n-1) stand in a circle and every
        m-th child leaves; return the index of the last child remaining,
        or -1 when n or m is zero/falsy.
        """
        if not n or not m:
            return -1
        # Simulate the circle directly.  list() is required so that pop()
        # works on Python 3, where range() is an immutable lazy sequence
        # (the original `cycle = range(n)` raised AttributeError there).
        cycle = list(range(n))
        i = 0
        while len(cycle) > 1:
            i = (m+i-1)%len(cycle)
            cycle.pop(i)
        return cycle[-1]
| [
"21824087@zju.edu.cn"
] | 21824087@zju.edu.cn |
f041b01ef4c05b4c1329551d1fd5d2ee9b8c8fb0 | b1272417635cd57b538981cb316bc765ed6f19cc | /graph/edge.py | d5121c57e4f28a167fd399f89bf332529a093ed5 | [] | no_license | lvanroy/Analysis-of-Focal-Methods-using-Intermediary-LLVM | 865c84c50477c037e2e4e5fb3871ab50e47cb2b6 | cf4fb76831969f934da8c48fb36acfa36ea4c3c3 | refs/heads/master | 2023-03-23T07:51:28.245511 | 2021-03-17T19:09:15 | 2021-03-17T19:09:15 | 298,274,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | class Edge:
    def __init__(self, start_node, end_node, label):
        # A directed, labelled edge between two graph nodes; the nodes are
        # expected to expose get_id() (used by __str__ for dot output).
        self.start_node = start_node
        self.end_node = end_node
        self.label = label
def __str__(self):
return "{} -> {} [label = \"{}\"]".format(self.start_node.get_id(), self.end_node.get_id(), self.label)
| [
"s0161083@ad.ua.ac.be"
] | s0161083@ad.ua.ac.be |
7adcf3af90dc069ab9bec98b2839947c8aeeb910 | 0c2130f0aabf2e27fae19ba93a52b444d4abdffd | /webscraping_beautifulSoup/09 Hand on with AMAZON projects/043 amazon-project2-part2-get-book-detail-information-for-one-book.py | e17c7d86efde3a513d5c15b75c8bf65a8b03a310 | [] | no_license | abuzarrizvi/WebScrapingBeautifulSoup4 | 3e583b736f575596b69e0102dbde797d46f47a61 | 9e847e83cef9a914bc1774295fc48f974a1ab796 | refs/heads/master | 2020-06-17T15:01:16.657407 | 2019-08-14T05:08:32 | 2019-08-14T05:08:32 | 195,956,866 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # strategy
# soup --> ISBN table id="productDetailsTable"
# find_all li tag --> get 4th li
# --> Detail --> iframe --> div.text
# NOTE: Python 2 script (bare `print` statements); drives a real Chrome
# browser via Selenium and scrapes one Amazon product page.
from bs4 import BeautifulSoup
from selenium import webdriver
#driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
driver = webdriver.Chrome('C:\chromedriver_win32\chromedriver.exe')
url = 'https://www.amazon.com/Python-Programming-Introduction-Computer-Science/dp/1590282418/ref=sr_1_1?ie=UTF8&qid=1473731166&sr=8-1&keywords=python+programming'
driver.get(url)
# Parse the product-details table and pull the 4th <li>, which holds ISBN-10.
soup = BeautifulSoup(driver.page_source,'lxml')
table = soup.find('table', {'id':'productDetailsTable'})
all_li = table.find_all('li')
# NOTE(review): str.strip() removes a SET of characters from both ends, not
# the literal prefix 'ISBN-10: ' — digits in that set could be trimmed too.
isbn = all_li[3].text.strip('ISBN-10: ')
print isbn
# The description lives inside an iframe; switch context before re-parsing.
driver.switch_to_frame( driver.find_element_by_tag_name('iframe'))
soup = BeautifulSoup(driver.page_source,'lxml')
description = soup.find('div').text
print description
driver.quit()
| [
"noreply@github.com"
] | noreply@github.com |
5c51ce84d8a7f57e674f00bd71f46427b9970d6c | 0199803b29eb46c406653311a828dc004dfecaed | /unet/unet_parts.py | bdb6ba84c7a7eda780f9fa13a15ba73ce92b217d | [] | no_license | Marroh/Medical-image-segmentation | 17bcec160ca49d434546b9468c147568ff0d5709 | 2fd46973b6327e59b24ff5fd5c58518ea4ad0598 | refs/heads/master | 2023-04-01T23:16:29.039647 | 2023-03-27T08:39:06 | 2023-03-27T08:39:06 | 280,319,636 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """Two stacked (3x3 conv => BatchNorm => LeakyReLU) stages.

    Spatial size is preserved (padding=1); channels go
    in_channels -> out_channels -> out_channels.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Build the six layers in order; Sequential indices (0..5) match the
        # original hand-written stack, so state_dict keys are unchanged.
        layers = []
        channels = in_channels
        for _ in range(2):
            layers.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.LeakyReLU(inplace=True))
            channels = out_channels
        self.double_conv = nn.Sequential(*layers)
    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder stage: halve the spatial resolution with 2x2 max-pooling,
    then apply a DoubleConv to change the channel count."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_channels, out_channels)
        # Same two submodules, same Sequential order as the original.
        self.maxpool_conv = nn.Sequential(pool, conv)
    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: upscale x1, pad it to x2's spatial size, concatenate
    the encoder skip connection, then apply a double convolution."""
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # if bilinear, use the normal convolutions to reduce the number of channels
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)
    def forward(self, x1, x2):
        # x1: decoder feature map to upscale; x2: skip connection from encoder.
        x1 = self.up(x1)
        # input is CHW
        # With odd input sizes the upscaled map can be smaller than the skip
        # map; compute the height/width deficit and pad x1 to match x2.
        diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
        diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
        # F.pad's last-dim-first order here is [left, right, top, bottom].
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        # Concatenate skip features and upscaled features along channels.
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to per-class outputs."""
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        # A 1x1 kernel is a per-pixel linear projection across channels.
        self.conv = nn.Conv2d(in_channels, out_channels, 1)
    # TODO (translated from the original): the output may need mapping into
    # [0, 1]. Update: the cross-entropy loss already includes the softmax step.
    def forward(self, x):
        return self.conv(x)
| [
"noreply@github.com"
] | noreply@github.com |
a17520cbc98c41ee3eb915457e026cd46d1858d3 | 1778e6f647a93be434fd291092cc9eefd0636f89 | /AlexeyAkimov/package1/mylib/test_other.py | 45ce8b37182e1b86372279686c9c3f66bd3462da | [] | no_license | AkimovLab/CHE512-Spring2023 | 61e7ed7fe9bc314ff6ab62dddffa6b252168067c | 0228f225873fb79bcc91bc3c7f4437a480e1004d | refs/heads/main | 2023-06-03T15:49:45.530881 | 2023-05-18T16:46:33 | 2023-05-18T16:46:33 | 595,336,694 | 0 | 7 | null | 2023-05-18T16:46:34 | 2023-01-30T21:51:25 | Jupyter Notebook | UTF-8 | Python | false | false | 182 | py | import pytest
def true_or_false(x):
    """Identity helper: echo the given truth value back unchanged."""
    return x
@pytest.mark.parametrize("x, y", [(True, True), (False, False)])
def test_torf(x, y):
    # Parametrized check: true_or_false must return its argument unchanged.
    assert true_or_false(x) == y
| [
"alexvakimov@gmail.com"
] | alexvakimov@gmail.com |
5863502d0daa13bbc559944b98bd448d9fe1c443 | f0ecd1ab01f7dacdd55611db3fe8cfb8c556c8ef | /tuplas/desv_pad.py | c2875668d5ed7743e176ea64f64dece075a1ff1a | [] | no_license | GuiJR777/INE5603-01238A-20201---Programa-o-Orientada-a-Objetos-I | 042fa2dd1d72fdd19379e387a54f88a556d6f654 | bd23ec0ba6f8ca19aa72d88d8894105e625197d0 | refs/heads/main | 2023-02-04T06:41:55.355279 | 2020-12-16T02:55:10 | 2020-12-16T02:55:10 | 312,139,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | def media_simples(valores):
    # Arithmetic mean: accumulate the total, then divide by the element count.
    soma = 0
    for n in valores:
        soma = soma + n
    media = soma/(len(valores))
    return media
def somatorio_dist(valores, media):
    """Sum of squared deviations of each value from the given mean."""
    return sum((valor - media) ** 2 for valor in valores)
def dp(n, valores):
    """Sample standard deviation of *valores*.

    *n* is kept for the caller's signature but plays no role in the
    computation (the length of *valores* is used instead).
    """
    media = media_simples(valores)
    # Sample variance uses the n-1 (Bessel) denominator.
    variancia = somatorio_dist(valores, media) / (len(valores) - 1)
    return variancia ** 0.5
# Read a count and that many values from stdin, then print the sample
# standard deviation.
n = int(input(''))
valores = []
for i in range(n):
    v = float(input(''))
    valores.append(v)
resposta = dp(n, valores)
print(resposta)
| [
"guilherme.ramires@spinver.com"
] | guilherme.ramires@spinver.com |
fb6e5a1d34f92c85573f7bb2dd81c73b149320f3 | 637e5532f2946001a688735298ba5d6ef2a5b469 | /importcsv.py | 456bc3e315e690f2da19629c48d2af03bb423b7d | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | residuum/Money-Talks | b4a20db246041cda89ec33017530f4123b0f1498 | 85a3515248fccc0520a0b309cf7600b03b873b33 | refs/heads/master | 2021-01-01T06:05:08.591659 | 2012-04-13T17:06:05 | 2012-04-13T17:06:05 | 2,529,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | #!/usr/bin/env python
from couchdbkit import Server, Database
from couchdbkit.loaders import FileSystemDocsLoader
from csv import DictReader
from datetime import datetime
import sys, subprocess, math, os
def parseDoc(doc):
    # Normalise one CSV row (as a dict) in place and return it:
    # - a 'Date' value (YYYY-MM-DD) gains derived 'Year' and 'DayOfYear' keys;
    # - string values that parse as numbers are converted to int/float.
    # NOTE(review): this is a Python 2 module (see the print statements in
    # uploadFile); adding keys while iterating doc.items() is safe there
    # because items() returns a list, but would raise on Python 3.
    for k,v in doc.items():
        if k=='Date':
            closeDate = datetime.strptime(v, '%Y-%m-%d')
            doc['Year'] = closeDate.year
            doc['DayOfYear'] = closeDate.timetuple().tm_yday
        if (isinstance(v,str)):
            #print k, v, v.isdigit()
            # #see if this string is really an int or a float
            if v.isdigit()==True: #int
                doc[k] = int(v)
            else: #try a float
                try:
                    if math.isnan(float(v))==False:
                        doc[k] = float(v)
                except:
                    # Non-numeric strings fall through unchanged (the bare
                    # except swallows the ValueError from float()).
                    pass
    return doc
def upload(db, docs):
    """Flush the accumulated batch to the database in one bulk write and
    hand the caller a fresh, empty batch list."""
    db.bulk_save(docs)
    # The old batch is no longer needed; callers rebind to the empty list.
    return []
def uploadFile(fname, uri, dbname):
    # Stream the rows of a CSV file into a CouchDB database in batches of
    # `checkpoint` docs.  Python 2 module (bare print statements).
    print 'Upload contents of %s to %s/%s' % (fname, uri, dbname)
    # #connect to the db
    theServer = Server(uri)
    db = theServer.get_or_create_db(dbname)
    #loop on file for upload
    reader = DictReader(open(fname, 'rU'), dialect = 'excel') #see the python csv module
    #for other options, such as using the tab delimeter. The first line in your csv
    #file should contain all of the "key" and all subsequent lines hold the values
    #for those keys.
    # Index name is derived from the file path: second path component,
    # without its extension (e.g. 'data/sp500.csv' -> 'sp500').
    indexName = fname.split('/')[1].split('.')[0]
    #used for bulk uploading
    docs = list()
    checkpoint = 100
    prevday = 0
    for doc in reader:
        newdoc = parseDoc(doc) #this just converts strings that are really numbers into ints and floats
        newdoc['index'] = indexName
        # Fill gaps between trading days by cloning the current doc for each
        # missing DayOfYear, handling the year rollover (DayOfYear > 300 vs
        # prevday in 1..9).
        # NOTE(review): Python's `and` binds tighter than `or`, so this
        # condition groups as A or (B and C) — confirm that the
        # "> 1994" guard was meant to apply only to the rollover branch.
        while prevday - 1 > newdoc['DayOfYear'] or (newdoc['DayOfYear'] > 300 and prevday > 1 and prevday < 10) and newdoc['Year'] > 1994:
            prevdoc = newdoc.copy()
            prevday = prevday - 1
            if prevday < newdoc['DayOfYear']:
                prevdoc['Year'] = newdoc['Year'] + 1
            prevdoc['DayOfYear'] = prevday
            docs.append(prevdoc)
        if newdoc['Year'] > 1994:
            prevday = newdoc['DayOfYear']
            docs.append(newdoc)
        # Every `checkpoint` docs, bulk-save and start a fresh batch.
        if len(docs)%checkpoint==0:
            docs = upload(db,docs)
    #don't forget the last batch
    docs = upload(db,docs)
if __name__=='__main__':
    # Usage: importcsv.py <csv-file> <couchdb-uri> <db-name>
    filename = sys.argv[1]
    uri = sys.argv[2]
    dbname = sys.argv[3]
    uploadFile(filename, uri, dbname)
| [
"thomas@residuum.org"
] | thomas@residuum.org |
897b4b4603dbf5f9033d8118ac43b22dcdf16323 | f3f2ee90c7d45b537abfd54a7e7ecf63251390d2 | /Part1-MachineLearningandDataAnalytics/Examples/Chapter2_Pandas/Chapter2_Pandas_DataStructuresSeriesDataframe.py | 5888075b692132747990625635656e666839c245 | [] | no_license | datawisdomx/DataScienceCourse | 49eacf9a647f19eb70e2144b6b022031bf69240f | 375e58042fe78ff2244b499e6c03d33635ef5226 | refs/heads/main | 2023-05-05T17:58:31.481954 | 2021-05-28T05:56:56 | 2021-05-28T05:56:56 | 340,570,384 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 16:47:15 2021
@author: nitinsinghal

Teaching script: demonstrates the two core pandas data structures
(Series and DataFrame) by constructing small examples and printing them.
"""
# Chapter 2 - Pandas - Data Analysis Library
# Data Structures
import pandas as pd
import numpy as np
#Series
# A Series with the default integer index; element-wise arithmetic below.
s = pd.Series([1,2,5,11,16,23])
print(s)
print('s[0] is: ',s[0])
print(s/2)
print(s*s)
# A Series with an explicit string index, accessed by position and by label.
si = pd.Series([-11, -8, -5, -2, 1, 4, 7, 10], index=['a', 'b', 'c','d','e','f','g','h'])
print(si)
print(si[0])
print('si[a] is: ',si['a'])
print('si mean is: ', np.mean(si))
print('si std dev is: ', np.std(si))
# Building a Series from a dict: keys become the index.
dictionary = {'a':3,'b':'balls','c':7.21,'d':'price','e':'2/9/18'}
sd = pd.Series(dictionary)
print(sd)
sd = pd.Series(dictionary, name='dictionary')
print(sd)
#DataFrame
# Column-oriented construction; note the NaN in 'price' propagates into
# the derived 'value' column.
d = {'item': ['a','b','c','d','e','f'],
     'price': [10, 25, 33.43, 51.2, 9, np.nan],
     'quantity': [48, 12, 7, 3, 80, 100]}
df = pd.DataFrame(d)
print(df)
print(df['price']*10)
df['value'] = df['price']*df['quantity']
print(df)
| [
"noreply@github.com"
] | noreply@github.com |
0d0107c5fc211ba55a7f4194bd58bfb09b71cc71 | 0764489a1cb0793a39252bb0e6afa76854119644 | /scenarios/credit_failed_state/executable.py | 965e78d1121ac9ff409962b2dae867a57ae767dc | [] | no_license | rserna2010/balanced-python | 8ac7bef3cb309be8affaa2aa62e882d631b62bda | 093a401d187bc09293a88214156e9e316185bfa3 | refs/heads/master | 2021-01-21T07:20:47.157987 | 2013-11-08T17:39:54 | 2013-11-08T17:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import balanced
# Demo scenario: credit a (sandbox) bank account through the Balanced API.
# The key below is a test-marketplace key ('ak-test-...'), not a live secret.
balanced.configure('ak-test-1p1Tsac7gHeMQowL2seB7ieliuAJAufyq')
bank_account_info = {
    "routing_number": "121000358",
    "type": "checking",
    "account_number": "9900000005",
    "name": "Johann Bernoulli"
}
# Create and immediately save the credit; amount is presumably in cents
# (USD 100.00) — confirm against the Balanced API reference.
credit = balanced.Credit(
    amount=10000,
    bank_account=bank_account_info
).save()
"ben@unfiniti.com"
] | ben@unfiniti.com |
f6588639e6480140e4ca5522ba6434637eccbae4 | 183caf378df099da122f65ea9b75002b1e12b774 | /projFocus/ceRNA/model/projCeRNA_step3-4_WGSC.py | d6335c4a2e5f5ffe52644dc2776b872148729be7 | [] | no_license | cwt1/scripts-1 | f58e476ddb2c83e0480856a95a95a644ad3c001c | 061d6592aa6ab11c93363fcb40305a57db05e3f2 | refs/heads/master | 2021-05-28T01:31:30.896133 | 2014-08-25T19:02:37 | 2014-08-25T19:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | import numpy as np
import sys,getopt
from collections import defaultdict
from parseKeyRegFile import parseKeyRegFile
from collections import Counter, Sequence
from parseGslistFile import parseGslistFile
class MutSet():
    """A candidate regulator gene together with the samples it is mutated in
    and the expression z-score recorded for each of those samples."""
    def __init__(self, gene, mutSample, zscore):
        self.gene = gene
        self.mutSample = mutSample
        # sample -> z-score, aligned pairwise with the two input lists
        self.mutZscore = dict(zip(mutSample, zscore))

    def __str__(self):
        return "(%s, %s)" % (self.gene, self.mutSample)

    def __repr__(self):
        # BUG FIX: this was misspelled __expr__ in the original, so it was
        # never picked up by repr().
        return "(%s, %i mutSmp)" % (self.gene, len(self.mutSample))

    def update(self, unionSample):
        """Restrict this set to samples present in unionSample.

        BUG FIX: the original assigned the None return value of
        list.remove() back to self.mutSample and called the nonexistent
        dict.remove() on self.mutZscore; rebuild both containers instead.
        """
        self.mutSample = [s for s in self.mutSample if s in unionSample]
        self.mutZscore = {s: z for s, z in self.mutZscore.items()
                          if s in unionSample}
def findWegitedMin(S, R, wtype = 'mean'):
    '''
    Greedy step of the weighted set cover: among the MutSet objects in S,
    pick the one whose intersection with the still-uncovered samples R has
    the lowest cost (cost = inverse of the chosen weight statistic).

    wtype is to select the method to use weight,
    total: the summation of all mutation zscores;
    mean: the mean of all mutation zscores;
    max: the maximization of mutation zscores for each gene

    Returns (gene name, covered-sample set, cost) of the best candidate.
    NOTE: Python 2 module — relies on the builtin reduce and a print
    statement; on Python 2, len/sum division is integer division for ints.
    '''
    ## get the minimum cost set
    minCost = 99999.0
    minElement = -1
    minSet = ''
    minSname = ''
    for i, s in enumerate(S):
        sname_i = s.gene; ss = s.mutSample; sw = s.mutZscore
        # Samples of this gene that are still uncovered.
        ss_i = set(R).intersection(set(ss))
        if len(ss_i) == 0:
            continue
        sw_i = [sw[a] for a in ss_i ]
        if wtype == 'total':
            cost = 1/reduce(lambda x, y: x+y , sw_i)
        elif wtype == 'mean':
            cost = len(sw_i)/sum(sw_i)
        elif wtype == 'max':
            cost = 1/ reduce(lambda x, y: max(x,y), sw_i)
        else:
            # Unknown wtype: warn and fall back to the 'mean' weighting.
            print "ERRor wtype: use default mean; other option total, max or mean"
            cost = len(sw_i)/sum(sw_i)
        if cost < minCost:
            minCost = cost
            minElement = i
            minSet = ss_i
            minSname = sname_i
    return minSname, minSet, minCost
def wgsc(S, U, alpha = 0.8, tol = 0.001, wtype = 'mean'):
    # Weighted greedy set cover over universe U using the MutSet objects in S.
    # Repeatedly takes the minimum-cost set (findWegitedMin), removes its
    # samples from the remainder R, and stops once less than (1 - alpha) of U
    # is still uncovered.  Returns (chosen genes, their covered-sample lists,
    # per-step costs).  NOTE: `tol` is accepted but never used.
    R = U
    C = []
    G = []
    costs = []
    while len(R) != 0:
        g_i, S_i, cost = findWegitedMin(S, R, wtype = wtype)
        C.append(list(S_i))
        G.append(g_i)
        # Remove the newly covered samples from the remainder.
        R = list(set(R).difference(set(S_i)))
        costs.append(cost)
        if len(R) < int((1 - alpha) * len(U)):
            break
    return G, C, costs
def __test__():
    # Ad-hoc smoke test: six toy gene/sample sets with weights, run through
    # wgsc under all three weighting modes.  (Redefined later in this module
    # by a second __test__ below, which shadows this one.)
    from collections import defaultdict
    seqSet = {'G1':['S2','S4','S6'],
              'G2':['S1','S3'],
              'G3':['S1'],
              'G4':['S1'],
              'G5':['S5'],
              'G6':['S3']}
    seq = ['S1', 'S2', 'S3', 'S4', 'S5','S6']
    weightSet = {'G1':[1.0,0.5,1.5],
                 'G2':[2.0, 2.5],
                 'G3':[2.3],
                 'G4':[1.2],
                 'G5':[2.5],
                 'G6':[3.0]}
    setObjL = []
    for sk, ss in seqSet.items():
        setObjL.append(MutSet(sk,ss,weightSet[sk]))
    geneL, smpL, costL = wgsc(setObjL, seq, wtype = "mean")
    geneL, smpL, costL = wgsc(setObjL, seq, wtype = "total")
    geneL, smpL, costL = wgsc(setObjL, seq, wtype = "max")
geneL, smpL, costL = wgsc(setObjL, seq, wtype = "max")
####-------compute the zscore matrix for each mutation
import numpy as np
# def myZscore(a,b):
# bArr = np.array(b)
# m = np.mean(bArr)
# sd = np.std(bArr)
# return (np.array(a) - m)/sd
def myZscore(a, b):
    """Absolute z-score of *a* given b = (mean, std); vectorises over arrays."""
    mean, std = b[0], b[1]
    return abs((np.array(a) - mean) / std)
def formatSampleName(code19):
    """Normalise a TCGA-style barcode: long (19-char) barcodes are trimmed
    to characters 5-15 (the participant/sample part); dashes become dots."""
    trimmed = code19[5:16] if len(code19) > 11 else code19
    return trimmed.replace("-", ".")
def loadNormExpfile(filename):
    # Read a tab-separated normal-expression matrix (header = sample names,
    # then one gene per line) and return gene -> [mean, std] across samples.
    # NOTE: Python 2 module — np.array(map(...)) relies on map returning a
    # list; on Python 3 it would wrap the iterator in a 0-d object array.
    expD = defaultdict(list)
    with open(filename) as f:
        samples = f.readline().strip().split("\t")
        line = f.readline()
        while line:
            # First field is the gene symbol; the rest are expression values.
            gCrt, valCrt = line.strip().split("\t",1)
            valCrt = np.array(map(float, valCrt.split("\t")))
            expD[gCrt] = [np.mean(valCrt), np.std(valCrt)]
            line = f.readline()
    return expD
def loadExpfile(filename, expND):
    # Read a tumor-expression matrix and convert each gene's values to
    # absolute z-scores against the normal-tissue [mean, std] in expND.
    # The header row is kept under the 'samples' key.
    expD = defaultdict(list)
    with open(filename) as f:
        expD['samples'] = f.readline().strip().split("\t")
        line = f.readline()
        while line:
            gCrt, valCrt = line.strip().split("\t",1)
            try:
                expD[gCrt] = map(lambda x:\
                        myZscore(float(x),expND[gCrt]),\
                        valCrt.split("\t"))
            except:
                # Genes absent from the normal data (KeyError) or rows with
                # non-numeric values are silently skipped (bare except).
                pass
            line = f.readline()
    return expD
def loadMutInfo(mutfile, zscoreD):
    '''
    load all mutated ceRNA driver,
    and return all mutated ceRNA driver's mutated sample, and zscores

    mutfile is a gene x sample mutation matrix ("0" = not mutated); only
    mutated samples that also appear in zscoreD['samples'] are kept, and
    their z-scores are looked up positionally in zscoreD[gene].
    NOTE: Python 2 module (print statement; map returning lists).
    '''
    mutD = defaultdict(list)
    mutZscoreD = defaultdict(list)
    cnt = 0
    with open(mutfile) as f:
        # Header row: first field is the gene column label, rest are samples
        # (normalised to the expression matrix's naming via formatSampleName).
        gene, samples = f.readline().strip().split("\t",1)
        samples = map(formatSampleName, samples.split("\t"))
        mutD['samples'] = samples
        line = f.readline()
        while line:
            cnt = cnt + 1
            gene, vals = line.strip().split("\t",1)
            # Column indices where this gene carries a mutation.
            mutIdx = [id for (id,m) in enumerate(vals.split("\t")) if m != "0"]
            mutSmp = map(samples.__getitem__, mutIdx)
            # Keep only mutated samples present in the expression data,
            # remembering their positions for the z-score lookup.
            mutSmpInExpSmpID = []; mutSmpInExpSmp = []
            for (id, a) in enumerate(zscoreD['samples']) :
                if a in mutSmp:
                    mutSmpInExpSmpID.append(id)
                    mutSmpInExpSmp.append(a)
            mutZscoreD[gene] = map(zscoreD[gene].__getitem__, mutSmpInExpSmpID)
            mutD[gene] = mutSmpInExpSmp
            line = f.readline()
    print " input target genes:\t",cnt
    return mutD, mutZscoreD
def prepareDataWGSC(mutfile, gslistfile, keygenefile, pvalCut = 0.01 ):
    '''
    given, mutation dict, zscore dict,
    intact sample list for each cancer genen,
    file from group lasso result;
    prepare mutation data, zscore data for each cancer gene
    using MutSet objects

    Relies on the module-level mutD / mutZscoreD dicts being populated
    (see loadMutInfo).  Returns ({target gene: [MutSet list, mutated intact
    samples]}, info dict), or (tgeneSum, '') when parsing yields nothing.
    '''
    tgeneSum, regsSum = parseKeyRegFile(keygenefile, pvalCut)
    if not regsSum or not tgeneSum:
        return tgeneSum, ''
    reglist = regsSum
    regMutObjL = []
    tgene = tgeneSum[0]
    # Samples considered "intact" (genomically clean) for this target gene.
    gintsmplist = parseGslistFile(tgene, gslistfile)
    ##---check whether mutated samples are intact
    cnt = 0
    regMutAllSmp = []
    mutRegs = []
    for gene in reglist:
        cnt = cnt + 1
        crtMut = mutD[gene]; crtMutZscore = mutZscoreD[gene]
        if crtMut:
            mutRegs.append(gene)
            for idx, smp in enumerate(mutD[gene]) :
                if smp not in gintsmplist:
                    # Drop mutated samples that are not intact.
                    crtMut.remove(smp)
                    # NOTE(review): `del crtMutZscore` only unbinds the local
                    # name; it likely was meant to drop this sample's z-score
                    # entry instead — confirm intended behaviour.
                    del crtMutZscore
                else:
                    regMutAllSmp.append(smp)
                    pass
            if crtMut:
                regMutObjL.append(MutSet(gene, crtMut, crtMutZscore))
    # Per-sample mutation load across all kept regulators.
    tempCnter = Counter(regMutAllSmp)
    regMutAllSmp, regMutAllSmpLoad = tempCnter.keys(), tempCnter.values()
    outInfo = {}
    outInfo['gintSmp'] = gintsmplist ## all gint sample
    outInfo['mutGintSmp'] = regMutAllSmp ## mutated gint sample
    outInfo['mutGintSmpLoad'] = regMutAllSmpLoad ## mutated gint sam ple's mutation frequency for each mutation
    outInfo['tgene'] = tgene ## target gene
    outInfo['allRegs'] = reglist ## all regulators
    outInfo['mutRegs'] = mutRegs## all mutated regulators
    outInfo['intMutRegs'] = map(lambda x:x.gene, regMutObjL)
    return {tgene:[regMutObjL, regMutAllSmp]}, outInfo
def __test__():
    # Second ad-hoc smoke test (this definition shadows the earlier __test__
    # above): runs the full pipeline against hard-coded local data paths,
    # so it only works on the author's machine.
    expTumorM="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix"
    expNormalM="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix"
    expnD = loadNormExpfile(expNormalM)
    zscoreD = loadExpfile(expTumorM, expnD)
    mutfile = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/sigMut/step2_mutKeyReg/kegRegs_Apr-18-2014.summary.driverRegs.list.uniq.mut.matrix"
    mutD, mutZscoreD = loadMutInfo(mutfile, zscoreD)
    gslistfile="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more.hasReg.list"
    keygenefile="/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/candiReg/run-Apr-1-2014/data/BCL9_candidateRegs_Mar-31-2014.txt"
    pvalCut=0.01
    tRegMutObjDict, info = prepareDataWGSC(mutfile, gslistfile, keygenefile)
    targ = tRegMutObjDict.keys()[0]
    # Exercise all three weighting modes of the set-cover solver.
    print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "mean")
    print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "total")
    print wgsc(tRegMutObjDict[targ][0],tRegMutObjDict[targ][1] , wtype = "max")
| [
"violet.hj@gmail.com"
] | violet.hj@gmail.com |
7a6f270126790be7facdfa5e5a3b92c6f829e482 | 501d97fe1a0d8540f218e8d2a8aaf0adba640c97 | /015.三数之和/答案.py | 9ff20fee93168f9fb92ed23f796b04183613a5a1 | [
"Apache-2.0"
] | permissive | yangeren/pydatas-leecode | 77d667d81829ca6cf00a87bb88d803f90c7f2f95 | a9656fd0c61d619e4748f58a50f8f5ece675ffe7 | refs/heads/master | 2022-12-29T10:58:33.807605 | 2020-10-17T15:41:26 | 2020-10-17T15:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | class Solution(object):
def threeSum(self, nums):
"""
:nums类型: List[int]
:返回类型: List[List[int]]
"""
# 先排序然后双指针
res = []
nums.sort()
for i in range(0, len(nums)):
if i > 0 and nums[i] == nums[i - 1]:
continue
target = 0 - nums[i]
start, end = i + 1, len(nums) - 1
while start < end:
if nums[start] + nums[end] > target:
end -= 1
elif nums[start] + nums[end] < target:
start += 1
else:
res.append((nums[i], nums[start], nums[end]))
end -= 1
start += 1
while start < end and nums[end] == nums[end + 1]:
end -= 1
while start < end and nums[start] == nums[start - 1]:
start += 1
return res | [
"2476271447@qq.com"
] | 2476271447@qq.com |
1b10ff1579271a1232c1aa6dffaea63bf8a0342d | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine-Rand/satellite/MethodRefine/satellite_benchmark-high/validating/validating_20.py | 6b5fe7da51099a834f291860f10739404d5f325e | [] | no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 3,502 | py | #!/usr/bin/env python
# coding=utf-8
# Generated benchmark instance for the satellite HTN-planning domain:
# defines the initial state (3 satellites, 13 instruments, 3 modes,
# 5 directions) and registers types/operators with the planner.
# NOTE: Python 2 module (the sibling add_methods uses fun.func_name).
import sys
sys.path.insert(0, './')
from satellite import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
# Problem-size counters.
state0.sate_num = 3
state0.inst_num = 13
state0.mode_num = 3
state0.direc_num = 5
state0.img_num = 4
# Instrument <-> satellite assignments (both directions in one dict).
state0.on_board = {'inst-3-1':'sate-3','inst-1-1':'sate-1','inst-1-3':'sate-1','inst-1-2':'sate-1','sate-1':['inst-1-1', 'inst-1-2', 'inst-1-3'],'sate-2':['inst-2-1', 'inst-2-2', 'inst-2-3', 'inst-2-4', 'inst-2-5'],'sate-3':['inst-3-1', 'inst-3-2', 'inst-3-3', 'inst-3-4', 'inst-3-5'],'inst-3-4':'sate-3','inst-3-3':'sate-3','inst-3-2':'sate-3','inst-2-4':'sate-2','inst-2-5':'sate-2','inst-2-2':'sate-2','inst-2-3':'sate-2','inst-3-5':'sate-3','inst-2-1':'sate-2',}
# Instrument <-> imaging-mode support (both directions in one dict).
state0.mode = {'inst-3-1':'mode-1','inst-1-1':'mode-1','inst-1-3':'mode-3','inst-1-2':'mode-2','mode-1':['inst-2-2', 'inst-1-1', 'inst-2-1', 'inst-3-1', 'inst-3-4'],'mode-2':['inst-3-5', 'inst-1-2', 'inst-2-3', 'inst-3-2', 'inst-3-3'],'mode-3':['inst-2-4', 'inst-1-3', 'inst-2-5'],'inst-3-4':'mode-1','inst-3-3':'mode-2','inst-3-2':'mode-2','inst-2-4':'mode-3','inst-2-5':'mode-3','inst-2-2':'mode-1','inst-2-3':'mode-2','inst-3-5':'mode-2','inst-2-1':'mode-1',}
# Calibration target direction per instrument.
state0.calib_target = {'inst-1-1':'direc-5','inst-1-3':'direc-4','inst-1-2':'direc-2','inst-2-3':'direc-5','inst-3-4':'direc-4','inst-3-3':'direc-3','inst-3-2':'direc-1','inst-2-4':'direc-4','inst-2-5':'direc-1','inst-2-2':'direc-5','inst-3-1':'direc-4','inst-3-5':'direc-3','inst-2-1':'direc-4',}
# Initial pointing direction of each satellite.
state0.pointing = {'sate-1':'direc-3','sate-2':'direc-2','sate-3':'direc-2',}
# Power bookkeeping: every satellite starts with power available and
# every instrument switched off and uncalibrated.
state0.power_avail = {'sate-1':True,'sate-2':True,'sate-3':True,}
state0.power_on = {'inst-1-1':False,'inst-1-2':False,'inst-1-3':False,'inst-2-1':False,'inst-2-2':False,'inst-2-3':False,'inst-2-4':False,'inst-2-5':False,'inst-3-1':False,'inst-3-2':False,'inst-3-3':False,'inst-3-4':False,'inst-3-5':False,}
state0.calibrate = {'inst-1-1':False,'inst-1-2':False,'inst-1-3':False,'inst-2-1':False,'inst-2-2':False,'inst-2-3':False,'inst-2-4':False,'inst-2-5':False,'inst-3-1':False,'inst-3-2':False,'inst-3-3':False,'inst-3-4':False,'inst-3-5':False,}
# No images acquired yet for any direction/mode pair.
state0.have_img = {'direc-1':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-2':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-3':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-4':{'mode-1': False,'mode-2': False,'mode-3': False,},'direc-5':{'mode-1': False,'mode-2': False,'mode-3': False,},}
# Register object types and the primitive operators with the planner.
new_tihtn_planner.declare_types({'satellite':['sate-1','sate-2','sate-3',],'instrument':['inst-1-1','inst-1-2','inst-1-3','inst-2-1','inst-2-2','inst-2-3','inst-2-4','inst-2-5','inst-3-1','inst-3-2','inst-3-3','inst-3-4','inst-3-5',],'mode':['mode-1','mode-2','mode-3',],'direction':['direc-1','direc-2','direc-3','direc-4','direc-5',]})
new_tihtn_planner.declare_funs({switch_off:['satellite'],switch_on:['instrument', 'satellite'],turn_to:['satellite', 'direction'],calibrate:['instrument', 'satellite', 'direction'], take_img:['satellite', 'direction', 'instrument', 'mode']})
new_tihtn_planner.instance()
def execute(completable):
    # Run the planner on the four get_img goals with ordering constraints
    # (goal i before goal i+1); the final argument 9 is passed straight
    # through to pyhop — its semantics live in new_tihtn_planner.
    return new_tihtn_planner.pyhop(completable, allow, state0,[('get_img','direc-1', 'mode-3'),('get_img','direc-2', 'mode-1'),('get_img','direc-3', 'mode-2'),('get_img','direc-3', 'mode-3'),], [[0, 1],[1, 2],[2, 3],],9)
def add_methods(fun_obj_list):
    # Register each method function under its base task name: the part of
    # the function name before '__'.  NOTE: func_name is the Python 2
    # attribute (Python 3 would use fun.__name__).
    for fun in fun_obj_list:
        new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
    # Thin pass-through to the planner's global method-order reversal.
    new_tihtn_planner.reverse_methods()
"526552330@qq.com"
] | 526552330@qq.com |
e1a64d3bff5988fb4f8865402a4b92a79fba1cc4 | 0a00b61ca6f970ef9e19e3ee2b0a6b9b12fc3e36 | /Exercicios001a106/ex019.py | b54cdbc1e9cb634d26f6bcdc0a287e49f7de3c7c | [] | no_license | gabrielrbernardi/pyhtoncev | bf588f1c2388f3124f5a575cf2ef240360cd8c98 | fd9d0986196d1685366c55281b42b315595f380f | refs/heads/master | 2021-07-23T20:41:50.258090 | 2020-07-24T14:25:20 | 2020-07-24T14:25:20 | 201,134,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import random
# Read four student names from stdin and pick one at random.
# Note: input() already returns str, so the str() casts are redundant.
a1 = str(input('Primeiro aluno: '))
a2 = str(input('Segundo aluno: '))
a3 = str(input('Terceiro aluno: '))
a4 = str(input('Quarto aluno: '))
lista = [a1, a2, a3, a4]
choose = random.choice(lista)
print('O aluno escolhido foi {}'.format(choose))
"gabrielrbernardi@gmail.com"
] | gabrielrbernardi@gmail.com |
65aabc1185420c1de3350fef656d55b4d0889e67 | f3050b7f84e584dcde54ca1690944bfccc6f5d9c | /doReport.py | 1f3e2459f51cf6acf2c67e58f657274c4e11e715 | [] | no_license | azhenglianxi/api_Project | 0c8444c2bad7464fd57911be4fdcd131a63c46b2 | 2ae87b87e41f522d4ef20f63bad6adcaec1f9874 | refs/heads/master | 2020-09-14T12:08:07.080748 | 2019-12-12T09:08:22 | 2019-12-12T09:08:22 | 223,124,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import unittest
import ddt
from testCase.course.courseTest1 import CourseTest1
from testCase.course.courseTest2 import CourseTest2
import HtmlTestRunner
from HTMLTestRunner import HTMLTestRunner
# Build the test suite.
# Option 1-1: add the test cases to the suite one at a time.
# suite=unittest.TestSuite()
# suite.addTest(CourseTest1("test_101"))
# suite.addTest(CourseTest1("test_103"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest2("test_202"))
# Option 1-2: collect the cases in a list, then add them all at once.
# suite=unittest.TestSuite()
# list=[CourseTest1("test_101"),CourseTest1("test_103"),CourseTest1("test_102"),CourseTest2("test_202")]
# suite.addTests(list)
# Option 1-3 (active): use TestLoader.discover to find cases automatically.
suite=unittest.defaultTestLoader.discover('testCase',pattern="*Test*.py")
# Step 2: run the cases and inspect the results.
# 2-1 Variant 1: plain text runner, without the HtmlTestRunner plugin.
# runner=unittest.TextTestRunner()
# runner.run(suite)
# 2-2 Variant 2: the "classic" HTMLTestRunner plugin.
# Open a writable binary file for the report.
# reportFile=open('./report/经典Html报告4.html','wb')
# runner=HTMLTestRunner(stream=reportFile,verbosity=2,description="用例执行详细信息",
#                      title="测试报告")
# runner.run(suite)
# 2-3 Variant 3 (active): the latest HtmlTestRunner plugin.
runner=HtmlTestRunner.HTMLTestRunner(output='./report/',report_name='【最新版】html2测试报告',
                                     report_title='my_report')
runner.run(suite)
| [
"azhenglianxi@163.com"
] | azhenglianxi@163.com |
303259a3aaaedc72647f69f374321f24c870cfbb | cb98d404b1f99755fa051082e32f8725ee50f6c7 | /adult/loss.py | 12a1c3bcab193cb2b5839ee911cdf38dddd3a30c | [] | no_license | kingofspace0wzz/hsic-fair | 00f76a3068190f295ce7d7595f1b3581ea0e32e1 | ff0c71a34497767f20626674c4e770421ecbd045 | refs/heads/master | 2023-01-20T21:13:28.070288 | 2020-12-01T11:26:02 | 2020-12-01T11:26:02 | 187,965,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions.kl import kl_divergence
import numpy as np
import pyro.contrib.gp as gp
def recon_loss(recon, target):
    """Sum-reduced binary cross-entropy between a reconstruction and its target."""
    return F.binary_cross_entropy(recon, target, reduction='sum')
def total_kld(q_z, p_z):
    """Total (summed over all elements) KL divergence KL(q_z || p_z)."""
    per_element = kl_divergence(q_z, p_z)
    return per_element.sum()
def disc_loss(D_z, target):
    """Sum-reduced cross-entropy of discriminator logits against integer labels."""
    return F.cross_entropy(D_z, target, reduction='sum')
def perm_loss(joint, indepedent):
    """Density-ratio discriminator loss: joint samples get label 0, shuffled
    (independent) samples get label 1; the two mean cross-entropies are averaged."""
    real_labels = torch.zeros(joint.size(0), dtype=torch.long, device=joint.device)
    fake_labels = torch.ones(indepedent.size(0), dtype=torch.long, device=indepedent.device)
    joint_term = F.cross_entropy(joint, real_labels)
    marginal_term = F.cross_entropy(indepedent, fake_labels)
    return 0.5 * (joint_term + marginal_term)
def kernel_loss(weight1, weight2, z, kernel):
    """Squared Frobenius norm of W1 . kernel(relu(z), relu(z)) . W2^T."""
    gram = kernel(F.relu(z), F.relu(z))
    projected = torch.matmul(weight1, torch.matmul(gram, weight2.t()))
    return torch.pow(torch.norm(projected), 2)
def HSIC(z, s, fix=False):
    """Empirical Hilbert-Schmidt Independence Criterion between codes z and
    protected labels s: trace(K H L H) / (n-1)^2.

    fix=True uses an RBF kernel on z (via the module-level ``rbf`` helper);
    otherwise the linear kernel K = z z^T is used. Labels are compared through
    the one-hot kernel L = h h^T.
    """
    n = z.size(0)
    # Gram matrix of the codes.
    K = rbf(z, z) if fix else torch.matmul(z, z.t())
    # Centering matrix H = I - 11^T / n.
    H = torch.eye(n).to(z.device) - torch.ones_like(K) / n
    # One-hot label kernel.
    onehot = F.one_hot(s).float()
    L = torch.matmul(onehot, onehot.t())
    return torch.sum(torch.diag(torch.chain_matmul(K, H, L, H))) / (n - 1) ** 2
def COCO(z, s, fix=False):
    """Constrained-covariance style dependence measure between z and labels s:
    ||H K H . H L H||_F / n^2, with the same kernels as HSIC."""
    n = z.size(0)
    K = rbf(z, z) if fix else torch.matmul(z, z.t())
    # Centering matrix H = I - 11^T / n.
    H = torch.eye(n).to(z.device) - torch.ones_like(K) / n
    onehot = F.one_hot(s).float()
    L = torch.matmul(onehot, onehot.t())
    return torch.norm(torch.chain_matmul(H, K, H, H, L, H)) / n ** 2
def KCC(z, s):
    # Kernel canonical correlation — unimplemented placeholder (returns None).
    pass
def KMI(z, s):
    # Kernel mutual information — unimplemented placeholder (returns None).
    pass
def centering(K):
    """Double-center a Gram matrix: Q K Q with Q = I - 11^T / n."""
    n = K.size(0)
    Q = torch.eye(n).to(K.device) - torch.ones_like(K) / n
    return torch.matmul(torch.matmul(Q, K), Q)
class hinge_loss(nn.Module):
    """Element-wise hinge loss max(0, 1 - output * target); no reduction."""

    def __init__(self):
        super(hinge_loss, self).__init__()

    def forward(self, output, target):
        margin = 1 - torch.mul(output, target.float())
        return torch.clamp(margin, min=0)
def rbf(x, y):
    """Evaluate a freshly constructed pyro RBF kernel on (x, y); the kernel's
    input_dim is taken from x's last axis."""
    kernel = gp.kernels.RBF(input_dim=x.size(-1))
    return kernel(x, y)
"zizhuang_wang@umail.ucsb.edu"
] | zizhuang_wang@umail.ucsb.edu |
8f5c22d6d0bccf7a693d5312c5d3dcd28e422297 | 7053f887eb8cc3970c114802bc22df0c0083139b | /notes/security/optimized_assert.py | 837bdd4673cdd2b6c1ab82a483776e55aa2353db | [] | no_license | ofhellsfire/python-notes | 344e354aa51675080b9e9e99beff9435deaf4a8c | 3f9addc720fbaced3b36330300f8c3b1902fd14e | refs/heads/master | 2023-05-25T08:09:07.991294 | 2023-02-12T14:03:31 | 2023-02-12T14:03:31 | 202,361,412 | 5 | 0 | null | 2023-05-23T01:15:25 | 2019-08-14T13:56:58 | HTML | UTF-8 | Python | false | false | 676 | py | """
Shows what can happen if use assers with optimized mode.
Try to run as usual: python filename
Also try to run in optimized mode: python -O filename
"""
class User:
    """Minimal user record: a display name plus an admin flag."""

    def __init__(self, name, is_admin=False):
        # Plain attribute storage; no validation is performed.
        self.is_admin = is_admin
        self.name = name
def superuser_action(user, cmd):
    # Privilege check via `assert` — DELIBERATE for this demo: under
    # `python -O` asserts are stripped, so the check silently disappears.
    # Never use assert for real authorization (see module docstring).
    assert user.is_admin, f'User "{user.name}" does not have admin privileges'
    print(f'Execute cmd as {user.name}: {cmd}')
if __name__ == '__main__':
    # Demo: under normal execution the first call raises AssertionError and the
    # admin call is never reached; under `python -O` both calls "succeed".
    ordinary_user = User('Vlad', is_admin=False)
    admin_user = User('Masha', is_admin=True)
    cmd = 'cat /etc/passwd'
    superuser_action(ordinary_user, cmd=cmd)
    superuser_action(admin_user, cmd=cmd)
| [
"ofhellsfire@yandex.ru"
] | ofhellsfire@yandex.ru |
0a812bdc739543891a048953e785287cb642c599 | 73bd01b665f455f6889a513ef64605195c55f2cb | /Test/TestLexly/TestJsonImporter.py | 86b9c7aeac7c384a0078d35734952d8f4180110b | [] | no_license | fstakem/Lexly | 3d9e3648186df373166e9bf07dcfa8594213e740 | e5817c085723fa945c955054e471df7ea6535b9b | refs/heads/master | 2020-04-09T07:36:14.081826 | 2013-08-28T12:57:51 | 2013-08-28T12:57:51 | 12,242,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,802 | py | # ------------------------------------------------------
#
# TestJsonImporter.py
# By: Fred Stakem
# Created: 8.22.13
#
# ------------------------------------------------------
# Libs
import unittest
import json
# User defined
from Globals import *
from Utilities import *
from Lexly import JsonImporter
from Lexly import RawEventSeparator
from Lexly import Token
#Main
class JsonImporterTest(unittest.TestCase):
    """End-to-end test: import a JSON-defined parser and run it over a log file.

    Relies on the project's JsonImporter / RawEventSeparator / Token classes
    and on on-disk fixtures (a JSON parser spec and a small syslog sample).
    """

    # Setup logging
    logger = Utilities.getLogger(__name__)

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        """Configure fixture paths and force all debug flags on, saving the
        previous flag values so tearDown can restore them."""
        self.json_parser_path = '../../parsers/ubuntu_syslog.json'
        self.test_data_path = '../logs/ubuntu_sysint_logs/syslog_small'
        # None means "verify every event"; an int selects a single event.
        self.test_event = None
        self.fail_on_error = True
        self.lexer = None
        self.parser = None
        self.tmp_debug_lexer = globals.debug_lexer
        self.tmp_debug_parser = globals.debug_parser
        self.tmp_debug_state_machine = globals.debug_state_machine
        globals.debug_lexer = True
        globals.debug_parser = True
        globals.debug_state_machine = True

    def tearDown(self):
        """Restore the global debug flags captured in setUp."""
        globals.debug_lexer = self.tmp_debug_lexer
        globals.debug_parser = self.tmp_debug_parser
        globals.debug_state_machine = self.tmp_debug_state_machine

    @log_test(logger, globals.log_separator)
    def testImportParser(self):
        """Import the JSON parser definition, then verify it against the log fixture."""
        JsonImporterTest.logger.debug('Test the importation of a parser from json.')

        # Test data
        data = self.getJsonFromFile()

        # Show test data
        JsonImporterTest.logger.debug('Initial parser data:\n%s' % json.dumps(data, indent=4))

        # Run test
        JsonImporterTest.logger.debug('Parsing the json parser.')
        name, created, author, separator, self.lexer, self.parser = JsonImporter.importParser(data)

        # Show test output
        JsonImporterTest.logger.debug('Found name: %s' % name)
        JsonImporterTest.logger.debug('Found creation time: %s' % created)
        JsonImporterTest.logger.debug('Found author: %s' % author)
        JsonImporterTest.logger.debug('Found separator:\n%s' % separator.to_pretty_json())
        JsonImporterTest.logger.debug('Found lexer:\n%s' % self.lexer.to_pretty_json())
        JsonImporterTest.logger.debug('Found parser:\n%s' % self.parser.to_pretty_json())

        # Verify results
        events = self.getTestData()
        self.verifyParser(self.parser, events)
        JsonImporterTest.logger.debug('Test succeeded!')

    def verifyParser(self, parser, events):
        """Lex and parse each event, collecting (event_index, errors) tuples;
        fail the test if any errors were produced and fail_on_error is set."""
        # BUG FIX: compare to None with `is` rather than `==` (PEP 8 E711).
        if self.test_event is None:
            JsonImporterTest.logger.debug('Testing %d events.' % (len(events)))
        else:
            JsonImporterTest.logger.debug('Testing a single event: %d' % (self.test_event))
            events = [ events[self.test_event] ]

        all_errors = []
        for i, event in enumerate(events):
            JsonImporterTest.logger.debug('Working on event %d.' % (i))
            self.lexEvent(event)
            tokens = self.lexer.getAllTokens()
            errors = self.lexer.getAllErrors()
            if len(errors) > 0:
                all_errors.append((i, errors))

            self.parseEvent(tokens)
            errors = self.parser.getAllErrors()
            if len(errors) > 0:
                all_errors.append((i, errors))

        JsonImporterTest.logger.debug('Found the following errors while parsing the data.')
        for error_set in all_errors:
            JsonImporterTest.logger.debug('Found %d errors parsing event %d.' % (len(error_set[1]), error_set[0]))

        if self.fail_on_error:
            assert len(all_errors) == 0, 'Found errors during parsing.'

    def getJsonFromFile(self):
        """Read and json-decode the parser definition fixture."""
        JsonImporterTest.logger.debug('Using test data from file %s.' % (self.json_parser_path))
        json_data = Utilities.readDataFromFile(self.json_parser_path)
        JsonImporterTest.logger.debug('Found %d bytes in the data file.' % (len(json_data)))
        data = json.loads(json_data)
        return data

    def getTestData(self):
        """Read the raw log fixture and split it into newline-separated events."""
        JsonImporterTest.logger.debug('Using test data from file %s.' % (self.test_data_path))
        test_data = Utilities.readDataFromFile(self.test_data_path)
        JsonImporterTest.logger.debug('Found %d bytes in the data file.' % (len(test_data)))
        separator = RawEventSeparator('\n', 'Unit Test RawEventSeparator')
        events = separator.seperateEvents(test_data)
        JsonImporterTest.logger.debug('Found %d events in the data.' % (len(events)))
        return events

    def lexEvent(self, event):
        """Wrap one raw event in a Token, run the lexer, and assert it produced tokens."""
        token = Token('event', Token.ALWAYS_DATA, False, event)
        JsonImporterTest.logger.debug('Created token:\n%s' % (token.to_pretty_json()))
        self.lexer.start(token)
        tokens = self.lexer.getAllTokens()
        JsonImporterTest.logger.debug('Found %d tokens.' % (len(tokens)))
        assert len(tokens) > 0, 'No tokens found in list.'
        for i, sub_token in enumerate(tokens):
            JsonImporterTest.logger.debug('Found token %d:\n%s' % (i, sub_token.to_pretty_json()))

    def parseEvent(self, tokens):
        """Run the parser over the lexed tokens and assert it produced fields."""
        self.parser.start(tokens)
        fields = self.parser.getAllFlatFields()
        JsonImporterTest.logger.debug('Found %d fields.' % (len(fields)))
        assert len(fields) > 0, 'No fields found in list.'
        for i, field in enumerate(fields):
            JsonImporterTest.logger.debug('Found field %d:\n%s' % (i, field.to_pretty_json()))
| [
"fstakem@fstakem-mac.(none)"
] | fstakem@fstakem-mac.(none) |
78d781b8f81b16601a05083f18e8517528c0ccd2 | f9f94ac82a5e78adedd8c87118b13725f7e1cb13 | /service_management/faulty_logging/apps.py | 6db74420b66659fc35b37e95947ad2b4bda92048 | [] | no_license | poppykode/sm | 1b4245c8b03ecb0385ce282e5ab3c89bc3cb57d1 | 44dcf34db94de336a689f1e8456aa6bd802b7734 | refs/heads/master | 2022-11-30T02:25:39.471284 | 2020-01-13T12:04:19 | 2020-01-13T12:04:19 | 220,422,113 | 0 | 0 | null | 2022-11-22T04:18:23 | 2019-11-08T08:32:21 | JavaScript | UTF-8 | Python | false | false | 102 | py | from django.apps import AppConfig
class FaultyLoggingConfig(AppConfig):
    """Django application configuration for the ``faulty_logging`` app."""
    name = 'faulty_logging'
| [
"ngonimug@gmail.com"
] | ngonimug@gmail.com |
168fdb10b4718fe7c7c87e348c7b7ec7d041b22f | 6b42c5b9990cee8592f35827d878f436af17c0e8 | /pcc/ej93.py | 21cc933a2672da5fa3b6b095827270e75a68207d | [] | no_license | ignaciop/python_varios | 1f7c2f025e6197f6067433e50bb8d1a02c5e28a1 | e1b3919603885eb4d243226c6e42d0bcf3d78738 | refs/heads/master | 2020-05-22T16:20:32.085361 | 2020-04-14T04:00:37 | 2020-04-14T04:00:37 | 84,701,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | class User():
    def __init__(self, first_name, last_name, age, sex):
        """Store the user's basic profile attributes as given."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.sex = sex
def describe_user(self):
print(f"{self.first_name} {self.last_name} is a {self.age} years old {self.sex}")
def greet_user(self):
print(f"Welcome, {self.first_name} {self.last_name}!")
# Exercise demo: build two users, describe both, then greet both.
user1 = User("Pablo", "Perez", 34, "Male")
user2 = User("Maria", "Gonzalez", 65, "Female")
for person in (user1, user2):
    person.describe_user()
for person in (user1, user2):
    person.greet_user()
"ignaciop.3@gmail.com"
] | ignaciop.3@gmail.com |
93a33757267c9f7d69975301f674bd87eb387394 | 2710c12bcfcb322bdc3b3128ad63bca5d4eb4eab | /2class_clf_pytorch/models/dpn.py | c99aaece35bed61264fda4e2398bd9d1af0ba875 | [] | no_license | chaffeechen/pyTorchClassification | 0888f2ebfbe61ef6114d1d6753daa1cf5d7c0e1e | 5620e52538e9932874f9ebbae691c9334bbac3d4 | refs/heads/master | 2022-03-16T21:49:50.261021 | 2019-12-10T06:12:36 | 2019-12-10T06:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,318 | py | """ PyTorch implementation of DualPathNetworks
Ported to PyTorch by [Ross Wightman](https://github.com/rwightman/pytorch-dpn-pretrained)
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
This implementation is compatible with the pretrained weights
from cypw's MXNet implementation.
"""
from __future__ import print_function, division, absolute_import
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn92', 'dpn98', 'dpn131', 'dpn107']
# Metadata for the converted MXNet weights: download URL plus the input
# preprocessing (space, size, range, mean/std) each checkpoint expects.
# Keys follow Cadene/pretrained-models conventions; 'imagenet+5k' entries
# were trained on ImageNet-5k and fine-tuned on ImageNet-1k.
pretrained_settings = {
    'dpn68': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-4af7d88d2.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    },
    'dpn68b': {
        'imagenet+5k': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68b_extra-363ab9c19.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    },
    'dpn92': {
        # ImageNet-1k-only weights are not hosted; only the +5k entry is active.
        # 'imagenet': {
        #     'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-66bebafa7.pth',
        #     'input_space': 'RGB',
        #     'input_size': [3, 224, 224],
        #     'input_range': [0, 1],
        #     'mean': [124 / 255, 117 / 255, 104 / 255],
        #     'std': [1 / (.0167 * 255)] * 3,
        #     'num_classes': 1000
        # },
        'imagenet+5k': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn92_extra-fda993c95.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    },
    'dpn98': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn98-722954780.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    },
    'dpn131': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn131-7af84be88.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    },
    'dpn107': {
        'imagenet+5k': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn107_extra-b7f9f4cc9.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [124 / 255, 117 / 255, 104 / 255],
            'std': [1 / (.0167 * 255)] * 3,
            'num_classes': 1000
        }
    }
}
def dpn68(num_classes=1000, pretrained='imagenet'):
    """DPN-68 (the 'small' 3x3-stem variant); optionally load pretrained weights.

    Pass a falsy ``pretrained`` to skip downloading the checkpoint.
    """
    model = DPN(
        small=True, num_init_features=10, k_r=128, groups=32,
        k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn68'][pretrained]
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(model_zoo.load_url(settings['url']))
    # Attach the preprocessing metadata the checkpoint expects.
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    return model
def dpn68b(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-68b ('b' block variant).

    NOTE: weights are always looked up under the 'imagenet+5k' key and no
    num_classes consistency check is performed (mirrors the ported code).
    """
    model = DPN(
        small=True, num_init_features=10, k_r=128, groups=32,
        b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn68b']['imagenet+5k']
    model.load_state_dict(model_zoo.load_url(settings['url']))
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    print('load imagenet pre-trained model successfully!')
    return model
def dpn92(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-92.

    NOTE: weights are always looked up under the 'imagenet+5k' key and no
    num_classes consistency check is performed (mirrors the ported code).
    """
    model = DPN(
        num_init_features=64, k_r=96, groups=32,
        k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn92']['imagenet+5k']
    model.load_state_dict(model_zoo.load_url(settings['url']))
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    print('load imagenet pre-trained model successfully!')
    return model
def dpn98(num_classes=1000, pretrained='imagenet'):
    """DPN-98; optionally load pretrained ImageNet weights."""
    model = DPN(
        num_init_features=96, k_r=160, groups=40,
        k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn98'][pretrained]
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(model_zoo.load_url(settings['url']))
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    return model
def dpn131(num_classes=1000, pretrained='imagenet'):
    """DPN-131; optionally load pretrained ImageNet weights."""
    model = DPN(
        num_init_features=128, k_r=160, groups=40,
        k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn131'][pretrained]
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(model_zoo.load_url(settings['url']))
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    return model
def dpn107(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-107; optionally load the 'imagenet+5k' pretrained weights."""
    model = DPN(
        num_init_features=128, k_r=200, groups=50,
        k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128),
        num_classes=num_classes, test_time_pool=True)
    if not pretrained:
        return model
    settings = pretrained_settings['dpn107'][pretrained]
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(model_zoo.load_url(settings['url']))
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])
    return model
class CatBnAct(nn.Module):
    """Concatenate a (residual, dense) tuple along channels (if given a tuple),
    then apply BatchNorm and the activation."""

    def __init__(self, in_chs, activation_fn=nn.ReLU(inplace=True)):
        super(CatBnAct, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn

    def forward(self, x):
        if isinstance(x, tuple):
            x = torch.cat(x, dim=1)
        return self.act(self.bn(x))
class BnActConv2d(nn.Module):
    """Pre-activation convolution: BatchNorm -> activation -> Conv2d (no bias)."""

    def __init__(self, in_chs, out_chs, kernel_size, stride,
                 padding=0, groups=1, activation_fn=nn.ReLU(inplace=True)):
        super(BnActConv2d, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn
        self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding,
                              groups=groups, bias=False)

    def forward(self, x):
        normed = self.act(self.bn(x))
        return self.conv(normed)
class InputBlock(nn.Module):
    """Network stem: strided conv -> BN -> activation -> 3x3 max-pool
    (overall 4x spatial downsample of an RGB input)."""

    def __init__(self, num_init_features, kernel_size=7,
                 padding=3, activation_fn=nn.ReLU(inplace=True)):
        super(InputBlock, self).__init__()
        self.conv = nn.Conv2d(3, num_init_features, kernel_size=kernel_size,
                              stride=2, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(num_init_features, eps=0.001)
        self.act = activation_fn
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return self.pool(self.act(self.bn(self.conv(x))))
class DualPathBlock(nn.Module):
    """One DPN block combining a residual path and a densely-connected path.

    Args:
        in_chs: channels of the (possibly concatenated) input.
        num_1x1_a: output channels of the first 1x1 conv.
        num_3x3_b: output channels of the grouped 3x3 conv.
        num_1x1_c: residual-path channels of the final 1x1 conv.
        inc: dense-path channel increment contributed by this block.
        groups: group count for the 3x3 conv.
        block_type: 'proj' (stride-1 projection shortcut), 'down'
            (stride-2 projection shortcut) or 'normal' (identity shortcut).
        b: build the DPN-"b" variant (shared BN/act, separate 1x1 heads).
    """

    def __init__(
            self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False):
        super(DualPathBlock, self).__init__()
        self.num_1x1_c = num_1x1_c
        self.inc = inc
        self.b = b
        # BUG FIX: the original compared strings with `is`, which tests object
        # identity and only works by the accident of CPython string interning
        # (it emits SyntaxWarning on Python 3.8+). Use `==` instead.
        if block_type == 'proj':
            self.key_stride = 1
            self.has_proj = True
        elif block_type == 'down':
            self.key_stride = 2
            self.has_proj = True
        else:
            assert block_type == 'normal', 'unknown block_type: %r' % (block_type,)
            self.key_stride = 1
            self.has_proj = False

        if self.has_proj:
            # Using different member names here to allow easier parameter key matching for conversion
            if self.key_stride == 2:
                self.c1x1_w_s2 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
            else:
                self.c1x1_w_s1 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
        self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
        self.c3x3_b = BnActConv2d(
            in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3,
            stride=self.key_stride, padding=1, groups=groups)
        if b:
            self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
            self.c1x1_c1 = nn.Conv2d(num_3x3_b, num_1x1_c, kernel_size=1, bias=False)
            self.c1x1_c2 = nn.Conv2d(num_3x3_b, inc, kernel_size=1, bias=False)
        else:
            self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)

    def forward(self, x):
        # x is either a tensor or a (residual, dense) tuple from the previous block.
        x_in = torch.cat(x, dim=1) if isinstance(x, tuple) else x
        if self.has_proj:
            if self.key_stride == 2:
                x_s = self.c1x1_w_s2(x_in)
            else:
                x_s = self.c1x1_w_s1(x_in)
            # First num_1x1_c channels feed the residual path, the rest the dense path.
            x_s1 = x_s[:, :self.num_1x1_c, :, :]
            x_s2 = x_s[:, self.num_1x1_c:, :, :]
        else:
            x_s1 = x[0]
            x_s2 = x[1]
        x_in = self.c1x1_a(x_in)
        x_in = self.c3x3_b(x_in)
        if self.b:
            x_in = self.c1x1_c(x_in)
            out1 = self.c1x1_c1(x_in)
            out2 = self.c1x1_c2(x_in)
        else:
            x_in = self.c1x1_c(x_in)
            out1 = x_in[:, :self.num_1x1_c, :, :]
            out2 = x_in[:, self.num_1x1_c:, :, :]
        resid = x_s1 + out1
        dense = torch.cat([x_s2, out2], dim=1)
        return resid, dense
class DPN(nn.Module):
    """Dual Path Network backbone + 1x1-conv classifier head.

    Args:
        small: use the 3x3-stem "small" variant (DPN-68 style); also narrows
            every stage by the bw_factor below.
        num_init_features: channels produced by the stem conv.
        k_r: reference bottleneck width, rescaled per stage.
        groups: cardinality of the grouped 3x3 convolutions.
        b: build the DPN-"b" block variant.
        k_sec: number of DualPathBlocks in each of the four stages.
        inc_sec: dense-path channel increment for each stage.
        num_classes: classifier output size.
        test_time_pool: enable the extra avg+max pooling scheme at eval time.
    """
    def __init__(self, small=False, num_init_features=64, k_r=96, groups=32,
                 b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
                 num_classes=1000, test_time_pool=False):
        super(DPN, self).__init__()
        self.test_time_pool = test_time_pool
        self.b = b
        # The 'small' variant keeps stage widths at 1x; otherwise 4x.
        bw_factor = 1 if small else 4
        blocks = OrderedDict()
        # conv1: stem (3x3 for the small variant, 7x7 otherwise)
        if small:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=3, padding=1)
        else:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=7, padding=3)
        # conv2: stage opens with a 'proj' block, then 'normal' blocks; each
        # block grows the dense path by `inc` channels.
        bw = 64 * bw_factor
        inc = inc_sec[0]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[0] + 1):
            blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv3: downsampling stage (opens with a stride-2 'down' block)
        bw = 128 * bw_factor
        inc = inc_sec[1]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[1] + 1):
            blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv4
        bw = 256 * bw_factor
        inc = inc_sec[2]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[2] + 1):
            blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv5
        bw = 512 * bw_factor
        inc = inc_sec[3]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[3] + 1):
            blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # Final BN/act over the concatenated dual paths.
        blocks['conv5_bn_ac'] = CatBnAct(in_chs)
        self.features = nn.Sequential(blocks)
        # Using 1x1 conv for the FC layer to allow the extra pooling scheme
        self.last_linear = nn.Conv2d(in_chs, num_classes, kernel_size=1, bias=True)
    def logits(self, features):
        # Eval-time variant: 7x7 avg-pool, classify, then avg+max pool the
        # remaining spatial map; training path uses plain global avg pooling.
        if not self.training and self.test_time_pool:
            x = F.avg_pool2d(features, kernel_size=7, stride=1)
            out = self.last_linear(x)
            # The extra test time pool should be pooling an img_size//32 - 6 size patch
            out = adaptive_avgmax_pool2d(out, pool_type='avgmax')
        else:
            x = adaptive_avgmax_pool2d(features, pool_type='avg')
            out = self.last_linear(x)
        # Flatten (N, num_classes, 1, 1) -> (N, num_classes).
        return out.view(out.size(0), -1)
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim
Both a functional and a nn.Module version of the pooling is provided.
Author: Ross Wightman (rwightman)
"""
def pooling_factor(pool_type='avg'):
    """Channel multiplier for a pooling mode: 'avgmaxc' concatenates the avg
    and max results and therefore doubles the feature dimension."""
    if pool_type == 'avgmaxc':
        return 2
    return 1
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Global pooling over the full spatial extent of x.

    pool_type: 'avg', 'max', 'avgmax' (0.5 * (avg + max)) or 'avgmaxc'
    (avg and max concatenated along the channel dim). An unknown value
    falls back to average pooling with a warning printed.
    """
    kernel = (x.size(2), x.size(3))
    if pool_type == 'avgmaxc':
        avg = F.avg_pool2d(x, kernel_size=kernel, padding=padding,
                           count_include_pad=count_include_pad)
        mx = F.max_pool2d(x, kernel_size=kernel, padding=padding)
        return torch.cat([avg, mx], dim=1)
    if pool_type == 'avgmax':
        avg = F.avg_pool2d(x, kernel_size=kernel, padding=padding,
                           count_include_pad=count_include_pad)
        mx = F.max_pool2d(x, kernel_size=kernel, padding=padding)
        return 0.5 * (avg + mx)
    if pool_type == 'max':
        return F.max_pool2d(x, kernel_size=kernel, padding=padding)
    if pool_type != 'avg':
        print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
    return F.avg_pool2d(x, kernel_size=kernel, padding=padding,
                        count_include_pad=count_include_pad)
class AdaptiveAvgMaxPool2d(torch.nn.Module):
    """Module form of the selectable global pooling with dynamic kernel size.

    See ``adaptive_avgmax_pool2d`` for the pool_type semantics; unknown types
    fall back to average pooling with a printed warning.
    """

    def __init__(self, output_size=1, pool_type='avg'):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
        self.pool_type = pool_type
        if pool_type in ('avgmaxc', 'avgmax'):
            self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size),
                                       nn.AdaptiveMaxPool2d(output_size)])
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            if pool_type != 'avg':
                print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
            self.pool = nn.AdaptiveAvgPool2d(output_size)

    def forward(self, x):
        if self.pool_type == 'avgmaxc':
            return torch.cat([p(x) for p in self.pool], dim=1)
        if self.pool_type == 'avgmax':
            stacked = torch.stack([p(x) for p in self.pool])
            return 0.5 * torch.sum(stacked, 0).squeeze(dim=0)
        return self.pool(x)

    def factor(self):
        return pooling_factor(self.pool_type)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + 'output_size=' + str(self.output_size) \
            + ', pool_type=' + self.pool_type + ')'
""
] | |
96abf21b0b9d194f1c945970763a2e2171fd35f8 | f347ddf8f11b748b09646aabd3c4d807e49d6e86 | /clients/migrations/0038_clientaquaaerobics_block_comment.py | e58624dd513bc4c26a72d543183e3577ceec36b2 | [] | no_license | gitavk/fcbp | b630a8570b46557ee0ffd20ae1baa57741147766 | 02ffcc54a805861a098952b388bfd28ec69b176a | refs/heads/master | 2021-01-17T02:19:58.572362 | 2018-11-12T07:09:07 | 2018-11-12T07:09:07 | 39,645,922 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the optional free-text ``block_comment`` field (max 150 chars)
    to the ``ClientAquaAerobics`` model."""

    dependencies = [
        ('clients', '0037_prolongationaqua'),
    ]

    operations = [
        migrations.AddField(
            model_name='clientaquaaerobics',
            name='block_comment',
            field=models.CharField(max_length=150, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| [
"3apa3ka3@gmail.com"
] | 3apa3ka3@gmail.com |
1630d428b45f4ba249a3ce615b8614472bebbcec | efd55bc63da8ab6ee964ec82bd0b761fd36107cc | /leetcode/easy/add-strings.py | 9a65e671a52bd084d149cc8082da1b152c7e4665 | [] | no_license | gsantam/competitive-programming | f9a2c9999470eeae9ef4aada6af43b91a65fcb50 | 0b208516a6ae3e72bc7b79ef0ac83dcbfa100496 | refs/heads/master | 2021-06-20T23:27:30.274275 | 2021-06-20T19:44:51 | 2021-06-20T19:44:51 | 162,201,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution:
def addStrings(self, num1: str, num2: str) -> str:
i = 0
l1 = len(num1)
l2 = len(num2)
rest = 0
total_sum = 0
while l1-1-i>=0 or l2-1-i>=0:
sum_ = rest
if l1-1-i>=0:
sum_+=int(num1[l1-1-i])
if l2-1-i>=0:
sum_+=int(num2[l2-1-i])
rest = sum_//10
sum_ = sum_%10
total_sum+=sum_*(10**i)
i+=1
if rest!=0:
total_sum+=rest*(10**i)
return str(total_sum)
| [
"santamaria.guille@gmail.com"
] | santamaria.guille@gmail.com |
06733454c03a927c81749213c56d3ce4e68a7c5d | e23d16c738296df79f932aeab48d671f24ae3732 | /563 - Binary Tree Tilt.py | 60f1e0f3780ba058417ab814f0dfb92b42a467fe | [] | no_license | y4lp/leets-easy | 32e579b18036eb41cc15858355ddb60267c96c33 | 7ef430042455a193fea05a54e6638a6917fc4783 | refs/heads/master | 2023-05-30T19:26:18.665390 | 2021-06-18T09:17:48 | 2021-06-18T09:17:48 | 378,097,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 563: sum of every node's tilt |sum(left) - sum(right)|."""

    # Kept for backward compatibility; findTilt resets it per call below.
    absd = 0

    def findTilt(self, root: 'TreeNode') -> int:
        """Return the total tilt of the tree rooted at ``root``.

        The annotation is quoted so this module imports standalone (TreeNode
        is only defined in the LeetCode harness / commented stub above).
        """
        # BUG FIX: reset the accumulator so repeated calls on the same
        # Solution instance don't carry over the previous total.
        self.absd = 0
        self.post(root)
        return self.absd

    def post(self, root):
        """Post-order helper: returns the subtree sum while accumulating
        each node's tilt into self.absd."""
        if root is None:
            return 0
        lsum = self.post(root.left)
        rsum = self.post(root.right)
        self.absd += abs(lsum - rsum)
        return lsum + rsum + root.val
"821738231@qq.com"
] | 821738231@qq.com |
31a0056a22fc8bf8b4a21bb38c296bb8223aae2e | e234483365006af2accfdc068ce5a39b21f2491f | /C/project_advance_views/project_advance_views/settings.py | 33816ff65fe857ea41af1af1ccfd283fa5cc7bd9 | [] | no_license | hamdiranu/DJANGO_MVC | 0536a401139caae8adb9f8a20d070a24c136c80c | 75483d7405f8b28a831328ca09ad8f7eb6a5f4cd | refs/heads/master | 2021-09-23T07:45:08.925050 | 2019-12-11T08:53:07 | 2019-12-11T08:53:07 | 226,776,038 | 0 | 0 | null | 2021-09-22T18:06:24 | 2019-12-09T03:24:08 | Python | UTF-8 | Python | false | false | 3,152 | py | """
Django settings for project_advance_views project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '52au!llud8wufre_y*r&kl@tb%)b+#eeht2-yzfqcaoi5+8l24'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Project app 'blog' plus the Django default contrib apps.
INSTALLED_APPS = [
    'blog',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_advance_views.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'project_advance_views.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Default development database: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Jakarta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"hamdi@alterra.id"
] | hamdi@alterra.id |
df755ed0b68012deb2fba335ba102bf8c0a74940 | 544c4d9822ca42764a60d55b804e8eaabc345cab | /operation/urls_api_0_1.py | 4ff56cef10af6bfa0df6fa4a11d65e05c80fa2ba | [] | no_license | lxguidu/parkhero | 24a3cf28ed3f9ed594137080c36bc317453f66ba | b5f5e2d13ac46812666c0e9d20bfd35b335a4994 | refs/heads/master | 2021-01-12T14:49:29.404775 | 2016-10-27T10:57:45 | 2016-10-27T10:57:45 | 72,099,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #-*- coding: utf-8 -*-
from django.conf.urls import url
from operation import views
urlpatterns = [
    # Vehicle entry/exit and payment endpoints
    url(r'^vehicle_in/$', views.vehicle_in_api, name='vehicle_in_api'),
    url(r'^vehicle_out/$', views.vehicle_out_api, name='vehicle_out_api'),
    url(r'^offline_payment/$', views.offline_payment_api, name='offline_payment_api'),
    url(r'^online_payment/$', views.online_payment_api, name='online_payment_api'),
    url(r'^finance/$', views.Finance.as_view(), name='Finance'),
    url(r'^prepayment/$', views.prepayment_api, name='prepayment_api'),
    # Parking-lot resources
    url(r'^parklots/$', views.ParkLot.as_view(), name='ParkLot'),
    #url(r'^parking_lots/$', views.parking_lots_api, name='parking_lots_api'),
    #url(r'^parking_lots/upload_tool_info/$', views.upload_tool_info_api, name='upload_tool_info_api'),
    url(r'^parking_lots/image/$', views.parking_lot_image_api, name='parking_lot_image_api'),
    # Mobile-app assets and versioning
    url(r'^app_version/$', views.app_version_api, name='app_version_api'),
    #url(r'^app/version/upload/$', views.app_package_upload_api, name='app_package_upload_api'),
    url(r'^app_pack_upload/$', views.app_package_upload_api, name='app_package_upload_api'),
    url(r'^app_startup_page/$', views.app_startup_image_api, name='app_startup_image_api'),
    url(r'^app_index_page/$', views.app_index_image_api, name='app_index_image_api'),
    url(r'^app_cover_page/$', views.app_cover_image_api, name='app_cover_image_api'),
    # End-user info and comments
    url(r'^end_user/user_info/$', views.end_user_info_api, name='end_user_info_api'),
    url(r'^end_user/comments/$', views.end_user_comments_api, name='end_user_comments_api'),
    # Parking-lot connectivity status callbacks
    url(r'^parkinglot_online/$', views.parkinglot_online_api, name='parkinglot_online_api'),
    url(r'^parkinglot_connected/$', views.parkinglot_connected_api, name='parkinglot_connected_api'),
    url(r'^parkinglot_disconnected/$', views.parkinglot_disconnected_api, name='parkinglot_disconnected_api'),
    #url(r'^mobile_app/version/$', views.file_upload_api, name='file_upload_api'),
]
| [
"root@work.linxg.com"
] | root@work.linxg.com |
c698cc940395b86d51ad607617ad4d30166fe1b3 | 4ebbe0832a996253f5dc850d09ab001c702f4131 | /multi_trigger_multi_target_repaired.py | 0ee143b489eefb934d49034f45ba00f363d93677 | [] | no_license | raghuslash/sec2020 | 435d3ec29fc17ef27c6a3d5646cc38b6e0f96259 | e7fcb96810bad0c5f049a628b5c3f4d0e8bf94da | refs/heads/main | 2023-02-03T14:45:54.956235 | 2020-12-22T17:16:13 | 2020-12-22T17:16:13 | 320,161,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | import keras
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
import keract
import repair
def data_loader(filepath):
    """Load an HDF5 dataset of images and labels.

    Returns (x, y) where x is the 'data' array transposed from NCHW to
    NHWC and scaled to [0, 1], and y is the 'label' array.
    """
    # Context manager closes the HDF5 file (original leaked the handle);
    # np.array() copies the datasets, so reading after close is safe.
    with h5py.File(filepath, 'r') as data:
        x_data = np.array(data['data'])
        y_data = np.array(data['label'])
    x_data = x_data.transpose((0, 2, 3, 1))  # NCHW -> NHWC
    return x_data / 255.0, y_data
def first_run_for_info(clean_data_path, pois_data_path, model_path, info_path):
    """One-off analysis pass: build a Repair instance for the given model and
    data, run target detection and exploration, and persist the result to
    `info_path` for later `load_info` calls."""
    repairer = repair.Repair(model_path, clean_data_path, pois_data_path)
    repairer.find_target()
    repairer.explore()
    repairer.save_info(info_path)
def main():
    """Detect poisoned inputs for the multi-trigger, multi-target backdoored net.

    Reads an image path (or an .h5 dataset) from argv[1], predicts with the
    backdoored model, then filters predictions through one Repair instance
    per known trigger; samples flagged as poisoned are relabelled 1283.
    """
    clean_data_path = 'data/clean_test_data.h5'
    model_path = 'models/multi_trigger_multi_target_bd_net.h5'
    pois_data_path1 = 'data/sunglasses_poisoned_data.h5'
    pois_data_path2 = 'data/eyebrows_poisoned_data.h5'
    pois_data_path3 = 'data/lipstick_poisoned_data.h5'

    # first_run_for_info(clean_data_path, pois_data_path1, model_path, 'multi_trig1.dat')  # uncomment if info not available
    # first_run_for_info(clean_data_path, pois_data_path2, model_path, 'multi_trig2.dat')  # uncomment if info not available
    # first_run_for_info(clean_data_path, pois_data_path3, model_path, 'multi_trig3.dat')  # uncomment if info not available

    # One Repair filter per trigger, restored from the precomputed info files.
    t1 = repair.Repair(model_path, clean_data_path, pois_data_path1)
    t1.load_info('multi_trig1.dat')
    t2 = repair.Repair(model_path, clean_data_path, pois_data_path2)
    t2.load_info('multi_trig2.dat')
    t3 = repair.Repair(model_path, clean_data_path, pois_data_path3)
    t3.load_info('multi_trig3.dat')

    model = keras.models.load_model(model_path)

    input_path = sys.argv[1]  # renamed from `input` to stop shadowing the builtin
    if input_path.endswith('.h5'):
        X, _ = data_loader(input_path)
    else:
        x = plt.imread(input_path)
        x = x[:, :, :3]  # drop the alpha channel if present
        X = np.array([x])

    y = np.argmax(model.predict(X), axis=1)
    # Apply the three trigger filters in sequence.
    y_filtered = t1.detect_and_filter(X, y)
    y_filtered = t2.detect_and_filter(X, y_filtered)
    y_filtered = t3.detect_and_filter(X, y_filtered)
    print(y_filtered)
    print(f'Detected {(y_filtered==1283).sum()/y_filtered.shape[0]*100}% as poisoned.')
if __name__ == "__main__":
main() | [
"raghu@caramail.com"
] | raghu@caramail.com |
49383e833cb705190cd94f4ed37f55b6866e2233 | edfe02ab1e45708ad09a86ef85d1b53b065de1fe | /seventhStudy/Chapter 17/image.py | 604ba5e5fdc4f4b84e5b7bb7896616cacfa9b72b | [] | no_license | kor-Chipmunk/PythonEducation | d88e490a2d042919dafaff0463dad63611947800 | d98e230646d263e8d9ca3480467f785ff2beda99 | refs/heads/master | 2021-01-11T17:07:21.857179 | 2017-01-30T04:24:10 | 2017-01-30T04:24:10 | 79,725,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # -*- encoding: utf-8 -*-
import pygame
pygame.init()
# 300x100 window for the scrolling-runner demo.
screen = pygame.display.set_mode((300, 100))
pygame.display.set_caption("Drawing image")
clock = pygame.time.Clock()
run = True
# Load the runner sprite and its bounding rect.
runner_img = pygame.image.load("img/runner.png")
runner_rect = runner_img.get_rect()
# Main game loop
while run:
    # 1) Handle user input
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    # 2) Update game state: move the runner right, wrap to the left edge
    #    once it leaves the window.
    if runner_rect.x > screen.get_width():
        runner_rect.x = 0
    else:
        runner_rect.x += 1
    # 3) Draw: blue background, then the runner, capped at 60 FPS.
    screen.fill(pygame.color.Color(0, 0, 255))
    screen.blit(runner_img, runner_rect)
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
"rhj4862@gmail.com"
] | rhj4862@gmail.com |
23ae94692bcda1f0250f8ed887b724e5f84cbd01 | c0b5c5a2f6a8e141ece35f3ef7f9c915df4eb2f8 | /arch/numbers.py | 4e68400971405cbb9efd5ca787b35a4af20d5895 | [] | no_license | andrewfarah/ember | 1a2fa7726f5ff043b9cea7c33265ba5db888db41 | d44b1c41c7cb757b46f3001e524b213acc40c21f | refs/heads/master | 2020-05-29T08:47:29.706981 | 2016-10-05T00:01:37 | 2016-10-05T00:01:37 | 70,015,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | import random
def game():
    """Interactive number-guessing game: guess a 1-10 secret in 5 tries,
    with the option to replay after a win or after running out of guesses."""
    # gen a random number btw 1 and 10
    secret_num = random.randint(1, 10)
    guesses_left = 5
    while True:
        # non-integer input is caught and re-prompted
        try:
            # get a num from a player
            guess = int(input("Guess a number between 1 and 10: "))
        except ValueError:
            print("Sorry. Integers only, please.")
        else:
            # compare num to random number
            if guesses_left == 0:
                print("Game Over! You have {} guesses left".format(guesses_left))
                # let people play again
                # NOTE(review): secret_num is NOT regenerated on replay, so the
                # same secret is reused -- confirm whether that is intended.
                start_over = str(input("Would you like to try again? Y or N? ")).lower()
                if start_over == "n":
                    print("Game over!")
                    break
                elif start_over == "y":
                    print("Let's do this. You've got 5 more guesses!")
                    guesses_left += 5
            elif guess != secret_num:
                # NOTE(review): the count shown is the value BEFORE this
                # decrement, so it reads one higher than guesses remaining.
                print("Try again. You have {} guesses left".format(guesses_left))
                guesses_left -= 1
                # print "too low" or too high for bad guesses
                if guess < secret_num:
                    print("Higher..")
                else:
                    print("Lower..")
                continue
            elif guess == secret_num:
                print("Yes! {} is correct!".format(guess))
                start_over = str(input("Would you like to play again? Y or N? ")).lower()
                if start_over == "n":
                    print("Game over!")
                    break
                elif start_over == "y":
                    print("Let's do this. You've got 5 more guesses!")
                    guesses_left = 5
game()
"andrew.mfarah@gmail.com"
] | andrew.mfarah@gmail.com |
88da23c8de3ec260538b12fc8b4aaa0aadc5dc17 | 9bb92edc8f60cf393d9ef47768524ca00f0cc3a1 | /model_trainer/connective_classifier/conn_dict_creator.py | b377be04ef1739c281392eabe80c2f49328aad20 | [] | no_license | qkaren/CoNLL2016-CNN | e514e5970467edcaff3a3213b7ca0d69072ce378 | 86ee7d769c7aaf7fa9a93478e84c41b3fa13d483 | refs/heads/master | 2020-07-31T21:42:14.799674 | 2017-09-28T13:51:32 | 2017-09-28T13:51:32 | 73,591,629 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 84,119 | py | #coding:utf-8
import config, util
from pdtb import PDTB
from pdtb_parse import PDTB_PARSE
from syntax_tree import Syntax_tree
# from .conn_head_mapper import ConnHeadMapper
import conn_dict_util as dict_util
class ConnectiveDict:
    def __init__(self, pdtb_parse):
        """Cache the parsed PDTB data used to build feature dictionaries.

        pdtb_parse: object exposing disc_conns_dict / non_disc_conns_dict
        (keyed by (DocID, sent_index), each value a list of connective
        token-index lists -- see the create_* methods) and parse_dict
        (DocID -> parsed sentence data).
        """
        self.pdtb_parse = pdtb_parse
        self.disc_conns_dict = self.pdtb_parse.disc_conns_dict
        self.non_disc_conns_dict = self.pdtb_parse.non_disc_conns_dict
        self.parse_dict = pdtb_parse.parse_dict
#生成c 的POS字典。
#读取 pdtb-parses 的文件,获取c 的 pos tag
def create_CPOS_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
pos_tag_list = []
for conn_index in conn_indices:
pos_tag_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
pos_tag = "_".join(pos_tag_list)
if pos_tag not in dict:
dict[pos_tag] = 0
dict[pos_tag] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
pos_tag_list = []
for conn_index in conn_indices:
pos_tag_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
pos_tag = "_".join(pos_tag_list)
if pos_tag not in dict:
dict[pos_tag] = 0
dict[pos_tag] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict,dict_path)
# prev + C
def create_prev_C_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
prev_C = "%s|%s" % (prev, conn_name)
if prev_C not in dict:
dict[prev_C] = 0
dict[prev_C] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
prev_C = "%s|%s" % (prev, conn_name)
if prev_C not in dict:
dict[prev_C] = 0
dict[prev_C] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# prevPOS
def create_prevPOS_dict(self, dict_path, threshold = 1):
dict = {}
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
if prev_pos not in dict:
dict[prev_pos] = 0
dict[prev_pos] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
if prev_pos not in dict:
dict[prev_pos] = 0
dict[prev_pos] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict,dict_path)
# prePOS + CPOS
def create_prePOS_CPOS_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
pos_tag_list = []
for conn_index in conn_indices:
pos_tag_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
pos_tag = "_".join(pos_tag_list)
prePOS_CPOS = "%s|%s" % (prev_pos, pos_tag)
if prePOS_CPOS not in dict:
dict[prePOS_CPOS] = 0
dict[prePOS_CPOS] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
pos_tag_list = []
for conn_index in conn_indices:
pos_tag_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
pos_tag = "_".join(pos_tag_list)
prePOS_CPOS = "%s|%s" % (prev_pos, pos_tag)
if prePOS_CPOS not in dict:
dict[prePOS_CPOS] = 0
dict[prePOS_CPOS] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict,dict_path)
# C + next
def create_C_next_dict(self, dict_path, threshold = 1):
dict = {}
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
C_next = "%s|%s" % (conn_name, next)
if C_next not in dict:
dict[C_next] = 0
dict[C_next] += 1
#非语篇连接词
for (DocID, sent_index) in list(self.non_disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
C_next = "%s|%s" % (conn_name, next)
if C_next not in dict:
dict[C_next] = 0
dict[C_next] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# next pos
def create_nextPOS_dict(self, dict_path, threshold = 1):
dict = {}
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
# 连接词的后面一个词的pos
''' next pos '''
if next == "NONE":
nextPOS = "NONE"
else:
nextPOS = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][1]["PartOfSpeech"]
if nextPOS not in dict:
dict[nextPOS] = 0
dict[nextPOS] += 1
#非语篇连接词
for (DocID, sent_index) in list(self.non_disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
# 连接词的后面一个词的pos
''' next pos '''
if next == "NONE":
nextPOS = "NONE"
else:
nextPOS = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][1]["PartOfSpeech"]
if nextPOS not in dict:
dict[nextPOS] = 0
dict[nextPOS] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# C POS + next POS
def create_CPOS_nextPOS_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
CPOS_list = []
for conn_index in conn_indices:
CPOS_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
CPOS = "_".join(CPOS_list)
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
# 连接词的后面一个词的pos
''' next pos '''
if next == "NONE":
nextPOS = "NONE"
else:
nextPOS = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][1]["PartOfSpeech"]
CPOS_nextPOS = "%s|%s" % (CPOS, nextPOS)
if CPOS_nextPOS not in dict:
dict[CPOS_nextPOS] = 0
dict[CPOS_nextPOS] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句子长度,该doc的总句子数
sent_count = len(self.parse_dict[DocID]["sentences"])
sent_length = len(self.parse_dict[DocID]["sentences"][sent_index]["words"])
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
CPOS_list = []
for conn_index in conn_indices:
CPOS_list.append(self.parse_dict[DocID]["sentences"][sent_index]["words"][conn_index][1]["PartOfSpeech"])
CPOS = "_".join(CPOS_list)
flag = 0
next_index = conn_indices[-1] + 1
next_sent_index = sent_index
if next_index >= sent_length:
next_sent_index += 1
next_index = 0
if next_sent_index >= sent_count:
flag = 1
# 连接词的后面一个词
if flag == 1:
next = "NONE"
else:
next = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][0]
# 连接词的后面一个词的pos
''' next pos '''
if next == "NONE":
nextPOS = "NONE"
else:
nextPOS = self.parse_dict[DocID]["sentences"][next_sent_index]["words"][next_index][1]["PartOfSpeech"]
CPOS_nextPOS = "%s|%s" % (CPOS, nextPOS)
if CPOS_nextPOS not in dict:
dict[CPOS_nextPOS] = 0
dict[CPOS_nextPOS] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# path of c's parent to root
def create_CParent_to_root_path_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path += syntax_tree.get_node_path_to_root(conn_parent_node) + "&"
if path[-1] == "&":
path = path[:-1]
if path not in dict:
dict[path] = 0
dict[path] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path += syntax_tree.get_node_path_to_root(conn_parent_node) + "&"
if path[-1] == "&":
path = path[:-1]
if path not in dict:
dict[path] = 0
dict[path] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# compressed path of c's parent to root
def create_compressed_CParent_to_root_path_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
compressed_path = "NONE_TREE"
else:
compressed_path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path = syntax_tree.get_node_path_to_root(conn_parent_node)
compressed_path += util.get_compressed_path(path) + "&"
if compressed_path[-1] == "&":
compressed_path = compressed_path[:-1]
if compressed_path not in dict:
dict[compressed_path] = 0
dict[compressed_path] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
compressed_path = "NONE_TREE"
else:
compressed_path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path = syntax_tree.get_node_path_to_root(conn_parent_node)
compressed_path += util.get_compressed_path(path) + "&"
if compressed_path[-1] == "&":
compressed_path = compressed_path[:-1]
if compressed_path not in dict:
dict[compressed_path] = 0
dict[compressed_path] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# Pitler :self_category
def create_self_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
if self_category not in dict:
dict[self_category] = 0
dict[self_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
if self_category not in dict:
dict[self_category] = 0
dict[self_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# Pitler :parent_category
def create_parent_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
if parent_category not in dict:
dict[parent_category] = 0
dict[parent_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
if parent_category not in dict:
dict[parent_category] = 0
dict[parent_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# Pitler : left_sibling_category
def create_left_sibling_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
if left_sibling_category not in dict:
dict[left_sibling_category] = 0
dict[left_sibling_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
if left_sibling_category not in dict:
dict[left_sibling_category] = 0
dict[left_sibling_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# Pitler : right_sibling_category
def create_right_sibling_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
if right_sibling_category not in dict:
dict[right_sibling_category] = 0
dict[right_sibling_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
if right_sibling_category not in dict:
dict[right_sibling_category] = 0
dict[right_sibling_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
''' conn syn interaction '''
def create_conn_self_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_self_category = "%s|%s" % (conn_name, self_category)
if conn_self_category not in dict:
dict[conn_self_category] = 0
dict[conn_self_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_self_category = "%s|%s" % (conn_name, self_category)
if conn_self_category not in dict:
dict[conn_self_category] = 0
dict[conn_self_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
def create_conn_parent_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_parent_category = "%s|%s" % (conn_name, parent_category)
if conn_parent_category not in dict:
dict[conn_parent_category] = 0
dict[conn_parent_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_parent_category = "%s|%s" % (conn_name, parent_category)
if conn_parent_category not in dict:
dict[conn_parent_category] = 0
dict[conn_parent_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
def create_conn_left_sibling_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_left_sibling_category = "%s|%s" % (conn_name, left_sibling_category)
if conn_left_sibling_category not in dict:
dict[conn_left_sibling_category] = 0
dict[conn_left_sibling_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_left_sibling_category = "%s|%s" % (conn_name, left_sibling_category)
if conn_left_sibling_category not in dict:
dict[conn_left_sibling_category] = 0
dict[conn_left_sibling_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
def create_conn_right_sibling_category_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_right_sibling_category = "%s|%s" % (conn_name, right_sibling_category)
if conn_right_sibling_category not in dict:
dict[conn_right_sibling_category] = 0
dict[conn_right_sibling_category] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
conn_right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
conn_right_sibling_category = "%s|%s" % (conn_name, right_sibling_category)
if conn_right_sibling_category not in dict:
dict[conn_right_sibling_category] = 0
dict[conn_right_sibling_category] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
#self_parent
#self_right
#self_left
#parent_left
#parent_right
#left_right
def create_all_syn_syn_category_dict(self, threshold = 1):
self_parent_dict = {}
self_right_dict = {}
self_left_dict = {}
parent_left_dict = {}
parent_right_dict = {}
left_right_dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
if syntax_tree.tree == None:
parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
if syntax_tree.tree == None:
left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
if syntax_tree.tree == None:
right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
self_parent = "%s|%s" % (self_category, parent_category)
self_right = "%s|%s" % (self_category, right_sibling_category)
self_left = "%s|%s" % (self_category, left_sibling_category)
parent_left = "%s|%s" % (parent_category, left_sibling_category)
parent_right = "%s|%s" % (parent_category, right_sibling_category)
left_right = "%s|%s" % (left_sibling_category, right_sibling_category)
if self_parent not in self_parent_dict:
self_parent_dict[self_parent] = 0
self_parent_dict[self_parent] += 1
if self_right not in self_right_dict:
self_right_dict[self_right] = 0
self_right_dict[self_right] += 1
if self_left not in self_left_dict:
self_left_dict[self_left] = 0
self_left_dict[self_left] += 1
if parent_left not in parent_left_dict:
parent_left_dict[parent_left] = 0
parent_left_dict[parent_left] += 1
if parent_right not in parent_right_dict:
parent_right_dict[parent_right] = 0
parent_right_dict[parent_right] += 1
if left_right not in left_right_dict:
left_right_dict[left_right] = 0
left_right_dict[left_right] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
self_category = "NONE_TREE"
else:
self_category = syntax_tree.get_self_category_node_by_token_indices(conn_indices).name
if syntax_tree.tree == None:
parent_category = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_category = "ROOT"
else:
parent_category = parent_category_node.name
if syntax_tree.tree == None:
left_sibling_category = "NONE_TREE"
else:
left_sibling_category_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
if left_sibling_category_node == None:
left_sibling_category = "NONE"
else:
left_sibling_category = left_sibling_category_node.name
if syntax_tree.tree == None:
right_sibling_category = "NONE_TREE"
else:
right_sibling_category_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
if right_sibling_category_node == None:
right_sibling_category = "NONE"
else:
right_sibling_category = right_sibling_category_node.name
self_parent = "%s|%s" % (self_category, parent_category)
self_right = "%s|%s" % (self_category, right_sibling_category)
self_left = "%s|%s" % (self_category, left_sibling_category)
parent_left = "%s|%s" % (parent_category, left_sibling_category)
parent_right = "%s|%s" % (parent_category, right_sibling_category)
left_right = "%s|%s" % (left_sibling_category, right_sibling_category)
if self_parent not in self_parent_dict:
self_parent_dict[self_parent] = 0
self_parent_dict[self_parent] += 1
if self_right not in self_right_dict:
self_right_dict[self_right] = 0
self_right_dict[self_right] += 1
if self_left not in self_left_dict:
self_left_dict[self_left] = 0
self_left_dict[self_left] += 1
if parent_left not in parent_left_dict:
parent_left_dict[parent_left] = 0
parent_left_dict[parent_left] += 1
if parent_right not in parent_right_dict:
parent_right_dict[parent_right] = 0
parent_right_dict[parent_right] += 1
if left_right not in left_right_dict:
left_right_dict[left_right] = 0
left_right_dict[left_right] += 1
#删除频率小于threshold的键
util.removeItemsInDict(self_parent_dict, threshold)
util.removeItemsInDict(self_right_dict, threshold)
util.removeItemsInDict(self_left_dict, threshold)
util.removeItemsInDict(parent_left_dict, threshold)
util.removeItemsInDict(parent_right_dict, threshold)
util.removeItemsInDict(left_right_dict, threshold)
#字典keys写入文件
self_parent_path = config.CONNECTIVE_DICT_SELF_PARENT_CATEGORY_PATH
self_right_path = config.CONNECTIVE_DICT_SELF_RIGHT_CATEGORY_PATH
self_left_path = config.CONNECTIVE_DICT_SELF_LEFT_CATEGORY_PATH
parent_left_path = config.CONNECTIVE_DICT_PARENT_LEFT_CATEGORY_PATH
parent_right_path = config.CONNECTIVE_DICT_PARENT_RIGHT_CATEGORY_PATH
left_right_path = config.CONNECTIVE_DICT_LEFT_RIGHT_CATEGORY_PATH
util.write_dict_keys_to_file(self_parent_dict, self_parent_path)
util.write_dict_keys_to_file(self_right_dict, self_right_path)
util.write_dict_keys_to_file(self_left_dict, self_left_path)
util.write_dict_keys_to_file(parent_left_dict, parent_left_path)
util.write_dict_keys_to_file(parent_right_dict, parent_right_path)
util.write_dict_keys_to_file(left_right_dict, left_right_path)
    # lowercased connective string (C)
def create_lower_case_C_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
lower_case_C = conn_name.lower()
if "if the" == lower_case_C:
print("disc conn")
print((DocID, sent_index))
if lower_case_C not in dict:
dict[lower_case_C] = 0
dict[lower_case_C] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
lower_case_C = conn_name.lower()
if "if the" == lower_case_C:
print("non-disc conn")
print((DocID, sent_index))
if lower_case_C not in dict:
dict[lower_case_C] = 0
dict[lower_case_C] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# C
def create_C_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if conn_name not in dict:
dict[conn_name] = 0
dict[conn_name] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if conn_name not in dict:
dict[conn_name] = 0
dict[conn_name] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# prePOS + C
def create_prePOS_C_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
prePOS_C = "%s|%s" % (prev_pos, conn_name.lower())
if prePOS_C not in dict:
dict[prePOS_C] = 0
dict[prePOS_C] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
flag = 0
prev_index = conn_indices[0] - 1
pre_sent_index = sent_index
if prev_index < 0:
pre_sent_index -= 1
prev_index = -1
if pre_sent_index < 0:
flag = 1
# 连接词的前面一个词
if flag == 1:
prev = "NONE"
else:
prev = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][0]
if prev == "NONE":
prev_pos = "NONE"
else:
prev_pos = self.parse_dict[DocID]["sentences"][pre_sent_index]["words"][prev_index][1]["PartOfSpeech"]
#获取连接词到名称
conn_name = " ".join([self.parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
prePOS_C = "%s|%s" % (prev_pos, conn_name.lower())
if prePOS_C not in dict:
dict[prePOS_C] = 0
dict[prePOS_C] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict,dict_path)
    # all node names on the path from the connective's self category to the root
def create_self_category_to_root_path_dict(self,dict_path, threshold = 1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
path = syntax_tree.get_node_path_to_root(conn_node)
if path not in dict:
dict[path] = 0
dict[path] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
path = syntax_tree.get_node_path_to_root(conn_node)
if path not in dict:
dict[path] = 0
dict[path] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# c's parent to root node names
def create_CParent_to_root_path_node_names_dict(self, dict_path, threshold=1):
dict = {}
# dict[(DocID, sent_index)] = [[1], [4,5]]
#语篇连接词
for DocID, sent_index in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path += syntax_tree.get_node_path_to_root(conn_parent_node) + "-->"
if path[-3:] == "-->":
path = path[:-3]
for t in path.split("-->"):
if t not in dict:
dict[t] = 0
dict[t] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
path = "NONE_TREE"
else:
path = ""
for conn_index in conn_indices:
conn_node = syntax_tree.get_leaf_node_by_token_index(conn_index)
conn_parent_node = conn_node.up
path += syntax_tree.get_node_path_to_root(conn_parent_node) + "-->"
if path[-3:] == "-->":
path = path[:-3]
for t in path.split("-->"):
if t not in dict:
dict[t] = 0
dict[t] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# 连接词,加其上下文
def create_conn_connCtx_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
connCtx = "NONE_TREE"
else:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
connCtx = dict_util.get_node_Ctx(conn_node, syntax_tree)
conn_connCtx = "%s|%s" % (conn_name, connCtx)
if conn_connCtx not in dict:
dict[conn_connCtx] = 0
dict[conn_connCtx] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
connCtx = "NONE_TREE"
else:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
connCtx = dict_util.get_node_Ctx(conn_node, syntax_tree)
conn_connCtx = "%s|%s" % (conn_name, connCtx)
if conn_connCtx not in dict:
dict[conn_connCtx] = 0
dict[conn_connCtx] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# 连接词,加其 right sibling 的上下文
def create_conn_rightSiblingCtx_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
rightSiblingCtx = "NONE_TREE"
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSiblingCtx = dict_util.get_node_linked_Ctx(rightSibling_node, syntax_tree)
conn_rightSiblingCtx = "%s|%s" % (conn_name, rightSiblingCtx)
if conn_rightSiblingCtx not in dict:
dict[conn_rightSiblingCtx] = 0
dict[conn_rightSiblingCtx] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
rightSiblingCtx = "NONE_TREE"
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSiblingCtx = dict_util.get_node_linked_Ctx(rightSibling_node, syntax_tree)
conn_rightSiblingCtx = "%s|%s" % (conn_name, rightSiblingCtx)
if conn_rightSiblingCtx not in dict:
dict[conn_rightSiblingCtx] = 0
dict[conn_rightSiblingCtx] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# 连接词,加其 left sibling 的上下文
def create_conn_leftSiblingCtx_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
leftSiblingCtx = "NONE_TREE"
else:
leftSibling_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
leftSiblingCtx = dict_util.get_node_linked_Ctx(leftSibling_node, syntax_tree)
conn_leftSiblingCtx = "%s|%s" % (conn_name, leftSiblingCtx)
if conn_leftSiblingCtx not in dict:
dict[conn_leftSiblingCtx] = 0
dict[conn_leftSiblingCtx] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
leftSiblingCtx = "NONE_TREE"
else:
leftSibling_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
leftSiblingCtx = dict_util.get_node_linked_Ctx(leftSibling_node, syntax_tree)
conn_leftSiblingCtx = "%s|%s" % (conn_name, leftSiblingCtx)
if conn_leftSiblingCtx not in dict:
dict[conn_leftSiblingCtx] = 0
dict[conn_leftSiblingCtx] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# 连接词,加其 left right sibling 的上下文
def create_conn_left_right_SiblingCtx_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
leftSiblingCtx = "NONE_TREE"
else:
leftSibling_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
leftSiblingCtx = dict_util.get_node_linked_Ctx(leftSibling_node, syntax_tree)
if syntax_tree.tree == None:
rightSiblingCtx = "NONE_TREE"
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSiblingCtx = dict_util.get_node_linked_Ctx(rightSibling_node, syntax_tree)
conn_left_right_SiblingCtx = "%s|%s|%s" % (conn_name, leftSiblingCtx, rightSiblingCtx)
if conn_left_right_SiblingCtx not in dict:
dict[conn_left_right_SiblingCtx] = 0
dict[conn_left_right_SiblingCtx] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
leftSiblingCtx = "NONE_TREE"
else:
leftSibling_node = syntax_tree.get_left_sibling_category_node_by_token_indices(conn_indices)
leftSiblingCtx = dict_util.get_node_linked_Ctx(leftSibling_node, syntax_tree)
if syntax_tree.tree == None:
rightSiblingCtx = "NONE_TREE"
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSiblingCtx = dict_util.get_node_linked_Ctx(rightSibling_node, syntax_tree)
conn_left_right_SiblingCtx = "%s|%s|%s" % (conn_name, leftSiblingCtx, rightSiblingCtx)
if conn_left_right_SiblingCtx not in dict:
dict[conn_left_right_SiblingCtx] = 0
dict[conn_left_right_SiblingCtx] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# 连接词,加其 parent_category 的上下文
def create_conn_parent_category_Ctx_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
parent_categoryCtx = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
parent_categoryCtx = dict_util.get_node_linked_Ctx(parent_category_node, syntax_tree)
conn_parent_categoryCtx = "%s|%s" % (conn_name, parent_categoryCtx)
if conn_parent_categoryCtx not in dict:
dict[conn_parent_categoryCtx] = 0
dict[conn_parent_categoryCtx] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
#获取连接词到名称
conn_name = " ".join([parse_dict[DocID]["sentences"][sent_index]["words"][word_token][0] \
for word_token in conn_indices ])
if syntax_tree.tree == None:
parent_categoryCtx = "NONE_TREE"
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
parent_categoryCtx = dict_util.get_node_linked_Ctx(parent_category_node, syntax_tree)
conn_parent_categoryCtx = "%s|%s" % (conn_name, parent_categoryCtx)
if conn_parent_categoryCtx not in dict:
dict[conn_parent_categoryCtx] = 0
dict[conn_parent_categoryCtx] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
    # production rules of the connective's right sibling
def create_rightSibling_production_rules_dict(self, dict_path, threshold = 1):
dict = {}
parse_dict = self.parse_dict
#语篇连接词
for (DocID, sent_index) in list(self.disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
rightSibling_production_rules = ["NONE_TREE"]
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSibling_production_rules = dict_util.get_node_production_rules(rightSibling_node, syntax_tree)
for rule in rightSibling_production_rules:
if rule not in dict:
dict[rule] = 0
dict[rule] += 1
#非语篇连接词
for DocID, sent_index in list(self.non_disc_conns_dict.keys()):
#获取该句话的语法树
parse_tree = self.parse_dict[DocID]["sentences"][sent_index]["parsetree"].strip()
syntax_tree = Syntax_tree(parse_tree)
for conn_indices in self.non_disc_conns_dict[(DocID, sent_index)]:
if syntax_tree.tree == None:
rightSibling_production_rules = ["NONE_TREE"]
else:
rightSibling_node = syntax_tree.get_right_sibling_category_node_by_token_indices(conn_indices)
rightSibling_production_rules = dict_util.get_node_production_rules(rightSibling_node, syntax_tree)
for rule in rightSibling_production_rules:
if rule not in dict:
dict[rule] = 0
dict[rule] += 1
#删除频率小于threshold的键
util.removeItemsInDict(dict, threshold)
#字典keys写入文件
util.write_dict_keys_to_file(dict, dict_path)
# Sorted explicit-connective list (original comment: "排序后的连接词").
def create_sorted_exp_conns():
    """Write the explicit-connective list to SORTED_ExpConn_PATH.

    Ordering: (1) discontinuous connectives ("if..then" style, containing
    "..") first, then (2) the remaining connectives by decreasing number of
    words, so a greedy matcher tries longer connectives before shorter ones.
    """
    with open(config.ExpConn_PATH) as exp_conn_file:
        conn_list = [line.strip() for line in exp_conn_file.readlines()]
    sortedConn = []
    # 1. discontinuous connectives such as "if..then"
    for conn in conn_list:
        if ".." in conn and conn not in sortedConn:
            sortedConn.append(conn)
    # 2. remaining connectives, most words first.
    # BUGFIX: the original used sorted(..., cmp=...), which was removed in
    # Python 3 and raises TypeError there; a negative-length key preserves
    # the same descending, stable ordering portably.
    for conn in sorted(conn_list, key=lambda c: -len(c.split(" "))):
        if conn not in sortedConn:
            sortedConn.append(conn)
    with open(config.SORTED_ExpConn_PATH, "w") as fout:
        fout.write("\n".join(sortedConn))
# Entry point: build the connective-feature dictionaries from the PDTB
# training data. Most dictionary builders are commented out; only the
# "C + next word" dictionary is currently generated on each run.
if __name__ == "__main__":
    # create_sorted_exp_conns()
    pdtb_parse = PDTB_PARSE(config.PARSERS_TRAIN_PATH_JSON, config.PDTB_TRAIN_PATH, config.TRAIN)
    # ''' dicts required by all connective features '''
    # print "cpos dict ..."
    # ConnectiveDict(pdtb_parse).create_CPOS_dict(config.CONNECTIVE_DICT_CPOS_PATH)
    #
    # print "pre + C dict ..."
    # ConnectiveDict(pdtb_parse).create_prev_C_dict(config.CONNECTIVE_DICT_PREV_C_PATH)
    #
    # print "prevPOS dic..."
    # ConnectiveDict(pdtb_parse).create_prevPOS_dict(config.CONNECTIVE_DICT_PREVPOS_PATH)
    #
    # # prePOS + CPOS
    # print "prePOS + CPOS"
    # ConnectiveDict(pdtb_parse).create_prePOS_CPOS_dict(config.CONNECTIVE_DICT_PREVPOS_CPOS_PATH)
    #
    # # C +next
    print("C +next dict...")
    ConnectiveDict(pdtb_parse).create_C_next_dict(config.CONNECTIVE_DICT_C_NEXT_PATH)
    #
    # # next POS
    # print "next pos dict..."
    # ConnectiveDict(pdtb_parse).create_nextPOS_dict(config.CONNECTIVE_DICT_NEXTPOS_PATH)
    #
    # # C POS + next POS
    # print "C POS + next POS dict..."
    # ConnectiveDict(pdtb_parse).create_CPOS_nextPOS_dict(config.CONNECTIVE_DICT_CPOS_NEXTPOS_PATH)
    #
    # # path of c's parent to root
    # print "path of c's parent to root dict"
    # ConnectiveDict(pdtb_parse).create_CParent_to_root_path_dict(config.CONNECTIVE_DICT_CPARENT_TO_ROOT_PATH)
    #
    # print "compressed path of c's parent to root dict"
    # ConnectiveDict(pdtb_parse).create_compressed_CParent_to_root_path_dict(config.CONNECTIVE_DICT_COMPRESSED_CPARENT_TO_ROOT_PATH)
    #
    # print "create_self_category_dict"
    # ConnectiveDict(pdtb_parse).create_self_category_dict(config.CONNECTIVE_DICT_SELF_CATEGORY_PATH)
    #
    # print "create_parent_category_dict..."
    # ConnectiveDict(pdtb_parse).create_parent_category_dict(config.CONNECTIVE_DICT_PARENT_CATEGORY_PATH)
    #
    # print "create_left_sibling_category_dict..."
    # ConnectiveDict(pdtb_parse).create_left_sibling_category_dict(config.CONNECTIVE_DICT_LEFT_SIBLING_CATEGORY_PATH)
    #
    # print "create_right_sibling_category_dict..."
    # ConnectiveDict(pdtb_parse).create_right_sibling_category_dict(config.CONNECTIVE_DICT_RIGHT_SIBLING_CATEGORY_PATH)
    #
    # print "create_conn_self_category_dict..."
    # ConnectiveDict(pdtb_parse).create_conn_self_category_dict(config.CONNECTIVE_DICT_CONN_SELF_CATEGORY_PATH)
    #
    # print "create_conn_parent_category_dict..."
    # ConnectiveDict(pdtb_parse).create_conn_parent_category_dict(config.CONNECTIVE_DICT_CONN_PARENT_CATEGORY_PATH)
    #
    # print "create_conn_left_sibling_category_dict..."
    # ConnectiveDict(pdtb_parse).create_conn_left_sibling_category_dict(config.CONNECTIVE_DICT_CONN_LEFT_SIBLING_CATEGORY_PATH)
    #
    # print "create_conn_right_sibling_category_dict..."
    # ConnectiveDict(pdtb_parse).create_conn_right_sibling_category_dict(config.CONNECTIVE_DICT_CONN_RIGHT_SIBLING_CATEGORY_PATH)
    #
    # print "create_all_syn_syn_category_dict..."
    # ConnectiveDict(pdtb_parse).create_all_syn_syn_category_dict()
    #
    # # ''' mine '''
    # print "conn_lower_case dict ..."
    # ConnectiveDict(pdtb_parse).create_lower_case_C_dict(config.CONNECTIVE_DICT_CONN_LOWER_CASE)
    # print "conn_ dict ..."
    # ConnectiveDict(pdtb_parse).create_C_dict(config.CONNECTIVE_DICT_CONN)
    # print "prev POS + C"
    # ConnectiveDict(pdtb_parse).create_prePOS_C_dict(config.CONNECTIVE_DICT_PREVPOS_C)
    # print "create_CParent_to_root_path_node_names_dict"
    # ConnectiveDict(pdtb_parse).create_conn_connCtx_dict(config.CONNECTIVE_DICT_CONN_CONNCTX)
    # print "create_conn_rightSiblingCtx_dict"
    # ConnectiveDict(pdtb_parse).create_conn_rightSiblingCtx_dict(config.CONNECTIVE_DICT_CONN_RIGHTSIBLINGCTX)
    # print "create_conn_leftSiblingCtx_dict"
    # ConnectiveDict(pdtb_parse).create_conn_leftSiblingCtx_dict(config.CONNECTIVE_DICT_CONN_LEFTSIBLINGCTX)
    # print "create_conn_parent_category_Ctx_dict"
    # ConnectiveDict(pdtb_parse).create_conn_parent_category_Ctx_dict(config.CONNECTIVE_DICT_CONN_PARENT_CATEGORY_CTX)
    # print "create_rightSibling_production_rules_dict"
    # ConnectiveDict(pdtb_parse).create_rightSibling_production_rules_dict(config.CONNECTIVE_DICT_CONN_RIGHTSIBLING_PRODUCTION_RULES)
| [
"qkaren@mercury.mercury"
] | qkaren@mercury.mercury |
003d4fcb554b08dd645a2f33bd3035bdd7d5d3f1 | 7234e6c72eb3f09c4a66dbe91f00fdf7742f010f | /algo/arrays/fruitsIntoBasket.py | 52054c5a06bb6d3e66bb587ba1ab54b57a3e8c24 | [] | no_license | srinathalla/python | 718ac603473e7bed060ba66aa3d39a90cf7ef69d | b6c546070b1738350303df3939888d1b0e90e89b | refs/heads/master | 2021-06-13T06:11:42.653311 | 2021-02-19T06:01:41 | 2021-02-19T06:01:41 | 150,374,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | from typing import List
#
# Sliding-window solution: longest contiguous run of `tree` containing at
# most two distinct values (the "fruit into baskets" problem). Equivalent
# to longest substring with at most two distinct characters.
#
# T.C : O(n)  -- two-pointer single pass; each index enters/leaves the
#               window at most once
# S.C : O(1)  -- the count map never holds more than 3 distinct keys
#
class Solution:
    def totalFruit(self, tree: List[int]) -> int:
        """Return the length of the longest window of `tree` with at most
        two distinct fruit types."""
        if len(tree) < 3:
            # With fewer than 3 elements there are at most 2 types anyway.
            return len(tree)
        window_counts = {}
        distinct = 0
        best = 0
        start = 0
        for end, fruit in enumerate(tree):
            # Grow the window on the right.
            if window_counts.get(fruit, 0) == 0:
                distinct += 1
            window_counts[fruit] = window_counts.get(fruit, 0) + 1
            # Shrink from the left until at most two types remain.
            while distinct > 2:
                left_fruit = tree[start]
                window_counts[left_fruit] -= 1
                if window_counts[left_fruit] == 0:
                    distinct -= 1
                start += 1
            best = max(best, end - start + 1)
        return best
# Ad-hoc demo: run the solver on a sample input and print the result
# (expected output: 5, from the window [1, 2, 1, 1, 2]).
s = Solution()
sample = [3, 3, 3, 1, 2, 1, 1, 2, 3, 3, 4]
print(s.totalFruit(sample))
| [
"srinathb10j.ik@gmail.com"
] | srinathb10j.ik@gmail.com |
9a3841891607d7da33b468aefaaf2881dc8947be | da11409d30c8ee6c53d19f81b58ebabd53d2142c | /blog/url.py | a7069b14defa4a1320059a175d472bbe32abfb45 | [] | no_license | pollitosabroson/blog | 3eb896917808265d10929a4f32bfdc09c97d4c85 | e9b8b78741faad1d9b3807e2a166dc8e2e4010b3 | refs/heads/master | 2020-04-09T11:27:27.517901 | 2015-02-15T22:06:22 | 2015-02-15T22:06:22 | 30,840,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6 | py | url.py | [
"karnal02@gmail.com"
] | karnal02@gmail.com |
7477c8bd96b0f1e4c8bd1237d0f4081cbf400f67 | 28e54e668bace6fdbc4093df2134827e51797eca | /examples/authentication/demo_auth.py | 122347a5bc15e857be626ad74e1389ae3eb13e0c | [
"MIT"
] | permissive | xaiki/safrs | 0c371e1eca86f951181b01afe4ea157f8a278ea5 | e7948a4927e65f4086326b363e1b5ec4fe372ddd | refs/heads/master | 2020-06-12T21:22:07.855469 | 2019-06-29T17:13:25 | 2019-06-29T17:13:25 | 194,429,258 | 0 | 0 | MIT | 2019-06-29T17:00:05 | 2019-06-29T17:00:04 | null | UTF-8 | Python | false | false | 4,690 | py | #!/usr/bin/env python
#
# This is a demo application to demonstrate the functionality of the safrs_rest REST API with authentication
#
# you will have to install the requirements:
# pip3 install passlib flask_httpauth flask_login
#
# This script can be ran standalone like this:
# python3 demo_auth.py [Listener-IP]
# This will run the example on http://Listener-Ip:5000
#
# - A database is created and a item is added
# - User is created and the User endpoint is protected by user:admin & pass: password
# - swagger2 documentation is generated
#
import sys
import os
import logging
import builtins
from functools import wraps
from flask import Flask, redirect, jsonify, make_response
from flask import abort, request, g, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
from safrs import SAFRSBase, SAFRSAPI, jsonapi_rpc
from flask_swagger_ui import get_swaggerui_blueprint
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from flask_login import LoginManager, UserMixin, \
login_required, login_user, logout_user
# Global extension instances; `db` is bound to the Flask app later via
# db.init_app(app), and `auth` backs the verify_password() callback below.
db = SQLAlchemy()
auth = HTTPBasicAuth()
# Example sqla database object
class Item(SAFRSBase, db.Model):
    '''
        description: Item description
    '''
    # NOTE(review): the docstring above appears to be consumed by safrs for
    # the generated swagger description -- confirm before rewording it.
    __tablename__ = 'items'
    # String primary key; value generation is left to SAFRSBase -- TODO confirm.
    id = Column(String, primary_key=True)
    name = Column(String, default = '')
class User(SAFRSBase, db.Model):
    '''
        description: User description
    '''
    # NOTE(review): the docstring above appears to be consumed by safrs for
    # the generated swagger description -- confirm before rewording it.
    __tablename__ = 'users'
    username = db.Column(db.String(32), primary_key=True)
    # passlib hash of the user's password; plaintext is never stored.
    password_hash = db.Column(db.String(64))
    # Protect the generated /users endpoints with HTTP basic auth.
    custom_decorators = [auth.login_required]
    @jsonapi_rpc(http_methods=['POST'])
    def hash_password(self, password):
        # Store the passlib hash of `password` on this user.
        self.password_hash = pwd_context.encrypt(password)
    @jsonapi_rpc(http_methods=['POST'])
    def verify_password(self, password):
        # True iff `password` matches the stored hash.
        return pwd_context.verify(password, self.password_hash)
    @jsonapi_rpc(http_methods=['POST'])
    def generate_auth_token(self, expiration=600):
        # Issue a signed, time-limited token (default 600 s) carrying the username.
        s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({'username': self.username})
    @staticmethod
    @jsonapi_rpc(http_methods=['POST'])
    def verify_auth_token(token):
        # Resolve a token back to its User row; returns None when the token
        # is expired or invalid.
        s = Serializer(app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None # valid token, but expired
        except BadSignature:
            return None # invalid token
        user = User.query.get(data['username'])
        return user
def start_app(app):
    """Expose the models through a JSON:API/swagger endpoint and serve the
    app (blocking call; uses the module-level HOST and PORT)."""
    OAS_PREFIX = '/api' # swagger location
    api = SAFRSAPI(app, host='{}:{}'.format(HOST,PORT), schemes=["http"], prefix=OAS_PREFIX, api_spec_url=OAS_PREFIX+'/swagger' )
    api.expose_object(Item)
    api.expose_object(User)
    # Seed one demo row. NOTE(review): Item declares no 'email' column;
    # presumably SAFRSBase tolerates the extra kwarg -- confirm against safrs.
    item = Item(name='test',email='em@il')
    #user = User(username='admin')
    #user.hash_password('password')
    print('Starting API: http://{}:{}/api'.format(HOST,PORT))
    app.run(host=HOST, port = PORT)
#
# APP Initialization
#
app = Flask('demo_app')
# Demo configuration: on-disk sqlite database, hardcoded secret key.
# NOTE(review): a literal SECRET_KEY and DEBUG=True are acceptable only for
# a demo; never ship these to production.
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/demo.sqlite',
                   SQLALCHEMY_TRACK_MODIFICATIONS = False,
                   SECRET_KEY = b'sdqfjqsdfqizroqnxwc',
                   DEBUG = True)
# Bind address may be supplied as the first CLI argument (cf. module header);
# defaults to all interfaces.
HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 5000
db.init_app(app)
#
# Authentication and custom routes
#
@auth.verify_password
def verify_password(username_or_token, password):
    """HTTP basic-auth callback for flask_httpauth.

    Accepts, in order:
      1. the hardcoded demo credentials user/password,
      2. a previously issued auth token passed in the username field,
      3. a username/password pair checked against the User table.
    Returns True when authentication succeeds, False otherwise.
    """
    # Hardcoded demo credentials.
    if username_or_token == 'user' and password == 'password':
        return True
    # BUGFIX: an unconditional `return False` here made everything below
    # unreachable dead code; removed so token/database authentication
    # actually runs as written.
    user = User.verify_auth_token(username_or_token)
    print(user, username_or_token, password)
    if not user:
        # try to authenticate with username/password
        user = User.query.filter_by(username=username_or_token).first()
        print(user)
        if not user or not user.verify_password(password):
            return False
    print('Authentication Successful for "{}"'.format(user.username))
    return True
@app.route('/')
def goto_api():
    # Redirect the site root to the swagger UI served under /api.
    return redirect('/api')
@app.teardown_appcontext
def shutdown_session(exception=None):
    '''cfr. http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/'''
    # Remove the scoped SQLAlchemy session when the app context tears down,
    # so connections are not leaked between requests.
    db.session.remove()
# Start the application
with app.app_context():
    # Create the tables for the declared models (no-op if they exist),
    # then serve; start_app() blocks until the server stops.
    db.create_all()
    start_app(app)
| [
"thomas.pollet@gmail.com"
] | thomas.pollet@gmail.com |
7d8896dce3a2e64ecef24afa9b4e22931ac74e5a | f32ed75d9dfee31499fa27da916354d0ee22469b | /pysec/config.py | 3ef024e42d59a8a3e89de8b965ec27f3e5f62d5d | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | aoloriz/owasp-pysec | 63f1cabe6525f5d1000d89db570eb1b7aac2febc | 80273eebc4cef600960cbb234eb0160dca657f82 | refs/heads/master | 2021-12-02T09:39:52.796209 | 2021-08-22T22:27:22 | 2021-08-22T22:27:22 | 398,907,810 | 0 | 0 | Apache-2.0 | 2021-08-22T22:27:22 | 2021-08-22T21:47:21 | Python | UTF-8 | Python | false | false | 782 | py | # Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
keep_lib_log = 0
limits = {}
| [
"figus.federico@gmail.com"
] | figus.federico@gmail.com |
5f8b28914497787b40b57642a5e0bdf42c05e765 | 913f6aab5d86a92292578bb8e2ee32499a254d29 | /echo_pb2.py | 9afcbc2369d8c54f8f00d742e1be7022ed2c685c | [
"MIT"
] | permissive | e-heller/grpc-python-aio-memory-leak | ea0e4372029af39e08ca21c34d26b0dd3204422c | 3b33c4db286990d8908e6cdec95b72b4dd9d5bab | refs/heads/main | 2023-03-02T14:34:33.405983 | 2021-02-13T19:46:38 | 2021-02-13T19:46:38 | 338,640,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,013 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: echo.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='echo.proto',
package='echo',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\necho.proto\x12\x04\x65\x63ho\"\x1e\n\x0b\x45\x63hoRequest\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1f\n\x0c\x45\x63hoResponse\x12\x0f\n\x07message\x18\x01 \x01(\t2B\n\x0b\x45\x63hoService\x12\x33\n\x04\x45\x63ho\x12\x11.echo.EchoRequest\x1a\x12.echo.EchoResponse\"\x00(\x01\x30\x01\x62\x06proto3'
)
_ECHOREQUEST = _descriptor.Descriptor(
name='EchoRequest',
full_name='echo.EchoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='echo.EchoRequest.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=50,
)
_ECHORESPONSE = _descriptor.Descriptor(
name='EchoResponse',
full_name='echo.EchoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='echo.EchoResponse.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=83,
)
DESCRIPTOR.message_types_by_name['EchoRequest'] = _ECHOREQUEST
DESCRIPTOR.message_types_by_name['EchoResponse'] = _ECHORESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EchoRequest = _reflection.GeneratedProtocolMessageType('EchoRequest', (_message.Message,), {
'DESCRIPTOR' : _ECHOREQUEST,
'__module__' : 'echo_pb2'
# @@protoc_insertion_point(class_scope:echo.EchoRequest)
})
_sym_db.RegisterMessage(EchoRequest)
EchoResponse = _reflection.GeneratedProtocolMessageType('EchoResponse', (_message.Message,), {
'DESCRIPTOR' : _ECHORESPONSE,
'__module__' : 'echo_pb2'
# @@protoc_insertion_point(class_scope:echo.EchoResponse)
})
_sym_db.RegisterMessage(EchoResponse)
_ECHOSERVICE = _descriptor.ServiceDescriptor(
name='EchoService',
full_name='echo.EchoService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=85,
serialized_end=151,
methods=[
_descriptor.MethodDescriptor(
name='Echo',
full_name='echo.EchoService.Echo',
index=0,
containing_service=None,
input_type=_ECHOREQUEST,
output_type=_ECHORESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ECHOSERVICE)
DESCRIPTOR.services_by_name['EchoService'] = _ECHOSERVICE
# @@protoc_insertion_point(module_scope)
| [
"eheller@gmail.com"
] | eheller@gmail.com |
dfe1b7ca5af6e1b6d585d3efd87a2a11fe19e4a6 | 9bd1044b3e2886108457448345e9276c288efaac | /arq2/trab1/mcpat/IS.arquitetura2.mcpat.py | 666c2c44b0e8b6895950966f6657b78628124d28 | [] | no_license | vandersonmarocchio/trabalhos-2 | 35ec5fa1317fda6634afdd277913916647ea27fe | e719926dceac967cbea05de098f4153ccac28e52 | refs/heads/master | 2023-08-22T18:51:16.852692 | 2020-10-14T17:53:35 | 2020-10-14T17:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135,017 | py | power = {'BUSES': {'Area': 3.70399,
'Bus/Area': 3.70399,
'Bus/Gate Leakage': 0.00993673,
'Bus/Peak Dynamic': 2.73167,
'Bus/Runtime Dynamic': 0.274095,
'Bus/Subthreshold Leakage': 0.103619,
'Bus/Subthreshold Leakage with power gating': 0.0388573,
'Gate Leakage': 0.00993673,
'Peak Dynamic': 2.73167,
'Runtime Dynamic': 0.274095,
'Subthreshold Leakage': 0.103619,
'Subthreshold Leakage with power gating': 0.0388573},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924505,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278579,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381151,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475862,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.406982,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.704746,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.458413,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.57014,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.339402,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.440158,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.53506,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737917,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0147534,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.148193,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.109111,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.221985,
'Execution Unit/Register Files/Runtime Dynamic': 0.123864,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.384567,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.907734,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.79634,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00196905,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00196905,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00170277,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000652461,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00156738,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00720826,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0193175,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.104891,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.67195,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.284205,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.356256,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.77802,
'Instruction Fetch Unit/Runtime Dynamic': 0.771878,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0690956,
'L2/Runtime Dynamic': 0.0221097,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.25065,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.37526,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0874138,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0874139,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.66512,
'Load Store Unit/Runtime Dynamic': 1.89377,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.215547,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.431095,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0764985,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0775303,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.414837,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0464998,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.74551,
'Memory Management Unit/Runtime Dynamic': 0.12403,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.5035,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257443,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0239087,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.209541,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.490893,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.09902,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278582,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475868,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.400861,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.694147,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.450596,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.5456,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.333403,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.434022,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.52279,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0145315,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.146591,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.10747,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.220385,
'Execution Unit/Register Files/Runtime Dynamic': 0.122001,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.380695,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.895407,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.75148,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00190784,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00190784,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00165057,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000632862,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00154381,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00701007,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0186908,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.103313,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.57161,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.280833,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.350898,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.67274,
'Instruction Fetch Unit/Runtime Dynamic': 0.760745,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0681781,
'L2/Runtime Dynamic': 0.0217757,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.19618,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.34895,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0856514,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0856513,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.60229,
'Load Store Unit/Runtime Dynamic': 1.85701,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.211202,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.422403,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0749562,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0759787,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.408599,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0460152,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.736604,
'Memory Management Unit/Runtime Dynamic': 0.121994,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.3133,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257452,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0235958,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.206363,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.487411,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.00042,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278582,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475868,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.405566,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.702294,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.455273,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.56313,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.338012,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.437215,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.53224,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0147021,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.147824,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.108731,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.221619,
'Execution Unit/Register Files/Runtime Dynamic': 0.123433,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.383675,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.901828,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.78006,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00196526,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00196526,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0016991,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000650835,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00156193,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00719155,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0192944,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.104526,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.64874,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.280258,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.355017,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.75367,
'Instruction Fetch Unit/Runtime Dynamic': 0.766287,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0680189,
'L2/Runtime Dynamic': 0.0217641,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.20925,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.35509,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0860742,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0860743,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.61736,
'Load Store Unit/Runtime Dynamic': 1.86566,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.212244,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.424489,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0753262,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0763464,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.413394,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.045921,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.74204,
'Memory Management Unit/Runtime Dynamic': 0.122267,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.424,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257453,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0238364,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.208764,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.490053,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.04609,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278583,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475869,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.405448,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.70209,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.455141,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.56268,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.337897,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.437076,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.53201,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0146978,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.147793,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.108699,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.221588,
'Execution Unit/Register Files/Runtime Dynamic': 0.123397,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.3836,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.901548,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.77915,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0019659,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0019659,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00169965,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000651046,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00156148,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00719292,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0193006,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.104496,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.64681,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.279362,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.354914,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.75165,
'Instruction Fetch Unit/Runtime Dynamic': 0.765265,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0683183,
'L2/Runtime Dynamic': 0.0218932,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.20682,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.35467,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0859958,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0859956,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.61457,
'Load Store Unit/Runtime Dynamic': 1.86476,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.212051,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.424101,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0752576,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0762822,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.413275,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0457757,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.741801,
'Memory Management Unit/Runtime Dynamic': 0.122058,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.419,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257453,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0238304,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.208697,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.48998,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.04311,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278583,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475869,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.413699,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.716378,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.463439,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.59352,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.345981,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.442733,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.54859,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0149969,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.149956,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.110911,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.22375,
'Execution Unit/Register Files/Runtime Dynamic': 0.125908,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.388826,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.912919,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.82953,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00207534,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00207534,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00179425,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000687269,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159325,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00753817,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0203758,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106622,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.78208,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.278483,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.362136,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.89358,
'Instruction Fetch Unit/Runtime Dynamic': 0.775156,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0682673,
'L2/Runtime Dynamic': 0.0218866,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.23033,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.36577,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0867564,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0867565,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.64168,
'Load Store Unit/Runtime Dynamic': 1.88038,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.213927,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.427853,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0759232,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0769473,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.421685,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0456321,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.751362,
'Memory Management Unit/Runtime Dynamic': 0.122579,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.6142,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257453,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0242523,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.212902,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.494607,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.12414,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278583,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475869,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.411415,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.712421,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.461191,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.58503,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.343742,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.441211,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.544,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0149141,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.149357,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.110299,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.223152,
'Execution Unit/Register Files/Runtime Dynamic': 0.125213,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.387379,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.90986,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.81576,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00204706,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00204706,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00177032,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000678395,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00158445,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0074489,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0200795,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106033,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.74462,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.277807,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.360136,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.85427,
'Instruction Fetch Unit/Runtime Dynamic': 0.771505,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0681017,
'L2/Runtime Dynamic': 0.0218565,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.22372,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.36224,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0865424,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0865423,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.63405,
'Load Store Unit/Runtime Dynamic': 1.87558,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.213399,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.426797,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0757359,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0767573,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.419356,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.045521,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.748709,
'Memory Management Unit/Runtime Dynamic': 0.122278,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.5598,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257454,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0241355,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.211737,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.493326,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.10031,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0924539,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278583,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475869,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.414376,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.71755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.464084,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.59601,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.346645,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.443131,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.54995,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0150215,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.150133,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.111093,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.223928,
'Execution Unit/Register Files/Runtime Dynamic': 0.126114,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.389256,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.913719,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.83343,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00208394,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00208394,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00180115,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000689616,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159586,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00756489,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0204796,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106797,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.79317,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.277024,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.362729,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.90522,
'Instruction Fetch Unit/Runtime Dynamic': 0.774595,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0682291,
'L2/Runtime Dynamic': 0.0219309,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.22976,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.36548,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0867377,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0867376,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.64102,
'Load Store Unit/Runtime Dynamic': 1.87998,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.21388,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.42776,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0759069,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0769302,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.422375,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0453926,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.752024,
'Memory Management Unit/Runtime Dynamic': 0.122323,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.6271,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257454,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.0242869,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.213242,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.494982,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.12724,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.092455,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.278583,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.381166,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.475869,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.51449,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.414105,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.6028,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.717079,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 6.06887,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.95158,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.463824,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.59501,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.346376,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.442926,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.54941,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0737947,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0150116,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.150062,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.11102,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.223857,
'Execution Unit/Register Files/Runtime Dynamic': 0.126032,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.389084,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.913308,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.83173,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00208182,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0211356,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00208182,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0178486,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00179932,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.006603,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000688919,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0748421,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0147224,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00159481,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00755778,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.222668,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0204586,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.759674,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.106726,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.78872,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.275858,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.72013,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.362491,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 9.90055,
'Instruction Fetch Unit/Runtime Dynamic': 0.773092,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0678914,
'L2/Runtime Dynamic': 0.0219284,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.23029,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.36517,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0867548,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0867548,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.64163,
'Load Store Unit/Runtime Dynamic': 1.87977,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.213923,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.427845,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0759219,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.07694,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.422098,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.045199,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.751773,
'Memory Management Unit/Runtime Dynamic': 0.122139,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.6219,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 4.17516,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.257454,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0502409,
'Renaming Unit/Free List/Runtime Dynamic': 0.024273,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 1.08845,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.213098,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 5.71069,
'Renaming Unit/Runtime Dynamic': 0.494825,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.12348,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.4666838893602028,
'Runtime Dynamic': 0.4666838893602028,
'Subthreshold Leakage': 8.504,
'Subthreshold Leakage with power gating': 8.504},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.284289,
'Runtime Dynamic': 0.18201,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364},
{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.281753,
'Runtime Dynamic': 0.180811,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 388.384,
'Gate Leakage': 3.09074,
'Peak Dynamic': 231.381,
'Peak Power': 297.927,
'Runtime Dynamic': 57.3007,
'Subthreshold Leakage': 63.4555,
'Subthreshold Leakage with power gating': 28.0572,
'Total Cores/Area': 260.865,
'Total Cores/Gate Leakage': 2.98397,
'Total Cores/Peak Dynamic': 228.083,
'Total Cores/Runtime Dynamic': 56.6638,
'Total Cores/Subthreshold Leakage': 49.7502,
'Total Cores/Subthreshold Leakage with power gating': 20.6649,
'Total L3s/Area': 123.815,
'Total L3s/Gate Leakage': 0.0968273,
'Total L3s/Peak Dynamic': 0.566041,
'Total L3s/Runtime Dynamic': 0.36282,
'Total L3s/Subthreshold Leakage': 13.6017,
'Total L3s/Subthreshold Leakage with power gating': 6.64728,
'Total Leakage': 66.5462,
'Total NoCs/Area': 3.70399,
'Total NoCs/Gate Leakage': 0.00993673,
'Total NoCs/Peak Dynamic': 2.73167,
'Total NoCs/Runtime Dynamic': 0.274095,
'Total NoCs/Subthreshold Leakage': 0.103619,
'Total NoCs/Subthreshold Leakage with power gating': 0.0388573}} | [
"diogoada@gmail.com"
] | diogoada@gmail.com |
76c42d25f8cacebb06202933fa87bbde25eaea41 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_coarsens.py | 7e3f51e1ea0cdb68080e778c6311c0f110396d1c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.verbs._coarsen import _COARSEN
# class header
class _COARSENS(_COARSEN, ):
    """Word entry for "coarsens", the third-person singular of "coarsen"."""

    def __init__(self):
        # Inherit all shared data from the base form, then override the
        # fields specific to this inflection.
        _COARSEN.__init__(self)
        attributes = {
            "name": "COARSENS",
            "specie": 'verbs',
            "basic": "coarsen",
            "jsondata": {},
        }
        for key, value in attributes.items():
            setattr(self, key, value)
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
24efc1c8c05ca4c49cca58db61245a2bf13b6a86 | 4fd614f55cbfd077a5a6e4631e2318ec30d8dd79 | /migrations/versions/19d2a959996a_new_fields_in_user_model.py | d29d51c5bfe283e3dd1dcb364063dbb05e852d7b | [] | no_license | tkijimakura/flask-mega-tutorial | e49dbd67dfadbc0d446f494cbac87b568e983b7a | 04512368a8b758d88ccd18d7f46d117960caeff0 | refs/heads/master | 2021-09-12T22:16:54.003906 | 2018-04-21T20:33:56 | 2018-04-21T20:33:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | """new fields in user model
Revision ID: 19d2a959996a
Revises: 97fc3368eab9
Create Date: 2018-03-08 17:25:18.123486
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '19d2a959996a'       # this migration's identifier
down_revision = '97fc3368eab9'  # migration this one builds on
branch_labels = None            # no named branch
depends_on = None               # no cross-branch dependency
def upgrade():
    """Add the nullable ``about_me`` and ``last_seen`` columns to ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
    op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added by this migration (reverse of :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'last_seen')
    op.drop_column('user', 'about_me')
    # ### end Alembic commands ###
| [
"aly.sivji@analytehealth.com"
] | aly.sivji@analytehealth.com |
ca1ea0bfa5e35ba455e7b13dca16a027b2a67ae0 | 76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6 | /tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_executor.py | 22029308648a87a84dc866cf7e1b633872bbf10c | [
"Apache-2.0"
] | permissive | tensorflow/tfx | 0cfc9c55171352ecc98c9dfa8ffe976c689d7073 | 1b328504fa08a70388691e4072df76f143631325 | refs/heads/master | 2023-08-30T11:56:50.894497 | 2023-08-29T22:47:19 | 2023-08-29T22:48:26 | 169,116,405 | 2,116 | 899 | Apache-2.0 | 2023-09-14T21:51:42 | 2019-02-04T17:14:36 | Python | UTF-8 | Python | false | false | 2,422 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor for AI Platform Training component."""
import datetime
from typing import Any, Dict, List
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.orchestration.launcher import container_common
from tfx.utils import json_utils
_POLLING_INTERVAL_IN_SECONDS = 30  # seconds between job status polls
_CONNECTION_ERROR_RETRY_LIMIT = 5  # max retries on transient connection errors

# Keys for AIP training config.
PROJECT_CONFIG_KEY = 'project_id'        # GCP project that owns the job
TRAINING_JOB_CONFIG_KEY = 'training_job'  # full training job spec
JOB_ID_CONFIG_KEY = 'job_id'             # optional user-supplied job id
LABELS_CONFIG_KEY = 'labels'             # resource labels for the job
CONFIG_KEY = 'aip_training_config'       # exec_properties key for the config
class AiPlatformTrainingExecutor(base_executor.BaseExecutor):
  """AI Platform Training executor.

  Pops the serialized AIP training config from the execution properties,
  resolves artifact-based placeholders in the training job's command line,
  and submits the job to Cloud AI Platform Training.
  """

  def Do(self, input_dict: Dict[str, List[types.Artifact]],
         output_dict: Dict[str, List[types.Artifact]],
         exec_properties: Dict[str, Any]) -> None:
    """Submits the configured Cloud AI Platform training job.

    Args:
      input_dict: Input artifacts, keyed by channel name.
      output_dict: Output artifacts, keyed by channel name.
      exec_properties: Execution properties; must contain the serialized
        AIP config under ``CONFIG_KEY``.
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    # Pop the AIP config so it is not treated as a regular execution
    # property during command-line resolution below.
    aip_config = json_utils.loads(exec_properties.pop(CONFIG_KEY))
    assert aip_config, 'AIP training config is not found.'
    training_job = aip_config.pop(TRAINING_JOB_CONFIG_KEY)
    job_id = aip_config.pop(JOB_ID_CONFIG_KEY)
    project = aip_config.pop(PROJECT_CONFIG_KEY)

    # Resolve parameters.
    training_job['training_input'][
        'args'] = container_common._resolve_container_command_line(  # pylint: disable=protected-access
            cmd_args=training_job['training_input']['args'],
            input_dict=input_dict,
            output_dict=output_dict,
            exec_properties=exec_properties)

    # Fall back to a timestamp-based job id when none was configured.
    training_job['job_id'] = job_id or 'tfx_{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))

    # Invoke CMLE job
    runner._launch_cloud_training(  # pylint: disable=protected-access
        project=project,
        training_job=training_job)
| [
"tensorflow-extended-nonhuman@googlegroups.com"
] | tensorflow-extended-nonhuman@googlegroups.com |
ecf6c154771b8096b621df303c0f3f611bbfc927 | 547bfb8f36b86140f3da6696512f79db00a9dc8e | /main.py | 886353e389d9d3ef97347bd8d2ecf4f9cad4325a | [] | no_license | DhanaVignesh2006/Wikipedia-Search | 61b96e5dda663dc82312c6ac03f2ffa02897512f | 9182162d25a077390e9596a81c1f1add8e665ad7 | refs/heads/main | 2023-05-27T19:57:36.092205 | 2021-05-31T13:06:32 | 2021-05-31T13:06:32 | 372,509,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import wikipedia
class Window(QMainWindow):
    """Main application window: a search box and a read-only results pane
    backed by the ``wikipedia`` package."""

    def __init__(self):
        super().__init__()
        self.InitUI()

    def InitUI(self):
        """Configure window chrome (title, icon, fixed size) and build widgets."""
        self.setWindowTitle("Search")
        # NOTE(review): hard-coded absolute icon path — only resolves on the
        # author's machine; confirm before shipping.
        self.setWindowIcon(QIcon("E:\\Python\\Wikipedia Search\\search.ico"))
        #self.setGeometry(150, 100, 1100, 600)
        self.setFixedSize(800, 600)
        self.setStyleSheet("background-color:#343434;")
        self.UIComponents()
        self.setFocus()
        self.show()

    def UIComponents(self):
        """Create the search field, search button and output text area."""
        self.ent = QLineEdit(self)
        self.ent.setClearButtonEnabled(True)
        # Pressing Enter in the field triggers the same search as the button.
        self.ent.returnPressed.connect(self.search)
        self.ent.setStyleSheet("font-size:25px;font:baloo 2;background-color:#ffffff;border-radius:15px;")
        self.ent.setAlignment(Qt.AlignCenter)
        self.ent.setPlaceholderText("Type Here to Search")
        self.ent.resize(600, 35)
        self.ent.move(100, 30)
        self.btn = QPushButton(self)
        self.btn.setIcon(QIcon("searc.ico"))
        self.btn.setStyleSheet("background-color:#b0b057;border-radius:15px;")
        self.btn.setText("Search")
        self.btn.move(350, 130)
        self.btn.clicked.connect(self.search)
        self.textEdit = QTextEdit(self)
        self.textEdit.setReadOnly(True)
        self.textEdit.move(50, 200)
        self.textEdit.resize(700, 350)
        self.textEdit.setStyleSheet("background-color:#ffffff;font-size:20px;border-radius:15px;color:#000000;")

    def search(self):
        """Fetch the Wikipedia summary for the entered term and display it."""
        self.s = self.ent.text()
        try:
            answer = wikipedia.summary(self.s)
            self.textEdit.setText(answer)
        except:
            # Network failures, disambiguation and missing pages all land here.
            self.textEdit.setText("An error occured")
# Create the Qt application, show the main window and run the event loop
# until the window is closed.
app = QApplication(sys.argv)
win = Window()
sys.exit(app.exec_())
# import cv2
# from pyzbar.pyzbar import decode, ZBarSymbol
# # Image.open('barcode1.png') # if use PIL library
# im = cv2.imread("qrcode1.png") # if use cv2
# # codes = decode(im, symbols=[ZBarSymbol.QRCODE]) # specify code type
# codes = decode(im) # auto detect code type
# print('Decoded:', codes)
# for code in codes:
# data = code.data.decode('ascii')
# print('Data:', code.data.decode('ascii'))
# print('Code Type:', code.type)
# print('BBox:', code.rect)
# x, y, w, h = code.rect.left, code.rect.top, \
# code.rect.width, code.rect.height
# cv2.rectangle(im, (x,y),(x+w, y+h),(255, 0, 0), 8)
# print('Polygon:', code.polygon)
# cv2.rectangle(im, code.polygon[0], code.polygon[1],
# (0, 255, 0), 4)
# txt = '(' + code.type + ') ' + data
# cv2.putText(im, txt, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 50, 255), 2)
# text1 = 'No. Codes: %s' % len(codes)
# cv2.putText(im, text1, (5, 15),
# cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# cv2.imshow('bounding box', im)
# cv2.waitKey(0)
# cv2.destroyAllWindows() | [
"noreply@github.com"
] | noreply@github.com |
d2758af5379788063f46603975341fd96beb6e34 | 8d5653f98ae614e151b714e4d04b786e65028d05 | /icdc/fileio.py | cb47cf8afebcfa342b1e37775d1646f57e859e0c | [] | no_license | maedoc/icdc | 9a4071801968cc6d7999f3a12783454ee8fe5f07 | 11786e17891c3406b0902341861d4032c2054ac2 | refs/heads/master | 2023-03-15T12:41:23.288538 | 2021-03-10T10:53:38 | 2021-03-10T10:53:38 | 28,820,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,570 | py | import os
import io
import configparser
import numpy as np
from scipy.io import loadmat
import mne
from .core import Dataset
class VHDR(Dataset):
    """
    Brain Vision header (.vhdr) file plus its binary data file.

    Parses the INI-style header with :mod:`configparser` and loads the raw
    samples from the referenced binary data file (channel-interleaved
    float32 by default).
    """

    def __init__(self, filename, **readkwds):
        super(VHDR, self).__init__(filename)
        self.wd, _ = os.path.split(filename)
        # Read the header as bytes and decode leniently: on Python 3 a text-mode
        # read yields str (which has no .decode), and vendor files may contain
        # non-ASCII bytes in the decorative preamble.
        with open(self.filename, "rb") as fd:
            self.srclines = [line.decode('ascii', 'ignore') for line in fd.readlines()]
        # config parser expects each section to have header
        # but vhdr has some decorative information at the beginning
        while not self.srclines[0].startswith("["):
            self.srclines.pop(0)
        self.sio = io.StringIO()
        self.sio.write("\n".join(self.srclines))
        self.sio.seek(0)
        self.cp = configparser.ConfigParser()
        # read_file() replaces the deprecated readfp() alias (removed in 3.12).
        self.cp.read_file(self.sio)
        # Expose every "Common Infos" option (samplinginterval, datafile, ...)
        # as an attribute of this object.
        for opt in self.cp.options("Common Infos"):
            setattr(self, opt, self.cp.get("Common Infos", opt))
        self.binaryformat = self.cp.get("Binary Infos", "BinaryFormat")
        # Channel entries look like "Ch1=Fp1,,0.1"; keep only the label.
        self.labels = [
            self.cp.get("Channel Infos", o).split(",")[0]
            for o in self.cp.options("Channel Infos")
        ]
        # Sampling interval is given in microseconds.
        self.fs = self.srate = 1e6 / float(self.samplinginterval)
        self.nchan = int(self.numberofchannels)
        # important if not in same directory
        self.datafile = os.path.join(self.wd, self.datafile)
        self.read_data(**readkwds)

    def read_data(self, mmap=False, dt="float32", mode="r"):
        """
        VHDR stores data in channel contiguous way such that reading disparate pieces
        in time is fast, when using memmap.
        """
        if mmap:
            ary = np.memmap(self.datafile, dt, mode)
        else:
            ary = np.fromfile(self.datafile, dt)
        # Samples are interleaved per channel; transpose to (nchan, nsamp).
        self.data = ary.reshape((-1, self.nchan)).T
        self.nsamp = self.data.shape[1]
class EEGLAB(Dataset):
    """EEGLAB ``.set`` dataset: header in the .set file, samples in the
    companion ``.fdt`` file."""

    def __init__(self, filename):
        super(EEGLAB, self).__init__(filename)
        self.mat = loadmat(filename)
        eeg = self.mat["EEG"]
        self.fs = eeg["srate"][0, 0][0, 0]
        self.nsamp = eeg["pnts"][0, 0][0, 0]
        # The raw float32 samples live next to the .set file with a .fdt suffix.
        fdt_name = ".".join(filename.split(".")[:-1]) + ".fdt"
        samples = np.fromfile(fdt_name, dtype=np.float32)
        self.data = samples.reshape((self.nsamp, -1)).T
        self.nchan = self.data.shape[0]
        self.labels = [chan[0] for chan in eeg["chanlocs"][0, 0]["labels"][0]]
class MATFile(Dataset):
    """Generic MATLAB ``.mat`` recording holding ``fs``, ``data`` and
    ``labels`` variables."""

    def __init__(self, filename):
        super(MATFile, self).__init__(filename)
        self.mat = loadmat(filename)
        # Force the scalar sampling rate to float.
        self.fs = self.mat["fs"][0, 0] * 1.0
        self.data = self.mat["data"]
        self.nchan = self.data.shape[0]
        self.nsamp = self.data.shape[1]
        self.labels = [entry[0] for entry in self.mat["labels"][0]]
class NPZFile(Dataset):
    """NumPy ``.npz`` archive with ``fs``, ``data`` and ``labels`` arrays."""

    def __init__(self, filename):
        super(NPZFile, self).__init__(filename)
        archive = np.load(filename)
        self.fs = archive["fs"].flat[0]
        self.data = archive["data"]
        self.nchan = self.data.shape[0]
        self.nsamp = self.data.shape[1]
        self.labels = list(archive["labels"])
# MNE has a bunch of file readers; MNEReader is a base class for wrapping them.
class MNEReader:
    """Mixin for Dataset subclasses that load a recording through MNE.

    Subclasses set ``_read_raw`` to one of the ``mne.io.read_raw_*``
    functions.  Uses cooperative ``super().__init__`` so it composes with
    ``Dataset`` under multiple inheritance (see EdfFile / FifFile below).

    NOTE(review): a bare function assigned to ``_read_raw`` becomes a bound
    method, so ``self._read_raw(filename)`` would receive ``self`` as the
    filename argument — subclasses should wrap it in ``staticmethod``; confirm.
    """

    def __init__(self, filename):
        super().__init__(filename)
        raw = self._read_raw(filename)
        data, times = raw[:]
        # Sampling rate recovered from the (assumed uniform) time axis.
        self.fs = 1.0 / (times[1] - times[0])
        self.data = data
        self.nsamp = len(times)
        self.labels = raw.info['ch_names']
        self.nchan = len(self.labels)
# then just add a new class per read function
# cf https://mne.tools/stable/search.html?q=read_raw to see
# the other functions gdf, ctf, bti, etc
class EdfFile(Dataset, MNEReader):
    """European Data Format (.edf) recording, loaded through MNE."""

    # staticmethod prevents the bare function from being bound to the
    # instance: MNEReader calls self._read_raw(filename), and without the
    # wrapper `self` would be passed as the first (filename) argument.
    _read_raw = staticmethod(mne.io.read_raw_edf)
class FifFile(Dataset, MNEReader):
    """Neuromag/Elekta ``.fif`` recording, loaded through MNE."""

    # staticmethod prevents the bare function from being bound to the
    # instance (MNEReader calls self._read_raw(filename)).
    _read_raw = staticmethod(mne.io.read_raw_fif)
class MarkersCSV(object):
    """Placeholder for a CSV marker-file reader (not yet implemented)."""
    pass
"""
try:
import openpyxl
class XLS(object):
# TODO useful to export data in this format
def read(filename):
wb = openpyxl.load_workbook(filename)
sheets = []
for sheet in wb.worksheets:
values = [[cell.value for cell in col] for col in sheet.columns]
array = numpy.array(values, object).T
sheets.append(array)
return sheets
def write(filename):
pass
"""
| [
"mmwoodman@gmail.com"
] | mmwoodman@gmail.com |
1674abf712c6b066af59fe0fea6ab7e259a5eb39 | 2d74104aaa132896a65ea0032951eee5d4c97840 | /chemman/msds_collector/migrations/0003_uploadedmsds_token.py | 4bd9419402891cf29b641c4c80e6ef4bb2b7ea19 | [] | no_license | Whitie/ChemManager | 6e228e8713f9dfeca21adbd3e9a65c8871a822bc | d40792361527219514b1b4cc03718ea7c2a92777 | refs/heads/master | 2023-06-09T09:29:41.626087 | 2022-12-14T13:29:44 | 2022-12-14T13:29:44 | 189,994,861 | 0 | 0 | null | 2023-04-21T21:40:13 | 2019-06-03T11:47:23 | Python | UTF-8 | Python | false | false | 553 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-26 06:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-editable ``token`` security field to UploadedMSDS.

    ``preserve_default=False`` means the '12345' default is applied once to
    existing rows only and is not kept on the model.
    """

    dependencies = [
        ('msds_collector', '0002_parseddata'),
    ]

    operations = [
        migrations.AddField(
            model_name='uploadedmsds',
            name='token',
            field=models.CharField(default='12345', editable=False, max_length=64, verbose_name='Security token'),
            preserve_default=False,
        ),
    ]
| [
"weimann.th@yahoo.com"
] | weimann.th@yahoo.com |
3dae60730cb9477a444a89605681bbdc3c8e8fab | b4de71539c4b7a37d2c8877ae7af61cb84a43d89 | /data_preprocessing/outlier_detection/duplicates.py | 1b62ad6fa0c3f787bb9d47cacc3f7895f145e2c7 | [] | no_license | Padam-0/irish-property-price-explorer | d4826bd1b636e96e3477c809730d65de1d78f29d | d985f664a51934e6495e7619282735064ac4cf87 | refs/heads/master | 2021-01-20T21:35:24.082685 | 2017-09-01T12:38:14 | 2017-09-01T12:38:14 | 101,771,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | import pandas as pd
import numpy as np
import re
def exact_duplicates(df):
    """Return the index of rows that exactly duplicate an earlier row.

    A row is a duplicate when sale_date, address and price all repeat; the
    first occurrence is kept.  The original version computed this index but
    discarded it — it is now returned so callers can drop the rows.

    :param df: property sales DataFrame with 'sale_date', 'address', 'price'.
    :return: pandas Index of duplicate row labels, sorted by address.
    """
    dup_mask = df.duplicated(['sale_date', 'address', 'price'])
    df_dups = df[dup_mask].sort_values(by='address')
    return df_dups.index
def address_date_duplicates(df):
    """
    Flag repeated (address, price) sales that occur close together in time.

    Converts df['sale_date'] to datetime in place.  Builds a local frame of
    all duplicated address/price rows with a 'duplicates_of_interest' flag
    set to 1 when consecutive sales of the same address are less than 7 days
    apart.  Properties are only flagged, never removed — human review is
    required before anything is dropped from the data set.
    """
    # In-place conversion so downstream functions see datetime values too.
    df['sale_date'] = pd.to_datetime(df['sale_date'])
    dup_mask = df.duplicated(['address', 'price'], keep=False)
    df_dups = df[dup_mask].sort_values(by=['address', 'sale_date'])
    # Express each sale date as days since the earliest duplicate sale.
    earliest = df_dups['sale_date'].min()
    df_dups['date_int'] = (df_dups['sale_date'] - earliest).dt.days
    # Day gap between consecutive sales of the same address.
    df_dups['date_delta'] = df_dups.groupby(['address'])['date_int'].diff()
    # Flag gaps under the 7-day threshold (NaN deltas compare False -> 0).
    df_dups['duplicates_of_interest'] = df_dups['date_delta'].lt(7).astype(int)
def address_price_duplicates(df, stats):
    """
    Compare same-day repeat sales of an address against the electoral
    division's mean price.

    Fixes: the original used ``DataFrame.ix``, which was removed in
    pandas 1.0, and assigned a ``price_delta`` diff that was immediately
    overwritten (dead code — removed).

    Properties are only inspected, never removed — human review is required
    before anything is dropped from the data set.

    :param df: sales DataFrame with 'address', 'sale_date', 'price', 'ed'.
    :param stats: per-electoral-division stats indexed by division, with a
        'mean' column.
    """
    dup_mask = df.duplicated(['address', 'sale_date'], keep=False)
    # .copy() so the label-based assignments below do not touch `df`.
    df_dups = df[dup_mask].sort_values(by=['address', 'price']).copy()
    # Attach the electoral-division mean where one is known.
    for i, row in df_dups.iterrows():
        if row['ed'] in stats.index:
            df_dups.loc[i, 'ed_mean'] = int(stats.loc[row['ed'], 'mean'])
    # Absolute deviation of the sale price from the division mean.
    df_dups['price_delta'] = abs(df_dups['price'] - df_dups['ed_mean'])
def date_price_duplicates(df):
    """
    Inspect sales sharing price, date and electoral division, comparing
    addresses with their house/apartment numbers stripped.

    Properties are only inspected, never removed — human review is required
    before anything is dropped from the data set.
    """
    dup_mask = df.duplicated(['price', 'sale_date', 'ed'])
    df_dups = df[dup_mask].sort_values(by=['sale_date', 'price'])
    # Compare street names only: drop the leading house/apartment digits.
    df_dups['address_strip'] = df_dups['address'].map(int_strip)
def int_strip(s):
    """Return *s* with every run of decimal digits removed (e.g. house numbers)."""
    digit_run = re.compile(r'\d+')
    return digit_run.sub('', s)
def main():
    """Load the outlier data and run every duplicate-detection pass.

    Reads 'outlier_data.csv' (the candidate rows), optionally removes rows
    listed in 'known_outliers.csv', loads per-electoral-division statistics
    from 'ed_mean_std.csv', then runs the four detectors.
    """
    # Import outlier data
    df = pd.read_csv('outlier_data.csv', index_col=0)

    # Remove known outliers — the file may legitimately not exist yet, so
    # only that specific failure is swallowed (the original bare `except`
    # hid every error, including real parse bugs).
    try:
        bad_data = pd.read_csv('known_outliers.csv', index_col=0)
    except FileNotFoundError:
        pass
    else:
        # Vectorized replacement for the original per-row iterrows/drop loop.
        df = df.drop(df.index.intersection(bad_data.index))

    # Import mean and standard deviation information for each electoral division
    stats = pd.read_csv('ed_mean_std.csv', index_col=0)

    # Functions to isolate duplicates
    exact_duplicates(df)
    address_date_duplicates(df)
    address_price_duplicates(df, stats)
    date_price_duplicates(df)
if __name__ == '__main__':
main() | [
"andy.mcsweeney91@gmail.com"
] | andy.mcsweeney91@gmail.com |
f4848810b38859463964d9f7a4eab75d41ce0d23 | 8fd700902d6948d79588260d4c4aa56eb6fab6b7 | /tools/new_ci/base_function_module/interface/db_advance_package.py | 3b231e5da3d53eb50cae40218b912b5f28e49124 | [] | no_license | dsxyy/tecs | 81556f0731480f82d90afcef80fb6bf8b63377ca | bdb7f8455f2082afe9a16a71786b3ad8add296e7 | refs/heads/master | 2021-01-21T11:07:33.068222 | 2017-03-01T08:24:42 | 2017-03-01T08:24:42 | 83,524,822 | 1 | 0 | null | null | null | null | GB18030 | Python | false | false | 10,211 | py | #!/usr/bin/python
# -*- coding: gb2312 -*-
#*******************************************************************************
# Copyright (c) 2012,中兴通讯股份有限公司,All rights reserved.
#
# 文件名称:ssh_advance_package.py
# 测试内容:测试环境的数据库恢复和备份方法
# 当前版本:1.0
# 作 者:李孝成
# 完成日期:2012/12/24
#*******************************************************************************/
import sys,os,getopt,time
from base_interface.ssh_cmd import *
from base_interface.rpc.contrib.tecs_common import *
from base_interface.postgresql import *
# TECS虚拟机类
class tecs_vm_info_from_db:
    """Helper for inspecting TECS virtual-machine and portable-disk state
    directly in PostgreSQL: the top-level TC database plus the per-cluster
    CC databases (Python 2 code)."""

    def __init__(self, config_para):
        # config_para supplies server_addr, test_cloud and the VM
        # operation-result constants referenced below.
        self.config_para = config_para

    def __del__(self):
        pass;

    # Get the vids of all deployed VMs in the TC.
    # NOTE: db_ctrl is created per call and must NOT be hoisted into
    # __init__ — a connection held open would make the outer database
    # restore step fail.
    def get_all_deploy_vid(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        vid = []
        cluster_name = db_ctrl.query_from_tc("cluster_pool", "name", "1=1")
        for cluster in cluster_name:
            vid.extend(db_ctrl.query_from_cc(cluster, "vm_pool", "vid", "1=1"))
        return vid

    # Look up the name of the cluster a VM is deployed in, by vid.
    def get_cluster_name_by_vid(self, vid):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        names = db_ctrl.query_from_tc("vmcfg_pool", "deployed_cluster", "oid=%d" %long(vid))
        if len(names) == 0:
            return None
        return names[0]

    # Find the first VM whose name matches vm_name; returns -1 if none.
    def get_vmid_by_vm_name(self, vm_name):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        vids = db_ctrl.query_from_tc("vmcfg_pool", "oid", "name='%s'" % str(vm_name))
        if len(vids) == 0:
            return -1
        return vids[0]

    # Poll the VM's last-operation result until it leaves the "running"
    # state or time_limit seconds elapse.
    # Returns one of: "Fail", "NULL", "TimeOut", "Success", "Faill".
    # NOTE(review): "Faill" looks like a typo, but callers may compare
    # against this exact string — confirm before changing it.
    def check_vm_op_result_time_out(self, vid, time_limit):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        time_count = 0
        cluster_name = self.get_cluster_name_by_vid(vid)
        if cluster_name == None:
            return "Fail"
        state = db_ctrl.query_from_cc(cluster_name, "web_view_vmstate", "last_op_result", "vid=%d" % long(vid))
        if len(state) == 0:
            return "NULL"
        while state[0] == self.config_para.vm_op_result_running:
            time.sleep(2)
            time_count += 2
            if time_count >= time_limit:
                return "TimeOut"
            state = db_ctrl.query_from_cc(cluster_name, "web_view_vmstate", "last_op_result", "vid=%d" % long(vid))
        if state[0] == self.config_para.vm_op_result_success:
            return "Success"
        else:
            return "Faill"

    # Within time_limit, check that the VM's cancel-table row is gone.
    # cluster_name must be passed in rather than looked up here, because the
    # TC record no longer exists once the cancel has succeeded.
    def check_vm_cancel_success_with_time_out(self, cluster_name, vid, time_limit):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        state = db_ctrl.query_from_cc(cluster_name, "vm_cancel_pool", "oid", "vid=%d" % long(vid))
        time_count = 0
        while len(state) != 0:
            time_delay(2, "check db item not exist from cc")
            time_count += 2
            if time_count >= time_limit:
                return False
            state = db_ctrl.query_from_cc(cluster_name, "vm_cancel_pool", "oid", "vid=%d" % long(vid))
        return True

    # Internal helper, wrapped by the timeout variants below; performs
    # little argument validation by design.
    def __check_vm_state_with_time_out(self, vid, state_d, time_limit):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        cluster_name = self.get_cluster_name_by_vid(vid)
        state = db_ctrl.query_from_cc(cluster_name, "web_view_vmstate", "state", "vid=%d" % long(vid))
        time_count = 0
        while state[0] != state_d:
            time_delay(2, "check vm state ,need wait")
            time_count += 2
            if time_count > time_limit:
                return False
            state = db_ctrl.query_from_cc(cluster_name, "web_view_vmstate", "state", "vid=%d" % long(vid))
        return True

    # Within time_limit, check whether the last VM operation succeeded.
    def check_vm_op_success_with_time_out(self, vid, time_limit):
        if self.check_vm_op_result_time_out(vid, time_limit) == "Success":
            return True
        return False

    # Within time_limit, check that the VM reaches the given state; first
    # waits for the current operation to finish — the state is not read
    # until the operation has completed successfully.
    def vm_state_equal_check_with_time_out(self, vid, state, time_limit):
        if self.check_vm_op_success_with_time_out(vid, time_limit) == False:
            return False
        return self.__check_vm_state_with_time_out(vid, state, time_limit)

    # Interpret a row count according to the operation-type flag:
    # flag 0/2 -> success when the row is gone; flag 1/3 -> success when it exists.
    def check_pd_op_result(self, exist, flag):
        if flag == 0 or flag == 2:
            if exist == 0:
                return True
            else:
                return False
        else:
            if exist != 0:
                return True
            else:
                return False

    # Within time_limit, poll until the last portable-disk operation
    # completes.  flag 0/1 queries the TC (create/delete), other values
    # query the VM's cluster CC (attach/detach).
    # NOTE(review): if the condition is already satisfied on entry, the
    # while body never runs and this returns None instead of True — the
    # caller below happens to treat None as success; confirm intended.
    def check_pd_op_result_time_out(self, request_id, vid, time_limit, flag):
        time_count = 0
        cluster_name = ""
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        if flag == 0 or flag == 1:
            names = db_ctrl.query_from_tc("storage_user_volume", "uid", "request_id='%s'" % str(request_id))
        else:
            cluster_name = self.get_cluster_name_by_vid(vid)
            names = db_ctrl.query_from_cc(cluster_name,"vm_disk", "vid", "request_id='%s'" % str(request_id))
        while self.check_pd_op_result(len(names), flag):
            time.sleep(2)
            time_count += 2
            if time_count >= time_limit:
                return False
            if flag == 0 or flag == 1:
                names = db_ctrl.query_from_tc("storage_user_volume", "uid", "request_id='%s'" % str(request_id))
            else:
                names = db_ctrl.query_from_cc(cluster_name,"vm_disk", "vid", "request_id='%s'" % str(request_id))
            if self.check_pd_op_result(len(names), flag):
                continue
            else:
                return True

    # Within time_limit, check the portable-disk state (see the NOTE on
    # check_pd_op_result_time_out about its None return path).
    def pd_state_equal_check_with_time_out(self, request_id, vid, time_limit, flag):
        if self.check_pd_op_result_time_out(request_id, vid, time_limit, flag) == False:
            return False
        return True

    # Build an arbitrary storage-array <-> cluster mapping, for test use.
    def pd_storage_cluster_map(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        sids = db_ctrl.query_from_tc("storage_adaptor", "sid", "1=1")
        if len(sids) == 0:
            print "storage_adaptor is empty, cannot create portable disk!"
            return False
        clusters = db_ctrl.query_from_tc("cluster_pool", "name", "1=1")
        if len(clusters) == 0:
            print "cluster_pool is empty, cannot create portable disk!"
            return False
        ret = db_ctrl.add_to_storage_cluster(long(sids[0]),clusters[0])
        if ret == True:
            return clusters[0]
        print "add_to_storage_cluster False, cannot create portable disk! ret = %d!" % ret
        return False

    # Check whether a storage-array/cluster mapping exists; create one if not.
    # NOTE(review): returns None (no explicit return) when mapping creation
    # fails — confirm callers handle that.
    def check_pd_storage_cluster(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        clusters = db_ctrl.query_from_tc("storage_cluster", "cluster_name", "1=1")
        if len(clusters) != 0:
            return clusters[0]
        ret = self.pd_storage_cluster_map()
        if ret != False:
            return ret

    # Fetch an arbitrary portable disk's request_id; False when none exists.
    def pd_get_request_id(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        requestid = db_ctrl.query_from_tc("storage_user_volume", "request_id", "1=1")
        if len(requestid) != 0:
            return requestid[0]
        return False

    # Fetch the request_id and vid of an arbitrary already-attached
    # portable disk; returns ("", -1) when none is attached.
    def pd_get_attach_request_id_and_vid(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        requestid = db_ctrl.query_from_cc("tecscluster","vm_disk", "request_id", "request_id != ''")
        if len(requestid) != 0:
            vid = db_ctrl.query_from_cc("tecscluster","vm_disk", "vid", "request_id = '%s'" % requestid[0])
            return requestid[0],vid[0]
        return "",-1

    # After deploying VMs, print the 10 tables occupying the most space on
    # the CC (tables with long, frequently updated records).
    def pd_get_table_size_info(self):
        db_ctrl = postgresql_db(self.config_para.server_addr, "5432", "tecs", "tecs", self.config_para.test_cloud)
        tabname = "pg_stat_user_tables"
        column = "pg_relation_size(relid) as tablesize,schemaname,relname,n_live_tup"
        condition = "1=1 order by tablesize desc limit 10"
        results = db_ctrl.query_from_cc_ex("tecscluster", tabname, column, condition)
        print "----------------------------------------"
        for row in results:
            for r in row:
                print r,
            print "\n"
        print "----------------------------------------"
| [
"yao.yuan3@zte.com.cn"
] | yao.yuan3@zte.com.cn |
50ce2f47306a4a5639b530dcc16fee8c1ee8a21b | 1a4eb078abc3c4e19796ee0a6b56c972889cf7c8 | /niadevice.py | 893eccbccc4321ccccdaa2992d097f2a4df50093 | [] | no_license | pborky/pyneuro | 6397db2d51d0cb60501b1fb9eb181196b85513de | 0efa007af70e8dba80ef6549c1a9f5c115fe4758 | refs/heads/master | 2020-05-18T04:34:35.607845 | 2011-09-28T17:21:59 | 2011-09-28T17:21:59 | 2,410,066 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | #!/bin/env python
from pyneuro import NeuroDeviceUSB,NeuroDeviceError
NIA_VENDOR = 0x1234   # USB vendor id of the OCZ NIA
NIA_PRODUCT = 0x0000  # USB product id of the OCZ NIA
# BIOSEMI-style header string advertised for the raw NIA channel.
NIA_HEADER = '''0 OCZ NIA USER OCZ NEURAL IMPULSE ACTUATOR 03.03.1017.51.36512 BIOSEMI -1 1 1 NIA RAW UNIVERSAL mV -32768 32767 -32768 32767 No prefiltering, raw data from NIA 4000 '''
class NIAError(NeuroDeviceError):
    """Raised when a NIA USB packet is inconsistent or malformed."""
    pass
class NIADevice(NeuroDeviceUSB):
    r"""High-level reader for the OCZ NIA device (pyneuro interface)."""

    def __init__(self, idVendor = NIA_VENDOR, idProduct = NIA_PRODUCT, header = NIA_HEADER):
        NeuroDeviceUSB.__init__(self, idVendor = idVendor, idProduct = idProduct, header = header)
        # Monotonically increasing sample counter across getData() calls.
        self.sequence = 0

    def getHeader(self):
        """Return the BIOSEMI-style header string for this device."""
        return self.header

    def getData(self):
        """Read one 55-byte USB packet and return its samples as
        (sequence, channel, value) tuples."""
        packet = self.endpoint.read(self.endpoint.wMaxPacketSize)
        if len(packet) != 55:
            raise NIAError("Received data is inconsistent. Unexpected length: {0}".format(len(packet)))
        # Byte 54 holds the number of 3-byte samples in this packet.
        nsamp = packet[54]
        if nsamp > 16:
            raise NIAError("Received data is inconsistent. Unexpected number of samples: {0}".format(nsamp))
        samples = []
        for offset in range(0, 3 * nsamp, 3):
            # Little-endian 24-bit sample value.
            raw = packet[offset + 2] << 16 | packet[offset + 1] << 8 | packet[offset]
            samples.append((self.sequence, 1, (float(raw) / 256.0) - 32768))
            self.sequence += 1
        return samples
| [
"pborky@pborky.sk"
] | pborky@pborky.sk |
6aaa96fca2f0988e8a953d3ea9d73960f446d645 | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/linear_model/_omp.py | 5ea2e28c9530e946a54473c50100917878145894 | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 3,050 | py | from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import linear_model
import numpy as np
import typing
@scope.define
def sklearn_OrthogonalMatchingPursuit(*args, **kwargs):
    # pyll scope wrapper so hyperopt search graphs can construct this
    # sklearn estimator lazily from sampled hyperparameters.
    return linear_model.OrthogonalMatchingPursuit(*args, **kwargs)
@scope.define
def sklearn_OrthogonalMatchingPursuitCV(*args, **kwargs):
    # pyll scope wrapper for the cross-validated OMP estimator.
    return linear_model.OrthogonalMatchingPursuitCV(*args, **kwargs)
def orthogonal_matching_pursuit(name: str,
                                n_nonzero_coefs: int = None,
                                tol: typing.Union[float, Apply] = None,
                                fit_intercept: bool = True,
                                precompute: typing.Union[str, bool] = "auto"
                                ):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.linear_model.OrthogonalMatchingPursuit model.

    Args:
        name: name | str
        n_nonzero_coefs: target number non-zero coefficients | int
        tol: maximum norm of residual | float
        fit_intercept: whether to calculate intercept for model | bool
        precompute: whether to use precomputed Gram and Xy matrix | str, bool
    """
    def _name(msg):
        return f"{name}.orthogonal_matching_pursuit_{msg}"

    # Default search space for tol: log-uniform over [1e-5, 1e-2].
    if tol is None:
        tol = hp.loguniform(_name("tol"), np.log(1e-5), np.log(1e-2))
    return scope.sklearn_OrthogonalMatchingPursuit(
        n_nonzero_coefs=n_nonzero_coefs,
        tol=tol,
        fit_intercept=fit_intercept,
        precompute=precompute,
    )
def orthogonal_matching_pursuit_cv(name: str,
                                   copy: bool = True,
                                   fit_intercept: bool = True,
                                   max_iter: typing.Union[int, Apply] = None,
                                   cv: typing.Union[int, callable, typing.Generator, Apply] = None,
                                   n_jobs: int = 1,
                                   verbose: typing.Union[bool, int] = False
                                   ):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.linear_model.OrthogonalMatchingPursuitCV model.

    Args:
        name: name | str
        copy: whether design matrix must be copied | bool
        fit_intercept: whether to calculate intercept for model | bool
        max_iter: maximum number of iterations | int
        cv: cross-validation splitting strategy| int, callable or generator
        n_jobs: number of CPUs during cv | int
        verbose: verbosity amount | bool, int
    """
    def _name(msg):
        return f"{name}.orthogonal_matching_pursuit_cv_{msg}"

    # Default search space for cv: weighted fold counts centred on 5-fold CV.
    if cv is None:
        cv = hp.pchoice(_name("cv"), [(0.0625, 3), (0.175, 4), (0.525, 5), (0.175, 6), (0.0625, 7)])
    return scope.sklearn_OrthogonalMatchingPursuitCV(
        copy=copy,
        fit_intercept=fit_intercept,
        max_iter=max_iter,
        cv=cv,
        n_jobs=n_jobs,
        verbose=verbose,
    )
| [
"38689620+mandjevant@users.noreply.github.com"
] | 38689620+mandjevant@users.noreply.github.com |
592fe916dff6173f79878fe433cb9c22429311b2 | 5158a5d5e5c9f499d9de2d7e2c9d7400b8ac7266 | /svm_basic.py | 197727617a6fda582ce7c12cdd7ded339a52ba3c | [] | no_license | PriyamJPatel/cs686-2018-01 | 9637f4bec009df85e25e35104dfea9ab6c2c1e7c | fe678d22ec8994f4d1b35ffde7e3298700752cf0 | refs/heads/master | 2021-04-12T10:55:33.196650 | 2018-05-11T06:30:39 | 2018-05-11T06:30:39 | 126,392,960 | 0 | 0 | null | 2018-03-22T20:44:37 | 2018-03-22T20:44:34 | Python | UTF-8 | Python | false | false | 8,662 | py | from numpy import *
from time import sleep
from classifier import classifier
def loadDataSet(fileName):
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split(',')
dataMat.append([float(lineArr[0]), float(lineArr[1])])
labelMat.append(float(lineArr[2]))
return dataMat,labelMat
def selectJrand(i,m):
j=i #we want to select any J not equal to i
while (j==i):
j = int(random.uniform(0,m))
return j
def clipAlpha(aj,H,L):
if aj > H:
aj = H
if L > aj:
aj = L
return aj
'''#######********************************
Non-Kernel VErsions below
'''#######********************************
class optStructK:
def __init__(self,dataMatIn, classLabels, C, toler): # Initialize the structure with the parameters
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = shape(dataMatIn)[0]
self.alphas = mat(zeros((self.m,1)))
self.b = 0
self.eCache = mat(zeros((self.m,2))) #first column is valid flag
def calcEkK(oS, k):
fXk = float(multiply(oS.alphas,oS.labelMat).T*(oS.X*oS.X[k,:].T)) + oS.b
Ek = fXk - float(oS.labelMat[k])
return Ek
def selectJK(i, oS, Ei): #this is the second choice -heurstic, and calcs Ej
maxK = -1; maxDeltaE = 0; Ej = 0
oS.eCache[i] = [1,Ei] #set valid #choose the alpha that gives the maximum delta E
validEcacheList = nonzero(oS.eCache[:,0].A)[0]
if (len(validEcacheList)) > 1:
for k in validEcacheList: #loop through valid Ecache values and find the one that maximizes delta E
if k == i: continue #don't calc for i, waste of time
Ek = calcEkK(oS, k)
deltaE = abs(Ei - Ek)
if (deltaE > maxDeltaE):
maxK = k; maxDeltaE = deltaE; Ej = Ek
return maxK, Ej
else: #in this case (first time around) we don't have any valid eCache values
j = selectJrand(i, oS.m)
Ej = calcEkK(oS, j)
return j, Ej
def updateEkK(oS, k):#after any alpha has changed update the new value in the cache
Ek = calcEkK(oS, k)
oS.eCache[k] = [1,Ek]
def innerLK(i, oS):
Ei = calcEkK(oS, i)
if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
j,Ej = selectJK(i, oS, Ei) #this has been changed from selectJrand
alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy();
if (oS.labelMat[i] != oS.labelMat[j]):
L = max(0, oS.alphas[j] - oS.alphas[i])
H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
else:
L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
H = min(oS.C, oS.alphas[j] + oS.alphas[i])
if L==H:
# print("L==H");
return 0
eta = 2.0 * oS.X[i,:]*oS.X[j,:].T - oS.X[i,:]*oS.X[i,:].T - oS.X[j,:]*oS.X[j,:].T
if eta >= 0:
# print("eta>=0");
return 0
oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
updateEkK(oS, j) #added this for the Ecache
if (abs(oS.alphas[j] - alphaJold) < 0.00001):
# print("j not moving enough");
return 0
oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])#update i by the same amount as j
updateEkK(oS, i) #added this for the Ecache #the update is in the oppostie direction
b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
else: oS.b = (b1 + b2)/2.0
return 1
else: return 0
def smoPK(dataMatIn, classLabels, C, toler, maxIter):
    """Full Platt SMO: alternate full sweeps with non-bound-alpha sweeps
    until no pair changes or maxIter sweeps have been done.

    Returns (b, alphas)."""
    oS = optStructK(mat(dataMatIn), mat(classLabels).transpose(), C, toler)
    iter = 0
    entireSet = True
    alphaPairsChanged = 0
    while iter < maxIter and (alphaPairsChanged > 0 or entireSet):
        alphaPairsChanged = 0
        if entireSet:
            # Sweep over every sample.
            for idx in range(oS.m):
                alphaPairsChanged += innerLK(idx, oS)
            iter += 1
        else:
            # Sweep only over non-bound alphas (0 < alpha < C).
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for idx in nonBoundIs:
                alphaPairsChanged += innerLK(idx, oS)
            iter += 1
        # Toggle between full-set and non-bound sweeps.
        if entireSet:
            entireSet = False
        elif alphaPairsChanged == 0:
            entireSet = True
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    """Recover the weight vector w = sum_i alpha_i * y_i * x_i."""
    X = mat(dataArr)
    labelMat = mat(classLabels).transpose()
    rows, cols = shape(X)
    w = zeros((cols, 1))
    for row in range(rows):
        w += multiply(alphas[row] * labelMat[row], X[row, :].T)
    return w
def plot_fit(fit_line, datamatrix, labelmatrix):
    """Scatter-plot both classes, draw the decision line given by
    fit_line = [b, w1, w2] and shade the two half-planes.

    Blocks until the matplotlib window is closed.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    weights = fit_line
    dataarray = np.asarray(datamatrix)
    n = dataarray.shape[0]
    # Keep track of the two classes in different arrays so they can be plotted later...
    xcord1 = []
    ycord1 = []
    xcord2 = []
    ycord2 = []
    for i in range(n):
        if int(labelmatrix[i]) == 1:
            xcord1.append(dataarray[i, 0])
            ycord1.append(dataarray[i, 1])
        else:
            xcord2.append(dataarray[i, 0])
            ycord2.append(dataarray[i, 1])
    fig = plt.figure()
    # Plot the data as points with different colours
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    # Plot the best-fit line
    x = np.arange(-2.0, 7.5, 0.1)
    # Decision boundary: w0 + w1*x + w2*y = 0  =>  y = (-w0 - w1*x) / w2.
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    # Shade the two decision regions on either side of the line.
    plt.fill_between(x,y,y.min()-1,color='red',alpha=0.50)
    plt.fill_between(x,y,y.max()+1,color='blue',alpha=0.30)
    plt.xlim(-1.5,7)
    plt.ylim(-1.5,7)
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.show()
def plot_withoutline(datamatrix, labelmatrix):
    """Scatter-plot the two classes without drawing a decision boundary.

    Blocks until the matplotlib window is closed.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    points = np.asarray(datamatrix)
    # Split coordinates per class so each class gets its own colour/marker.
    xcord1, ycord1 = [], []
    xcord2, ycord2 = [], []
    for idx in range(points.shape[0]):
        x_val, y_val = points[idx, 0], points[idx, 1]
        if int(labelmatrix[idx]) == 1:
            xcord1.append(x_val)
            ycord1.append(y_val)
        else:
            xcord2.append(x_val)
            ycord2.append(y_val)
    ax = plt.figure().add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    plt.xlim(-1.5, 7)
    plt.ylim(-1.5, 7)
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.show()
class svm_basic(classifier):
    """Linear SVM classifier trained with the full Platt SMO algorithm."""
    def __init__(self):
        # Training hyperparameters.  fit() now uses these attributes;
        # previously __init__ set C=1/maxIter=50 but fit() ignored them and
        # passed hard-coded 0.6/0.001/40 to smoPK.  The defaults below are
        # the values that were actually used, so behaviour is unchanged.
        self.C = 0.6
        self.toler = 0.001
        self.maxIter = 40
        self.ws = None          # learned weight vector (n x 1) after fit()
        self.b = None           # learned bias term after fit()
        self.listweights = []   # [b, w0, w1] as plain values, for plotting
    def fit(self, X, Y):
        """Train on features X and labels Y (+1/-1); returns [b, w0, w1].

        Also pops up a matplotlib plot of the fitted decision boundary.
        """
        self.b, alphas = smoPK(X, Y, self.C, self.toler, self.maxIter)
        self.ws = calcWs(alphas, X, Y)
        self.listweights.append(self.b.getA()[0])
        self.listweights.append(self.ws[0][0])
        self.listweights.append(self.ws[1][0])
        # Visualise the learned separating line (blocks until closed).
        plot_fit(self.listweights, X, Y)
        return self.listweights
    def predict(self, X):
        """Return a +1/-1 prediction for each row of X."""
        hypnew = []
        dataMat = mat(X)
        hyp = dataMat * mat(self.ws) + self.b
        for i in range(len(X)):
            hypnew.append(1 if hyp[i] > 0 else -1)
        return hypnew
if __name__ == '__main__':
    # Train on the first 80% of the rows; keep the remainder as a hold-out.
    X, Y = loadDataSet('linearly_separable.csv')
    split = int(len(Y) * 0.8)
    train_X, train_Y = X[:split], Y[:split]
    test_X, test_Y = X[split:], Y[split:]
    svm = svm_basic()
    listweights = svm.fit(train_X, train_Y)
    plot_withoutline(train_X, train_Y)
| [
"pjpatel3@dons.usfca.edu"
] | pjpatel3@dons.usfca.edu |
0a6a59073b7043bda4ed6a38ceee5501721c11b1 | db6533cae5a58becf3163d750cd890c73035d0c5 | /set_mark/link.py | cc0f91e2b4be47254492f099864a57c07bc33132 | [
"BSD-3-Clause"
] | permissive | goranmabi/openNAMU | 7e076f18279614a69a7969e22cf3b9fa31605cb5 | 1c0781cb6034040032122df2514e6d8baecc6120 | refs/heads/master | 2021-05-16T16:15:14.076942 | 2018-02-01T10:04:09 | 2018-02-01T10:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,698 | py | import sqlite3
import re
from urllib import parse
import hashlib
def url_pas(data):
    """Percent-encode *data* for use in a URL path, also encoding '/' as %2F."""
    # safe='' makes quote() encode '/' too, equivalent to the explicit
    # replace('/', '%2F') after a default quote() call.
    return parse.quote(data, safe='')
def sha224(data):
    """Return the hex SHA-224 digest of a text string (UTF-8 encoded)."""
    digest = hashlib.sha224(data.encode('utf-8'))
    return digest.hexdigest()
def link(conn, title, data, num, category, backlink):
    """Render namumark-style wiki links inside *data* to HTML.

    Processes, in order: category links ([[분류:...]]), wiki:/inter:
    links, relative links ("../", "/sub"), then generic [[target|label]]
    links covering files, external images, plain URLs and internal pages.
    When num == 1, (source, target, kind) records are appended to
    *backlink*.  Returns [rendered_data, category_html, backlink].
    """
    curs = conn.cursor()
    # NOTE(review): this replace looks garbled in the source (likely an
    # HTML-entity escaping of backslashes lost in transit) — verify
    # against the upstream openNAMU repository.
    data = data.replace('\', '\\')
    # --- category links: [[분류:Name#flags]] -> entries in category_html ---
    m = re.findall("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", data)
    for g in m:
        if title != g[0]:
            if num == 1:
                backlink += [[title, g[0], 'cat']]
        # Red-link styling when the category page does not exist yet.
        curs.execute("select title from data where title = ?", [g[0]])
        if curs.fetchall():
            red = ""
        else:
            red = 'class="not_thing"'
        if(category != ''):
            category += ' / '
        style = ''
        if g[1]:
            # '#blur' flag: show the category name blurred until hovered.
            if re.search('#blur', g[1]):
                style = ' style="filter: blur(3px);" onmouseover="this.style.filter=\'none\';" onmouseout="this.style.filter=\'blur(3px)\';"'
        category += '<a ' + red + ' ' + style + ' href="/w/' + url_pas(g[0]) + '">' + re.sub("분류:", "", g[0]) + '</a>'
        data = re.sub("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", '', data, 1)
    # --- [[wiki:page|label]]: links to site-internal, non-article pages ---
    test = re.findall('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', data)
    for wiki in test:
        if wiki[1]:
            out = wiki[1]
        else:
            out = wiki[0]
        data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + out + '</a>', data, 1)
    # --- [[inter:site:page|label]]: interwiki links resolved via the DB ---
    test = re.findall('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', data)
    for wiki in test:
        curs.execute('select link from inter where title = ?', [wiki[0]])
        inter = curs.fetchall()
        if not inter:
            data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '인터위키 정보 없음', data, 1)
        else:
            if wiki[2]:
                out = wiki[0] + ':' + wiki[2]
            else:
                out = wiki[0] + ':' + wiki[1]
            data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '<a id="inside" href="' + inter[0][0] + wiki[1] + '">' + out + '</a>', data, 1)
    # Escaped [[:분류:...]] / [[:파일:...]] render as literal links.
    data = re.sub("\[\[(?::(?P<in>(?:분류|파일):(?:(?:(?!\]\]).)*)))\]\]", "[[\g<in>]]", data)
    # --- relative links: [[../]] resolves to the parent document ---
    a = re.findall('\[\[\.\.\/(\|(?:(?!]]).)+)?]]', data)
    for i in a:
        b = re.search('(.*)\/', title)
        if b:
            m = b.groups()
            if i:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + i + ']]', data, 1)
            else:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + ']]', data, 1)
        else:
            if i:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + i + ']]', data, 1)
            else:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + ']]', data, 1)
    # [[/sub]] becomes a link to a sub-document of the current title.
    data = re.sub('\[\[(?P<in>\/(?:(?!]]|\|).)+)(?P<out>\|(?:(?:(?!]]).)+))?]]', '[[' + title + '\g<in>\g<out>]]', data)
    # --- generic [[target|label]] links, consumed one match at a time ---
    # (local name 'link' shadows this function for the rest of the body)
    link = re.compile('\[\[((?:(?!\[\[|\]\]|\|).)*)(?:\|((?:(?!\[\[|\]\]).)*))?\]\]')
    while 1:
        l_d = link.search(data)
        if l_d:
            d = l_d.groups()
            if re.search('^(?:파일|외부):', d[0]):
                # File (파일:) or external image (외부:) embeds; the label
                # part may carry width=/height=/align= display options.
                width = ''
                height = ''
                align = ''
                span = ['', '']
                try:
                    w_d = re.search('width=([0-9]+(?:[a-z%]+)?)', d[1])
                    if w_d:
                        width = 'width="' + w_d.groups()[0] + '" '
                    h_d = re.search('height=([0-9]+(?:[a-z%]+)?)', d[1])
                    if h_d:
                        height = 'height="' + h_d.groups()[0] + '" '
                    a_d = re.search('align=(center|right)', d[1])
                    if a_d:
                        span[0] = '<span style="display: block; text-align: ' + a_d.groups()[0] + ';">'
                        span[1] = '</span>'
                except:
                    pass
                f_d = re.search('^파일:([^.]+)\.(.+)$', d[0])
                if f_d:
                    if not re.search("^파일:([^\n]*)", title):
                        if num == 1:
                            backlink += [[title, d[0], 'file']]
                    file_name = f_d.groups()
                    # Missing files render as a red link to the file page.
                    curs.execute("select title from data where title = ?", ['파일:' + file_name[0] + '.' + file_name[1]])
                    if not curs.fetchall():
                        img = '<a class="not_thing" href="/w/' + url_pas('파일:' + file_name[0] + '.' + file_name[1]) + '">파일:' + file_name[0] + '.' + file_name[1] + '</a>'
                    else:
                        img = span[0] + '<img src="/image/' + sha224(file_name[0]) + '.' + file_name[1] + '" ' + width + height + '>' + span[1]
                    data = link.sub(img, data, 1)
                else:
                    img = span[0] + '<img src="' + re.sub('^외부:', '', d[0]) + '" ' + width + height + '>' + span[1]
                    data = link.sub(img, data, 1)
            elif re.search('^https?:\/\/', re.sub('<([^>]*)>', '', d[0])):
                # Plain external URL; label defaults to the URL itself.
                view = d[0]
                try:
                    if re.search('(.+)', d[1]):
                        view = d[1]
                except:
                    pass
                data = link.sub('<a class="out_link" rel="nofollow" href="' + re.sub('<([^>]*)>', '', d[0]) + '">' + view + '</a>', data, 1)
            else:
                # Internal page link; may carry a #section anchor.
                view = d[0].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
                try:
                    if re.search('(.+)', d[1]):
                        view = d[1].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
                except:
                    pass
                sh = ''
                s_d = re.search('#((?:(?!x27;|#).)+)$', d[0])
                if s_d:
                    href = re.sub('#((?:(?!x27;|#).)+)$', '', d[0])
                    sh = '#' + s_d.groups()[0]
                else:
                    href = d[0]
                if d[0] == title:
                    # Self-links render as bold text, not anchors.
                    data = link.sub('<b>' + view + '</b>', data, 1)
                elif re.search('^#', d[0]):
                    data = link.sub('<a title="' + sh + '" href="' + sh + '">' + view + '</a>', data, 1)
                else:
                    # NOTE(review): the entity-replace chain below appears
                    # garbled in the source (lost HTML entities) — verify.
                    a = re.sub('<([^>]*)>', '', href.replace(''', "'").replace('"', '"').replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\'))
                    if num == 1:
                        backlink += [[title, a, '']]
                    curs.execute("select title from data where title = ?", [a])
                    if not curs.fetchall():
                        no = 'class="not_thing"'
                        if num == 1:
                            backlink += [[title, a, 'no']]
                    else:
                        no = ''
                    data = link.sub('<a ' + no + ' title="' + re.sub('<([^>]*)>', '', href) + sh + '" href="/w/' + url_pas(a) + sh + '">' + view.replace('\\', '\\\\') + '</a>', data, 1)
        else:
            break
    # NOTE(review): reverse of the replace at the top — equally garbled.
    data = data.replace('\\', '\')
    return [data, category, backlink]
"min08101@naver.com"
] | min08101@naver.com |
f306020b90d5c15f9e8481d52d2d5a2b44fd8e51 | 6267a0052665bbbce937d667b19192b1fe13f8d0 | /moshi_login/urls.py | 99131ebc2c04cb95013efad56e22a54109ba4f62 | [] | no_license | 21nupur/XHPLoginbackend | 7d52caf8dd21614d478510806cdfd9ae2a95351e | bce0a1ef9f677504b48b95307f0ccc5b263c3714 | refs/heads/master | 2022-12-17T09:10:46.769308 | 2019-05-26T15:32:15 | 2019-05-26T15:32:15 | 188,698,106 | 0 | 0 | null | 2022-12-08T05:10:19 | 2019-05-26T15:09:50 | Python | UTF-8 | Python | false | false | 1,493 | py | """moshi_login URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url
from django.urls import path
from rest_framework_swagger.views import get_swagger_view
from . import views
# Swagger schema view served at the site root.
schema_view_swagger = get_swagger_view(title='Moshi Login')
from customuser import views as custom_user_views
urlpatterns = [
    path('admin/', admin.site.urls),
    # API documentation (Swagger UI) at the root URL.
    url(r'^$', schema_view_swagger),
    # JWT endpoints: obtain, refresh and verify tokens.
    url(r'^api/v1/token/$', views.TokenAccessView.as_view(), name='token_obtain_pair'),
    url(r'^api/v1/token/refresh/$', views.RefreshTokenView.as_view(), name='token_refresh'),
    url(r'^api/v1/token/verify/$', views.VerifyTokenView.as_view(), name='token_verify'),
    # Example endpoint that requires a valid token.
    path('api/v1/secured', views.SecuredView.as_view()),
    # User registration and profile details.
    path('api/v1/user/register', custom_user_views.RegisterUser.as_view()),
    path('api/v1/user/details', custom_user_views.UserDetailsView.as_view())
]
"1996nupurpandey@gmail.com"
] | 1996nupurpandey@gmail.com |
3efa40b1568ac779495027a89b5b37e1c9ac8094 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3303.py | 9127ca7f11dd01a24730d2a21ab3e5fad553dcc5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | def parse_input(str):
str_first_val = str.split()
real_digits = [ int(c) for c in str_first_val[0] ]
return real_digits
def solve(test):
big_number = parse_input(test)
num_of_digits = len(big_number)
index_of_max_incrising_digit = 0
for digit_ind in range(0,num_of_digits):
if( big_number[digit_ind] > big_number[index_of_max_incrising_digit] ):
index_of_max_incrising_digit = digit_ind;
elif ( big_number[digit_ind] < big_number[index_of_max_incrising_digit] ):
big_number[index_of_max_incrising_digit] -= 1
for digit_ind_in_change in range(index_of_max_incrising_digit+1,num_of_digits):
big_number[digit_ind_in_change] = 9
break
num_in_str = ''.join(map(str,big_number))
if( num_in_str[0] == '0'):
num_in_str = num_in_str[1:]
return num_in_str | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
b2d852225e0eee9726acf98f89c5e40fa6649640 | 6320b4daf99fc88a7200eaa1602502e1345f4042 | /Python/DataStruct/Tree/B_Tree.py | 9b28c2947d83250e97e67d24037f1c9a18f1b1e1 | [] | no_license | tbfungeek/algorithm | d7c1fa4a3434dee4c16476c59b7320ca57a14695 | 53567f1200978bc4e464216db8c7e6ef01468c57 | refs/heads/master | 2022-06-16T23:17:30.337540 | 2022-05-28T15:55:24 | 2022-05-28T15:55:24 | 244,155,282 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,124 | py | class BTreeNode(object):
def __init__(self,capacity = 0, is_leaf = True):
self.__capacity = capacity
self.__is_leaf = is_leaf
self.__index = 0
self.__keys = [0] * self.__capacity
self.__children = [None] * (self.__capacity + 1)
@property
def capacity(self):
return self.__capacity
@capacity.setter
def capacity(self, capacity):
self.__capacity = capacity
@property
def is_leaf(self):
return self.__is_leaf
@is_leaf.setter
def is_leaf(self, is_leaf):
self.__is_leaf = is_leaf
@property
def keys(self):
return self.__keys
@keys.setter
def keys(self,keys):
self.__keys = keys
@property
def children(self):
return self.__children
@children.setter
def children(self, children):
self.__children = children
@property
def index(self):
return self.__index
@index.setter
def index(self,index):
self.__index = index
def is_full_node(self):
return self.index == self.__capacity
@staticmethod
def create_node(key_max,is_leaf = False):
return BTreeNode(key_max, is_leaf)
class BTree(object):
    """A B-tree of minimum degree M; each node holds between M-1 and 2M-1 keys."""
    def __init__(self, node_min_count = 3):
        '''
        Can be understood by analogy with the 2-3 tree definition:
        a node is either empty or one of the following two kinds:
        2-node: holds 1 key and two links to its left/right children
        3-node: holds 2 keys and three links to its children
        '''
        self.__root: BTreeNode = None
        # minimum degree of the B-tree
        self.M = node_min_count
        # maximum number of keys a node may contain
        self.KEY_MAX = 2 * self.M - 1
        # minimum number of keys a node must contain
        self.KEY_MIN = self.M - 1
        # maximum number of children per node
        self.CHILD_MAX = self.KEY_MAX + 1
        # minimum number of children per node
        self.CHILD_MIN = self.KEY_MIN + 1
        print("")
    @property
    def root(self):
        return self.__root
    @root.setter
    def root(self, root):
        self.__root = root
    def __generate_node(self,is_leaf = True):
        # Create a fresh node sized to hold KEY_MAX keys.
        return BTreeNode.create_node(self.KEY_MAX,is_leaf)
    def insert(self, key):
        # If the key already exists, do not perform the insertion.
        if self.contain(key):
            return
        else:
            # Empty tree: create a new node and make it the root.
            if self.root is None:
                node = self.__generate_node()
                self.root = node
            # Root is full: grow the tree height by splitting the root first.
            elif self.root.is_full_node():
                new_node = self.__generate_node(False)
                # Make the old root the first child of the new node.
                new_node.children[0] = self.root
                # __split_node carves a right sibling out of the old root,
                # attached as the new node's second child.
                self.__split_node(new_node,0,self.root)
                # The new node becomes the new root.
                self.root = new_node
            # Perform the actual insertion starting from the root.
            self.__insert(self.root,key)
    def contain(self, key):
        return self.__search(self.root,key)
    def __search(self,node:BTreeNode,key):
        # An empty subtree cannot contain the key.
        if node is None:
            return False
        else:
            i = 0
            # Scan the node's keys for the first slot >= key.
            while i < node.index and key > node.keys[i]:
                i += 1
            # Found in the current node.
            if i < node.index and key == node.keys[i]:
                return True
            else:
                # Not found and this is a leaf: nowhere further to look.
                if node.is_leaf:
                    return False
                else:
                    # Otherwise descend into the matching child subtree.
                    return self.__search(node.children[i],key)
    def __split_node(self,result_node,splite_from,orginal_node):
        # Create a right sibling of the same kind as orginal_node.
        right_node = self.__generate_node(orginal_node.is_leaf)
        right_node.index = self.KEY_MIN
        # Move the upper half of the original node's keys to the right node.
        for i in range(self.KEY_MIN):
            right_node.keys[i] = orginal_node.keys[i + self.CHILD_MIN]
        # If the node being split is not a leaf, move the child links too.
        if not orginal_node.is_leaf:
            for i in range(self.CHILD_MIN):
                right_node.children[i] = orginal_node.children[i + self.CHILD_MIN]
        # Shrink the original node to its lower half.
        orginal_node.index = self.KEY_MIN
        # Shift result_node's keys/children right from splite_from onward
        # to open a slot for the promoted median key.
        for i in range(splite_from, result_node.index):
            j = result_node.index + splite_from - i
            result_node.children[j+1] = result_node.children[j]
            result_node.keys[j] = result_node.keys[j-1]
        # Hook right_node in next to orginal_node and promote the median key.
        result_node.children[splite_from + 1] = right_node
        result_node.keys[splite_from] = orginal_node.keys[self.KEY_MIN]
        result_node.index += 1
    def __insert(self, node:BTreeNode, key):
        # Number of keys currently in the node.
        i = node.index
        # node is a leaf: insert directly into its key array.
        if node.is_leaf == True:
            # Scan from the back for the insertion position,
            while i > 0 and key < node.keys[i - 1]:
                # shifting larger keys one slot to the right.
                node.keys[i] = node.keys[i - 1]
                i -= 1
            # Write the key into its slot.
            node.keys[i] = key
            # Update the node's key count.
            node.index += 1
        # node is an internal node: descend into the right subtree.
        else:
            # Scan from the back for the subtree that should receive the key.
            while i > 0 and key < node.keys[i - 1]:
                i -= 1
            # Target child subtree.
            target_child = node.children[i]
            if target_child is None:
                return
            # Pre-split a full child so the recursion never meets a full node.
            if target_child.is_full_node():
                # Split the child.
                self.__split_node(node, i, target_child)
                # Re-select the target child after the split.
                if key > node.keys[i]:
                    target_child = node.children[i + 1]
            # Recurse into the target subtree.
            self.__insert(target_child, key)
    def delete(self,key):
        # Nothing to do when the key is not present.
        if not self.contain(key):
            return
        # Special handling when the root holds a single key.
        if self.root.index == 1:
            # Root is a leaf: removing its only key empties the tree.
            if self.root.is_leaf:
                self.destroy()
            else:
                # The root has children: may need to shrink the tree height.
                left_child = self.root.children[0]
                right_child = self.root.children[1]
                if left_child.index == self.KEY_MIN and right_child.index == self.KEY_MIN:
                    self.__merge_child(self.root,0)
                    self.__delete_node(self.root)
                    self.root = left_child
        self.__recursive_remove(self.root,key)
    def __merge_child(self,node:BTreeNode,index):
        child1 = node.children[index]
        child2 = node.children[index + 1]
        # Merge child at index+1 into the child at index.
        child1.index = self.KEY_MAX
        # Pull node's separator key at `index` down into child1.
        child1.keys[self.KEY_MIN] = node.keys[index]
        # Append child2's keys after the pulled-down separator.
        for i in range(self.KEY_MIN):
            child1.keys[self.KEY_MIN + i + 1] = child2.keys[i]
        # Copy child links when merging internal nodes.
        if not child1.is_leaf:
            for i in range(self.CHILD_MIN):
                child1.children[i + self.CHILD_MIN] = child2.children[i]
        # Remove the separator key and child2's link from the parent.
        node.index -= 1
        for i in range(index,node.index):
            node.keys[i] = node.keys[i+1]
            node.children[i+1]= node.children[i + 2]
        # Release the merged-away node.
        self.__delete_node(child2)
    def __delete_node(self,node:BTreeNode):
        # Drop the local reference (Python GC reclaims the node).
        if node is not None:
            node = None
    def __recursive_remove(self, node:BTreeNode, key):
        # Locate the key inside this node.
        i = 0
        while i < node.index and key > node.keys[i]:
            i += 1
        if i < node.index and key == node.keys[i]:
            # Found at this level.
            if node.is_leaf:
                # node is a leaf: remove the key by shifting the tail left.
                # NOTE(review): node.index is never decremented here and
                # keys[j + 1] can read one past the last key — confirm
                # against a reference B-tree deletion implementation.
                for j in range(i, node.index):
                    node.keys[j] = node.keys[j + 1]
                return
            else:
                # node is internal.
                # Child subtree preceding the key.
                child_prev = node.children[i]
                # Child subtree following the key.
                child_next = node.children[i + 1]
                if child_prev.index >= self.CHILD_MIN:
                    # Take the key's in-order predecessor,
                    prev_key = self.predecessor(child_prev)
                    self.__recursive_remove(child_prev,prev_key)
                    # and substitute it for the deleted key.
                    node.keys[i] = prev_key
                    return
                # child_next holds at least CHILD_MIN keys.
                elif child_next.index >= self.CHILD_MIN:
                    # Take the key's in-order successor,
                    next_key = self.successor(child_next)
                    self.__recursive_remove(child_next,next_key)
                    # and substitute it for the deleted key.
                    node.keys[i] = next_key
                    return
                # Both neighbours hold only CHILD_MIN-1 keys: merge and recurse.
                else:
                    self.__merge_child(node,i)
                    self.__recursive_remove(child_prev,key)
        else:
            # Not found at this level: descend into child i.
            child = node.children[i]
            # NOTE(review): comparing against KEY_MAX looks suspicious —
            # the classic algorithm refills a child holding KEY_MIN keys
            # before descending; confirm the intended condition.
            if child.index == self.KEY_MAX:
                # Left sibling.
                left = None
                # Right sibling.
                right = None
                # Left sibling exists when i > 0.
                if i > 0:
                    left = node.children[i - 1]
                # Right sibling exists when i < node.index.
                if i < node.index:
                    right = node.children[i + 1]
                j = 0
                if left is not None and left.index >= self.CHILD_MIN:
                    # Borrow from the left: parent key i-1 moves down into child.
                    for j in range(child.index):
                        k = child.index - j
                        child.keys[k] = child.keys[k - 1]
                    child.keys[0] = node.keys[i - 1]
                    if not left.is_leaf:
                        for j in range(child.index + 1):
                            k = child.index + 1 - j
                            child.children[k] = child.children[k - 1]
                        child.children[0] = left.children[left.index]
                    child.index += 1
                    node.keys[i] = left.keys[left.index - 1]
                    left.index -= 1
                # Right sibling has at least CHILD_MIN keys: borrow from it.
                elif right is not None and right.index >= self.CHILD_MIN:
                    # Parent key i moves down to the end of child,
                    child.keys[child.index] = node.keys[i]
                    child.index += 1
                    # and right's smallest key moves up into the parent.
                    node.keys[i] = right.keys[0]
                    right.index -= 1
                    for j in range(right.index):
                        right.keys[j] = right.keys[j + 1]
                    if not right.is_leaf:
                        child.children[child.index] = right.children[0]
                        for j in range(right.index):
                            right.children[j] = right.children[j + 1]
                # Both siblings hold only CHILD_MIN-1 keys: merge with left.
                elif left is not None:
                    self.__merge_child(node, i - 1)
                    child = left
                # Otherwise merge with the right sibling.
                elif right is not None:
                    self.__merge_child(node, i)
            self.__recursive_remove(child,key)
    def destroy(self):
        self.__recursive_clear(self.root)
        self.root = None
    def __recursive_clear(self, node:BTreeNode):
        # Nothing to do for an empty subtree.
        if node is None:
            return
        # For internal nodes,
        if not node.is_leaf:
            # recursively clear every child subtree first.
            for i in range(node.index):
                self.__recursive_clear(node.children[i])
        # Then release this node.
        self.__delete_node(node)
    def predecessor(self, node: BTreeNode):
        # Rightmost key of the subtree rooted at node.
        while not node.is_leaf:
            node = node.children[node.index]
        return node.keys[node.index - 1]
    def successor(self, node: BTreeNode):
        # Leftmost key of the subtree rooted at node.
        while not node.is_leaf:
            node = node.children[0]
        return node.keys[0]
if __name__ == "__main__":
    # Smoke test 1: a degree-3 tree that is built and torn down.
    tree = BTree(3)
    for key in (11, 3, 1, 4, 33, 13, 63, 43, 2, 123, 55):
        tree.insert(key)
    tree.destroy()
    # Smoke test 2: a degree-2 tree with a few deletions before teardown.
    tree = BTree(2)
    for key in (11, 3, 1, 4, 33, 13, 63, 43, 2):
        tree.insert(key)
    for key in (1, 2, 3):
        tree.delete(key)
    tree.destroy()
| [
"tbfungeek@163.com"
] | tbfungeek@163.com |
fe11363128107797faa237d1e756beda980fde50 | ae3361e515ae92752a152e7d2fc2c3bc45e03347 | /djautotask/migrations/0007_auto_20190917_1009.py | 0965206238e3ecdf9dfce1dcf45921dc3980942f | [
"MIT"
] | permissive | KerkhoffTechnologies/django-autotask | eff78c0a17c0fd855be4cc5df3cc3a2798571edc | ca9d49e1eb8e3dd99f0c6f91674a0e70d27dfaa1 | refs/heads/master | 2023-07-25T07:29:51.959050 | 2023-07-14T18:09:23 | 2023-07-14T18:09:23 | 134,658,648 | 7 | 7 | MIT | 2023-09-11T22:04:31 | 2018-05-24T03:56:14 | Python | UTF-8 | Python | false | false | 1,640 | py | # Generated by Django 2.1.11 on 2019-09-17 10:09
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Queue picklist model and links
    # Ticket.queue to it via a nullable foreign key.
    dependencies = [
        ('djautotask', '0006_auto_20190916_1645'),
    ]
    operations = [
        # Queue mirrors an Autotask picklist entry (value/label plus flags).
        migrations.CreateModel(
            name='Queue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('value', models.CharField(blank=True, max_length=50, null=True)),
                ('label', models.CharField(blank=True, max_length=50, null=True)),
                ('is_default_value', models.BooleanField(default=False)),
                ('sort_order', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('parent_value', models.CharField(blank=True, max_length=20, null=True)),
                ('is_active', models.BooleanField(default=False)),
                ('is_system', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name_plural': 'Queues',
            },
        ),
        # SET_NULL keeps tickets when their queue record is removed.
        migrations.AddField(
            model_name='ticket',
            name='queue',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='djautotask.Queue'),
        ),
    ]
| [
"cameron@craftypenguins.net"
] | cameron@craftypenguins.net |
9d5f726e7f02e1e59d2149457b0addd03be9b23d | 77f4c8327a69780fd77575d9c7a59452c07f68f0 | /awesome_web/wsgi.py | f794a0c0d081a22e096ed8b7369bfa18e4f99a42 | [
"Apache-2.0"
] | permissive | WuQianyong/awesome_web | 7e8f46e0b5fff1f0ef1d0f1a3487afe454361b48 | 70438cc78fbc38c7173c30cdfd9f171ba16c6470 | refs/heads/master | 2021-05-16T00:16:10.634721 | 2017-10-11T09:45:31 | 2017-10-11T09:45:31 | 106,401,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for awesome_web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome_web.settings")
# Module-level WSGI callable picked up by application servers (gunicorn/uWSGI).
application = get_wsgi_application()
| [
"wuqianyong1995@163.com"
] | wuqianyong1995@163.com |
4aa9f64fbfab6e8cd56acc0886dcc881f4b7a5c5 | bffb3cc4694941a278f4670af554c59872421f36 | /cyshg/views.py | 6d9d90357d9f5bac48a674d2dcebcb97e35cab72 | [] | no_license | penglian518/Aqua-mer | f26c34ea95bd67ebf99ceb0465f95e9e9730dc7a | 80c7188d72d7f813fe7129be08854601094ca6a5 | refs/heads/master | 2021-04-28T11:01:15.288723 | 2020-03-26T17:50:51 | 2020-03-26T17:50:51 | 122,079,980 | 0 | 1 | null | 2018-04-18T19:40:00 | 2018-02-19T15:13:59 | Python | UTF-8 | Python | false | false | 783 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
import json
from scripts.VistorStatistics import clientStatistics
from .models import StatisticsData
# Create your views here.
def index(request):
    """Record this visit, then render the landing page with the visit total."""
    clientStatistics(request)
    visit_total = StatisticsData.objects.count()
    return render(request, 'index.html', {'numVist': visit_total})
def faq(request):
    """Record this visit, then render the static FAQ page."""
    clientStatistics(request)
    return render(request, 'faq.html')
# function for ajax query
def query_statistics(request):
    """AJAX endpoint: return the total number of recorded visits as JSON."""
    # NOTE: visit recording is deliberately disabled here
    # (clientStatistics(request) was commented out in the original).
    payload = {'success': True}
    payload['numVist'] = StatisticsData.objects.count()
    return HttpResponse(json.dumps(payload))
| [
"penglian518@foxmail.com"
] | penglian518@foxmail.com |
a98d5fa157114917f552657e302bbe47cba8d684 | dcec651059c0bddbcd88264e53f6a9ba979cb598 | /Arithmetic/inverse_modular.py | bd4ccaabce253630d2731dcafb83bfd89ba72897 | [] | no_license | Kestrel67/Python-Libraries | 4c0b94b8d18282c3827ec3ccc26d76305ab4ac6c | 02dad1791bddbaac01f98b33fbd31b8d70035a60 | refs/heads/master | 2021-01-22T11:51:52.255304 | 2014-11-16T21:58:04 | 2014-11-16T21:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | from euclide import gcd
# decorator
def coprimes(func):
    """Decorator: reject the call unless gcd(a, b) == 1, then run *func*."""
    def wrapper(a, b):
        if gcd(a, b) != 1:
            raise Exception('a et b ne sont pas premiers entre eux')
        return func(a, b)
    return wrapper
# Diophantine equation: solve a*x + b*y = 1 for coprime a and b.  Example:
# dioph(247825482538725487253872458725387254782537, 24785245872358725487253487258735872582537)
@coprimes
def dioph(a, b):
    """Solve a*x + b*y = 1 for coprime a, b by the Euclidean recursion.

    Returns the coefficient pair (x, y).
    """
    quotient, remainder = divmod(a, b)
    if remainder == 1:
        return (1, -quotient)
    x, y = dioph(b, remainder)
    return (y, x - y * quotient)
# find c such that n * c = 1 (mod p)
def modinvrec(n, p):
    """Return c with n * c ≡ 1 (mod p) via dioph; ValueError if c <= 0."""
    coeff, _ = dioph(n, p)
    if coeff <= 0:
        raise ValueError
    return coeff
# Iterative extended Euclidean algorithm: returns (g, x, y) with x*aa + y*bb = g
def extended_gcd(aa, bb):
    """Iterative extended Euclid.

    Returns (g, x, y) such that x*aa + y*bb == g == gcd(|aa|, |bb|).
    """
    r_prev, r_cur = abs(aa), abs(bb)
    x_prev, x_cur = 1, 0
    y_prev, y_cur = 0, 1
    while r_cur != 0:
        q, r_next = divmod(r_prev, r_cur)
        r_prev, r_cur = r_cur, r_next
        x_prev, x_cur = x_cur, x_prev - q * x_cur
        y_prev, y_cur = y_cur, y_prev - q * y_cur
    # Flip the coefficient signs to account for negative inputs.
    sign_a = -1 if aa < 0 else 1
    sign_b = -1 if bb < 0 else 1
    return r_prev, x_prev * sign_a, y_prev * sign_b
# modinv(a, b) = x such that x * a = 1 (mod b)
def modinv(a, m):
    """Return the inverse of a modulo m; ValueError when gcd(a, m) != 1."""
    g, x, _ = extended_gcd(a, m)
    if g != 1:
        raise ValueError
    return x % m
| [
"lucas.dietrich.pro@gmail.com"
] | lucas.dietrich.pro@gmail.com |
5e34d8613ea0c7bde87d6a8f970a0f0ef996335c | aa523547194d2b91ffda55ade56ba4a319b6e909 | /server.py | c9cedfd4580b55972ea7bcc49492e52b93e3d95d | [] | no_license | barbuza/delay | 78f65ca24d0e0c1f8d1b31d9d5d2523948f69cf5 | 5cb187dbed50bd0620e8f5ddf863069b1699a92b | refs/heads/master | 2020-05-19T20:14:50.222792 | 2015-07-20T20:13:12 | 2015-07-20T20:13:12 | 39,405,754 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | #!./venv/bin/python
import json
import asyncio
import psycopg2
import click
from aiohttp import web
from data import Store, Entity
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that serialises Entity objects as their data plus 'id'."""

    def default(self, o):
        if not isinstance(o, Entity):
            return super(JsonEncoder, self).default(o)
        return dict(o.data, **{'id': o.id})
@asyncio.coroutine
def fetch_handler(store, request):
    """POST /fetch: validate the JSON payload and run a fetch on the store.

    Payload keys: 'depth' (int, default 1), 'follow' (list, default []),
    'query' (list or str, default []).  Replies 400 with the validation or
    SQL error message, otherwise the fetched data as JSON.
    """
    body = yield from request.content.read()
    try:
        # json.loads also raises ValueError on malformed JSON, so a bad
        # body is reported through the same 400 path as a bad field.
        payload = json.loads(body.decode('utf-8'))
        depth = payload.get('depth', 1)
        if not isinstance(depth, int):
            raise ValueError("depth isn't int")
        follow = payload.get('follow', [])
        if not isinstance(follow, list):
            raise ValueError("follow isn't a list")
        query = payload.get('query', [])
        if not isinstance(query, list) and not isinstance(query, str):
            raise ValueError("query isn't an array or string")
    except ValueError as err:
        return web.Response(status=400, body=str(err).encode('utf-8'))
    try:
        data = yield from store.fetch_list(query, follow, depth)
    except psycopg2.ProgrammingError as err:
        # Surface the primary Postgres error message to the client.
        return web.Response(body=err.diag.message_primary.encode('utf-8'), status=400)
    return web.Response(body=json.dumps(data, cls=JsonEncoder).encode('utf-8'),
                        content_type='application/json')
@asyncio.coroutine
def persist_handler(store, request):
    """POST /persist: persist an entity from the JSON payload.

    An optional integer 'id' selects the entity — presumably None means
    create a new one (confirm against Store.persist).  Replies 400 on
    malformed JSON or a non-int id, otherwise the persisted entity as JSON.
    """
    body = yield from request.content.read()
    try:
        payload = json.loads(body.decode('utf-8'))
        # pop() so 'id' is not stored as a regular attribute of the entity.
        id = payload.pop('id', None)
        if id is not None and not isinstance(id, int):
            raise ValueError()
    except ValueError:
        # Also reached when json.loads fails on an invalid body.
        return web.Response(status=400)
    data = yield from store.persist(id, payload)
    return web.Response(body=json.dumps(data, cls=JsonEncoder).encode('utf-8'),
                        content_type='application/json')
@asyncio.coroutine
def init(loop, store, port):
    """Build the aiohttp app, wire the two POST routes, and start the server.

    Returns the created server object (bound to 127.0.0.1:port).
    """
    app = web.Application(loop=loop)
    # Each lambda closes over `store` so the handlers get it as first arg.
    app.router.add_route('POST', '/fetch', lambda request: fetch_handler(store, request))
    app.router.add_route('POST', '/persist', lambda request: persist_handler(store, request))
    srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', port)
    return srv
@click.command()
@click.option('--db', default='delay', help='postgres db')
@click.option('--user', default='delay', help='postgres user')
@click.option('--port', default=8080, help='http port')
def main(db, user, port):
    """CLI entry point: connect to Postgres, prepare the schema, serve HTTP."""
    loop = asyncio.get_event_loop()
    store = Store(loop=loop)
    loop.run_until_complete(store.connect('dbname={} user={}'.format(db, user)))
    loop.run_until_complete(store.update_schema())
    loop.run_until_complete(init(loop, store, port))
    try:
        # Block serving requests until interrupted.
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the server; exit quietly.
        pass
# Start the server only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"barbuzaster@gmail.com"
] | barbuzaster@gmail.com |
d6f1e3ab4ff88ca2a4b553c4d4d48afae1da0610 | c572b70737bd5ec30cc2e31b29c8906a8de03544 | /divide_conquer_quick_sort.py | 35f2d3610f8d336819bd5d84f9cdcfce522e3d76 | [
"MIT"
] | permissive | rotnozeerin/algorithms-python | 072a1d44d75ca87b139ae1b367ad1dcfe78997ae | 75e62e510163a7cb2c877db4a8a6fc329c4fee4c | refs/heads/master | 2023-03-19T04:15:19.261253 | 2017-06-17T07:59:12 | 2017-06-17T07:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py |
# worst-case O(n^2) time (first-element pivot degrades on sorted input)
def quick_sort_slower(array):
    """Quicksort with the first element as pivot.

    Degrades to quadratic time on already-sorted input.
    """
    if len(array) < 2:
        return array
    pivot, rest = array[0], array[1:]
    smaller = [value for value in rest if value <= pivot]
    larger = [value for value in rest if value > pivot]
    return quick_sort_slower(smaller) + [pivot] + quick_sort_slower(larger)
print(quick_sort_slower([2,4,1,5,7,0])) #[0, 1, 2, 4, 5, 7]
# average O(n log n) time (worst case remains O(n^2))
def quick_sort_faster(array):
    """Quicksort with the middle element as pivot.

    Average O(n log n); a middle pivot avoids the sorted-input worst case
    of quick_sort_slower (adversarial inputs can still be quadratic).
    Elements equal to the pivot are kept on the left, as in the original.
    """
    if len(array) < 2:
        return array
    pivot_index = len(array) // 2
    pivot = array[pivot_index]
    smaller, larger = [], []
    # Single pass over the input instead of four list comprehensions;
    # the pivot element itself is skipped exactly once.
    for index, value in enumerate(array):
        if index == pivot_index:
            continue
        if value <= pivot:
            smaller.append(value)
        else:
            larger.append(value)
    return quick_sort_faster(smaller) + [pivot] + quick_sort_faster(larger)
print(quick_sort_faster([-1, 4, 5, 3, 2, -4])) #[-4, -1, 2, 3, 4, 5] | [
"arafat.mahmud@revesoft.com"
] | arafat.mahmud@revesoft.com |
e7c04ec2cf024157d985c805cf4d4068468f9938 | 19ee165c252970294333e203728020cdcae550b3 | /agc018/agc018_a/20200210103816.py | ab764993bcef7aa989b4543b9e7a8b7a477f7530 | [] | no_license | autumncolors11/atc_submits | 4528c700e488d530f9cdde3a4198f36b30c3d35e | 6f9689b6d7de45fd4e44ad118e4e3531bb8dac4d | refs/heads/master | 2022-11-28T17:59:55.750896 | 2020-08-09T14:10:05 | 2020-08-09T14:10:05 | 258,122,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import sys
sys.setrecursionlimit(10**6)
from math import floor,ceil,sqrt,factorial,log
from heapq import heappop, heappush, heappushpop
from collections import Counter,defaultdict,deque
from itertools import accumulate,permutations,combinations,product,combinations_with_replacement
from bisect import bisect_left,bisect_right
from copy import deepcopy
from operator import itemgetter
from fractions import gcd
from functools import reduce
mod = 10 ** 9 + 7
# Integer input helpers (fast reads from stdin)
# Competitive-programming stdin readers: one int, a lazy map of ints from
# one line, that map as a list, n ints (one per line), n lines of int lists.
def ii(): return int(sys.stdin.readline().rstrip()) #int(input())
def mii(): return map(int,sys.stdin.readline().rstrip().split())
def limii(): return list(mii()) #list(map(int,input().split()))
def lin(n:int): return [ii() for _ in range(n)]
def llint(n: int): return [limii() for _ in range(n)]
# String input helpers (fast reads from stdin)
# String counterparts of the readers above: one line, its tokens (lazy and
# as a list), n lines, and n lines of token lists.
def ss(): return sys.stdin.readline().rstrip() #input()
def mss(): return sys.stdin.readline().rstrip().split()
def limss(): return list(mss()) #list(input().split())
def lst(n:int): return [ss() for _ in range(n)]
def llstr(n: int): return [limss() for _ in range(n)]
# Is a greedy approach really right here, or should this be DP??
# Is a greedy approach really right here, or should this be DP??
# Is a greedy approach really right here, or should this be DP??
# AGC018 problem A: Getting Difference
# Read n (count of values), target k, then the n values themselves.
n,k=mii()
arr=limii()
#print(arr)
def gcd1(numbers):
    """GCD of all values in *numbers*, folded pairwise left to right."""
    return reduce(lambda acc, value: gcd(acc, value), numbers)
# k is reachable iff it is a multiple of gcd(arr) and does not exceed max(arr).
p=gcd1(arr)
if k%p==0 and k<=max(arr):
    print("POSSIBLE")
else:
    print("IMPOSSIBLE")
| [
"biomimetics500tour@gmail.com"
] | biomimetics500tour@gmail.com |
27753c3a2a4483688bfc4471b2d0f7c8f2451bb0 | 44b1e8cbf6bb78bb20d5ad50405a5b9f5134e2e7 | /pycomsoc/winners/nw.py | b449cd89cd2472a150686ef0fb70d0d865c1b34e | [] | no_license | TheoDlmz/DBCOMSOC | dc437fce15b03b7521eece308e5772063907d0e2 | 14c9db233be31881ad7d35d80300b81bbb636703 | refs/heads/master | 2020-05-01T19:36:41.701130 | 2020-02-15T20:20:23 | 2020-02-15T20:20:23 | 177,652,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,218 | py | # Library pyCOMSOC for computational social choice
# New York University 2019
# All the function below are based on the paper by Xia and Conitzer
# Algorithms for Necessary Winner computation on partial orders
## Imports
import numpy as np
import random
from multiprocessing import Pool
import time
## Tools
def __intersect__(a, b):
    """Return the number of elements common to *a* and *b*, minus one.

    Used to count candidates strictly between two candidates: the shared
    endpoint is discounted.  (Dropped the redundant list() around the set
    intersection — len() works on sets directly.)
    """
    return len(set(a) & set(b)) - 1
def __incrementMaxScore__(upLength, rule):
    """Score loss of a candidate whose Up-set has size *upLength*.

    rule is 'b' (Borda), 'k<j>' (j-approval, e.g. 'k3'), or a score vector
    for a general positional scoring rule.
    """
    if rule == 'b':
        # Borda: being below upLength-1 candidates costs that many points.
        return upLength - 1
    if rule[0] == 'k':
        # j-approval: one point lost once the best rank exceeds j.
        threshold = int(rule[1:])
        return 1 if upLength > threshold else 0
    # General positional scoring rule: loss relative to the top score.
    return rule[0] - rule[upLength - 1]
## Step 1 : Compute Up and Down for every candidate and every partial orders
#Compute "Up" for the general case
def __upGeneral(children,parents,roots,m,maxScore,rule='b'):
    """Compute Up(c) for every candidate of one general partial order.

    children/parents: adjacency lists of the preference graph over m
    candidates; roots: candidates with no parents.  maxScore is updated
    in place: entry c grows by the score loss of c under *rule*.
    Returns up, where up[c] contains c plus every candidate above it.
    Traversal is a BFS in topological order (a node is enqueued once all
    its parents were processed).
    """
    # Initialization
    up = [[i] for i in range(m)]
    parentsToSee = [len(p) for p in parents]
    # Initialize the queue with the roots of the graph
    # for which the up set only contains themselves
    queue = roots.copy()
    # True if some candidate has >1 children in the preference graph
    isMerge = (np.max([len(c) for c in children]) > 1)
    # True if some candidate has >1 parents in the preference graph
    isSplit = (np.max([len(p) for p in parents]) > 1)
    # BFS algorithm to compute "Up" for every candidate
    if isSplit and isMerge: #If the graph is not a tree...
        while queue != []:
            u = queue.pop()
            up[u] = list(set(up[u])) #...then we have to check for duplicate candidates
            # Update maxscore depending on the rule
            maxScore[u] += __incrementMaxScore__(len(up[u]),rule)
            for e in children[u]:
                up[e].extend(up[u])
                parentsToSee[e] -= 1
                if parentsToSee[e] == 0:
                    queue.append(e)
    else:
        # Tree-like graph: no duplicates possible, skip the dedup step.
        while queue != []:
            u = queue.pop()
            maxScore[u] += __incrementMaxScore__(len(up[u]),rule)
            for e in children[u]:
                up[e].extend(up[u])
                parentsToSee[e] -= 1
                if parentsToSee[e] == 0:
                    queue.append(e)
    return up
#Compute "Up" for the Partitioned case
def __upPartitioned(children,roots,m,maxScore,rule='b'):
    """Compute the block structure of one partitioned preference order.

    Returns (blockNumber, ranksBlocks): blockNumber[c] is the block index
    of candidate c (-1 = unranked); ranksBlocks[i] is the minimum rank of
    block i, with the total ranked count appended last.  maxScore is
    updated in place with each ranked candidate's score loss under *rule*.
    """
    #Initialization
    blockNumber = [-1 for i in range(m)] #Block's number of the ith candidate
    #Block "-1" means not ranked
    ranksBlocks = [0] #Minimum rank of ith block
    currentRank = 0
    queue = roots.copy() #Initialize the queue with the root candidates
    #Algorithm
    while queue != []:
        candidateInBlock = 0
        for candidate in queue:
            blockNumber[candidate] = currentRank
            # Update maxscore depending on the rule
            maxScore[candidate] += __incrementMaxScore__(ranksBlocks[-1]+1,rule)
            candidateInBlock += 1 #Count the number of candidate in the block
        candidate0 = queue[0]
        queue = children[candidate0].copy() #Get the next block
        ranksBlocks.append(ranksBlocks[-1]+candidateInBlock)
        currentRank += 1
    return blockNumber,ranksBlocks
#Compute "Up" for the Linear case
def __upLinear(children, roots, m, maxScore, rule='b'):
    """Rank the candidates of one linear (chain) order.

    Returns a list of m ranks (-1 = unranked) with the chain length
    appended as last element.  maxScore is updated in place with each
    ranked candidate's score loss under *rule*.
    """
    candRank = [-1] * m
    rank = 0
    node = roots[0]
    while node != -1:
        candRank[node] = rank
        maxScore[node] += __incrementMaxScore__(rank + 1, rule)
        rank += 1
        # Follow the single child; a leaf terminates the walk.
        node = children[node][0] if children[node] else -1
    candRank.append(rank)
    return candRank
#Compute "Up" for the Multilinear case
def upMultilinear(children, roots, m, maxScore, rule='b'):
    """Rank the candidates of a disjoint union of chains.

    Returns (candRank, orderLength): candRank[c] is (chain index, rank in
    chain), or (-1, -1) for unranked candidates; orderLength[i] is the
    length of chain i.  maxScore is updated in place with each ranked
    candidate's score loss under *rule*.
    """
    candRank = [(-1, -1)] * m
    orderLength = []
    for chainIdx, start in enumerate(roots):
        rank = 0
        node = start
        while node != -1:
            candRank[node] = (chainIdx, rank)
            maxScore[node] += __incrementMaxScore__(rank + 1, rule)
            rank += 1
            # Follow the single child; a leaf terminates this chain.
            node = children[node][0] if children[node] else -1
        orderLength.append(rank)
    return candRank, orderLength
#General algorithm for the computation of "Up" and "Down"
def upDown(population,m,rule='b',verbose=False,optimUpActivated=True,optimCandActivated=True):
    """Step 1 of NW computation: classify orders and build Up/Down data.

    population: list of partial orders; each order is a list of (a, b)
        pairs meaning candidate a is preferred to candidate b.
    m: number of candidates.
    rule: 'b' (Borda), 'k<j>' (j-approval) or a score vector.
    optimUpActivated: route linear / multilinear / partitioned orders to
        their specialised compact representations instead of the general
        Up-set algorithm.
    optimCandActivated: restrict candidates to test (and Down sets) to
        those achieving the best maximal score.

    Returns (ordersInfos, candToTest, maxScore): ordersInfos packs the
    per-category structures consumed by the __competition* functions;
    maxScore[c] is the accumulated score loss of c (smaller is better).
    """
    n = len(population)
    maxScore = [0 for i in range(m)] #maximum score of every cand
    blockNumberP = [] #Informations for partitioned preferences
    ranksBlocksP = []
    candRankL = [] #Informations for linear orders
    candRankM = [] #Informations for multilinear orders
    orderLengthM = []
    upList = [] #Informations for general orders
    generalCaseOrders = []
    countEmpty = 0
    #Loop over every partial preferences
    for p in range(n):
        pairs = population[p]
        #Empty preferences = Empty linear order
        if pairs == []:
            candRankL.append([-1]*m)
            countEmpty += 1
        else:
            #We get parents and children of every candidate in the preference graph
            parents = [[] for i in range(m)]
            children = [[] for i in range(m)]
            for (a,b) in pairs:
                parents[b].append(a)
                children[a].append(b)
            isLinear = True #Is the order linear ?
            roots = [] #Roots of preference graph
            leaves = [] #Leaves of preference graph
            #Iterate over candidates
            for i in range(m):
                #If some candidate have > 1 parents or > 1 children then
                #this is not a linear order
                if len(parents[i]) > 1 or len(children[i]) > 1:
                    isLinear = False
                #If candidate i does not have children then it is a leaf
                if len(children[i]) == 0 and len(parents[i]) > 0:
                    leaves.append(i)
                #If candidate i does not have parents then it is a root
                elif len(parents[i]) == 0 and len(children[i]) > 0:
                    roots.append(i)
            #If the first optimization is disabled, then use the general case
            #algorithm for every order
            if not(optimUpActivated):
                upList_i = __upGeneral(children,parents,roots,m,maxScore,rule)
                upList.append(upList_i)
                generalCaseOrders.append(pairs)
            #Otherwise, if the order is linear or multilinear,
            #use special algorithms
            elif len(roots) == len(leaves) and isLinear:
                #Linear case
                if len(roots) == 1:
                    candRankL_i = __upLinear(children,roots,m,maxScore,rule)
                    candRankL.append(candRankL_i)
                #MultiLinear case
                else:
                    candRankM_i,orderLengthM_i = upMultilinear(children,roots,m,maxScore,rule)
                    candRankM.append(candRankM_i)
                    orderLengthM.append(orderLengthM_i)
            #Then test if this is partitioned preferences
            else:
                blockNumber = [-1 for i in range(m)] #Block number of ith candidate
                currentBlockNb = 0 #Number of the current block
                currentBlock = roots.copy() #Candidates in the current block
                # NOTE(review): `sum` shadows the builtin here; kept as-is.
                sum = 0
                last = 0
                while currentBlock != []:
                    temp = len(currentBlock)
                    sum += last*temp
                    last = temp
                    #Set block number of candidates
                    for cand in currentBlock:
                        blockNumber[cand] = currentBlockNb
                    #Increment block number
                    currentBlockNb += 1
                    #Get next block
                    currentBlock = children[currentBlock[0]].copy()
                #The order is partitioned iff :
                # 1. every candidate of ith block is connected to every candidate
                # of i+1th block (ie sum = len(pairs))
                # 2. every children of candidate in ith block is in i+1th block
                cont = False
                if sum == (len(pairs)):
                    cont = True
                    for cand in range(m):
                        for x in children[cand]:
                            if blockNumber[x] != blockNumber[cand] + 1:
                                cont = False
                                break
                        if not(cont):
                            break
                #Use partitioned preferences algorithm
                if cont:
                    blockNumberP_i,ranksBlocksP_i = __upPartitioned(children,roots,m,maxScore,rule)
                    blockNumberP.append(blockNumberP_i)
                    ranksBlocksP.append(ranksBlocksP_i)
                #Use general case algorithm
                else:
                    upList_i = __upGeneral(children,parents,roots,m,maxScore,rule)
                    upList.append(upList_i)
                    generalCaseOrders.append(pairs)
    #Optimize the list of candidate we want to test on next step (__competitions)
    if optimCandActivated:
        #TH : Only candidate(s) with the best maximal score can be necessary winner
        # (we take the min because we saved m-bestScore instead of bestScore)
        bestScore = min(maxScore)
        candToTest = []
        for i in range(m):
            if maxScore[i] == bestScore:
                candToTest.append(i)
        nbToTest = len(candToTest)
        #We compute the down only for candidate to test
        downList = [[] for i in range(nbToTest)]
        #We need to compute down only for orders in the general case
        for pairs in generalCaseOrders:
            #get children of every candidate
            children = [[] for i in range(m)]
            for (a,b) in pairs:
                children[a].append(b)
            #We look to the down set of every candidate to test
            #This is the same algorithm than for Up
            for j in range(nbToTest):
                cand = candToTest[j]
                visited = [False for i in range(m)]
                downCand = [cand]
                queue = children[cand].copy()
                while queue != []:
                    newCand = queue.pop()
                    downCand.append(newCand)
                    for newCandChild in children[newCand]:
                        if not(visited[newCandChild]):
                            visited[newCandChild] = True
                            queue.append(newCandChild)
                downList[j].append(downCand)
    else:
        #If we don't optimize, then we compute the down set of every candidate
        #using their up set.
        candToTest = []
        downList = [[] for i in range(m)]
        for i in range(len(upList)):
            upList_i = upList[i]
            for j_1 in range(m):
                downList[j_1].append([])
            for j_1 in range(m):
                for j_2 in upList_i[j_1]:
                    downList[j_2][i].append(j_1)
    #Print if verbose activated
    if verbose:
        print("Empty : "+str(countEmpty)+"\nLinear : "+str(len(candRankL)-countEmpty)+"\nMultilinear : "+str(len(candRankM))+"\nPartitioned : "+str(len(blockNumberP))+"\nGeneral case : "+str(len(upList)))
    #Return informations useful to the next step :
    # 1. Informations on partial orders
    # 2. List of candidates to test
    # 3. The best score of each candidate.
    return [[upList,downList],[blockNumberP,ranksBlocksP],[candRankL],[candRankM,orderLengthM]],candToTest,maxScore
## Parallelized version of the Step 1 (computation of Up and Down)
# We can speed up the above algorithm by parallelizing the computation of Up and Down (step 1)
# Below is the code with the parallelized version of the algorithm.
def __upParallelized(pairs,m,indice,rule):
    """Worker task: classify one partial order and compute its Up structure.

    Returns (payload, category, maxScore) where category is 0 (linear),
    1 (multilinear), 2 (partitioned) or 3 (general case), and maxScore
    holds the score loss of every candidate for this single order.
    """
    parents = [[] for i in range(m)]
    children = [[] for i in range(m)]
    for (a,b) in pairs:
        parents[b].append(a)
        children[a].append(b)
    maxScore = [0 for i in range(m)]
    isLinear = True
    roots = []
    leaves = []
    for i in range(m):
        # >1 parents or >1 children anywhere means the order is not a chain.
        if len(parents[i]) > 1 or len(children[i]) > 1:
            isLinear = False
        if len(children[i]) == 0 and len(parents[i]) > 0:
            leaves.append(i)
        elif len(parents[i]) == 0 and len(children[i]) > 0:
            roots.append(i)
    if len(roots) == len(leaves) and isLinear:
        if len(roots) == 1:
            candRankL_i = __upLinear(children,roots,m,maxScore,rule)
            return candRankL_i,0,maxScore
        else:
            candRankM_i,orderLengthM_i = upMultilinear(children,roots,m,maxScore,rule)
            return (candRankM_i,orderLengthM_i),1,maxScore
    else:
        # Partitioned test (same check as in upDown).
        blockNumber = [-1 for i in range(m)]
        currentBlockNb = 0
        currentBlock = roots.copy()
        sum = 0
        last = 0
        while currentBlock != []:
            temp = len(currentBlock)
            sum += last*temp
            last = temp
            for cand in currentBlock:
                blockNumber[cand] = currentBlockNb
            currentBlockNb += 1
            currentBlock = children[currentBlock[0]].copy()
        cont = False
        if sum == (len(pairs)):
            cont = True
            for cand in range(m):
                for x in children[cand]:
                    if blockNumber[x] != blockNumber[cand] + 1:
                        cont = False
                        break
                if not(cont):
                    break
        if cont:
            blockNumberP_i,ranksBlocksP_i = __upPartitioned(children,roots,m,maxScore,rule)
            return (blockNumberP_i,ranksBlocksP_i),2,maxScore
        else:
            # BUG FIX: the original called __upGeneral(C, P, ...) with the
            # undefined names C and P (NameError at runtime); they must be
            # the local children / parents adjacency lists.
            U_i = __upGeneral(children,parents,roots,m,maxScore,rule)
            # We convert U into a tuple array otherwise it takes too much time to be transfered between processes.
            a = [tuple(ui) for ui in U_i]
            ind = indice
            return (a,ind),3,maxScore
def __upParallelizedConcat(list):
    """Process one chunk: run __upParallelized on every element of *list*."""
    return [__upParallelized(pair, m, ind, rule)
            for (pair, m, ind, rule) in list]
def upDownParallelized(population,m,rule='b',verbose=False,process=4,chunksize=1,chunks=10):
    """Parallel version of upDown: classify orders with a worker Pool.

    population/m/rule: as in upDown.  process: number of worker processes;
    chunks/chunksize: how the population is split and dispatched.
    Returns (ordersInfos, candToTest, maxScore) with the same layout as
    upDown (candidate pruning is always applied here).
    """
    n = len(population)
    # Initialization
    maxScore = [0 for i in range(m)]
    upList = []
    blockNumberP = []
    ranksBlocksP = []
    candRankL = []
    candRankM = []
    orderLengthM = []
    pairsGeneralCase = []
    if process <= 0:
        raise ValueError("Number of processes should be > 0")
    # BUG FIX: the original iterated over `Population` (capital P), an
    # undefined name; the parameter is `population`.
    pairs_mb = [(pair,m,i,rule) for i,pair in enumerate(population)]
    # We divide the population into process*chunks blocks so each process work on chunks blocks
    pairs_mb_concat = [pairs_mb[(i*n)//(process*chunks):((i+1)*n)//(process*chunks)] for i in range(process*chunks)]
    with Pool(process) as p:
        out = p.map(__upParallelizedConcat,pairs_mb_concat,chunksize=chunksize)
    # We gather all the results together after the parallelized part
    for out_el in out:
        for out_i in out_el:
            maxScore_i = out_i[2]
            for j in range(m):
                maxScore[j] += maxScore_i[j]
            category = out_i[1]
            if category == 0:
                candRankL.append(out_i[0])
            elif category == 1:
                candRankM.append(out_i[0][0])
                orderLengthM.append(out_i[0][1])
            elif category == 2:
                blockNumberP.append(out_i[0][0])
                ranksBlocksP.append(out_i[0][1])
            else:
                # General case: payload is (upList as tuples, order index).
                upList.append(list(out_i[0][0]))
                pairsGeneralCase.append(population[out_i[0][1]])
    # We optimize the candidate to test: only candidates with the best
    # (minimal) accumulated loss can be necessary winners.
    bestScore = min(maxScore)
    candToTest = []
    for i in range(m):
        if maxScore[i] == bestScore:
            candToTest.append(i)
    nbToTest = len(candToTest)
    # We compute the downList for those candidates and the
    # partial orders in the general case.
    downList = [[] for i in range(nbToTest)]
    for pairs in pairsGeneralCase:
        children = [[] for i in range(m)]
        for (a,b) in pairs:
            children[a].append(b)
        for j in range(nbToTest):
            cand = candToTest[j]
            visited = [False for i in range(m)]
            downCand = [cand]
            queue = children[cand].copy()
            while queue != []:
                newCand = queue.pop()
                downCand.append(newCand)
                for newCandChild in children[newCand]:
                    if not(visited[newCandChild]):
                        visited[newCandChild] = True
                        queue.append(newCandChild)
            downList[j].append(downCand)
    if verbose:
        print("Linear : "+str(len(candRankL))+"\nMultilinear : "+str(len(candRankM))+"\nPartitoned : "+str(len(blockNumberP))+"\nGeneral case : "+str(len(upList)))
    return [[upList,downList],[blockNumberP,ranksBlocksP],[candRankL],[candRankM,orderLengthM]],candToTest,maxScore
## STEP 2 : __competition, with Borda rule.
#A borda __competition between two candidates "candTested" and "opponent"
#as described in Xia and Conitzer paper (in the general case)
def __competitionBordaGeneral(candTested,opponent,upList,downList,m):
    """Borda duel over the general-case orders.

    upList[i][c] is Up(c) for voter i; downList[i] is Down(candTested)
    for voter i.  Returns (scoreOpponent, scoreCandTested) under the
    completion worst for candTested / best for the opponent.
    """
    n = len(upList)
    scoreOpponent = 0 #Init scores
    scoreCandTested = 0
    for i in range(n):
        # If candTested > opponent, then minimize their difference
        if candTested in upList[i][opponent]:
            blockSize = __intersect__(downList[i],upList[i][opponent])
            scoreCandTested += blockSize
        # Otherwise, set opponent > candTested and maximize their difference
        else:
            scoreOpponent += m-len(upList[i][opponent])
            scoreCandTested += len(downList[i])-1
    # Return score of the two candidates
    return scoreOpponent,scoreCandTested
#A borda __competition in the case of partitioned preferences
def __competitionBordaPartitioned(candTested,opponent,blockNumberP,ranksBlocksP,m):
    """Borda duel over the partitioned-preference orders.

    Returns (scoreOpponent, scoreCandTested) for the completion that is
    worst for candTested and best for the opponent.
    """
    scoreOpponent, scoreCandTested = 0, 0
    for blocks, ranks in zip(blockNumberP, ranksBlocksP):
        testedBlock = blocks[candTested]
        opponentBlock = blocks[opponent]
        if 0 <= testedBlock < opponentBlock:
            # candTested is forced above the opponent: squeeze them together.
            scoreCandTested += ranks[opponentBlock] - ranks[testedBlock + 1] + 1
        else:
            if testedBlock != -1:
                # Push every unconstrained candidate above candTested.
                scoreCandTested += ranks[-1] - ranks[testedBlock + 1]
            # Lift the opponent as high as possible (to the top if unranked).
            scoreOpponent += m - 1 - ranks[max(0, opponentBlock)]
    return scoreOpponent, scoreCandTested
#A borda __competition in the case of linear preferences
def __competitionBordaLinear(candTested,opponent,candRankL,m):
    """Borda duel over the linear orders.

    candRankL[i] gives per-candidate ranks (-1 = unranked) with the chain
    length as last element.  Returns (scoreOpponent, scoreCandTested).
    """
    scoreOpponent, scoreCandTested = 0, 0
    for ranks in candRankL:
        testedRank = ranks[candTested]
        opponentRank = ranks[opponent]
        if 0 <= testedRank < opponentRank:
            # candTested is forced above the opponent: place them adjacent.
            scoreCandTested += opponentRank - testedRank
        else:
            if testedRank != -1:
                # Every free candidate slides in below candTested.
                scoreCandTested += ranks[-1] - testedRank - 1
            # Raise the opponent as high as possible (top if unranked).
            scoreOpponent += m - 1 - max(opponentRank, 0)
    return scoreOpponent, scoreCandTested
# A borda __competition on the multilinear case
def __competitionBordaMultilinear(candTested,opponent,candRankM,orderLengthM,m):
    """Borda duel over the multilinear (disjoint chains) orders.

    candRankM[i][c] is (chain index, rank) or (-1, -1); orderLengthM[i]
    lists chain lengths.  Returns (scoreOpponent, scoreCandTested).
    """
    scoreOpponent, scoreCandTested = 0, 0
    for ranks, lengths in zip(candRankM, orderLengthM):
        testedChain, testedRank = ranks[candTested]
        opponentChain, opponentRank = ranks[opponent]
        forcedAbove = (testedRank >= 0
                       and testedRank < opponentRank
                       and opponentChain == testedChain)
        if forcedAbove:
            # Same chain, candTested above: minimize their gap.
            scoreCandTested += opponentRank - testedRank
        else:
            if testedChain != -1:
                # Insert every foreign candidate right below candTested.
                scoreCandTested += lengths[testedChain] - testedRank - 1
            # Opponent goes as high as possible (top if unranked).
            scoreOpponent += m - 1 - max(opponentRank, 0)
    return scoreOpponent, scoreCandTested
# A complete Borda __competition between two candidate
def __competitionBorda(candTested,opponent,ordersInfos,candIndex,m,verbose=False):
    """Full Borda duel: sum the four per-category sub-competitions.

    candIndex is candTested's index in the candToTest list (selects its
    Down sets).  Returns True iff candTested scores at least as much as
    the opponent in the worst-case completion.
    """
    #Get orders infos
    [upList,downList] = ordersInfos[0]
    [blockNumberP,ranksBlocksP] = ordersInfos[1]
    [candRankL] = ordersInfos[2]
    [candRankM,orderLengthM] = ordersInfos[3]
    #Compute subscore for each special case
    scoreOpponentGeneralCase,scoreCandTestedGeneralCase = __competitionBordaGeneral(candTested,opponent,upList,downList[candIndex],m)
    scoreOpponentPartitioned,scoreCandTestedPartitioned = __competitionBordaPartitioned(candTested,opponent,blockNumberP,ranksBlocksP,m)
    scoreOpponentLinear,scoreCandTestedLinear = __competitionBordaLinear(candTested,opponent,candRankL,m)
    scoreOpponentMultilinear,scoreCandTestedMultilinear = __competitionBordaMultilinear(candTested,opponent,candRankM,orderLengthM,m)
    #Compute total scores
    scoreOpponent = scoreOpponentGeneralCase + scoreOpponentPartitioned + scoreOpponentLinear + scoreOpponentMultilinear
    scoreCandTested = scoreCandTestedGeneralCase + scoreCandTestedPartitioned + scoreCandTestedLinear + scoreCandTestedMultilinear
    #Print if verbose
    if verbose:
        print("Test "+str(candTested)+" ("+str(scoreCandTested)+") against "+str(opponent)+" ("+str(scoreOpponent)+")")
    # Return True iff candTested is always better than its opponent
    return scoreCandTested >= scoreOpponent
## General Algorithm : Borda
# The algorithm for NW and Borda rule
def borda(population,m,verbose=False,optimUpActivated=True,optimCandActivated=True,parallelized=False,process=4,chunksize=1,chunks=10):
    """Compute the Necessary Winners of *population* under the Borda rule.

    population: list of partial orders (lists of (a, b) preference pairs);
    m: number of candidates.  The optim*/parallelized flags select the
    Step-1 variant; process/chunksize/chunks tune the parallel version.
    Returns the list of necessary winners.
    """
    #Step 1 : get Up and Down (or similar order informations)
    if parallelized:
        ordersInfos,candToTest,maxScore = upDownParallelized(population,m,'b',verbose,process,chunksize,chunks)
    else:
        ordersInfos,candToTest,maxScore = upDown(population,m,'b',verbose,optimUpActivated,optimCandActivated)
    #Step 2 : do __competitions between candidates
    NW = [] #Init list of NW
    #If optimization on __competitions, order the candidates so we test those which are
    #more likely to be NW first
    if optimCandActivated:
        order = np.argsort(maxScore)
    #Otherwise, we test every candidate
    else:
        candToTest = [i for i in range(m)]
        order = [i for i in range(m)]
    #Test ith candidate
    for i in range(len(candToTest)):
        isaNW = True
        for j in range(m): #For every opponent != candTested
            if candToTest[i] != order[j]:
                # A single lost duel disqualifies the candidate.
                if not(__competitionBorda(candToTest[i],order[j],ordersInfos,i,m,verbose=verbose)):
                    isaNW = False
                    break
        if isaNW:
            NW.append(candToTest[i])
    #Return list of NW
    return NW
## Step 2 :
#A k-approval __competition between two candidates "candTested" and "opponent"
#as described in Xia and Conitzer paper (in the general case)
def __competitionKappGeneral(k,candTested,opponent,upList,downList,m):
    """k-approval duel over the general-case orders.

    downList holds, per voter, the Down-set of candTested.  Returns
    (scoreOpponent, scoreCandTested) for the adversarial completion.
    """
    scoreOpponent, scoreCandTested = 0, 0
    for upSets, down in zip(upList, downList):
        # Best reachable rank of the opponent / worst rank of candTested.
        bestOpponentPos = len(upSets[opponent])
        worstTestedPos = m - len(down) + 1
        if candTested in upSets[opponent]:
            # candTested is forced above the opponent: candTested scores
            # only when it is always in the top k and the opponent never is.
            if worstTestedPos <= k and bestOpponentPos > k:
                scoreCandTested += 1
        else:
            if worstTestedPos <= k:
                scoreCandTested += 1
            if bestOpponentPos <= k:
                scoreOpponent += 1
    return scoreOpponent, scoreCandTested
# A k-approval __competition with partitioned preferences
def __competitionKappPartitioned(k,candTested,opponent,blockNumberP,ranksBlocksP,m):
    """k-approval duel over the partitioned-preference orders.

    Returns (scoreOpponent, scoreCandTested) for the completion worst for
    candTested and best for the opponent.
    """
    scoreOpponent, scoreCandTested = 0, 0
    for blocks, ranks in zip(blockNumberP, ranksBlocksP):
        testedBlock = blocks[candTested]
        opponentBlock = blocks[opponent]
        # Best reachable position of the opponent.
        bestOpponentPos = ranks[opponentBlock] + 1
        # Worst reachable position of candTested (free candidates above).
        worstTestedPos = (m - ranks[-1]) + ranks[testedBlock + 1]
        if 0 <= testedBlock < opponentBlock:
            if worstTestedPos <= k and bestOpponentPos > k:
                scoreCandTested += 1
        else:
            if worstTestedPos <= k and testedBlock >= 0:
                scoreCandTested += 1
            # An unranked opponent (block -1) can always be placed on top.
            if bestOpponentPos <= k or opponentBlock < 0:
                scoreOpponent += 1
    return scoreOpponent, scoreCandTested
# A k-approval __competition with linear preferences
def __competitionKappLinear(k,candTested,opponent,candRankL,m):
    """k-approval duel over the linear orders.

    Returns (scoreOpponent, scoreCandTested) for the completion worst for
    candTested and best for the opponent.
    """
    scoreOpponent, scoreCandTested = 0, 0
    for ranks in candRankL:
        testedRank = ranks[candTested]
        opponentRank = ranks[opponent]
        # Best reachable position of the opponent (top if unranked).
        bestOpponentPos = max(opponentRank, 0) + 1
        # Worst reachable position of candTested (free candidates above).
        worstTestedPos = (m - ranks[-1]) + testedRank + 1
        if 0 <= testedRank < opponentRank:
            # candTested is forced above the opponent in this chain.
            if bestOpponentPos > k and worstTestedPos <= k:
                scoreCandTested += 1
        else:
            if worstTestedPos <= k and testedRank >= 0:
                scoreCandTested += 1
            if bestOpponentPos <= k or opponentRank < 0:
                scoreOpponent += 1
    return scoreOpponent, scoreCandTested
# A k-approval __competition with multilinear preferences
def __competitionKappMultilinear(k,candTested,opponent,candRankM,orderLengthM,m):
    """k-approval duel between candTested and opponent over voters whose
    preferences are a disjoint union of linear orders ("multilinear").

    candRankM[i][c] is a pair (order number, rank) for candidate c in voter
    i's preference (rank -1 when unranked, per the checks below);
    orderLengthM[i][o] is the length of voter i's o-th order.
    Returns (scoreOpponent, scoreCandTested).
    """
    n = len(candRankM)
    scoreOpponent = 0
    scoreCandTested = 0
    for i in range(n):
        (orderNumberCandTested,rankCandTested) = candRankM[i][candTested]
        (orderNumberOpponent,rankOpponent) = candRankM[i][opponent]
        # Best possible (1-based) position for the opponent.
        minposOpponent = rankOpponent + 1
        # Worst possible (1-based) position for the tested candidate.
        maxposCandTested = m-(orderLengthM[i][orderNumberCandTested] - rankCandTested)
        # NOTE(review): sibling variants test `rank >= 0` here; this one uses
        # `rank > 0`, which also excludes a rank-0 candidate. Confirm whether
        # multilinear ranks are 1-based before changing it.
        if rankCandTested > 0 and (rankCandTested < rankOpponent and orderNumberOpponent == orderNumberCandTested):
            # If candTested > oponnent and rank(candTested) <= k and rank(opponent) > k,
            # Then the tested candidate win a point but not the opponent
            if minposOpponent > k and maxposCandTested <= k:
                scoreCandTested += 1
        else:
            # Otherwise, we minimize the rank of the opponent and maximize
            # the one of the candidate being tested
            if rankCandTested != -1 and maxposCandTested <= k:
                scoreCandTested += 1
            if rankOpponent == -1 or minposOpponent <= k:
                scoreOpponent += 1
    return scoreOpponent,scoreCandTested
# Gather results of all k-approval sub__competition between two candidates
def __competitionKapp(k,candTested,opponent,ordersInfos,candIndex,m,verbose=False):
    """Aggregate the four k-approval sub-duels (general / partitioned /
    linear / multilinear voters) between candTested and opponent.

    Returns True iff candTested's total score is at least the opponent's,
    i.e. the opponent never strictly beats the tested candidate.
    """
    # Unpack the per-representation order information.
    upList, downList = ordersInfos[0]
    blockNumberP, ranksBlocksP = ordersInfos[1]
    (candRankL,) = ordersInfos[2]
    candRankM, orderLengthM = ordersInfos[3]
    subduels = (
        __competitionKappGeneral(k, candTested, opponent, upList, downList[candIndex], m),
        __competitionKappPartitioned(k, candTested, opponent, blockNumberP, ranksBlocksP, m),
        __competitionKappLinear(k, candTested, opponent, candRankL, m),
        __competitionKappMultilinear(k, candTested, opponent, candRankM, orderLengthM, m),
    )
    scoreOpponent = sum(opp for opp, _ in subduels)
    scoreCandTested = sum(cand for _, cand in subduels)
    if verbose:
        print("Test "+str(candTested)+" ("+str(scoreCandTested)+") against "+str(opponent)+" ("+str(scoreOpponent)+")")
    # True iff candTested is always at least as good as its opponent.
    return scoreCandTested >= scoreOpponent
## General Algorithm : k-approval, plurality and veto
def kapp(population,m,k,verbose=False,optimUpActivated=True,optimCandActivated=True,parallelized=False,process=4,chunksize=1,chunks=10):
    """k-approval winners over a population of (partial) preference orders.

    Step 1 extracts the order information (Up/Down sets and related
    structures); step 2 runs a pairwise duel between every candidate worth
    testing and all m candidates. Returns the list of candidates that
    survive every duel.
    """
    # Step 1: order information, optionally computed in parallel.
    if parallelized:
        ordersInfos, candToTest, maxScore = upDownParallelized(population, m, "k"+str(k), verbose, process, chunksize, chunks)
    else:
        ordersInfos, candToTest, maxScore = upDown(population, m, "k"+str(k), verbose, optimUpActivated, optimCandActivated)
    # Step 2: pairwise duels.
    if optimCandActivated:
        order = np.argsort(maxScore)
    else:
        candToTest = list(range(m))
        order = list(range(m))
    winners = []
    for idx, cand in enumerate(candToTest):
        # all() short-circuits on the first lost duel, like the original break.
        survives = all(
            __competitionKapp(k, cand, rival, ordersInfos, idx, m, verbose=verbose)
            for rival in order if rival != cand
        )
        if survives:
            winners.append(cand)
    return winners
def plurality(population,m,verbose=False,optimUpActivated=False,optimCandActivated=True,parallelized=False,process=4,chunksize=1,chunks=10):
    """Plurality is the 1-approval rule: delegate to kapp with k = 1."""
    return kapp(population, m, 1, verbose=verbose,
                optimUpActivated=optimUpActivated,
                optimCandActivated=optimCandActivated,
                parallelized=parallelized, process=process,
                chunksize=chunksize, chunks=chunks)
def veto(population,m,verbose=False,optimUpActivated=False,optimCandActivated=True,parallelized=False,process=4,chunksize=1,chunks=10):
    """Veto is the (m-1)-approval rule: delegate to kapp with k = m - 1."""
    return kapp(population, m, m - 1, verbose=verbose,
                optimUpActivated=optimUpActivated,
                optimCandActivated=optimCandActivated,
                parallelized=parallelized, process=process,
                chunksize=chunksize, chunks=chunks)
## Step 2 : Any positional scoring rule
# Precomputes score differences by dynamic programming:
# M[i, j, s] is the minimal score difference for a block of size s+1 whose
# top candidate has minimal rank i and whose bottom candidate has maximal
# rank j.
def precomputeScore(rule,m):
    """Build the dynamic-programming table of minimal score differences.

    The base case places the block at its leftmost admissible position; the
    induction over the remaining slack either shifts the block one step
    right or reuses the optimum computed with one unit less slack.
    """
    M = np.zeros((m - 1, m - 1, m - 1))
    # Base case: zero slack, block sits directly below the top candidate.
    for top in range(m):
        for bottom in range(top + 1, m):
            M[top, bottom - 1, bottom - top - 1] = rule[top] - rule[bottom]
    # Induction on the slack between the two candidates.
    for slack in range(1, m):
        for top in range(m):
            for bottom in range(top + slack + 1, m):
                shifted = rule[top + slack] - rule[bottom]
                M[top, bottom - 1, bottom - top - slack - 1] = min(
                    shifted, M[top, bottom - 2, bottom - top - slack - 1])
    return M
def __competitionPositionalScoringRuleGeneral(rule,M,candTested,opponent,upList,downList,m,optimPreprocessing=True):
    """Positional-scoring duel over voters in the general (poset) case.

    For each voter, if candTested dominates the opponent, the minimal score
    gap between them is credited to candTested; otherwise the two are scored
    independently (best position for the opponent, worst for candTested).
    Returns (scoreOpponent, scoreCandTested).
    """
    opp_score = 0
    cand_score = 0
    for up_i, down_i in zip(upList, downList):
        up_opp = up_i[opponent]
        if candTested in up_opp:
            # candTested > opponent: size of the block separating them.
            blockSize = __intersect__(down_i, up_opp)
            if blockSize == 0:
                raise ValueError("Block size = 0")
            start = len(up_opp) - blockSize - 1
            if optimPreprocessing:
                # Look the minimal gap up in the precomputed table.
                cand_score += M[start, m - len(down_i) + blockSize - 1, blockSize - 1]
            else:
                # Scan every admissible position of the block.
                end = m - len(down_i) + 1
                candidates = [rule[start] - rule[start + blockSize]]
                candidates += [rule[pos] - rule[pos + blockSize]
                               for pos in range(start + 1, end)]
                cand_score += min(candidates)
        else:
            # Independent scoring: best case for the opponent, worst for
            # the tested candidate.
            opp_score += rule[len(up_opp) - 1]
            cand_score += rule[m - len(down_i)]
    return opp_score, cand_score
def __competitionPositionalScoringRulePartitioned(rule,M,candTested,opponent,blockNumberP,ranksBlocksP,m,optimPreprocessing=True):
    """Positional-scoring duel over voters with partitioned preferences.

    blockNumberP[i][c] is the block index of candidate c for voter i (-1 if
    unranked); ranksBlocksP[i][b] appears to hold cumulative candidate counts
    per block, with ranksBlocksP[i][-1] the total ranked (inferred from
    usage — confirm against upDown). Returns (scoreOpponent, scoreCandTested).
    """
    n = len(blockNumberP)
    scoreOpponent = 0
    scoreCandTested = 0
    for i in range(n):
        if (blockNumberP[i][candTested] >= 0) and (blockNumberP[i][candTested] < blockNumberP[i][opponent]):
            # If candTested > opponent, then compute the minimal score difference between them.
            blockSize = ranksBlocksP[i][blockNumberP[i][opponent]]+1 - ranksBlocksP[i][blockNumberP[i][candTested]+1]
            if blockSize == 0:
                raise ValueError("Block size = 0")
            # If we use preprocessing, then use the M matrix
            if optimPreprocessing:
                M_i = ranksBlocksP[i][blockNumberP[i][opponent]]-blockSize
                M_j = m-(ranksBlocksP[i][-1]-ranksBlocksP[i][blockNumberP[i][candTested]+1] + 1)+blockSize-1
                scoreCandTested += M[M_i,M_j,blockSize-1]
            # Otherwise, try every position of the block between the two candidates
            else:
                start = ranksBlocksP[i][blockNumberP[i][opponent]]-blockSize
                end = m-(ranksBlocksP[i][-1]-ranksBlocksP[i][blockNumberP[i][candTested]+1])
                minDiff = rule[start]-rule[start+blockSize]
                for it in range(start+1,end):
                    if rule[it]-rule[it+blockSize] < minDiff:
                        minDiff = rule[it]-rule[it+blockSize]
                scoreCandTested += minDiff
        else:
            # Otherwise, minimize score of candTested and maximize score of its oponnent
            scoreOpponent += rule[ranksBlocksP[i][max(0,blockNumberP[i][opponent])]]
            if blockNumberP[i][candTested] == -1:
                # Unranked tested candidate: worst possible score.
                scoreCandTested += rule[-1]
            else:
                scoreCandTested += rule[m - (ranksBlocksP[i][-1]-ranksBlocksP[i][blockNumberP[i][candTested]+1] + 1)]
    return scoreOpponent,scoreCandTested
def __competitionPositionalScoringRuleLinear(rule,M,candTested,opponent,candRankL,m,optimPreprocessing=True):
    """Positional-scoring duel over voters with (truncated) linear preferences.

    candRankL[i][c] is the rank of candidate c for voter i (-1 if unranked);
    candRankL[i][-1] is the number of ranked candidates (inferred from usage
    — confirm against upDown). If candTested is ranked above the opponent,
    the minimal score gap between them is credited to candTested; otherwise
    they are scored independently. Returns (scoreOpponent, scoreCandTested).
    """
    n = len(candRankL)
    scoreOpponent = 0
    scoreCandTested = 0
    for i in range(n):
        if candRankL[i][candTested] >= 0 and candRankL[i][candTested] < candRankL[i][opponent]:
            # If candTested > opponent, then compute the minimal score difference between them.
            blockSize = candRankL[i][opponent] - candRankL[i][candTested]
            if blockSize == 0:
                raise ValueError("Block size = 0")
            # If we use preprocessing, then use the M matrix
            if optimPreprocessing:
                M_i = candRankL[i][candTested]
                M_j = m-1-(candRankL[i][-1]-candRankL[i][opponent])
                scoreCandTested += M[M_i,M_j,blockSize-1]
            # Otherwise, try every position of the block between the two candidates
            else:
                start = candRankL[i][candTested]
                end = (m - candRankL[i][-1])+candRankL[i][candTested]+1
                minDiff = rule[start]-rule[start+blockSize]
                # Fix: the scan variable used to be `i`, shadowing the voter
                # index of the enclosing loop (sibling functions use `it`).
                for it in range(start+1,end):
                    if rule[it] - rule[it+blockSize] < minDiff:
                        minDiff = rule[it] - rule[it+blockSize]
                scoreCandTested += minDiff
        else:
            # Otherwise, minimize score of candTested and maximize score of its oponnent
            scoreOpponent += rule[max(candRankL[i][opponent],0)]
            if candRankL[i][candTested] == -1:
                # Unranked tested candidate: worst possible score.
                scoreCandTested += rule[-1]
            else:
                scoreCandTested += rule[(m - candRankL[i][-1])+candRankL[i][candTested]]
    return scoreOpponent,scoreCandTested
def __competitionPositionalScoringRuleMultilinear(rule,M,candTested,opponent,candRankM,orderLengthM,m,optimPreprocessing=True):
    """Positional-scoring duel over voters with multilinear preferences.

    candRankM[i][c] is a pair (order number, rank) for candidate c in voter
    i's preference (rank -1 when unranked); orderLengthM[i][o] is the length
    of voter i's o-th linear order. Returns (scoreOpponent, scoreCandTested).
    """
    n = len(candRankM)
    scoreOpponent = 0
    scoreCandTested = 0
    for i in range(n):
        (orderNumberCandTested,rankCandTested) = candRankM[i][candTested]
        (orderNumberOpponent,rankOpponent) = candRankM[i][opponent]
        if rankCandTested >=0 and (rankCandTested < rankOpponent) and orderNumberOpponent == orderNumberCandTested:
            # If candTested > opponent, then compute the minimal score difference between them.
            blockSize = rankOpponent-rankCandTested
            if blockSize == 0:
                raise ValueError("Block size = 0")
            # If we use preprocessing, then use the M matrix
            if optimPreprocessing:
                # NOTE(review): M_i = rank-1 suggests 1-based ranks here,
                # unlike the linear case (M_i = rank) — confirm against the
                # construction of candRankM in upDown.
                M_i = rankCandTested-1
                M_j = m-1-(orderLengthM[i][orderNumberOpponent]-rankOpponent)
                scoreCandTested += M[M_i,M_j,blockSize-1]
            # Otherwise, try every position of the block between the two candidates
            else:
                start = rankCandTested
                end = m - orderLengthM[i][orderNumberCandTested]+rankCandTested+1
                minDiff = rule[start] - rule[start+blockSize]
                # Fix: the scan variable used to be `i`, shadowing the voter
                # index of the enclosing loop (sibling functions use `it`).
                for it in range(start+1,end):
                    if rule[it] - rule[it+blockSize] < minDiff:
                        minDiff = rule[it] - rule[it+blockSize]
                scoreCandTested += minDiff
        else:
            # Otherwise, minimize score of candTested and maximize score of its oponnent
            scoreOpponent += rule[max(rankOpponent,0)]
            if rankCandTested == -1:
                # Unranked tested candidate: worst possible score.
                scoreCandTested += rule[-1]
            else:
                scoreCandTested += rule[m - (orderLengthM[i][orderNumberCandTested]-rankCandTested)]
    return scoreOpponent,scoreCandTested
def __competitionPositionalScoringRule(rule,M,candTested,opponent,ordersInfos,candIndex,m,verbose=False,optimPreprocessing=True):
    """Aggregate the four positional-scoring sub-duels (general /
    partitioned / linear / multilinear voters) between the two candidates.

    Returns True iff candTested's total score is at least the opponent's,
    i.e. the opponent never strictly beats the tested candidate.
    """
    # Unpack the per-representation order information.
    upList, downList = ordersInfos[0]
    blockNumberP, ranksBlocksP = ordersInfos[1]
    (candRankL,) = ordersInfos[2]
    candRankM, orderLengthM = ordersInfos[3]
    subduels = (
        __competitionPositionalScoringRuleGeneral(rule, M, candTested, opponent, upList, downList[candIndex], m, optimPreprocessing),
        __competitionPositionalScoringRulePartitioned(rule, M, candTested, opponent, blockNumberP, ranksBlocksP, m, optimPreprocessing),
        __competitionPositionalScoringRuleLinear(rule, M, candTested, opponent, candRankL, m, optimPreprocessing),
        __competitionPositionalScoringRuleMultilinear(rule, M, candTested, opponent, candRankM, orderLengthM, m, optimPreprocessing),
    )
    scoreOpponent = sum(opp for opp, _ in subduels)
    scoreCandTested = sum(cand for _, cand in subduels)
    if verbose:
        print("Test "+str(candTested)+" ("+str(scoreCandTested)+") against "+str(opponent)+" ("+str(scoreOpponent)+")")
    # True iff candTested is always at least as good as its opponent.
    return scoreCandTested >= scoreOpponent
## General algorithm : Positional scoring rule
def positionalScoringRule(population,m,rule,verbose=False,optimUpActivated=True,optimCandActivated=True,optimPreprocessing=True,parallelized=False,process=4,chunksize=1,chunks=10):
    """Winners of an arbitrary positional scoring rule over a population of
    (partial) preference orders.

    Step 1 extracts the order information; step 2 runs a pairwise duel
    between every candidate worth testing and all m candidates. Returns the
    list of candidates that survive every duel.
    """
    # Step 1: order information, optionally computed in parallel.
    if parallelized:
        ordersInfos, candToTest, maxScore = upDownParallelized(population, m, rule, verbose, process, chunksize, chunks)
    else:
        ordersInfos, candToTest, maxScore = upDown(population, m, rule, verbose, optimUpActivated, optimCandActivated)
    # The preprocessing table costs O(m**3) while the naive scan costs
    # O(m*n); only precompute when m**2 <= n.
    n = len(population)
    if m * m > n:
        optimPreprocessing = False
    M = precomputeScore(rule, m) if optimPreprocessing else []
    # Step 2: pairwise duels.
    if not optimCandActivated:
        candToTest = list(range(m))
        order = list(range(m))
    else:
        order = np.argsort(maxScore)
    winners = []
    for idx, cand in enumerate(candToTest):
        # all() short-circuits on the first lost duel, like the original break.
        survives = all(
            __competitionPositionalScoringRule(rule, M, cand, rival, ordersInfos, idx, m, verbose, optimPreprocessing)
            for rival in order if rival != cand
        )
        if survives:
            winners.append(cand)
    return winners
"34279251+TheoDlmz@users.noreply.github.com"
] | 34279251+TheoDlmz@users.noreply.github.com |
948a40b4887290235480dbb0b1b338e9b123a66d | 5a30a2910498b7f738a3eab68a40a8fbbcba27c2 | /Tarea 1/tarea-1-hadoop/join2-reducer.py | a836ba7aa0a2d60c4712f6a0d3fdc3ae43e35d05 | [] | no_license | paulanavarretec/IIC-3423-big-data | b27e8aa634a123906e2ff1c1e0622c52bd8b28ea | 05d3476f2988678c2df9b2704c8649ba5ce22620 | refs/heads/main | 2023-08-28T17:20:54.590102 | 2021-10-08T02:02:19 | 2021-10-08T02:02:19 | 414,814,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | #!/usr/bin/env python
import sys
import string
# Hadoop-streaming reducer (Python 2): sums audience counts per TV programme.
# Input on stdin is expected sorted/grouped by key, one "<programme> <audience>"
# pair per line, as emitted by the matching mapper.
last_programa = None
audiencia_total = 0
for line in sys.stdin:
    # Strip surrounding whitespace from the incoming line
    line = line.strip()
    # Parse the (key, value) pair; lines arrive grouped by key
    programa,audiencia = line.split(" ")
    audiencia = int(audiencia)
    # Same programme as the previous line (or the very first line): accumulate
    if not last_programa or last_programa == programa:
        # Add this record's audience to the running total
        audiencia_total += audiencia
        last_programa = programa
    # Key changed: the previous programme is complete
    elif programa != last_programa:
        # Emit the finished (programme, total) pair on stdout
        print last_programa + ' ' + str(audiencia_total)
        last_programa = programa
        audiencia_total = audiencia
# Emit the totals for the last programme seen
print last_programa + ' ' + str(audiencia_total)
"noreply@github.com"
] | noreply@github.com |
dc85fab7d7d45de099b87639674ff0ef08b306c1 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/object_detection/exporter.py | 0abe25a0a8504f1390b6187e755d9d6d1a7a13a3 | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 19,824 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to export object detection inference graph."""
import logging
import os
import tempfile
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from object_detection.builders import model_builder
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
slim = tf.contrib.slim
# TODO(derekjchow): Replace with freeze_graph.freeze_graph_with_def_protos when
# newer version of Tensorflow becomes more common.
def freeze_graph_with_def_protos(
    input_graph_def,
    input_saver_def,
    input_checkpoint,
    output_node_names,
    restore_op_name,
    filename_tensor_name,
    clear_devices,
    initializer_nodes,
    variable_names_blacklist=''):
  """Converts all variables in a graph and checkpoint into constants.

  Args:
    input_graph_def: tf.GraphDef holding the graph to freeze.
    input_saver_def: SaverDef used to restore the checkpoint; if falsy, a
      Saver is reconstructed from the variables found in the checkpoint.
    input_checkpoint: path (or V2 prefix) of the trained checkpoint.
    output_node_names: comma-separated names of the nodes to keep.
    restore_op_name: unused, kept for API compatibility.
    filename_tensor_name: unused, kept for API compatibility.
    clear_devices: if True, strips device placements from all nodes.
    initializer_nodes: node names to run after restoring, or '' for none.
    variable_names_blacklist: comma-separated variable names that must stay
      variables instead of being converted to constants.

  Returns:
    A tf.GraphDef with all (non-blacklisted) variables baked in as constants.

  Raises:
    ValueError: if the checkpoint does not exist or output_node_names is
      empty.
  """
  del restore_op_name, filename_tensor_name  # Unused by updated loading code.

  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if not saver_lib.checkpoint_exists(input_checkpoint):
    raise ValueError(
        'Input checkpoint "' + input_checkpoint + '" does not exist!')

  if not output_node_names:
    raise ValueError(
        'You must supply the name of a node to --output_node_names.')

  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ''

  with tf.Graph().as_default():
    tf.import_graph_def(input_graph_def, name='')
    config = tf.ConfigProto(graph_options=tf.GraphOptions())
    with session.Session(config=config) as sess:
      if input_saver_def:
        saver = saver_lib.Saver(saver_def=input_saver_def)
        saver.restore(sess, input_checkpoint)
      else:
        # No SaverDef supplied: rebuild the variable list by matching the
        # checkpoint's keys against tensors present in the imported graph.
        var_list = {}
        reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in var_to_shape_map:
          try:
            tensor = sess.graph.get_tensor_by_name(key + ':0')
          except KeyError:
            # This tensor doesn't exist in the graph (for example it's
            # 'global_step' or a similar housekeeping element) so skip it.
            continue
          var_list[key] = tensor
        saver = saver_lib.Saver(var_list=var_list)
        saver.restore(sess, input_checkpoint)
      if initializer_nodes:
        sess.run(initializer_nodes)
      # Split the comma-separated blacklist (None disables blacklisting).
      variable_names_blacklist = (variable_names_blacklist.split(',') if
                                  variable_names_blacklist else None)
      output_graph_def = graph_util.convert_variables_to_constants(
          sess,
          input_graph_def,
          output_node_names.split(','),
          variable_names_blacklist=variable_names_blacklist)
  return output_graph_def
def replace_variable_values_with_moving_averages(graph,
                                                 current_checkpoint_file,
                                                 new_checkpoint_file):
  """Writes a checkpoint whose variables hold their moving-average values.

  The input checkpoint is expected to contain shadow (exponential moving
  average) variables for the variables defined in `graph`; this restores
  the shadow values into the original variables and saves the result.

  Args:
    graph: a tf.Graph object.
    current_checkpoint_file: a checkpoint containing both original variables
      and their moving averages.
    new_checkpoint_file: file path to write the rewritten checkpoint.
  """
  with graph.as_default():
    # The decay value is irrelevant here; we only need the EMA name mapping
    # from shadow-variable names to graph variables.
    ema = tf.train.ExponentialMovingAverage(0.0)
    restore_map = ema.variables_to_restore()
    with tf.Session() as sess:
      # Load the shadow values into the real variables, then save them back
      # under their ordinary names.
      tf.train.Saver(restore_map).restore(sess, current_checkpoint_file)
      tf.train.Saver().save(sess, new_checkpoint_file)
def _image_tensor_input_placeholder(input_shape=None):
  """Returns input placeholder and a 4-D uint8 image tensor.

  The placeholder doubles as the decoded-image tensor, so the same tensor
  is returned twice as (placeholder, images).
  """
  shape = (None, None, None, 3) if input_shape is None else input_shape
  placeholder = tf.placeholder(
      dtype=tf.uint8, shape=shape, name='image_tensor')
  return placeholder, placeholder
def _tf_example_input_placeholder():
  """Returns input that accepts a batch of serialized tf.Example strings.

  Returns:
    a tuple of (string placeholder, batch of decoded uint8 image tensors).
  """
  examples = tf.placeholder(tf.string, shape=[None], name='tf_example')

  def _decode_one(serialized):
    # Decode one tf.Example and keep only its image tensor.
    decoded = tf_example_decoder.TfExampleDecoder().decode(serialized)
    return decoded[fields.InputDataFields.image]

  images = tf.map_fn(
      _decode_one,
      elems=examples,
      dtype=tf.uint8,
      parallel_iterations=32,
      back_prop=False)
  return (examples, images)
def _encoded_image_string_tensor_input_placeholder():
  """Returns input that accepts a batch of PNG or JPEG strings.

  Returns:
    a tuple of (string placeholder, batch of decoded uint8 image tensors).
  """
  encoded = tf.placeholder(
      dtype=tf.string,
      shape=[None],
      name='encoded_image_string_tensor')

  def _decode_one(encoded_image):
    # decode_image handles both PNG and JPEG; force a 3-channel result and
    # pin the static shape so downstream ops see (H, W, 3).
    image = tf.image.decode_image(encoded_image, channels=3)
    image.set_shape((None, None, 3))
    return image

  images = tf.map_fn(
      _decode_one,
      elems=encoded,
      dtype=tf.uint8,
      parallel_iterations=32,
      back_prop=False)
  return (encoded, images)
# Maps an --input_type flag value to the placeholder-builder defined above.
input_placeholder_fn_map = {
    'image_tensor': _image_tensor_input_placeholder,
    'encoded_image_string_tensor':
        _encoded_image_string_tensor_input_placeholder,
    'tf_example': _tf_example_input_placeholder,
}
def _add_output_tensor_nodes(postprocessed_tensors,
                             output_collection_name='inference_op'):
  """Adds output nodes for detection boxes and scores.

  Adds the following nodes for output tensors -
    * num_detections: float32 tensor of shape [batch_size].
    * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]
      containing detected boxes.
    * detection_scores: float32 tensor of shape [batch_size, num_boxes]
      containing scores for the detected boxes.
    * detection_classes: float32 tensor of shape [batch_size, num_boxes]
      containing class predictions for the detected boxes.
    * detection_keypoints: (Optional) float32 tensor of shape
      [batch_size, num_boxes, num_keypoints, 2] containing keypoints for each
      detection box.
    * detection_masks: (Optional) float32 tensor of shape
      [batch_size, num_boxes, mask_height, mask_width] containing masks for
      each detection box.

  Args:
    postprocessed_tensors: a dictionary containing the following fields
      'detection_boxes': [batch, max_detections, 4]
      'detection_scores': [batch, max_detections]
      'detection_classes': [batch, max_detections]
      'detection_masks': [batch, max_detections, mask_height, mask_width]
        (optional).
      'num_detections': [batch]
    output_collection_name: Name of collection to add output tensors to.

  Returns:
    A tensor dict containing the added output tensor nodes.
  """
  detection_fields = fields.DetectionResultFields
  # Model classes are 0-based; exported classes are shifted to match the
  # 1-based label-map ids.
  label_id_offset = 1
  boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
  scores = postprocessed_tensors.get(detection_fields.detection_scores)
  classes = postprocessed_tensors.get(
      detection_fields.detection_classes) + label_id_offset
  keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
  masks = postprocessed_tensors.get(detection_fields.detection_masks)
  num_detections = postprocessed_tensors.get(detection_fields.num_detections)
  outputs = {}
  # tf.identity gives each output a stable, well-known node name that
  # downstream tooling can look up in the frozen graph.
  outputs[detection_fields.detection_boxes] = tf.identity(
      boxes, name=detection_fields.detection_boxes)
  outputs[detection_fields.detection_scores] = tf.identity(
      scores, name=detection_fields.detection_scores)
  outputs[detection_fields.detection_classes] = tf.identity(
      classes, name=detection_fields.detection_classes)
  outputs[detection_fields.num_detections] = tf.identity(
      num_detections, name=detection_fields.num_detections)
  if keypoints is not None:
    outputs[detection_fields.detection_keypoints] = tf.identity(
        keypoints, name=detection_fields.detection_keypoints)
  if masks is not None:
    outputs[detection_fields.detection_masks] = tf.identity(
        masks, name=detection_fields.detection_masks)
  for output_key in outputs:
    tf.add_to_collection(output_collection_name, outputs[output_key])
  # Fix: detection_masks used to be added to the collection a second time
  # here; the loop above already registers every output tensor exactly once.
  return outputs
def write_frozen_graph(frozen_graph_path, frozen_graph_def):
  """Serializes a frozen GraphDef to disk.

  Args:
    frozen_graph_path: Path to write inference graph.
    frozen_graph_def: tf.GraphDef holding frozen graph.
  """
  serialized = frozen_graph_def.SerializeToString()
  with gfile.GFile(frozen_graph_path, 'wb') as f:
    f.write(serialized)
  logging.info('%d ops in the final graph.', len(frozen_graph_def.node))
def write_saved_model(saved_model_path,
                      frozen_graph_def,
                      inputs,
                      outputs):
  """Writes SavedModel to disk.

  The frozen GraphDef already contains the trained weights baked in as
  constants, so the SavedModel is self-contained and needs no separate
  variables files.

  Args:
    saved_model_path: Path to write SavedModel.
    frozen_graph_def: tf.GraphDef holding frozen graph.
    inputs: The input image tensor to use for detection.
    outputs: A tensor dictionary containing the outputs of a DetectionModel.
  """
  with tf.Graph().as_default():
    with session.Session() as sess:
      tf.import_graph_def(frozen_graph_def, name='')
      builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)
      # Build the serving signature: a single image input plus every
      # detection output, under the generic PREDICT method name.
      tensor_info_inputs = {
          'inputs': tf.saved_model.utils.build_tensor_info(inputs)}
      tensor_info_outputs = {}
      for k, v in outputs.items():
        tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)
      detection_signature = (
          tf.saved_model.signature_def_utils.build_signature_def(
              inputs=tensor_info_inputs,
              outputs=tensor_info_outputs,
              method_name=signature_constants.PREDICT_METHOD_NAME))
      # Register the graph under the SERVING tag with the default signature.
      builder.add_meta_graph_and_variables(
          sess, [tf.saved_model.tag_constants.SERVING],
          signature_def_map={
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  detection_signature,
          },
      )
      builder.save()
def write_graph_and_checkpoint(inference_graph_def,
                               model_path,
                               input_saver_def,
                               trained_checkpoint_prefix):
  """Writes the (device-stripped) graph and a re-saved checkpoint to disk.

  Args:
    inference_graph_def: tf.GraphDef to import; device fields are cleared
      in place so the exported graph is portable.
    model_path: checkpoint prefix to write to.
    input_saver_def: SaverDef used to restore and re-save the variables.
    trained_checkpoint_prefix: checkpoint prefix to restore from.
  """
  # Strip explicit device placements so the graph loads anywhere.
  for node in inference_graph_def.node:
    node.device = ''
  with tf.Graph().as_default():
    tf.import_graph_def(inference_graph_def, name='')
    with session.Session() as sess:
      checkpoint_saver = saver_lib.Saver(
          saver_def=input_saver_def, save_relative_paths=True)
      checkpoint_saver.restore(sess, trained_checkpoint_prefix)
      checkpoint_saver.save(sess, model_path)
def _get_outputs_from_inputs(input_tensors, detection_model,
                             output_collection_name):
  """Runs preprocess -> predict -> postprocess and names the output nodes."""
  float_images = tf.to_float(input_tensors)
  preprocessed, true_shapes = detection_model.preprocess(float_images)
  predictions = detection_model.predict(preprocessed, true_shapes)
  detections = detection_model.postprocess(predictions, true_shapes)
  return _add_output_tensor_nodes(detections, output_collection_name)
def _build_detection_graph(input_type, detection_model, input_shape,
                           output_collection_name, graph_hook_fn):
  """Builds the inference graph: input placeholder, model ops, output nodes."""
  if input_type not in input_placeholder_fn_map:
    raise ValueError('Unknown input type: {}'.format(input_type))
  placeholder_kwargs = {}
  if input_shape is not None:
    # A fixed input shape only makes sense for the raw image-tensor input.
    if input_type != 'image_tensor':
      raise ValueError('Can only specify input shape for `image_tensor` '
                       'inputs.')
    placeholder_kwargs['input_shape'] = input_shape
  placeholder_fn = input_placeholder_fn_map[input_type]
  placeholder_tensor, input_tensors = placeholder_fn(**placeholder_kwargs)
  outputs = _get_outputs_from_inputs(
      input_tensors=input_tensors,
      detection_model=detection_model,
      output_collection_name=output_collection_name)

  # Make sure the exported graph carries a global-step variable.
  slim.get_or_create_global_step()

  if graph_hook_fn:
    graph_hook_fn()

  return outputs, placeholder_tensor
def _export_inference_graph(input_type,
                            detection_model,
                            use_moving_averages,
                            trained_checkpoint_prefix,
                            output_directory,
                            additional_output_tensor_names=None,
                            input_shape=None,
                            output_collection_name='inference_op',
                            graph_hook_fn=None):
  """Export helper: builds, freezes and writes the inference graph.

  Writes three artifacts under output_directory: frozen_inference_graph.pb,
  a SavedModel directory, and a model.ckpt checkpoint (graph + weights,
  unfrozen).

  Args:
    input_type: one of the keys of input_placeholder_fn_map.
    detection_model: a DetectionModel to export.
    use_moving_averages: whether to bake exponential moving averages of the
      variables into the exported weights.
    trained_checkpoint_prefix: checkpoint prefix to load weights from.
    output_directory: directory all artifacts are written to.
    additional_output_tensor_names: extra tensor names to keep in the frozen
      graph, or None.
    input_shape: optional fixed shape for `image_tensor` inputs.
    output_collection_name: collection the output tensors are added to.
    graph_hook_fn: optional callable run after the graph is built.
  """
  tf.gfile.MakeDirs(output_directory)
  frozen_graph_path = os.path.join(output_directory,
                                   'frozen_inference_graph.pb')
  saved_model_path = os.path.join(output_directory, 'saved_model')
  model_path = os.path.join(output_directory, 'model.ckpt')

  outputs, placeholder_tensor = _build_detection_graph(
      input_type=input_type,
      detection_model=detection_model,
      input_shape=input_shape,
      output_collection_name=output_collection_name,
      graph_hook_fn=graph_hook_fn)

  saver_kwargs = {}
  if use_moving_averages:
    # This check is to be compatible with both version of SaverDef.
    if os.path.isfile(trained_checkpoint_prefix):
      saver_kwargs['write_version'] = saver_pb2.SaverDef.V1
      temp_checkpoint_prefix = tempfile.NamedTemporaryFile().name
    else:
      temp_checkpoint_prefix = tempfile.mkdtemp()
    # Bake the EMA values into a temporary checkpoint used for freezing.
    replace_variable_values_with_moving_averages(
        tf.get_default_graph(), trained_checkpoint_prefix,
        temp_checkpoint_prefix)
    checkpoint_to_use = temp_checkpoint_prefix
  else:
    checkpoint_to_use = trained_checkpoint_prefix

  saver = tf.train.Saver(**saver_kwargs)
  input_saver_def = saver.as_saver_def()

  write_graph_and_checkpoint(
      inference_graph_def=tf.get_default_graph().as_graph_def(),
      model_path=model_path,
      input_saver_def=input_saver_def,
      trained_checkpoint_prefix=checkpoint_to_use)

  if additional_output_tensor_names is not None:
    # Fix: dict.keys() must be materialized with list() so the concatenation
    # also works on Python 3, where dict views do not support `+`.
    output_node_names = ','.join(
        list(outputs.keys()) + additional_output_tensor_names)
  else:
    output_node_names = ','.join(outputs.keys())

  frozen_graph_def = freeze_graph_with_def_protos(
      input_graph_def=tf.get_default_graph().as_graph_def(),
      input_saver_def=input_saver_def,
      input_checkpoint=checkpoint_to_use,
      output_node_names=output_node_names,
      restore_op_name='save/restore_all',
      filename_tensor_name='save/Const:0',
      clear_devices=True,
      initializer_nodes='')
  write_frozen_graph(frozen_graph_path, frozen_graph_def)
  write_saved_model(saved_model_path, frozen_graph_def,
                    placeholder_tensor, outputs)
def export_inference_graph(input_type,
                           pipeline_config,
                           trained_checkpoint_prefix,
                           output_directory,
                           input_shape=None,
                           output_collection_name='inference_op',
                           additional_output_tensor_names=None):
  """Exports inference graph for the model specified in the pipeline config.

  Args:
    input_type: Type of input for the graph. Can be one of [`image_tensor`,
      `tf_example`].
    pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.
    trained_checkpoint_prefix: Path to the trained checkpoint file.
    output_directory: Path to write outputs.
    input_shape: Sets a fixed shape for an `image_tensor` input. If not
      specified, will default to [None, None, None, 3].
    output_collection_name: Name of collection to add output tensors to.
      If None, does not add output tensors to a collection.
    additional_output_tensor_names: list of additional output
      tensors to include in the frozen graph.
  """
  detection_model = model_builder.build(pipeline_config.model,
                                        is_training=False)
  _export_inference_graph(input_type, detection_model,
                          pipeline_config.eval_config.use_moving_averages,
                          trained_checkpoint_prefix,
                          output_directory, additional_output_tensor_names,
                          input_shape, output_collection_name,
                          graph_hook_fn=None)
  # NOTE(review): this mutates the caller's pipeline_config in place so that
  # the written pipeline.config matches the frozen graph (moving averages
  # have already been baked in). Pass a copy if the proto is reused later.
  pipeline_config.eval_config.use_moving_averages = False
  config_text = text_format.MessageToString(pipeline_config)
  with tf.gfile.Open(
      os.path.join(output_directory, 'pipeline.config'), 'wb') as f:
    # Write the (possibly modified) config next to the exported artifacts.
    f.write(config_text)
| [
"gmonkman@mistymountains.biz"
] | gmonkman@mistymountains.biz |
ad938e02c950dfe5c347d6b032b1ce44e86cd517 | ffeec00fca386585c40aa7b22407ef166f50a8ab | /AudioServer/App/migrations/0006_auto_20210515_2016.py | 1f0f1bbcde42d6a9d3d11b5479262b822dee5a6a | [] | no_license | SyamPrakash07/AudioServer | 5bec2dc2f89444bacc12e79451f6905fc6381245 | 9d1b68160ad53c242e9230e0aec0a1701b4a4ee6 | refs/heads/master | 2023-05-01T11:55:11.012030 | 2021-05-16T08:07:07 | 2021-05-16T08:07:07 | 367,694,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Generated by Django 3.2.3 on 2021-05-15 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0005_auto_20210515_1746'),
]
operations = [
migrations.AddField(
model_name='audiobook',
name='image',
field=models.ImageField(null=True, upload_to=''),
),
migrations.AddField(
model_name='podcast',
name='image',
field=models.ImageField(null=True, upload_to=''),
),
]
| [
"syamsurya987@gmail.com"
] | syamsurya987@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.