index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,700 | 5748873ce5ebd8b3d3c09a77adc778cc1af45846 | #-*- coding: utf-8 -*-
from py3wirecard.entities.lib.wireentity import *
from py3wirecard.entities.subtotals import Subtotals
class Amount(WireEntity):
    """Wirecard order Amount entity.

    Fields are declared through py3wirecard's declarative decorators: each
    decorator defines the field's type, max length, default and required
    flag, and the decorated method bodies are intentionally empty
    placeholders.
    """
    # Currency code, defaults to Brazilian Real.
    @String(max=3, default="BRL", required=True)
    def currency(self): pass
    @String(max=9)
    def fixed(self): pass
    @String(max=4)
    def percentual(self): pass
    # Monetary fields below are integers — presumably amounts in cents
    # (Wirecard convention); confirm against the API docs.
    @Int(max=12)
    def paid(self): pass
    @Int(max=12)
    def refunds(self): pass
    @Int(max=12)
    def fees(self): pass
    @Int(max=12)
    def fee(self): pass
    @Int(max=12)
    def total(self): pass
    @Int(max=12)
    def gross(self): pass
    @Int(max=12)
    def liquid(self): pass
    @Int(max=12)
    def otherReceivers(self): pass
    # Nested entity holding the order's subtotal breakdown.
    @Object(type=Subtotals)
    def subtotals(self): pass
|
22,701 | 6db88f93d29edb90891222c0b75a2b3959211b23 | # Questão 2
#classe fornecedor
from questao1 import Pessoa
class Fornecedor(Pessoa):
    # Supplier: a Pessoa (person) extended with a credit amount and a debt.
    def __init__(self, nome, endereço, telefone, valorCredito, valorDivida):  # constructor
        Pessoa.__init__(self, nome, endereço, telefone)  # base-class (Pessoa) constructor
        # initialize this class's own attributes
        self._valorCredito = valorCredito
        self._valorDivida = valorDivida

    # getter methods
    def get_ValorCredito(self):
        return self._valorCredito

    def get_ValorDivida(self):
        return self._valorDivida

    # setter methods: overwrite the stored values
    def set_ValorCredito(self, valorCredito):
        self._valorCredito = valorCredito

    def set_ValorDivida(self, valorDivida):
        self._valorDivida = valorDivida

    # "obtain balance" method: choose a loan tier from the stored credit
    def obtersaldo(self, valorCredito):
        if self._valorCredito <= 5000.00:
            empresta = 1000.00
        elif self._valorCredito <= 10000.00:
            empresta = 3000.00
        elif self._valorCredito <= 15000.00:
            empresta = 4000.00
        else:
            empresta = 5000.00
        # NOTE(review): this COMPARES with `==` and returns a bool; an
        # assignment (`=`) or `return valorCredito - empresta` may have been
        # intended — confirm against the exercise statement.
        return self._valorCredito == valorCredito - empresta
|
22,702 | 1484cecfaf1aa5bf851785da2b2d57e5daa1f07f | #!/usr/bin/env python3
import pandas as pd
from process_db_data.data_cleaning_utils import clean_table_columns
from file_paths import raw_data, processed_data
def process_wounds():
    """
    Cleans/Processes dataset
    Indicated columns are dropped
    Column names are cleaned
    NA member_id rows are dropped
    Returns:
        DataFrame: cleaned dataframe
    Outputs:
        csv: processed data file
    """
    # Load the raw extract, parsing the two timestamp columns up front.
    wounds = pd.read_csv(
        f"{raw_data}\\wounds.csv", parse_dates=["Date Time Occurred", "Date Healed"]
    )
    # Remove the free-text participant column and normalize the headers.
    wounds = wounds.drop(["Participant"], axis=1)
    wounds.columns = clean_table_columns(wounds.columns)
    # Keep only rows with a usable member id, coerced to int.
    wounds = wounds.dropna(subset=["member_id"])
    wounds["member_id"] = wounds["member_id"].astype(int)
    wounds.to_csv(f"{processed_data}\\wounds.csv", index=False)
    return wounds
if __name__ == "__main__":
    # Run the cleaning step when executed as a script.
    process_wounds()
|
22,703 | 458938e9f4748b82710ef5c786a55c8b7de61519 | import pandas as pd
# Before any real work, some unwanted string pieces have to be removed, in order to correctly identify
# routes that share stops. The biggest cause of headache is the fact that if a metro station is called
# "Sesame street" then the corresponding bus station is called "Sesame street M" with no id or
# parent station that can connect the two
# I also did some cleaning by hand for rare occurrences, that are extremely hard to catch otherwise
# Suffixes/markers stripped from stop names so that metro/bus/tram stops at
# the same physical location compare equal ("Sesame street M" -> "Sesame street").
# BUG FIX: the original list was missing a comma between ' [10]' and ' [11]',
# which silently concatenated them into the single entry ' [10] [11]' so
# neither suffix was ever matched on its own.
unwanted = [' M+H', ' M', ' H',
            ' [A]', ' [B]', ' [C]',
            ' [D]', ' [E]', ' [F]',
            ' [G]', ' [H]', ' [I]',
            ' [J]', ' [K]', ' [L]',
            ' [M]', ' [N]', ' [2]',
            ' [3]', ' [4]', ' [5]',
            ' [6]', ' [7]', ' [8]',
            ' [9]', ' [10]', ' [11]',
            ' P+R']
df = pd.read_csv('stops.txt', header=0)
# Column 1 holds the stop name; strip every unwanted marker from it.
for i in range(len(df)):
    for u in unwanted:
        if u in df.iloc[i, 1]:
            df.iloc[i, 1] = df.iloc[i, 1].replace(u, '')  # Replace unwanted strings with nothing
df.to_csv('stops.csv', index=False)
22,704 | 1d40cc87f152ff7b6d72587be9d40448059bb368 | import os
import unittest
import heimdall
class TestMiscellaneous(unittest.TestCase):
    # Recovery tests for heimdall's messages_delivered.json config file.
    # The `setup_by_*` helpers lack the `test_` prefix so unittest does not
    # auto-run them; the `test_*` methods are placeholders (assert True).
    def setUp(self):
        # Point the bot at a throwaway database file.
        self.heimdall = heimdall.Heimdall('test')
        self.heimdall.database = "_test.db"

    def tearDown(self):
        # Remove the scratch database if the test created one.
        if os.path.exists("_test.db"):
            os.remove("_test.db")

    def setup_by_corrupting_config_file(self):
        # Write invalid JSON to simulate a corrupted config file.
        with open('data/heimdall/messages_delivered.json', 'w') as f:
            f.write("[][]")

    def test_ability_to_recover_from_corrupted_config_file(self):
        # TODO: placeholder — no recovery behavior is exercised yet.
        assert True
        if os.path.exists("data/heimdall/messages_delivered.json"):
            os.remove("data/heimdall/messages_delivered.json")

    def setup_by_removing_config_file(self):
        # Delete the config file to simulate a missing-config scenario.
        if os.path.exists("data/heimdall/messages_delivered.json"):
            os.remove("data/heimdall/messages_delivered.json")

    def test_ability_to_recover_from_missing_config_file(self):
        # TODO: placeholder.
        assert True
|
22,705 | 3f8c020cecedd7d6fb03afa775390ce6d26cc1fe | from collections import defaultdict
class CircularList():
    """Circular list whose mutating operations invalidate ("kill") the
    current view and hand back a fresh one sharing the same item storage."""

    def __init__(self, items):
        self.items = items
        self.dead = False  # set once a mutating operation supersedes this view

    def _check_alive(self):
        # BUG FIX: the original did `raise "Operating on dead list"`, which
        # raises a TypeError in Python 3 (exceptions must derive from
        # BaseException) instead of the intended message.
        if self.dead:
            raise RuntimeError("Operating on dead list")

    def get(self, place):
        """Return the item at `place`, wrapping around the list length."""
        self._check_alive()
        return self.items[place % len(self.items)]

    def insert(self, place, item):
        """Insert `item` one slot past `place`; return (index, fresh view)."""
        self._check_alive()
        self.kill()
        place = place % (len(self.items)) + 1
        self.items.insert(place, item)
        return place, CircularList(self.items)

    def remove(self, place):
        """Remove the item at `place`; return (index, item, fresh view)."""
        self._check_alive()
        self.kill()
        place = place % len(self.items)
        removed = self.items[place]
        del self.items[place]
        return place, removed, CircularList(self.items)

    def format(self):
        """Return the underlying item list (for display)."""
        self._check_alive()
        return self.items

    def kill(self):
        # Mark this view as superseded; further use raises RuntimeError.
        self.dead = True
class CircularListWithCenter():
    """A circular list viewed relative to a movable center position."""

    def __init__(self, circular_list, center):
        self.circular_list = circular_list
        self.center = center

    def get(self, place):
        # Offsets are interpreted relative to the current center.
        return self.circular_list.get(self.center + place)

    def insert(self, place, item):
        # Delegate, then re-center on the insertion point.
        moved_center, successor = self.circular_list.insert(self.center + place, item)
        return CircularListWithCenter(successor, moved_center)

    def remove(self, place):
        # Delegate, then re-center on the removal point.
        index, taken, successor = self.circular_list.remove(self.center + place)
        return taken, CircularListWithCenter(successor, index)

    def format(self):
        # Render all items, marking the current center with '>'.
        def render(value):
            marker = ">" if value == self.get(0) else " "
            return f"{marker}{value: 3}"
        return " ".join(render(v) for v in self.circular_list.format())
def iterate(list, n):
    """Play marble `n`: on multiples of 23 the marble 7 back is removed and
    scored together with `n`; otherwise the marble is inserted after slot 1.
    Returns (points gained, successor list)."""
    if n % 23 != 0:
        # Ordinary marble: no score.
        return 0, list.insert(1, n)
    removed, survivor = list.remove(-7)
    return n + removed, survivor
def play(list, players, rounds):
    """Run `rounds` marble turns among `players`; return the score tally."""
    scorecard = defaultdict(int)
    for turn in range(rounds):
        if turn % 10000 == 0:
            print(turn / rounds)  # coarse progress indicator
        current_player = turn % players + 1
        gained, list = iterate(list, turn + 1)
        scorecard[current_player] += gained
    return scorecard
def highscore(scorecard):
    """Format the winning player and their score."""
    winner, best = max(scorecard.items(), key=lambda kv: kv[1])
    return f"Player {winner} won, score: {best}"
# Marble-game driver — presumably Advent of Code 2018 day 9 (confirm):
# a small 9-player/25-marble sanity run, then the full-size run.
print(highscore(play(CircularListWithCenter(CircularList([0]), 0), 9, 25)))
print(highscore(play(CircularListWithCenter(CircularList([0]), 0), 413, 7108200)))
## TO BEAT: 4.02s
|
22,706 | 4e8fae92b31b8df073de9e438f0b04e65304d394 | import sys
# BOJ 2805 "Cut trees": binary-search the greatest cutter height that still
# yields at least M metres of wood from N trees.
sys.stdin = open('BOJ 2805 나무 자르기.txt')

N, M = map(int, input().split())
trees = list(map(int, input().split()))

max_height = max(trees)
min_height = 1
while min_height <= max_height:
    mid = (max_height + min_height) // 2
    # Total wood obtained when cutting every tree at height `mid`.
    temp_cnt = 0
    for i in trees:
        if i > mid:
            temp_cnt += (i - mid)
    if temp_cnt >= M:
        # Enough wood: try a higher cut.
        min_height = mid + 1
    else:
        # FIX: the original `elif temp_cnt < M` re-tested the complement of
        # the condition above; a plain else is equivalent and clearer.
        max_height = mid - 1
# Loop exits with max_height = largest feasible cutter height.
print(max_height)
22,707 | 50228fa9a0c854f07e7c13ce4ee4796496a824d3 | from osbrain import run_agent
from osbrain import run_nameserver
import multiprocessing
def method_a(agent, message):
    """REP handler: log the shared namespace's `temp` value and acknowledge.

    `mpns` is the module-level multiprocessing.Manager().Namespace() set up
    in the __main__ block.

    FIX: removed the dead local `gg` (the original read `mpns.temp` into it
    and immediately overwrote it with 10; the value was never used).
    """
    agent.log_info('Method A Temp: %s' % mpns.temp)
    return 'Blah 1'
def method_b(agent, message):
    # REP handler: log the shared multiprocessing namespace's `temp` value
    # and send an acknowledgement reply. `mpns` is the module-level
    # Manager().Namespace() created in the __main__ block.
    agent.log_info('Method B Temp: %s' % mpns.temp)
    return 'Blah 2'
if __name__ == '__main__':
    # Shared state that the handler functions above read.
    manager = multiprocessing.Manager()
    global mpns  # NOTE(review): `global` at module level is a no-op
    mpns = manager.Namespace()
    mpns.temp = 1
    # Spin up an osBrain name server and two agents.
    ns = run_nameserver()
    alice = run_agent('Alice')
    bob = run_agent('Bob')
    # Alice exposes two REP sockets, one per handler.
    addr1 = alice.bind('REP', alias='main1', handler=method_a)
    addr2 = alice.bind('REP', alias='main2', handler=method_b)
    # Bob exercises each request/reply channel once.
    bob.connect(addr1, alias='main1')
    bob.send('main1', "Some message")
    reply = bob.recv('main1')
    bob.connect(addr2, alias='main2')
    bob.send('main2', "Some message")
    reply = bob.recv('main2')
    agents = ns.agents()
    print(agents)
    ns.shutdown()
|
22,708 | e6e2a91ac677eb3577bf7cc78369a8ec5898b21e | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 12:48:04 2016
@author: 06411
"""
import pickle

# Pickle round-trip demo (Python 2 print statements throughout).
# Name of the file used as the object store.
shoplistfile = 'shoplist.data'
# Object to be persisted.
shoplist = ['apple', 'mango', 'carrot']
print " shoplist file", type(shoplistfile)
# Open the store for binary writing.
f = open(shoplistfile, 'wb')
print f
print type(f)
# Serialize the list into the file.
pickle.dump(shoplist, f)
f.close()
# Drop the in-memory object...
del shoplist
# ...then reopen the store for binary reading
f = open(shoplistfile, 'rb')
# and load the persisted list back.
storedlist = pickle.load(f)
print storedlist
22,709 | 5c251c5d3b480e6544adfb978ec5fc3cc5ca053b | #importing required modules
import pandas as pd
import matplotlib.pyplot as pyplot
import numpy as np
'''1.import the data set into the dataframes'''
#/Users/Lionel/Documents/Development/machineLearningConcepts/pricePrediction/
'''1.import the data set into the dataframes'''
#/Users/Lionel/Documents/Development/machineLearningConcepts/pricePrediction/
dc_listings = pd.read_csv('listings.csv')
# Strip "$" and "," from price strings ("$1,200.00" -> 1200.0).
# NOTE(review): the pattern is a regex; recent pandas requires an explicit
# regex=True for str.replace — confirm against the pandas version in use.
dc_listings['price'] = dc_listings.price.str.replace("\$|,",'').astype(float)
# print dc_listings.shape
dc_listings.head()
'''first distance calculation'''
our_acc_value = 3
first_living_space_value = dc_listings.loc[0,'accommodates']
'''1. dropping the column 1 added during the model creation
2. data split -- training and test data '''
# dc_listings.drop('distance',axis=1)
# Simple positional split: first 2792 rows train, the rest test.
train_df = dc_listings.copy().iloc[:2792]
test_df = dc_listings.copy().iloc[2792:]
'''creating function of the simple model, refering the pricePrediction_simpleModel'''
def predictPrice(new_listing_value, feature_columns):
    """k-nearest-neighbours (k=5) price estimate.

    Finds the 5 training listings whose `feature_columns` value is closest
    to `new_listing_value` and returns the mean of their prices.
    """
    # FIX: work on a copy — the original aliased the module-level train_df,
    # so every call wrote a 'distance' column into the shared training frame.
    temp_df = train_df.copy()
    temp_df['distance'] = np.abs(dc_listings[feature_columns] - new_listing_value)
    temp_df = temp_df.sort_values('distance')
    knn_5 = temp_df.price.iloc[:5]
    predicted_price = knn_5.mean()
    return predicted_price
'''predictin using the simple model'''
# Predict every test listing's price from its 'accommodates' value.
test_df['predicted_price'] = test_df.accommodates.apply(predictPrice, feature_columns = 'accommodates')
# print test_df['predicted_price'].head()
'''calculating RMSE -- Root Mean Square Error
1. difference between actual and predicted value
2. squaring the distance
3. taking mean of all the squared values
4. taking squared root of the mean'''
# RMSE = sqrt(mean((predicted - actual)^2))
test_df['squared_error'] = (test_df['predicted_price'] - test_df['price']) ** (2)
mse = test_df['squared_error'].mean()
rmse = mse ** (1/2.0)
'''comparing different models RMSE'''
# Evaluate a single-feature kNN model for each candidate column.
for feature in ['accommodates','bedrooms','bathrooms','number_of_reviews']:
    # BUG FIX: the original always applied the model to the `accommodates`
    # column regardless of `feature`, so all four RMSEs used the wrong
    # inputs; apply it to the column actually being evaluated.
    test_df['predicted_price'] = test_df[feature].apply(predictPrice, feature_columns=feature)
    test_df['squared_error'] = (test_df['predicted_price'] - test_df['price']) ** (2)
    mse = test_df['squared_error'].mean()
    rmse = mse ** (1/2.0)
    print("RMSE for the {} column: {}".format(feature,rmse))
|
22,710 | af2c47495598b7dd75a5326e3076088c73643ece | #!/usr/bin/env python3
import os
from imports import SERVER, CLIENTS
# Deploy WireGuard + OpenVPN configs and bring the tunnels up over SSH.
# NOTE(review): commands are built as shell strings via os.system; fine for
# trusted SERVER/CLIENTS values, but subprocess.run([...]) would be safer.
# 1) Copy all server-side configs to the server host.
os.system(
    f'scp ./wireguard/server/wg*.conf ./openvpn/server/*.conf root@{SERVER}:/root/')
# 2) On the server: bring up WireGuard, then one OpenVPN daemon per client.
server_command_string = f'ssh root@{SERVER} "wg-quick up ~/wg0.conf'
for i, client in enumerate(CLIENTS):
    server_command_string += f'; openvpn --config {i}.conf --daemon'
server_command_string += '"'
os.system(server_command_string)
# 3) For each client host: copy its configs and bring both tunnels up.
for i, client in enumerate(CLIENTS):
    os.system(
        f'scp ./wireguard/client/wg{i+1}.conf ./openvpn/client/{i}.conf root@{client}:/root/')
    os.system(
        f'ssh root@{client} "wg-quick up ~/wg{i+1}.conf; openvpn --config {i}.conf --daemon"')
|
22,711 | b4a39c2326b38e5968bed4a2b8a0b351b6dd82ed | # Description: Filter Vowels and Consonants Using Vowels
# Note
# 1. If a function is already define, use it over a list using a map function.
# List of alphabets
# All 26 lowercase letters, in order.
alphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
             'v', 'w', 'x', 'y', 'z']
# Function to filter vowels
def filter_vowels(alphabet):
    """Return True if `alphabet` is a lowercase vowel, else False."""
    vowels = ['a', 'e', 'i', 'o', 'u']
    # FIX (idiom): `in` already produces the boolean; no need for an
    # if/else that returns True/False explicitly.
    return alphabet in vowels
# Function to filter consonants
def filter_consonants(alphabet):
    """Return True if `alphabet` is not a lowercase vowel, else False."""
    vowels = ['a', 'e', 'i', 'o', 'u']
    # FIX (idiom): return the boolean expression directly.
    return alphabet not in vowels
# Filter Vowels
filtered_vowels = filter(filter_vowels, alphabets)
print(filtered_vowels)  # prints the filter object, not its contents
print(list(filtered_vowels))  # this list() call CONSUMES the iterator
# Filter Consonants
filtered_consonants = filter(filter_consonants, alphabets)
print(filtered_consonants)
print(list(filtered_consonants))
# Note: One can iterate over the returned object only once.
print('The filtered vowels are:')
# NOTE(review): `filtered_vowels` was already exhausted by list() above, so
# this loop prints nothing — recreate the filter if output is wanted here.
for vowel in filtered_vowels:
    print(vowel)
|
22,712 | 304a2a498194593ad7c206203705b17e66bb4720 | #================================================================================
#
# Group information
#
#
#
#================================================================================
import pandas as pd
import csv
def getPostalCodeDic(csvfname = 'CP.csv'):
    """
    Returns a dictionary mapping postal code -> last listed arrondissement.

    The CSV is ':'-delimited with columns 'CP' and 'arrondissement'; the
    latter holds '|'-separated values of which only the last is kept.
    """
    cpdic = {}
    # FIX: use a context manager — the original opened the file inline and
    # never closed the handle.
    with open(csvfname) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=':')
        for row in reader:
            cpdic[row['CP']] = row['arrondissement'].split('|')[-1]
    return cpdic
def postalcode_area_studies():
"""
Group some charactesitics by postal code area (first 3 letters)
"""
dfpawnshop = pd.read_csv(pawnmtl.csv)
cpdic = getPostalCodeDic()
for ik in cpdic.keys():
print ik, cpdic[ik]
if __name__=='__main__':
    #getPostalCodeDic()
    # Run the area study when executed as a script.
    postalcode_area_studies()
|
22,713 | 19bad54c8bbffde9ed1fe94c21656cc0401c5557 | animals_in_kennel = [
{
"id": 1,
"breed": "German Shepherd",
"age": 3,
"name": "Jack"
},
{
"id": 2,
"breed": "Siamese",
"age": 9,
"name": "Shy"
},
{
"id": 3,
"breed": "Labradoodle",
"age": 5,
"name": "Avett"
},
{
"id": 4,
"breed": "Shnauzer",
"age": 1,
"name": "Gypsy"
},
]
# for key, value in animals_in_kennel.items():
# result = f'Key {key} = {value}'
# print(result)
#This didn't work because its a bunch of dictionaries in a list. So you have to pull everything out of the list first[] and then take that argument and loop over it again.
# Each element of the list is a dict, so loop over the list first, then over
# each dict's key/value pairs.
for animal in animals_in_kennel:
    for key, value in animal.items():
        print(f'Key "{key}" = {value}')
# Key "id" = 1
# Key "breed" = German Shepherd
# Key "age" = 3
# Key "name" = Jack
# Key "id" = 2
# Key "breed" = Siamese
# Key "age" = 9
# Key "name" = Shy
# Key "id" = 3
# Key "breed" = Labradoodle
# Key "age" = 5
# Key "name" = Avett
# Key "id" = 4
# Key "breed" = Shnauzer
# Key "age" = 1
# Key "name" = Gypsy |
22,714 | f739545c05d01dbe318010f3f5771a6eb25b7364 | class Usuario:
def __init__(self, id:str, senha:str, cargo:str, nome:str, status="Offline", situacao=False):
    """Create a user record: credentials, role, and presence/active flags."""
    # Store the constructor arguments verbatim, in signature order.
    self.id = id
    self.senha = senha
    self.cargo = cargo
    self.nome = nome
    self.status = status
    self.situacao = situacao
class Logs():
    """Audit-log entry: which user changed status, to what, and when."""

    def __init__(self, id_usuario:str, status, hora, data, cargo:str):
        # Record the event fields as plain attributes.
        self.id_usuario = id_usuario
        self.cargo = cargo
        self.status = status
        self.data = data
        self.hora = hora
class Cargo:
    """Value object holding a single role name."""

    def __init__(self, cargo:str):
        self.cargo = cargo
|
22,715 | b55107777b312f3675c5999a99d2d2878de23823 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/8 23:30
# @Author : alison
# @File : tuple1.py
'''
1. tuple定义: (,,,)
2. 字符串的格式化输出 %(,)
3. 特点, immutable不可变性
4. 取值, 索引和切片[index:]
5. tuple与list互转
6. 用处, 遍历快,格式化
'''
# 元组
# 定义
# 元组是用圆括号括起来的,其中的元素之间用逗号隔开。(都是英文半角)
# 变量引用str
s = 'asd'
print(s)
# 如果这样写,就会是...
t = 123, 'asd', ['a', 's']
print(t)
# 字符串的格式化输出
print('I love %s, and I am %s' % ('haha', 'hehe'))
# 特点
# tuple是一种序列类型的数据,这点上跟list/str类似。它的特点就是其中的元素不能更改,这点上跟list不同,倒是跟str类似;它的元素又可以是任何类型的数据,这点上跟list相同,但不同于str。
t = 1, "23", [123, "abc"], ("python", "learn") # 元素多样性,近list
# t[0] = 8 # 不能原地修改,近str 报错
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# TypeError: 'tuple' object does not support item assignment
# t.append("no") ##报错
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# AttributeError: 'tuple' object has no attribute 'append'
# 索引和切片
print(t)
print(t[2])
print(t[1:])
print(t[2][0])
print(t[3][1])
# 关于序列的基本操作在tuple上的表现,就不一一展示了。看官可以去试试。
# 注意:如果一个元组中只有一个元素的时候,应该在该元素后面加一个半角的英文逗号。
# 如果要想看一个对象是什么类型,可以使用type()函数,然后就返回该对象的类型。
a = (3)
print(type(a))
a = (3,)
print(type(a))
# 所有在list中可以修改list的方法,在tuple中,都失效。
# tuple与list互转
print(t)
tlist = list(t)
print(tlist)
t_tuple = tuple(tlist)
print(t_tuple)
# tuple用在哪里?
# 1. Tuple 比 list 操作速度快。如果您定义了一个值的常量集,并且唯一要用它做的是不断地遍历它,请使用 tuple 代替 list。
# 2. 如果对不需要修改的数据进行 “写保护”,可以使代码更安全。使用 tuple 而不是 list 如同拥有一个隐含的 assert 语句,说明这一数据是常量。如果必须要改变这些值,则需要执行 tuple 到 list 的转换 (需要使用一个特殊的函数)。
# 3. Tuples 可以在 dictionary(字典,后面要讲述) 中被用做 key,但是 list 不行。Dictionary key 必须是不可变的。Tuple 本身是不可改变的,但是如果您有一个 list 的 tuple,那就认为是可变的了,用做 dictionary key 就是不安全的。只有字符串、整数或其它对 dictionary 安全的 tuple 才可以用作 dictionary key。
# 4. Tuples 可以用在字符串格式化中。
|
22,716 | 9256dfdf79ded31711c43e73157e273c764d1a98 | import torch.nn as nn
class HyperparamsBART():
    """Default fine-tuning switches for the BART wrapper."""

    def __init__(self):
        # Train everything by default: nothing frozen.
        self.freeze_embeddings = False
        self.freeze_encoders = False
class Seq2SeqModelBART(nn.Module):
    """Thin wrapper around a HuggingFace-style BART model with optional
    freezing of the encoder and/or embeddings.

    Args:
        tokenizer: tokenizer paired with `model`.
        model: a BART-style seq2seq model (exposes get_encoder(), model.shared,
            model.encoder/decoder with embed_positions/embed_tokens).
        hparams: object with `freeze_encoders` / `freeze_embeddings` flags.
    """

    def __init__(self, tokenizer, model, hparams):
        super().__init__()
        self.tokenizer = tokenizer
        self.model = model
        if hparams.freeze_encoders:
            self.freeze_params(self.model.get_encoder())
        if hparams.freeze_embeddings:
            # BUG FIX: the original referenced `self.freeze_embeds` without
            # calling it, so the flag silently did nothing.
            self.freeze_embeds()

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def freeze_params(self, model):
        ''' Function that takes a model as input (or part of a model) and freezes
        the layers for faster training'''
        for layer in model.parameters():
            # BUG FIX: the original set `requires_grade` (typo), which only
            # created an unused attribute instead of freezing the parameter.
            layer.requires_grad = False

    def freeze_embeds(self):
        ''' freeze the positional embedding parameters of the model '''
        self.freeze_params(self.model.model.shared)
        for d in [self.model.model.encoder, self.model.model.decoder]:
            self.freeze_params(d.embed_positions)
            self.freeze_params(d.embed_tokens)

    def shift_tokens_right(self, input_ids, pad_token_id):
        """ Shift input ids one token to the right,
        and wrap the last non pad token (usually <eos>).
        """
        prev_output_tokens = input_ids.clone()
        # Index of the last non-pad token in each row.
        index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
        prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
        prev_output_tokens[:, 1:] = input_ids[:, :-1]
        return prev_output_tokens
22,717 | 3d55a39dd8632c467d45671feea0398ca4284c5f | from collections import OrderedDict
import math
import torch
import torch.nn as nn
from selective_convolution import SelectiveConv2d
from models import BRC
class ResNeXtBlock(nn.Module):
    """Bottleneck ResNeXt block (1x1 -> grouped 3x3 -> 1x1) with an identity
    shortcut. When out_planes > in_planes the shortcut is zero-padded along
    channels instead of projected. The 1x1 convs optionally use
    SelectiveConv2d. BRC is a project helper — presumably
    BatchNorm-ReLU-Conv; confirm in models.py."""
    def __init__(self, in_planes, out_planes, args):
        super(ResNeXtBlock, self).__init__()
        n_groups = args['n_groups']
        base_width = args['base_width']
        widen_factor = args['widen_factor']
        self.in_planes = in_planes
        # Extra channels the shortcut must be padded with.
        self.add_planes = out_planes - in_planes
        width_ratio = out_planes // (64 * widen_factor)
        planes = base_width * width_ratio * n_groups
        if args.get('use_sconv', False):
            # Selective-convolution variant for the two 1x1 convs.
            gamma = args['gamma']
            K = args.get('K', 3)
            N_max = args.get('N_max', None)
            self.brc_1 = SelectiveConv2d(in_planes, planes, kernel_size=1,
                                         gamma=gamma, K=K, N_max=N_max)
            self.brc_2 = BRC(planes, planes, kernel_size=3, padding=1, groups=n_groups)
            self.brc_3 = SelectiveConv2d(planes, out_planes, kernel_size=1,
                                         gamma=gamma, K=K, N_max=N_max)
        else:
            self.brc_1 = BRC(in_planes, planes, kernel_size=1)
            self.brc_2 = BRC(planes, planes, kernel_size=3, padding=1, groups=n_groups)
            self.brc_3 = BRC(planes, out_planes, kernel_size=1)

    def forward(self, x):
        x_ = self.brc_1(x)
        x_ = self.brc_2(x_)
        x_ = self.brc_3(x_)
        if self.add_planes > 0:
            # Zero-pad the shortcut so channel counts match before adding.
            N, _, H, W = x_.size()
            padding = x_.new_zeros(N, self.add_planes, H, W)
            x = torch.cat((x, padding), 1)
        out = x + x_
        return out
class _ResNeXtStage(nn.Sequential):
    # One stage = `n_layers` ResNeXtBlocks; only the first block changes the
    # channel count, the rest map out_planes -> out_planes.
    def __init__(self, args, n_layers, in_planes, out_planes):
        super(_ResNeXtStage, self).__init__()
        layer = ResNeXtBlock(in_planes, out_planes, args)
        self.add_module('layer1', layer)
        for i in range(n_layers-1):
            layer = ResNeXtBlock(out_planes, out_planes, args)
            self.add_module('layer%d' % (i + 2), layer)
class _Transition(nn.Module):
def __init__(self):
super(_Transition, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.pool(x)
return x
class ResNeXt(nn.Module):
    """ResNeXt classifier for small-image datasets (CIFAR, Fashion-MNIST,
    TinyImageNet): a single-conv stem, stacked _ResNeXtStages separated by
    avg-pool transitions, then BN-ReLU-pool and a linear classifier."""
    def __init__(self, args, block_config, n_groups, base_width, widen_factor):
        # Network-level hyperparameters
        self.block_config = block_config
        self.dataset = args['dataset']
        self.n_classes = args['n_classes']
        # Layer-level hyperparameters (forwarded to every block via `args`)
        args['n_groups'] = n_groups
        args['base_width'] = base_width
        args['widen_factor'] = widen_factor
        self.args = args
        super(ResNeXt, self).__init__()
        # Stem conv + final pooling size depend on the dataset's image size
        # and number of input channels.
        if self.dataset in ['cifar10', 'cifar100']:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)),
            ]))
            last_pool = 8
        elif self.dataset == 'fmnist':
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)),
            ]))
            last_pool = 7
        elif self.dataset == 'tinyimg':
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)),
            ]))
            last_pool = 8
        else:
            raise NotImplementedError()
        in_channels = 64
        out_channels = in_channels * widen_factor
        # Stack the stages; every stage but the last is followed by a 2x2
        # avg-pool transition, and the width doubles per stage.
        for i, n_layers in enumerate(self.block_config):
            stage = _ResNeXtStage(args=args, n_layers=n_layers,
                                  in_planes=in_channels, out_planes=out_channels)
            self.features.add_module('block%d' % (i + 1), stage)
            if i != len(self.block_config) - 1:
                self.features.add_module('trans%d' % (i + 1), _Transition())
            in_channels = out_channels
            out_channels = out_channels * 2
        # NOTE(review): after the loop `out_channels` has been doubled once
        # more than the last stage's actual output (`in_channels`); the BN
        # and classifier below therefore use a channel count that looks too
        # large by 2x — confirm against the reference implementation.
        self.features.add_module('norm_last', nn.BatchNorm2d(out_channels))
        self.features.add_module('relu_last', nn.ReLU(inplace=True))
        self.features.add_module('pool_last', nn.AvgPool2d(last_pool))
        self.classifier = nn.Linear(out_channels, self.n_classes)
        self.reset()

    def reset(self):
        # He-style init for convs; BatchNorm to identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        features = self.features(x)
        # Flatten pooled features before the linear classifier.
        out = features.view(features.size(0), -1)
        out = self.classifier(out)
        return out
def resnext29(hparams):
    """29-layer ResNeXt: 3 stages of 3 blocks, 8 groups, base width 64, widen 4."""
    topology = dict(block_config=[3, 3, 3], n_groups=8, base_width=64, widen_factor=4)
    return ResNeXt(hparams, **topology)
def resnext_sconv29(hparams):
    """Same topology as resnext29, but with SelectiveConv2d enabled."""
    hparams['use_sconv'] = True
    topology = dict(block_config=[3, 3, 3], n_groups=8, base_width=64, widen_factor=4)
    return ResNeXt(hparams, **topology)
22,718 | 6c644632ca9614ec4365bdcb47fbae4e0a677cb9 | import numpy as np
import matplotlib.pyplot as plt
import statistics
import functions as fn
# Module-wide switches: `debug` enables the extra diagnostic plots and
# `figure_number` tracks the next matplotlib figure id.
debug = 1
figure_number = 0


def update_figure_number():
    """Advance the global figure counter and return its new value."""
    global figure_number
    figure_number = figure_number + 1
    return figure_number
def ex0(a, b, c):
    # Exercise 1: plot fn.func over [a, b) sampled with step c.
    # Returns 0 on success, -1 when the interval is invalid (b < a).
    ack = -1
    if b >= a:
        x_ = np.arange(a, b, c)
        plt.figure(update_figure_number())
        plt.title('Ex 1')
        plt.plot(x_, fn.func(x_))
        ack = 0
    return ack
def visualize_results(distances):
    # Histogram of `distances` (bins 0..99), titled with summary statistics.
    mean = statistics.mean(distances)
    std = statistics.stdev(distances)
    median = statistics.median(distances)
    title = "Mean: " + str(mean) + " Standard variation: " + str(std) + " Median: " + str(median)
    plt.figure(update_figure_number())
    plt.hist(distances, bins=np.arange(100))
    plt.title(title)
    return
def subplot_graph(subplot_option, image_plot, axis, title):
    # Draw `image_plot` into subplot (rows, cols, index) = subplot_option.
    plt.subplot(subplot_option[0], subplot_option[1], subplot_option[2])
    plt.imshow(image_plot)
    plt.axis(axis)  # this line removes the axis numbering
    plt.title(title)
    return
def plot_graph_2_curves(my_x1, my_y1, my_label1, my_x2, my_y2, my_label2, my_title):
    # Overlay two labelled curves on the current figure, with a legend.
    plt.plot(my_x1, my_y1, label=my_label1)
    plt.plot(my_x2, my_y2, label=my_label2)
    plt.xlabel('x axis label')
    plt.ylabel('y axis label')
    plt.title(my_title)
    plt.legend(loc=0)
    return
def apply_threshold(image, tmin, tmax):
    # Build a binary (H, W) mask selecting pixels whose R, G and B values
    # all fall within [tmin[c], tmax[c]] per channel. The per-channel
    # thresholding itself lives in fn.find_indexes_threshold.
    mask_rgb = np.zeros((image.shape[0], image.shape[1], image.shape[2]))
    mask_rgb[fn.find_indexes_threshold((image[:, :, :]), tmin[:], tmax[:])] = 1
    mask = np.zeros((image.shape[0], image.shape[1]))
    # A pixel passes only if all three channels passed.
    mask[np.where((mask_rgb[:, :, 0] == 1) & (mask_rgb[:, :, 1] == 1) & (mask_rgb[:, :, 2] == 1))] = 1.0
    # graph and test
    if debug == 1:
        plt.figure(update_figure_number())
        subplot_graph([2, 3, 1], image[:, :, 0], 'off', 'Red')
        subplot_graph([2, 3, 2], image[:, :, 1], 'off', 'Green')
        subplot_graph([2, 3, 3], image[:, :, 2], 'off', 'Blue')
        subplot_graph([2, 3, 4], mask_rgb[:, :, 0], 'off', 'Red after threshold ')
        subplot_graph([2, 3, 5], mask_rgb[:, :, 1], 'off', 'Green after threshold ')
        subplot_graph([2, 3, 6], mask_rgb[:, :, 2], 'off', 'Blue after threshold ')
    return mask
def ex1_solver():
    # first exercise: plot fn.func on [1, 10) and report the figure range.
    print("Ex 1")
    info = "Images " + str(figure_number + 1)
    ret = ex0(1, 10, 1)
    if ret == -1:
        print("Invalid parameters (ex0)")
    info += "-" + str(figure_number)
    print(info)
    return 0
def ex2_solver():
    # second exercise: random black/white image with one red pixel, then a
    # histogram of fn.compute_distances — presumably distances from each
    # white pixel to the red one; confirm in functions.py.
    print("Ex 2 ")
    info = "Images " + str(figure_number + 1)
    img = fn.create_image(100, 100)
    if debug == 1:
        plt.figure(update_figure_number())
        plt.title("Random white/black image with one red pixel")
        plt.imshow(img)
    dist = fn.compute_distances(img)
    visualize_results(dist)
    info += "-" + str(figure_number)
    print(info)
    return 0
def ex3_solver():
    # third exercise: RGB-threshold the stop-sign image and apply the
    # resulting mask. The hard-coded ranges presumably isolate the red sign
    # pixels — confirm visually.
    print("Ex 3")
    info = "Images " + str(figure_number + 1)
    loaded_image = plt.imread("stopturnsigns.jpg")
    if debug == 1:
        plt.figure(update_figure_number())
        plt.title("stopturnsigns.jpg")
        plt.imshow(loaded_image)
    my_mask = apply_threshold(loaded_image, [225, 30, 50], [253, 70, 85])
    new_image = fn.apply_mask(loaded_image, my_mask)
    plt.figure(update_figure_number())
    plt.title("stopturnsigns.jpg after application of mask")
    plt.imshow(new_image)
    info += "-" + str(figure_number)
    print(info)
    return 0
def ex4_solver():
    # fourth exercise: compare f(x) = 0.1*fn.func(x) against a linear g(x),
    # first with a fixed coefficient (1.2), then with a coefficient fitted
    # by fn.find_better_linear_approximation. Only runs if fn.check_mse()
    # reports success (0).
    print("Ex 4")
    if fn.check_mse() == 0:
        info = "Images " + str(figure_number + 1)
        x = np.arange(1, 201, 1)
        coefficient_a = 1.2
        f_x = 0.1*fn.func(x)
        g_x = coefficient_a*x
        plt.figure(update_figure_number())
        plot_graph_2_curves(x, f_x, "f(x)", x, g_x, "g(x)", "Ex 4 f(x) and g(x)")
        g_x = fn.find_better_linear_approximation(x, f_x)
        plt.figure(update_figure_number())
        plot_graph_2_curves(x, f_x, "f(x)", x, g_x, "g(x)", "Ex 4 f(x) and g(x) after coefficent's computation")
        info += "-" + str(figure_number)
        print(info)
    return 0
# Run every exercise in order, then show all accumulated figures at once.
ex1_solver()
ex2_solver()
ex3_solver()
ex4_solver()
plt.show()
|
22,719 | fc0cd6d047886005f6873a7430065f2ac3953c6d | '''
Write a function to clean up a given string by removing the special characters and retain
alphabets in both upper and lower case and numbers.
Author: Swapnika
Date: 25-08-2018
'''
import re
def clean_string(string):
    '''Remove every character that is not an ASCII letter or digit and
    lowercase the result.

    FIX: the original looped over each input character and re-ran the
    identical whole-string substitution every iteration; a single pass is
    equivalent (and still returns "" for empty input).
    '''
    return re.sub('[^A-Za-z0-9]', '', string.lower())
def main():
    '''main function: read one line from stdin and print its cleaned form'''
    string = input()
    print(clean_string(string))
if __name__ == '__main__':
    main()
|
22,720 | cda3145038bbd5dc3e3b3095a01f49c1e279ade5 | # Copyright 2020-present, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Davide Abati, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from PIL import Image
import numpy as np
import random
import os
from utils import create_if_not_exists
import torchvision.transforms.transforms as transforms
from torchvision import datasets
from torch.utils.data import Dataset
import seaborn as sns
import matplotlib.pyplot as plt
class ValidationDataset(Dataset):
    # Lightweight dataset wrapping pre-split data/targets, converting each
    # sample to a PIL Image so transforms behave like torchvision datasets.
    def __init__(self, data: torch.Tensor, targets: np.ndarray,
                 transform: transforms = None, target_transform: transforms = None) -> None:
        self.data = data
        self.targets = targets
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # Number of samples = first dimension of the data tensor/array.
        return self.data.shape[0]

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if isinstance(img, np.ndarray):
            if np.max(img) < 2:
                # Values look normalized (max < 2) — rescale to 0-255 bytes.
                img = Image.fromarray(np.uint8(img * 255))
            else:
                img = Image.fromarray(img)
        else:
            # Torch tensor: convert through numpy first.
            img = Image.fromarray(img.numpy())
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
def get_train_val(train: datasets, test_transform: transforms,
                  dataset: str, val_perc: float = 0.1):
    """
    For CV
    Extract val_perc% of the training set as the validation set.
    :param train: training dataset (mutated in place)
    :param test_transform: transformation of the test dataset
    :param dataset: dataset name (used as the permutation cache filename)
    :param val_perc: percentage of the training set to be extracted
    :return: the training set and the validation set
    """
    dataset_length = train.data.shape[0]
    directory = 'datasets/val_permutations/'
    create_if_not_exists(directory)
    file_name = dataset + '.pt'
    # Cache the shuffle permutation on disk so the train/val split is
    # reproducible across runs.
    if os.path.exists(directory + file_name):
        perm = torch.load(directory + file_name)
    else:
        perm = torch.randperm(dataset_length)
        torch.save(perm, directory + file_name)
    train.data = train.data[perm]
    train.targets = np.array(train.targets)[perm]
    # First val_perc slice becomes the validation set, using test-time
    # transforms; the remainder stays as training data.
    test_dataset = ValidationDataset(train.data[:int(val_perc * dataset_length)],
                                     train.targets[:int(val_perc * dataset_length)],
                                     transform=test_transform)
    train.data = train.data[int(val_perc * dataset_length):]
    train.targets = train.targets[int(val_perc * dataset_length):]
    return train, test_dataset
def split_data(data, label2id, train_ratio=0.667, test_ratio=0.2):
    """
    Stratified split for an NLP dataset.

    Within each label, instances are partitioned in their original order
    into train / test / valid slices of size train_ratio / test_ratio /
    remainder.

    :param data: list of {"y": label, ...} instances
    :param label2id: mapping from label to integer id
    :param train_ratio: per-label fraction used for training
    :param test_ratio: per-label fraction used for testing
    :return: (train, valid, test) lists of instances
    """
    targets = np.array([label2id[instance["y"]] for instance in data])
    train_data, valid_data, test_data = [], [], []
    for label in np.unique(targets):
        indices = np.where(targets == label)[0]
        size = len(indices)
        train_end = int(size * train_ratio)
        test_end = int(size * (train_ratio + test_ratio))
        train_data.extend(data[i] for i in indices[:train_end])
        test_data.extend(data[i] for i in indices[train_end:test_end])
        valid_data.extend(data[i] for i in indices[test_end:])
    return train_data, valid_data, test_data
|
22,721 | ef58c0a72c2bad1f301fbbb2c400b77434ffcbfe | # -*- coding: utf-8 -*-
# Native Python Modules.
# External Modules.
from rest_framework import viewsets, mixins
# Django Modules.
from django.http import Http404
from django.db.utils import IntegrityError
# Project Modules.
from .responses import Response201, Response204, Response400, Response404, Response422
from core.rest_exceptions import Exception422
class ListModelMixin(mixins.ListModelMixin):
    """
    ListModelMixin Wrapper to provide custom responses.
    """
    # Currently a pure pass-through; kept so all project viewsets import
    # every mixin from this module.
    def list(self, request, *args, **kwargs):
        return super().list(request, *args, **kwargs)
class RetrieveModelMixin(mixins.RetrieveModelMixin):
    """
    RetrieveModelMixin Wrapper to provide custom responses.
    """
    def retrieve(self, request, *args, **kwargs):
        try:
            response = super().retrieve(request, *args, **kwargs)
        except Http404 as e:
            # Map DRF's Http404 to the project's 404 envelope.
            return Response404(obj=self, method="retrieve")
        return response
class CreateModelMixin(mixins.CreateModelMixin):
    """
    CreateModelMixin Wrapper to provide custom responses.
    """
    def create(self, request, *args, **kwargs):
        try:
            response = super().create(request, *args, **kwargs)
        except (Http404, IntegrityError, Exception422) as e:
            # Missing related object -> 404; constraint/validation -> 422.
            if isinstance(e, Http404):
                return Response404(obj=self, method="create")
            if isinstance(e, (IntegrityError, Exception422)):
                return Response422(obj=self, method="create")
        # NOTE(review): the superclass response is discarded and a bare 201
        # envelope is returned on success — presumably intentional; confirm.
        return Response201(obj=self, method="create")
class UpdateModelMixin(mixins.UpdateModelMixin):
    """
    UpdateModelMixin Wrapper to provide custom responses.
    """
    def update(self, request, *args, **kwargs):
        # Force partial (PATCH-style) updates even on PUT.
        kwargs['partial'] = True
        try:
            response = super().update(request, *args, **kwargs)
        except (Http404, IntegrityError) as e:
            if isinstance(e, Http404):
                return Response404(obj=self, method="update")
            if isinstance(e, IntegrityError):
                return Response422(obj=self, method="update")
        # NOTE(review): returns a 201 envelope for updates (not 200) and
        # drops the superclass response — confirm this is intended.
        return Response201(obj=self, method="update")
class DestroyModelMixin(mixins.DestroyModelMixin):
    """
    DestroyModelMixin Wrapper to provide custom responses.
    """
    def destroy(self, request, *args, **kwargs):
        try:
            response = super().destroy(request, *args, **kwargs)
        except Http404 as e:
            # isinstance check is redundant here (only Http404 is caught)
            # but kept for symmetry with the other mixins.
            if isinstance(e, Http404):
                return Response404(obj=self, method="destroy")
        return Response204(obj=self, method="destroy")
|
22,722 | 6213ae700f40a9fc09e6692c94355a0c9641ab4a | import matplotlib.pyplot as plt
import librosa
import os
import matplotlib
import pylab
import librosa
import librosa.display
import numpy as np
def spectrogram(self,save_path,limits = (0,10000)):
    '''
    Render a magnitude spectrogram of self.signalData and save it to disk.

    Parameters
    ----------
    save_path : str
        Destination path for the saved image.
    limits : tuple of (float, float), optional
        (low, high) frequency limits in Hz for the y-axis; default (0, 10000).

    Notes
    -----
    Reads self.signalData (audio samples) and self.samplingFrequency from the
    instance — assumes both are set before calling; TODO confirm against the
    enclosing class. The axes are stripped so the saved image is just the
    spectrogram pixels.
    '''
    plt.figure(figsize=(14, 5))
    # Short-time Fourier transform, then convert magnitude to decibels.
    X = librosa.stft(self.signalData)
    Xdb = librosa.amplitude_to_db(abs(X))
    pylab.axis('off') # no axis
    pylab.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[]) # Remove the white edge
    librosa.display.specshow(Xdb, sr=self.samplingFrequency, x_axis='time', y_axis='hz',cmap = 'magma')
    l1,l2 = limits
    plt.ylim(l1,l2)
    pylab.savefig(save_path, bbox_inches=None, pad_inches=0)
    pylab.close()
|
22,723 | eacf457edbdc5c8ebb105b57cfc6aa08220bd6ee | # coding=utf-8
# Name:
# Date:
"""
proj04
practice with lists
"""
#Part I
#Take a list, say for example this one:
a_list = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
#and write a program that prints out all the elements of the list that are less than 5.
empty_list = []
for numbers in a_list:
if numbers < 5:
empty_list.append(numbers)
print empty_list
Number = int(raw_input("Enter a number."))
for numbers in a_list:
if numbers < Number:
print numbers
#Part II
# Take two lists, say for example these two:
b_list = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
c_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# and write a program that creates and prints a list that contains only the elements
# that are common between the lists (without duplicates).
# Make sure your program works on two lists of different sizes.
#for numbers in c_list:
#if numbers == numbers in b_list:
#print numbers
import random
random1 = random.randint(1,10)
import random
random2 = random.randint(10,20)
import random
random3 = random.randint(20,30)
import random
random4 = random.randint(30,40)
import random
random5 = random.randint(40,50)
import random
random6 = random.randint(50,60)
import random
random7 = random.randint(60,70)
import random
random8 = random.randint(70,80)
import random
random9 = random.randint(80,90)
import random
random10 = random.randint(90,100)
random_list = [random1, random2, random3, random4, random5, random6, random7, random8, random9, random10]
import random
random11 = random.randint(1,10)
import random
random12 = random.randint(10,20)
import random
random13 = random.randint(20,30)
import random
random14 = random.randint(30,40)
import random
random15 = random.randint(40,50)
import random
random16 = random.randint(50,60)
import random
random17 = random.randint(60,70)
import random
random18 = random.randint(70,80)
import random
random19 = random.randint(80,90)
import random
random20 = random.randint(90,100)
random_list2 = [random11, random12, random13, random14, random15, random16, random17, random18, random19, random20]
for numbers in random_list2:
if numbers == numbers in random_list:
print numbers
#Part III
# Take a list, say for example this one:
d_list = ["b", "a", "f", "y", "a", "t", "_", "p", "a", "R"]
# and write a program that replaces all “a” with “*”.
counter = 0
for items in d_list:
old_name = "a"
new_name = "*"
if items == old_name:
d_list[counter] = new_name
counter = counter + 1
print d_list
#Part IV
#Ask the user for a string, and print out whether this string is a palindrome or not.
String = raw_input("Type a string to determine whether it's a palindrome or not.")
String = String.lower()
String_list = []
for letter in String:
String_list.append(letter)
if String_list[0] == String_list[-1]:
print "The string is a palindrome."
if String_list[0] != String_list[-1]:
print "The string isn't a palindrome."
|
22,724 | 4fb791099ab837272b4b64dc0accdfb2f74b7bd4 | #-*- coding:utf-8 -*-
# @Time : 2019/5/10
# @Author : Botao Fan
|
22,725 | 52773afbd385af875140e2454f4732b1f5b7e7d4 | from django.db.models import signals
from rest_framework.reverse import reverse
from catalogue.signals.source_signals import index_source
from catalogue.models.source import Source
from rest_framework.test import APITestCase
from rest_framework import status
from model_mommy import mommy
class SourceViewTest(APITestCase):
    """Integration tests for the Source detail endpoint."""

    def setUp(self):
        # Disconnect the search-index signal so creating the fixture does not
        # hit the indexing backend during tests.
        signals.post_save.disconnect(index_source, sender=Source)
        self.source = mommy.make("catalogue.Source")

    def test_fetches_html_detail_with_success(self):
        url = reverse('source-detail', kwargs={"pk": self.source.pk})
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_fetches_html_detail_with_failure(self):
        # A primary key that cannot exist should yield a 404.
        url = reverse('source-detail', kwargs={"pk": 123456789})  # url = /source/123456789/
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
22,726 | 689c96ba898e6abe9fce8046a23fcad3354a1e4d | #Daniel Bandler
#5/23/18
#battleshipsGame.py
from ggame import * #imports everything from ggame
from random import randint #imports the tools for generating a random integer
rowcols = int(input("Enter the number of rows and columns you want. (max of 5 please) ")) #asks for number of rows and columns
shipNum = int(input("Enter the number of subs you want. ")) #asks for number of ships
ROWS = rowcols #section stores data for rows, cols cell size, etc
COLS = rowcols
CELL_SIZE = 200/ROWS+45 #determines cell size
def buildBoard(rows=None, cols=None): #makes the matrix
    """Return a rows x cols grid of placeholder cells.

    Bug fix / generalization: the original always returned a fixed 5x5 grid
    of letters regardless of the player's chosen `rowcols`. The cell contents
    are never inspected elsewhere (only the markers "ship", "sunk" and "miss"
    matter), so any placeholder value works.

    Parameters default to the module-level ROWS/COLS, keeping the original
    zero-argument call sites working.
    """
    # Morpheus: "Unfortunately, no one can be told what the Matrix is. You have to see it for yourself:"
    if rows is None:
        rows = ROWS
    if cols is None:
        cols = COLS
    return [['empty' for _ in range(cols)] for _ in range(rows)]
def redrawAll(): #handles all of the graphics
    """Clear the screen and redraw both boards, ships, hits/misses and any
    end-of-game banner from the current contents of the global `data` dict."""
    for item in App().spritelist[:]: #clears graphics
        item.destroy()
    Sprite((TextAsset("BATTLESHIPS", fill=red2,style= "bold 75pt Georgia")), (150, 410))
    # Left grid = player board; right grid is offset by CELL_SIZE*(rowcols+1).
    for i in range(rowcols): #goes through everything cell by cell
        for j in range(rowcols):
            Sprite(RectangleAsset(CELL_SIZE,CELL_SIZE,LineStyle(3,black),blue),(i*CELL_SIZE, j*CELL_SIZE)) #makes the ocean cells
            Sprite(RectangleAsset(CELL_SIZE,CELL_SIZE,LineStyle(3,black),blue),(i*CELL_SIZE+CELL_SIZE*(rowcols+1), j*CELL_SIZE))
            if data["board"][i][j] == "ship": #sprites our ships
                Sprite(shipbox,(i*CELL_SIZE,j*CELL_SIZE))
            elif data["board"][i][j] == "sunk": #sinks our ships
                Sprite(sunk,(i*CELL_SIZE,j*CELL_SIZE))
            elif data["board"][i][j] == "miss": #sprites computer misses
                Sprite(miss,(i*CELL_SIZE,j*CELL_SIZE))
            # Note: computer ships ("ship" cells on compboard) are deliberately
            # NOT drawn — only resolved guesses ("miss"/"sunk") are visible.
            if data["compboard"][i][j] == "miss": #sprites our misses
                Sprite(miss,(i*CELL_SIZE+CELL_SIZE*(rowcols+1),j*CELL_SIZE))
            elif data["compboard"][i][j] == "sunk": #sinks comp ships
                Sprite(sunk,(i*CELL_SIZE+CELL_SIZE*(rowcols+1),j*CELL_SIZE))
    if data["THEIRSUNK"] == shipNum: #ends the game with victory
        Sprite((TextAsset("YOU WIN!!", fill=green2,style= "bold 75pt Georgia")), (220, 100))
        data["gameOver"] = True #ends game
    if data["SUNK"] == shipNum: #ends the game with loss
        Sprite((TextAsset("YOU LOSE!!!!!!", fill=red2,style= "bold 75pt Georgia")), (220, 100))
        data["gameOver"] = True #ends game
def mouseClick(event): #determines what happens when you click
    """Handle a click: first shipNum clicks place player ships on the left
    grid; subsequent clicks on the right grid are guesses at computer ships."""
    if data["gameOver"] == False:
        if data["totalClicks"] < shipNum: #deals with determining where our ships are
            if (event.x//CELL_SIZE) <= (CELL_SIZE*rowcols) and event.y//CELL_SIZE <= CELL_SIZE*rowcols:
                if data["board"][event.x//CELL_SIZE][event.y//CELL_SIZE] != "ship": #prevents placing multiple ships in a square
                    data["board"][event.x//CELL_SIZE][event.y//CELL_SIZE] = "ship" #sets a ship where we clicked
                    redrawAll()
                    data["totalClicks"] += 1
        if data["totalClicks"] >= shipNum: #this part looks at our guesses of where their ships are
            # NOTE(review): this compares the WHOLE board list to a string, so it
            # is always True; it presumably intended to test the clicked cell —
            # repeated guesses of the same cell are therefore not rejected here.
            if data["compboard"] != "sunk" and data["compboard"] != "miss":#checks that we haven't guessed there before
                # NOTE(review): `//` binds tighter than `-`, so the bounds check
                # below likely does not compute the intended column — confirm
                # against the cell-index expression used two lines down.
                if (event.x-(CELL_SIZE*(rowcols+1))//CELL_SIZE) >= CELL_SIZE*(rowcols+1) and (event.x-(CELL_SIZE*(rowcols+1))//CELL_SIZE) <= CELL_SIZE*((2*rowcols)+1): #makes sure your guess is in the right spot
                    if data["compboard"][(event.x-(CELL_SIZE*(rowcols+1)))//CELL_SIZE][event.y//CELL_SIZE] == "ship": #deals with what happens if it is a hit
                        data["totalClicks"] += 1
                        data["compboard"][(event.x-(CELL_SIZE*(rowcols+1)))//CELL_SIZE][event.y//CELL_SIZE] = "sunk" #sinks ship
                        data["THEIRSUNK"] += 1 #adds to theirsunk count
                    else: #deals with what happens if it is a miss
                        data["compboard"][(event.x-(CELL_SIZE*(rowcols+1)))//CELL_SIZE][event.y//CELL_SIZE] = "miss" #goes and sprites miss
                        data["totalClicks"] += 1
                    redrawAll()
                    computerTurn()
    return False
def pickComputerShips(): #sprites computer ships
    """Randomly place shipNum computer ships on data["compboard"]."""
    placed = 0
    while placed < shipNum: #makes sure right number of ships are created
        # Draw a random cell; retry on collision so every ship gets its own square.
        row = randint(0,rowcols-1)
        col = randint(0,rowcols-1)
        if data["compboard"][row][col] != "ship":
            data["compboard"][row][col] = "ship"
            placed += 1
    #print(data["compboard"]) #cheat code for scrubs
def computerTurn(): #guesses where our ships are.
    """Make one random computer guess on the player's board, recursing until
    an unguessed cell is found.

    NOTE(review): the retry is implemented via recursion with no depth bound;
    late in a game on a small board this can recurse many times — a while
    loop would be safer.
    """
    if data["gameOver"] == False:
        cord1 = randint(0,rowcols-1) #random guesses
        cord2 = randint(0,rowcols-1)
        # Only guess once the player has finished placing ships.
        if data["totalClicks"] > shipNum:
            if data["board"][cord1][cord2] == "miss" or data["board"][cord1][cord2] == "sunk": #if it has been guessed before, it generates new numbers
                computerTurn()
            else:
                if data["board"][cord1][cord2] == "ship": #verifies if there is a ship there
                    data["board"][cord1][cord2] = "sunk" #sets the ship as sunk
                    data["SUNK"] += 1 #adds to sunk count
                else:
                    data["board"][cord1][cord2] = "miss" #lets the computer know that it has less a less effective intelligence agency than Lesotho, and can't find targets
        redrawAll()
        return False
if __name__== "__main__":
    #creates everything I will use
    # `data` is the single shared game state read/written by all handlers.
    data = {}
    data["board"] = buildBoard() #builds board
    data["compboard"] = buildBoard() #this board represents the computer's board
    data["totalClicks"] = 0 #counts total mouse clicks
    data["SUNK"] = 0 #counts how many ships we have lost
    data["THEIRSUNK"] = 0 #counts how many ships they have lost
    data["gameOver"] = False #sees if game is over
    blue = Color(0x3383FF,1) #creates the colors for great fleet
    chrome = Color(0xdbe4eb,1)
    black = Color(0x000000,1)
    green = Color(0x008000,.5)
    green2 = Color(0x008000,1)
    red = Color(0xFF0000,.5)
    red2 = Color(0xFF0000,1)
    shipbox = RectangleAsset(CELL_SIZE,CELL_SIZE,LineStyle(3,black),chrome) #creates a chrome box which our graphic designers claim looks like a ship, however I don't see it
    miss = RectangleAsset(CELL_SIZE,CELL_SIZE,LineStyle(5,black),green) #creates a green box which represents a miss because misses obviously turn the ocean green
    sunk = RectangleAsset(CELL_SIZE,CELL_SIZE,LineStyle(5,black),red) #creates a red box to signify the fire of a sinking ship
    pickComputerShips() #picks the computer ships
    redrawAll() #runs redrawAll()
    App.listenMouseEvent("click", mouseClick) #listens for clicks
    App().run() #runs ggame
|
22,727 | df3188cc81bf46ca38c74683275a7bb86221e9ce | # -*- coding: utf-8 -*-
"""
# Initialize vars for set_excInh_cPath.py and svm_excInh_trainDecoder.py. They both call this script.
Created on Fri Dec 9 15:25:41 2016
@author: farznaj
"""
# ## Specify variables for the analysis:
# - Data (mouse, day, sessions)
# - Neuron type: excit, inhibit, or all
# - Current-choice or previous-choice SVM training
# if current-choice, specify epoch of training, the outcome (corr, incorr, all) and strength (easy, medium, hard, all) of trials for training SVM.
# if previous-choice, specify ITI flag
# - Trials that will be used for projections and class accuracy traces (corr, incorr, all, trained).
# In[1]:
#def svm_excInh_setVars():
# Add the option to toggle on/off the raw code. Copied from http://stackoverflow.com/questions/27934885/how-to-hide-code-from-cells-in-ipython-notebook-visualized-with-nbviewer
import sys
if 'ipykernel' in sys.modules:
    from IPython.display import HTML
    # Inject JS that adds a button to hide/show the notebook's input cells.
    # Only meaningful inside a Jupyter kernel; a no-op elsewhere.
    HTML('''<script>
code_show=true;
function code_toggle() {
 if (code_show){
 $('div.input').hide();
 } else {
 $('div.input').show();
 }
 code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# In[3]:
import sys
import os
import numpy as np
from datetime import datetime
nowStr = datetime.now().strftime('%y%m%d-%H%M%S')  # timestamp used for output naming
# Only run the following section if you are running the code in jupyter, not if on the cluster or in spyder!
if ('ipykernel' in sys.modules) or any('SPYDER' in name for name in os.environ):
    # Set these variables:
    # Dataset selection (mouse / session / imaging files to analyze).
    mousename = 'fni17'
    imagingFolder = '151020'
    mdfFileNumber = [1,2]
    trialHistAnalysis = 0;
    roundi = 1; # For the same dataset we run the code multiple times, each time we select a random subset of neurons (of size n, n=.95*numTrials)
    iTiFlg = 1; # Only needed if trialHistAnalysis=1; short ITI, 1: long ITI, 2: all ITIs.
    setNsExcluded = 1; # if 1, NsExcluded will be set even if it is already saved.
    numSamples = 2 #100; # number of iterations for finding the best c (inverse of regularization parameter)
    neuronType = 2; # 0: excitatory, 1: inhibitory, 2: all types.
    saveResults = 0; # save results in mat file.
    doNsRand = 0; # if 1, a random set of neurons will be selected to make sure we have fewer neurons than trials.
    regType = 'l1' # 'l2' : regularization type
    kfold = 10;
    compExcInh = 1 # if 1, analyses will be run to compare exc inh neurons.
    # The following vars don't usually need to be changed
    doPlots = 1; # Whether to make plots or not.
    saveHTML = 0; # whether to save the html file of notebook with all figures or not.
    if trialHistAnalysis==1: # more parameters are specified in popClassifier_trialHistory.m
        # iTiFlg = 1; # 0: short ITI, 1: long ITI, 2: all ITIs.
        epEnd_rel2stimon_fr = 0 # 3; # -2 # epEnd = eventI + epEnd_rel2stimon_fr
    else:
        # not needed to set ep_ms here, later you define it as [choiceTime-300 choiceTime]ms # we also go 30ms back to make sure we are not right on the choice time!
        ep_ms = [809, 1109] #[425, 725] # optional, it will be set according to min choice time if not provided.# training epoch relative to stimOnset % we want to decode animal's upcoming choice by traninig SVM for neural average responses during ep ms after stimulus onset. [1000, 1300]; #[700, 900]; # [500, 700];
    # outcome2ana will be used if trialHistAnalysis is 0. When it is 1, by default we are analyzing past correct trials. If you want to change that, set it in the matlab code.
    outcome2ana = 'corr' # '', corr', 'incorr' # trials to use for SVM training (all, correct or incorrect trials)
    strength2ana = 'all' # 'all', easy', 'medium', 'hard' % What stim strength to use for training?
    thStimStrength = 3; # 2; # threshold of stim strength for defining hard, medium and easy trials.
    th_stim_dur = 800; # min stim duration to include a trial in timeStimOnset
    trs4project = 'trained' # 'trained', 'all', 'corr', 'incorr' # trials that will be used for projections and the class accuracy trace; if 'trained', same trials that were used for SVM training will be used. "corr" and "incorr" refer to current trial's outcome, so they don't mean much if trialHistAnalysis=1.
    windowAvgFlg = 1 # if 0, data points during ep wont be averaged when setting X (for SVM training), instead each frame of ep will be treated as a separate datapoint. It helps with increasing number of datapoints, but will make data mor enoisy.
    thAct = 5e-4 #5e-4; # 1e-5 # neurons whose average activity during ep is less than thAct will be called non-active and will be excluded.
    thTrsWithSpike = 1; # 3 % remove neurons that are active in <thSpTr trials.
    pnev2load = [] #[] [3] # which pnev file to load: indicates index of date-sorted files: use 0 for latest. Set [] to load the latest one.
# In[4]:
# Human-readable names for the selected neuron type / ITI; note that an
# unexpected code leaves the name unset (NameError on later use).
if neuronType==0:
    ntName = 'excit'
elif neuronType==1:
    ntName = 'inhibit'
elif neuronType==2:
    ntName = 'all'
if trialHistAnalysis==1:
    if iTiFlg==0:
        itiName = 'short'
    elif iTiFlg==1:
        itiName = 'long'
    elif iTiFlg==2:
        itiName = 'all'
# Echo the analysis configuration for the log.
print 'Analyzing %s' %(mousename+'_'+imagingFolder+'_'+str(mdfFileNumber))
if trialHistAnalysis==0:
    print 'Training %s trials of strength %s. Making projections for %s trials' %(outcome2ana, strength2ana, trs4project)
print 'trialHistAnalysis = %i' %(trialHistAnalysis)
print 'Analyzing %s neurons' %(ntName)
if trialHistAnalysis==1:
    print 'Analyzing %s ITIs' %(itiName)
elif 'ep_ms' in locals():
    print 'training window: [%d %d] ms' %(ep_ms[0], ep_ms[1])
print 'windowAvgFlg = %i' %(windowAvgFlg)
print 'numSamples = %i' %(numSamples)
# ## Import Libraries and Modules
# In[5]:
import scipy.io as scio
import scipy as sci
import scipy.stats as stats
import numpy as np
import numpy.random as rng
import sys
from crossValidateModel import crossValidateModel
from linearSVM import linearSVM
from compiler.ast import flatten # gives deprecation warning... try: from funcy import flatten, isa
import matplotlib
from matplotlib import pyplot as plt
# Configure inline SVG plotting, but only when running inside a Jupyter kernel.
if 'ipykernel' in sys.modules and doPlots:
    get_ipython().magic(u'matplotlib inline')
    get_ipython().magic(u"config InlineBackend.figure_format = 'svg'")
    matplotlib.rcParams['figure.figsize'] = (6,4) #(8,5)
from IPython.display import display
import sklearn.svm as svm
import os
import glob
# print sys.path
#sys.path.append('/home/farznaj/Documents/trial_history/imaging') # Gamal's dir needs to be added using "if" that takes the value of pwd
## print sys.path
#from setImagingAnalysisNamesP import *
#%%
def setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber, **options):
    """Build the .mat file paths for one imaging session.

    Parameters
    ----------
    mousename : str
    imagingFolder : str
        Session date folder name, e.g. '151020'.
    mdfFileNumber : sequence of int
        Imaging file numbers combined into the session, e.g. [1, 2].
    **options
        signalCh : list, optional — channel used in the Pnev file name.
        pnev2load : list, optional — index into date-sorted Pnev files (0 = latest).
        postNProvided : int, optional — 1 if the directory holds post_ files
            instead of Pnev files.

    Returns
    -------
    (imfilename, pnevFileName) : tuple of str
        pnevFileName is '' when signalCh is not given or no file matches.

    NOTE(review): the truthiness checks on options.get(...) treat falsy but
    explicit values (e.g. signalCh=[]) the same as "not provided" — confirm
    that is intended.
    """
    if options.get('signalCh'):
        signalCh = options.get('signalCh');
    else:
        signalCh = []
    if options.get('pnev2load'):
        pnev2load = options.get('pnev2load');
    else:
        pnev2load = []
    if options.get('postNProvided'):
        postNProvided = options.get('postNProvided');
    else:
        postNProvided = 0
    ##%%
    import numpy as np
    import platform
    import glob
    import os.path
    if len(pnev2load)==0:
        pnev2load = [0];
    ##%%
    dataPath = []
    if platform.system()=='Linux':
        # NOTE(review): find('sonas')==1 only matches cwd paths where 'sonas'
        # starts at index 1 (e.g. '/sonas-hs/...'); presumably '!= -1' was
        # intended — confirm before changing, as it selects the data root.
        if os.getcwd().find('sonas')==1: # server
            dataPath = '/sonas-hs/churchland/nlsas/data/data/'
        else: # office linux
            dataPath = '/home/farznaj/Shares/Churchland/data/'
    else:
        dataPath = '/Users/gamalamin/git_local_repository/Farzaneh/data/'
    #%%
    tifFold = os.path.join(dataPath+mousename,'imaging',imagingFolder)
    # e.g. mdfFileNumber=[1,2] -> '001-002' suffix on the session date.
    r = '%03d-'*len(mdfFileNumber)
    r = r[:-1]
    rr = r % (tuple(mdfFileNumber))
    date_major = imagingFolder+'_'+rr
    imfilename = os.path.join(tifFold,date_major+'.mat')
    ##%%
    if len(signalCh)>0:
        if postNProvided:
            pnevFileName = 'post_'+date_major+'_ch'+str(signalCh)+'-Pnev*'
        else:
            pnevFileName = date_major+'_ch'+str(signalCh)+'-Pnev*'
        pnevFileName = glob.glob(os.path.join(tifFold,pnevFileName))
        # sort pnevFileNames by date (descending)
        pnevFileName = sorted(pnevFileName, key=os.path.getmtime)
        pnevFileName = pnevFileName[::-1] # so the latest file is the 1st one.
        '''
        array = []
        for idx in range(0, len(pnevFileName)):
            array.append(os.path.getmtime(pnevFileName[idx]))
        inds = np.argsort(array)
        inds = inds[::-1]
        pnev2load = inds[pnev2load]
        '''
        if len(pnevFileName)==0:
            c = ("No Pnev file was found"); print("%s\n" % c)
            pnevFileName = ''
        else:
            pnevFileName = pnevFileName[pnev2load[0]]
            if postNProvided:
                # Strip the 'post_' prefix to recover the underlying Pnev name.
                p = os.path.basename(pnevFileName)[5:]
                pnevFileName = os.path.join(tifFold,p)
    else:
        pnevFileName = ''
    ##%%
    return imfilename, pnevFileName
# In[7]:
# Extend the built in two tailed ttest function to one-tailed
def ttest2(a, b, **tailOption):
    """Two-sample t-test with an optional one-tailed p-value.

    Parameters
    ----------
    a, b : array-like
        The two independent samples to compare.
    tail : str, optional keyword
        'right' tests mean(a) > mean(b); 'left' tests mean(a) < mean(b).
        Omitted (or any other value) returns the standard two-tailed p.

    Returns
    -------
    p : float
        The (possibly one-tailed) p-value.
    """
    import scipy.stats as stats
    import numpy as np
    # ttest_ind returns (t statistic, two-tailed p); the original named the
    # statistic `h`, which suggested a boolean decision — renamed for clarity.
    tstat, p = stats.ttest_ind(a, b)
    d = np.mean(a)-np.mean(b)  # sign decides which tail the data support
    if tailOption.get('tail'):
        tail = tailOption.get('tail').lower()
        # Supported tail -> halve p; opposite tail -> complement of half p.
        if tail == 'right':
            p = p/2.*(d>0) + (1-p/2.)*(d<0)
        elif tail == 'left':
            p = (1-p/2.)*(d>0) + p/2.*(d<0)
        if d==0:
            # Equal means: no evidence for either tail.
            p = 1;
    return p
# In[8]:
"""
Created on Wed Aug 24 15:59:12 2016
@author: farznaj
This is Farzaneh's first Python code :-) She is very happy and pleased about it :D
example call:
mousename = 'fni17'
imagingFolder = '151021'
mdfFileNumber = [1] #(1,2)
# optional inputs:
postNProvided = 1; # Default:0; If your directory does not contain pnevFile and instead it contains postFile, set this to 1 to get pnevFileName
signalCh = [2] # since gcamp is channel 2, should be 2.
pnev2load = [] # which pnev file to load: indicates index of date-sorted files: use 0 for latest. Set [] to load the latest one.
from setImagingAnalysisNamesP import *
imfilename, pnevFileName = setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber, signalCh=signalCh, pnev2load=pnev2load, postNProvided=postNProvided)
imfilename, pnevFileName = setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber)
"""
#%%
"""
def setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber, **options):
if options.get('signalCh'):
signalCh = options.get('signalCh');
else:
signalCh = []
if options.get('pnev2load'):
pnev2load = options.get('pnev2load');
else:
pnev2load = []
if options.get('postNProvided'):
postNProvided = options.get('postNProvided');
else:
postNProvided = 0
#%%
import numpy as np
import platform
import glob
import os.path
if len(pnev2load)==0:
pnev2load = [0];
#%%
dataPath = []
if platform.system()=='Linux':
if os.getcwd().find('sonas')==1: # server
dataPath = '/sonas-hs/churchland/nlsas/data/data/'
else: # office linux
dataPath = '/home/farznaj/Shares/Churchland/data/'
else:
dataPath = '/Users/gamalamin/git_local_repository/Farzaneh/data/'
#%%
tifFold = os.path.join(dataPath+mousename,'imaging',imagingFolder)
r = '%03d-'*len(mdfFileNumber)
r = r[:-1]
rr = r % (tuple(mdfFileNumber))
date_major = imagingFolder+'_'+rr
imfilename = os.path.join(tifFold,date_major+'.mat')
#%%
if len(signalCh)>0:
if postNProvided:
pnevFileName = 'post_'+date_major+'_ch'+str(signalCh)+'-Pnev*'
else:
pnevFileName = date_major+'_ch'+str(signalCh)+'-Pnev*'
pnevFileName = glob.glob(os.path.join(tifFold,pnevFileName))
# sort pnevFileNames by date (descending)
pnevFileName = sorted(pnevFileName, key=os.path.getmtime)
pnevFileName = pnevFileName[::-1]
'''
array = []
for idx in range(0, len(pnevFileName)):
array.append(os.path.getmtime(pnevFileName[idx]))
inds = np.argsort(array)
inds = inds[::-1]
pnev2load = inds[pnev2load]
'''
if len(pnevFileName)==0:
c = ("No Pnev file was found"); print("%s\n" % c)
pnevFileName = ''
else:
pnevFileName = pnevFileName[pnev2load[0]]
if postNProvided:
p = os.path.basename(pnevFileName)[5:]
pnevFileName = os.path.join(tifFold,p)
else:
pnevFileName = ''
#%%
return imfilename, pnevFileName
#%%
#imfilename, pnevFileName = setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber, signalCh, pnev2load)
"""
# ## Set mat-file names
# In[9]:
# Resolve the imaging/post/more .mat file paths for the selected session.
pnev2load = [] #[] [3] # which pnev file to load: indicates index of date-sorted files: use 0 for latest. Set [] to load the latest one.
signalCh = [2] # since gcamp is channel 2, should be always 2.
postNProvided = 1; # If your directory does not contain pnevFile and instead it contains postFile, set this to 1 to get pnevFileName
# from setImagingAnalysisNamesP import *
imfilename, pnevFileName = setImagingAnalysisNamesP(mousename, imagingFolder, mdfFileNumber, signalCh=signalCh, pnev2load=pnev2load, postNProvided=postNProvided)
# Sibling files that share the Pnev base name.
postName = os.path.join(os.path.dirname(pnevFileName), 'post_'+os.path.basename(pnevFileName))
moreName = os.path.join(os.path.dirname(pnevFileName), 'more_'+os.path.basename(pnevFileName))
print(imfilename)
print(pnevFileName)
print(postName)
print(moreName)
# ## Load matlab variables: event-aligned traces, inhibitRois, outcomes, choice, etc
# - traces are set in set_aligned_traces.m matlab script.
# In[10]:
# Set traces_al_stim that is same as traces_al_stimAll except that in traces_al_stim some trials are set to nan, bc their stim duration is <
# th_stim_dur or bc their go tone happens before ep(end) or bc their choice happened before ep(end).
# But in traces_al_stimAll, all trials are included.
# You need traces_al_stim for decoding the upcoming choice bc you average responses during ep and you want to
# control for what happens there. But for trial-history analysis you average responses before stimOnset, so you
# don't care about when go tone happened or how long the stimulus was.
frameLength = 1000/30.9; # sec.
# Load time of some trial events
Data = scio.loadmat(postName, variable_names=['timeCommitCL_CR_Gotone', 'timeStimOnset', 'timeStimOffset', 'time1stSideTry'])
timeCommitCL_CR_Gotone = np.array(Data.pop('timeCommitCL_CR_Gotone')).flatten().astype('float')
timeStimOnset = np.array(Data.pop('timeStimOnset')).flatten().astype('float')
timeStimOffset = np.array(Data.pop('timeStimOffset')).flatten().astype('float')
time1stSideTry = np.array(Data.pop('time1stSideTry')).flatten().astype('float')
# Load stim-aligned_allTrials traces, frames, frame of event of interest
# Current-choice analysis uses the early-decision-free alignment; the
# trial-history analysis uses all trials.
if trialHistAnalysis==0:
    Data = scio.loadmat(postName, variable_names=['stimAl_noEarlyDec'],squeeze_me=True,struct_as_record=False)
    eventI = Data['stimAl_noEarlyDec'].eventI - 1 # remember difference indexing in matlab and python!
    traces_al_stimAll = Data['stimAl_noEarlyDec'].traces.astype('float')
    time_aligned_stim = Data['stimAl_noEarlyDec'].time.astype('float')
else:
    Data = scio.loadmat(postName, variable_names=['stimAl_allTrs'],squeeze_me=True,struct_as_record=False)
    eventI = Data['stimAl_allTrs'].eventI - 1 # remember difference indexing in matlab and python!
    traces_al_stimAll = Data['stimAl_allTrs'].traces.astype('float')
    time_aligned_stim = Data['stimAl_allTrs'].time.astype('float')
# time_aligned_stimAll = Data['stimAl_allTrs'].time.astype('float') # same as time_aligned_stim
print 'size of stimulus-aligned traces:', np.shape(traces_al_stimAll), '(frames x units x trials)'
DataS = Data
# Load outcomes and choice (allResp_HR_LR) for the current trial
# if trialHistAnalysis==0:
Data = scio.loadmat(postName, variable_names=['outcomes', 'allResp_HR_LR'])
outcomes = (Data.pop('outcomes').astype('float'))[0,:]
# allResp_HR_LR = (Data.pop('allResp_HR_LR').astype('float'))[0,:]
allResp_HR_LR = np.array(Data.pop('allResp_HR_LR')).flatten().astype('float')
choiceVecAll = allResp_HR_LR+0; # trials x 1; 1 for HR choice, 0 for LR choice. % choice of the current trial.
# choiceVecAll = np.transpose(allResp_HR_LR); # trials x 1; 1 for HR choice, 0 for LR choice. % choice of the current trial.
print 'Current outcome: %d correct choices; %d incorrect choices' %(sum(outcomes==1), sum(outcomes==0))
if trialHistAnalysis:
    # Load trialHistory structure to get choice vector of the previous trial
    Data = scio.loadmat(postName, variable_names=['trialHistory'],squeeze_me=True,struct_as_record=False)
    choiceVec0All = Data['trialHistory'].choiceVec0.astype('float')
# Set trials strength and identify trials with stim strength of interest
if trialHistAnalysis==0:
    Data = scio.loadmat(postName, variable_names=['stimrate', 'cb'])
    stimrate = np.array(Data.pop('stimrate')).flatten().astype('float')
    cb = np.array(Data.pop('cb')).flatten().astype('float')
    s = stimrate-cb; # how far is the stimulus rate from the category boundary?
    if strength2ana == 'easy':
        str2ana = (abs(s) >= (max(abs(s)) - thStimStrength));
    elif strength2ana == 'hard':
        str2ana = (abs(s) <= thStimStrength);
    elif strength2ana == 'medium':
        str2ana = ((abs(s) > thStimStrength) & (abs(s) < (max(abs(s)) - thStimStrength)));
    else:
        # 'all': keep every trial.
        str2ana = np.full((1, np.shape(outcomes)[0]), True, dtype=bool).flatten();
    print 'Number of trials with stim strength of interest = %i' %(str2ana.sum())
    print 'Stim rates for training = {}'.format(np.unique(stimrate[str2ana]))
'''
# Set to nan those trials in outcomes and allRes that are nan in traces_al_stim
I = (np.argwhere((~np.isnan(traces_al_stim).sum(axis=0)).sum(axis=1)))[0][0] # first non-nan neuron
allTrs2rmv = np.argwhere(sum(np.isnan(traces_al_stim[:,I,:])))
print(np.shape(allTrs2rmv))
outcomes[allTrs2rmv] = np.nan
allResp_HR_LR[allTrs2rmv] = np.nan
'''
# ## Set the time window for training SVM (ep) and traces_al_stim
# In[11]:
# NOTE: assignment aliases the array; trials nan-ed out below also affect
# traces_al_stimAll — presumably intentional, confirm.
traces_al_stim = traces_al_stimAll
if trialHistAnalysis==1:
    # either of the two below (stimulus-aligned and initTone-aligned) would be fine
    # eventI = DataI['initToneAl'].eventI - 1
    eventI = DataS['stimAl_allTrs'].eventI - 1 # remember to subtract 1! matlab vs python indexing!
    epEnd = eventI + epEnd_rel2stimon_fr #- 2 # to be safe for decoder training for trial-history analysis we go upto the frame before the stim onset
    # epEnd = DataI['initToneAl'].eventI - 2 # to be safe for decoder training for trial-history analysis we go upto the frame before the initTone onset
    ep = np.arange(epEnd+1)
    print 'training epoch is {} ms'.format(np.round((ep-eventI)*frameLength))
    ep_ms = list(np.round((ep[[0,-1]]-eventI)*frameLength).astype(int)) # so it is the same format as ep_ms when trialHistAnalysis is 0
else:
    # Set ep_ms if it is not provided: [choiceTime-300 choiceTime]ms # we also go 30ms back to make sure we are not right on the choice time!
    # by doing this you wont need to set ii below.
    # We first set to nan timeStimOnset of trials that anyway wont matter bc their outcome is not of interest. we do this to make sure these trials dont affect our estimate of ep_ms
    if outcome2ana == 'corr':
        timeStimOnset[outcomes!=1] = np.nan; # analyze only correct trials.
    elif outcome2ana == 'incorr':
        timeStimOnset[outcomes!=0] = np.nan; # analyze only incorrect trials.
    if not 'ep_ms' in locals():
        ep_ms = [np.floor(np.nanmin(time1stSideTry-timeStimOnset))-30-300, np.floor(np.nanmin(time1stSideTry-timeStimOnset))-30]
        print 'Training window: [%d %d] ms' %(ep_ms[0], ep_ms[1])
    # Convert the ms window into frame indices on the stim-aligned traces.
    epStartRel2Event = np.ceil(ep_ms[0]/frameLength); # the start point of the epoch relative to alignedEvent for training SVM. (500ms)
    epEndRel2Event = np.ceil(ep_ms[1]/frameLength); # the end point of the epoch relative to alignedEvent for training SVM. (700ms)
    ep = np.arange(eventI+epStartRel2Event, eventI+epEndRel2Event+1).astype(int); # frames on stimAl.traces that will be used for trainning SVM.
    print 'Training epoch relative to stimOnset is {} ms'.format(np.round((ep-eventI)*frameLength - frameLength/2)) # print center of frames in ms
#%% Exclude some trials from traces_al_stim
# This criteria makes sense if you want to be conservative; otherwise if ep=[1000 1300]ms, go tone will definitely be before ep end, and you cannot have the following criteria.
# Make sure in none of the trials Go-tone happened before the end of training window (ep)
i = (timeCommitCL_CR_Gotone - timeStimOnset) <= ep_ms[-1];
'''
if np.sum(i)>0:
    print 'Excluding %i trials from timeStimOnset bc their goTone is earlier than ep end' %(np.sum(i))
#    timeStimOnset[i] = np.nan; # by setting to nan, the aligned-traces of these trials will be computed as nan.
else:
    print('No trials with go tone before the end of ep. Good :)')
'''
# Make sure in none of the trials choice (1st side try) happened before the end of training window (ep)
ii = (time1stSideTry - timeStimOnset) <= ep_ms[-1];
if np.sum(ii)>0:
    print 'Excluding %i trials from timeStimOnset bc their choice is earlier than ep end' %(np.sum(ii))
#    timeStimOnset[i] = np.nan; # by setting to nan, the aligned-traces of these trials will be computed as nan.
else:
    print('No trials with choice before the end of ep. Good :)')
# Make sure trials that you use for SVM (decoding upcoming choice from
# neural responses during stimulus) have a certain stimulus duration. Of
# course stimulus at least needs to continue until the end of ep.
# go with either 900 or 800ms. Since the preference is to have at least
# ~100ms after ep which contains stimulus and without any go tones, go with 800ms
# bc in many sessions go tone happened early... so you will loose lots of
# trials if you go with 900ms.
# th_stim_dur = 800; # min stim duration to include a trial in timeStimOnset
if doPlots:
    # Diagnostic figure: event times relative to stim onset per trial.
    plt.figure
    plt.subplot(1,2,1)
    plt.plot(timeCommitCL_CR_Gotone - timeStimOnset, label = 'goTone')
    plt.plot(timeStimOffset - timeStimOnset, 'r', label = 'stimOffset')
    plt.plot(time1stSideTry - timeStimOnset, 'm', label = '1stSideTry')
    plt.plot([1, np.shape(timeCommitCL_CR_Gotone)[0]],[th_stim_dur, th_stim_dur], 'g:', label = 'th_stim_dur')
    plt.plot([1, np.shape(timeCommitCL_CR_Gotone)[0]],[ep_ms[-1], ep_ms[-1]], 'k:', label = 'epoch end')
    plt.xlabel('Trial')
    plt.ylabel('Time relative to stim onset (ms)')
    plt.legend(loc='center left', bbox_to_anchor=(1, .7))
# minStimDurNoGoTone = np.nanmin(timeCommitCL_CR_Gotone - timeStimOnset); # this is the duration after stim onset during which no go tone occurred for any of the trials.
# print 'minStimDurNoGoTone = %.2f ms' %minStimDurNoGoTone
# Exclude trials whose stim duration was < th_stim_dur
j = (timeStimOffset - timeStimOnset) < th_stim_dur;
if np.sum(j)>0:
    print 'Excluding %i trials from timeStimOnset bc their stimDur < %dms' %(np.sum(j), th_stim_dur)
#    timeStimOnset[j] = np.nan;
else:
    print 'No trials with stimDur < %dms. Good :)' %th_stim_dur
# Set trials to be removed from traces_al_stimAll
#    toRmv = (i+j+ii)!=0;
# NOTE: the go-tone criterion `i` is deliberately NOT applied (see print).
toRmv = (j+ii)!=0; print 'Not excluding %i trials whose goTone is earlier than ep end' %sum(i)
print 'Final: %i trials excluded in traces_al_stim' %np.sum(toRmv)
# Set traces_al_stim for SVM classification of current choice.
traces_al_stim[:,:,toRmv] = np.nan
# traces_al_stim[:,:,outcomes==-1] = np.nan
# print(np.shape(traces_al_stim))
'''
# Set ep
if len(ep_ms)==0: # load ep from matlab
    # Load stimulus-aligned traces, frames, frame of event of interest, and epoch over which we will average the responses to do SVM analysis
    Data = scio.loadmat(postName, variable_names=['stimAl'],squeeze_me=True,struct_as_record=False)
    # eventI = Data['stimAl'].eventI - 1 # remember difference indexing in matlab and python!
    # traces_al_stim = Data['stimAl'].traces.astype('float') # traces_al_stim
    # time_aligned_stim = Data['stimAl'].time.astype('float')
    ep = Data['stimAl'].ep - 1
    ep_ms = np.round((ep-eventI)*frameLength).astype(int)
else: # set ep here:
'''
# In[12]:
'''
# Load 1stSideTry-aligned traces, frames, frame of event of interest
# use firstSideTryAl_COM to look at changes-of-mind (mouse made a side lick without committing it)
Data = scio.loadmat(postName, variable_names=['firstSideTryAl'],squeeze_me=True,struct_as_record=False)
traces_al_1stSide = Data['firstSideTryAl'].traces.astype('float')
time_aligned_1stSide = Data['firstSideTryAl'].time.astype('float')
# print(np.shape(traces_al_1stSide))
# Load goTone-aligned traces, frames, frame of event of interest
# use goToneAl_noStimAft to make sure there was no stim after go tone.
Data = scio.loadmat(postName, variable_names=['goToneAl'],squeeze_me=True,struct_as_record=False)
traces_al_go = Data['goToneAl'].traces.astype('float')
time_aligned_go = Data['goToneAl'].time.astype('float')
# print(np.shape(traces_al_go))
# Load reward-aligned traces, frames, frame of event of interest
Data = scio.loadmat(postName, variable_names=['rewardAl'],squeeze_me=True,struct_as_record=False)
traces_al_rew = Data['rewardAl'].traces.astype('float')
time_aligned_rew = Data['rewardAl'].time.astype('float')
# print(np.shape(traces_al_rew))
# Load commitIncorrect-aligned traces, frames, frame of event of interest
Data = scio.loadmat(postName, variable_names=['commitIncorrAl'],squeeze_me=True,struct_as_record=False)
traces_al_incorrResp = Data['commitIncorrAl'].traces.astype('float')
time_aligned_incorrResp = Data['commitIncorrAl'].time.astype('float')
# print(np.shape(traces_al_incorrResp))
# Load initiationTone-aligned traces, frames, frame of event of interest
Data = scio.loadmat(postName, variable_names=['initToneAl'],squeeze_me=True,struct_as_record=False)
traces_al_init = Data['initToneAl'].traces.astype('float')
time_aligned_init = Data['initToneAl'].time.astype('float')
# print(np.shape(traces_al_init))
# DataI = Data
'''
'''
if trialHistAnalysis:
# either of the two below (stimulus-aligned and initTone-aligned) would be fine
# eventI = DataI['initToneAl'].eventI
eventI = DataS['stimAl_allTrs'].eventI
epEnd = eventI + epEnd_rel2stimon_fr #- 2 # to be safe for decoder training for trial-history analysis we go upto the frame before the stim onset
# epEnd = DataI['initToneAl'].eventI - 2 # to be safe for decoder training for trial-history analysis we go upto the frame before the initTone onset
ep = np.arange(epEnd+1)
print 'training epoch is {} ms'.format(np.round((ep-eventI)*frameLength))
'''
# Load inhibitRois
Data = scio.loadmat(moreName, variable_names=['inhibitRois'])
inhibitRois = Data.pop('inhibitRois')[0,:]
# print '%d inhibitory, %d excitatory; %d unsure class' %(np.sum(inhibitRois==1), np.sum(inhibitRois==0), np.sum(np.isnan(inhibitRois)))
# Set traces for specific neuron types: inhibitory, excitatory or all neurons
if neuronType!=2:
nt = (inhibitRois==neuronType) # 0: excitatory, 1: inhibitory, 2: all types.
# good_excit = inhibitRois==0;
# good_inhibit = inhibitRois==1;
traces_al_stim = traces_al_stim[:, nt, :];
traces_al_stimAll = traces_al_stimAll[:, nt, :];
'''
traces_al_1stSide = traces_al_1stSide[:, nt, :];
traces_al_go = traces_al_go[:, nt, :];
traces_al_rew = traces_al_rew[:, nt, :];
traces_al_incorrResp = traces_al_incorrResp[:, nt, :];
traces_al_init = traces_al_init[:, nt, :];
'''
else:
nt = np.arange(np.shape(traces_al_stim)[1])
# ## Set X (trials x neurons) and Y (trials x 1) for training the SVM classifier.
# X matrix (size trials x neurons) that contains neural responses at different trials.
# Y choice of high rate (modeled as 1) and low rate (modeled as 0)
# In[13]:
# Set choiceVec0 (Y: the response vector)
if trialHistAnalysis:
choiceVec0 = choiceVec0All[:,iTiFlg] # choice on the previous trial for short (or long or all) ITIs
choiceVec0S = choiceVec0All[:,0]
choiceVec0L = choiceVec0All[:,1]
else: # set choice for the current trial
choiceVec0 = allResp_HR_LR; # trials x 1; 1 for HR choice, 0 for LR choice. % choice of the current trial.
# choiceVec0 = np.transpose(allResp_HR_LR); # trials x 1; 1 for HR choice, 0 for LR choice. % choice of the current trial.
if outcome2ana == 'corr':
choiceVec0[outcomes!=1] = np.nan; # analyze only correct trials.
elif outcome2ana == 'incorr':
choiceVec0[outcomes!=0] = np.nan; # analyze only incorrect trials.
choiceVec0[~str2ana] = np.nan
# Y = choiceVec0
# print(choiceVec0.shape)
# Set spikeAveEp0 (X: the predictor matrix (trials x neurons) that shows average of spikes for a particular epoch for each trial and neuron.)
if trialHistAnalysis:
# either of the two cases below should be fine (init-aligned traces or stim-aligned traces.)
spikeAveEp0 = np.transpose(np.nanmean(traces_al_stimAll[ep,:,:], axis=0)) # trials x neurons
# spikeAveEp0 = np.transpose(np.nanmean(traces_al_init[ep,:,:], axis=0)) # trials x neurons
else:
spikeAveEp0 = np.transpose(np.nanmean(traces_al_stim[ep,:,:], axis=0)) # trials x neurons
# X = spikeAveEp0;
print 'Size of spikeAveEp0 (trs x neurons): ', spikeAveEp0.shape
# In[14]:
# set trsExcluded and exclude them to set X and Y; trsExcluded are trials that are nan either in traces or in choice vector.
'''
#dirName = 'SVM_151102_001-002_ch2-PnevPanResults-160624-113108';
dirName = 'SVM_151029_003_ch2-PnevPanResults-160426-191859';
#dirName = '/home/farznaj/Shares/Churchland/data/fni17/imaging/151022/XY_fni17_151022 XY_lassoSVM.mat';
Data = scio.loadmat(dirName, variable_names=['X', 'Y', 'time_aligned_stim', 'non_filtered', 'traces_al_1stSideTry', 'time_aligned_stim_1stSideTry']);
X = Data.pop('X').astype('float')
Y = np.squeeze(Data.pop('Y')).astype('int')
time_aligned_stim = np.squeeze(Data.pop('time_aligned_stim')).astype('float')
Xt = Data.pop('non_filtered').astype('float')
Xt_choiceAl = Data.pop('traces_al_1stSideTry').astype('float')
time_aligned_1stSide = np.squeeze(Data.pop('time_aligned_stim_1stSideTry')).astype('float')
'''
# Identify nan trials
trsExcluded = (np.sum(np.isnan(spikeAveEp0), axis = 1) + np.isnan(choiceVec0)) != 0 # NaN trials # trsExcluded
# print sum(trsExcluded), 'NaN trials'
# Exclude nan trials
X = spikeAveEp0[~trsExcluded,:]; # trials x neurons
Y = choiceVec0[~trsExcluded];
print '%d high-rate choices, and %d low-rate choices\n' %(np.sum(Y==1), np.sum(Y==0))
# In[15]:
# Set NsExcluded : Identify neurons that did not fire in any of the trials (during ep) and then exclude them. Otherwise they cause problem for feature normalization.
# thAct and thTrsWithSpike are parameters that you can play with.
# If it is already saved, load it (the idea is to use the same NsExcluded for all the analyses of a session). Otherwise set it.
if trialHistAnalysis==0:
svmnowname = 'svmCurrChoice_allN' + '_*-' + pnevFileName[-32:]
else:
svmnowname = 'svmPrevChoice_allN_allITIs' + '_*-' + pnevFileName[-32:]
svmName = glob.glob(os.path.join(os.path.dirname(pnevFileName), 'svm', svmnowname))
svmName = sorted(svmName, key=os.path.getmtime)[::-1] # so the latest file is the 1st one.
if setNsExcluded==0 and np.shape(svmName)[0]!=0: # NsExcluded is already set and saved # 0: #
svmName = svmName[0] # get the latest file
print 'loading NsExcluded from file', svmName
Data = scio.loadmat(svmName, variable_names=['NsExcluded'])
NsExcluded = Data.pop('NsExcluded')[0,:].astype('bool')
NsExcluded = NsExcluded[nt]
stdX = np.std(X, axis = 0); # define stdX for all neurons; later we reset it only including active neurons
if min(stdX[~NsExcluded]) < thAct: # make sure the loaded NsExcluded makes sense; ie stdX of ~NsExcluded is above thAct
sys.exit(('min of stdX= %.8f; not supposed to be <%d (thAct)!' %(min(stdX), thAct)))
else:
print 'NsExcluded not saved, so setting it here'
if trialHistAnalysis and iTiFlg!=2:
# set X for short-ITI and long-ITI cases (XS,XL).
trsExcludedS = (np.sum(np.isnan(spikeAveEp0), axis = 1) + np.isnan(choiceVec0S)) != 0
XS = spikeAveEp0[~trsExcludedS,:]; # trials x neurons
trsExcludedL = (np.sum(np.isnan(spikeAveEp0), axis = 1) + np.isnan(choiceVec0L)) != 0
XL = spikeAveEp0[~trsExcludedL,:]; # trials x neurons
# Define NsExcluded as neurons with low stdX for either short ITI or long ITI trials.
# This is to make sure short and long ITI cases will include the same set of neurons.
stdXS = np.std(XS, axis = 0);
stdXL = np.std(XL, axis = 0);
NsExcluded = np.sum([stdXS < thAct, stdXL < thAct], axis=0)!=0 # if a neurons is non active for either short ITI or long ITI trials, exclude it.
else:
# Define NsExcluded as neurons with low stdX
stdX = np.std(X, axis = 0);
NsExcluded = stdX < thAct
# np.sum(stdX < thAct)
'''
# Set nonActiveNs, ie neurons whose average activity during ep is less than thAct.
# spikeAveEpAveTrs = np.nanmean(spikeAveEp0, axis=0); # 1 x units % response of each neuron averaged across epoch ep and trials.
spikeAveEpAveTrs = np.nanmean(X, axis=0); # 1 x units % response of each neuron averaged across epoch ep and trials.
# thAct = 5e-4; # 1e-5 #quantile(spikeAveEpAveTrs, .1);
nonActiveNs = spikeAveEpAveTrs < thAct;
print '\t%d neurons with ave activity in ep < %.5f' %(np.sum(nonActiveNs), thAct)
np.sum(nonActiveNs)
# Set NsFewTrActiv, ie neurons that are active in very few trials (by active I mean average activity during epoch ep)
# thTrsWithSpike = 1; # 3; # ceil(thMinFractTrs * size(spikeAveEp0,1)); % 30 % remove neurons with activity in <thSpTr trials.
nTrsWithSpike = np.sum(X > thAct, axis=0) # 0 # shows for each neuron, in how many trials the activity was above 0.
NsFewTrActiv = (nTrsWithSpike < thTrsWithSpike) # identify neurons that were active fewer than thTrsWithSpike.
print '\t%d neurons are active in < %i trials' %(np.sum(NsFewTrActiv), thTrsWithSpike)
# Now set the final NxExcluded: (neurons to exclude)
NsExcluded = (NsFewTrActiv + nonActiveNs)!=0
'''
print '%d = Final # non-active neurons' %(sum(NsExcluded))
# a = size(spikeAveEp0,2) - sum(NsExcluded);
print 'Using %d out of %d neurons; Fraction excluded = %.2f\n' %(np.shape(spikeAveEp0)[1]-sum(NsExcluded), np.shape(spikeAveEp0)[1], sum(NsExcluded)/float(np.shape(spikeAveEp0)[1]))
print '%i, %i, %i: #original inh, excit, unsure' %(np.sum(inhibitRois==1), np.sum(inhibitRois==0), np.sum(np.isnan(inhibitRois)))
# Check what fraction of inhibitRois are excluded, compare with excitatory neurons.
if neuronType==2:
print '%i, %i, %i: #excluded inh, excit, unsure' %(np.sum(inhibitRois[NsExcluded]==1), np.sum(inhibitRois[NsExcluded]==0), np.sum(np.isnan(inhibitRois[NsExcluded])))
print '%.2f, %.2f, %.2f: fraction excluded inh, excit, unsure\n' %(np.sum(inhibitRois[NsExcluded]==1)/float(np.sum(inhibitRois==1)), np.sum(inhibitRois[NsExcluded]==0)/float(np.sum(inhibitRois==0)), np.sum(np.isnan(inhibitRois[NsExcluded]))/float(np.sum(np.isnan(inhibitRois))))
# In[16]:
# Exclude non-active neurons from X and set inhRois (ie neurons that don't fire in any of the trials during ep)
X = X[:,~NsExcluded]
print np.shape(X)
# Set inhRois which is same as inhibitRois but with non-active neurons excluded. (it has same size as X)
if neuronType==2:
inhRois = inhibitRois[~NsExcluded]
# print 'Number: inhibit = %d, excit = %d, unsure = %d' %(np.sum(inhRois==1), np.sum(inhRois==0), np.sum(np.isnan(inhRois)))
# print 'Fraction: inhibit = %.2f, excit = %.2f, unsure = %.2f' %(fractInh, fractExc, fractUn)
# In[15]:
NsRand = np.ones(np.shape(X)[1]).astype('bool')
'''
## If number of neurons is more than 95% of trial numbers, identify n random neurons, where n= 0.95 * number of trials. This is to make sure we have more observations (trials) than features (neurons)
nTrs = np.shape(X)[0]
nNeuronsOrig = np.shape(X)[1]
nNeuronsNow = np.int(np.floor(nTrs * .95))
if nNeuronsNow < nNeuronsOrig:
if neuronType==2:
fractInh = np.sum(inhRois==1) / float(nNeuronsOrig)
fractExc = np.sum(inhRois==0) / float(nNeuronsOrig)
fractUn = np.sum(np.isnan(inhRois)) / float(nNeuronsOrig)
print 'Number: inhibit = %d, excit = %d, unsure = %d' %(np.sum(inhRois==1), np.sum(inhRois==0), np.sum(np.isnan(inhRois)))
print 'Fraction: inhibit = %.2f, excit = %.2f, unsure = %.2f' %(fractInh, fractExc, fractUn)
elif neuronType==0: # exc
fractInh = 0;
fractExc = 1;
fractUn = 0;
elif neuronType==1: # inh
fractInh = 1;
fractExc = 0;
fractUn = 0;
# Define how many neurons you need to pick from each pool of inh, exc, unsure.
nInh = int(np.ceil(fractInh*nNeuronsNow))
nExc = int(np.ceil(fractExc*nNeuronsNow))
nUn = nNeuronsNow - (nInh + nExc) # fractUn*nNeuronsNow
print '\nThere are', nTrs, 'trials; So selecting', nNeuronsNow, 'neurons out of', nNeuronsOrig
print '%i, %i, %i: number of selected inh, excit, unsure' %(nInh, nExc, nUn)
# Select nInh random indeces out of the inhibibory pool
inhI = np.argwhere(inhRois==1)
inhNow = rng.permutation(inhI)[0:nInh].flatten() # random indeces
# Select nExc random indeces out of the excitatory pool
excI = np.argwhere(inhRois==0)
excNow = rng.permutation(excI)[0:nExc].flatten()
# Select nUn random indeces out of the unsure pool
unI = np.argwhere(np.isnan(inhRois))
unNow = rng.permutation(unI)[0:nUn].flatten()
# Put all the 3 groups together
neuronsNow = np.sort(np.concatenate([inhNow,excNow,unNow]), axis=None)
np.shape(neuronsNow)
# neuronsNow
# np.max(neuronsNow)
# Define a logical array with 1s for randomly selected neurons (length = number of neurons in X (after excluding NsExcluded))
NsRand = np.arange(np.shape(X)[1])
NsRand = np.in1d(NsRand, neuronsNow)
# np.shape(NsRand)
# NsRand
else: # if number of neurons is already <= .95*numTrials, include all neurons.
print 'Not doing random selection of neurons (nNeurons=%d already fewer than .95*nTrials=%d)' %(np.shape(X)[1], nTrs)
NsRand = np.ones(np.shape(X)[1]).astype('bool')
'''
# In[21]:
# Set X and inhRois only for the randomly selected set of neurons
X = X[:,NsRand]
if neuronType==2:
inhRois = inhRois[NsRand]
if windowAvgFlg==0:
a = np.transpose(traces_al_stim[ep,:,:][:,~NsExcluded,:][:,:,~trsExcluded], (0,2,1)) # ep_frames x trials x units
a = a[:,:,neuronsNow]
X = np.reshape(a, (ep.shape[0]*(~trsExcluded).sum(), (~NsExcluded).sum())) # (ep_frames x trials) x units
Y = np.tile(np.reshape(choiceVec0[~trsExcluded], (1,-1)), (ep.shape[0], 1)).flatten()
# Handle imbalance in the number of trials:
# unlike matlab, it doesn't seem to be a problem here... so we don't make trial numbers of HR and LR the same.
# Print some numbers
numDataPoints = X.shape[0]
print '# data points = %d' %numDataPoints
# numTrials = (~trsExcluded).sum()
# numNeurons = (~NsExcluded).sum()
numTrials, numNeurons = X.shape
print '%d trials; %d neurons' %(numTrials, numNeurons)
# print ' The data has %d frames recorded from %d neurons at %d trials' %Xt.shape
# In[22]:
# Center and normalize X: feature normalization and scaling: to remove effects related to scaling and bias of each neuron, we need to zscore data (i.e., make data mean 0 and variance 1 for each neuron)
meanX = np.mean(X, axis = 0);
stdX = np.std(X, axis = 0);
# normalize X
X = (X-meanX)/stdX;
# In[23]:
if doPlots:
plt.figure
plt.subplot(2,2,1)
plt.plot(meanX)
plt.ylabel('meanX \n(ep-ave FR, mean of trials)')
plt.title('min = %.6f' %(np.min(meanX)))
plt.subplot(2,2,3)
plt.plot(stdX)
plt.ylabel('stdX \n(ep-ave FR, std of trials)')
plt.xlabel('neurons')
plt.title('min = %.6f' %(np.min(stdX)))
plt.tight_layout() #(pad=0.4, w_pad=0.5, h_pad=1.0)
# plt.subplots_adjust(hspace=.5)
# ## Set the traces that will be used for projections and plotting
# Traces are of size (frames x neurons x trials)
# Choose trials that will be used for projections (trs4project = 'trained', 'all', 'corr', 'incorr')
# Remove non-active neurons
# Do feature normalization and scaling for the traces (using mean and sd of X)
# In[24]:
# trs4project = 'incorr' # 'trained', 'all', 'corr', 'incorr'
# Data = scio.loadmat(postName, variable_names=['outcomes', 'allResp_HR_LR'])
# choiceVecAll = (Data.pop('allResp_HR_LR').astype('float'))[0,:]
# Set trials that will be used for projection traces
if trs4project=='all':
Xt = traces_al_stim
'''
Xt_choiceAl = traces_al_1stSide
Xt_goAl = traces_al_go
Xt_rewAl = traces_al_rew
Xt_incorrRespAl = traces_al_incorrResp
Xt_initAl = traces_al_init
'''
Xt_stimAl_all = traces_al_stimAll
choiceVecNow = choiceVecAll
elif trs4project=='trained':
Xt = traces_al_stim[:, :, ~trsExcluded];
'''
Xt_choiceAl = traces_al_1stSide[:, :, ~trsExcluded];
Xt_goAl = traces_al_go[:, :, ~trsExcluded];
Xt_rewAl = traces_al_rew[:, :, ~trsExcluded];
Xt_incorrRespAl = traces_al_incorrResp[:, :, ~trsExcluded];
Xt_initAl = traces_al_init[:, :, ~trsExcluded];
'''
Xt_stimAl_all = traces_al_stimAll[:, :, ~trsExcluded];
choiceVecNow = Y
elif trs4project=='corr':
Xt = traces_al_stim[:, :, outcomes==1];
'''
Xt_choiceAl = traces_al_1stSide[:, :, outcomes==1];
Xt_goAl = traces_al_go[:, :, outcomes==1];
Xt_rewAl = traces_al_rew[:, :, outcomes==1];
Xt_incorrRespAl = traces_al_incorrResp[:, :, outcomes==1];
Xt_initAl = traces_al_init[:, :, outcomes==1];
'''
Xt_stimAl_all = traces_al_stimAll[:, :, outcomes==1];
choiceVecNow = choiceVecAll[outcomes==1]
elif trs4project=='incorr':
Xt = traces_al_stim[:, :, outcomes==0];
'''
Xt_choiceAl = traces_al_1stSide[:, :, outcomes==0];
Xt_goAl = traces_al_go[:, :, outcomes==0];
Xt_rewAl = traces_al_rew[:, :, outcomes==0];
Xt_incorrRespAl = traces_al_incorrResp[:, :, outcomes==0];
Xt_initAl = traces_al_init[:, :, outcomes==0];
'''
Xt_stimAl_all = traces_al_stimAll[:, :, outcomes==0];
choiceVecNow = choiceVecAll[outcomes==0]
## Xt = traces_al_stim[:, :, np.sum(np.sum(np.isnan(traces_al_stim), axis =0), axis =0)==0];
## Xt_choiceAl = traces_al_1stSide[:, :, np.sum(np.sum(np.isnan(traces_al_1stSide), axis =0), axis =0)==0];
# Exclude non-active neurons (ie neurons that don't fire in any of the trials during ep)
Xt = Xt[:,~NsExcluded,:]
'''
Xt_choiceAl = Xt_choiceAl[:,~NsExcluded,:]
Xt_goAl = Xt_goAl[:,~NsExcluded,:]
Xt_rewAl = Xt_rewAl[:,~NsExcluded,:]
Xt_incorrRespAl = Xt_incorrRespAl[:,~NsExcluded,:]
Xt_initAl = Xt_initAl[:,~NsExcluded,:]
'''
Xt_stimAl_all = Xt_stimAl_all[:,~NsExcluded,:]
# Only include the randomly selected set of neurons
Xt = Xt[:,NsRand,:]
'''
Xt_choiceAl = Xt_choiceAl[:,NsRand,:]
Xt_goAl = Xt_goAl[:,NsRand,:]
Xt_rewAl = Xt_rewAl[:,NsRand,:]
Xt_incorrRespAl = Xt_incorrRespAl[:,NsRand,:]
Xt_initAl = Xt_initAl[:,NsRand,:]
'''
Xt_stimAl_all = Xt_stimAl_all[:,NsRand,:]
# Divide data into high-rate (modeled as 1) and low-rate (modeled as 0) trials
hr_trs = (choiceVecNow==1)
lr_trs = (choiceVecNow==0)
# print 'Projection traces have %d high-rate trials, and %d low-rate trials' %(np.sum(hr_trs), np.sum(lr_trs))
# window of training (ep)
win = (ep-eventI)*frameLength
# Plot stim-aligned averages after centering and normalization
if doPlots:
plt.figure()
plt.subplot(1,2,1)
a1 = np.nanmean(Xt[:, :, hr_trs], axis=1) # frames x trials (average across neurons)
tr1 = np.nanmean(a1, axis = 1)
tr1_se = np.nanstd(a1, axis = 1) / np.sqrt(numTrials);
a0 = np.nanmean(Xt[:, :, lr_trs], axis=1) # frames x trials (average across neurons)
tr0 = np.nanmean(a0, axis = 1)
tr0_se = np.nanstd(a0, axis = 1) / np.sqrt(numTrials);
mn = np.concatenate([tr1,tr0]).min()
mx = np.concatenate([tr1,tr0]).max()
plt.plot([win[0], win[0]], [mn, mx], 'g-.') # mark the begining and end of training window
plt.plot([win[-1], win[-1]], [mn, mx], 'g-.')
plt.fill_between(time_aligned_stim, tr1-tr1_se, tr1+tr1_se, alpha=0.5, edgecolor='b', facecolor='b')
plt.fill_between(time_aligned_stim, tr0-tr0_se, tr0+tr0_se, alpha=0.5, edgecolor='r', facecolor='r')
plt.plot(time_aligned_stim, tr1, 'b', label = 'high rate')
plt.plot(time_aligned_stim, tr0, 'r', label = 'low rate')
# plt.plot(time_aligned_stim, np.nanmean(Xt[:, :, lr_trs], axis = (1, 2)), 'r', label = 'high rate')
# plt.plot(time_aligned_stim, np.nanmean(Xt[:, :, hr_trs], axis = (1, 2)), 'b', label = 'low rate')
plt.xlabel('time aligned to stimulus onset (ms)')
plt.title('Population average - raw')
plt.legend()
## Feature normalization and scaling
# normalize stim-aligned traces
T, N, C = Xt.shape
Xt_N = np.reshape(Xt.transpose(0 ,2 ,1), (T*C, N), order = 'F')
Xt_N = (Xt_N-meanX)/stdX
Xt = np.reshape(Xt_N, (T, C, N), order = 'F').transpose(0 ,2 ,1)
# normalize stimAll-aligned traces
Tsa, Nsa, Csa = Xt_stimAl_all.shape
Xtsa_N = np.reshape(Xt_stimAl_all.transpose(0 ,2 ,1), (Tsa*Csa, Nsa), order = 'F')
Xtsa_N = (Xtsa_N-meanX)/stdX
Xtsa = np.reshape(Xtsa_N, (Tsa, Csa, Nsa), order = 'F').transpose(0 ,2 ,1)
'''
# normalize goTome-aligned traces
Tg, Ng, Cg = Xt_goAl.shape
Xtg_N = np.reshape(Xt_goAl.transpose(0 ,2 ,1), (Tg*Cg, Ng), order = 'F')
Xtg_N = (Xtg_N-meanX)/stdX
Xtg = np.reshape(Xtg_N, (Tg, Cg, Ng), order = 'F').transpose(0 ,2 ,1)
# normalize choice-aligned traces
Tc, Nc, Cc = Xt_choiceAl.shape
Xtc_N = np.reshape(Xt_choiceAl.transpose(0 ,2 ,1), (Tc*Cc, Nc), order = 'F')
Xtc_N = (Xtc_N-meanX)/stdX
Xtc = np.reshape(Xtc_N, (Tc, Cc, Nc), order = 'F').transpose(0 ,2 ,1)
# normalize reward-aligned traces
Tr, Nr, Cr = Xt_rewAl.shape
Xtr_N = np.reshape(Xt_rewAl.transpose(0 ,2 ,1), (Tr*Cr, Nr), order = 'F')
Xtr_N = (Xtr_N-meanX)/stdX
Xtr = np.reshape(Xtr_N, (Tr, Cr, Nr), order = 'F').transpose(0 ,2 ,1)
# normalize commitIncorrect-aligned traces
Tp, Np, Cp = Xt_incorrRespAl.shape
Xtp_N = np.reshape(Xt_incorrRespAl.transpose(0 ,2 ,1), (Tp*Cp, Np), order = 'F')
Xtp_N = (Xtp_N-meanX)/stdX
Xtp = np.reshape(Xtp_N, (Tp, Cp, Np), order = 'F').transpose(0 ,2 ,1)
# normalize init-aligned traces
Ti, Ni, Ci = Xt_initAl.shape
Xti_N = np.reshape(Xt_initAl.transpose(0 ,2 ,1), (Ti*Ci, Ni), order = 'F')
Xti_N = (Xti_N-meanX)/stdX
Xti = np.reshape(Xti_N, (Ti, Ci, Ni), order = 'F').transpose(0 ,2 ,1)
'''
np.shape(Xt)
# window of training (ep)
# win = (ep-eventI)*frameLength
# Plot stim-aligned averages after centering and normalization
if doPlots:
# plt.figure()
plt.subplot(1,2,2)
a1 = np.nanmean(Xt[:, :, hr_trs], axis=1) # frames x trials (average across neurons)
tr1 = np.nanmean(a1, axis = 1)
tr1_se = np.nanstd(a1, axis = 1) / np.sqrt(numTrials);
a0 = np.nanmean(Xt[:, :, lr_trs], axis=1) # frames x trials (average across neurons)
tr0 = np.nanmean(a0, axis = 1)
tr0_se = np.nanstd(a0, axis = 1) / np.sqrt(numTrials);
mn = np.concatenate([tr1,tr0]).min()
mx = np.concatenate([tr1,tr0]).max()
plt.plot([win[0], win[0]], [mn, mx], 'g-.') # mark the begining and end of training window
plt.plot([win[-1], win[-1]], [mn, mx], 'g-.')
plt.fill_between(time_aligned_stim, tr1-tr1_se, tr1+tr1_se, alpha=0.5, edgecolor='b', facecolor='b')
plt.fill_between(time_aligned_stim, tr0-tr0_se, tr0+tr0_se, alpha=0.5, edgecolor='r', facecolor='r')
plt.plot(time_aligned_stim, tr1, 'b', label = 'high rate')
plt.plot(time_aligned_stim, tr0, 'r', label = 'low rate')
# plt.plot(time_aligned_stim, np.nanmean(Xt[:, :, lr_trs], axis = (1, 2)), 'r', label = 'high rate')
# plt.plot(time_aligned_stim, np.nanmean(Xt[:, :, hr_trs], axis = (1, 2)), 'b', label = 'low rate')
plt.xlabel('time aligned to stimulus onset (ms)')
plt.title('Population average - normalized')
plt.legend()
|
22,728 | 565bf52b3c9cfd0c59da50ac2c110168be486bc2 | """Configurations and constant variables here."""
# Elasticsearch index definition used at index-creation time: a minimal
# single-shard, zero-replica index whose "salads" document type is mapped
# strictly (documents carrying fields not listed below are rejected).
SETTINGS = {
    'settings': {'number_of_shards': 1, 'number_of_replicas': 0},
    'mappings': {
        'salads': {
            # 'strict' makes ES raise on any field absent from 'properties'.
            'dynamic': 'strict',
            'properties': {
                'title': {'type': 'text'},
                'submitter': {'type': 'text'},
                'description': {'type': 'text'},
                'calories': {'type': 'float'},
                # Each ingredient is indexed as its own nested document.
                'ingredients': {
                    'type': 'nested',
                    'properties': {'step': {'type': 'text'}},
                },
                'url': {'type': 'text'},
            },
        },
    },
}
# Human-readable axis/legend labels for every collected metric, keyed by the
# raw stat name emitted by the various collectors. Presumably consumed by the
# (unseen) plotting code — TODO confirm against the caller.
# Fixes in this revision (labels are user-visible text):
#   - "Secs in checkpointin"  -> "Secs in checkpointing"
#   - "N1QL avtive requests"  -> "N1QL active requests"
#   - "Average response time for N1QL" -> "... response size ..." (key is
#     query_avg_response_size, so "time" was a copy-paste slip)
LABELS = {
    # Bucket / memcached-level stats
    "rebalance_progress": "Rebalance progress, %",
    "ops": "Ops per sec",
    "cmd_get": "GET ops per sec",
    "cmd_set": "SET ops per sec",
    "delete_hits": "DELETE ops per sec",
    "cas_hits": "CAS ops per sec",
    "curr_connections": "Connections",
    "hibernated_waked": "Streaming requests wakeups per sec",
    "curr_items": "Active items",
    "mem_used": "Memory used, bytes",
    "ep_meta_data_memory": "Metadata in RAM, bytes",
    "vb_active_resident_items_ratio": "Active docs resident, %",
    "vb_replica_resident_items_ratio": "Replica docs resident, %",
    "ep_num_value_ejects": "Ejections per sec",
    "ep_dcp_replica_items_remaining": "DCP replication backlog, items",
    "ep_dcp_replica_total_bytes": "DCP replication bytes sent, bytes",
    "ep_dcp_other_items_remaining": "DCP clients backlog, items",
    "ep_dcp_other_total_bytes": "DCP clients bytes sent, bytes",
    "disk_write_queue": "Disk write queue, items",
    "ep_cache_miss_rate": "Cache miss ratio, %",
    "ep_bg_fetched": "Disk reads per sec",
    "ep_diskqueue_drain": "Drain rate, items/sec",
    "avg_bg_wait_time": "BgFetcher wait time, us",
    "avg_disk_commit_time": "Disk commit time, s",
    "avg_disk_update_time": "Disk update time, us",
    "couch_docs_data_size": "Docs data size, bytes",
    "couch_docs_actual_disk_size": "Docs total disk size, bytes",
    "couch_docs_fragmentation": "Docs fragmentation, %",
    "couch_total_disk_size": "Total disk size, bytes",
    # XDCR (cross-datacenter replication) stats
    "replication_changes_left": "Outbound XDCR mutations, items",
    "replication_size_rep_queue": "XDC replication queue, bytes",
    "replication_rate_replication": "Mutation replication rate per sec",
    "replication_bandwidth_usage": "Data replication rate, bytes/sec",
    "replication_work_time": "Secs in replicating",
    "replication_commit_time": "Secs in checkpointing",
    "replication_active_vbreps": "Active vbucket replications",
    "replication_waiting_vbreps": "Waiting vbucket replications",
    "replication_num_checkpoints": "Checkpoints issued",
    "replication_num_failedckpts": "Checkpoints failed",
    "replication_meta_latency_wt": "Weighted meta ops latency, ms",
    "replication_docs_latency_wt": "Weighted doc ops latency, ms",
    "xdc_ops": "Total XDCR operations per sec",
    "ep_num_ops_get_meta": "Metadata reads per sec",
    "ep_num_ops_set_meta": "Metadata sets per sec",
    "ep_num_ops_del_meta": "Metadata deletes per sec",
    # Views
    "couch_views_ops": "View reads per sec",
    "couch_views_data_size": "Views data size, bytes",
    "couch_views_actual_disk_size": "Views total disk size, bytes",
    "couch_views_fragmentation": "Views fragmentation, %",
    # Host / per-process resource usage
    "cpu_utilization_rate": "CPU utilization across all cores in cluster, %",
    "swap_used": "Swap space in use across all servers in cluster, bytes",
    "beam.smp_rss": "beam.smp resident set size, bytes",
    "beam.smp_cpu": "beam.smp CPU utilization, %",
    "memcached_rss": "memcached resident set size, bytes",
    "memcached_cpu": "memcached CPU utilization, %",
    "indexer_rss": "indexer resident set size, bytes",
    "indexer_cpu": "indexer CPU utilization, %",
    "projector_rss": "projector resident set size, bytes",
    "projector_cpu": "projector CPU utilization, %",
    "cbq-engine_rss": "query resident set size, bytes",
    "cbq-engine_cpu": "query CPU utilization, %",
    "backup_rss": "backup resident set size, bytes",
    "backup_cpu": "backup CPU utilization, %",
    "sync_gateway_rss": "Sync Gateway resident set size, bytes",
    "sync_gateway_cpu": "Sync Gateway CPU utilization, %",
    # End-to-end latencies
    "xdcr_lag": "Total XDCR lag (from memory to memory), ms",
    "xdcr_persistence_time": "Observe latency, ms",
    "xdcr_diff": "Replication lag, ms",
    "latency_set": "SET ops latency, ms",
    "latency_get": "GET ops latency, ms",
    "latency_query": "Query latency, ms",
    "latency_observe": "OBSERVE latency, ms",
    # Go runtime (MemStats / GC) counters
    "Sys": "Bytes obtained from system",
    "Alloc": "Bytes allocated and still in use",
    "HeapAlloc": "Bytes allocated and still in use",
    "HeapObjects": "Total number of allocated objects",
    "PauseTotalNs": "Total GC pause time, ns",
    "PauseNs": "GC pause time, ns",
    "NumGC": "GC events",
    "PausesPct": "Percentage of total time spent in GC, %",
    # Sync Gateway request latencies
    "gateway_push": "Single push request to SGW, ms",
    "gateway_pull": "Single pull request to SGW, ms",
    # Disk I/O per mount point (data / index / backup)
    "data_rbps": "Bytes read/sec",
    "data_wbps": "Bytes written/sec",
    "data_avgqusz": "The average queue length",
    "data_util": "Disk bandwidth utilization, %",
    "index_rbps": "Bytes read/sec",
    "index_wbps": "Bytes written/sec",
    "index_avgqusz": "The average queue length",
    "index_util": "Disk bandwidth utilization, %",
    "backup_rbps": "Bytes read/sec",
    "backup_wbps": "Bytes written/sec",
    "backup_avgqusz": "The average queue length",
    "backup_util": "Disk bandwidth utilization, %",
    "bucket_compaction_progress": "Compaction progress, %",
    # Network counters
    "in_bytes_per_sec": "Incoming bytes/sec",
    "out_bytes_per_sec": "Outgoing bytes/sec",
    "in_packets_per_sec": "Incoming packets/sec",
    "out_packets_per_sec": "Outgoing packets/sec",
    "ESTABLISHED": "Connections in ESTABLISHED state",
    "TIME_WAIT": "Connections in TIME_WAIT state",
    # Secondary indexing (2i)
    "index_num_rows_returned": "Number of rows returned by 2i",
    "index_scan_bytes_read": "Bytes read by 2i scans",
    "index_num_requests": "Number of 2i requests",
    "index_num_docs_indexed": "Number of documents indexed in 2i",
    "index_num_docs_pending": "Number of remaining documents to be indexed",
    "index_fragmentation": "fragmentation in secondary indexing",
    "index_data_size": "2i data size",
    "index_disk_size": "2i size on disk",
    "index_total_scan_duration": "total time spent by 2i on scans",
    "index_items_count": "number of items in 2i",
    # N1QL query service
    "query_avg_req_time": "Average processing time for N1QL",
    "query_avg_svc_time": "Average servicing time for N1QL",
    "query_avg_response_size": "Average response size for N1QL",
    "query_avg_result_count": "Average result count for N1QL",
    "query_active_requests": "N1QL active requests",
    "query_errors": "N1QL errors",
    "query_queued_requests": "N1QL queued requests",
    "query_request_time": "N1QL request times",
    "query_requests": "Query raw latency",
    "query_requests_1000ms": "Query latency above 1000ms",
    "query_requests_250ms": "Query latency above 250ms",
    "query_requests_5000ms": "Query latency above 5000ms",
    "query_requests_500ms": "Query latency above 500ms",
    "query_result_count": "N1QL result count",
    "query_result_size": "N1QL result size",
    "query_selects": "N1QL selects per second",
    "query_service_time": "N1QL service times",
    "query_warnings": "N1QL warnings",
}
# Metric names (keys of LABELS) that the consuming code — not visible here —
# presumably renders as latency histograms. TODO confirm against the plotter.
HISTOGRAMS = (
    "latency_get", "latency_set", "latency_query", "latency_observe",
    "xdcr_lag", "xdcr_persistence_time", "xdcr_diff",
    "replication_meta_latency_wt", "replication_docs_latency_wt",
    "avg_bg_wait_time", "avg_disk_commit_time", "avg_disk_update_time",
    "query_requests", "index_num_requests",
)
# Subset of histogram metrics that presumably also get a zoomed-in variant.
ZOOM_HISTOGRAMS = (
    "latency_get", "latency_set", "latency_query", "avg_bg_wait_time",
)
# Metrics presumably plotted as kernel-density estimates.
KDE = (
    "latency_query", "latency_get", "latency_set", "xdcr_lag",
)
# Metrics whose subplots are presumably smoothed before plotting.
SMOOTH_SUBPLOTS = (
    "latency_query", "latency_get", "latency_set",
)
# Metrics for which zero samples are presumably filtered out (the name
# suggests only non-zero values are kept/plotted) — verify against caller.
NON_ZERO_VALUES = (
    "rebalance_progress",
    "bucket_compaction_progress",
    "ops",
    "cmd_get",
    "cmd_set",
    "delete_hits",
    "cas_hits",
    "couch_views_ops",
    "couch_views_data_size",
    "couch_views_actual_disk_size",
    "couch_views_fragmentation",
    "couch_docs_fragmentation",
    "hibernated_waked",
    "ep_tmp_oom_errors",
    "disk_write_queue",
    "ep_diskqueue_drain",
    "ep_cache_miss_rate",
    "ep_num_value_ejects",
    "ep_bg_fetched",
    "avg_bg_wait_time",
    "avg_disk_commit_time",
    "avg_disk_update_time",
    "xdc_ops",
    "ep_num_ops_get_meta",
    "ep_num_ops_set_meta",
    "ep_num_ops_del_meta",
    "replication_changes_left",
    "replication_size_rep_queue",
    "replication_rate_replication",
    "replication_bandwidth_usage",
    "replication_work_time",
    "replication_commit_time",
    "replication_active_vbreps",
    "replication_waiting_vbreps",
    "replication_num_checkpoints",
    "replication_num_failedckpts",
    "replication_meta_latency_wt",
    "replication_docs_latency_wt",
    # NOTE(review): "bucket_compaction_progress" below duplicates the entry
    # near the top of this tuple — harmless for `in` membership tests, but
    # confirm intent before deduplicating.
    "bucket_compaction_progress",
    "swap_used",
    "data_rbps",
    "data_wbps",
    "data_avgqusz",
    "data_util",
    "index_rbps",
    "index_wbps",
    "index_avgqusz",
    "index_util",
    "TIME_WAIT",
)
# Hex RGB color cycle, presumably used when drawing multiple series.
PALETTE = (
    "#51A351",
    "#f89406",
    "#7D1935",
    "#4A96AD",
    "#DE1B1B",
    "#E9E581",
    "#A2AB58",
    "#FFE658",
    "#118C4E",
    "#193D4F",
)
|
22,730 | 2c5d0be5d75f10a1361cd572a83fb483d23de5fd | # -*- coding: utf-8 -*-
from cutstring import Cuts, Cut
import logging
logger = logging.getLogger(__name__)
"""
"""
class Channel(object):
    """Abstract base for an analysis channel: a short name plus a cut set.

    Subclasses populate ``self._name`` and ``self._cuts`` in ``__init__``.
    """
    @property
    def name(self):
        """Short channel identifier (e.g. ``"mt"``)."""
        return self._name
    @property
    def cuts(self):
        """The ``Cuts`` collection defining the channel selection."""
        return self._cuts
###########################################
# Common block
###########################################
class EE(Channel):
    """ee channel: opposite-sign, isolated electron pair on the single-electron trigger."""
    def __init__(self):
        self._name = "ee"
        self._cuts = Cuts(
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("iso_1<0.1 && iso_2<0.1", "ele_iso"), Cut("q_1*q_2<0", "os"),
            Cut("(trg_singleelectron==1 && pt_1>26 && pt_2>26)",
                "trg_singleelectron"))
# Common MM
class MM2016(Channel):
    """mm channel, 2016: OS isolated muon pair, m_vis > 50, single-muon trigger."""
    def __init__(self):
        self._name = "mm"
        self._cuts = Cuts(
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("iso_1<0.15 && iso_2<0.15", "muon_iso"), Cut(
                "q_1*q_2<0", "os"),
            Cut("m_vis > 50","m_vis_cut"),
            # NOTE(review): the trailing "&&(0<1)" is always true -- looks
            # like a leftover placeholder; confirm it can be dropped.
            Cut("(pt_1 > 23 && trg_singlemuon==1)&&(0<1)", "trg_selection"))
class MM2017(Channel):
    """mm channel, 2017: OS isolated muon pair, m_vis > 50, IsoMu24/27 triggers."""
    def __init__(self):
        self._name = "mm"
        self._cuts = Cuts(
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("iso_1<0.15 && iso_2<0.15", "muon_iso"), Cut(
                "q_1*q_2<0", "os"),
            Cut("m_vis > 50","m_vis_cut"),
            Cut("(trg_singlemuon_27==1 || trg_singlemuon_24==1)", "trg_selection"))
class MM2018(Channel):
    """mm channel, 2018: same selection as MM2017 (OS isolated muons, IsoMu24/27)."""
    def __init__(self):
        self._name = "mm"
        self._cuts = Cuts(
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("iso_1<0.15 && iso_2<0.15", "muon_iso"), Cut(
                "q_1*q_2<0", "os"),
            Cut("m_vis > 50","m_vis_cut"),
            Cut("(trg_singlemuon_27==1 || trg_singlemuon_24==1)", "trg_selection"))
# Common MT
class MT2017(Channel):
    """mt channel, 2017: mT(mu,MET)<70, DeepTau ID, single-mu or mu20tau27 cross trigger."""
    def __init__(self):
        self._name = "mt"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byTightDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "muon_iso"), Cut("q_1*q_2<0", "os"),
            # Cross trigger only used below the single-muon threshold (pt_1 < 25).
            Cut("pt_2>30 && ((trg_singlemuon_27 == 1) || (trg_singlemuon_24 == 1) || (pt_1 < 25 && trg_crossmuon_mu20tau27 == 1))",
                "trg_selection"))
class MT2018(Channel):
    """mt channel, 2018: as MT2017 but with the HPS cross-trigger variant."""
    def __init__(self):
        self._name = "mt"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byTightDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "muon_iso"), Cut("q_1*q_2<0", "os"),
            # NOTE(review): unlike MT2017, the cross-trigger term here is OR'd
            # *outside* the "pt_2>30 && (...)" group, so cross-trigger events
            # bypass the pt_2 cut -- possibly a missing parenthesis; confirm.
            Cut("pt_2>30 && ((trg_singlemuon_27 == 1) || (trg_singlemuon_24 == 1)) || (pt_1 < 25 && (trg_crossmuon_mu20tau27_hps == 1 || trg_crossmuon_mu20tau27 == 1))",
                "trg_selection"))
class MT2016(Channel):
    """mt channel, 2016: mT(mu,MET)<70, DeepTau ID, single-mu or mu-tau cross trigger."""
    def __init__(self):
        self._name = "mt"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter==1", "met_filter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byTightDeepTau2017v2p1VSmu_2 > 0.5", "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_2>0.5","againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "muon_iso"),
            Cut("q_1*q_2<0", "os"),
            # Cross trigger is used below the single-muon threshold (pt_1 < 23)
            # and additionally restricts the tau to |eta| < 2.1.
            Cut("pt_2>30 && ((pt_1 >= 23 && trg_singlemuon == 1) || (trg_mutaucross == 1 && pt_1 < 23 && abs(eta_2)<2.1))","trg_selection")
        )
# Common ET
class ET2017(Channel):
    """et channel, 2017: mT(e,MET)<70, DeepTau ID, single-e or e-tau cross trigger.

    The trigger expression contains dedicated ``isEmbedded`` branches for
    embedded samples in the barrel/endcap transition region.
    """
    def __init__(self):
        self._name = "et"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "ele_iso"), Cut("q_1*q_2<0", "os"),
            Cut("pt_2>30 && pt_1 > 25 && (((trg_singleelectron_35 == 1) || (trg_singleelectron_32 == 1) || ((trg_singleelectron_27 == 1))) || (abs(eta_1)>1.5 && pt_1 >= 28 && pt_1 < 40 && isEmbedded)) || (pt_1>25 && pt_1<28 && pt_2>35 && ((isEmbedded && (abs(eta_1)>1.5)) || (trg_crossele_ele24tau30 == 1)))",
                "trg_selection"))
class ET2018(Channel):
    """et channel, 2018: as ET2017 but with the HPS cross-trigger variant."""
    def __init__(self):
        self._name = "et"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "ele_iso"), Cut("q_1*q_2<0", "os"),
            # Cross trigger used in the 25 < pt_1 < 33 window with harder pt_2.
            Cut("pt_2>30 && ((trg_singleelectron_35 == 1) || (trg_singleelectron_32 == 1) || (pt_1>25 && pt_1<33 && pt_2>35 && (trg_crossele_ele24tau30_hps == 1 || trg_crossele_ele24tau30 == 1)))",
                "trg_selection"))
class ET2016(Channel):
    """et channel, 2016: mT(e,MET)<70, DeepTau ID, single-e or e-tau cross trigger."""
    def __init__(self):
        self._name = "et"
        self._cuts = Cuts(
            Cut("mt_1_puppi<70", "transverse_mass"),
            Cut("flagMETFilter==1", "met_filter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_iso"),
            Cut("iso_1<0.15", "ele_iso"),
            Cut("q_1*q_2<0", "os"),
            # Cross trigger only in the narrow 25 < pt_1 < 26 window.
            Cut("pt_2>30 && ((pt_1>26 && (trg_singleelectron==1)) || (pt_1<26 && pt_1>25 && (trg_eletaucross==1)))", "trg_selection"))
# Common TT
class TT2016(Channel):
    """tt channel, 2016: OS di-tau pair, both taus DeepTau-tight, double-tau trigger."""
    def __init__(self):
        self._name = "tt"
        self._cuts = Cuts(
            Cut("flagMETFilter==1", "met_filter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_1>0.5 && byVLooseDeepTau2017v2p1VSmu_2>0.5", "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_1>0.5 && byVVLooseDeepTau2017v2p1VSe_2>0.5", "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_1>0.5", "tau_1_iso"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5", "tau_2_iso"),
            Cut("q_1*q_2<0", "os"),
            # pt_tt cut intentionally disabled here -- confirm before re-enabling.
            # Cut("pt_tt>50", "pt_h"),
            Cut("trg_doubletau==1", "trg_doubletau"))
class TT2017(Channel):
    """tt channel, 2017: OS di-tau pair; any of the three double-tau trigger paths."""
    def __init__(self):
        self._name = "tt"
        self._cuts = Cuts(
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_1>0.5 && byVLooseDeepTau2017v2p1VSmu_2>0.5",
                "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_1>0.5 && byVVLooseDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_1>0.5",
                "tau_1_iso"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5",
                "tau_2_iso"), Cut("q_1*q_2<0", "os"),
            Cut("(trg_doubletau_35_tightiso_tightid == 1) || (trg_doubletau_40_mediso_tightid == 1) || (trg_doubletau_40_tightiso == 1)",
                "trg_selection"))
class TT2018(Channel):
    """tt channel, 2018: OS di-tau pair; double-tau trigger split by run number.

    The trigger string switches between the legacy paths and the HPS path at
    run 317509 for data; MC/embedded always use the HPS path -- presumably the
    trigger-menu changeover; confirm against the trigger documentation.
    """
    def __init__(self):
        self._name = "tt"
        self._cuts = Cuts(
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("byVLooseDeepTau2017v2p1VSmu_1>0.5 && byVLooseDeepTau2017v2p1VSmu_2>0.5",
                "againstMuonDiscriminator"),
            Cut("byVVLooseDeepTau2017v2p1VSe_1>0.5 && byVVLooseDeepTau2017v2p1VSe_2>0.5",
                "againstElectronDiscriminator"),
            Cut("byTightDeepTau2017v2p1VSjet_1>0.5",
                "tau_1_iso"),
            Cut("byTightDeepTau2017v2p1VSjet_2>0.5",
                "tau_2_iso"), Cut("q_1*q_2<0", "os"),
            Cut("(((!(isMC||isEmbedded) && run>=317509) || (isMC||isEmbedded)) && (trg_doubletau_35_mediso_hps == 1)) || (!(isMC||isEmbedded) && (run<317509) && ((trg_doubletau_35_tightiso_tightid == 1) || (trg_doubletau_40_mediso_tightid == 1) || (trg_doubletau_40_tightiso == 1)))",
                "trg_selection"))
# Common EM
class EM2016(Channel):
    """em channel, 2016: OS e-mu pair on the asymmetric mu-e cross triggers."""
    def __init__(self):
        self._name = "em"
        self._cuts = Cuts(
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("iso_1<0.15", "ele_iso"), Cut("iso_2<0.2", "muon_iso"),
            Cut("q_1*q_2<0", "os"),
            Cut("abs(eta_1)<2.4", "electron_eta"),
            # Leg-dependent pt thresholds match the fired cross-trigger arm.
            Cut("pt_1>15 && pt_2>15 && ((pt_1>15 && pt_2>24 && trg_muonelectron_mu23ele12 == 1) || (pt_1>24 && pt_2>15 && trg_muonelectron_mu8ele23 == 1))","trg_selection"))
class EM2017(Channel):
    """em channel, 2017: OS e-mu pair; either mu-e cross trigger, flat pt > 15."""
    def __init__(self):
        self._name = "em"
        self._cuts = Cuts(
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("iso_1<0.15", "ele_iso"), Cut("iso_2<0.2", "muon_iso"),
            Cut("q_1*q_2<0", "os"),
            Cut("abs(eta_1)<2.4","electron_eta"),
            Cut("pt_1>15 && pt_2>15 && ((trg_muonelectron_mu23ele12 == 1) || (trg_muonelectron_mu8ele23 == 1))",
                "trg_selection"))
class EM2018(Channel):
    """em channel, 2018: OS e-mu pair; per-arm pt thresholds on the cross triggers."""
    def __init__(self):
        self._name = "em"
        self._cuts = Cuts(
            Cut("flagMETFilter == 1", "METFilter"),
            Cut("extraelec_veto<0.5", "extraelec_veto"),
            Cut("extramuon_veto<0.5", "extramuon_veto"),
            Cut("dilepton_veto<0.5", "dilepton_veto"),
            Cut("iso_1<0.15", "ele_iso"), Cut("iso_2<0.2", "muon_iso"),
            Cut("q_1*q_2<0", "os"),
            Cut("abs(eta_1)<2.4", "electron_eta"),
            Cut("(trg_muonelectron_mu23ele12 == 1 && pt_1>15 && pt_2 > 24) || (trg_muonelectron_mu8ele23 == 1 && pt_1>24 && pt_2>15)",
                "trg_selection"))
###########################################
# SM block
###########################################
# SM 2016
class ETSM2016(ET2016):
    """SM-analysis et channel, 2016; selection identical to ET2016."""
    # Useless __init__ (pure super() delegation) removed; construction
    # behaviour is unchanged.
class MTSM2016(MT2016):
    """SM-analysis mt channel, 2016; selection identical to MT2016."""
    # Useless __init__ (pure super() delegation) removed.
class TTSM2016(TT2016):
    """SM-analysis tt channel, 2016; selection identical to TT2016."""
    # Useless __init__ (pure super() delegation) removed.
class EMSM2016(EM2016):
    """SM-analysis em channel, 2016: EM2016 plus b-veto and mT(ll,MET) cut."""
    def __init__(self, **kwargs):
        super(EMSM2016, self).__init__(**kwargs)
        # Additional SM-category selection on top of the base em channel.
        self._cuts.add(
            Cut("nbtag==0 && mTdileptonMET_puppi<60", "bveto_mTdileptonMET"))
class MMSM2016(MM2016):
    """SM-analysis mm channel, 2016; selection identical to MM2016."""
    # Useless __init__ (pure super() delegation) removed.
class EESM2016(ET2016):
    """SM-analysis ee channel, 2016.

    NOTE(review): inherits the *et* selection (ET2016) despite the EE name --
    confirm whether this should derive from EE instead.
    """
    # Useless __init__ (pure super() delegation) removed.
# SM 2017
class ETSM2017(ET2017):
    """SM-analysis et channel, 2017; selection identical to ET2017."""
    # Useless __init__ (pure super() delegation) removed.
class MTSM2017(MT2017):
    """SM-analysis mt channel, 2017; selection identical to MT2017."""
    # Useless __init__ (pure super() delegation) removed.
class TTSM2017(TT2017):
    """SM-analysis tt channel, 2017; selection identical to TT2017."""
    # Useless __init__ (pure super() delegation) removed.
class EMSM2017(EM2017):
    """SM-analysis em channel, 2017: EM2017 plus b-veto and mT(ll,MET) cut."""
    def __init__(self, **kwargs):
        super(EMSM2017, self).__init__(**kwargs)
        # Additional SM-category selection on top of the base em channel.
        self._cuts.add(
            Cut("nbtag==0 && mTdileptonMET_puppi<60", "bveto_mTdileptonMET"))
class MMSM2017(MM2017):
    """SM-analysis mm channel, 2017; selection identical to MM2017."""
    # Useless __init__ (pure super() delegation) removed.
class EESM2017(ET2017):
    """SM-analysis ee channel, 2017.

    NOTE(review): inherits the *et* selection (ET2017) despite the EE name --
    confirm whether this should derive from EE instead.
    """
    # Useless __init__ (pure super() delegation) removed.
# SM 2018
class ETSM2018(ET2018):
    """SM-analysis et channel, 2018; selection identical to ET2018."""
    # Useless __init__ (pure super() delegation) removed.
class MTSM2018(MT2018):
    """SM-analysis mt channel, 2018; selection identical to MT2018."""
    # Useless __init__ (pure super() delegation) removed.
class TTSM2018(TT2018):
    """SM-analysis tt channel, 2018; selection identical to TT2018."""
    # Useless __init__ (pure super() delegation) removed.
class EMSM2018(EM2018):
    """SM-analysis em channel, 2018: EM2018 plus b-veto and mT(ll,MET) cut."""
    def __init__(self, **kwargs):
        super(EMSM2018, self).__init__(**kwargs)
        # Additional SM-category selection on top of the base em channel.
        self._cuts.add(
            Cut("nbtag==0 && mTdileptonMET_puppi<60", "bveto_mTdileptonMET"))
class MMSM2018(MM2018):
    """SM-analysis mm channel, 2018; selection identical to MM2018."""
    # Useless __init__ (pure super() delegation) removed.
class EESM2018(ET2018):
    """SM-analysis ee channel, 2018.

    NOTE(review): inherits the *et* selection (ET2018) despite the EE name --
    confirm whether this should derive from EE instead.
    """
    # Useless __init__ (pure super() delegation) removed.
###########################################
# MSSM block
###########################################
# MSSM 2016
class ETMSSM2016(ET2016):
    """MSSM-analysis et channel, 2016; selection identical to ET2016."""
    # Useless __init__ (pure super() delegation) removed.
class MTMSSM2016(MT2016):
    """MSSM-analysis mt channel, 2016; selection identical to MT2016."""
    # Useless __init__ (pure super() delegation) removed.
class TTMSSM2016(TT2016):
    """MSSM-analysis tt channel, 2016; selection identical to TT2016."""
    # Useless __init__ (pure super() delegation) removed.
class EMMSSM2016(EM2016):
    """MSSM-analysis em channel, 2016; selection identical to EM2016."""
    # Useless __init__ (pure super() delegation) removed.
# MSSM 2017
class ETMSSM2017(ET2017):
    """MSSM-analysis et channel, 2017; selection identical to ET2017."""
    # Useless __init__ (pure super() delegation) removed.
class MTMSSM2017(MT2017):
    """MSSM-analysis mt channel, 2017; selection identical to MT2017."""
    # Useless __init__ (pure super() delegation) removed.
class TTMSSM2017(TT2017):
    """MSSM-analysis tt channel, 2017; selection identical to TT2017."""
    # Useless __init__ (pure super() delegation) removed.
class EMMSSM2017(EM2017):
    """MSSM-analysis em channel, 2017; selection identical to EM2017."""
    # Useless __init__ (pure super() delegation) removed.
# MSSM 2018
class ETMSSM2018(ET2018):
    """MSSM-analysis et channel, 2018; selection identical to ET2018."""
    # Useless __init__ (pure super() delegation) removed.
class MTMSSM2018(MT2018):
    """MSSM-analysis mt channel, 2018; selection identical to MT2018."""
    # Useless __init__ (pure super() delegation) removed.
class TTMSSM2018(TT2018):
    """MSSM-analysis tt channel, 2018; selection identical to TT2018."""
    # Useless __init__ (pure super() delegation) removed.
class EMMSSM2018(EM2018):
    """MSSM-analysis em channel, 2018; selection identical to EM2018."""
    # Useless __init__ (pure super() delegation) removed.
# PU - no cuts
class PU(Channel):
    """Pile-up 'channel': applies no selection at all."""
    def __init__(self):
        self._cuts = Cuts()  # deliberately empty cut collection
        self._name = "pu"
# collection of channels an analysis can be ran on
class Channels(object):
    """Named, ordered collection of Channel objects an analysis runs over."""
    def __init__(self, name):
        self._channels = list()
        self._name = name
    @property
    def name(self):
        """Name of this channel collection."""
        return self._name
    def add(self, channel):
        """Append *channel* to the collection."""
        self._channels.append(channel)
|
22,731 | 6190be16b88fffd751b5b35e17220f2ad151206d | #!/usr/bin/env python3
import sys
import csv
import urllib3
import requests
import subprocess
import time
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
REQUEST_TIMEOUT = (10, 10)
def append_entries(host, port, err_msg, status_code="N/A", header="N/A", comment="N/A", nikto_output="N/A"):
    """Append a single scan-result row to final_443_test_res.csv.

    Column order matches the header written by check_host:
    IP, Port, Status_Code, Header, Error_Message, comment, Nikto_Output.
    """
    # The context manager closes the file; the explicit close() that used to
    # follow was redundant and has been removed.
    with open("final_443_test_res.csv", 'a+', newline='') as file:
        csv.writer(file).writerow([host, port, status_code, header, err_msg, comment, nikto_output])
def check_host(host, port):
    """Probe one host:port (80 or 443) via nikto/curl and an HTTP GET.

    `port` arrives as a *string* from the CSV input. Writes one result row
    per call via append_entries(); on the first call of a run (module-level
    `title` is False) it recreates the output CSV with a header row.
    """
    # Comment strings recorded in the CSV "comment" column.
    err_comment3 = 'http_error'
    err_comment5 = 'bad_status_line'
    err_comment6 = 'remote_disconnected'
    err_comment7 = 'connection_error'
    err_comment8 = 'Need more than 10 seconds to connect'
    err_comment9 = 'connect timeout'
    if title is False:
        # First call of the run: start a fresh results file with a header.
        import os
        for file in os.listdir(os.curdir):
            if "final_443_test_res.csv" == file:
                os.remove("final_443_test_res.csv")
        with open("final_443_test_res.csv", 'a+', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(["IP", "Port", "Status_Code", "Header", "Error_Message", "comment", "Nikto_Output"])
            # (redundant file.close() removed -- the `with` block closes it)
    else:
        pass
    if int(port) not in [80, 443]:
        print(f'Invalid port({port}) for host: {host}')
        return
    # BUGFIX: `port` is a string, so the old `(port == 80) and 'http' or
    # 'https'` always produced 'https'. Compare numerically instead.
    proto = 'http' if int(port) == 80 else 'https'
    url = proto + '://' + host
    # Ask nikto which web server is running on the target.
    os_command = []
    if port == str(80):
        os_command = "nikto -host http://{} -findonly".format(host)
    elif port == str(443):
        os_command = "nikto -host https://{} -findonly".format(host)
    nikto_comm = subprocess.run(os_command, shell=True, stdout=subprocess.PIPE)
    # NOTE(review): subprocess.run blocks until completion, so nikto_comm is
    # always set and this wait loop terminates immediately.
    count = 0
    while count < 30:
        if not nikto_comm:
            count = count + 5
            time.sleep(5)
        else:
            count = 30
    nikto_res = nikto_comm.stdout.split(b'\n')
    for ws in nikto_res:
        if "Server" in ws.decode("utf-8"):
            new_ws = (ws.split(b'\t')[1].decode("utf-8"))
            break
    else:
        # nikto found no "Server" line -- fall back to curl's Server: header.
        curl_command = []
        if port == str(80):
            curl_command = "curl -I {}:80 | grep Server:" .format(host)
        elif port == str(443):
            curl_command = "curl -I -k {}:443 | grep Server:" .format(host)
        curl_comm = subprocess.run(curl_command, shell=True, stdout=subprocess.PIPE)
        # NOTE(review): `count` is already 30 here, so this loop never runs.
        while count < 30:
            if not curl_comm:
                count = count + 5
                time.sleep(5)
            else:
                count = 30
        curl_res = curl_comm.stdout.split(b'\n')
        # NOTE(review): both branches break on the first line, so only the
        # first line of curl output is ever inspected -- likely a bug.
        for ws in curl_res:
            if "Server" in ws.decode("utf-8"):
                new_ws = (ws.split(b'\t')[0].decode("utf-8"))
                break
            else:
                new_ws = "No web server found"
                break
    try:
        response = requests.get(url, verify=False, timeout=REQUEST_TIMEOUT)
        print(response.status_code)
        print(f'response.headers: {response.headers.get("Content-Type")}')
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTPError:{host}:{port}:{http_err}')
        append_entries(host=host, port=port, status_code="N/A", header="N/A", err_msg=f"HTTPError:{host}:{port}:{http_err}", comment=err_comment3, nikto_output=new_ws)
    except requests.exceptions.SSLError as ssl_error:
        # Dig the last comma-separated fragment out of the nested SSL error
        # args and strip it to alphanumerics for the comment column.
        # NOTE(review): if ssl_error.args is empty, err_msg stays unbound.
        for error in ssl_error.args:
            for error in error.args:
                import re
                ssl_err_obj_list = error.split(',')
                for error in range(len(ssl_err_obj_list), 0, -1):
                    err_msg = re.sub('[^A-Za-z0-9]+', ' ', ssl_err_obj_list[error-1])
                    break
                break
        append_entries(host=host, port=port, status_code="N/A", header="N/A",
                       err_msg="UnknownException: " + str(ssl_error), comment=err_msg, nikto_output=new_ws)
    except requests.exceptions.ConnectionError as con_error:
        str_err = str(con_error)
        if 'BadStatusLine' in str_err:
            print(f'BadStatusLine:{host}:{port}')
            append_entries(host=host, port=port, status_code="N/A", header="N/A", err_msg="BadStatusLine:" + str_err, comment=err_comment5, nikto_output=new_ws)
        elif 'RemoteDisconnected' in str_err:
            print(f'RemoteDisconnected:{host}:{port}')
            append_entries(host=host, port=port, status_code="N/A", header="N/A", err_msg="RemoteDisconnected:" + str_err, comment=err_comment6, nikto_output=new_ws)
        else:
            if err_comment9 in str_err:
                append_entries(host=host, port=port, status_code="N/A", header="N/A",
                               err_msg="ConnectionError: " + str_err, comment=err_comment8, nikto_output=new_ws)
            else:
                print(f'ConnectionError:{host}:{port}:{con_error}')
                append_entries(host=host, port=port, status_code="N/A", header="N/A", err_msg="ConnectionError: " + str_err, comment=err_comment7, nikto_output=new_ws)
    except Exception as err:
        print(f'UnknownException :: {err}')
        append_entries(host=host, port=port, status_code="N/A", header="N/A", err_msg="ConnectionError: " + str(err),
                       comment=err_comment8, nikto_output=new_ws)
    else:
        print(f'{proto.upper()}_{response.status_code}:{host}:{port}')
        # NOTE(review): str({...}) wraps the header in a set literal before
        # stringifying -- the braces in the CSV look unintended; confirm.
        append_entries(host=host, port=port, status_code=str(response.status_code),
                       header=str({response.headers.get("Content-Type")}), err_msg="N/A", comment="statuscode:" +str(response.status_code), nikto_output=new_ws)
#check_host(host="17.252.242.85", port=80)
# `title` acts as a "header already written" sentinel for check_host():
# False forces the results CSV to be recreated with a header row.
title = True
if __name__ == "__main__":
    title = False
    # Input file: one "host,port" or "host:port" entry per line.
    with open(sys.argv[1], encoding='utf-8-sig') as f:
        for host in f.readlines():
            check_host(*host.strip().replace(',', ':').split(':'))
            # NOTE(review): `host` is always a non-empty string here, so this
            # loop exits immediately and never sleeps -- dead wait logic.
            count = 0
            while count < 30:
                if not host:
                    time.sleep(5)
                    count = count + 5
                else:
                    count = 30
            title = True
22,732 | ee33a4f7bbc156a10d3224e8ec5592caedcaa316 | import plotly
import plotly.graph_objects as go
import pandas as pd
# Plot the South Korean COVID-19 time series from Time.csv.
csv_file = 'Time.csv'
df = pd.read_csv(csv_file)
fig = go.Figure()
# BUGFIX: valid Scatter modes are 'lines', 'markers', 'lines+markers';
# the previous 'line+markers' / 'line' values raise a ValueError in plotly.
fig.add_trace(go.Scatter(x=df['date'], y=df['confirmed'], mode='lines+markers', name='Positive'))
fig.add_trace(go.Scatter(x=df['date'], y=df['released'], mode='markers', name='Released'))
fig.add_trace(go.Scatter(x=df['date'], y=df['deceased'], mode='lines', name='Deceased'))
fig.update_layout(
    title="Covid-19 Cases in South Korea",
    xaxis_title="Date",
    yaxis_title="Number of Cases",
)
fig.show()
# Also write a standalone HTML report next to the script.
plotly.offline.plot(fig, filename="CaseReport.html")
22,733 | 9ef04bb7cfa07b5dcbb1740a1fad1f561f659f22 | import sys
import tweepy
from textblob import TextBlob
# Twitter API credentials are passed on the command line (argv[1..4]);
# argv[5] is the search term (movie title).
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
access_token = sys.argv[3]
access_token_secret = sys.argv[4]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
movie = sys.argv[5]
# NOTE(review): API.search is the tweepy v3 name; tweepy v4 renamed it to
# search_tweets -- confirm the pinned tweepy version.
public_tweets = api.search(q=movie, count=100)
verdict = 0  # running *sum* (not average) of per-tweet polarity in [-1, 1]
count = 0    # number of tweets analysed
for tweet in public_tweets:
    print(tweet.text)
    analysis = TextBlob(tweet.text)
    print(analysis.sentiment)
    verdict += analysis.sentiment.polarity
    count += 1
print('***Sentiment of movie is: ', verdict)
print('***No. of tweets analysed: ', count)
22,734 | a4076bb15fe417302cbe50a2252639e4b8b8760a | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: distribution_policy_crud_ops_async.py
DESCRIPTION:
These samples demonstrates how to create Distribution Policy used in ACS JobRouter.
You need a valid connection string to an Azure Communication Service to execute the sample
USAGE:
python distribution_policy_crud_ops_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_ENDPOINT - Communication Service endpoint url
"""
import os
import asyncio
class DistributionPolicySamplesAsync(object):
    """Async CRUD samples for ACS JobRouter distribution policies.

    NOTE(review): `endpoint` holds an endpoint URL but is passed to the
    clients as `conn_str` -- confirm whether a full connection string is
    expected in the environment variable.
    """
    # Communication Service endpoint; required by every sample below.
    endpoint = os.environ.get("AZURE_COMMUNICATION_SERVICE_ENDPOINT", None)
    if not endpoint:
        raise ValueError("Set AZURE_COMMUNICATION_SERVICE_ENDPOINT env before run this sample.")
    # Policy id shared by all samples so they operate on the same resource.
    _dp_policy_id = "sample_dp_policy"
    async def create_distribution_policy(self):
        """Create the sample policy with a longest-idle distribution mode."""
        connection_string = self.endpoint
        policy_id = self._dp_policy_id
        # [START create_distribution_policy_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        from azure.communication.jobrouter import (
            DistributionPolicy,
            LongestIdleMode,
        )
        # set `connection_string` to an existing ACS endpoint
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        print("JobRouterAdministrationClient created successfully!")
        async with router_admin_client:
            distribution_policy: DistributionPolicy = await router_admin_client.create_distribution_policy(
                distribution_policy_id = policy_id,
                distribution_policy = DistributionPolicy(
                    offer_expires_after_seconds = 1 * 60,
                    mode = LongestIdleMode(
                        min_concurrent_offers = 1,
                        max_concurrent_offers = 1
                    )
                )
            )
            print(f"Distribution Policy successfully created with id: {distribution_policy.id}")
        # [END create_distribution_policy_async]
    async def update_distribution_policy(self):
        """Switch the sample policy's distribution mode to round-robin."""
        connection_string = self.endpoint
        policy_id = self._dp_policy_id
        # [START update_distribution_policy_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        from azure.communication.jobrouter import (
            DistributionPolicy,
            RoundRobinMode,
        )
        # set `connection_string` to an existing ACS endpoint
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        print("JobRouterAdministrationClient created successfully!")
        async with router_admin_client:
            updated_distribution_policy: DistributionPolicy = await router_admin_client.update_distribution_policy(
                distribution_policy_id = policy_id,
                mode = RoundRobinMode(
                    min_concurrent_offers = 1,
                    max_concurrent_offers = 1
                )
            )
            print(f"Distribution policy successfully update with new distribution mode")
        # [END update_distribution_policy_async]
    async def get_distribution_policy(self):
        """Fetch the sample policy by id."""
        connection_string = self.endpoint
        policy_id = self._dp_policy_id
        # [START get_distribution_policy_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        async with router_admin_client:
            distribution_policy = await router_admin_client.get_distribution_policy(distribution_policy_id = policy_id)
            print(f"Successfully fetched distribution policy with id: {distribution_policy.id}")
        # [END get_distribution_policy_async]
    async def list_distribution_policies(self):
        """Iterate over all distribution policies."""
        connection_string = self.endpoint
        # [START list_distribution_policies_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        async with router_admin_client:
            distribution_policy_iterator = router_admin_client.list_distribution_policies()
            async for dp in distribution_policy_iterator:
                print(f"Retrieved distribution policy with id: {dp.distribution_policy.id}")
            print(f"Successfully completed fetching distribution policies")
        # [END list_distribution_policies_async]
    async def list_distribution_policies_batched(self):
        """Iterate over distribution policies page by page (10 per page)."""
        connection_string = self.endpoint
        # [START list_distribution_policies_batched_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        async with router_admin_client:
            distribution_policy_iterator = router_admin_client.list_distribution_policies(results_per_page = 10)
            async for policy_page in distribution_policy_iterator.by_page():
                policies_in_page = [i async for i in policy_page]
                print(f"Retrieved {len(policies_in_page)} policies in current page")
                for dp in policies_in_page:
                    print(f"Retrieved distribution policy with id: {dp.distribution_policy.id}")
            print(f"Successfully completed fetching distribution policies")
        # [END list_distribution_policies_batched_async]
    async def clean_up(self):
        """Delete the sample policy created by these samples."""
        connection_string = self.endpoint
        policy_id = self._dp_policy_id
        # [START delete_distribution_policy_async]
        from azure.communication.jobrouter.aio import JobRouterAdministrationClient
        router_admin_client = JobRouterAdministrationClient.from_connection_string(conn_str = connection_string)
        async with router_admin_client:
            await router_admin_client.delete_distribution_policy(distribution_policy_id = policy_id)
        # [END delete_distribution_policy_async]
async def main():
    """Run every distribution-policy sample once, then delete the policy."""
    sample = DistributionPolicySamplesAsync()
    await sample.create_distribution_policy()
    await sample.update_distribution_policy()
    await sample.get_distribution_policy()
    await sample.list_distribution_policies()
    await sample.list_distribution_policies_batched()
    await sample.clean_up()
if __name__ == '__main__':
    # asyncio.run() owns event-loop creation and teardown; the previous
    # get_event_loop() + run_until_complete pattern is deprecated for this
    # use since Python 3.10.
    asyncio.run(main())
|
22,735 | 8aeee7d3f811f52bd6ddf2a141e997c8ed9979a3 | # Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from typing import List # noqa: F401
import pandas as pd
from ddt import data, ddt, unpack
from purplequery.bq_types import BQScalarType, TypedDataFrame
from purplequery.query_helper import apply_rule
from purplequery.statement_grammar import statement as statement_rule
from purplequery.statements import Statement
from purplequery.storage import DatasetTableContext
from purplequery.tokenizer import tokenize
@ddt
class StatementTest(unittest.TestCase):
    """Tests CREATE TABLE statement execution against a DatasetTableContext."""
    @data(
        dict(statement='CREATE TABLE project.dataset.table (a int64, b string);',
             already_exists=False),
        dict(statement='CREATE TABLE IF NOT EXISTS project.dataset.table (a int64, b string);',
             already_exists=False),
        dict(statement='CREATE OR REPLACE TABLE project.dataset.table (a int64, b string);',
             already_exists=True),
        dict(statement='CREATE OR REPLACE TABLE project.dataset.table (a int64, b string);',
             already_exists=False),
    )
    @unpack
    def test_create_table(self, statement, already_exists):
        # type: (str, bool) -> None
        """CREATE TABLE variants succeed and install the requested schema."""
        node, leftover = apply_rule(statement_rule, tokenize(statement))
        self.assertFalse(leftover)
        table_context = DatasetTableContext({'project': {'dataset': {}}})
        original_table = TypedDataFrame(pd.DataFrame([], columns=['x', 'y', 'z']),
                                        [BQScalarType.STRING, BQScalarType.INTEGER,
                                         BQScalarType.BOOLEAN])
        if already_exists:
            # Pre-populate so the OR REPLACE variant has something to replace.
            table_context.set(('project', 'dataset', 'table'), original_table)
        assert isinstance(node, Statement)
        result = node.execute(table_context)
        self.assertEqual(result.path, ('project', 'dataset', 'table'))
        table, unused_name = table_context.lookup(result.path)
        self.assertEqual(list(table.dataframe.columns), ['a', 'b'])
        self.assertEqual(table.types, [BQScalarType.INTEGER, BQScalarType.STRING])
    def test_create_table_already_exists(self):
        # type: () -> None
        """Plain CREATE TABLE on an existing table raises."""
        node, leftover = apply_rule(statement_rule, tokenize(
            'CREATE TABLE project.dataset.table (a int64, b string);'))
        self.assertFalse(leftover)
        table_context = DatasetTableContext({'project': {'dataset': {}}})
        original_table = TypedDataFrame(pd.DataFrame([], columns=['x', 'y', 'z']),
                                        [BQScalarType.STRING, BQScalarType.INTEGER,
                                         BQScalarType.BOOLEAN])
        table_context.set(('project', 'dataset', 'table'), original_table)
        assert isinstance(node, Statement)
        # NOTE(review): assertRaisesRegexp was removed in Python 3.12
        # (assertRaisesRegex is the Python 3 name); keep Regexp only if
        # Python 2 support is still required.
        with self.assertRaisesRegexp(ValueError, 'Already Exists'):
            node.execute(table_context)
        return
    def test_create_table_if_not_exists_and_it_does(self):
        # type: () -> None
        """IF NOT EXISTS leaves an existing table untouched."""
        node, leftover = apply_rule(statement_rule, tokenize(
            'CREATE TABLE IF NOT EXISTS project.dataset.table (a int64, b string);'))
        self.assertFalse(leftover)
        table_context = DatasetTableContext({'project': {'dataset': {}}})
        original_table = TypedDataFrame(pd.DataFrame([], columns=['x', 'y', 'z']),
                                        [BQScalarType.STRING, BQScalarType.INTEGER,
                                         BQScalarType.BOOLEAN])
        table_context.set(('project', 'dataset', 'table'), original_table)
        assert isinstance(node, Statement)
        result = node.execute(table_context)
        self.assertEqual(result.path, ('project', 'dataset', 'table'))
        table, unused_name = table_context.lookup(result.path)
        self.assertIs(table, original_table)
    @data(
        dict(statement=('CREATE TABLE project.dataset.table (a int64, b string) '
                        'as (select 1 as x, "hi" as y);'),
             columns=['a', 'b']),
        dict(statement='CREATE TABLE project.dataset.table as (select 1 as x, "hi" as y);',
             columns=['x', 'y']),
    )
    @unpack
    def test_create_table_with_select(self, statement, columns):
        # type: (str, List[str]) -> None
        """CTAS: explicit column list wins over the SELECT's column names."""
        node, leftover = apply_rule(statement_rule, tokenize(statement))
        self.assertFalse(leftover)
        table_context = DatasetTableContext({'project': {'dataset': {}}})
        assert isinstance(node, Statement)
        result = node.execute(table_context)
        self.assertEqual(result.path, ('project', 'dataset', 'table'))
        table, unused_name = table_context.lookup(result.path)
        self.assertEqual(list(table.dataframe.columns), columns)
        self.assertEqual(table.types, [BQScalarType.INTEGER, BQScalarType.STRING])
    @data(
        dict(query=('create table project.dataset.table (x string)'
                    ' as (select 1 as x)'),
             error='Cannot implicitly coerce the given types'),
        dict(query=('create table project.dataset.table (x int64)'
                    ' as (select 1.5 as x)'),
             error='data of more general type'),
    )
    @unpack
    def test_create_table_with_select_mismatched_types(self, query, error):
        # type: (str, str) -> None
        """CTAS with a schema the SELECT cannot coerce to raises."""
        node, leftover = apply_rule(statement_rule, tokenize(query))
        self.assertFalse(leftover)
        table_context = DatasetTableContext({'project': {'dataset': {}}})
        assert isinstance(node, Statement)
        with self.assertRaisesRegexp(ValueError, error):
            node.execute(table_context)
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
22,736 | 0487080ce09eaa555285256a449c48deef8676ac | #!/usr/bin/python
"""Slowly fades all outputs between black and white continuously.
This is a simple script to test whether the non-linear response to luminance
by the human eye is properly corrected for.
"""
__author__ = 'Elmer de Looff <elmer@underdark.nl>'
__version__ = '1.0'
# Standard modules
import time
# Custom modules
from lightbox import controller
from lightbox import utils
def Demo(controller_name, outputs):
  """Sweeping from low to high brightness and back, continuously.

  Args:
    controller_name: name of a class in the `controller` module.
    outputs: number of hardware outputs to drive.
  """
  # Instantiate the requested controller class on the first detected device.
  print 'Initiating controller %r ...\n' % controller_name
  box = getattr(controller, controller_name).FirstDevice(outputs=outputs)
  print '\nFade to white and back.'
  FadeOutputs(box, '#fff')
  FadeOutputs(box, '#000')
  print 'Fade to a random color and back to black, ad nauseum.'
  # Endless demo loop; interrupted via KeyboardInterrupt caught in main().
  while True:
    FadeOutputs(box, utils.RandomColor())
    FadeOutputs(box, '#000')
def FadeOutputs(box, color, steps=50):
  """Fades all outputs to the given color and waits for it to complete."""
  for output in box:
    output.Fade(color=color, steps=steps)
  # Wait for the fade to finish: `steps` commands at the controller
  # frequency shared over all outputs -- assumes box.frequency is
  # commands/second across len(box) outputs; confirm in the controller docs.
  time.sleep(steps / (float(box.frequency) / len(box)))
def main():
  """Processes commandline input to setup the demo."""
  import optparse
  import sys
  # -c selects the controller class, -o the number of outputs to use.
  parser = optparse.OptionParser()
  parser.add_option('-c', '--controller', default='NewController',
                    help='Controller class to instantiate.')
  parser.add_option('-o', '--outputs', type='int', default=5,
                    help='Number of outputs to use on the hardware.')
  options, _arguments = parser.parse_args()
  try:
    Demo(options.controller, options.outputs)
  except controller.ConnectionError:
    # No matching hardware found on any port.
    sys.exit('ABORT: Could not find a suitable device.')
if __name__ == '__main__':
  try:
    main()
  except KeyboardInterrupt:
    # Ctrl-C is the expected way to stop the endless demo loop.
    print '\nEnd of demonstration.'
|
22,737 | d175e9e08d63b29909516e61e5519c415cde05fc | # adding extra info to file
# Append a user-supplied line of text to the "text" file.
# Fixes: the original shadowed the builtin `str` with the input value and
# relied on a manual close(), which is skipped if write() raises.
text_to_add = input("enter a info to add:")
with open("text", "a") as f:
    f.write(text_to_add)
22,738 | 449dfd648dd72e6e50f8227bf8a07804cd95e766 | from flask import Flask, request, jsonify
from query_handler import query_handler
from util_handler import util_handler
from sentiment_analysis import sentiment_analysis
from multiprocessing import Process
app = Flask(__name__)
# Make the WSGI interface available at the top level so wfastcgi can get it.
wsgi_app = app.wsgi_app
@app.route('/authtoken', methods=[ 'POST' ])
def AuthToken():
    """Issue an access token for valid credentials.

    Expects a JSON body with `username`, `password` and optional `validfor`
    (token lifetime, defaults to 60 -- unit not stated here, TODO confirm).
    On success returns {"result": {"AccessToken": <token>}}; on any failure
    returns a plain error string with HTTP 200.
    """
    obj = {}
    try:
        body = request.get_json(force=True)
        username = str(body.get('username', ''))
        password = str(body.get('password', ''))
        valid_for = int(body.get('validfor', 60))
        if (username == '' or password == ''):
            return("Credentials are empty or missing")
        else:
            obj_query = query_handler()
            query = obj_query.auth(username,password)
            obj_util = util_handler()
            rows = obj_util.execute(query,False)
            result = obj_util.convert_data_to_json(rows)
            # COUNT == 0 means no row matched the credentials.
            if (int(result[0]['COUNT']) == 0):
                return("Invalid Credentials")
            else:
                token = obj_util.GetAuthToken(username, password, valid_for)
                obj["result"] = {"AccessToken" : token}
    except Exception as e:
        # NOTE(review): returning str(e) leaks internal details to the client;
        # consider a generic error response with a logged traceback.
        return(str(e))
    return jsonify(obj)
@app.route('/trainsentiments', methods=[ 'POST' ])
def TrainSentiments():
    """Kick off sentiment-model training in a background process.

    Requires a `Token` request header previously issued by /authtoken; the
    token's embedded credentials and expiry are re-validated against the DB
    before the training process is spawned. Returns immediately.
    """
    obj = {}
    try:
        token = request.headers.get('Token','')
        if (token != ''):
            obj_util = util_handler()
            obj_query = query_handler()
            # Token decodes to (username, password, ..., expiry) -- index 3 is
            # validated below; layout defined by util_handler.ExtractAuthToken.
            cred = obj_util.ExtractAuthToken(token)
            if (obj_util.DateValidation(cred[3]) == False):
                return("Token Expired ")
            query = obj_query.auth(cred[0],cred[1])
            rows = obj_util.execute(query,False)
            result = obj_util.convert_data_to_json(rows)
            if (int(result[0]['COUNT']) == 0):
                return("Access Token is Invalid. Please pass {Token: '<Valid Token>'} ")
            else:
                # Training runs in a separate process so the request returns fast.
                sa = sentiment_analysis()
                training_task = Process(target=sa.process_review_train)
                training_task.start()
                # NOTE(review): "traing" typo in the user-facing message below.
                obj["result"] = "Process for traing sentiments has started."
        else:
            return("Access Token is missing is the header. Please pass Token: '<Valid Token>' ")
    except Exception as e:
        return(str(e))
    return jsonify(obj)
@app.route('/predictsentiment', methods=[ 'POST' ])
def PredictSentiment():
    """Predict the sentiment of a single review text.

    Requires a `Token` request header issued by /authtoken; the token's
    embedded credentials and expiry are re-validated against the DB.
    Expects a JSON body {"review": "<text>"} and returns
    {"result": <classification>} on success, or a plain error string.
    """
    obj = {}
    try:
        token = request.headers.get('Token','')
        if (token != ''):
            obj_util = util_handler()
            obj_query = query_handler()
            cred = obj_util.ExtractAuthToken(token)
            if (obj_util.DateValidation(cred[3]) == False):
                return("Token Expired ")
            query = obj_query.auth(cred[0],cred[1])
            rows = obj_util.execute(query,False)
            result = obj_util.convert_data_to_json(rows)
            if (int(result[0]['COUNT']) == 0):
                return("Access Token is Invalid. Please pass {Token: '<Valid Token>'} ")
            else:
                body = request.get_json(force=True)
                review = str(body.get('review', ''))
                if(review != ''):
                    sa = sentiment_analysis()
                    result = sa.predict_classification(review)
                    obj["result"] = result
                else:
                    # BUG FIX: an empty review previously fell through and the
                    # route answered with an empty JSON object; report it.
                    return("Review text is missing. Please pass {review: '<text>'} ")
        else:
            return("Access Token is missing is the header. Please pass Token: '<Valid Token>' ")
    except Exception as e:
        return(str(e))
    return jsonify(obj)
@app.route('/marksentiments', methods=[ 'POST' ])
def MarkSentiments():
    """Kick off sentiment marking in a background process.

    Same token-validation flow as /trainsentiments: the `Token` header is
    decoded, its expiry checked, and its credentials re-verified against the
    DB before spawning the worker process. Returns immediately.
    """
    obj = {}
    try:
        token = request.headers.get('Token','')
        if (token != ''):
            obj_util = util_handler()
            obj_query = query_handler()
            cred = obj_util.ExtractAuthToken(token)
            if (obj_util.DateValidation(cred[3]) == False):
                return("Token Expired ")
            query = obj_query.auth(cred[0],cred[1])
            rows = obj_util.execute(query,False)
            result = obj_util.convert_data_to_json(rows)
            if (int(result[0]['COUNT']) == 0):
                return("Access Token is Invalid. Please pass {Token: '<Valid Token>'} ")
            else:
                # Marking runs in a separate process so the request returns fast.
                sa = sentiment_analysis()
                training_task = Process(target=sa.mark_sentiment)
                training_task.start()
                obj["result"] = "Process to mark sentiments has started."
        else:
            return("Access Token is missing is the header. Please pass Token: '<Valid Token>' ")
    except Exception as e:
        return(str(e))
    return jsonify(obj)
if __name__ == '__main__':
    import os
    # Host/port come from the environment with localhost:5555 as the
    # development default.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # A non-numeric SERVER_PORT falls back to the default port.
        PORT = 5555
    app.run(HOST, PORT)
|
22,739 | e0cb7df21c942aaed375f5532e250b0426c58603 | from ddt import data, ddt
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status, test
from waldur_core.core import utils as core_utils
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures as structure_fixtures
from waldur_mastermind.common.mixins import UnitPriceMixin
from waldur_mastermind.common.utils import parse_date, parse_datetime
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.invoices import tasks as invoices_tasks
from waldur_mastermind.marketplace import models, tasks, utils
from waldur_mastermind.marketplace.tests import factories, fixtures
from waldur_mastermind.marketplace_openstack import TENANT_TYPE
from waldur_mastermind.marketplace_support import PLUGIN_NAME
class StatsBaseTest(test.APITransactionTestCase):
    """Shared fixture for the stats tests: a project, a category with one
    component, and an active OpenStack-tenant offering whose 'cores'
    component is limit-billed and rolls up into the category component."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.customer = self.fixture.customer
        self.project = self.fixture.project
        self.category = factories.CategoryFactory()
        self.category_component = factories.CategoryComponentFactory(
            category=self.category
        )
        self.offering = factories.OfferingFactory(
            category=self.category,
            type=TENANT_TYPE,
            state=models.Offering.States.ACTIVE,
        )
        # 'cores' aggregates into the category component via `parent`.
        self.offering_component = factories.OfferingComponentFactory(
            offering=self.offering,
            parent=self.category_component,
            type='cores',
            billing_type=models.OfferingComponent.BillingTypes.LIMIT,
        )
@freeze_time('2019-01-22')
class StatsTest(StatsBaseTest):
def setUp(self):
super().setUp()
self.date = parse_date('2019-01-01')
self.plan = factories.PlanFactory(offering=self.offering)
self.plan_component = factories.PlanComponentFactory(
plan=self.plan, component=self.offering_component, amount=10
)
self.resource = factories.ResourceFactory(
project=self.project, offering=self.offering, plan=self.plan
)
def test_reported_usage_is_aggregated_for_project_and_customer(self):
# Arrange
plan_period = models.ResourcePlanPeriod.objects.create(
start=parse_datetime('2019-01-01'),
resource=self.resource,
plan=self.plan,
)
models.ComponentUsage.objects.create(
resource=self.resource,
component=self.offering_component,
date=parse_date('2019-01-10'),
billing_period=parse_date('2019-01-01'),
plan_period=plan_period,
usage=100,
)
self.new_resource = factories.ResourceFactory(
project=self.project, offering=self.offering, plan=self.plan
)
new_plan_period = models.ResourcePlanPeriod.objects.create(
start=parse_date('2019-01-01'),
resource=self.new_resource,
plan=self.plan,
)
models.ComponentUsage.objects.create(
resource=self.resource,
component=self.offering_component,
date=parse_date('2019-01-20'),
billing_period=parse_date('2019-01-01'),
plan_period=new_plan_period,
usage=200,
)
# Act
tasks.calculate_usage_for_current_month()
# Assert
project_usage = (
models.CategoryComponentUsage.objects.filter(
scope=self.project, component=self.category_component, date=self.date
)
.get()
.reported_usage
)
customer_usage = (
models.CategoryComponentUsage.objects.filter(
scope=self.customer, component=self.category_component, date=self.date
)
.get()
.reported_usage
)
self.assertEqual(project_usage, 300)
self.assertEqual(customer_usage, 300)
def test_fixed_usage_is_aggregated_for_project_and_customer(self):
# Arrange
models.ResourcePlanPeriod.objects.create(
resource=self.resource,
plan=self.plan,
start=parse_date('2019-01-10'),
end=parse_date('2019-01-20'),
)
# Act
tasks.calculate_usage_for_current_month()
# Assert
project_usage = (
models.CategoryComponentUsage.objects.filter(
scope=self.project,
component=self.category_component,
date=self.date,
)
.get()
.fixed_usage
)
customer_usage = (
models.CategoryComponentUsage.objects.filter(
scope=self.customer, component=self.category_component, date=self.date
)
.get()
.fixed_usage
)
self.assertEqual(project_usage, self.plan_component.amount)
self.assertEqual(customer_usage, self.plan_component.amount)
def test_offering_customers_stats(self):
url = factories.OfferingFactory.get_url(self.offering, action='customers')
self.client.force_authenticate(self.fixture.staff)
result = self.client.get(url)
self.assertEqual(result.status_code, status.HTTP_200_OK)
self.assertEqual(len(result.data), 1)
self.assertEqual(
result.data[0]['uuid'], self.resource.project.customer.uuid.hex
)
@freeze_time('2020-01-01')
class CostsStatsTest(StatsBaseTest):
    """Billing totals exposed via the offering 'costs' action."""

    def setUp(self):
        super().setUp()
        self.url = factories.OfferingFactory.get_url(self.offering, action='costs')
        # Per-day plan: the January 2020 invoice total is price * 31 days.
        self.plan = factories.PlanFactory(
            offering=self.offering,
            unit=UnitPriceMixin.Units.PER_DAY,
        )
        self.plan_component = factories.PlanComponentFactory(
            plan=self.plan, component=self.offering_component, amount=10
        )
        self.resource = factories.ResourceFactory(
            offering=self.offering,
            state=models.Resource.States.OK,
            plan=self.plan,
            limits={'cores': 1},
        )
        invoices_tasks.create_monthly_invoices()

    def test_offering_costs_stats(self):
        with freeze_time('2020-03-01'):
            self._check_stats()

    def test_period_filter(self):
        self.client.force_authenticate(self.fixture.staff)
        result = self.client.get(self.url, {'other_param': ''})
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        # Passing 'start' without 'end' is rejected with 400.
        result = self.client.get(self.url, {'start': '2020-01'})
        self.assertEqual(result.status_code, status.HTTP_400_BAD_REQUEST)

    def test_offering_costs_stats_if_resource_has_been_failed(self):
        # Erred resources must still appear in historical cost stats.
        with freeze_time('2020-03-01'):
            self.resource.state = models.Resource.States.ERRED
            self.resource.save()
            self._check_stats()

    def _check_stats(self):
        # Verify the January 2020 invoice line for the per-day, 1-core plan.
        self.client.force_authenticate(self.fixture.staff)
        result = self.client.get(self.url, {'start': '2020-01', 'end': '2020-02'})
        self.assertEqual(result.status_code, status.HTTP_200_OK)
        self.assertDictEqual(
            result.data[0],
            {
                'tax': 0,
                'total': self.plan_component.price * 31,
                'price': self.plan_component.price * 31,
                'period': '2020-01',
            },
        )

    def test_stat_methods_are_not_available_for_anonymous_users(self):
        result = self.client.get(self.url)
        self.assertEqual(result.status_code, status.HTTP_401_UNAUTHORIZED)
        customers_url = factories.OfferingFactory.get_url(
            self.offering, action='customers'
        )
        result = self.client.get(customers_url)
        self.assertEqual(result.status_code, status.HTTP_401_UNAUTHORIZED)
@freeze_time('2020-03-01')
class ComponentStatsTest(StatsBaseTest):
def setUp(self):
super().setUp()
self.url = factories.OfferingFactory.get_url(
self.offering, action='component_stats'
)
self.plan = factories.PlanFactory(
offering=self.offering,
unit=UnitPriceMixin.Units.PER_DAY,
)
self.plan_component = factories.PlanComponentFactory(
plan=self.plan, component=self.offering_component, amount=10
)
self.resource = factories.ResourceFactory(
offering=self.offering,
state=models.Resource.States.OK,
plan=self.plan,
limits={'cores': 1},
)
def _create_items(self):
invoices_tasks.create_monthly_invoices()
invoice = invoices_models.Invoice.objects.get(
year=2020, month=3, customer=self.resource.project.customer
)
return invoice.items.filter(resource_id=self.resource.id)
def test_item_details(self):
sp = factories.ServiceProviderFactory(customer=self.resource.offering.customer)
component = factories.OfferingComponentFactory(
offering=self.resource.offering,
billing_type=models.OfferingComponent.BillingTypes.LIMIT,
type='storage',
)
factories.ComponentUsageFactory(
resource=self.resource,
billing_period=core_utils.month_start(timezone.now()),
component=component,
)
item = self._create_items().first()
self.assertDictEqual(
item.details,
{
'resource_name': item.resource.name,
'resource_uuid': item.resource.uuid.hex,
'service_provider_name': self.resource.offering.customer.name,
'service_provider_uuid': sp.uuid.hex,
'offering_name': self.offering.name,
'offering_type': TENANT_TYPE,
'offering_uuid': self.offering.uuid.hex,
'plan_name': self.resource.plan.name,
'plan_uuid': self.resource.plan.uuid.hex,
'plan_component_id': self.plan_component.id,
'offering_component_type': self.plan_component.component.type,
'offering_component_name': self.plan_component.component.name,
'resource_limit_periods': [
{
'end': '2020-03-31T23:59:59.999999+00:00',
'start': '2020-03-01T00:00:00+00:00',
'total': '31',
'quantity': 1,
'billing_periods': 31,
}
],
},
)
def test_component_stats_if_invoice_item_details_includes_plan_component_data(
self,
):
self.resource.offering.type = PLUGIN_NAME
self.resource.offering.save()
self.offering_component.billing_type = (
models.OfferingComponent.BillingTypes.FIXED
)
self.offering_component.save()
self._create_items()
self.client.force_authenticate(self.fixture.staff)
result = self.client.get(self.url, {'start': '2020-03', 'end': '2020-03'})
self.assertEqual(
result.data,
[
{
'description': self.offering_component.description,
'measured_unit': self.offering_component.measured_unit,
'name': self.offering_component.name,
'period': '2020-03',
'date': '2020-03-31T00:00:00+00:00',
'type': self.offering_component.type,
'usage': 31,
}
],
)
def test_handler(self):
self.resource.offering.type = PLUGIN_NAME
self.resource.offering.save()
# add usage-based component to the offering and plan
COMPONENT_TYPE = 'storage'
new_component = factories.OfferingComponentFactory(
offering=self.resource.offering,
billing_type=models.OfferingComponent.BillingTypes.USAGE,
type=COMPONENT_TYPE,
)
factories.PlanComponentFactory(
plan=self.plan,
component=new_component,
)
self._create_items()
plan_period = factories.ResourcePlanPeriodFactory(
resource=self.resource,
plan=self.plan,
start=core_utils.month_start(timezone.now()),
)
factories.ComponentUsageFactory(
resource=self.resource,
date=timezone.now(),
billing_period=core_utils.month_start(timezone.now()),
component=new_component,
plan_period=plan_period,
usage=2,
)
self.client.force_authenticate(self.fixture.staff)
result = self.client.get(self.url, {'start': '2020-03', 'end': '2020-03'})
component_cores = self.resource.offering.components.get(type='cores')
component_storage = self.resource.offering.components.get(type='storage')
self.assertEqual(len(result.data), 2)
self.assertEqual(
[r for r in result.data if r['type'] == component_cores.type][0],
{
'description': component_cores.description,
'measured_unit': component_cores.measured_unit,
'name': component_cores.name,
'period': '2020-03',
'date': '2020-03-31T00:00:00+00:00',
'type': component_cores.type,
'usage': 31, # days in March of 1 core usage with per-day plan
},
)
self.assertEqual(
[r for r in result.data if r['type'] == component_storage.type][0],
{
'description': component_storage.description,
'measured_unit': component_storage.measured_unit,
'name': component_storage.name,
'period': '2020-03',
'date': '2020-03-31T00:00:00+00:00',
'type': component_storage.type,
'usage': 2,
},
)
@ddt
class CustomerStatsTest(test.APITransactionTestCase):
    """Access control for the customer-member-count stats endpoint."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()

    @data(
        'staff',
        'global_support',
    )
    def test_user_can_get_marketplace_stats(self, user):
        # Only staff and global support may read aggregate stats.
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get('/api/marketplace-stats/customer_member_count/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    @data('owner', 'user', 'customer_support', 'admin', 'manager')
    def test_user_cannot_get_marketplace_stats(self, user):
        # Every other role gets 403.
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get('/api/marketplace-stats/customer_member_count/')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class LimitsStatsTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.MarketplaceFixture()
self.resource_1 = factories.ResourceFactory(
limits={'cpu': 5}, state=models.Resource.States.OK
)
factories.ResourceFactory(
limits={'cpu': 2},
state=models.Resource.States.OK,
offering=self.resource_1.offering,
)
self.resource_2 = factories.ResourceFactory(
limits={'cpu': 10, 'ram': 1}, state=models.Resource.States.OK
)
self.url = '/api/marketplace-stats/resources_limits/'
self.division_1 = structure_factories.DivisionFactory()
self.division_2 = structure_factories.DivisionFactory()
self.resource_1.offering.divisions.add(self.division_1, self.division_2)
self.resource_1.offering.country = 'EE'
self.resource_1.offering.save()
self.resource_2.offering.customer.country = 'FI'
self.resource_2.offering.customer.save()
@data(
# skipping because it is not stable now 'staff',
'global_support',
)
def test_user_can_get_marketplace_stats(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
len(response.data),
4,
)
self.assertTrue(
{
'offering_uuid': self.resource_1.offering.uuid,
'name': 'cpu',
'value': 7,
'offering_country': 'EE',
'division_name': self.division_1.name,
'division_uuid': self.division_1.uuid.hex,
}
in response.data,
)
self.assertTrue(
{
'offering_uuid': self.resource_1.offering.uuid,
'name': 'cpu',
'value': 7,
'offering_country': 'EE',
'division_name': self.division_2.name,
'division_uuid': self.division_2.uuid.hex,
}
in response.data,
)
self.assertTrue(
{
'offering_uuid': self.resource_2.offering.uuid,
'name': 'cpu',
'value': 10,
'offering_country': 'FI',
'division_name': '',
'division_uuid': '',
}
in response.data,
)
self.assertTrue(
{
'offering_uuid': self.resource_2.offering.uuid,
'name': 'ram',
'value': 1,
'offering_country': 'FI',
'division_name': '',
'division_uuid': '',
}
in response.data,
)
@data('owner', 'user', 'customer_support', 'admin', 'manager')
def test_user_cannot_get_marketplace_stats(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class CountUsersOfServiceProviderTest(test.APITransactionTestCase):
    """Access control for the count-users-of-service-providers endpoint."""

    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()
        self.url = '/api/marketplace-stats/count_users_of_service_providers/'

    @data(
        'staff',
        'global_support',
    )
    def test_user_can_get_marketplace_stats(self, user):
        # The fixture contains a single service provider, hence one row.
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)

    @data('owner', 'user', 'customer_support', 'admin', 'manager')
    def test_user_cannot_get_marketplace_stats(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class CountProjectsGroupedByOecdOfServiceProviderTest(test.APITransactionTestCase):
    """Access control for the projects-grouped-by-OECD stats endpoint."""

    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()
        self.url = '/api/marketplace-stats/count_projects_of_service_providers_grouped_by_oecd/'

    @data(
        'staff',
        'global_support',
    )
    def test_user_can_get_marketplace_stats(self, user):
        # One service provider in the fixture -> one aggregated row.
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)

    @data('owner', 'user', 'customer_support', 'admin', 'manager')
    def test_user_cannot_get_marketplace_stats(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class CountUniqueUsersConnectedWithActiveResourcesOfServiceProviderTest(
    test.APITransactionTestCase
):
    """Unique-user counting considers only service providers with OK resources."""

    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()
        self.url = '/api/marketplace-stats/count_unique_users_connected_with_active_resources_of_service_provider/'

    @data(
        'staff',
        'global_support',
    )
    def test_user_can_get_marketplace_stats(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # No active (OK) resources yet -> no rows.
        self.assertEqual(len(response.data), 0)
        self.fixture.resource.state = models.Resource.States.OK
        self.fixture.resource.save()
        response = self.client.get(self.url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['count_users'], 0)
        # Accessing the fixture property presumably creates the corresponding
        # project user as a side effect -- each access adds one to the count.
        self.fixture.admin
        response = self.client.get(self.url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['count_users'], 1)
        self.fixture.member
        response = self.client.get(self.url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['count_users'], 2)
        self.fixture.manager
        response = self.client.get(self.url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['count_users'], 3)

    @data('owner', 'user', 'customer_support', 'admin', 'manager')
    def test_user_cannot_get_marketplace_stats(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class CountCustomersTest(test.APITransactionTestCase):
@freeze_time('2020-01-01')
def setUp(self):
self.fixture = fixtures.MarketplaceFixture()
self.service_provider = self.fixture.service_provider
self.fixture.resource.set_state_terminated()
self.fixture.resource.save()
def _create_resource(self, project=None):
if project:
order = factories.OrderFactory(
state=models.Order.States.DONE,
project=project,
)
else:
order = factories.OrderFactory(
state=models.Order.States.DONE,
)
resource = factories.ResourceFactory(
offering=self.fixture.offering, project=order.project
)
factories.OrderItemFactory(
offering=self.fixture.offering,
order=order,
resource=resource,
type=models.OrderItem.Types.CREATE,
)
return resource
def _terminate_resource(self, resource):
order = factories.OrderFactory(state=models.Order.States.DONE)
factories.OrderItemFactory(
offering=self.fixture.offering,
order=order,
resource=resource,
type=models.OrderItem.Types.TERMINATE,
)
resource.state = models.Resource.States.TERMINATED
return resource.save()
def test_count_customers_number_change(self):
with freeze_time('2022-01-10'):
self.assertEqual(
0, utils.count_customers_number_change(self.service_provider)
)
new_resource = self._create_resource()
self.assertEqual(
1, utils.count_customers_number_change(self.service_provider)
)
self._terminate_resource(new_resource)
self.assertEqual(
0, utils.count_customers_number_change(self.service_provider)
)
resource_1 = self._create_resource()
resource_2 = self._create_resource()
self.assertEqual(
2, utils.count_customers_number_change(self.service_provider)
)
with freeze_time('2022-02-10'):
self.assertEqual(
0, utils.count_customers_number_change(self.service_provider)
)
self._terminate_resource(resource_1)
self.assertEqual(
-1, utils.count_customers_number_change(self.service_provider)
)
self._create_resource(project=resource_2.project)
self.assertEqual(
-1, utils.count_customers_number_change(self.service_provider)
)
with freeze_time('2022-03-10'):
self.assertEqual(
0, utils.count_customers_number_change(self.service_provider)
)
self._create_resource(project=new_resource.project)
self.assertEqual(
1, utils.count_customers_number_change(self.service_provider)
)
def test_count_resources_number_change(self):
with freeze_time('2022-01-10'):
self.assertEqual(
0, utils.count_resources_number_change(self.service_provider)
)
new_resource = self._create_resource()
self.assertEqual(
1, utils.count_resources_number_change(self.service_provider)
)
self._terminate_resource(new_resource)
self.assertEqual(
0, utils.count_resources_number_change(self.service_provider)
)
resource_1 = self._create_resource()
resource_2 = self._create_resource()
self.assertEqual(
2, utils.count_resources_number_change(self.service_provider)
)
with freeze_time('2022-02-10'):
self.assertEqual(
0, utils.count_resources_number_change(self.service_provider)
)
self._terminate_resource(resource_1)
self.assertEqual(
-1, utils.count_resources_number_change(self.service_provider)
)
self._create_resource(project=resource_2.project)
self.assertEqual(
0, utils.count_resources_number_change(self.service_provider)
)
with freeze_time('2022-03-10'):
self.assertEqual(
0, utils.count_resources_number_change(self.service_provider)
)
self._create_resource(project=new_resource.project)
self.assertEqual(
1, utils.count_resources_number_change(self.service_provider)
)
class OfferingStatsTest(test.APITransactionTestCase):
    """Resource/customer counters exposed via the offering 'stats' action."""

    @freeze_time('2020-01-01')
    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()
        self.offering = self.fixture.offering
        self.url = factories.OfferingFactory.get_url(self.offering, 'stats')

    def test_offering_stats(self):
        self.client.force_authenticate(self.fixture.offering_owner)
        response = self.client.get(self.url)
        # Fixture starts with one resource in one customer project.
        self.assertEqual(response.data['resources_count'], 1)
        self.assertEqual(response.data['customers_count'], 1)
        # A resource in a new project raises both counters.
        new_resource = factories.ResourceFactory(offering=self.offering)
        response = self.client.get(self.url)
        self.assertEqual(response.data['resources_count'], 2)
        self.assertEqual(response.data['customers_count'], 2)
        # Terminated resources are excluded from both counters.
        new_resource.state = models.Resource.States.TERMINATED
        new_resource.save()
        response = self.client.get(self.url)
        self.assertEqual(response.data['resources_count'], 1)
        self.assertEqual(response.data['customers_count'], 1)
        # A second resource in the same project counts the customer only once.
        factories.ResourceFactory(offering=self.offering, project=self.fixture.project)
        response = self.client.get(self.url)
        self.assertEqual(response.data['resources_count'], 2)
        self.assertEqual(response.data['customers_count'], 1)
|
22,740 | 49f660c9a7e4a0e23c940eeccd7d16c600579663 | from os import path
import pytest
from autoconf import conf
import autofit as af
from autofit.non_linear.samples import MCMCSamples, Sample
from autofit.mock.mock import MockClassx4
directory = path.dirname(path.realpath(__file__))
pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.fixture(name="samples")
def make_samples():
    """Build an MCMCSamples fixture: five samples over a 4-parameter model.

    The fourth sample (parameters 21..24) carries the highest likelihood
    (10.0); all priors are zero and all weights are one, so posteriors equal
    likelihoods.
    """
    model = af.ModelMapper(mock_class_1=MockClassx4)
    # One parameter row per sample; kept as an explicit table so the expected
    # values in the tests below stay easy to read.
    parameters = [
        [0.0, 1.0, 2.0, 3.0],
        [0.0, 1.0, 2.0, 3.0],
        [0.0, 1.0, 2.0, 3.0],
        [21.0, 22.0, 23.0, 24.0],
        [0.0, 1.0, 2.0, 3.0],
    ]
    log_likelihoods = [1.0, 2.0, 3.0, 10.0, 5.0]
    # Build one Sample per row instead of repeating the literal five times
    # (also drops a stray debug print of model.path_priors_tuples).
    samples = [
        Sample(
            log_likelihood=log_likelihood,
            log_prior=0.0,
            weights=1.0,
            mock_class_1_one=one,
            mock_class_1_two=two,
            mock_class_1_three=three,
            mock_class_1_four=four,
        )
        for (one, two, three, four), log_likelihood in zip(
            parameters, log_likelihoods
        )
    ]
    return MCMCSamples(
        model=model,
        samples=samples,
        auto_correlation_times=1,
        auto_correlation_check_size=2,
        auto_correlation_required_length=3,
        auto_correlation_change_threshold=4,
        total_walkers=5,
        total_steps=6,
        time=7,
    )
@pytest.fixture(autouse=True)
def set_config_path():
    """Point autoconf at the emcee test config/output dirs for every test."""
    conf.instance.push(
        path.join(directory, "files", "emcee", "config"),
        output_path=path.join(directory, "files", "emcee", "output"),
    )
class TestJsonCSV:
    """Round-trip MCMCSamples through the on-disk CSV table + JSON info."""

    def test__from_csv_table_and_json_info(self, samples):
        mcmc = af.Emcee()
        # Persist the fixture's samples, then reload them through the search.
        samples.write_table(filename=path.join(mcmc.paths.samples_path, "samples.csv"))
        samples.info_to_json(filename=path.join(mcmc.paths.samples_path, "info.json"))
        model = af.ModelMapper(mock_class_1=MockClassx4)
        samples = mcmc.samples_via_csv_json_from_model(model=model)
        # Values must match the fixture in tests/.../make_samples exactly.
        assert samples.parameters == [
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [21.0, 22.0, 23.0, 24.0],
            [0.0, 1.0, 2.0, 3.0],
        ]
        assert samples.log_likelihoods == [1.0, 2.0, 3.0, 10.0, 5.0]
        assert samples.log_priors == [0.0, 0.0, 0.0, 0.0, 0.0]
        # Posterior = likelihood + prior; priors are all zero here.
        assert samples.log_posteriors == [1.0, 2.0, 3.0, 10.0, 5.0]
        assert samples.weights == [1.0, 1.0, 1.0, 1.0, 1.0]
        # assert samples.auto_correlation_times == None
        assert samples.auto_correlation_check_size == 2
        assert samples.auto_correlation_required_length == 3
        assert samples.auto_correlation_change_threshold == 4
        assert samples.total_walkers == 5
        assert samples.total_steps == 6
        assert samples.time == 7
|
22,741 | 84b8835db865eaea2e5081295304305916c27b2a | import json
import os
from xml.etree import ElementTree as ET
import pyorc
SOURCE_FILE = 'data/paris/2.250182,48.818215,2.251182,48.819215.osm'
ORC_FILE = 'Orc/output/nodes.orc'
ORC_SNAPPY_FILE = 'Orc/output/snappy_nodes.orc'
ORC_ZLIB_FILE = 'Orc/output/zlib_nodes.orc'
JSON_FILE = 'Orc/output/nodes.json'

# Define data schema
schema = "struct<id:int,longitude:float,latitude:float,username:string>"

# Collect one (id, lon, lat, user) tuple per <node> element of the OSM file.
# NOTE(review): the open() handle passed to ET.parse is never closed
# explicitly; consider a `with` block.
nodes = []
tree = ET.parse(open(SOURCE_FILE))
for node in tree.iterfind('node'):
    nodes.append((
        int(node.get('id')),
        float(node.get('lon')),
        float(node.get('lat')),
        node.get('user')  # may be None if the node carries no user -- TODO confirm
    ))

# Write the rows once uncompressed...
with open(ORC_FILE, "wb") as data:
    with pyorc.Writer(data, schema, compression=pyorc.CompressionKind.NONE) as writer:
        for node in nodes:
            writer.write(node)

## Looks like SNAPPY and LZO compression aren't supported by ORC yet?
#
# with open(ORC_SNAPPY_FILE, "wb") as data:
#     with pyorc.Writer(data, schema, compression=pyorc.CompressionKind.SNAPPY) as writer:
#         for node in nodes:
#             writer.write(node)
##

# ...and once more with ZLIB compression, for the size comparison below.
with open(ORC_ZLIB_FILE, "wb") as data:
    with pyorc.Writer(data, schema, compression=pyorc.CompressionKind.ZLIB) as writer:
        for node in nodes:
            writer.write(node)

# do the same with JSON format (for comparison)
with open(JSON_FILE, 'w') as json_file:
    json.dump(nodes, json_file)
# Compare the size of the file formats
def print_file_size(file_path):
    """Print the size in bytes of the file at `file_path`."""
    size_in_bytes = os.stat(file_path).st_size
    print(f'Size of file {file_path} is {size_in_bytes}')
# Report the resulting file sizes so the formats can be compared by eye.
print('Comparison of the file size of the different formats and compression algorithms:')
print_file_size(ORC_FILE)
# print_file_size(ORC_SNAPPY_FILE)
print_file_size(ORC_ZLIB_FILE)
print_file_size(JSON_FILE)
|
22,742 | 6eb5bb0fd4fd96e7b174cc6bb60d861ab612afe7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#TODO: change name => "stream" is not appropriate
class DirectoryStream(object):
    """Aggregates EventStreams rooted at one directory.

    Tracks the earliest (`first`) and latest (`last`) event over all added
    streams. After `compute_stats()` has run, aggregate good/failed event
    durations, inter-event gaps (diffs) and totals are available.
    """

    def __init__(self, root):
        self._root = root
        self._event_streams = list()
        # Earliest/latest event across all streams; set once the first
        # non-empty EventStream is added.
        self._first = None
        self._last = None
        # Becomes True after the first call to `compute_stats`.
        self._has_stats = False

    def add(self, event_stream):
        """Append an EventStream, widening the [first, last] window as needed."""
        # Track first/last elements (streams with first == None are appended
        # but do not affect the window).
        if event_stream.first is not None:
            if self.first is None:
                self._first = event_stream.first
                self._last = event_stream.last
            else:
                # BUG FIX: these two checks were previously chained with
                # `elif`, so a stream that both started earlier AND ended
                # later only widened `first` and left `last` stale.
                if event_stream.first < self.first:
                    self._first = event_stream.first
                if self.last < event_stream.last:
                    self._last = event_stream.last
        self._event_streams.append(event_stream)

    @property
    def empty(self):
        # True until a stream with events has been added.
        return self._first is None or self._last is None

    @property
    def root(self):
        return self._root

    @property
    def event_streams(self):
        return self._event_streams

    @property
    def first(self):
        return self._first

    @property
    def last(self):
        return self._last

    @property
    def has_stats(self):
        return self._has_stats

    @property
    def good_diff(self):
        return self._good_diff

    @property
    def failed_diff(self):
        return self._failed_diff

    @property
    def good_duration(self):
        return self._good_duration

    @property
    def failed_duration(self):
        return self._failed_duration

    @property
    def good_total(self):
        return self._good_total

    @property
    def fail_total(self):
        return self._fail_total

    def compute_stats(self):
        """Aggregate per-stream statistics over all added streams.

        Populates good/failed duration lists, good/failed diff (gap) lists
        and good/fail totals; only valid after the streams themselves have
        computed (and thereby sorted) their stats.
        """
        self._good_diff = list()
        self._good_duration = list()
        self._failed_diff = list()
        self._failed_duration = list()
        self._good_total = 0
        self._fail_total = 0
        for es in self.event_streams:
            # Compute the event-stream level statistics (this also sorts the
            # event stream, which the diff collection below relies on).
            es.compute_stats()
            # Collect per-event durations bucketed by success.
            for ev in es:
                if ev.ok:
                    self._good_duration.append(ev.duration)
                else:
                    self._failed_duration.append(ev.duration)
            # Collect diffs (gaps between consecutive events); a gap counts
            # as good only if both neighbouring events are ok. NOTE that this
            # only works on a sorted event stream.
            for i in range(len(es) - 1):
                if es[i].ok and es[i + 1].ok:
                    self._good_diff.append(es.diff[i])
                else:
                    self._failed_diff.append(es.diff[i])
            self._good_total += es.good_total
            self._fail_total += es.fail_total
        self._has_stats = True

    def __repr__(self):
        n_events = len(self.event_streams)
        str_repr = f"DirectoryStream({self.root}):"
        str_repr += f"\n +-> containing {n_events} EventStreams"
        str_repr += f"\n |=> first Event:"
        str_repr += f"{self.first}"
        return str_repr

    def __getitem__(self, key):
        return self._event_streams[key]
|
22,743 | 8c421e07fd8c6e640b3175d898e09586f16dc5bf | #
#Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
#
NumberOfHitsInBAInnerPerMultiLayer_ADCCut_labelx=['BIA1', 'BIA2', 'BIA3', 'BIA4', 'BIA5', 'BIA6', 'BIA7', 'BIA8']
NumberOfHitsInBAInnerPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1 R', '11 2 R', '11 1 M', '11 2 M', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1 R', '15 2 R', '15 1 M', '15 2 M', '16 1', '16 2']
NumberOfHitsInBAMiddlePerMultiLayer_ADCCut_labelx=['BMA1', 'BMA2', 'BMA3', 'BMA4', 'BMA5', 'BMA6']
NumberOfHitsInBAMiddlePerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInBAOuterPerMultiLayer_ADCCut_labelx=['BOB0', 'BOA1', 'BOA2', 'BOA3', 'BOA4', 'BOA5', 'BOA6', 'BOA7', 'BOA8']
NumberOfHitsInBAOuterPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInBAExtraPerMultiLayer_ADCCut_labelx=['BEA1', 'BEA2']
NumberOfHitsInBAExtraPerMultiLayer_ADCCut_labely=['02 1', '04 1', '06 1', '08 1', '10 1', '12 1', '14 1', '16 1']
NumberOfHitsInBCInnerPerMultiLayer_ADCCut_labelx=['BIC8', 'BIC7', 'BIC6', 'BIC5', 'BIC4', 'BIC3', 'BIC2', 'BIC1']
NumberOfHitsInBCInnerPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1 R', '11 2 R', '11 1 M', '11 2 M', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1 R', '15 2 R', '15 1 M', '15 2 M', '16 1', '16 2']
NumberOfHitsInBCMiddlePerMultiLayer_ADCCut_labelx=['BMC6', 'BMC5', 'BMC4', 'BMC3', 'BMC2', 'BMC1']
NumberOfHitsInBCMiddlePerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInBCOuterPerMultiLayer_ADCCut_labelx=['BOC8', 'BOC7', 'BOC6', 'BOC5', 'BOC4', 'BOC3', 'BOC2', 'BOC1']
NumberOfHitsInBCOuterPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInBCExtraPerMultiLayer_ADCCut_labelx=['BEC2', 'BEC1']
NumberOfHitsInBCExtraPerMultiLayer_ADCCut_labely=['02 1', '04 1', '06 1', '08 1', '10 1', '12 1', '14 1', '16 1']
NumberOfHitsInEAInnerPerMultiLayer_ADCCut_labelx=['EIA1', 'EIA2', 'EIA3', 'EIA4', 'EIA5']
NumberOfHitsInEAInnerPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInEAMiddlePerMultiLayer_ADCCut_labelx=['EMA1', 'EMA2', 'EMA3', 'EMA4', 'EMA5']
NumberOfHitsInEAMiddlePerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInEAOuterPerMultiLayer_ADCCut_labelx=['EOA1', 'EOA2', 'EOA3', 'EOA4', 'EOA5', 'EOA6']
NumberOfHitsInEAOuterPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInEAExtraPerMultiLayer_ADCCut_labelx=['EEA1', 'EEA2']
NumberOfHitsInEAExtraPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInECInnerPerMultiLayer_ADCCut_labelx=['EIC5', 'EIC4', 'EIC3', 'EIC2', 'EIC1']
NumberOfHitsInECInnerPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInECMiddlePerMultiLayer_ADCCut_labelx=['EMC5', 'EMC4', 'EMC3', 'EMC2', 'EMC1']
NumberOfHitsInECMiddlePerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInECOuterPerMultiLayer_ADCCut_labelx=['EOC6', 'EOC5', 'EOC4', 'EOC3', 'EOC2', 'EOC1']
NumberOfHitsInECOuterPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInECExtraPerMultiLayer_ADCCut_labelx=['EEC2', 'EEC1']
NumberOfHitsInECExtraPerMultiLayer_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInBarrelPerChamber_ADCCut_labelx= ['BC8', 'BC7', 'BC6', 'BC5', 'BC4', 'BC3', 'BC2', 'BC1', 'BB0', 'BA1', 'BA2', 'BA3', 'BA4', 'BA5', 'BA6', 'BA7', 'BA8']
NumberOfHitsInBarrelPerChamber_ADCCut_labely=['E 02', 'E 04', 'E 06', 'E 08', 'E 10', 'E 12', 'E 14', 'E 16', 'I 01', 'I 02', 'I 03', 'I 04', 'I 05', 'I 06', 'I 07', 'I 08', 'I 09', 'I 10', 'I 11 M', 'I 11 R', 'I 12', 'I 13', 'I 14', 'I 15 M', 'I 15 R', 'I 16', 'M 01', 'M 02', 'M 03', 'M 04', 'M 05', 'M 06', 'M 07', 'M 08', 'M 09', 'M 10', 'M 11', 'M 12', 'M 13', 'M 14', 'M 15', 'M 16', 'O 01', 'O 02', 'O 03', 'O 04', 'O 05', 'O 06', 'O 07', 'O 08', 'O 09', 'O 10', 'O 11', 'O 12', 'O 13', 'O 14', 'O 15', 'O 16']
NumberOfHitsInEndCapPerChamber_ADCCut_labelx=['EC6', 'EC5', 'EC4', 'EC3', 'EC2', 'EC1', 'EA1', 'EA2', 'EA3', 'EA4', 'EA5', 'EA6']
NumberOfHitsInEndCapPerChamber_ADCCut_labely=['E 01', 'E 02', 'E 03', 'E 04', 'E 05', 'E 06', 'E 07', 'E 08', 'E 09', 'E 10', 'E 11', 'E 12', 'E 13', 'E 14', 'E 15', 'E 16', 'I 01', 'I 02', 'I 03', 'I 04', 'I 05', 'I 06', 'I 07', 'I 08', 'I 09', 'I 10', 'I 11', 'I 12', 'I 13', 'I 14', 'I 15', 'I 16', 'M 01', 'M 02', 'M 03', 'M 04', 'M 05', 'M 06', 'M 07', 'M 08', 'M 09', 'M 10', 'M 11', 'M 12', 'M 13', 'M 14', 'M 15', 'M 16', 'O 01', 'O 02', 'O 03', 'O 04', 'O 05', 'O 06', 'O 07', 'O 08', 'O 09', 'O 10', 'O 11', 'O 12', 'O 13', 'O 14', 'O 15', 'O 16']
NumberOfHitsInMDTInner_ADCCut_labelx=['EIC5', 'EIC4', 'EIC3', 'EIC2', 'EIC1', '', 'EEC2', 'EEC1', 'BEC2', 'BEC1', '', 'BIC8', 'BIC7', 'BIC6', 'BIC5', 'BIC4', 'BIC3', 'BIC2', 'BIC1', 'BIA1', 'BIA2', 'BIA3', 'BIA4', 'BIA5', 'BIA6', 'BIA7', 'BIA8', '', 'BEA1', 'BEA2', 'EEA1', 'EEA2', '', 'EIA1', 'EIA2', 'EIA3', 'EIA4', 'EIA5']
NumberOfHitsInMDTInner_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1 R', '11 2 R', '11 1 M', '11 2 M', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1 R', '15 2 R', '15 1 M', '15 2 M', '16 1', '16 2']
NumberOfHitsInMDTMiddle_ADCCut_labelx=['EMC5', 'EMC4', 'EMC3', 'EMC2', 'EMC1', '', 'BMC6', 'BMC5', 'BMC4', 'BMC3', 'BMC2', 'BMC1', 'BMA1', 'BMA2', 'BMA3', 'BMA4', 'BMA5', 'BMA6', '', 'EMA1', 'EMA2', 'EMA3', 'EMA4', 'EMA5']
NumberOfHitsInMDTMiddle_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
NumberOfHitsInMDTOuter_ADCCut_labelx=['EOC6', 'EOC5', 'EOC4', 'EOC3', 'EOC2', 'EOC1', '', 'BOC8', 'BOC7', 'BOC6', 'BOC5', 'BOC4', 'BOC3', 'BOC2', 'BOC1', 'BOB0', 'BOA1', 'BOA2', 'BOA3', 'BOA4', 'BOA5', 'BOA6', 'BOA7', 'BOA8', '', 'EOA1', 'EOA2', 'EOA3', 'EOA4', 'EOA5', 'EOA6']
NumberOfHitsInMDTOuter_ADCCut_labely=['01 1', '01 2', '02 1', '02 2', '03 1', '03 2', '04 1', '04 2', '05 1', '05 2', '06 1', '06 2', '07 1', '07 2', '08 1', '08 2', '09 1', '09 2', '10 1', '10 2', '11 1', '11 2', '12 1', '12 2', '13 1', '13 2', '14 1', '14 2', '15 1', '15 2', '16 1', '16 2']
labels_sectorPhi01=['BIL1A01', 'BIL1C01', 'BIL2A01', 'BIL2C01', 'BIL3A01', 'BIL3C01', 'BIL4A01', 'BIL4C01', 'BIL5A01', 'BIL5C01', 'BIL6A01', 'BIL6C01', 'BML1A01', 'BML1C01', 'BML2A01', 'BML2C01', 'BML3A01', 'BML3C01', 'BML4A01', 'BML4C01', 'BML5A01', 'BML5C01', 'BML6A01', 'BML6C01', 'BOL1A01', 'BOL1C01', 'BOL2A01', 'BOL2C01', 'BOL3A01', 'BOL3C01', 'BOL4A01', 'BOL4C01', 'BOL5A01', 'BOL5C01', 'BOL6A01', 'BOL6C01', 'EEL1A01', 'EEL1C01', 'EEL2A01', 'EEL2C01', 'EIL1A01', 'EIL1C01', 'EIL2A01', 'EIL2C01', 'EIL3A01', 'EIL3C01', 'EIL4A01', 'EIL4C01', 'EIL5A01', 'EIL5C01', 'EML1A01', 'EML1C01', 'EML2A01', 'EML2C01', 'EML3A01', 'EML3C01', 'EML4A01', 'EML4C01', 'EML5A01', 'EML5C01', 'EOL1A01', 'EOL1C01', 'EOL2A01', 'EOL2C01', 'EOL3A01', 'EOL3C01', 'EOL4A01', 'EOL4C01', 'EOL5A01', 'EOL5C01', 'EOL6A01', 'EOL6C01']
labels_sectorPhi02=['BEE1A02', 'BEE1C02', 'BEE2A02', 'BEE2C02', 'BIS1A02', 'BIS1C02', 'BIS2A02', 'BIS2C02', 'BIS3A02', 'BIS3C02', 'BIS4A02', 'BIS4C02', 'BIS5A02', 'BIS5C02', 'BIS6A02', 'BIS6C02', 'BIS7A02', 'BIS7C02', 'BIS8A02', 'BIS8C02', 'BMS1A02', 'BMS1C02', 'BMS2A02', 'BMS2C02', 'BMS3A02', 'BMS3C02', 'BMS4A02', 'BMS4C02', 'BMS5A02', 'BMS5C02', 'BMS6A02', 'BMS6C02', 'BOS1A02', 'BOS1C02', 'BOS2A02', 'BOS2C02', 'BOS3A02', 'BOS3C02', 'BOS4A02', 'BOS4C02', 'BOS5A02', 'BOS5C02', 'BOS6A02', 'BOS6C02', 'EES1A02', 'EES1C02', 'EES2A02', 'EES2C02', 'EIS1A02', 'EIS1C02', 'EIS2A02', 'EIS2C02', 'EMS1A02', 'EMS1C02', 'EMS2A02', 'EMS2C02', 'EMS3A02', 'EMS3C02', 'EMS4A02', 'EMS4C02', 'EMS5A02', 'EMS5C02', 'EOS1A02', 'EOS1C02', 'EOS2A02', 'EOS2C02', 'EOS3A02', 'EOS3C02', 'EOS4A02', 'EOS4C02', 'EOS5A02', 'EOS5C02', 'EOS6A02', 'EOS6C02']
labels_sectorPhi03=['BIL1A03', 'BIL1C03', 'BIL2A03', 'BIL2C03', 'BIL3A03', 'BIL3C03', 'BIL4A03', 'BIL4C03', 'BIL5A03', 'BIL5C03', 'BIL6A03', 'BIL6C03', 'BML1A03', 'BML1C03', 'BML2A03', 'BML2C03', 'BML3A03', 'BML3C03', 'BML4A03', 'BML4C03', 'BML5A03', 'BML5C03', 'BML6A03', 'BML6C03', 'BOL1A03', 'BOL1C03', 'BOL2A03', 'BOL2C03', 'BOL3A03', 'BOL3C03', 'BOL4A03', 'BOL4C03', 'BOL5A03', 'BOL5C03', 'BOL6A03', 'BOL6C03', 'EEL1A03', 'EEL1C03', 'EEL2A03', 'EEL2C03', 'EIL1A03', 'EIL1C03', 'EIL2A03', 'EIL2C03', 'EIL3A03', 'EIL3C03', 'EIL4A03', 'EIL4C03', 'EML1A03', 'EML1C03', 'EML2A03', 'EML2C03', 'EML3A03', 'EML3C03', 'EML4A03', 'EML4C03', 'EML5A03', 'EML5C03', 'EOL1A03', 'EOL1C03', 'EOL2A03', 'EOL2C03', 'EOL3A03', 'EOL3C03', 'EOL4A03', 'EOL4C03', 'EOL5A03', 'EOL5C03', 'EOL6A03', 'EOL6C03']
labels_sectorPhi04=['BEE1A04', 'BEE1C04', 'BEE2A04', 'BEE2C04', 'BIS1A04', 'BIS1C04', 'BIS2A04', 'BIS2C04', 'BIS3A04', 'BIS3C04', 'BIS4A04', 'BIS4C04', 'BIS5A04', 'BIS5C04', 'BIS6A04', 'BIS6C04', 'BIS7A04', 'BIS7C04', 'BIS8A04', 'BIS8C04', 'BMS1A04', 'BMS1C04', 'BMS2A04', 'BMS2C04', 'BMS3A04', 'BMS3C04', 'BMS4A04', 'BMS4C04', 'BMS5A04', 'BMS5C04', 'BMS6A04', 'BMS6C04', 'BOS1A04', 'BOS1C04', 'BOS2A04', 'BOS2C04', 'BOS3A04', 'BOS3C04', 'BOS4A04', 'BOS4C04', 'BOS5A04', 'BOS5C04', 'BOS6A04', 'BOS6C04', 'EES1A04', 'EES1C04', 'EES2A04', 'EES2C04', 'EIS1A04', 'EIS1C04', 'EIS2A04', 'EIS2C04', 'EMS1A04', 'EMS1C04', 'EMS2A04', 'EMS2C04', 'EMS3A04', 'EMS3C04', 'EMS4A04', 'EMS4C04', 'EMS5A04', 'EMS5C04', 'EOS1A04', 'EOS1C04', 'EOS2A04', 'EOS2C04', 'EOS3A04', 'EOS3C04', 'EOS4A04', 'EOS4C04', 'EOS5A04', 'EOS5C04', 'EOS6A04', 'EOS6C04']
labels_sectorPhi05=['BIL1A05', 'BIL1C05', 'BIL2A05', 'BIL2C05', 'BIL3A05', 'BIL3C05', 'BIL4A05', 'BIL4C05', 'BIL5A05', 'BIL5C05', 'BIL6A05', 'BIL6C05', 'BML1A05', 'BML1C05', 'BML2A05', 'BML2C05', 'BML3A05', 'BML3C05', 'BML4A05', 'BML4C05', 'BML5A05', 'BML5C05', 'BML6A05', 'BML6C05', 'BOL1A05', 'BOL1C05', 'BOL2A05', 'BOL2C05', 'BOL3A05', 'BOL3C05', 'BOL4A05', 'BOL4C05', 'BOL5A05', 'BOL5C05', 'BOL6A05', 'BOL6C05', 'EEL1A05', 'EEL1C05', 'EIL1A05', 'EIL1C05', 'EIL2A05', 'EIL2C05', 'EIL3A05', 'EIL3C05', 'EIL4A05', 'EIL4C05', 'EML1A05', 'EML1C05', 'EML2A05', 'EML2C05', 'EML3A05', 'EML3C05', 'EML4A05', 'EML4C05', 'EML5A05', 'EML5C05', 'EOL1A05', 'EOL1C05', 'EOL2A05', 'EOL2C05', 'EOL3A05', 'EOL3C05', 'EOL4A05', 'EOL4C05', 'EOL5A05', 'EOL5C05', 'EOL6A05', 'EOL6C05']
labels_sectorPhi06=['BEE1A06', 'BEE1C06', 'BEE2A06', 'BEE2C06', 'BIS1A06', 'BIS1C06', 'BIS2A06', 'BIS2C06', 'BIS3A06', 'BIS3C06', 'BIS4A06', 'BIS4C06', 'BIS5A06', 'BIS5C06', 'BIS6A06', 'BIS6C06', 'BIS7A06', 'BIS7C06', 'BIS8A06', 'BIS8C06', 'BMS1A06', 'BMS1C06', 'BMS2A06', 'BMS2C06', 'BMS3A06', 'BMS3C06', 'BMS4A06', 'BMS4C06', 'BMS5A06', 'BMS5C06', 'BMS6A06', 'BMS6C06', 'BOS1A06', 'BOS1C06', 'BOS2A06', 'BOS2C06', 'BOS3A06', 'BOS3C06', 'BOS4A06', 'BOS4C06', 'BOS5A06', 'BOS5C06', 'BOS6A06', 'BOS6C06', 'EES1A06', 'EES1C06', 'EES2A06', 'EES2C06', 'EIS1A06', 'EIS1C06', 'EIS2A06', 'EIS2C06', 'EMS1A06', 'EMS1C06', 'EMS2A06', 'EMS2C06', 'EMS3A06', 'EMS3C06', 'EMS4A06', 'EMS4C06', 'EMS5A06', 'EMS5C06', 'EOS1A06', 'EOS1C06', 'EOS2A06', 'EOS2C06', 'EOS3A06', 'EOS3C06', 'EOS4A06', 'EOS4C06', 'EOS5A06', 'EOS5C06', 'EOS6A06', 'EOS6C06']
labels_sectorPhi07=['BIL1A07', 'BIL1C07', 'BIL2A07', 'BIL2C07', 'BIL3A07', 'BIL3C07', 'BIL4A07', 'BIL4C07', 'BIL5A07', 'BIL5C07', 'BIL6A07', 'BIL6C07', 'BML1A07', 'BML1C07', 'BML2A07', 'BML2C07', 'BML3A07', 'BML3C07', 'BML4A07', 'BML4C07', 'BML5A07', 'BML5C07', 'BML6A07', 'BML6C07', 'BOL1A07', 'BOL1C07', 'BOL2A07', 'BOL2C07', 'BOL3A07', 'BOL3C07', 'BOL4A07', 'BOL4C07', 'BOL5A07', 'BOL5C07', 'BOL6A07', 'BOL6C07', 'EEL1A07', 'EEL1C07', 'EEL2A07', 'EEL2C07', 'EIL1A07', 'EIL1C07', 'EIL2A07', 'EIL2C07', 'EIL3A07', 'EIL3C07', 'EIL4A07', 'EIL4C07', 'EML1A07', 'EML1C07', 'EML2A07', 'EML2C07', 'EML3A07', 'EML3C07', 'EML4A07', 'EML4C07', 'EML5A07', 'EML5C07', 'EOL1A07', 'EOL1C07', 'EOL2A07', 'EOL2C07', 'EOL3A07', 'EOL3C07', 'EOL4A07', 'EOL4C07', 'EOL5A07', 'EOL5C07', 'EOL6A07', 'EOL6C07']
labels_sectorPhi08=['BEE1A08', 'BEE1C08', 'BEE2A08', 'BEE2C08', 'BIS1A08', 'BIS1C08', 'BIS2A08', 'BIS2C08', 'BIS3A08', 'BIS3C08', 'BIS4A08', 'BIS4C08', 'BIS5A08', 'BIS5C08', 'BIS6A08', 'BIS6C08', 'BIS7A08', 'BIS7C08', 'BIS8A08', 'BIS8C08', 'BMS1A08', 'BMS1C08', 'BMS2A08', 'BMS2C08', 'BMS3A08', 'BMS3C08', 'BMS4A08', 'BMS4C08', 'BMS5A08', 'BMS5C08', 'BMS6A08', 'BMS6C08', 'BOS1A08', 'BOS1C08', 'BOS2A08', 'BOS2C08', 'BOS3A08', 'BOS3C08', 'BOS4A08', 'BOS4C08', 'BOS5A08', 'BOS5C08', 'BOS6A08', 'BOS6C08', 'EES1A08', 'EES1C08', 'EES2A08', 'EES2C08', 'EIS1A08', 'EIS1C08', 'EIS2A08', 'EIS2C08', 'EMS1A08', 'EMS1C08', 'EMS2A08', 'EMS2C08', 'EMS3A08', 'EMS3C08', 'EMS4A08', 'EMS4C08', 'EMS5A08', 'EMS5C08', 'EOS1A08', 'EOS1C08', 'EOS2A08', 'EOS2C08', 'EOS3A08', 'EOS3C08', 'EOS4A08', 'EOS4C08', 'EOS5A08', 'EOS5C08', 'EOS6A08', 'EOS6C08']
labels_sectorPhi09=['BIL1A09', 'BIL1C09', 'BIL2A09', 'BIL2C09', 'BIL3A09', 'BIL3C09', 'BIL4A09', 'BIL4C09', 'BIL5A09', 'BIL5C09', 'BIL6A09', 'BIL6C09', 'BML1A09', 'BML1C09', 'BML2A09', 'BML2C09', 'BML3A09', 'BML3C09', 'BML4A09', 'BML4C09', 'BML5A09', 'BML5C09', 'BML6A09', 'BML6C09', 'BOL1A09', 'BOL1C09', 'BOL2A09', 'BOL2C09', 'BOL3A09', 'BOL3C09', 'BOL4A09', 'BOL4C09', 'BOL5A09', 'BOL5C09', 'BOL6A09', 'BOL6C09', 'EEL1A09', 'EEL1C09', 'EEL2A09', 'EEL2C09', 'EIL1A09', 'EIL1C09', 'EIL2A09', 'EIL2C09', 'EIL3A09', 'EIL3C09', 'EIL4A09', 'EIL4C09', 'EIL5A09', 'EIL5C09', 'EML1A09', 'EML1C09', 'EML2A09', 'EML2C09', 'EML3A09', 'EML3C09', 'EML4A09', 'EML4C09', 'EML5A09', 'EML5C09', 'EOL1A09', 'EOL1C09', 'EOL2A09', 'EOL2C09', 'EOL3A09', 'EOL3C09', 'EOL4A09', 'EOL4C09', 'EOL5A09', 'EOL5C09', 'EOL6A09', 'EOL6C09']
labels_sectorPhi10=['BEE1A10', 'BEE1C10', 'BEE2A10', 'BEE2C10', 'BIS1A10', 'BIS1C10', 'BIS2A10', 'BIS2C10', 'BIS3A10', 'BIS3C10', 'BIS4A10', 'BIS4C10', 'BIS5A10', 'BIS5C10', 'BIS6A10', 'BIS6C10', 'BIS7A10', 'BIS7C10', 'BIS8A10', 'BIS8C10', 'BMS1A10', 'BMS1C10', 'BMS2A10', 'BMS2C10', 'BMS3A10', 'BMS3C10', 'BMS4A10', 'BMS4C10', 'BMS5A10', 'BMS5C10', 'BMS6A10', 'BMS6C10', 'BOS1A10', 'BOS1C10', 'BOS2A10', 'BOS2C10', 'BOS3A10', 'BOS3C10', 'BOS4A10', 'BOS4C10', 'BOS5A10', 'BOS5C10', 'BOS6A10', 'BOS6C10', 'EES1A10', 'EES1C10', 'EES2A10', 'EES2C10', 'EIS1A10', 'EIS1C10', 'EIS2A10', 'EIS2C10', 'EMS1A10', 'EMS1C10', 'EMS2A10', 'EMS2C10', 'EMS3A10', 'EMS3C10', 'EMS4A10', 'EMS4C10', 'EMS5A10', 'EMS5C10', 'EOS1A10', 'EOS1C10', 'EOS2A10', 'EOS2C10', 'EOS3A10', 'EOS3C10', 'EOS4A10', 'EOS4C10', 'EOS5A10', 'EOS5C10', 'EOS6A10', 'EOS6C10']
labels_sectorPhi11=['BIM1A11', 'BIM1C11', 'BIM2A11', 'BIM2C11', 'BIM3A11', 'BIM3C11', 'BIM4A11', 'BIM4C11', 'BIM5A11', 'BIM5C11', 'BIR1A11', 'BIR1C11', 'BIR2A11', 'BIR2C11', 'BIR3A11', 'BIR3C11', 'BIR4A11', 'BIR4C11', 'BIR5A11', 'BIR5C11', 'BIR6A11', 'BIR6C11', 'BML1A11', 'BML1C11', 'BML2A11', 'BML2C11', 'BML3A11', 'BML3C11', 'BML4A11', 'BML4C11', 'BML5A11', 'BML5C11', 'BML6A11', 'BML6C11', 'BOL1A11', 'BOL1C11', 'BOL2A11', 'BOL2C11', 'BOL3A11', 'BOL3C11', 'BOL4A11', 'BOL4C11', 'BOL5A11', 'BOL5C11', 'BOL6A11', 'BOL6C11', 'EEL1A11', 'EEL1C11', 'EEL2A11', 'EEL2C11', 'EIL1A11', 'EIL1C11', 'EIL2A11', 'EIL2C11', 'EIL3A11', 'EIL3C11', 'EIL4A11', 'EIL4C11', 'EML1A11', 'EML1C11', 'EML2A11', 'EML2C11', 'EML3A11', 'EML3C11', 'EML4A11', 'EML4C11', 'EML5A11', 'EML5C11', 'EOL1A11', 'EOL1C11', 'EOL2A11', 'EOL2C11', 'EOL3A11', 'EOL3C11', 'EOL4A11', 'EOL4C11', 'EOL5A11', 'EOL5C11', 'EOL6A11', 'EOL6C11']
labels_sectorPhi12=['BEE1A12', 'BEE1C12', 'BEE2A12', 'BEE2C12', 'BIS1A12', 'BIS1C12', 'BIS2A12', 'BIS3A12', 'BIS3C12', 'BIS4A12', 'BIS4C12', 'BIS5A12', 'BIS5C12', 'BIS6A12', 'BIS6C12', 'BIS7A12', 'BIS7C12', 'BIS8A12', 'BIS8C12', 'BMF1A12', 'BMF1C12', 'BMF2A12', 'BMF2C12', 'BMF3A12', 'BMF3C12', 'BMG2A12', 'BMG2C12', 'BMG4A12', 'BMG4C12', 'BMG6A12', 'BMG6C12', 'BOF1A12', 'BOF1C12', 'BOF3A12', 'BOF3C12', 'BOF5A12', 'BOF5C12', 'BOF7A12', 'BOF7C12', 'BOG0B12', 'BOG2C12', 'BOG4A12', 'BOG4C12', 'BOG6A12', 'BOG6C12', 'BOG8A12', 'BOG8C12', 'EES1A12', 'EES1C12', 'EES2A12', 'EES2C12', 'EIS1A12', 'EIS1C12', 'EIS2A12', 'EIS2C12', 'EMS1A12', 'EMS1C12', 'EMS2A12', 'EMS2C12', 'EMS3A12', 'EMS3C12', 'EMS4A12', 'EMS4C12', 'EMS5A12', 'EMS5C12', 'EOS1A12', 'EOS1C12', 'EOS2A12', 'EOS2C12', 'EOS3A12', 'EOS3C12', 'EOS4A12', 'EOS4C12', 'EOS5A12', 'EOS5C12', 'EOS6A12', 'EOS6C12', 'BIS2C12', 'BOG2A12']
labels_sectorPhi13=['BIL1A13', 'BIL1C13', 'BIL2A13', 'BIL2C13', 'BIL3A13', 'BIL3C13', 'BIL4A13', 'BIL4C13', 'BIL5A13', 'BIL5C13', 'BIL6A13', 'BIL6C13', 'BML1A13', 'BML1C13', 'BML2A13', 'BML2C13', 'BML3A13', 'BML3C13', 'BML4A13', 'BML4C13', 'BML5A13', 'BML5C13', 'BOL1A13', 'BOL1C13', 'BOL2A13', 'BOL2C13', 'BOL3A13', 'BOL3C13', 'BOL4A13', 'BOL4C13', 'BOL5A13', 'BOL5C13', 'BOL6A13', 'BOL6C13', 'BOL7A13', 'BOL7C13', 'EEL1A13', 'EEL1C13', 'EEL2A13', 'EEL2C13', 'EIL1A13', 'EIL1C13', 'EIL2A13', 'EIL2C13', 'EIL3A13', 'EIL3C13', 'EIL4A13', 'EIL4C13', 'EML1A13', 'EML1C13', 'EML2A13', 'EML2C13', 'EML3A13', 'EML3C13', 'EML4A13', 'EML4C13', 'EML5A13', 'EML5C13', 'EOL1A13', 'EOL1C13', 'EOL2A13', 'EOL2C13', 'EOL3A13', 'EOL3C13', 'EOL4A13', 'EOL4C13', 'EOL5A13', 'EOL5C13', 'EOL6A13', 'EOL6C13']
labels_sectorPhi14=['BEE1A14', 'BEE1C14', 'BEE2A14', 'BEE2C14', 'BIS1A14', 'BIS1C14', 'BIS2A14', 'BIS2C14', 'BIS3A14', 'BIS3C14', 'BIS4A14', 'BIS4C14', 'BIS5A14', 'BIS5C14', 'BIS6A14', 'BIS6C14', 'BIS7A14', 'BIS7C14', 'BIS8A14', 'BIS8C14', 'BME1A14', 'BME1C14', 'BMF1A14', 'BMF1C14', 'BMF2A14', 'BMF2C14', 'BMF3A14', 'BMF3C14', 'BMG2A14', 'BMG2C14', 'BMG4A14', 'BMG4C14', 'BMG6A14', 'BMG6C14', 'BOF1A14', 'BOF1C14', 'BOF3A14', 'BOF3C14', 'BOF5A14', 'BOF5C14', 'BOF7A14', 'BOF7C14', 'BOG0B14', 'BOG2A14', 'BOG2C14', 'BOG4A14', 'BOG4C14', 'BOG6A14', 'BOG6C14', 'BOG8A14', 'BOG8C14', 'EES1A14', 'EES1C14', 'EES2A14', 'EES2C14', 'EIS1A14', 'EIS1C14', 'EIS2A14', 'EIS2C14', 'EMS1A14', 'EMS1C14', 'EMS2A14', 'EMS2C14', 'EMS3A14', 'EMS3C14', 'EMS4A14', 'EMS4C14', 'EMS5A14', 'EMS5C14', 'EOS1A14', 'EOS1C14', 'EOS2A14', 'EOS2C14', 'EOS3A14', 'EOS3C14', 'EOS4A14', 'EOS4C14', 'EOS5A14', 'EOS5C14', 'EOS6A14', 'EOS6C14']
labels_sectorPhi15=['BIM1A15', 'BIM1C15', 'BIM2A15', 'BIM2C15', 'BIM3A15', 'BIM3C15', 'BIM4A15', 'BIM4C15', 'BIM5A15', 'BIM5C15', 'BIR1A15', 'BIR1C15', 'BIR2A15', 'BIR2C15', 'BIR3A15', 'BIR3C15', 'BIR4A15', 'BIR4C15', 'BIR5A15', 'BIR5C15', 'BIR6A15', 'BIR6C15', 'BML1A15', 'BML1C15', 'BML2A15', 'BML2C15', 'BML3A15', 'BML3C15', 'BML4A15', 'BML4C15', 'BML5A15', 'BML5C15', 'BML6A15', 'BML6C15', 'BOL1A15', 'BOL1C15', 'BOL2A15', 'BOL2C15', 'BOL3A15', 'BOL3C15', 'BOL4A15', 'BOL4C15', 'BOL5A15', 'BOL5C15', 'BOL6A15', 'BOL6C15', 'EEL1A15', 'EEL1C15', 'EEL2A15', 'EEL2C15', 'EIL1A15', 'EIL1C15', 'EIL2A15', 'EIL2C15', 'EIL3A15', 'EIL3C15', 'EIL4A15', 'EIL4C15', 'EML1A15', 'EML1C15', 'EML2A15', 'EML2C15', 'EML3A15', 'EML3C15', 'EML4A15', 'EML4C15', 'EML5A15', 'EML5C15', 'EOL1A15', 'EOL1C15', 'EOL2A15', 'EOL2C15', 'EOL3A15', 'EOL3C15', 'EOL4A15', 'EOL4C15', 'EOL5A15', 'EOL5C15', 'EOL6A15', 'EOL6C15']
labels_sectorPhi16=['BEE1A16', 'BEE1C16', 'BEE2A16', 'BEE2C16', 'BIS1A16', 'BIS1C16', 'BIS2A16', 'BIS2C16', 'BIS3A16', 'BIS3C16', 'BIS4A16', 'BIS4C16', 'BIS5A16', 'BIS5C16', 'BIS6A16', 'BIS6C16', 'BIS7A16', 'BIS7C16', 'BIS8A16', 'BIS8C16', 'BMS1A16', 'BMS1C16', 'BMS2A16', 'BMS2C16', 'BMS3A16', 'BMS3C16', 'BMS4A16', 'BMS4C16', 'BMS5A16', 'BMS5C16', 'BMS6A16', 'BMS6C16', 'BOS1A16', 'BOS1C16', 'BOS2A16', 'BOS2C16', 'BOS3A16', 'BOS3C16', 'BOS4A16', 'BOS4C16', 'BOS5A16', 'BOS5C16', 'BOS6A16', 'BOS6C16', 'EES1A16', 'EES1C16', 'EES2A16', 'EES2C16', 'EIS1A16', 'EIS1C16', 'EIS2A16', 'EIS2C16', 'EMS1A16', 'EMS1C16', 'EMS2A16', 'EMS2C16', 'EMS3A16', 'EMS3C16', 'EMS4A16', 'EMS4C16', 'EMS5A16', 'EMS5C16', 'EOS1A16', 'EOS1C16', 'EOS2A16', 'EOS2C16', 'EOS3A16', 'EOS3C16', 'EOS4A16', 'EOS4C16', 'EOS5A16', 'EOS5C16', 'EOS6A16', 'EOS6C16']
# One-element spacer list: the derived labelY_* lists below repeat it
# (label_empty*N) to pad a sparse y-axis so only the first bin of each
# chamber-type group carries a visible label.
label_empty = ['']
labelY_OccupancyVsLB_BA01 = ['BIL']+label_empty*11+['BIS']+label_empty*11+['BME', 'BML']+label_empty*11+['BMS']+label_empty*11+['BOL']+label_empty*11+['BOS']+label_empty*11
labelY_OccupancyVsLB_BC01 = labelY_OccupancyVsLB_BA01
labelY_OccupancyVsLB_BA02 = ['BIL']+label_empty*11+['BIS']+label_empty*11+['BML']+label_empty*11+['BMS']+label_empty*11+['BOL']+label_empty*11+['BOS']+label_empty*11
labelY_OccupancyVsLB_BC02 = labelY_OccupancyVsLB_BA02
labelY_OccupancyVsLB_BA03 = ['BIL']+label_empty*5+['BIM']+label_empty*4+['BIR']+label_empty*5+['BIS']+label_empty*11+['BMF']+label_empty*2+['BMG']+label_empty*2+['BML']+label_empty*11+['BMS']+label_empty*5+['BOF']+label_empty*3+['BOG']+label_empty*4+['BOL']+label_empty*11+['BOS']+label_empty*5
labelY_OccupancyVsLB_BC03 = ['BIL']+label_empty*5+['BIM']+label_empty*4+['BIR']+label_empty*5+['BIS']+label_empty*11+['BMF']+label_empty*2+['BMG']+label_empty*2+['BML']+label_empty*11+['BMS']+label_empty*5+['BOF']+label_empty*3+['BOG']+label_empty*3+['BOL']+label_empty*11+['BOS']+label_empty*6
labelY_OccupancyVsLB_BA04 = ['BIL']+label_empty*5+['BIM']+label_empty*4+['BIR']+label_empty*5+['BIS']+label_empty*11+['BMF']+label_empty*2+['BMG']+label_empty*2+['BML']+label_empty*10+['BMS']+label_empty*5+['BOF']+label_empty*3+['BOG']+label_empty*4+['BOL']+label_empty*12+['BOS']+label_empty*4
labelY_OccupancyVsLB_BC04 = ['BIL']+label_empty*5+['BIM']+label_empty*4+['BIR']+label_empty*5+['BIS']+label_empty*11+['BMF']+label_empty*2+['BMG']+label_empty*2+['BML']+label_empty*10+['BMS']+label_empty*5+['BOF']+label_empty*3+['BOG']+label_empty*3+['BOL']+label_empty*12+['BOS']+label_empty*5
labelY_OccupancyVsLB_EA01 = ['BEE']+label_empty*3+['BIS']+label_empty*3+['EEL']+label_empty*3+['EES']+label_empty*3+['EIL']+label_empty*8+['EIS']+label_empty*3+['EML']+label_empty*9+['EMS']+label_empty*9+['EOL']+label_empty*11+['EOS']+label_empty*11
labelY_OccupancyVsLB_EC01 = labelY_OccupancyVsLB_EA01
labelY_OccupancyVsLB_EA03 = labelY_OccupancyVsLB_EA01
labelY_OccupancyVsLB_EC03 = labelY_OccupancyVsLB_EA01
labelY_OccupancyVsLB_EA02 = ['BEE']+label_empty*3+['BIS']+label_empty*3+['EEL']+label_empty*2+['EES']+label_empty*3+['EIL']+label_empty*7+['EIS']+label_empty*3+['EML']+label_empty*9+['EMS']+label_empty*9+['EOL']+label_empty*11+['EOS']+label_empty*11
labelY_OccupancyVsLB_EC02 = labelY_OccupancyVsLB_EA02
labelY_OccupancyVsLB_EA04 = ['BEE']+label_empty*3+['BIS']+label_empty*3+['EEL']+label_empty*3+['EES']+label_empty*3+['EIL']+label_empty*7+['EIS']+label_empty*3+['EML']+label_empty*9+['EMS']+label_empty*9+['EOL']+label_empty*11+['EOS']+label_empty*11
labelY_OccupancyVsLB_EC04 = labelY_OccupancyVsLB_EA04
labelY_OccupancyVsLB = ['BA']+label_empty*15+['BC']+label_empty*15+['EA']+label_empty*15+['EC']+label_empty*15
labelY_OccupancyVsLB_BA_Inner = ['BI1']+label_empty*17+['BI2']+label_empty*17+['BI3']+label_empty*17+['BI4']+label_empty*17+['BI5']+label_empty*17+['BI6']+label_empty*15+['BI7']+label_empty*7+['BI8']+label_empty*7
labelY_OccupancyVsLB_BC_Inner=labelY_OccupancyVsLB_BA_Inner
labelY_OccupancyVsLB_BA_Middle = ['BM1']+label_empty*15+['BM2']+label_empty*15+['BM3']+label_empty*15+['BM4']+label_empty*15+['BM5']+label_empty*15+['BM6']+label_empty*14
labelY_OccupancyVsLB_BC_Middle=labelY_OccupancyVsLB_BA_Middle
labelY_OccupancyVsLB_BA_OuterExtra=['BO0']+label_empty*2+['BO1']+label_empty*14+['BO2']+label_empty*15+['BO3']+label_empty*15+['BO4']+label_empty*15+['BO5']+label_empty*15+['BO6']+label_empty*15+['BO7,8']+label_empty*3+['BE1']+label_empty*7+['BE2']+label_empty*7
labelY_OccupancyVsLB_BC_OuterExtra=['BO1']+label_empty*15+['BO2']+label_empty*15+['BO3']+label_empty*15+['BO4']+label_empty*15+['BO5']+label_empty*15+['BO6']+label_empty*15+['BO7,8']+label_empty*3+['BE1']+label_empty*7+['BE2']+label_empty*7
labelY_OccupancyVsLB_EA_Inner = ['EI1']+label_empty*15+['EI2']+label_empty*15+['EI3']+label_empty*7+['EI4']+label_empty*7+['EI5']+label_empty*1
labelY_OccupancyVsLB_EC_Inner = labelY_OccupancyVsLB_EA_Inner
labelY_OccupancyVsLB_EA_Middle = ['EM1']+label_empty*15+['EM2']+label_empty*15+['EM3']+label_empty*15+['EM4']+label_empty*15+['EM5']+label_empty*15
labelY_OccupancyVsLB_EC_Middle = labelY_OccupancyVsLB_EA_Middle
labelY_OccupancyVsLB_EA_OuterExtra = ['EO1']+label_empty*15+['EO2']+label_empty*15+['EO3']+label_empty*15+['EO4']+label_empty*15+['EO5']+label_empty*15+['EO6']+label_empty*15+['EE1,2']+label_empty*6
labelY_OccupancyVsLB_EC_OuterExtra = ['EO1']+label_empty*15+['EO2']+label_empty*15+['EO3']+label_empty*15+['EO4']+label_empty*15+['EO5']+label_empty*15+['EO6']+label_empty*15+['EE1']+label_empty*15+['EE2']+label_empty*14
|
22,744 | 9319cbfd4259e4bc0482a563a2cd83fc739ab201 | from easydict import EasyDict
from typing import Union
from pathlib import Path
import os
import torch
import warnings
from vortex.development.utils.common import check_and_create_output_dir
from vortex.development.core.factory import create_model,create_dataset,create_exporter
from vortex.development.predictor import create_predictor
from vortex.development.core.pipelines.base_pipeline import BasePipeline
__all__ = ['GraphExportPipeline']
class GraphExportPipeline(BasePipeline):
"""Vortex Graph Export Pipeline API
"""
def __init__(self,
config: EasyDict,
weights : Union[str,Path,None] = None):
"""Class initialization
Args:
config (EasyDict): dictionary parsed from Vortex experiment file
weights (Union[str,Path], optional): path to selected Vortex model's weight. If set to None, it will \
assume that final model weights exist in **experiment directory**. \
Defaults to None.
Example:
```python
from vortex.development.utils.parser import load_config
from vortex.development.core.pipelines import GraphExportPipeline
# Parse config
config = load_config('experiments/config/example.yml')
graph_exporter = GraphExportPipeline(config=config,
weights='experiments/outputs/example/example.pth')
```
"""
# Configure output directory
self.experiment_directory, _ = check_and_create_output_dir(config)
self.experiment_name = config.experiment_name
# Initialize Pytorch model
if weights is None:
weights = self.experiment_directory / '{}.pth'.format(self.experiment_name)
if not os.path.isfile(weights):
raise RuntimeError("Default weight in {} is not exist, please provide weight "
"path using '--weights' argument.".format(str(weights)))
ckpt = torch.load(weights)
state_dict = ckpt['state_dict'] if 'state_dict' in ckpt else ckpt
model_components = create_model(config.model, state_dict=state_dict, stage='validate')
model_components.network = model_components.network.eval()
self.predictor = create_predictor(model_components).eval()
self.image_size = config.model.preprocess_args.input_size
cls_names = None
if 'class_names' in ckpt:
cls_names = ckpt['class_names']
else:
dataset_name = None
if 'name' in config.dataset.train:
dataset_name = config.dataset.train.name
elif 'dataset' in config.dataset.train:
dataset_name = config.dataset.train.dataset
if dataset_name:
from vortex.development.utils.data.dataset.dataset import all_datasets
dataset_available = False
for datasets in all_datasets.values():
if dataset_name in datasets:
dataset_available = True
break
if dataset_available:
# Initialize dataset to get class_names
warnings.warn("'class_names' is not available in your model checkpoint, please "
"update your model using 'scripts/update_model.py' script. \nCreating dataset "
"to get 'class_names'")
dataset = create_dataset(config.dataset, stage='train',
preprocess_config=config.model.preprocess_args)
if hasattr(dataset.dataset, 'class_names'):
cls_names = dataset.dataset.class_names
else:
warnings.warn("'class_names' is not available in dataset, setting "
"'class_names' to None.")
else:
warnings.warn("Dataset {} is not available, setting 'class_names' to None.".format(
config.dataset))
if cls_names is None:
num_classes = 2 ## default is binary class
if 'n_classes' in config.model.network_args:
num_classes = config.model.network_args.n_classes
self.class_names = ["class_{}".format(i) for i in range(num_classes)]
self.class_names = cls_names
# Initialize export config
self.export_configs = [config.exporter] \
if not isinstance(config.exporter, list) \
else config.exporter
def run(self, example_input: Union[str, Path, None] = None) -> EasyDict:
    """Execute the graph export pipeline.

    Every exporter listed in ``self.export_configs`` is run, even if an
    earlier one failed, so all requested artifacts get a chance to be
    produced.

    Args:
        example_input: optional path to an example input image used to
            help graph tracing. Defaults to None.

    Returns:
        EasyDict with a single ``export_status`` flag that is True only
        when every exporter succeeded.
    """
    exported_files = []
    all_succeeded = True
    for cfg in self.export_configs:
        exporter = create_exporter(
            config=cfg,
            experiment_name=self.experiment_name,
            image_size=self.image_size,
            output_directory=self.experiment_directory,
        )
        # Run the exporter first, then fold its status into the aggregate
        # flag, so a failure never short-circuits the remaining exports.
        succeeded = exporter(
            predictor=self.predictor,
            class_names=self.class_names,
            example_image_path=example_input,
        )
        all_succeeded = succeeded and all_succeeded
        exported_files.append(str(exporter.filename))
    print('model is exported to:', ', '.join(exported_files))
    # TODO specify which export failed
    result = EasyDict({'export_status': all_succeeded})
    return result
|
22,745 | dbc4e4873927c1429c4151746a89a2da5fe63a21 | import smtplib
from email.message import EmailMessage
from string import Template
from pathlib import Path

# Load the HTML body as a string.Template so the ``$name`` placeholder can be
# substituted per recipient.
html = Template(Path("./index.html").read_text())

# Build the message: envelope headers first, then the substituted HTML body.
email = EmailMessage()
email['from'] = "<name of sender>"
email['to'] = '<recipient email id>'
email['subject'] = 'lets try sending mails using python!'
email.set_content(html.substitute({'name':'<actual name in html content>'}),'html')

#this is gmail smtp config
# Connect on port 587 and upgrade the plaintext connection with STARTTLS
# before logging in, as Gmail requires.
with smtplib.SMTP(host='smtp.gmail.com',port=587) as smtp:
    print('connecting to smtp server...')
    smtp.ehlo()
    print('starting tls...')
    smtp.starttls()
    print('login to server...')
    #login with from account
    #to use gmail to send mails from please enable less secure apps in gmail account settings
    smtp.login('<login email id>','<password of sender email>')
    print('sending mail...')
    smtp.send_message(email)
    print('email is submitted to SMTP server succesfully.\nHappy mailing :)')
22,746 | e4d02130c1c6c9e724522d07af9cee61e137749d | from p4utils.utils.topology import Topology
from p4utils.utils.sswitch_API import SimpleSwitchAPI
from scapy.all import Ether, sniff, Packet, BitField
from multiprocessing import Pool
import threading
import json
import ipaddress
import itertools
class CpuHeader(Packet):
    """Scapy layout of the learning notification sent to the controller:
    48-bit source MAC, 16-bit tunnel id (0 = no tunnel), and a 16-bit field
    that carries either the pseudo-wire id or the ingress port."""
    name = 'CpuPacket'
    fields_desc = [BitField('macAddr', 0, 48), BitField('tunnel_id', 0, 16), BitField('pw_id_or_ingress_port', 0, 16)]
class RttHeader(Packet):
    """Scapy layout of an RTT report: 16-bit customer id, 32-bit source and
    destination IPv4 addresses, and a 48-bit RTT value."""
    name = 'RttPacket'
    fields_desc = [BitField('customer_id',0,16), BitField('ip_addr_src', 0, 32), BitField('ip_addr_dst', 0, 32), BitField('rtt',0,48)]
class EventBasedController(threading.Thread):
def __init__(self, params):
super(EventBasedController, self).__init__()
self.topo = Topology(db="topology.db")
self.sw_name = params["sw_name"]
self.cpu_port_intf = params["cpu_port_intf"]
self.thrift_port = params["thrift_port"]
self.id_to_switch = params["id_to_switch"]
self.whole_controller = params["whole_controller"]
self.controller = SimpleSwitchAPI(thrift_port)
self.ecmp_group_count = 1
def run(self):
sniff(iface=self.cpu_port_intf, prn=self.recv_msg_cpu)
def recv_msg_cpu(self, pkt):
print "received packet at " + str(self.sw_name) + " controller"
packet = Ether(str(pkt))
if packet.type == 0x1234:
cpu_header = CpuHeader(packet.payload)
self.process_packet([(cpu_header.macAddr, cpu_header.tunnel_id, cpu_header.pw_id_or_ingress_port)]) ### change None with the list of fields from the CPUHeader that you defined
elif packet.type == 0x5678:
rtt_header = RttHeader(packet.payload)
self.process_packet_rtt([(rtt_header.customer_id,rtt_header.ip_addr_src,rtt_header.ip_addr_dst,rtt_header.rtt)])
def process_packet(self, packet_data):
for macAddr, tunnel_id, pw_id_or_ingress_port in packet_data:
if self.topo.get_hosts_connected_to(self.sw_name) == []:
self.controller.table_add('l2_learning_tunnel', 'NoAction', [str(macAddr), str(pw_id_or_ingress_port)], [])
return
# non-tunnel packets
if tunnel_id == 0:
egress_spec = pw_id_or_ingress_port
self.controller.table_add('l2_learning_non_tunnel', 'NoAction', [str(macAddr), str(egress_spec)], [])
pw_id = self.whole_controller.get_pwid(self.sw_name)[egress_spec]
# direct_forward_without_tunnel
for ingress_port in self.whole_controller.get_all_non_tunnel_ports(self.sw_name):
if ingress_port == egress_spec or self.whole_controller.get_pwid(self.sw_name)[ingress_port] != pw_id:
continue
else:
self.controller.table_add('direct_forward_without_tunnel', 'direct_forward_without_tunnel_act', [str(ingress_port), str(macAddr)], [str(egress_spec)])
# decap_forward_with_tunnel
self.controller.table_add('decap_forward_with_tunnel', 'decap_forward_with_tunnel_act', [str(macAddr), str(pw_id)], [str(egress_spec)])
# tunnel packets
else:
pw_id = pw_id_or_ingress_port
self.controller.table_add('l2_learning_tunnel', 'NoAction', [str(macAddr), str(pw_id)], [])
tunnel = self.whole_controller.tunnel_list[tunnel_id - 1]
# encap_forward_with_tunnel
for ingress_port in self.whole_controller.get_all_non_tunnel_ports(self.sw_name):
if self.whole_controller.get_pwid(self.sw_name)[ingress_port] != pw_id:
continue
else:
egress_spec = self.whole_controller.get_tunnel_ports(tunnel, self.sw_name)[0]
self.controller.table_add('encap_forward_with_tunnel', 'encap_forward_with_tunnel_act', [str(ingress_port), str(macAddr)], [str(egress_spec), str(tunnel_id), str(pw_id)])
# # ecmp
the_other_pe = self.sw_name
for pe_pair in self.whole_controller.name_to_tunnel.keys():
if tunnel in self.whole_controller.name_to_tunnel[pe_pair]:
for pe in pe_pair:
if pe == self.sw_name:
continue
else:
the_other_pe = pe
tunnel_l = self.whole_controller.name_to_tunnel.get((self.sw_name, the_other_pe), None)
if tunnel_l == None:
tunnel_l = self.whole_controller.name_to_tunnel[(the_other_pe, self.sw_name)]
if len(tunnel_l) > 1:
for ingress_port in self.whole_controller.get_all_non_tunnel_ports(self.sw_name):
if self.whole_controller.get_pwid(self.sw_name)[ingress_port] != pw_id:
continue
else:
self.controller.table_add('ecmp_group', 'ecmp_group_act', [str(ingress_port), str(macAddr)], [str(self.ecmp_group_count), str(len(tunnel_l))])
for hash_value in range(len(tunnel_l)):
tunnel_ecmp = tunnel_l[hash_value]
tunnel_id_ecmp = self.whole_controller.tunnel_list.index(tunnel_ecmp) + 1
egress_spec = self.whole_controller.get_tunnel_ports(tunnel_ecmp, self.sw_name)[0]
self.controller.table_add('ecmp_forward', 'encap_forward_with_tunnel_act', [str(self.ecmp_group_count), str(hash_value)], [str(egress_spec), str(tunnel_id_ecmp), str(pw_id)])
self.ecmp_group_count += 1
def process_packet_rtt(self, packet_data):
for customer_id, ip_addr_src, ip_addr_dst, rtt in packet_data:
print("Customer_id: " + str(customer_id))
print("SourceIP: " + str(ipaddress.IPv4Address(ip_addr_src)))
print("DestinationIP: " + str(ipaddress.IPv4Address(ip_addr_dst)))
print("RTT: " + str(rtt))
class RoutingController(object):
    """Global VPLS controller.

    Reads the topology DB and the customer config, computes the tunnels
    between PE switches, and installs the static (multicast and transit)
    rules on every switch.  Per-packet learning is handled by the
    EventBasedController threads.
    """

    def __init__(self, vpls_conf_file):
        self.topo = Topology(db="topology.db")
        # Per-switch CPU port index, used to mirror packets to the controller.
        self.cpu_ports = {x:self.topo.get_cpu_port_index(x) for x in self.topo.get_p4switches().keys()}
        self.controllers = {}
        self.vpls_conf_file = vpls_conf_file
        # (peA, peB) pair -> list of tunnels (paths) between them.
        self.name_to_tunnel = {}
        # Flat list of all tunnels; a tunnel's id is its index + 1.
        self.tunnel_list = []
        self.pe_list = []
        self.non_pe_list = []
        self.init()

    def init(self):
        """One-shot setup: connect, wipe state, compute tunnels and id maps."""
        self.connect_to_switches()
        self.reset_states()
        self.add_mirror()
        self.extract_customers_information()
        self.gen_tunnel()
        self.get_pe_list()
        self.switch_to_id = {sw_name:self.get_switch_id(sw_name) for sw_name in self.topo.get_p4switches().keys()}
        self.id_to_switch = {self.get_switch_id(sw_name):sw_name for sw_name in self.topo.get_p4switches().keys()}

    def add_mirror(self):
        # Mirror session 100 sends cloned packets to each switch's CPU port.
        for sw_name in self.topo.get_p4switches().keys():
            self.controllers[sw_name].mirroring_add(100, self.cpu_ports[sw_name])

    def extract_customers_information(self):
        # vpls_conf maps each host to its customer ('A' or 'B'); see get_pwid.
        with open(self.vpls_conf_file) as json_file:
            self.vpls_conf = json.load(json_file)

    def reset_states(self):
        # Clear all tables/multicast state left over from previous runs.
        [controller.reset_state() for controller in self.controllers.values()]

    def connect_to_switches(self):
        for p4switch in self.topo.get_p4switches():
            thrift_port = self.topo.get_thrift_port(p4switch)
            self.controllers[p4switch] = SimpleSwitchAPI(thrift_port)

    def get_switch_id(self, sw_name):
        # Two-digit hex string form of the switch id from the topology DB.
        return "{:02x}".format(self.topo.get_p4switches()[sw_name]["sw_id"])

    def gen_tunnel(self):
        """Compute all shortest paths between every pair of PE switches.

        Paths that traverse the controller pseudo-switch 'sw-cpu' are
        discarded.  NOTE(review): the filtering loop removes elements from
        `sub_tunnels` while iterating it, which can skip a path when two
        'sw-cpu' paths are adjacent -- confirm whether that case can occur.
        """
        pe_switchs = []
        for swname in self.topo.get_p4switches().keys():
            # A switch with attached hosts is a provider-edge switch.
            if len(self.topo.get_hosts_connected_to(swname)) != 0:
                pe_switchs.append(swname)
        pe_pair = list(itertools.combinations(pe_switchs, 2))
        name_to_tunnel = {}
        tunnel_list = []
        for item in pe_pair:
            sub_tunnels = self.topo.get_shortest_paths_between_nodes(item[0], item[1])
            for sub_tunnel in sub_tunnels:
                if 'sw-cpu' in sub_tunnel:
                    sub_tunnels.remove(sub_tunnel)
            for sub_tunnel in sub_tunnels:
                tunnel_list.append(sub_tunnel)
            name_to_tunnel.update({item: sub_tunnels})
        self.name_to_tunnel = name_to_tunnel
        self.tunnel_list = tunnel_list

    def get_tunnel_ports(self, tunnel, sw_name):
        """Ports of `sw_name` that lie on `tunnel`: one port at either end
        of the path, two (towards both neighbours) for transit switches."""
        ports = []
        if tunnel.index(sw_name) == 0:
            ports.append(self.topo.node_to_node_port_num(sw_name, tunnel[1]))
        elif tunnel.index(sw_name) == len(tunnel) - 1:
            ports.append(self.topo.node_to_node_port_num(sw_name, tunnel[len(tunnel) - 2]))
        else:
            index = tunnel.index(sw_name)
            ports.append(self.topo.node_to_node_port_num(sw_name, tunnel[index - 1]))
            ports.append(self.topo.node_to_node_port_num(sw_name, tunnel[index + 1]))
        return ports

    def get_all_tunnel_ports(self, sw_name):
        # De-duplicated union of this switch's ports over all tunnels.
        ports = []
        for tunnel in self.tunnel_list:
            if sw_name in tunnel:
                ports_t = self.get_tunnel_ports(tunnel, sw_name)
                for port in ports_t:
                    if not port in ports:
                        ports.append(port)
        return ports

    def get_port_tunnels(self, port, sw_name):
        # Inverse of get_all_tunnel_ports: every tunnel using `port`.
        tunnels = []
        for tunnel in self.tunnel_list:
            if sw_name in tunnel:
                if port in self.get_tunnel_ports(tunnel, sw_name):
                    tunnels.append(tunnel)
        return tunnels

    def get_all_non_tunnel_ports(self, sw_name):
        # Ports facing directly-attached customer hosts.
        ports = []
        for host in self.topo.get_hosts_connected_to(sw_name):
            ports.append(self.topo.node_to_node_port_num(sw_name, host))
        return ports

    def get_pwid(self, sw_name):
        # host port -> pseudo-wire id (customer 'A' -> 1, customer 'B' -> 2).
        pwid_dic = {}
        for host in self.topo.get_hosts_connected_to(sw_name):
            if self.vpls_conf['hosts'][host] == 'A':
                pwid_dic.update({self.topo.node_to_node_port_num(sw_name, host): 1})
            elif self.vpls_conf['hosts'][host] == 'B':
                pwid_dic.update({self.topo.node_to_node_port_num(sw_name, host): 2})
        return pwid_dic

    def get_pe_list(self):
        # Split switches into provider-edge (has hosts) and transit switches.
        for sw_name in self.topo.get_p4switches().keys():
            if len(self.topo.get_hosts_connected_to(sw_name)) > 0 :
                self.pe_list.append(sw_name)
            elif len(self.topo.get_hosts_connected_to(sw_name)) == 0 :
                self.non_pe_list.append(sw_name)

    def process_network(self):
        """Install the static rules.

        PE switches get multicast groups 1-4 (groups 1/2: flood customer
        A/B traffic into tunnels + local ports; groups 3/4: flood decapped
        traffic to local A/B ports only) plus the encap/decap multicast
        tables.  Transit switches get plain tunnel pass-through entries.
        """
        # PE Part
        for pe in self.pe_list:
            # group_id = 0
            for ingress_port in self.get_pwid(pe).keys():
                pw_id = self.get_pwid(pe)[ingress_port]
            # multicast: create one node per (tunnel port, tunnel) --
            # two identical batches, one per multicast group 1 and 2.
            tunnel_handle_num = 0
            for tunnel_port in self.get_all_tunnel_ports(pe):
                for tunnel in self.get_port_tunnels(tunnel_port, pe):
                    tunnel_id = self.tunnel_list.index(tunnel) + 1
                    node_port = []
                    node_port.append(tunnel_port)
                    self.controllers[pe].mc_node_create(tunnel_id, node_port)
                    tunnel_handle_num += 1
            for tunnel_port in self.get_all_tunnel_ports(pe):
                for tunnel in self.get_port_tunnels(tunnel_port, pe):
                    tunnel_id = self.tunnel_list.index(tunnel) + 1
                    node_port = []
                    node_port.append(tunnel_port)
                    self.controllers[pe].mc_node_create(tunnel_id, node_port)
            # Partition local host ports by customer / pseudo-wire.
            non_tunnel_ports_1 = []
            non_tunnel_ports_2 = []
            for non_tunnel_port in self.get_all_non_tunnel_ports(pe):
                if self.get_pwid(pe)[non_tunnel_port] == 1:
                    non_tunnel_ports_1.append(non_tunnel_port)
                elif self.get_pwid(pe)[non_tunnel_port] == 2:
                    non_tunnel_ports_2.append(non_tunnel_port)
            for index in range(4):
                self.controllers[pe].mc_mgrp_create(index + 1)
            # Two batches of (customer-A ports, customer-B ports) nodes:
            # handles 2n..2n+3 in creation order.
            for index in range(2):
                self.controllers[pe].mc_node_create(0, non_tunnel_ports_1)
                self.controllers[pe].mc_node_create(0, non_tunnel_ports_2)
            # Groups 1/2: every tunnel node plus the local ports of A/B.
            for index in range(tunnel_handle_num):
                self.controllers[pe].mc_node_associate(1, index)
                self.controllers[pe].mc_node_associate(2, index + tunnel_handle_num)
            self.controllers[pe].mc_node_associate(1, tunnel_handle_num * 2)
            self.controllers[pe].mc_node_associate(2, tunnel_handle_num * 2 + 1)
            # Groups 3/4: local ports only (used after decapsulation).
            self.controllers[pe].mc_node_associate(3, tunnel_handle_num * 2 + 2)
            self.controllers[pe].mc_node_associate(4, tunnel_handle_num * 2 + 3)
            # Table entries mapping ingress ports to pw-ids and flood groups.
            for ingress_port in self.get_all_non_tunnel_ports(pe):
                pw_id = self.get_pwid(pe)[ingress_port]
                self.controllers[pe].table_add('get_pwid', 'get_pwid_act', [str(ingress_port)], [str(pw_id)])
                if pw_id == 1:
                    self.controllers[pe].table_add('encap_multicast', 'encap_multicast_act', [str(ingress_port)], ['1', str(pw_id)])
                elif pw_id == 2:
                    self.controllers[pe].table_add('encap_multicast', 'encap_multicast_act', [str(ingress_port)], ['2', str(pw_id)])
            self.controllers[pe].table_add('decap_multicast', 'decap_multicast_act', ['1'], ['3'])
            self.controllers[pe].table_add('decap_multicast', 'decap_multicast_act', ['2'], ['4'])
        # non_PE Part: transit switches just shuttle tunnel traffic between
        # the two tunnel ports of each tunnel they sit on.
        for non_pe in self.non_pe_list:
            tunnel_l = []
            tunnel_id_l = []
            for tunnel in self.tunnel_list:
                if non_pe in tunnel:
                    tunnel_l.append(tunnel)
                    tunnel_id_l.append(self.tunnel_list.index(tunnel) + 1)
            for index in range(len(tunnel_l)):
                tunnel = tunnel_l[index]
                tunnel_id = tunnel_id_l[index]
                ports = self.get_tunnel_ports(tunnel, non_pe)
                self.controllers[non_pe].table_add('direct_forward_with_tunnel', 'direct_forward_with_tunnel_act', [str(ports[0]), str(tunnel_id)], [str(ports[1])])
                self.controllers[non_pe].table_add('direct_forward_with_tunnel', 'direct_forward_with_tunnel_act', [str(ports[1]), str(tunnel_id)], [str(ports[0])])
        print '=====tunnel_list below====='
        print self.tunnel_list
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 2:
        print "Error: vpls.conf file missing"
        sys.exit()
    vpls_conf_file = sys.argv[1]
    # Install the static rules (multicast groups, transit forwarding) first.
    controller = RoutingController(vpls_conf_file)
    controller.process_network()
    # Then spawn one event-driven controller thread per P4 switch, each
    # sniffing its switch's CPU port for learning/RTT packets.
    thread_list = []
    for sw_name in controller.topo.get_p4switches().keys():
        # The sniffing side of the CPU port veth pair is the eth1 end.
        cpu_port_intf = str(controller.topo.get_cpu_port_intf(sw_name).replace("eth0", "eth1"))
        thrift_port = controller.topo.get_thrift_port(sw_name)
        id_to_switch = controller.id_to_switch
        params ={}
        params["sw_name"] = sw_name
        params["cpu_port_intf"]= cpu_port_intf
        params["thrift_port"]= thrift_port
        params["id_to_switch"]= id_to_switch
        params["whole_controller"] = controller
        thread = EventBasedController(params )
        thread.setName('MyThread ' + str(sw_name))
        thread.daemon = True
        thread_list.append(thread)
        thread.start()
    # The sniffer threads normally run forever, so these joins block.
    for thread in thread_list:
        thread.join()
        print ("Thread has finished")
|
22,747 | dbd5a6e017273d3453929e6df6934947f454c563 | # tuple data structure
# Tuples can hold values of any type but are immutable: once built they have
# no append/insert/pop/remove, which also makes them a bit faster than lists.
# Only count() and index() are available, plus len(), slicing and iteration.
t = (1, 2, 3, 4, 5, 6, 7)

# A one-element tuple needs a trailing comma: (1,).  Parentheses themselves
# are optional -- the commas alone create the tuple.
guiter = 'koushik', 'arnok', 'john'

# A tuple may contain mutable objects; the nested list can still be changed
# even though the tuple itself cannot.
t1 = ('koushik', ['arnok', 'john'])
t1[1].append("we are made it.")

# min(), max() and sum() all accept tuples.


def func(int1, int2):
    """Return the sum and the product of the two inputs as a tuple --
    this is how a function "returns two values" in Python."""
    return int1 + int2, int1 * int2


# str() turns a tuple into its printable representation.
n = str((1, 2, 3, 4, 5, 6, 7, 8, 9))
print(n)
print(type(n))
|
22,748 | 5d4edbe44b67a211ca8b31ac77b60e83d9f42d6d | #!/usr/bin/env python3
import http.client
import http.server
import os
import random
import select
import signal
import socketserver
PORT = 8000
MUSIC_ROOT = "/nfs/geluid/mp3"
class Playlist:
    """Shuffled playlist with resumable position.

    The shuffle order is reproducible because it is derived from a seed
    stored in the state file; on restart the already-played prefix
    (``count`` entries) is dropped so playback resumes where it left off.
    """

    # Track currently being played (None until advance() is first called).
    _current_file = None

    def __init__(self, playlist_fname, state_fname):
        # read the playlist files
        self._files = [ ]
        with open(playlist_fname, 'rt') as f:
            for s in f:
                s = s.strip()
                self._files.append(s)

        # read the state: whitespace-separated "key value" lines, ints only.
        self._state = {}
        self._state_fname = state_fname
        try:
            with open(self._state_fname, 'rt') as f:
                for s in f:
                    s = s.strip().split()
                    self._state[s[0]] = int(s[1])
        except FileNotFoundError:
            # First run: pick a fresh seed and start from the beginning.
            self._state = {
                'seed': random.randrange(0, 9223372036854775807),
                'count': 0,
            }

        # XXX we actually want a local random seed
        random.seed(self._state['seed'])

        # shuffle the files and throw away the amount that we already played
        self._files = random.sample(self._files, len(self._files))
        self._files = self._files[self._state['count']:]

    def write_state(self):
        # Persist the state dict as "key value" lines.
        with open(self._state_fname, 'wt') as f:
            for k, v in self._state.items():
                f.write('%s %s\n' % (k, v))

    def advance(self):
        """Move to the next track, persist the new position, and return it."""
        self._state['count'] += 1
        self.write_state()
        self._current_file = self._files[0]
        self._files = self._files[1:]
        return self._current_file

    def get_current(self):
        return self._current_file
class Player:
    """Plays the playlist one track at a time by forking an mpg321 child."""

    # PID of the current player child process (None before first play()).
    _pid = None

    def __init__(self, playlist):
        self._playlist = playlist

    def play(self):
        """Advance the playlist and fork/exec a player for the new track."""
        next_file = self._playlist.advance()
        print('playing "%s"' % next_file)
        newpid = os.fork()
        if newpid == 0:
            # Child: replace ourselves with the player binary.
            #os.execvp("/usr/bin/mplayer", [ "mplayer", "-quiet", "-nolirc", "-really-quiet", os.path.join(MUSIC_ROOT, next_file) ])
            os.execvp("/usr/bin/mpg321", [ "mpg321", "-quiet", os.path.join(MUSIC_ROOT, next_file) ])
            #os.execvp("/bin/sleep", [ "sleep", "10" ])
        self._pid = newpid

    def stop(self):
        # Terminate the child; the SIGCHLD handler drives the track change.
        os.kill(self._pid, signal.SIGTERM)
class httpHandler(http.server.BaseHTTPRequestHandler):
    """Minimal web UI: '/' shows the current track, '/next' skips it."""

    def do_GET(self):
        global playlist, player, need_next
        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.end_headers()
        reply = "Request not understood"
        if self.path == "/":
            reply = 'Playing <b>%s</b><br/>' % playlist.get_current()
            reply += '<a href="/next">skip</a>'
        elif self.path == "/next":
            # Only set the flag; the main loop does the actual stop/skip.
            need_next = True
            # force a redirect
            reply = '<meta http-equiv="refresh" content="0; url=/" />'
        self.wfile.write(reply.encode('utf-8'))

    # ping is used to drop out of the select(2) loop; for whatever reason,
    # python keeps restarting calls after a signal
    def do_PING(self):
        self.send_response(200)
        self.end_headers()
def on_sigchld(signum, frame):
    """SIGCHLD handler: the player child for the current track has exited.

    Sets the flag the main loop polls and sends a dummy PING request to our
    own HTTP server so handle_request() returns (Python restarts the
    blocking call after a signal, so the loop would not wake up otherwise).
    """
    # Fix: the original declared `global got_sigchld, clien` -- `clien` was a
    # typo for `client`; no global declaration is needed to *read* `client`.
    global got_sigchld
    got_sigchld = True
    client.request("PING", "/") # force out of the loop
# load files, redistribute them and start at count
playlist = Playlist('files.txt', 'state.txt')
player = Player(playlist)

# handle HTTP side of things
got_sigchld = False   # set by the SIGCHLD handler when a track finishes
need_next = False     # set by the '/next' HTTP endpoint to request a skip
signal.signal(signal.SIGCHLD, on_sigchld)
httpd = socketserver.TCPServer(("", PORT), httpHandler)
# Loopback connection used by the signal handler to poke the server awake.
client = http.client.HTTPConnection("localhost:%d" % PORT)

player.play()
while True:
    if need_next:
        # Killing the player raises SIGCHLD, which triggers the advance below.
        player.stop() # sigchld should pick this up further
        need_next = False
    if got_sigchld:
        # advance to next track
        client.getresponse() # to flush the response
        player.play()
        got_sigchld = False
    httpd.handle_request()
# NOTE(review): unreachable -- the loop above never terminates.
httpd.server_close()
|
22,749 | 63795bcfd38a51d83f0229745abefbe3b8fe2063 | # !/usr/bin/env python
import os
import sys
import time
from owllook.database.es import ElasticObj
from owllook.spiders import DdSpider
# from owllook.spiders import QidianNovelSpider
os.environ['MODE'] = 'PRO'
sys.path.append('../../')
def start_spider():
    """Crawl the index pages with two alternating user agents and store
    every scraped record in Elasticsearch.

    Stops when a page yields an empty record, which marks the end of the
    listing.
    """
    es_store = ElasticObj("bupt", "novel")
    # es_store.create_index()
    crawlers = [
        DdSpider('Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'),
        DdSpider('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'),
    ]
    page = 0
    while page >= 0:
        # Alternate between the two user agents page by page (even pages
        # use the first crawler, odd pages the second).
        records = crawlers[page % 2].get_index_result("", page)
        for record in records:
            if not record:
                # Empty record: drive the counter far negative so the
                # outer loop terminates after this iteration.
                page = -10
                break
            es_store.Index_Item(record)
        page = page + 1
        print('page:'+str(page))
        time.sleep(1)
# def qidian_spider():
# obj = ElasticObj("bupt", "novel")
# obj.create_index()
# spider = QidianNovelSpider('qidian_novel')
# for data in spider.parse():
# if data:
# obj.Index_Item(data)
# else:
# break
def main():
    """Entry point: kick off the index crawl."""
    print("start============")
    start_spider()
    #qidian_spider()


if __name__ == '__main__':
    main()

# python novels_schedule.py
|
22,750 | 625f69dd7ff1ea6a688678e9d5a0369e70ef1a6d | #coding:utf-8
#此文件的参数配置均与用例强相关,与执行环境无关
from common import baseinfo
pcap_sip = baseinfo.clientOpeIp
pcap_dip = baseinfo.serverOpeIp
ciface = baseinfo.pcapSendIface
siface = baseinfo.pcapReadIface
strip = baseinfo.strip
value = r"b'\x00\x00\x00\x10\x01\x05\x00\x0e\xc0'"
#报文发送,读取和预期结果
#列表里面的命令依次为:
#发送端:发送报文接口,发送报文数量,发送报文名称;
#抓包:接口名称,过滤规则,抓包数量,报文命名(以用例名称.pcap命名)
#报文读取:保存的报文名称,要读取的包的序号;这里读取的报文名称和上面抓包的保存报文名称应该一致
#期望结果:预期结果(协议字段),是否有偏差(保留),偏差值(保留)
pkt1_cfg={
"send":[ciface,1,"0001_TCP_ETH_IPV4_TCP__16_14_3_8889.pcap"],
"capture":[siface,f'tcp and host {pcap_dip}',1,"test_acl_compare_selabel_a1.pcap"],
"read":["test_acl_compare_selabel_a1.pcap",0],
"expect":[f'{value}\n',0,0]
}
#配置下发
#列表里面的顺序依次为:配置命令,查询命令,预期结果
case1_step={
"step1":[f"export cardid=0&&tupleacl --add --sip {pcap_sip} --action forward --netlbl strip --drop on --match n --mode BLP --doi 16 --level 16 --type 1 --value 0x3,0,0,0",f"export cardid=0&&tupleacl --query --sip {pcap_sip}", pcap_sip],
"step2":[f"export cardid=1&&tupleacl --add --dip {pcap_dip} --action forward --netlbl tag --drop off --match n --mode BLP --doi 16 --level 14 --type 1 --value 0x3,0,0,0",f"export cardid=1&&tupleacl --query --dip {pcap_dip}", pcap_dip]
}
pkt2_cfg={
"send":[ciface,1,"0001_TCP_ETH_IPV4_TCP__16_14_3_8889.pcap"],
"capture":[siface,f'tcp and host {pcap_dip}',1,"test_acl_compare_selabel_a2.pcap"],
"read":["test_acl_compare_selabel_a2.pcap",0],
"expect":[strip,0,0]
}
case2_step={
"step1":[f"export cardid=0&&tupleacl --add --sip {pcap_sip} --dip {pcap_dip} --dp 8889 --action forward --netlbl strip --drop on --match n --mode BLP --doi 16 --level 14 --type 1 --value 0x3,0,0,0",f"tupleacl --query --sip {pcap_sip} --dip {pcap_dip} --dp 8889",pcap_sip],
"step2":["export cardid=0&&selabel --set --netlbl strip --drop on --match n --mode BLP --doi 16 --level 8 --type 1 --value 0 1","selabel --get",'level:8'],
}
pkt3_cfg={
"send":[ciface,1,"0001_TCP_ETH_IPV4_TCP__16_14_3_8889.pcap"],
"capture":[siface,f'tcp and host {pcap_dip}',1,"test_acl_compare_selabel_a3.pcap"],
"read":["test_acl_compare_selabel_a3.pcap",0],
"expect":[strip,0,0]
}
case3_step={
"step1":["export cardid=0&&selabel --set --netlbl strip --drop on --match n --mode BLP --doi 16 --level 16 --type 1 --value 0 1","selabel --get",'level:16']
}
|
22,751 | 9ddc3083a5541862ab1d13ecea3a4ba91111d31d | def fibo(n):
'Print a Fibonnaci series up to n.'
a,b = 0,1
while a < n:
print(a, end=' ', flush=True)
a,b = b, a + b
print()
fibo(60)
print(fibo.__doc__) |
22,752 | 001df15812c0d327fa6f0f19d6c81b146ed727f1 | # inspired by Paul Barry : Head First Python
from flask import Flask, render_template, request
from letterSearch import search_for_letter
app = Flask(__name__)
# Just a test
# @app.route('/')
# def hello() -> str:
# return "Hello bobbi maniac"
@app.route('/search4', methods=['POST'])
def do_search() -> 'html':
    """Handle the search-form POST: run the letter search on the submitted
    phrase and render the results page."""
    form = request.form
    found = str(search_for_letter(form['phrase'], form['letters']))
    return render_template('results.html',
                           the_phrase=form['phrase'],
                           the_letters=form['letters'],
                           the_title="Here are the results",
                           the_results=found)
@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
    """Render the search entry form (served at both / and /entry)."""
    page_title = "Welcome to Search4Letters"
    return render_template('entry.html', the_title=page_title)
# Port can be set at properties - Debug - Port Number
if __name__ == "__main__":
    # Run Flask's built-in development server locally on port 4449.
    app.run("localhost", 4449)
22,753 | dc2248496bcb13edb25a0f9123e92a83221624dc | from bs4 import BeautifulSoup
import urllib2
import sys
import re
import os
sys.path.insert(0,'libs')
from google.appengine.ext import ndb, deferred
class VectorDatabase():
    """Scrapes vector sequences from addgene.org into an ndb model.

    Python 2 / App Engine code: slow downloads are pushed onto the
    'builder' task queue backend via deferred.defer.
    """

    def __init__(self, database):
        # `database` is an ndb model class (instantiated per record below).
        self.database = database

    def db_check(self):
        """Seed the database if sentinel vectors are missing.

        One well-known vector per category is probed; a miss triggers a
        deferred download of that whole category.
        """
        test_vec_yeast = self.database.query(self.database.name=='pXP116').get()
        test_vec_gateway = self.database.query(self.database.name=='pDONR201').get()
        test_vec_unspecified = self.database.query(self.database.name=='pRS303').get()
        # NOTE(review): return_string is never used.
        return_string = ''
        if test_vec_yeast is None:
            deferred.defer(self.dl_vectors, 'Yeast', _target='builder')
        if test_vec_gateway is None:
            deferred.defer(self.dl_vectors, 'Other', _target='builder')
        if test_vec_unspecified is None:
            deferred.defer(self.dl_vectors, 'Unspecified', _target='builder')

    def dl_vectors(self, dburl):
        """Download all vectors of category `dburl` and store their sequences.

        The addgene results table is flattened into '::'-separated fields;
        each record is 9 fields, of which index n+1 is the vector name,
        n+7 the sequence link and n+8 a yes/no availability flag.
        """
        # Normalize the category to Title case before building the query URL.
        dburl = dburl.lower()
        dburl = dburl.title()
        dburl = "http://www.addgene.org/vector-database/query/?q_vdb=*+***%20vector_type:" + dburl
        http = urllib2.urlopen(dburl).read()
        soup = BeautifulSoup(http)
        tbl = soup.find(id='results')
        # Strip the hidden <input ... value=" wrappers and '###' markers.
        tbl = re.sub('<input\sid.*?value="','', str(tbl))
        tbl = tbl.replace('###','')
        # NOTE(review): `list` shadows the builtin of the same name.
        list = tbl.split('::')
        vec_dict = {}
        for n in range(0, len(list), 9):
            try:
                if list[n+8] == 'yes':
                    if 'vector' in list[n+7]:
                        # Extract the numeric id and build the giraffe URL.
                        r = re.search(r'\d+',str(list[n+7]))
                        vec_dict[list[n+1]] = ("/browse/sequence/%s/giraffe-analyze_vdb/" % r.group())
                    else: vec_dict[list[n+1]] = ("%ssequences/" % str(list[n+7]))
            except IndexError: break
        # Fetch each sequence page; giraffe pages need one extra indirection
        # through the embedded frame before the raw sequence can be grabbed.
        for vector in vec_dict:
            if 'giraffe' in vec_dict[vector]:
                try:
                    http = urllib2.urlopen('http://www.addgene.org/' + vec_dict[vector]).read()
                    giraffelink = re.search(r'src="(/g.*?)"',http,re.DOTALL).group(1)
                    http = urllib2.urlopen('http://www.addgene.org' + str(giraffelink)).read()
                    # First long run of letters is taken to be the sequence.
                    srch = re.search(r'[a-zA-Z]{20}[a-zA-Z\n]{20,}',http).group(0)
                    self.database(name=vector,sequence=srch).put()
                except AttributeError: continue
            else:
                try:
                    http = urllib2.urlopen('http://www.addgene.org/' + vec_dict[vector]).read()
                    srch = re.search(r'[a-zA-Z]{20}[a-zA-Z\n]{20,}',http).group(0)
                    self.database(name=vector,sequence=srch).put()
                except AttributeError: continue
        return("completed")
class SitesDatabase():
    """Scrapes att site sequences from the tol2kit wiki into an ndb model."""

    def __init__(self,database):
        self.database = database

    def db_check(self):
        # Probe one known site; if absent, download the whole table deferred.
        test_att_site = self.database.query(self.database.name=='attB4').get()
        if test_att_site is None:
            deferred.defer(self.dl_sites, _target='builder')

    def dl_sites(self):
        """Download the att-site table and store each (name, sequence) pair."""
        # NOTE(review): sitelist is never used.
        sitelist = []
        dburl = "http://chien.neuro.utah.edu/tol2kitwiki/index.php/Att_site_sequences"
        http = urllib2.urlopen(dburl).read()
        # Each match is (site name, uppercase sequence) from the wiki markup.
        src = re.findall(r'gt;.*?([\w\d]+).*?([A-Z]{3,}.*?)[&<]', http,flags=re.DOTALL)
        for n in range (0,len(src),1):
            # Skip internal/underscored entries.
            if '_' in src[n][0]: continue
            else: self.database(name=src[n][0],sequence=src[n][1]).put()
class EnzymesDatabase():
    """Scrapes restriction-enzyme cut sites from addgene into an ndb model."""

    def __init__(self, database):
        self.database = database

    def db_check(self):
        # Probe one known enzyme; if absent, download the table deferred.
        test_enz = self.database.query(self.database.name=='EcoRV').get()
        if test_enz is None:
            deferred.defer(self.dl_enzymes, _target='builder')

    def dl_enzymes(self):
        """Download the enzyme reference table and store each enzyme's
        name with the two halves of its recognition site."""
        enzlist = []
        dburl = "http://www.addgene.org/mol_bio_reference/restriction_enzymes/"
        http = urllib2.urlopen(dburl).read()
        soup = BeautifulSoup(http)
        tbl = soup.find('table',attrs={'class':'border-box w400'})
        src = tbl.find_all('td')
        # Drop empty cells, then treat the cells as (name, site) pairs.
        src = re.sub(r'<td>\s</td>,', '', str(src))
        src = src.split(',')
        for n in range(0, len(src), 2):
            try:
                name = re.search(r'\s(\w+)', str(src[n]))
                # The site cell holds the sequence split around the cut point.
                enzyme = re.search(r'.*?([A-Z]+).*?([A-Z]+)', str(src[n+1]),
                                   re.DOTALL)
                # Skip duplicates already stored in this run.
                if name.group(1) in enzlist: continue
                else:
                    enzlist.append(str(name.group(1)))
                    self.database(name=str(name.group(1)),frontsite=str(enzyme.group(1)),backsite=str(enzyme.group(2))).put()
            except IndexError:
                continue
        return("completed")
22,754 | c96c0b3a2f118159b31866e5f347203920cdfcff | import math,sys,re,os,shutil
from xml.dom import minidom
import fileinput

#main part of analysis
# Usage check: expects exactly an input LHE file and a model description.
if len(sys.argv)!=3:
    print "Usage: reformatter.py <infile> model_description "
    print " : input file is backed up but overwritten "
    print " Last modified: Wed Apr 6 14:08:27 CDT 2011 "
    sys.exit(0)
else:
    print "Running reformatter.py to reformat Pythia LHE file"
    file = sys.argv[1]
    modelname = sys.argv[2]

#let xml find the event tags
try:
    xmldoc = minidom.parse(sys.argv[1])
except IOError:
    print " could not open file for xml parsing ",sys.argv[1]
    sys.exit(0)

# Keep a backup before overwriting the input in place.
shutil.copyfile(file,file+".bak")
reflist = xmldoc.firstChild
for ref in reflist.childNodes:
    if ref.nodeName=='header':
        # NOTE(review): `x` is never defined anywhere in this script, so this
        # raises NameError if a <header> node exists -- presumably a leftover;
        # the header is instead injected textually in the loop below. Confirm
        # intent before fixing.
        ref.appendChild(x)
    if ref.nodeName=='event':
        # Tag every event with the model name as a leading comment line.
        ref.firstChild.appendData("# model "+modelname+"\n")
# Serialize the modified document back over the original file.
t=xmldoc.toprettyxml(indent="",newl="")
f=open(file,'w')
f.write(t)
f.close()

# Second pass over the rewritten file: right after the closing comment
# marker, splice in a minimal <header><slha> block with a stable neutralino.
f=open(file,'r')
o=open(file+".tmp",'w')
for i, line in enumerate(f):
    if line.find("-->")>-1:
        o.write(line)
        o.write('<header>\n')
        o.write('<slha>\n')
        o.write("DECAY 1000022 0.0E+00\n")
        o.write('</slha>\n')
        o.write('</header>\n')
    else:
        o.write(line)
f.close()
o.close()
# Atomically-ish replace the original with the patched temp file.
os.remove(file)
os.rename(file+".tmp", file)
|
22,755 | e6f98f6d7490ab08816901086dd988ce076c4a23 | #jupyter-notebook
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
data = pd.read_csv('./data.csv',usecols=['F1','F2','Target‘])
data[:10]
data_arr = data.values
X = data_arr[:,:2]
y = data_arr[:,-1]
X.shape,y.shape
from sklearn.cluster import KMeans
km_clf = KMeans(3).fit(X)
km_clf.cluster_centers_*100
plt.scatter(X[:,0],X[:,1])
plt.scatter(km_clf.cluster_centers_[0,0],km_clf.cluster_centers_[0,1])
plt.scatter(km_clf.cluster_centers_[1,0],km_clf.cluster_centers_[1,1])
plt.scatter(km_clf.cluster_centers_[2,0],km_clf.cluster_centers_[2,1])
km_clf.predict(X)
lable_hash = {
1:1,
2:2,
0:3
}
lable_map = np.array([3,1,2])
predict = km_clf.predict(X)
predict
_y = lable_map[predict]
_y
np.sum(y == _y)/y.size
|
22,756 | 9a93271d1f9ac0f62fc02b58fa96a0a66d14f7c0 | import os
import stat
import time
import errno
class Worker(object):
def __init__(self, args, callback, extensions = None):
self.files_map = {}
self.callback = callback
self.extensions = extensions
self.config = args
if self.config.path is not None:
self.folder = os.path.realpath(args.path)
assert os.path.isdir(self.folder), "%s does not exists" % self.folder
assert callable(callback)
self.update_files()
for id, file in self.files_map.iteritems():
file.seek(os.path.getsize(file.name))
def __del__(self):
self.close()
def loop(self, interval=0.1, async=False):
while 1:
self.update_files()
for fid, file in list(self.files_map.iteritems()):
self.readfile(file)
if async:
return
time.sleep(interval)
def listdir(self):
ls = os.listdir(self.folder)
if self.extensions:
return [ff for ff in ls if any(x in ff for x in self.extensions) == False]
else:
return ls
@staticmethod
def tail(fname, window):
try:
f = open(fname, 'r')
except IOError, err:
if err.errno == errno.ENOENT:
return []
else:
raise
else:
BUFSIZ = 1024
f.seek(0, os.SEEK_END)
fsize = f.tell()
block = -1
data = ""
exit = False
while not exit:
step = (block * BUFSIZ)
if abs(step) >= fsize:
f.seek(0)
exit = True
else:
f.seek(step, os.SEEK_END)
data = f.read().strip()
if data.count('\n') >= window:
break
else:
block -= 1
return data.splitlines()[-window:]
def update_files(self):
ls = []
files = []
if self.config.files is not None:
for name in self.config.files:
files.append(os.path.realpath(name))
else:
for name in self.listdir():
files.append(os.path.realpath(os.path.join(self.folder, name)))
for absname in files:
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
for fid, file in list(self.files_map.iteritems()):
try:
st = os.stat(file.name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
self.unwatch(file, fid)
else:
raise
else:
if fid != self.get_file_id(st):
self.unwatch(file, fid)
self.watch(file.name)
for fid, fname in ls:
if fid not in self.files_map:
self.watch(fname)
def readfile(self, file):
lines = file.readlines()
if lines:
self.callback(file.name, lines)
def watch(self, fname):
try:
file = open(fname, "r")
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
print "[{0}] - watching logfile {1}".format(fid, fname)
self.files_map[fid] = file
def unwatch(self, file, fid):
lines = self.readfile(file)
print "[{0}] - un-watching logfile {1}".format(fid, file.name)
del self.files_map[fid]
if lines:
self.callback(file.name, lines)
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
    def close(self):
        """Close every watched file handle and empty the watch map."""
        for id, file in self.files_map.iteritems():
            file.close()
        self.files_map.clear()
|
22,757 | 3ce7619fdd57918e24ff21961a0ae5d10764fa48 | from django import forms
from django.contrib.auth import authenticate
from accounts.models import User as CustomUser
class LoginForm(forms.Form):
    """Email/password login form that authenticates during validation.

    The authenticated user is cached on the form and exposed via
    get_user() for the login view.
    """
    username = forms.EmailField(max_length=255, help_text="Must be your epita-issued email address", required=True,
                                widget=forms.TextInput(attrs={
                                    'placeholder': "yourname@epita.fr",
                                }))
    password = forms.CharField(max_length=255, required=True,
                               widget=forms.PasswordInput(attrs={
                                   'placeholder': 'examplepass',
                               }))

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        # Cache the user so get_user() can hand it to the login view.
        self.user_cache = authenticate(username=username, password=password)
        if not self.user_cache:
            # Fixed message wording ("no such email", was "not such email").
            raise forms.ValidationError("Login was invalid, either wrong password or no such email")
        elif not self.user_cache.is_active:
            raise forms.ValidationError("User is no longer active")
        elif not self.user_cache.is_registered:
            raise forms.ValidationError("User is not yet registered")
        return self.cleaned_data

    def get_user(self):
        """Return the user authenticated by clean(), or None if
        validation has not run yet (previously raised AttributeError)."""
        return getattr(self, 'user_cache', None)
class CustomUserCreationForm(forms.ModelForm):
    """Admin form for creating a user with a double password prompt."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = CustomUser
        fields = ('email', 'first_name', 'last_name', 'external_email', 'is_active', 'is_staff', 'is_superuser')

    def clean_password2(self):
        """Ensure both password entries agree; return the confirmed value."""
        first = self.cleaned_data.get("password1")
        confirmation = self.cleaned_data.get("password2")
        if first and confirmation and first != confirmation:
            raise forms.ValidationError("Passwords do not match")
        return confirmation

    def save(self, commit=True):
        """Hash the chosen password onto the model before saving."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class CustomUserChangeForm(forms.ModelForm):
    """Admin form for editing an existing user.

    NOTE(review): 'password' here is the plain model field, so the admin
    shows an editable password hash; Django convention is a
    ReadOnlyPasswordHashField — confirm intent.
    """
    class Meta:
        model = CustomUser
        fields = ['email', 'password', 'first_name', 'last_name', 'external_email', 'is_active', 'is_staff',
                  'is_superuser']
class ResetPasswordForm(forms.Form):
    """Form asking for a new password twice, used to (re)set a user's
    password and mark the account as registered."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={
        'class': 'form-control',
    }))
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput(attrs={
        'class': 'form-control',
    }))

    def clean(self):
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if not password1 or not password2:
            raise forms.ValidationError("Must fill both passwords")
        if password1 != password2:
            raise forms.ValidationError("Passwords do not match")
        # Django expects clean() to hand back the cleaned data.
        return self.cleaned_data

    def save(self, **kwargs):
        """Validate the form and store the new password on kwargs['user'].

        Raises forms.ValidationError when no user is supplied or the
        data does not validate.
        """
        # .get() instead of [] so a missing user raises the intended
        # ValidationError rather than an unhandled KeyError.
        user = kwargs.get('user')
        if not user:
            raise forms.ValidationError("user does not exist")
        try:
            self.full_clean()
        except forms.ValidationError as err:
            # Chain the original error so the real cause is not lost.
            raise forms.ValidationError("could not clean the data in form") from err
        new_password = self.cleaned_data['password1']
        user.set_password(new_password)
        user.set_registered()
        user.save()
|
22,758 | 80fc5909161a99e5998d99e58e3833b46cf4e362 | from brian import *
# Two mutually inhibiting integrate-and-fire neurons with an adaptive
# divisor k; brian's star import supplies ms/mV/NeuronGroup/etc.
tau = 1*ms
tr = 3*mV
v0 = 0*mV
# v is driven toward threshold tr, slowed by k; k decays back toward 0
# and is bumped on every spike (see reset), so fast firing self-limits.
eqs = '''
dv/dt = (tr-v)/(.5*k)/tau : volt
dk/dt = -4*k/(1*second) : volt/volt
'''
reset = '''
k+=1
v=v0
'''
IF = NeuronGroup(2, model=eqs, reset=reset, threshold=tr-.1*mV)
IF.v=[v0,2*mV]
IF.k=[1,1]
# mutual inhibition between the two neurons
C = Connection(IF, IF, 'v')
C[0,1]=C[1,0]=-10*mV
##C[0,0]=C[1,1]=0
Mv = StateMonitor(IF, 'v', record=True)
Mk = StateMonitor(IF, 'k', record=True)
run(300 * ms)
# one subplot per neuron: membrane potential (mV) and the unitless k trace
subplot(211)
plot(Mv.times / ms, Mv[0] / mV)
plot(Mv.times / ms, Mk[0])
subplot(212)
plot(Mv.times / ms, Mv[1] / mV)
plot(Mv.times / ms, Mk[1])
show()
22,759 | bccc38f374ab8759675749844d1b5326fbd59dc1 | N = int(input())
# For each of the N test cases, print the characters at even indices,
# a single space, then the characters at odd indices.
for _ in range(N):
    line = input()
    evens = line[::2]
    odds = line[1::2]
    print(evens + " " + odds)
22,760 | 5846d6501c1734570560a2d6979fd0f7a2e2434d | name = input("enter file name: ")
# Average the X-DSPAM-Confidence values found in the given mail file.
file = open(name)
count = 0
total = 0.0
with file:  # ensure the handle is closed (was previously leaked)
    for line in file:
        # only spam-confidence header lines contribute to the average
        if not line.startswith("X-DSPAM-Confidence:"):
            continue
        count = count + 1
        colon = line.find(":")
        total = total + float(line[colon + 1:])
# Compute the average once after the loop (was recomputed per line) and
# guard against a file with no matching lines (ZeroDivisionError before).
final = total / count if count else 0
print("average spam confidence:", final)
22,761 | 7fd70a9b83f180a1c31ec48dae0e33a3ce313f73 | import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from object_tracking.ahc_ete._ahc import HeapCluster
from object_tracking.ahc_ete import AhcMetric
from object_tracking.deep_sort.kalman_filter import KalmanFilter
from object_tracking.data_hub import Query
import smrc.utils
class TemporalDist(Query):
    """Temporal separation between two heap clusters of
    (image_id, global_bbox_id) entries."""

    @staticmethod
    def is_unique_list(my_list):
        """True when the list holds no duplicate entries."""
        return len(set(my_list)) == len(my_list)

    @staticmethod
    def temporal_distance(heap_cluster1, heap_cluster2, allow_negative=False):
        """Frame gap between two heap clusters.

        Overlapping frame ids yield inf (or -overlap when
        allow_negative); otherwise the gap between the clusters, or 0
        when their frame ranges interleave without duplicates.
        """
        # key, image_id; value, global_bbox_id for a heap_cluster
        frames_a = [entry[0] for entry in heap_cluster1]
        frames_b = [entry[0] for entry in heap_cluster2]
        # duplicate ids across (or within) the lists count as overlap
        overlap = len(frames_a) + len(frames_b) - len(set(frames_a + frames_b))
        if overlap > 0:
            return -overlap if allow_negative else float('inf')
        # heap invariant: index 0 holds the smallest frame id
        last_a, last_b = max(frames_a), max(frames_b)
        if last_a < frames_b[0]:
            return frames_b[0] - last_a
        if last_b < frames_a[0]:
            return frames_a[0] - last_b
        return 0
class AppearanceDist(Query):
    """Appearance (cosine) distances between clusters of detections."""

    def appearance_dist_for_two_heap_clusters(self, heap_cluster1, heap_cluster2, linkage):
        """Linkage distance between two heap clusters, compared through
        the global bbox ids they carry."""
        ids_a = HeapCluster.heap_cluster_global_bbox_id_list(heap_cluster1)
        ids_b = HeapCluster.heap_cluster_global_bbox_id_list(heap_cluster2)
        return self.appearance_dist_for_two_clusters(cluster1=ids_a, cluster2=ids_b, linkage=linkage)

    def appearance_dist_for_two_clusters(self, cluster1, cluster2, linkage):
        """Reduce the pairwise cosine-distance matrix to one value via
        the given linkage rule."""
        pairwise = self.appearance_dist_matrix_for_two_clusters(cluster1, cluster2)
        return AhcMetric.cluster_distance_by_linkage(dist_matrix_or_list=pairwise, linkage=linkage)

    def appearance_dist_matrix_for_two_clusters(self, cluster1, cluster2):
        """Cosine-distance matrix (1 - cosine similarity) between the
        feature vectors of the two clusters."""
        feats_a = np.array([self.get_feature(gid) for gid in cluster1])
        feats_b = np.array([self.get_feature(gid) for gid in cluster2])
        return 1 - cosine_similarity(feats_a, feats_b)
# modified from ..deep_sort.track.Track
class KFTrack:
"""A single target track with state space `(x, y, a, h)` and associated
velocities, where `(x, y)` is the center of the bounding box, `a` is the
aspect ratio and `h` is the height.
Parameters
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
Attributes
----------
mean : ndarray
Mean vector of the initial state distribution.
covariance : ndarray
Covariance matrix of the initial state distribution.
"""
def __init__(self, mean, covariance, init_global_bbox_id=None):
self.mean = mean
self.covariance = covariance
self.global_bbox_id_list = []
if init_global_bbox_id is not None:
self.global_bbox_id_list = [init_global_bbox_id]
def predict(self, kf):
"""Propagate the state distribution to the current time step using a
Kalman filter prediction step.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
"""
self.mean, self.covariance = kf.predict(self.mean, self.covariance)
def update(self, kf, detection_bbox_xyah):
"""Perform Kalman filter measurement update step and update the feature
cache.
Parameters
----------
kf : kalman_filter.KalmanFilter
The Kalman filter.
detection_bbox_xyah : the new observed detection
"""
self.mean, self.covariance = kf.update(
self.mean, self.covariance, detection_bbox_xyah)
def gating_distance_between_kf_and_detections(self, kf, detections_bbox_xyah, only_position=False):
"""
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in
format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
:param kf:
:param detections_bbox_xyah:
:param only_position:
:return:
"""
measurements = np.asarray(detections_bbox_xyah).reshape(-1, 4)
distance = kf.gating_distance(
self.mean, self.covariance,
measurements=measurements,
only_position=only_position
)
return distance
def mahalanobis_dist(self, kf, detection_bbox_xyah, only_position=False):
"""Return the gating distance between kf and a single detection.
:param kf:
:param detection_bbox_xyah:
:param only_position:
:return:
"""
distance = self.gating_distance_between_kf_and_detections(
kf, detection_bbox_xyah, only_position=only_position
).item()
return distance
class KFDist(Query):
    """Kalman-filter-based motion distance within one cluster: how well
    a single constant-velocity KF fits the cluster's bboxes in frame
    order."""
    def kf_dist(
            self, heap_cluster, skip_empty_detection=True, linkage='complete'
    ):
        """KF fitting distance for a heap cluster (delegates to
        _kf_distance on the cluster's global bbox ids)."""
        cluster = HeapCluster.heap_cluster_global_bbox_id_list(heap_cluster)
        return self._kf_distance(
            cluster, skip_empty_detection=skip_empty_detection, linkage=linkage
        )
    # def kf_dist_left_right(
    #         self, heap_cluster1, heap_cluster2, skip_empty_detection=True, linkage='single'
    # ):
    #     min_image_id1, min_image_id2 = heap_cluster1[0][0], heap_cluster2[0][0]
    #     cluster1, cluster2 = [x[1] for x in heap_cluster1], [x[1] for x in heap_cluster2]
    #     # image ids, left [29, 30, 31], right [29], then [29] should not be the right cluster as the gating_dist_list
    #     # return [].
    #     if min_image_id1 < min_image_id2 or \
    #             (min_image_id1 == min_image_id2 and len(cluster1) < len(cluster2)):
    #         return self.kalman_filter_tracker.kf_distance_left_right(
    #             cluster_left=cluster1, cluster_right=cluster2,
    #             skip_empty_detection=skip_empty_detection, linkage=linkage
    #         )
    #     else:  # min_image_id1 > min_image_id2:
    #         return self.kalman_filter_tracker.kf_distance_left_right(
    #             cluster_left=cluster2, cluster_right=cluster1,
    #             skip_empty_detection=skip_empty_detection, linkage=linkage
    #         )
    def _kf_distance(self, cluster, skip_empty_detection=True, linkage='complete'):
        """
        :param cluster:
        :param skip_empty_detection: if True, we do not update kf state once no observation comes, then
            the distance of the state of kf with future new detection is very likely to be large, as no state
            update is conducted. So 'True' is a strong condition for object_tracking, i.e.,
            no allowed sharp change in location.
            In general, we should not skip empty detection for kf in the general sense.
        :param linkage:
        :return:
        """
        cluster = self.sort_cluster_based_on_image_id(cluster)
        image_id_list = self.get_image_id_list_for_cluster(cluster)
        # seed the filter with the first (earliest) bbox of the cluster
        kf, kf_track = self.init_kf_track_with_one_bbox(global_bbox_id=cluster[0])
        gating_distance_list = []
        # for image_id in range(min(image_id_list)+1, max(image_id_list)+1):
        for i in range(1, len(cluster)):
            # number of frames elapsed since the previous observation
            times_update = image_id_list[i] - image_id_list[i-1]
            kf_track.predict(kf)
            if times_update > 1 and not skip_empty_detection:
                # run extra predict steps for frames with no detection
                for j in range(1, times_update):
                    kf_track.predict(kf)
            bbox = self.get_single_bbox(cluster[i])
            detection_bbox_xyah = smrc.utils.bbox_to_xyah(bbox, with_class_index=False)
            distance = kf_track.mahalanobis_dist(
                kf=kf, detection_bbox_xyah=detection_bbox_xyah
            )
            # print(f'kf fitting the {ind}th bbox on the cluster, distance = {distance} ...')
            gating_distance_list.append(distance)
            # update the kalman filter only if new observations arrive
            kf_track.update(kf, detection_bbox_xyah)
        return AhcMetric.cluster_distance_by_linkage(
            dist_matrix_or_list=gating_distance_list,
            linkage=linkage
        )
    def init_kf_track_with_one_bbox(self, global_bbox_id):
        """Create a fresh KalmanFilter and a KFTrack initialised from the
        bbox behind *global_bbox_id*; returns (kf, kf_track)."""
        kf = KalmanFilter()
        mean, covariance = kf.initiate(
            smrc.utils.bbox_to_xyah(self.get_single_bbox(global_bbox_id), with_class_index=False)
        )
        # initialize kf with the first bbox in cluster_left
        kf_track = KFTrack(
            mean=mean, covariance=covariance, init_global_bbox_id=global_bbox_id
        )
        return kf, kf_track
class PairwiseClusterMetric(KFDist, TemporalDist, AppearanceDist):
    """Facade combining the Kalman-filter, temporal and appearance
    cluster-distance mixins into a single metric object."""
    def __init__(self):
        super().__init__()
|
22,762 | fd5e4a4b966e784d300b69b94555cdc59deb123d | #-- powertools.test
"""
fixtures and test utilities
"""
#-------------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------------#
|
22,763 | c3eeeeac2d9996beb7d6cb4562191875f365e848 | # -*- coding: utf-8 -*-
from nose.tools import assert_true
from common.logging import loggers
class TestUnstructuredDataLogger:
    """Tests for loggers.UnstructuredDataLogger's handling of `extra`."""
    def test_can_identify_extra(self):
        # a record built without extra data should still expose the
        # `_extra` attribute the logger attaches
        log_record = TestUnstructuredDataLogger.get_log_record()
        assert_true(hasattr(log_record, '_extra'))
    @staticmethod
    def get_log_record(extra=None):
        """Build a LogRecord through the custom logger, with dummy values
        for the positional makeRecord arguments."""
        logger = loggers.UnstructuredDataLogger(name='')
        log_record = logger.makeRecord(name='',
                                       level='',
                                       fn='',
                                       lno='',
                                       msg='',
                                       args='',
                                       exc_info='',
                                       extra=extra)
        return log_record
22,764 | e5a5d78d50bf20fa29708a05820b9eeccd7c44ad | from PyQt5.QtCore import QObject, pyqtSignal, QTimer
from time import sleep
import time
import random
class GameController(QObject):
    """Qt controller gluing the card-game model to its widget.

    Wires widget signals to model actions for the human player, and
    scripts the computer player's moves by synthesising UI events on a
    QTimer (so the computer visibly "plays" the same interface).
    """
    signal_round_ended = pyqtSignal()
    signal_anyone_gone_out = pyqtSignal()
    def __init__(self, model, widget):
        super().__init__()
        self._model = model
        self._widget = widget
        if not self._model.get_data('dealt'):
            self.deal()
        # check if current turn is computer
        # delay (ms) between scripted computer UI actions
        self.computer_speed = 1000
        # NOTE(review): time.clock() was removed in Python 3.8; this
        # raises AttributeError on modern interpreters. time.perf_counter()
        # or time.time() is the usual replacement — confirm target version.
        random.seed(time.clock())
        self.which_pile_selected = 0
        self.computer_action()
        self._widget.load_from_model()
        self._widget.signal_discard_btn_clicked.connect(self.discard_selected)
        self._widget._ui.btn_drop.clicked.connect(self.drop_selected)
        self.connect_signals()
    # connect signals for pile widgets
    def connect_signals(self):
        # NOTE(review): the bare excepts appear to swallow failures when a
        # pile widget is absent/rebuilt; narrowing them to the expected
        # exception type would avoid hiding unrelated bugs.
        try:
            self._widget.draw_pile_wgt.signal_pile_clicked.connect(self.pickup_from_draw)
        except:
            pass
        try:
            self._widget.discard_pile_wgt.signal_pile_clicked.connect(self.pickup_from_discard)
        except:
            pass
        for drop_hand_btn in self._widget.drop_hand_btns:
            drop_hand_btn.signal_clicked.connect(self.cancel_drop)
    # deal cards to the player
    def deal(self):
        self._model.deal()
    # put down the selected cards of the player hand
    def drop_selected(self):
        self._model.drop_selected()
        self._widget.load_from_model()
        self.connect_signals()
    # re-hold the put-down cards
    def cancel_drop(self, drop_hand):
        self._model.cancel_drop(drop_hand)
        self._widget.load_from_model()
        self.connect_signals()
    # pick a card from the draw pile
    def pickup_from_draw(self):
        self._model.pickup_from_draw_pile()
        self._model.update_data('cur_state', 'picked')
        self._widget.load_from_model()
        self.connect_signals()
    # pick a card from the discard pile
    def pickup_from_discard(self):
        self._model.pickup_from_discard_pile()
        self._model.update_data('cur_state', 'picked')
        self._widget.load_from_model()
        self.connect_signals()
    # slot for what to do when the "Discard" button is clicked
    def discard_selected(self, sel_card_index):
        """Discard the selected card, check for go-out / round end, then
        advance the turn (possibly triggering the computer)."""
        self._model.discard_from_cur_hand(sel_card_index)
        self._model.update_data('cur_state', 'discarded')
        if self.run_block_match() == True:
            if not self._model.get_data('anyone_gone_out'):
                self._model.set_gone_out_player()
                self.signal_anyone_gone_out.emit()
                # show dialog (xxx matches all runs and blocks, he goes out. others have only 1 chance)
        if self._model.is_cur_round_ended() == False:
            self.next_player()
        else:
            self.onRoundEnded()
            return
        self.computer_action()
        self._widget.load_from_model()
        self.connect_signals()
    # goto next player
    def next_player(self):
        self._model.next_player()
    # check if all cards match runs and blocks
    def run_block_match(self):
        return self._model.run_block_match()
    # round ended
    def onRoundEnded(self):
        self._model.calc_round_score()
        # self._model.init_new_round(False)
        self.signal_round_ended.emit()
    # computer actions
    def computer_action(self):
        """If it is the computer's turn, lock the human controls and
        start the timed sequence of synthesised UI events."""
        # check if current turn is computer
        cur_player = self._model.get_data('cur_turn')
        if cur_player.get_player_type() == "computer":
            self.disable_all_game_btns()
            self.undo_cur_run_blocks()
            self._widget.set_computer_turn(True)
            QTimer.singleShot(self.computer_speed*2, self.cause_pickup_from_pile1)
        else:
            self.enable_game_btns()
            self._widget.set_computer_turn(False)
    def cause_pickup_from_pile1(self):
        """Step 1 of the computer turn: hover a pile (draw pile ~7/11 of
        the time, discard pile otherwise)."""
        # pick up from draw pile or discard pile
        self.which_pile_selected = random.randint(0,10)
        if self.which_pile_selected in range(4,11):
            # draw pile
            self._widget.draw_pile_wgt.causeEnterEvent()
        else:
            # discard pile
            self._widget.discard_pile_wgt.causeEnterEvent()
        QTimer.singleShot(self.computer_speed, self.cause_pickup_from_pile2)
    def cause_pickup_from_pile2(self):
        """Step 2: click the pile chosen in step 1, then snapshot the
        runs/books the computer can lay down."""
        if self.which_pile_selected in range(4,11):
            self._widget.draw_pile_wgt.causeMousePressEvent()
            self._widget.draw_pile_wgt.causeLeaveEvent()
        else:
            self._widget.discard_pile_wgt.causeMousePressEvent()
            self._widget.discard_pile_wgt.causeLeaveEvent()
        self.run_books = []
        self.run_books = self._model.get_cur_runs_books()
        QTimer.singleShot(self.computer_speed, self.cause_pickup_run_book)
    def cause_pickup_run_book(self):
        """Step 3: select and drop one pending run/book per tick; when
        none remain, pick the highest-value discardable card."""
        card_wgts = self._widget.hand_wgt._card_wgts
        # books = self._model.get_cur_books()
        # for book_rank in books:
        #     for book_card in books[book_rank]:
        #         for card_wgt in card_wgts:
        #             if book_card==card_wgt.get_card():
        #                 card_wgt.causeCardPressEvent()
        #     QTimer.singleShot(self.computer_speed, self.cause_drop)
        #     return
        # runs = self._model.get_cur_runs()
        # for run in runs:
        #     for run_card in run:
        #         for card_wgt in card_wgts:
        #             if run_card==card_wgt.get_card():
        #                 card_wgt.causeCardPressEvent()
        #     QTimer.singleShot(self.computer_speed, self.cause_drop)
        #     return
        if len(self.run_books)>0:
            run_book = self.run_books.pop()
            for run_book_card in run_book:
                for card_wgt in card_wgts:
                    if run_book_card==card_wgt.get_card():
                        card_wgt.causeCardPressEvent()
            QTimer.singleShot(self.computer_speed, self.cause_drop)
        else:
            # if there is no more books or runs, now discard
            cur_player = self._model.get_data('cur_turn')
            cur_cards = cur_player.get_hand().get_cards()
            anyone_gone_out = self._model.get_data('anyone_gone_out')
            max_card_id = 0
            max_card_value = 0
            for cur_card_id in range(len(cur_cards)):
                if not anyone_gone_out:
                    # hold on to jokers/wilds until someone has gone out
                    if cur_cards[cur_card_id].is_joker() or cur_cards[cur_card_id].is_wild():
                        continue
                if max_card_value < cur_cards[cur_card_id].get_value():
                    max_card_value = cur_cards[cur_card_id].get_value()
                    max_card_id = cur_card_id
            self.cause_discard_id = max_card_id
            card_wgts[max_card_id].causeCardPressEvent()
            QTimer.singleShot(self.computer_speed, self.cause_discard)
    def cause_drop(self):
        # drop the cards selected in cause_pickup_run_book, then loop back
        self.drop_selected()
        QTimer.singleShot(self.computer_speed, self.cause_pickup_run_book)
    def cause_discard(self):
        self.discard_selected(self.cause_discard_id)
    def disable_all_game_btns(self):
        self._widget._ui.btn_discard.setEnabled(False)
        self._widget._ui.btn_drop.setEnabled(False)
        self._widget._ui.btn_quit.setEnabled(False)
        self._widget._ui.btn_save.setEnabled(False)
        self._widget._ui.btn_view_scores.setEnabled(False)
    def enable_game_btns(self):
        # note: discard/drop stay disabled until the view re-enables them
        self._widget._ui.btn_quit.setEnabled(True)
        self._widget._ui.btn_save.setEnabled(True)
        self._widget._ui.btn_view_scores.setEnabled(True)
    def undo_cur_run_blocks(self):
        cur_player = self._model.get_data('cur_turn')
        cur_player.cancel_all_drops()
        # cur_drop_hands = cur_player.get_drop_hands()
        # for cur_drop_id in range(len(cur_drop_hands)):
        #     cur_player.cancel_drop(cur_drop_hands[cur_drop_id])
|
22,765 | 41c21fcc5abec5b28ec8207cd411cdb88aad870f | import graphene
from flask_graphql_auth import (AuthInfoField, query_jwt_required, mutation_jwt_refresh_token_required, mutation_jwt_required)
from app.utilities import (create_tokens)
from .models import Person
from .service import PeopleService
import app.Building.graphql_types
import app.Product.graphql_types
import app.People.graphql_types
from app.Search.service import SearchService
service = PeopleService()
class Character(graphene.Interface):
    """Shared GraphQL interface for person-like types: identity, contact
    details and org-chart links (line, team, manager)."""
    id = graphene.ID(required=True)
    title = graphene.String(required=True)
    firstname = graphene.String(required=True)
    lastname = graphene.String(required=True)
    bio = graphene.String()
    knownAs = graphene.String()
    avatar = graphene.String()
    mobile = graphene.String()
    email = graphene.String(required=True)
    line = graphene.List(lambda: Character)
    team = graphene.List(lambda: Character)
    manager = graphene.Field(lambda: Character)
    deactivated = graphene.String()
class PersonType(graphene.ObjectType):
    '''Person Type, represents a GraphQL version of a person entity'''
    class Meta:
        interfaces = (Character,)
    products = graphene.List(lambda: app.Product.graphql_types.ProductType)
    location = graphene.List(lambda: app.Building.graphql_types.BuildingType)
    def resolve_team(self, info, **args):
        # direct reports, wrapped back into PersonType instances
        return [PersonType(**Person.wrap(member).as_dict()) for member in service.fetch_team(person=self)]
    def resolve_line(self, info, **args):
        # full management line for this person
        return [PersonType(**Person.wrap(member).as_dict()) for member in service.fetch_line(person=self)]
    def resolve_manager(self, info, **args):
        manager = service.fetch_manager(person=self)
        if manager is not None:
            return PersonType(**Person.wrap(manager).as_dict())
        return None
    def resolve_products(self, info, **args):
        # TODO: unimplemented stub — resolves to None
        pass
    def resolve_location(self, info, **args):
        # TODO: unimplemented stub — resolves to None
        pass
class ProtectedPersonType(graphene.ObjectType):
    """Sensitive person details (leave, salary, dates) meant to be served
    only to authorized viewers, alongside the public PersonType."""
    person = graphene.Field(lambda: PersonType)
    leave_items = graphene.List(lambda: graphene.String)
    salary_level = graphene.Int()
    next_of_keen = graphene.String()
    birth_date = graphene.Date()
    employment_anniversary = graphene.Date()
    authorization_key = graphene.String()
    def resolve_person(self, info, **args):
        return PersonType(**Person.wrap(self.person).as_dict())
class AuthorizationType(graphene.ObjectType):
    """JWT token bundle returned by the Authenticate mutation."""
    id_token = graphene.String()
    access_token = graphene.String()
    refresh_token = graphene.String()
class CreatePerson(graphene.Mutation):
    """Mutation creating and persisting a new Person."""
    class Arguments:
        title = graphene.String(required=True)
        firstname = graphene.String(required=True)
        lastname = graphene.String(required=True)
        bio = graphene.String()
        knownAs = graphene.String()
        avatar = graphene.String()
        mobile = graphene.String()
        email = graphene.String(required=True)
    success = graphene.Boolean()
    person = graphene.Field(lambda: PersonType)
    # @mutation_jwt_required
    # NOTE(review): the JWT guard above is commented out, so this
    # mutation is currently unauthenticated — confirm this is intended.
    def mutate(self, info, **kwargs):
        person = Person(**kwargs)
        person.save()
        return CreatePerson(person=person, success=True)
class UpdatePerson(graphene.Mutation):
    """Mutation updating an existing Person by id.

    NOTE(review): mutate() builds and saves a Person from the arguments
    without first checking that the id exists — verify Person.save()
    performs an upsert, otherwise this may create records.
    """
    class Arguments:
        id = graphene.ID(required=True)
        title = graphene.String(required=True)
        firstname = graphene.String(required=True)
        lastname = graphene.String(required=True)
        bio = graphene.String()
        knownAs = graphene.String()
        avatar = graphene.String()
        mobile = graphene.String()
        email = graphene.String(required=True)
    success = graphene.Boolean()
    person = graphene.Field(lambda: PersonType)
    # @mutation_jwt_required
    # NOTE(review): JWT guard commented out — mutation is unauthenticated.
    def mutate(self, info, **kwargs):
        person = Person(**kwargs)
        person.save()
        return UpdatePerson(person=person, success=True)
class Authenticate(graphene.Mutation):
    """Mutation exchanging an email address for a JWT token bundle."""
    class Arguments:
        email = graphene.String(required=True)
    success = graphene.Boolean()
    authorization = graphene.Field(lambda: AuthorizationType)

    def mutate(self, info, **kwargs):
        """Look the email up via the search service; on a hit, mint
        tokens for the matched identity."""
        address = kwargs.get('email')
        search_svc = SearchService()
        hits = [
            PersonType(**Person.wrap(match).as_dict())
            for match in search_svc.filter(query=address, limit=1, skip=0)
        ]
        if not hits:
            return Authenticate(authorization=None, success=False)
        tokens = AuthorizationType(**create_tokens(identity=hits[0].id))
        return Authenticate(authorization=tokens, success=True)
|
22,766 | 974bb1865142aaf3e8e407aacbe428a59ea9bab3 | import pandas as pd
import scipy
import numpy as np
# Topology-based Mass Functions (TopMF)
# Topology-based Mass Functions (TopMF)
class TopMF:
    """Compute a per-sample 'mass' from the topology of a labelled dataset.

    Parameters
    ----------
    mass_type : str
        One of 'SEP' (separation), 'COH' (cohesion), 'WPC'
        (weighted per class) or 'CC' (circled by its own class).
    features : ndarray, shape (n_samples, n_features)
    labels : array-like, shape (n_samples,)
    """
    def __init__(self, mass_type, features, labels):
        self.mass_type = mass_type
        self.features = features
        self.labels = labels
        self.mass = []

    def calculate_mass(self):
        """Return the list of masses, one per sample.

        Raises KeyError for an unknown mass type.
        """
        # dispatch table instead of a per-sample if/elif chain
        dispatch = {
            'SEP': self.separation,
            'COH': self.cohesion,
            'WPC': self.weighted_per_class,
            'CC': self.circled_by_its_own_class,
        }
        try:
            func = dispatch[self.mass_type]
        except KeyError:
            raise KeyError('Mass function not implemented')
        for xq, class_q in zip(self.features, self.labels):
            self.mass.append(func(xq, class_q))
        return self.mass

    def separation(self, xq, class_q):
        """Mass 1 / log2(sum of distances from xq to all OTHER-class points)."""
        # bug fix: `import scipy` alone does not expose scipy.spatial
        from scipy.spatial.distance import cdist
        distances = cdist(xq.reshape(1, -1), self.features, 'euclidean')
        other_sum = sum(dist if label != class_q else 0
                        for label, dist in zip(self.labels, distances[0]))
        return 1 / (np.log2(other_sum))

    def cohesion(self, xq, class_q):
        """Mass 1 / log2(sum of distances from xq to all SAME-class points)."""
        from scipy.spatial.distance import cdist
        distances = cdist(xq.reshape(1, -1), self.features, 'euclidean')
        same_sum = sum(dist if label == class_q else 0
                       for label, dist in zip(self.labels, distances[0]))
        return 1 / (np.log2(same_sum))

    def weighted_per_class(self, xq, class_q):
        """Mass log2(M / n_q + 1), where n_q is the size of xq's class and
        M the size of the majority class."""
        n_q = sum(1 for label in self.labels if label == class_q)
        unique_elements, counts_elements = np.unique(self.labels, return_counts=True)
        # only the majority-class size matters (the old class_maj was unused)
        M = int(counts_elements.max())
        return np.log2((M / n_q) + 1)

    def circled_by_its_own_class(self, xq, class_q, k=7):
        """Mass log2(SNk + 2), where SNk counts how many of the `k`
        nearest neighbours of xq (including itself) share its class.

        `k` generalises the previously hard-coded neighbourhood size 7.
        """
        from scipy.spatial.distance import cdist
        distances = cdist(xq.reshape(1, -1), self.features, 'euclidean')
        ranked = sorted(zip(self.labels, distances[0]), key=lambda pair: pair[1])
        SNk = sum(1 for label, _ in ranked[:k] if label == class_q)
        return np.log2(SNk + 2)
if __name__ == "__main__":
    # Smoke test on the iris dataset (row 0 is a header, last column the
    # class label).
    df = pd.read_csv("/datasets/iris/iris.csv", header=None)
    features, labels = df.iloc[1:, 0:-1].to_numpy(), df.iloc[1:, -1].to_numpy()
    # Bug fix: the class is named TopMF, not Mass (NameError before).
    mass = TopMF("SEP", features, labels)
    print(mass.calculate_mass())
22,767 | a4d8ed94efcb546c636ef654522f36ccaf43b60b | import pyverdict
verdict = pyverdict.presto('localhost', 'hive', 'jiangchen', port=9080)
# verdict.sql('use tpch10g')
query = """bypass select
n_name,
sum(l_extendedprice * (1 - l_discount)) as revenue
from
tpch10g.customer,
tpch10g.orders,
tpch10g.lineitem,
tpch10g.supplier,
tpch10g.nation,
tpch10g.region
where
c_custkey = o_custkey
and l_orderkey = o_orderkey
and l_suppkey = s_suppkey
and c_nationkey = s_nationkey
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'ASIA'
and o_orderdate >= date '1994-12-01'
and o_orderdate < date '1995-12-01'
group by
n_name
order by
revenue desc;"""
df = verdict.sql(query)
print(df)
|
22,768 | e6a469faefb873ba1b23865983536b458e1f1f00 | import get_datasets as gd
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from collections import namedtuple
import re
#PANDAS
from pandas.core.frame import DataFrame
import pandas as pd
import igraph as ig
from enum import Enum
#SKLEARN
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
#PLOTLY
import plotly.tools
import plotly.plotly as py
import plotly.io as pio
import plotly.graph_objs as go
from asn1crypto.core import InstanceOf
import sklearn
import os
# SECURITY(review): hard-coded Plotly credentials checked into source —
# this API key should be rotated and loaded from the environment instead.
plotly.tools.set_credentials_file(username='isonettv', api_key='2Lg1USMkZAHONqo82eMG')
'''
The mode used in a cycle. Can be one of:
TRAIN: to use when training the model
TEST: to use when testing the model
'''
Mode = Enum("Mode","TRAIN TEST PRED")
'''
Neighborhood function h_ij to be used. Can be one of:
GAUSSIAN: Uses a gaussian with decay equal to self.sigma on the neighborhood
CONSTANT: 1 if in neighborhood, 0 otherwise
'''
NFunc = Enum("NFunc","GAUSSIAN CONSTANT")
'''
Initialization of the initial points.Can be one of:
RANDOM: Initializes each points randomly inside (-self.init_maxval,+self.init_maxval)
PCAINIT: Uses the 2 principal components to unfold a grid
'''
InitMode = Enum("InitMode","RANDOM PCAINIT")
'''
Configuration for the plot operation.Can be one of:
CLASS_COLOR: Shows the class attr. of unlabeled points through color
CLASS_NOCOLOR: All unlabeled points have the same color
'''
PlotMode = Enum("PlotMode","CLASS_COLOR CLASS_NOCOLOR")
'''
Configuration for grid type. Can be one of:
RECTANGLE: Use rectangular grid
HEXAGON: Use hexagonal grid
'''
GridMode = Enum("GridMode","RECTANGLE HEXAGON")
class som(object):
'''
Creates a Self-Organizing Map (SOM).
It has a couple of parameters to be selected by using the "args" object. These include:
self.N1: Number of nodes in each row of the grid
self.N2: Number of nodes in each column of the grid
self.eps_i, self.eps_f: initial and final values of epsilon. The current value of
epsilon is given by self.eps_i * (self.eps_f/self.eps_i)^p, where p is percent
of completed iterations.
self.sigma_i, self.sigma_f: initial and final values of sigma. The current value of
epsilon is given by self.eps_i * (self.eps_f/self.eps_i)^p, where p is percent
of completed iterations.
self.ntype: Neighborhood type
self.plotmode: which plot to make
self.initmode: how to initialize grid
self.gridmode: which type of grid
self.ds_name: Name of dataset (iris,isolet,wine,grid)
'''
''' Initializes the Growing Neural Gas Network
@param sess: the current session
@param args: object containing arguments (see main.py)
'''
    def __init__(self, sess, args): # Parameters
        """Configure the SOM from `args`, load and normalize the chosen
        dataset, precompute its PCA projection for plotting, and build
        the TF graph and placeholders."""
        self.sess = sess
        self.run_id = args.run_id
        #Number of nodes in each row of the grid
        self.N1 = args.n1
        #Number of nodes in each column of the grid
        self.N2 = args.n2
        #Initial and final values of epsilon
        self.eps_i = args.eps_i
        self.eps_f = args.eps_f
        #Initial and final values of sigma
        self.sigma_i = args.sigma_i
        self.sigma_f = args.sigma_f
        #Neighborhood type
        self.ntype = NFunc[args.ntype]
        #Which plot to make
        self.plotmode = PlotMode[args.plotmode]
        #Grid Mode
        self.gridmode = GridMode[args.gridmode]
        self.nsize = 1
        #Which way to initialize points
        self.initmode = InitMode[args.initmode]
        #dataset chosen
        self.ds_name = args.dataset
        #Total number of iterations
        self.n_iter = args.n_iter
        #Number of iterations between plot op
        self.plot_iter = args.plot_iter
        # run metadata kept for logging/reporting
        self.characteristics_dict = \
            {"dataset":str(self.ds_name),
             "num_iter":self.n_iter,
             "n1":self.N1,
             "n2":self.N2,
             "eps_i":self.eps_i,
             "eps_f":self.eps_f,
             "sigma_i":self.sigma_i,
             "ntype":self.ntype.name,
             "initmode":self.initmode.name,
             "gridmode":self.gridmode.name,
             "run_id":self.run_id
             }
        #Get datasets
        if args.dataset == "isolet":
            temp = gd.get_isolet(test_subset= args.cv)
        elif args.dataset == "iris":
            temp = gd.get_iris(args.cv)
        elif args.dataset == "wine":
            temp = gd.get_wine(args.cv)
        elif args.dataset == "grid" or args.dataset == "box":
            # "box" initially loads the grid data; it is swapped for the
            # box dataset further below, after normalization/PCA
            temp = gd.get_grid()
        else:
            raise ValueError("Bad dataset name")
        #Create Dataset
        if isinstance(temp,dict) and 'train' in temp.keys():
            # train/test splits are merged: the SOM is unsupervised
            self.ds = temp["train"].concatenate(temp["test"])
        else:
            self.ds = temp
        #Store number of dataset elements and input dimension
        self.ds_size = self.getNumElementsOfDataset(self.ds)
        self.ds_inputdim = self.getInputShapeOfDataset(self.ds)
        #Normalize dataset
        temp = self.normalizedDataset(self.ds)
        self.ds = temp["dataset"]
        df_x_normalized = temp["df_x"]
        self.Y = temp["df_y"]
        #Get PCA for dataset
        print("Generating PCA for further plotting...")
        self.pca = PCA(n_components=3)
        self.input_pca = self.pca.fit_transform(df_x_normalized)
        self.input_pca_scatter = self.inputScatter3D()
        # NOTE(review): index [5] picks the 6th-largest |component| as the
        # plotting range — presumably to ignore a few outliers; confirm.
        self.input_pca_maxval = -np.sort(-np.abs(np.reshape(self.input_pca,[-1])),axis=0)[5]
        print("Done!")
        print("Dimensionality of Y:{}",self.ds_inputdim)
        self.ds = self.ds.shuffle(buffer_size=10000).repeat()
        if args.dataset == "box":
            self.ds = gd.get_box()
        #Now generate iterators for dataset
        self.iterator_ds = self.ds.make_initializable_iterator()
        self.iter_next = self.iterator_ds.get_next()
        # tf Graph input
        self.X_placeholder = tf.placeholder("float", [self.ds_inputdim])
        self.W_placeholder = tf.placeholder("float", [self.N1*self.N2,self.ds_inputdim])
        self.nborhood_size_placeholder = tf.placeholder("int32", [])
        self.sigma_placeholder = tf.placeholder("float", [])
        self.eps_placeholder = tf.placeholder("float", [])
        print("Initializing graph and global vars...")
        self.init_graph()
        # Initializing the variables
        self.init = tf.global_variables_initializer()
        print("Done!")
    ''' Transforms dataset back to dataframe'''
    # Drains `dataset` through a one-shot iterator and collects the "X"/"Y"
    # tensors of every element into two DataFrames (one row per element).
    # NOTE(review): if the dataset is empty, df_x/df_y are never created and
    # the final return raises UnboundLocalError — confirm callers never pass
    # an empty dataset. Also largely duplicates normalizedDataset()'s loop.
    def getDatasetAsDF(self,dataset):
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        num_elems = 0
        while True:
            try:
                x, y = self.sess.run([next_element["X"], next_element["Y"]])
                # Allocate the output frames lazily, on the first element,
                # once the per-element shapes of X and Y are known.
                if num_elems == 0:
                    df_x = pd.DataFrame(0,index = np.arange(self.getNumElementsOfDataset(dataset)),\
                        columns = np.arange(x.shape[0])
                        )
                    print(y)
                    df_y = pd.DataFrame(0,index = np.arange(self.getNumElementsOfDataset(dataset)),\
                        columns = np.arange(y.shape[0])
                        )
                df_x.iloc[num_elems,:] = x
                df_y.iloc[num_elems,:] = y
                num_elems += 1
            except tf.errors.OutOfRangeError:
                # Iterator exhausted: all elements consumed.
                break
        return({"df_x":df_x,"df_y":df_y})
''' Returns the total number of elements of a dataset
@param dataset: the given dataset
@return: total number of elements
'''
def getNumElementsOfDataset(self,dataset):
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
num_elems = 0
while True:
try:
self.sess.run(next_element)
num_elems += 1
except tf.errors.OutOfRangeError:
break
return num_elems
    ''' Returns the dimensionality of the first element of dataset
    @param dataset: the given dataset
    @return: size of the first axis of element "X" (input dimensionality),
             or None if the dataset is empty
    '''
    def getInputShapeOfDataset(self,dataset):
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        d = None
        try:
            # Run once only so that an empty dataset raises OutOfRangeError.
            self.sess.run(next_element)
            # Static (graph-time) shape of the "X" tensor, not the fetched value.
            d = next_element["X"].shape[0]
        except tf.errors.OutOfRangeError:
            # Empty dataset: nothing to inspect.
            return d
        return int(d)
    ''' Returns the normalized version of a given dataset
    @param dataset: the given dataset, such that each element returns an "X" and "Y"
    @return: dict, with keys
    "df_x": normalized elements,
    "df_y": corresponding class attr.,
    "dataset": normalized dataset ("X" and "Y")
    '''
    def normalizedDataset(self,dataset):
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        num_elems = 0
        # Drain the dataset into two DataFrames (features X, labels Y),
        # allocating them lazily once the first element's shapes are known.
        while True:
            try:
                x, y = self.sess.run([next_element["X"], next_element["Y"]])
                if num_elems == 0:
                    df_x = pd.DataFrame(0,index = np.arange(self.getNumElementsOfDataset(dataset)),\
                        columns = np.arange(x.shape[0])
                        )
                    print(y)
                    df_y = pd.DataFrame(0,index = np.arange(self.getNumElementsOfDataset(dataset)),\
                        columns = np.arange(y.shape[0])
                        )
                df_x.iloc[num_elems,:] = x
                df_y.iloc[num_elems,:] = y
                num_elems += 1
            except tf.errors.OutOfRangeError:
                break
        # Zero-mean / unit-variance scaling of the features only; note this
        # turns df_x into a plain ndarray, not a DataFrame.
        df_x = StandardScaler().fit_transform(df_x)
        print(df_y)
        # Rebuild a tf Dataset from normalized features + original labels.
        return({"df_x": df_x,
            "df_y": df_y,
            "dataset": tf.data.Dataset.from_tensor_slices({"X":df_x,"Y":df_y}) \
            })
    ''' Initializes the SOM graph
    Builds the N1 x N2 lattice as an igraph Graph (rectangular or
    hexagonal neighbourhoods), the index->(row, col) table self.ID, and
    the all-pairs hop-distance matrix self.D used for neighbourhoods.
    '''
    def init_graph(self):
        #initial topology
        self.g = ig.Graph()
        self.g.add_vertices(self.N1*self.N2)
        incr = [(0,1),(0,-1),(1,0),(-1,0)]
        # True iff grid coordinate x lies inside the N1 x N2 lattice.
        def isvalid(x):
            return(x[0] >= 0 and x[0] < self.N1 and\
                x[1] >= 0 and x[1] < self.N2)
        # Row-major flattening of a (row, col) pair.
        def toOneDim(x):
            return x[0]*self.N2 + x[1]
        def sum_tuple(x,y):
            return(tuple(sum(pair) for pair in zip(x,y)))
        edges = []
        #Add edges
        for i in np.arange(self.N1):
            for j in np.arange(self.N2):
                curr = (i,j)
                self.g.vs[toOneDim(curr)]["i"] = i
                self.g.vs[toOneDim(curr)]["j"] = j
                if self.gridmode.name == "RECTANGLE":
                    incr = [(0,1),(0,-1),(1,0),(-1,0)]
                else:
                    # Hexagonal lattice: neighbour offsets alternate with
                    # row parity ("offset" coordinates).
                    if i % 2 == 0:
                        incr = [(0,1),(0,-1),(-1,-1),(-1,0),(1,-1),(1,0)]
                    else:
                        incr = [(0,1),(0,-1),(-1,1),(-1,0),(1,1),(1,0)]
                nbors = list(map(lambda x: sum_tuple(x,curr),incr))
                nbors_exist = list(map(lambda x: isvalid(x),nbors))
                for n in np.arange(len(nbors)):
                    if nbors_exist[n]:
                        # NOTE(review): each undirected edge is appended once
                        # from each endpoint, i.e. twice overall — confirm
                        # downstream code tolerates the duplicates.
                        edges += [(toOneDim(curr), toOneDim(nbors[n]))]
                        print(str(curr) + "->" + str(nbors[n]) )
        self.g.add_edges(edges)
        self.g.es["age"] = 0
        #self.ID: maps index of each node to its corresponding position tuple (line, col)
        self.ID = np.array(list(map(lambda x: [self.g.vs[x]["i"],self.g.vs[x]["j"]],\
            np.arange(self.N1*self.N2))),dtype=np.int32 )
        self.ID = np.reshape(self.ID,(-1,2))
        #Initialize distances
        print("Calculating distances...")
        # All-pairs shortest-path (hop) distances on the lattice graph.
        self.D = np.array(self.g.shortest_paths(source=None, target=None, weights=None, mode=ig.ALL),dtype=np.int32)
        print("Done!")
    # Builds the TensorFlow graph for one SOM update step (BMU search,
    # neighbourhood function, weight update).
    # NOTE(review): cycle() performs the same computation in numpy; confirm
    # which of the two paths is actually authoritative at runtime.
    def build_model(self):
        X = self.X_placeholder
        W = self.W_placeholder
        nsize = self.nborhood_size_placeholder
        sigma = self.sigma_placeholder
        eps = self.eps_placeholder
        #Step 3: Calculate dist_vecs (vector and magnitude)
        self.dist_vecs = tf.map_fn(lambda w: X - w,W)
        # l2_loss is 0.5 * sum of squares; doubled to get the squared distance.
        self.squared_distances = tf.map_fn(lambda w: 2.0*tf.nn.l2_loss(X - w),W)
        #Step 4:Calculate 2 best
        # top_k of the negated distances == the two closest units (BMU + runner-up).
        self.s = tf.math.top_k(-self.squared_distances,k=2)
        #1D Index of s1_2d,s2_2d
        self.s1_1d = self.s.indices[0]
        self.s2_1d = self.s.indices[1]
        #2d Index of s1_2d,s2_2d
        self.s1_2d = tf.gather(self.ID,self.s1_1d)
        self.s2_2d = tf.gather(self.ID,self.s2_1d)
        #Step 5: Calculate l1 distances
        #self.l1 = tf.map_fn(lambda x: tf.norm(x-self.s1_2d,ord=1),self.ID)
        # Precomputed graph (hop) distances from the BMU to every unit.
        self.l1 = tf.gather(self.D,self.s1_1d)
        self.mask = tf.reshape(tf.where(self.l1 <= nsize),\
            tf.convert_to_tensor([-1]))
        #Step 6: Calculate neighborhood function values
        if self.ntype.name == "GAUSSIAN":
            self.h = tf.exp(-tf.square(tf.cast(self.l1,dtype=tf.float32))\
                /(2.0*sigma*sigma))
        elif self.ntype.name == "CONSTANT":
            self.h = tf.reshape(tf.where(self.l1 <= nsize,x=tf.ones(self.l1.shape),\
                y=tf.zeros(self.l1.shape)),
                tf.convert_to_tensor([-1]))
        else:
            raise ValueError("unknown self.ntype")
        #Step 6: Update W
        # Kohonen update: every row of W moves toward X, weighted by h.
        self.W_new = W + eps*tf.matmul(tf.diag(self.h), self.dist_vecs)
    # One SOM step on the next dataset element: a numpy weight update in
    # TRAIN mode, or an error measurement (returned as a dict) in TEST mode.
    def cycle(self, current_iter, mode = Mode["TRAIN"]):
        nxt = self.sess.run(self.iter_next)["X"]
        if mode.name == "TRAIN":
            #Iteration numbers as floats
            current_iter_f = float(current_iter)
            n_iter_f = float(self.n_iter)
            #Get current epsilon and theta
            # Exponential decay from the initial to the final value over n_iter.
            eps = self.eps_i * np.power(self.eps_f/self.eps_i,current_iter_f/n_iter_f)
            sigma = self.sigma_i * np.power(self.sigma_f/self.sigma_i,current_iter_f/n_iter_f)
            print("Iteration {} - sigma {} - epsilon {}".format(current_iter,sigma,eps))
            #Get vector distance, square distance
            self.dist_vecs = np.array(list(map(lambda w: nxt - w,self.W)))
            self.squared_distances = np.array(list(map(lambda w: np.linalg.norm(nxt - w,ord=2),self.W)))
            self.s1_1d = np.argmin(self.squared_distances,axis=-1)
            self.s1_2d = self.ID[self.s1_1d]
            #Get L1 distances
            #self.l1 = np.array(list(map(lambda x: np.linalg.norm(x - self.s1_2d,ord=1),self.ID)))
            # Graph (hop) distances from the best-matching unit to all units.
            self.l1 = self.D[self.s1_1d,:]
            # NOTE(review): the threshold here is sigma, while the TF graph in
            # build_model thresholds on nsize — confirm which is intended.
            self.mask = np.reshape(np.where(self.l1 <= sigma),[-1])
            if self.ntype.name == "GAUSSIAN":
                squared_l1 = np.square(self.l1.astype(np.float32))
                self.h = np.exp(-squared_l1 /(2.0*sigma*sigma))
            elif self.ntype.name == "CONSTANT":
                self.h = np.reshape(np.where(self.l1 <= sigma,1,0),[-1])
            # Kohonen update: move every unit toward the input, weighted by h.
            for i in np.arange(self.N1*self.N2):
                self.W[i,:] += eps * self.h[i] * self.dist_vecs[i,:]
        elif mode.name == "TEST":
            self.dist_vecs = np.array(list(map(lambda w: nxt - w,self.W)))
            self.squared_distances = np.array(list(map(lambda w: np.linalg.norm(nxt - w,ord=2),self.W)))
            #Get first and second activation
            top_2 = np.argsort(self.squared_distances)[0:2]
            self.s1_1d = top_2[0]
            self.s2_1d = top_2[1]
            self.s1_2d = self.ID[self.s1_1d]
            self.s2_2d = self.ID[self.s2_1d]
            #Get topographic error
            # Error is 0 iff the two best-matching units are lattice neighbours.
            if (self.s2_1d in self.g.neighbors(self.s1_1d)):
                topographic_error = 0
            else:
                topographic_error = 1
            #print("ERROR {}-{};{}-{}".format(self.s1_2d,self.squared_distances[self.s1_1d],\
            #    self.s2_2d,self.squared_distances[self.s2_1d] ))
            #Get quantization error
            # NOTE(review): despite the variable names, np.linalg.norm(...,ord=2)
            # above is the unsquared L2 distance to the BMU.
            quantization_error = (self.squared_distances[self.s1_1d])
            return ({"topographic_error":topographic_error,\
                "quantization_error":quantization_error})
    # Full pipeline: initialize weights (PCA-grid or random), train for
    # n_iter cycles, then measure topographic/quantization error over one
    # dataset pass and append the run's metrics to runs.csv.
    def train(self):
        #Run initializer
        self.sess.run(self.init)
        self.sess.run(self.iterator_ds.initializer)
        if self.initmode.name == "PCAINIT":
            if self.ds_inputdim < 2:
                raise ValueError("uniform init needs dim input >= 2")
            # Lay the units on their grid coordinates and map them into data
            # space through the fitted PCA components.
            self.W = np.zeros([self.N1*self.N2,3])
            print(self.W.shape)
            self.W[:,0:2] = np.reshape(self.ID,[self.N1*self.N2,2])
            if self.gridmode.name == "HEXAGON":
                # Shift every other row by half a cell to match the hex lattice.
                print(list(map(lambda x: (x//self.N2%2==0),np.arange(self.W.shape[0]))))
                self.W[list(map(lambda x: (x//self.N2%2==0),np.arange(self.W.shape[0])))\
                    ,1] -= 0.5
            print(self.W.shape)
            self.W = np.matmul(self.W,self.pca.components_)
            print(self.W.shape)
            self.W = StandardScaler().fit_transform(self.W)
            print(self.W.shape)
        else:
            # Random uniform init in [0,1), then standardized.
            self.W = self.sess.run(tf.random.uniform([self.N1*self.N2,self.ds_inputdim],\
                dtype=tf.float32))
            self.W = StandardScaler().fit_transform(self.W)
        #BEGIN Training
        for current_iter in np.arange(self.n_iter):
            self.cycle(current_iter)
            if current_iter % self.plot_iter == 0:
                self.prettygraph(current_iter,mask=self.mask)
        self.prettygraph(self.n_iter,mask=self.mask,online=True)
        #END Training
        #BEGIN Testing
        self.sess.run(self.iterator_ds.initializer)
        topographic_error = 0
        quantization_error = 0
        # chosen_Mat[i,j]: how often unit (i,j) was the best-matching unit.
        chosen_Mat = np.zeros((self.N1,self.N2))
        for current_iter in np.arange(self.ds_size):
            cycl = self.cycle(current_iter,mode=Mode["TEST"])
            topographic_error += cycl["topographic_error"]
            quantization_error += cycl["quantization_error"]
            chosen_Mat[self.s1_2d[0],self.s1_2d[1]] += 1
        topographic_error = topographic_error / self.ds_size
        quantization_error = quantization_error / self.ds_size
        #Generate U-Matrix
        # U_Mat[i,j]: summed distance from unit (i,j) to its lattice
        # neighbours. NOTE(review): computed but never used below — confirm.
        U_Mat = np.zeros((self.N1,self.N2))
        for i in np.arange(self.N1):
            for j in np.arange(self.N2):
                vert_pos = self.W[i * self.N2 + j]
                nbors = self.g.neighbors(i * self.N2 + j)
                d = np.sum(\
                    list(map(lambda x: np.linalg.norm(self.W[x] - vert_pos)\
                    ,nbors)))
                U_Mat[i,j] = d
        print("Quantization Error:{}".format(quantization_error))
        print("Topographic Error:{}".format(topographic_error))
        print(np.array(self.characteristics_dict.keys()) )
        # Append the run configuration + error metrics as one row of runs.csv.
        df_keys = list(self.characteristics_dict.keys()) +\
            ["quantization_error","topographic_error"]
        df_vals = list(self.characteristics_dict.values()) +\
            [quantization_error,topographic_error]
        #df_vals = [str(x) for x in df_vals]
        print(df_keys)
        print(df_vals)
        print(len(df_keys))
        print(len(df_vals))
        if os.path.exists("runs.csv"):
            print("CSV exists")
            df = pd.read_csv("runs.csv",header=0)
            df_new = pd.DataFrame(columns=df_keys,index=np.arange(1))
            df_new.iloc[0,:] = df_vals
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
            # migrate to pd.concat when upgrading.
            df = df.append(df_new,ignore_index=True)
        else:
            print("CSV created")
            df = pd.DataFrame(columns=df_keys,index=np.arange(1))
            df.iloc[0,:] = df_vals
        df.to_csv("runs.csv",index=False)
    # Builds the plotly 3-D scatter trace of the PCA-projected inputs;
    # colour encodes the class label when plotmode == CLASS_COLOR.
    def inputScatter3D(self):
        Xn = self.input_pca[:,0]
        Yn = self.input_pca[:,1]
        Zn = self.input_pca[:,2]
        # One-hot labels -> integer class indices.
        Y = self.sess.run(tf.cast(tf.argmax(self.Y,axis=-1),dtype=tf.int32) )
        Y = [int(x) for x in Y]
        # NOTE(review): this is the number of SAMPLES, not of distinct
        # classes, so the palette is far larger than needed and adjacent
        # class colours become nearly indistinguishable — confirm intended.
        num_class = len(Y)
        pal = ig.ClusterColoringPalette(num_class)
        if self.plotmode.name == "CLASS_COLOR":
            col = pal.get_many(Y)
            siz = 2
        else:
            col = "green"
            siz = 1.5
        trace0=go.Scatter3d(x=Xn,
            y=Yn,
            z=Zn,
            mode='markers',
            name='input',
            marker=dict(symbol='circle',
                size=siz,
                color=col,
                line=dict(color='rgb(50,50,50)', width=0.25)
                ),
            text="",
            hoverinfo='text'
            )
        return(trace0)
    # Renders the SOM lattice (PCA-projected reference vectors + edges) over
    # the input scatter and writes <run-characteristics>/<iter>.png; when
    # `online` is set, additionally tries to push the figure to plotly.
    # `mask` lists unit indices inside the current neighbourhood (yellow).
    def prettygraph(self,iter_number, mask,online = False):
        trace0 = self.input_pca_scatter
        W = self.pca.transform(self.W)
        Xn=W[:,0]# x-coordinates of nodes
        Yn=W[:,1] # y-coordinates
        if self.ds_name in ["box","grid"]:
            # Synthetic datasets: use the raw third weight coordinate.
            Zn=self.W[:,2] # z-coordinates
        else:
            Zn=W[:,2] # z-coordinates
        edge_colors = []
        Xe=[]
        Ye=[]
        Ze=[]
        num_pallete = 1000
        # Edge coordinate triples with None separators (plotly line breaks).
        for e in self.g.get_edgelist():
            #col = self.g.es.find(_between=((e[0],), (e[1],)),)["age"]
            #col = float(col)/float(1)
            #col = min(num_pallete-1, int(num_pallete * col))
            #edge_colors += [col,col]
            Xe+=[W[e[0],0],W[e[1],0],None]# x-coordinates of edge ends
            Ye+=[W[e[0],1],W[e[1],1],None]# y-coordinates of edge ends
            Ze+=[W[e[0],2],W[e[1],2],None]# z-coordinates of edge ends
        #Create Scaling for edges based on Age
        # NOTE(review): pal_V/pal_E and edge_colors are built but unused —
        # leftovers of the commented-out age-based colouring above.
        pal_V = ig.GradientPalette("blue", "black", num_pallete)
        pal_E = ig.GradientPalette("black", "white", num_pallete)
        v_colors = ["orange" for a in np.arange(self.g.vcount())]
        for v in mask:
            v_colors[v] = "yellow"
        trace1=go.Scatter3d(x=Xe,
            y=Ye,
            z=Ze,
            mode='lines',
            line=dict(color="black",
                width=3),
            hoverinfo='none'
            )
        reference_vec_text = ["m" + str(x) for x in np.arange(self.W.shape[0])]
        trace2=go.Scatter3d(x=Xn,
            y=Yn,
            z=Zn,
            mode='markers',
            name='reference_vectors',
            marker=dict(symbol='square',
                size=6,
                color=v_colors,
                line=dict(color='rgb(50,50,50)', width=0.5)
                ),
            text=reference_vec_text,
            hoverinfo='text'
            )
        axis=dict(showbackground=False,
            showline=True,
            zeroline=False,
            showgrid=False,
            showticklabels=True,
            title='',
            range = [-self.input_pca_maxval-1,1+self.input_pca_maxval]
            )
        layout = go.Layout(
            title="Visualization of SOM",
            width=1000,
            height=1000,
            showlegend=False,
            scene=dict(
                xaxis=dict(axis),
                yaxis=dict(axis),
                zaxis=dict(axis),
                ),
            margin=dict(
                t=100
                ),
            hovermode='closest',
            annotations=[
                dict(
                    showarrow=False,
                    text="Data source:</a>",
                    xref='paper',
                    yref='paper',
                    x=0,
                    y=0.1,
                    xanchor='left',
                    yanchor='bottom',
                    font=dict(
                        size=14
                        )
                    )
                ], )
        data=[trace1, trace2, trace0]
        fig=go.Figure(data=data, layout=layout)
        # Output directory name encodes the sorted run characteristics as
        # "key=value;..."; the trailing separator is stripped afterwards.
        OUTPATH = "./plot/"
        for k, v in sorted(zip(self.characteristics_dict.keys(),self.characteristics_dict.values()),key = lambda t: (t[0].lower()) ):
            OUTPATH += str(k)+ "=" + str(v) + ";"
        OUTPATH = OUTPATH[0:(len(OUTPATH) - 1)]
        if not os.path.exists(OUTPATH):
            os.mkdir(OUTPATH)
        print("Plotting graph...")
        if online:
            try:
                py.iplot(fig)
            except plotly.exceptions.PlotlyRequestError:
                print("Warning: Could not plot online")
        pio.write_image(fig,OUTPATH + "/" + str(iter_number) + ".png")
        print("Done!")
|
22,769 | 3b52b974b38e88c2a9c403f558aae1dc7ad74386 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
__author__ = 'Trung Dong Huynh'
__email__ = 'trungdong@donggiang.com'
@six.python_2_unicode_compatible
class Identifier(object):
    """Base class for all identifiers and also represents xsd:anyURI."""
    # TODO: make Identifier an "abstract" base class and move xsd:anyURI
    # into a subclass
    def __init__(self, uri):
        """
        Constructor.

        :param uri: URI string for the long namespace identifier.
        """
        # Normalise to a unicode string regardless of the input type.
        self._uri = six.text_type(uri)
    @property
    def uri(self):
        """Identifier's URI."""
        return self._uri
    def __str__(self):
        return self._uri
    def __eq__(self, other):
        # Equality is URI equality; anything that is not an Identifier
        # compares unequal.
        if not isinstance(other, Identifier):
            return False
        return self.uri == other.uri
    def __hash__(self):
        # Hash on (uri, concrete class) so different identifier kinds with
        # the same URI do not collapse into one hash bucket.
        return hash((self.uri, self.__class__))
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._uri)
    def provn_representation(self):
        """PROV-N representation of qualified name in a string."""
        return '"%s" %%%% xsd:anyURI' % self._uri
@six.python_2_unicode_compatible
class QualifiedName(Identifier):
    """Qualified name of an identifier in a particular namespace."""
    def __init__(self, namespace, localpart):
        """
        Constructor.

        :param namespace: Namespace to use for qualified name resolution.
        :param localpart: Portion of identifier not part of the namespace prefix.
        """
        # The full URI is the namespace URI with the local part appended.
        Identifier.__init__(self, u''.join([namespace.uri, localpart]))
        self._namespace = namespace
        self._localpart = localpart
        # Cached "prefix:localpart" form (or the bare local part when the
        # namespace has no prefix), used by __str__/__repr__/provn.
        self._str = (
            ':'.join([namespace.prefix, localpart])
            if namespace.prefix else localpart
        )
    @property
    def namespace(self):
        """Namespace of qualified name."""
        return self._namespace
    @property
    def localpart(self):
        """Local part of qualified name."""
        return self._localpart
    def __str__(self):
        return self._str
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._str)
    def __hash__(self):
        # NOTE(review): hashes on the URI alone, whereas Identifier.__hash__
        # uses (uri, class); objects equal under the inherited __eq__ can
        # therefore hash differently — confirm this asymmetry is intended.
        return hash(self.uri)
    def provn_representation(self):
        """PROV-N representation of qualified name in a string."""
        return "'%s'" % self._str
class Namespace(object):
    """PROV Namespace."""
    def __init__(self, prefix, uri):
        """
        Constructor.

        :param prefix: String short hand prefix for the namespace.
        :param uri: URI string for the long namespace identifier.
        """
        self._prefix = prefix
        self._uri = uri
        # QualifiedName objects handed out by __getitem__, keyed by local
        # part, so repeated lookups return the same object.
        self._cache = dict()
    @property
    def uri(self):
        """Namespace URI."""
        return self._uri
    @property
    def prefix(self):
        """Namespace prefix."""
        return self._prefix
    @staticmethod
    def _uri_of(identifier):
        # Accept either a plain string or an Identifier-like object;
        # anything else yields None.
        if isinstance(identifier, six.string_types):
            return identifier
        if isinstance(identifier, Identifier):
            return identifier.uri
        return None
    def contains(self, identifier):
        """
        Indicates whether the identifier provided is contained in this namespace.

        :param identifier: Identifier to check.
        :return: bool
        """
        uri = self._uri_of(identifier)
        return bool(uri) and uri.startswith(self._uri)
    def qname(self, identifier):
        """
        Returns the qualified name of the identifier given using the namespace
        prefix.

        :param identifier: Identifier to resolve to a qualified name.
        :return: :py:class:`QualifiedName`
        """
        uri = self._uri_of(identifier)
        if not (uri and uri.startswith(self._uri)):
            return None
        return QualifiedName(self, uri[len(self._uri):])
    def __eq__(self, other):
        if not isinstance(other, Namespace):
            return False
        return self._uri == other.uri and self._prefix == other.prefix
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return hash((self._uri, self._prefix))
    def __repr__(self):
        return '<%s: %s {%s}>' % (
            self.__class__.__name__, self._prefix, self._uri
        )
    def __getitem__(self, localpart):
        # EAFP: cache hit is the common case after warm-up.
        try:
            return self._cache[localpart]
        except KeyError:
            qname = self._cache[localpart] = QualifiedName(self, localpart)
            return qname
|
22,770 | 60cb6de1992942896c0b96d23d4ec110fe12b35b | pessoa = {'nome':'Ishsan', 'idade':'600', 'sexo':'M'}
print(pessoa)
print(pessoa['idade'])
print(f'O {pessoa["nome"]} tem {pessoa["idade"]} anos')
print(pessoa.keys())
print(pessoa.values())
print(pessoa.items())
#del pessoa['sexo']
#pessoa['nome']='Sengir'
#vai mudar o nome de Ishsan para Sengir
pessoa['peso']=200
for k, v in pessoa.items():
print(f'{k} = {v}') |
22,771 | 366e705eedfad5fe5fd7ac0addd41b3fc10b189d | """
Student ID: 2594 4800
Name: JiaHui (Jeffrey) Lu
Aug-2017
"""
import numpy as np
def eps():
    """Print an estimate of machine epsilon (~2.22e-16 for IEEE doubles)."""
    candidate = 1.0
    # Halve until adding the value to 1 no longer changes the result...
    while 1 + candidate > 1:
        candidate /= 2
    # ...then report twice the last value, i.e. the smallest increment that
    # still made a difference.
    print(candidate * 2)
if __name__ == "__main__":
    print("The machine eps is: ")
    eps()
|
22,772 | 1863eb0adc167f6a4be4ac5fb85f3c5382684bd1 | from urllib.request import urlopen
import warnings
import os
import json
URL = 'http://www.oreilly.com/pub/sc/osconfeed'
# JSON = 'data/osconfeed.json'
JSON = 'data/schedule1_db'
def load():
    """Return the parsed OSCON schedule feed, downloading it on first use.

    The feed is cached on disk at JSON; a warning is emitted whenever a
    download is triggered.
    """
    if not os.path.exists(JSON):
        warnings.warn('downloading {} to {}'.format(URL, JSON))
        with urlopen(URL) as remote, open(JSON, 'wb') as local:
            local.write(remote.read())
    with open(JSON, encoding='utf8') as f:
        return json.load(f)
if __name__ == "__main__":
    dataframe = load()
    print(dataframe)
22,773 | 762861d49b9e706e9937636d6bf73084d0e2e6e6 | import gym
import numpy as np
# Hide the warning ("WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.") by executing gym.logger.set_level(40). This will set minimal level of logger message to be printed to 40. Correspondingly, only error level messages will be displayed now.
# Silence gym's dtype-autodetection warnings: level 40 shows only errors.
gym.logger.set_level(40)

# Instantiate the CartPole environment.
env = gym.make('CartPole-v0')

done = False
cnt = 0  # number of timesteps survived in this episode

# The environment must be reset before use; this yields the initial
# observation at t=0. For CartPole the 4-element Markov state is:
# [cart position, cart velocity, pole angle, pole angular velocity].
observation = env.reset()

while not done:
    # BUG FIX: render() takes the mode as its value, not the literal string
    # 'mode=human' (which was being passed positionally AS the mode).
    # Rendering can be disabled to improve computation time.
    env.render(mode='human')
    cnt += 1
    # Sample a random action from the action space and apply it.
    action = env.action_space.sample()
    # step() returns (new observation, reward, episode-finished flag, info);
    # the reward is +1 for every timestep before termination.
    observation, reward, done, _ = env.step(action)

print('game lasted ', cnt, ' moves')
22,774 | 2addbbcafeb96fa3861fd3817c883fd2d2e77bb5 | import re
from collections import OrderedDict
from itertools import chain
# Raised when a square value is not one of the valid marks ('X'/'O').
class InvalidValue(Exception):
    pass
# Raised when a board key does not name a valid square ('A1'..'C3').
class InvalidKey(Exception):
    pass
# Raised when the target square is already occupied.
class InvalidMove(Exception):
    pass
# Raised when the same player attempts two consecutive moves.
class NotYourTurn(Exception):
    pass
class TicTacToeBoard:
    """3x3 tic-tac-toe board addressed by keys like 'A1'..'C3'.

    Columns are lettered A-C and rows numbered 1-3 (bottom to top).
    Moves are made by item assignment (board['A1'] = 'X'); the board
    enforces alternating turns, rejects occupied squares, and detects
    wins/draws after every move.
    """
    VALID_VALUES = ('X', 'O')
    WINNING_COMBINATIONS = (('X', 'X', 'X'),
                            ('O', 'O', 'O'))
    NUMBER_OF_SQUARES = 9
    EMPTY_SQUARE = ' '
    CHARS = ('A', 'B', 'C')
    KEY_REGEX = '[A-C][1-3]'

    def __init__(self):
        # One list of three squares per column letter, bottom row first.
        self.__dictBoard = OrderedDict.fromkeys(self.CHARS)
        for char in self.__dictBoard:
            self.__dictBoard[char] = [self.EMPTY_SQUARE] * 3
        self.__is_finished = False
        self.__winner = None
        self.__last_move = None
        self.__moves_counter = 0

    def __getitem__(self, key):
        """Return the mark at square `key`; raises InvalidKey for bad keys."""
        char, digit = self.__cleaned_key(key)
        return self.__dictBoard[char][digit]

    def __setitem__(self, key, value):
        """Place `value` on square `key` after validating the move."""
        self.__validate_move(key, value)
        char, digit = self.__cleaned_key(key)
        self.__dictBoard[char][digit] = value
        self.__last_move = value
        self.__moves_counter += 1
        if not self.__is_finished:
            self.__resolve_status()

    def game_status(self):
        """Return 'X wins!'/'O wins!', 'Draw!' or 'Game in progress.'."""
        if self.__is_finished:
            if self.__winner:
                return "{} wins!".format(self.__winner)
            else:
                return "Draw!"
        return 'Game in progress.'

    def __str__(self):
        return ('\n -------------\n' +
                '3 | {0[2]} | {1[2]} | {2[2]} |\n' +
                ' -------------\n' +
                '2 | {0[1]} | {1[1]} | {2[1]} |\n' +
                ' -------------\n' +
                '1 | {0[0]} | {1[0]} | {2[0]} |\n' +
                ' -------------\n' +
                '    A   B   C  \n').format(*self.columns)

    @classmethod
    def __cleaned_key(cls, key):
        """Parse 'A1'..'C3' into a (column char, 0-based row index) pair.

        BUG FIX: uses re.fullmatch instead of re.match so keys with
        trailing junk (e.g. 'A1x') raise InvalidKey instead of failing
        later with an unrelated ValueError when unpacked.
        """
        if not isinstance(key, str) or not re.fullmatch(cls.KEY_REGEX, key):
            raise InvalidKey()
        char, digit = key
        return (char, int(digit) - 1)

    @property
    def rows(self):
        # Transpose of the columns view.
        return tuple(zip(*self.columns))

    @property
    def columns(self):
        return tuple(tuple(self.__dictBoard[char]) for char in self.CHARS)

    @property
    def diagonals(self):
        return (tuple(col[i] for i, col in enumerate(self.columns)),
                tuple(col[i] for i, col in enumerate(reversed(self.columns))))

    def __resolve_status(self):
        """Update finished/winner flags after a move."""
        for triple in chain(self.columns, self.rows, self.diagonals):
            if triple in self.WINNING_COMBINATIONS:
                self.__is_finished = True
                self.__winner = triple[0]
                return
        # No winner: the game ends in a draw when the board is full.
        if self.__moves_counter == self.NUMBER_OF_SQUARES:
            self.__is_finished = True

    def __validate_move(self, key, value):
        """Raise InvalidKey/InvalidValue/InvalidMove/NotYourTurn as needed."""
        char, digit = self.__cleaned_key(key)
        if not value in self.VALID_VALUES:
            raise InvalidValue()
        if self.__dictBoard[char][digit] != self.EMPTY_SQUARE:
            raise InvalidMove()
        if self.__last_move == value:
            raise NotYourTurn()
22,775 | bf0e29c6f032686ad34292fe24edadc5e50aa0f5 | import os
import pdfkit
# Makes a codeforces problemset directory
def create_dir(directory, path):
    """Create `directory` under `path` ('' means the current directory).

    Prints whether the directory was created or already existed.

    BUG FIX: the original used `path is not ''` — an identity comparison
    against a literal that only works through CPython string interning
    (and raises a SyntaxWarning on modern interpreters); compare by value.
    """
    if path != '':
        path += '/'
    if not os.path.exists(path + directory):
        print('Creating directory ' + directory)
        os.makedirs(path + directory)
    else:
        print('Directory ' + directory + ' already exists')
# Creates a covered file
def create_metafile(project_name):
covered = os.path.join(project_name , 'Covered Contests.txt')
if not os.path.isfile(covered):
write_file(covered)
# Create a new file
def write_file(path):
open(path, 'w')
# Add data onto an existing file
def append_to_file(path, data):
with open(path, 'a') as file:
file.write(data + '\n')
# Delete the contents of a file
def delete_file_contents(path):
open(path, 'w').close()
# Read a file and convert each line to integer lists
def file_to_list(file_name):
results = []
with open(file_name, 'rt') as f:
for line in f:
if line is not '\n':
results.append(int(line))
return sorted(results)
# Iterate through a list, each item will be a line in a file
def list_to_file(contests, file_name):
with open(file_name,"w") as f:
for l in sorted(contests):
f.write(str(l)+"\n")
# Render the web page at `url` to '<path><pdf_name>.pdf' via pdfkit
# (which shells out to wkhtmltopdf; `path` must end with a separator).
def print_url(url, pdf_name, path):
    pdfkit.from_url(url, path + pdf_name + '.pdf')
|
22,776 | 56675565266f87c1bcc55ae93be77c9d705a3765 | #!/usr/bin/env python3
import re
import copy
# rules[color] -> set of colors that a bag of `color` must directly contain.
rules = {}
# Memo for dfs(): frozenset of held bag colors -> whether 'shiny gold' is
# reachable from that state.
cache = {}
# NOTE(review): file handle is never closed — fine for a one-shot script.
lines = open('input.txt').read().splitlines()
for line in lines:
    parts = line.split(' ')
    # The first two words form the container bag's color, e.g. "light red".
    lhs_color = parts[0] + ' ' + parts[1]
    assert lhs_color not in rules
    line = ' '.join(parts[3:])
    rhs = set()
    # Capture each "<count> <color> bags" clause; counts are ignored here.
    for m in re.findall('(\d+) ([\w ]+) bags', line):
        rhs.add(m[1])
    if rhs:
        rules[lhs_color] = rhs
def dfs(bags, depth):
    """Return True if a 'shiny gold' bag is reachable from the set of bag
    colors in `bags` by repeatedly replacing one bag with its contents.

    Falls through returning None (falsy) after caching a negative result.

    NOTE(review): results are memoized in the global `cache`, including
    False entries for states whose exploration was cut short, and `depth`
    is never used — confirm the memoization is sound for this input.
    """
    if 'shiny gold' in bags:
        return True
    fbags = frozenset(bags)
    if fbags in cache:
        return cache[fbags]
    print(bags)
    for color in bags:
        # Re-check the memo: a recursive call below may have populated it.
        if fbags in cache:
            return cache[fbags]
        if color in rules:
            # Replace `color` with its direct contents and recurse.
            bags2 = copy.copy(bags)
            bags2 |= rules[color]
            bags2.remove(color)
            r = dfs(bags2, depth + 1)
            if r:
                cache[frozenset(bags2)] = True
                return True
    cache[fbags] = False
# Count how many bag colors can eventually contain a shiny gold bag.
first_colors = set(rules.keys())
count = 0
for i, start in enumerate(sorted(first_colors)):
    # Progress indicator: "<index>/<total>: <color>".
    print(f'{i}/{len(first_colors)}: {start}')
    if dfs({start}, 0):
        print('yep')
        count += 1
print(count)
22,777 | 144fa7fea84df1e42fcf7803f574f81ab8747c52 | __author__ = 'katie'
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
#data = [[5., 25., 50., 20.],
#[4., 23., 51., 17.],
#[6., 22., 52., 19.]]
#X = np.arange(4)
#plt.bar(X + 0.00, data[0], color = 'b', width = 0.25)
#plt.bar(X + 0.25, data[1], color = 'g', width = 0.25)
#plt.bar(X + 0.50, data[2], color = 'r', width = 0.25)
#plt.show()
import os, csv
root = '/media/truecrypt1/SocCog/results/noMV_noval_1stvs2nd_wbp_cov/lrn/'
# data_dict: regressor name -> (covariate beta, average value) from the CSV.
data_dict = {}
data_file = os.path.join(root, 'regandcovclust1.csv')
with open(data_file, 'r') as data:
    for line in csv.DictReader(data):
        data_dict[line['regressor']] = (float(line['cov_beta']), float(line['avg_val']))
# Regressor names at three aggregation levels (8, 4 and 2 conditions).
regs1 = ('LSRF','LSRS', 'LSIF', 'LSIS', 'LYNRF', 'LYNRS', 'LYNIF', 'LYNIS' )
regs2 = ('LSR', 'LSI', 'LYNR', 'LYNI')
regs3 = ('LS', 'LYN')
labs1 = []
# Tick labels: relabel trailing 'S' -> 'B' and anything else ('F') -> 'A'.
for reg in regs1:
    if reg[-1] == 'S':
        newreg = reg[0:-1] + 'B'
    else:
        newreg = reg[0:-1] + 'A'
    labs1.append(newreg)
labs1 = tuple(labs1)
# Three stacked bar panels of covariate betas at decreasing aggregation
# (8 / 4 / 2 regressors), each bar topped with a black dot at the
# regressor's average value.
fig, (ax1, ax2, ax3) = plt.subplots(ncols=1, nrows=3)
betastr = u"\u03B2"  # Greek beta for the axis labels
# Bottom panel: all 8 regressors; nudge x positions to group condition pairs.
X3 = np.arange(8)
xshift = [.3, -.1, .1, -.3, .1, -.1, +.1, -.3]
X3 = [x + xshift[x] for x in X3]
print(X3)
ax3.set_ylabel('cov ' + betastr)
ax3.set_xticks(X3)
ax3data = [data_dict[reg][0] for reg in regs1]
ax3dots = [data_dict[reg][1] for reg in regs1]
colors1 = ['purple', 'peachpuff', 'darkred', 'yellow', 'darkblue', 'lightskyblue',
           'darkolivegreen', 'lightgreen']
# Black dot at each regressor's average value, centred on its bar.
patches = []
for i, dot in enumerate(ax3dots):
    circ = mpatches.Ellipse(xy=(X3[i] + .125, ax3dots[i]), width=.05, height=.05, color='black')
    patches.append(circ)
patches = PatchCollection(patches, match_original=True, zorder=10)
patches.set_zorder(20)
ax3.set_xticks([x + .125 for x in X3])
for i in range(8):
    ax3.bar(X3[i], ax3data[i], color=colors1[i], width=0.25, alpha=.8, zorder=2)
ax3.set_xticklabels(labs1)
ax3.set_ylim(min(ax3data) - .25, max(ax3data) + .25)
# Horizontal zero line reused by all three panels.
line_xs = ax3.get_xlim()
line_ys = [0, 0]
ax3.add_line(lines.Line2D(line_xs, line_ys, linewidth=1, alpha=.5, color='black'))
ax3.add_collection(patches)
ax3.set_xlim(X3[0] - .5, X3[-1] + .7)
# Middle panel: 4 aggregated regressors.
X2 = []
for i, x in enumerate(X3[::2]):
    X2.append(np.mean([X3[2*i], X3[2*i + 1]]))
X2 = np.arange(4)
xshift = [.2, -.2, .2, -.2]
X2 = [x + xshift[x] for x in X2]
# FIX: parenthesised print so the script also runs under Python 3
# (was the Python-2 statement form `print X2`).
print(X2)
ax2.set_ylabel('cov ' + betastr)
ax2.set_xticks(X2)
ax2data = [data_dict[reg][0] for reg in regs2]
ax2dots = [data_dict[reg][1] for reg in regs2]
colors2 = ['magenta', 'orange', 'steelblue', 'green']
patches = []
for i, dot in enumerate(ax2dots):
    circ = mpatches.Ellipse(xy=(X2[i] + .075, ax2dots[i]), width=.05, height=.05, color='black')
    patches.append(circ)
patches = PatchCollection(patches, match_original=True, zorder=10)
patches.set_zorder(20)
ax2.set_xticks([x + .075 for x in X2])
for i in range(4):
    ax2.bar(X2[i], ax2data[i], color=colors2[i], width=0.125, alpha=.8, zorder=2)
ax2.add_line(lines.Line2D(line_xs, line_ys, linewidth=1, alpha=.5, color='black'))
ax2.add_collection(patches)
ax2.set_xticklabels(regs2)
ax2.set_xlim(X2[0] - .125, X2[-1] + .4)
# Top panel: 2 fully aggregated regressors, positioned midway between pairs.
X1 = []
for i, x in enumerate(X2[::2]):
    X1.append(np.mean([X2[2*i], X2[2*i + 1]]))
ax1.set_ylabel('cov ' + betastr)
ax1.set_xticks(X1)
ax1data = [data_dict[reg][0] for reg in regs3]
ax1dots = [data_dict[reg][1] for reg in regs3]
colors1 = ['red', 'cyan']
patches = []
for i, dot in enumerate(ax1dots):
    # BUG FIX: the dots on the top panel were positioned with ax2dots
    # (the middle panel's averages); use this panel's own ax1dots.
    circ = mpatches.Ellipse(xy=(X1[i] + .125, ax1dots[i]), width=.05, height=.05, color='black')
    patches.append(circ)
patches = PatchCollection(patches, match_original=True, zorder=10)
patches.set_zorder(20)
ax1.set_xticks([x + .125 for x in X1])
for i in range(2):
    ax1.bar(X1[i], ax1data[i], color=colors1[i], width=0.25, alpha=.8, zorder=2)
ax1.add_line(lines.Line2D(line_xs, line_ys, linewidth=1, alpha=.5, color='black'))
ax1.add_collection(patches)
ax1.set_xticklabels(regs3)
# Pad the top panel's y-limits by 10% of the data range.
mini = min(min(ax1data), min(ax1dots))
maxi = max(max(ax1data), max(ax1dots))
delta = abs(mini - maxi) * .1
# FIX: parenthesised print (was Python-2 `print delta`).
print(delta)
ax1.set_ylim(mini - delta, maxi + delta)
plt.show()
#my color schemes are going to be
#SRB Purple
#SRA Pink
#SIB Dark Red
#SIA Yellow
#YNRB Dark Blue
#YNRA Light Blue
#YNIB Dark Green
#YNIA Light Green
#SR Magenta
#SI Orange
#YNR Medium Blue
#YNI Medium Green
#S Red
#YN Blue Green
|
22,778 | db09bc3808a35a85c37df70ca922cd1bb89a2901 | sequence = input()
result = [int(i) for i in sequence]
result_2 = [result[n] + sum(result[:n]) for n in range(len(result))]
print(result_2) |
22,779 | 310b0a05104d40791297472bdd450cfa4af674c9 | #!/usr/bin/env python
import argparse
from cassandra.cluster import Cluster
key_space_name = "testkeyspace"
table_name = "employee"

def create():
    """Create the test keyspace/table and insert 20 sample rows.

    Rows 1-10 are inserted before an `email` column is added via
    ALTER TABLE; rows 11-20 then include the new column.
    """
    cluster = Cluster()
    session = cluster.connect()
    command = "CREATE KEYSPACE \"{}\" with replication = {{'class':'SimpleStrategy', 'replication_factor' : 3}};".format(key_space_name)
    print(command)
    session.execute(command)
    command = 'use "{}";'.format(key_space_name)
    print(command)
    session.execute(command)
    command = "CREATE TABLE {} (id int PRIMARY KEY, name text, city text);".format(table_name)
    print(command)
    session.execute(command)
    for i in range(1, 10+1):
        command = "INSERT INTO {} (id, name, city) VALUES ({}, 'Name{}', 'City{}');".format(table_name, i, i, i)
        print(command)
        session.execute(command)
    # BUG FIX: the original executed the last INSERT a second time here;
    # harmless only because id is the primary key (upsert) — removed.
    # Consistency fix: use table_name instead of the literal "employee".
    command = "ALTER TABLE {} ADD email text;".format(table_name)
    print(command)
    session.execute(command)
    for i in range(11, 20+1):
        command = "INSERT INTO {} (id, name, city, email) VALUES ({}, 'Name{}', 'City{}', 'Email{}');".format(table_name, i, i, i, i)
        print(command)
        session.execute(command)
def verify():
    """Assert that the 20 rows inserted by create() are present and correct."""
    cluster = Cluster()
    session = cluster.connect(key_space_name)
    command = 'SELECT id, name, city, email FROM {};'.format(table_name)
    print(command)
    rows = sorted(session.execute(command), key=lambda row: row.id)
    for i in range(1, 20 + 1):
        row = rows[i - 1]
        assert(row.id == i)
        assert(row.name == 'Name{}'.format(i))
        assert(row.city == 'City{}'.format(i))
        if i >= 11:
            # Only rows 11-20 were inserted after the email column existed.
            assert(row.email == 'Email{}'.format(i))
def delete():
    """Drop the entire test keyspace."""
    session = Cluster().connect()
    drop_stmt = "DROP KEYSPACE \"{}\";".format(key_space_name)
    print(drop_stmt)
    session.execute(drop_stmt)
def main():
    """CLI entry point: create/verify/delete the Cassandra test data."""
    parser = argparse.ArgumentParser(description="Quick Cassandra test")
    parser.add_argument("-c", "--create", action='store_true', default=False,
                        help="Create the data")
    parser.add_argument("-v", "--verify", action='store_true', default=False,
                        help="Verify the data")
    parser.add_argument("-d", "--delete", action='store_true', default=False,
                        help="Delete the data")
    args = parser.parse_args()
    try:
        if args.create:
            create()
        if args.verify:
            verify()
    except:
        # On any failure, drop the keyspace so reruns start clean, then
        # surface the original error.
        delete()
        raise
    if args.delete:
        delete()


if __name__ == "__main__":
    main()
|
22,780 | 1ae125741a6f188b4b8e655ddb36c790bb0aa7aa | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from motPapa import bb_list_center
from scipy.spatial import distance_matrix
import motmetrics as mm
# Ground-truth and tracker-output CSV files (MOTChallenge-style layout).
gt_path = "../data/gt/gt.txt"
track_path = "../Output/tracking/tracking.txt"
track_df = pd.read_csv(track_path, header=None)
gt_df = pd.read_csv(gt_path, header=None)
# Column 0 holds the frame number; evaluate every frame up to the last one.
last_frame = int(np.max(gt_df.iloc[:, 0]))
print(last_frame)
acc = mm.MOTAccumulator(auto_id=True)
for i in range(last_frame):
    # Tracker rows for frame i+1: col 1 = track id, cols 2-3 presumably
    # center coordinates (they are fed to distance_matrix) — confirm.
    track = track_df.loc[track_df.iloc[:, 0] == i + 1, [1, 2, 3]]
    track_ids = (track.iloc[:, 0]).values.tolist()
    track_cc = (track.iloc[:, [1, 2]]).values.tolist()
    # GT rows: col 1 = object id, cols 2-5 bounding boxes, reduced to
    # centers via bb_list_center so both sides compare in the same space.
    gt = gt_df.loc[gt_df.iloc[:, 0] == i + 1, [1, 2, 3, 4, 5]]
    gt_ids = gt.iloc[:, 0]
    gt_bb = (gt.iloc[:, [1, 2, 3, 4]]).values.tolist()
    gt_cc = bb_list_center(gt_bb)
    # Pairwise GT-to-track distances drive the accumulator's matching.
    dist_mat = distance_matrix(gt_cc, track_cc)
    acc.update(gt_ids, track_ids, dist_mat)
# Compute and pretty-print the standard MOTChallenge metric suite.
mh = mm.metrics.create()
summary = mh.compute(acc,
                     mm.metrics.motchallenge_metrics,  # metrics=['num_frames', 'mota', 'motp'],
                     name='acc')
strsummary = mm.io.render_summary(
    summary,
    formatters=mh.formatters,
    namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
# number of ids
max_tracked_id = np.max(track_df.iloc[:, 1])
max_gt_id = np.max(gt_df.iloc[:, 1])
print(max_tracked_id, max_gt_id)
# trajectories length: frames-per-id histogram and its mean
trajectories_length = np.array(track_df[1].value_counts())
mean_traj_length = np.sum(trajectories_length) / trajectories_length.shape[0]
print(mean_traj_length)
plt.plot(trajectories_length)
plt.title("Length of tracked trajectories distribution")
plt.ylabel("tracked frames for id")
plt.xlabel("Number of ids")
plt.show()
acc.events.to_csv("out_events.txt")
|
22,781 | 14735ab54e71cbdde3ddce3bb023c2786fa048aa | import os
import pytest
import pandas as pd
class MainTestClass(object):
    """Shared pytest fixtures: a sample DataFrame and common project paths."""

    @pytest.fixture
    def df(self) -> pd.DataFrame:
        """Small sample frame; 'updated' is filled with pd.NaT (missing)."""
        sample_cols = ['id', 'name', 'address', 'updated']
        sample_recs = [[1000, 'zeke', '123 street'],
                       [1001, 'larry', '688 road'],
                       [1002, 'fred', '585 lane']]
        for rec in sample_recs:
            rec.append(pd.NaT)
        return pd.DataFrame(sample_recs, columns=sample_cols)

    @pytest.fixture
    def output_dir(self) -> str:
        """Path to an 'output' dir beside this file, created on demand."""
        fp = os.path.join(os.path.dirname(__file__), "output")
        if not os.path.exists(fp):
            os.mkdir(fp)
        return fp

    @pytest.fixture
    def fixtures_dir(self) -> str:
        """Path to a 'fixtures' dir beside this file, created on demand."""
        fp = os.path.join(os.path.dirname(__file__), "fixtures")
        if not os.path.exists(fp):
            os.mkdir(fp)
        return fp

    @pytest.fixture
    def project_root_dir(self, fixtures_dir):
        # Root of the canned sample project used by the tests.
        return os.path.join(fixtures_dir, "fixed_root_dir/fixed_project")

    @pytest.fixture
    def project_log_dir(self, project_root_dir):
        return os.path.join(project_root_dir, "logs")

    @pytest.fixture
    def project_settings_path(self, project_root_dir):
        return os.path.join(project_root_dir, "sample_project_config.ini")

    @pytest.fixture
    def example_file_path(self, project_root_dir):
        return os.path.join(project_root_dir, "example.csv")
|
# Demo of legal Python identifiers; original labels translated from Chinese.
if True:
    _str1 = "string"
    print(_str1)
    # error 2 (exercise label): identifier starting with an underscore
    _1024 = 1024
    print(_1024)
    # error 3 (exercise label): digits/underscores inside an identifier
    float_1_024 = 1.024
    print(float_1_024)
    # error 3 (exercise label): a keyword may appear inside a longer name
    False_define_by_myself = False
    print(False_define_by_myself)
    # Output the reserved keywords here
    print("['False', 'None', 'True', 'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']")
    print("end")
|
22,783 | f953da26364e7b1fdb41aca246bda7be630e0ae7 | from .gdb_dap import GDBRequest, GDBResponse
|
22,784 | 39616004f2f91c61f3d0448e03003c539de11a8f | from Build_Models.regression import create_regression_model
from Build_Models.regression_dropout import create_regression_model_with_dropout
from Build_Models.regression_attention import create_regression_model_with_attention
from scipy.stats import spearmanr
from sklearn.metrics import mean_squared_error
import numpy as np
# --------------- loading Data --------------- #
# Cell-line dataset names; extensions are stripped below to build .npy paths.
files = ['hek293t.episgt', 'hela.episgt', 'hct116.episgt', 'hl60.episgt']
dataArr_inputs_test = np.array([None]*4)
dataArr_labels_test = np.array([None]*4)
# loading every piece in one big data
for i in range(4):
    files[i] = files[i][:files[i].index('.')]
    x = np.load(f"./training_data/inputs_{files[i]}_test_REG.npy")
    dataArr_inputs_test[i] = x
    x = np.load(f"./training_data/labels_{files[i]}_test_REG.npy")
    dataArr_labels_test[i] = x
# concatenate the array of 4 to get one array
dataArr_inputs_test = np.concatenate((dataArr_inputs_test))
dataArr_labels_test = np.concatenate((dataArr_labels_test))
# Reorder axes 0,2,3,1 — presumably NCHW -> NHWC; confirm input layout.
dataArr_inputs_test = dataArr_inputs_test.transpose([0, 2, 3, 1])
dataArr_labels_test = dataArr_labels_test.reshape((-1))
X_test = dataArr_inputs_test
Y_test = dataArr_labels_test
def pretrained_reg_model(enhancment="no enhancment"):
    """Build a regression model and restore its pretrained weights.

    Parameters
    ----------
    enhancment : str
        One of "no enhancment", "dropout" or "attention"; selects the
        architecture and the checkpoint to load.

    Returns
    -------
    The model with its checkpoint loaded.

    Raises
    ------
    ValueError
        For an unrecognised option (the original silently fell through and
        raised UnboundLocalError at `return model` instead).
    """
    if enhancment == "no enhancment":
        model = create_regression_model()
        model.load("./scenarios/scenario6/regModel/thirdBestModel/ClassificationModel.tfl")
    elif enhancment == "dropout":
        model = create_regression_model_with_dropout()
        model.load("./enhancments/dropout_reg/regModel/thirdBestModel/ClassificationModel.tfl")
    elif enhancment == "attention":
        model = create_regression_model_with_attention()
        model.load("./enhancments/attention_reg/regModel/thirdBestModel/ClassificationModel.tfl")
    else:
        raise ValueError("unknown enhancment option: {!r}".format(enhancment))
    return model
"""
for regression model with attention
"./enhancments/attention_reg/regModel/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.12630249750314507
Test Pvalue : 5.560799899549878e-15
Test MSE value : 0.029209616
"./enhancments/attention_reg/regModel/thirdBestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.19720300464754606
Test Pvalue : 1.288415447470461e-34
Test MSE value : 0.029820437
"./enhancments/attention_reg/regModel/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.21434419407451152
Test Pvalue : 1.0004915046479287e-40
Test MSE value : 0.032414112
"""
"""
for regression model with dropout
"./enhancments/dropout_reg/regModel/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.3089411745351265
Test Pvalue : 8.191531055516354e-85
Test MSE value : 0.024200544
"./enhancments/dropout_reg/regModel/thirdBestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.3089411745351265
Test Pvalue : 8.191531055516354e-85
Test MSE value : 0.024200544
"./enhancments/dropout_reg/regModel/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.2896430857310438
Test Pvalue : 2.534234818621121e-74
Test MSE value : 0.025018202
"""
"""
for regression model with no enhancments
"./scenarios/scenario6/regModel/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.2214713955039721
Test Pvalue : 1.983400816065288e-43
Test MSE value : 0.06687444
"./scenarios/scenario6/regModel/thirdBestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.3952939849155957
Test Pvalue : 2.6568130457908383e-142
Test MSE value : 0.02409169
"./scenarios/scenario6/regModel/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.4087671290061256
Test Pvalue : 5.688987942594142e-153
Test MSE value : 0.02484151
# "./scenarios/scenario7/regModel/hct/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.7177728812187373
Test Pvalue : 0.0
Test MSE value : 0.013914336
# "./scenarios/scenario7/regModel/hct/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.7073092061277186
Test Pvalue : 0.0
Test MSE value : 0.016922968
# "./scenarios/scenario7/regModel/hek/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.6365298917983627
Test Pvalue : 0.0
Test MSE value : 0.014217936
# "./scenarios/scenario7/regModel/hek/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.6365298917983627
Test Pvalue : 0.0
Test MSE value : 0.014217936
# "./scenarios/scenario7/regModel/hela/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.5719276598714741
Test Pvalue : 0.0
Test MSE value : 0.024042983
# "./scenarios/scenario7/regModel/hela/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.5195469710949443
Test Pvalue : 9.88173754557735e-262
Test MSE value : 0.025831336
# "./scenarios/scenario7/regModel/hl/BestModel/ClassificationModel.tfl"
Test Spearman Corr value : 0.2991578251196238
Test Pvalue : 2.149227472353362e-79
Test MSE value : 0.029789122
# "./scenarios/scenario7/regModel/hl/finalModel/RegressionModel.tfl"
Test Spearman Corr value : 0.7113122494700089
Test Pvalue : 0.0
Test MSE value : 0.026292099
"""
|
22,785 | 7d72498b1b8d32510cb1fa551fe7443bf5709c6f | """Test code generation command."""
import os
import sys
import textwrap
from pytest_bdd.scripts import main
PATH = os.path.dirname(__file__)
def test_generate(monkeypatch, capsys):
    """Test if the code is generated by a given feature.

    Runs the pytest-bdd `generate` CLI against generate.feature and compares
    the emitted skeleton test module to the expected text below.
    NOTE(review): indentation/blank lines inside the expected literal appear
    to have been lost upstream — confirm against the real generated output.
    """
    monkeypatch.setattr(sys, "argv", ["", "generate", os.path.join(PATH, "generate.feature")])
    main()
    out, err = capsys.readouterr()
    assert out == textwrap.dedent(
        '''
# coding=utf-8
"""Code generation feature tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
@scenario('scripts/generate.feature', 'Given and when using the same fixture should not evaluate it twice')
def test_given_and_when_using_the_same_fixture_should_not_evaluate_it_twice():
"""Given and when using the same fixture should not evaluate it twice."""
@given('1 have a fixture (appends 1 to a list) in reuse syntax')
def have_a_fixture_appends_1_to_a_list_in_reuse_syntax():
"""1 have a fixture (appends 1 to a list) in reuse syntax."""
raise NotImplementedError
@given('I have an empty list')
def i_have_an_empty_list():
"""I have an empty list."""
raise NotImplementedError
@when('I use this fixture')
def i_use_this_fixture():
"""I use this fixture."""
raise NotImplementedError
@then('my list should be [1]')
def my_list_should_be_1():
"""my list should be [1]."""
raise NotImplementedError
'''[
            1:
        ].replace(
            # NOTE(review): this replace is a no-op as written — the two
            # quote characters look identical; likely a smart-quote was
            # garbled upstream. Confirm the intended substitution.
            u"'", u"'"
        )
    )
|
22,786 | b1722b98edc12dbd8a0a3f4a8ec4990230aa16f4 | #!/usr/bin/env python3.6
from datetime import datetime
import shlex
import io
import subprocess
import csv
import time
from pprint import pprint
from gdrive.clients import GClient
TMATE_DB = '1Jbsm4qCqk2-HRwA3cnT4wBRV3dnvvAQrXdqV6fBvuoA'
CMD_CREATE_SESSION = 'tmate -S /tmp/tmate.sock new-session -d'
CMD_PRINT_WEB = "tmate -S /tmp/tmate.sock display -p '#{tmate_web}'"
boxname = open('./.boxname').read().strip()
print(f'boxname is {boxname}')
class Tmate:
    """Keeps this box's tmate web-session URL in sync with a shared
    Google Drive CSV (spreadsheet id TMATE_DB)."""

    def __init__(self):
        self.client = GClient()

    def kill(self):
        """Terminate any running tmate process."""
        return subprocess.run(['pkill', 'tmate'])

    def new_session(self):
        """Start a new detached tmate session on /tmp/tmate.sock."""
        return subprocess.check_output(
            shlex.split(CMD_CREATE_SESSION),
            stderr=subprocess.STDOUT
        )

    def get_session_url(self):
        """Return the current session's web URL, or None if tmate is down."""
        try:
            return subprocess.check_output(
                shlex.split(CMD_PRINT_WEB),
                stderr=subprocess.STDOUT
            ).decode('utf-8').strip()
        except subprocess.CalledProcessError:
            return None

    def _get_row(self, rows):
        # Row for this machine, identified by the .boxname file contents.
        # NOTE(review): raises IndexError if the box has no row — confirm.
        return [row for row in rows if row['name'] == boxname][0]

    def update_session(self):
        """Ensure a live session exists and publish its URL to the sheet."""
        rows = self.client.get_csv_rows(fileId=TMATE_DB)
        row = self._get_row(rows)
        session_url = self.get_session_url()
        if row['url'] == session_url:
            print(f'URL in sync: {session_url}')
            return None
        print('gdrive url {} != local url {}. Restarting ...'.format(
            row['url'], session_url))
        self.kill()
        self.new_session()
        time.sleep(2)  # give tmate a moment to establish the session
        session_url = self.get_session_url()
        if not session_url:
            raise Exception('cannot create new session')
        row['updated_ts'] = int(time.time())
        row['url'] = session_url
        row['comment'] = 'sup'
        print(f'Update url to {session_url}')
        self.update_csv(rows)

    def update_csv(self, rows) -> dict:
        """Rewrite the whole CSV on Drive from the (mutated) row dicts."""
        fieldnames = list(rows[0].keys())
        new_file = io.StringIO()
        writer = csv.DictWriter(new_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)
        results = self.client.update_file(
            fileId=TMATE_DB,
            new_file=new_file,
        )
        return results
def main():
    """Entry point: sync the tmate session URL with the Drive sheet."""
    Tmate().update_session()
if __name__ == '__main__':
print(datetime.now())
main()
|
22,787 | 50ef457e96adf9fcb54be0301618d04d333aa729 | import socket
import os
import sctp
import sys
sk = sctp.sctpsocket_tcp(socket.AF_INET)
# sk.connect(("127.0.0.1", int(9090)))
def connect_plus_message(OUT_IP, OUT_PORT):
    """Connect the module-level SCTP socket, send a greeting, print the
    server's reply, then shut down and close the socket.

    NOTE(review): uses the module-level `sk`, so this can only be called
    once per process — the socket is closed before returning.
    """
    sk.connect((OUT_IP, OUT_PORT))
    print("Sending Message")
    sk.sctp_send(msg='HELLO, I AM ALIVE!!!')
    # recvfrom returns (bytes, address); decode just the payload.
    msgFromServer = sk.recvfrom(1024)
    print(msgFromServer[0].decode('utf-8'))
    sk.shutdown(0)
    sk.close()
if __name__ == '__main__':
connect_plus_message(sys.argv[1],int(sys.argv[2]))
|
22,788 | ea3f2f4dfeaf112a5889fab656e803d3c4d2f5f1 | # Generated by Django 4.1.4 on 2023-03-14 22:52
# flake8: noqa
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations, models
from dandiapi.api.models.asset import Asset, AssetBlob
from dandiapi.api.models.version import Version
ASSET_TABLE = Asset._meta.db_table
ASSET_BLOB_TABLE = AssetBlob._meta.db_table
VERSION_TABLE = Version._meta.db_table
VERSION_ASSET_TABLE = Asset.versions.through._meta.db_table
raw_sql = f'''
CREATE MATERIALIZED VIEW asset_search AS
SELECT DISTINCT
{VERSION_TABLE}.dandiset_id AS dandiset_id,
{ASSET_TABLE}.asset_id AS asset_id,
{ASSET_TABLE}.metadata AS asset_metadata,
{ASSET_BLOB_TABLE}.size AS asset_size
FROM {ASSET_TABLE}
JOIN {ASSET_BLOB_TABLE} ON {ASSET_BLOB_TABLE}.id = {ASSET_TABLE}.blob_id
JOIN {VERSION_ASSET_TABLE} ON {ASSET_TABLE}.id = {VERSION_ASSET_TABLE}.asset_id
JOIN {VERSION_TABLE} ON {VERSION_ASSET_TABLE}.version_id = {VERSION_TABLE}.id;
CREATE UNIQUE INDEX idx_asset_search_dandiset_id_asset_id ON asset_search (dandiset_id, asset_id);
CREATE INDEX idx_asset_search_asset_size ON asset_search (asset_size);
CREATE INDEX idx_asset_search_measurement_technique ON asset_search USING gin ((asset_metadata->'measurementTechnique'));
CREATE INDEX asset_search_metadata_species_name_idx ON asset_search ((asset_metadata #> '{{wasAttributedTo,0,species,name}}'));
CREATE INDEX asset_search_metadata_genotype_name_idx ON asset_search ((asset_metadata #> '{{wasAttributedTo,0,genotype,name}}'));
CREATE INDEX idx_asset_search_encoding_format ON asset_search USING gin (UPPER(asset_metadata->>'encodingFormat') gin_trgm_ops);
'''
class Migration(migrations.Migration):
    """Create the `asset_search` materialized view (and its indexes) and
    register an unmanaged model over it so the ORM can query the view."""

    dependencies = [
        ('api', '0041_assetblob_download_count_and_more'),
    ]
    operations = [
        # pg_trgm is required by the gin_trgm_ops index defined in raw_sql.
        TrigramExtension(),
        migrations.RunSQL(raw_sql),
        migrations.CreateModel(
            name='AssetSearch',
            fields=[
                ('dandiset_id', models.PositiveBigIntegerField()),
                ('asset_id', models.PositiveBigIntegerField(primary_key=True, serialize=False)),
                ('asset_metadata', models.JSONField()),
                ('asset_size', models.PositiveBigIntegerField()),
            ],
            options={
                # managed=False: the backing "table" is the materialized
                # view created above, so Django must not create/alter it.
                'db_table': 'asset_search',
                'managed': False,
            },
        ),
    ]
|
22,789 | 89c055b4350a254350ac9e5753b96d4c627c7a21 | import cv2
import sys
import os
# Overlay a cat picture on every detected face in an input image.
# NOTE(review): the commented-out `print "..."` lines and the constructs
# flagged below indicate this targets Python 2 / OpenCV 2.x.
# Get user supplied values
imagePath = sys.argv[1]
# cascPath = sys.argv[2]
cascPath = 'haarcascade_frontalface_alt.xml'
catdir = os.path.join(os.getcwd(), 'public', 'img')
catnum = sys.argv[2]
savePath = sys.argv[3]
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
# NOTE(review): cv2.cv.CV_HAAR_SCALE_IMAGE only exists in OpenCV 2.x.
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
# print "Found {0} faces!".format(len(faces))
if len(faces)==0:
    raise Exception("Error: face not found.")
# print os.path.join(catdir, 'c'+catnum+'.jpg')
catimg = cv2.imread(os.path.join(catdir, 'c'+catnum+'.jpg'))
cat_height, cat_width, cat_channels = catimg.shape
# print cat_height, cat_width, cat_channels
# Paste the cat, resized to the face's width with aspect ratio preserved.
for (x, y, w, h) in faces:
    # cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # NOTE(review): w*cat_height/cat_width relies on Python 2 integer
    # division; under Python 3 this passes a float to cv2.resize.
    cat_tmp = cv2.resize(catimg, (w, w*cat_height/cat_width))
    th, tw, tc = cat_tmp.shape
    image[y:y+th, x:x+tw, :] = cat_tmp
# cv2.imshow("Faces found" ,image)
# cv2.waitKey(0)
cv2.imwrite(savePath, image)
22,790 | c5c81bb410db0a1919003fa916361a4dc380d120 | import pandas as pd
import random
import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics as st
df = pd.read_csv("medium_data.csv")
data = df["reading_time"].tolist()
def randomSetOfMean(counter):
    """Return the mean of `counter` values sampled, with replacement, from
    the module-level `data` list (one bootstrap sample mean)."""
    # random.choice replaces the original manual randint-indexing idiom.
    dataSet = [random.choice(data) for _ in range(counter)]
    return st.mean(dataSet)
def showFig(meanList):
    """Plot the sampling distribution of the means with a mean marker."""
    df = meanList
    mean = st.mean(df)
    fig = ff.create_distplot([df], ["reading_time"], show_hist=False)
    # Vertical line marking the mean of the sampling distribution.
    fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 1], mode='lines', name='mean'))
    fig.show()
def setup():
    """Build the sampling distribution (100 means of 100-value samples),
    plot it, and print its mean, mode, median and standard deviation."""
    meanList = []
    for i in range(0, 100):
        setofmeans = randomSetOfMean(100)
        meanList.append(setofmeans)
    showFig(meanList)
    mean = st.mean(meanList)
    mode = st.mode(meanList)
    median = st.median(meanList)
    sd = st.stdev(meanList)
    print(mean)
    print(mode)
    print(median)
    print(sd)


setup()
22,791 | 0131bc73c124e94b083e21737429819b79e2c8ab | from django.db import models
class student(models.Model):
    """Student account record.

    NOTE(review): username/password/cno are IntegerFields — presumably
    numeric credentials and a contact number; confirm CharField was not
    intended.
    """
    id = models.IntegerField(default=3, primary_key=True)
    username = models.IntegerField(default=500)
    password = models.IntegerField(default=50)
    cno = models.IntegerField(default=10)
    email = models.EmailField(max_length=49)
    course = models.CharField(max_length=500)
class faculity(models.Model):
    """Faculty member record (class name keeps the original spelling,
    since the table/model name is part of the public interface)."""
    id = models.IntegerField(default=10, primary_key=True)
    name = models.CharField(max_length=100)
    gender = models.CharField(max_length=10)
    cno = models.IntegerField(default=10)  # presumably contact number — confirm
    exp = models.DecimalField(max_digits=3, decimal_places=1)  # presumably years of experience — confirm
    course = models.CharField(max_length=50)
    password = models.IntegerField(default=10)
|
22,792 | f45e80d83447b553fcd9006a14cbb14e2297820a | import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
# from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.feature_extraction import DictVectorizer
import pickle
from time import time
def get_BOW(text):
    """Return a bag-of-words dict mapping each token in `text` to its count."""
    counts = {}
    for token in text:
        if token in counts:
            counts[token] += 1
        else:
            counts[token] = 1
    return counts
def prepare_data(feature_extractor):
    """Vectorize train/test tweets using `feature_extractor`.

    Reads the module-level `train_norm`/`test_norm` DataFrames; each row's
    "Twitter" column holds one '\\x01'-separated token string and "UID" is
    the author label. Returns (training_matrix, training_labels,
    test_matrix) where both matrices come from one DictVectorizer fitted on
    the training set.
    """
    training_set = []
    test_set = []
    training_classifications = []
    for _, row in train_norm.iterrows():
        feature_dict = feature_extractor(row["Twitter"].split("\x01"))
        training_set.append(feature_dict)
        training_classifications.append(row["UID"])
    for _, row in test_norm.iterrows():
        features = feature_extractor(row["Twitter"].split("\x01"))
        test_set.append(features)
    vectorizer = DictVectorizer()
    training_data = vectorizer.fit_transform(training_set)
    # transform (not fit_transform) so test columns align with training.
    test_data = vectorizer.transform(test_set)
    return training_data, training_classifications, test_data
# save predicted result to csv file for uploading
def save_predicted(predicted, index):
    """Write predictions as a 1-based Id/Predicted CSV ready for upload."""
    out_df = pd.DataFrame(
        {"Id": range(1, len(predicted) + 1), "Predicted": list(predicted)}
    ).set_index("Id")
    out_df.to_csv(data_folder + "predicted_{}.csv".format(index))
# fit selected models and predict result
def fit_predict(clfs, indexs, data, classifications):
    """Fit each selected classifier, save its predictions, pickle the model.

    `indexs` selects entries of `clfs`; each fitted model predicts on the
    module-level `test_data` and is persisted to trained_<i>.pkl.
    """
    for i in indexs:
        s_time = time()
        clfs[i].fit(data, classifications)
        save_predicted(clfs[i].predict(test_data), i)
        with open('trained_{}.pkl'.format(i), 'wb') as fid:
            # protocol=4 supports very large pickled objects.
            pickle.dump(clfs[i], fid, protocol=4)
        print("time cost:{}".format(time() - s_time))
# load existing models and predict result
def load_predict(indexs):
    """Load previously pickled models and regenerate their prediction CSVs
    for the module-level `test_data`."""
    for i in indexs:
        with open('trained_{}.pkl'.format(i), 'rb') as fid:
            model = pickle.load(fid)
            save_predicted(model.predict(test_data), i)
data_folder = "/home/zlp/SML/"
train_norm = pd.read_csv(data_folder + "train_norm.csv")
test_norm = pd.read_csv(data_folder + "test_norm.csv")
trn_data, trn_classes, test_data = prepare_data(get_BOW)
clfs = [KNeighborsClassifier(n_jobs=-1), DecisionTreeClassifier(), RandomForestClassifier(n_jobs=-1),
MultinomialNB(), LinearSVC(), LogisticRegression(n_jobs=-1)]
models_index = list(range(2, 3)) # select a list of models
# fit_predict(clfs, models_index, trn_data, trn_classes)
load_predict(models_index)
|
22,793 | 71c506c3e6abc75d3efe4149ef167f859463fb0f | import glob
import os
import pickle
import random
from collections import Counter
import numpy as np
from dataset.dataset_base import Dataset_Base
from utils.data_downloader import download_file_from_google_drive, downloder
from utils.progress import progress
script_path = os.path.dirname(__file__)
save_path = os.path.abspath(script_path + "/data")
if not os.path.exists(save_path):os.mkdir(save_path)
IMAGE_SIZE = 11788
class CUB(Dataset_Base):
    """CUB-200-2011 birds dataset loader (images at 64/128/256 px + captions).

    Downloads and extracts the data on first use, caches decoded arrays as
    pickles, and splits the first 10000 ids into train, the rest into val.
    """

    def __init__(self, args, pre_train, shuffle=True):
        image_size = args.image_size
        gpu_num = args.gpu_num
        batch_size = args.batch_size
        data_path = os.path.dirname(__file__) + "/data"
        super(CUB, self).__init__(image_size, gpu_num, batch_size, pre_train, shuffle)
        if not os.path.exists(data_path):
            os.mkdir(data_path)
        data_path = data_path + "/bird"
        if not os.path.exists(data_path):
            os.mkdir(data_path)
        self.data_path = data_path
        image_data_64 = self.load_images(64)
        image_data_128 = self.load_images(128)
        image_data_256 = self.load_images(256)
        caption_data = self.load_captions()
        id_list = list(image_data_64.keys())
        # First 10000 ids -> train split, remainder -> validation split.
        self.train_image_data = {key: {"x_64": image_data_64[key], "x_128": image_data_128[key], "x_256": image_data_256[key]} for key in id_list[:10000]}
        self.val_image_data = {key: {"x_64": image_data_64[key], "x_128": image_data_128[key], "x_256": image_data_256[key]} for key in id_list[10000:]}
        self.train_caption_data = {key: caption_data[key] for key in id_list[:10000]}
        self.val_caption_data = {key: caption_data[key] for key in id_list[10000:]}
        self.train_id_list = list(self.train_image_data.keys())
        self.val_id_list = list(self.val_image_data.keys())
        if self.shuffle:
            np.random.shuffle(self.train_id_list)
            np.random.shuffle(self.val_id_list)

    def load_images(self, img_size):
        """Return {image_id: array} at img_size, downloading/extracting and
        caching to image_data_<size>.plk on first call."""
        cache_path = self.data_path + f"/image_data_{img_size}.plk"
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as fp:
                data = pickle.load(fp)
        else:
            file_name = "CUB_200_2011.tgz"
            url = "http://www.vision.caltech.edu.s3-us-west-2.amazonaws.com/visipedia-data/CUB-200-2011/CUB_200_2011.tgz"
            if not os.path.exists(self.data_path + f"/{file_name}"):
                downloder(url, self.data_path + f"/{file_name}")
            # Bug fix: the original checked "/images/*.jpg", which never
            # matches the per-class subdirectories, so the archive was
            # re-extracted on every cold start.
            if len(glob.glob(self.data_path + "/images/*/*.jpg")) != IMAGE_SIZE:
                print("Info:Extracting image data from tar file")
                import tarfile, shutil
                # NOTE(review): extractall on a downloaded archive is
                # vulnerable to path traversal; consider a members filter.
                with tarfile.open(self.data_path + f"/{file_name}", 'r') as tar_fp:
                    tar_fp.extractall(self.data_path)
                shutil.move(self.data_path + "/CUB_200_2011/images", self.data_path)
            data = {}
            files = glob.glob(self.data_path + "/images/*/*.jpg")
            print(f"Info:load {img_size}x{img_size} image data")
            for i, _path in enumerate(files):
                # Image id is the numeric suffix of the file name.
                arr_id = int(_path.split("_")[-1].split(".")[0])
                arr = self.path2array(_path, img_size)
                data[arr_id] = arr
                progress(i + 1, IMAGE_SIZE)
            print("")
            with open(cache_path, "wb") as fp:
                pickle.dump(data, fp)
        return data

    def load_captions(self):
        """Return {image_id: caption tokens}, caching to caption_data.pkl.

        Bug fix: the original wrote the cache to "caption_data.plk" but
        read from "caption_data.pkl", so the cache never took effect.
        """
        cache_path = self.data_path + "/caption_data.pkl"
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as fp:
                data = pickle.load(fp)
        else:
            file_name = "cub_icml.tar.gz"
            drive_id = "0B0ywwgffWnLLLUc2WHYzM0Q2eWc"
            if not os.path.exists(self.data_path + f"/{file_name}"):
                download_file_from_google_drive(drive_id, self.data_path + f"/{file_name}")
            if not os.path.exists(self.data_path + "/captions"):
                print("Info:Extracting caption data from tar file")
                import tarfile
                with tarfile.open(self.data_path + f"/{file_name}", 'r') as tar_fp:
                    tar_fp.extractall(self.data_path)
                file_name = file_name.split(".")[0]
                os.rename(self.data_path + f"/{file_name}", self.data_path + "/captions")
            data = {}
            files = glob.glob(self.data_path + "/captions/*/*.t7")
            import torchfile
            for path in files:
                cap_id = int(path.split("_")[-1].split(".")[0])
                cap = torchfile.load(path)[b'txt']
                data[cap_id] = cap
            with open(cache_path, "wb") as fp:
                pickle.dump(data, fp)
        return data
22,794 | 7803fd775da9b677d51f7a93ab58f6b47a88a2ad | from django import forms
from django.forms import fields, models
from .models import Applicatiion, Job
class ApplicationForm(forms.ModelForm):
    """Form for submitting a job application (name, email, URL, CV,
    cover letter). The model name keeps its original (misspelled) class."""
    class Meta:
        model = Applicatiion
        fields = ['APPName', 'APPEmail', 'APPurl', 'APPCV', 'APPCOver_letter']
class JobForm(forms.ModelForm):
    """Form for creating/editing a Job; slug and owner are excluded and so
    must be set elsewhere (presumably by the view — confirm)."""
    class Meta:
        model = Job
        fields = '__all__'
        exclude = ['JOBSlug', 'JOBowner']
|
22,795 | eb9849f6b67af6214f7cf03be7a9667bd07cf3c0 | # coding:utf-8
import unittest
from EpisodeTopicsEncoding import TopicsTokenizer
class TestTopicsTokenizer(unittest.TestCase):
    """Unit tests for TopicsTokenizer tokenization and stop-word removal."""

    def test_Sanity(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual(True, True)

    def test_TokenizeTerms(self):
        # tokenize() apparently splits on commas/whitespace/punctuation and
        # returns the tokens sorted alphabetically (inferred from expected
        # values — confirm against the implementation).
        self.assertEqual(
            TopicsTokenizer.tokenize('Thanksgiving, Turkeys, Thawing a Turkey, Dry Brine (Cure), Rubbed Sage, Golden Syrup, Starch'),
            sorted(['Sage', 'Golden', 'Turkeys', 'Syrup', 'a', 'Thawing', 'Turkey', 'Dry', 'Brine', 'Rubbed', 'Starch', 'Thanksgiving', 'Cure']))
        self.assertEqual(
            TopicsTokenizer.tokenize("Bread Pudding, Bread Stalling, Amylose, Half & Half"),
            sorted(["Bread", "Pudding", "Stalling", "Amylose", "Half"]))

    def test_removeStopWords(self):
        # The article "a" present in the tokenized input is absent from the
        # expected output, i.e. stop words are dropped.
        self.assertEqual(
            TopicsTokenizer.removeStopWords(TopicsTokenizer.tokenize('Thanksgiving, Turkeys, Thawing a Turkey, Dry Brine (Cure), Rubbed Sage, Golden Syrup, Starch')),
            sorted(['Thanksgiving', 'Turkeys', 'Thawing', 'Turkey', 'Dry', 'Brine', 'Cure', 'Rubbed', 'Sage', 'Golden', 'Syrup', 'Starch']))
if __name__ == '__main__':
unittest.main()
|
22,796 | 47dcddea7cd5a1cb3c76b4fb4730a448a1c2855f | from collections import deque
class NumInfo:
    """Tracks one person's number and how many meetings they have had."""

    def __init__(self, num):
        self.myNum = num
        self.cnt = 0


class InAndOut:
    """Replays an enter/leave log and counts meetings per person.

    A person's total is the number of people already inside when they
    enter, plus one for each newcomer admitted while they remain inside.
    """

    def __init__(self, enter, leave):
        self.enter = deque(enter)
        self.leave = deque(leave)
        self.totalLen = len(enter)
        self.nDict = dict()    # people currently inside
        self.ansDict = dict()  # people already matched to a leave event

    def makeAnswer(self):
        """Return the meeting counts ordered by person number 1..totalLen."""
        while self.leave:
            target = self.leave.popleft()
            # Admit people (in enter order) until the leaver is inside.
            while target not in self.nDict:
                self.plusNumCnt()
                newcomer = NumInfo(self.enter.popleft())
                newcomer.cnt = len(self.nDict)  # meets everyone already inside
                self.nDict[newcomer.myNum] = newcomer
            self.ansDict[target] = self.nDict.pop(target)
        return [self.ansDict[i].cnt for i in range(1, self.totalLen + 1)]

    def plusNumCnt(self):
        """Everyone currently inside meets the newcomer about to enter."""
        for info in self.nDict.values():
            info.cnt += 1


def solution(enter, leave):
    """Return, for each person 1..n, how many other people they met."""
    return InAndOut(enter, leave).makeAnswer()
22,797 | 141dc7f65fd7a1d6b0926d21ff0faacc23072e5f | from __future__ import unicode_literals
import datetime
import json
import logging
import os
import random
import re
from collections import OrderedDict
import pandas as pd
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models.fields import related
from ltc.base.models import Test, TestFile
from pylab import *
from pylab import np
dateconv = np.vectorize(datetime.datetime.fromtimestamp)
logger = logging.getLogger('django')
class TestOnlineData(models.Model):
    """Incrementally-computed 'online' statistics for a running test.

    Rows are keyed by (test, name) where name is one of 'response_codes',
    'aggregate_table' or 'data_over_time'; `data` holds accumulated JSON.
    """

    test = models.ForeignKey(
        Test, on_delete=models.CASCADE,
        related_name='online_data'
    )
    # Which online dataset this row holds (see class docstring).
    name = models.CharField(max_length=200, default='')
    start_line = models.IntegerField(default=0)
    data = JSONField()

    @classmethod
    def update(cls, test: Test):
        """Parse any new lines of the test's main result CSV and merge them
        into the three online datasets.

        `test.is_locked` is used as a crude mutex so overlapping invocations
        don't double-count. The lock is now always released via try/finally
        (the original leaked it when the result-file row was missing) and
        errors are logged instead of being silently swallowed.
        """
        if test.is_locked is True:
            return
        test.is_locked = True
        test.save()
        try:
            result_file = TestFile.objects.filter(
                test=test,
                file_type=TestFile.MAIN_RESULT_CSV_FILE,
            ).first()
            if not result_file:
                return
            result_file_path = str(result_file.path)
            logger.info(f'[online] result file X{result_file_path}X')
            logger.info(os.access(result_file_path, os.F_OK))
            if not os.path.exists(result_file_path):
                logger.info(
                    f'[online] result file does not exists X{result_file_path}X'
                )
                return
            # Count lines without holding the whole file in memory; the
            # original used a bare open() and never closed the handle.
            with open(result_file_path) as fp:
                num_lines = sum(1 for _ in fp)
            # Leave a 10-line safety margin so a partially-written tail of
            # the CSV is not parsed yet.
            if test.online_lines_analyzed > num_lines - 10:
                return
            read_lines = num_lines - test.online_lines_analyzed - 10
            skiprows = test.online_lines_analyzed
            df = pd.read_csv(
                result_file_path,
                index_col=0,
                low_memory=False,
                skiprows=skiprows,
                nrows=read_lines
            )
            test.online_lines_analyzed = (skiprows + read_lines)
            test.save()
            df.columns = [
                'response_time', 'url', 'responseCode', 'success',
                'threadName', 'failureMessage', 'grpThreads', 'allThreads'
            ]
            # Index is epoch milliseconds -> datetime.
            df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
            cls._merge_response_codes(test, df)
            cls._merge_aggregate_table(test, df)
            cls._merge_data_over_time(test, df)
        except Exception:
            # Keep the original best-effort behaviour (never crash the
            # caller) but at least record what went wrong.
            logger.exception('[online] failed to update online data')
        finally:
            test.is_locked = False
            test.save()

    @classmethod
    def _merge_response_codes(cls, test, df):
        """Merge per-response-code request counts into 'response_codes'."""
        group_by_response_codes = df.groupby('responseCode')
        update_df = pd.DataFrame()
        update_df['count'] = group_by_response_codes.success.count()
        update_df = update_df.fillna(0)
        output_json = json.loads(
            update_df.to_json(orient='index', date_format='iso'),
            object_pairs_hook=OrderedDict)
        new_data = {}
        for row in output_json:
            new_data[row] = {'count': output_json[row]['count']}
        if not TestOnlineData.objects.filter(
                test=test, name='response_codes').exists():
            online_data = TestOnlineData(
                test=test,
                name='response_codes',
                data=new_data
            )
            online_data.save()
        else:
            online_data = TestOnlineData.objects.get(
                test=test, name='response_codes'
            )
            old_data = online_data.data
            for k in new_data:
                if k not in old_data:
                    old_data[k] = {'count': 0}
                old_data[k] = {
                    'count': old_data[k]['count'] + new_data[k]['count']
                }
            online_data.data = old_data
            online_data.save()

    @classmethod
    def _merge_aggregate_table(cls, test, df):
        """Merge per-URL latency/count/error aggregates into
        'aggregate_table'. 'weight' (sum of response times) is stored so
        the running average can be merged exactly across chunks."""
        group_by_url = df.groupby('url')
        update_df = group_by_url.aggregate({
            'response_time': np.mean
        }).round(1)
        update_df['maximum'] = group_by_url.response_time.max().round(1)
        update_df['minimum'] = group_by_url.response_time.min().round(1)
        update_df['count'] = group_by_url.success.count().round(1)
        update_df['errors'] = df[(
            df.success == False
        )].groupby('url')['success'].count()
        update_df['weight'] = group_by_url.response_time.sum()
        update_df = update_df.fillna(0)
        update_df.columns = [
            'average',
            'maximum',
            'minimum',
            'count',
            'errors',
            'weight'
        ]
        new_data = {}
        output_json = json.loads(
            update_df.to_json(orient='index', date_format='iso'),
            object_pairs_hook=OrderedDict
        )
        for row in output_json:
            new_data[row] = {
                'average': output_json[row]['average'],
                'maximum': output_json[row]['maximum'],
                'minimum': output_json[row]['minimum'],
                'count': output_json[row]['count'],
                'errors': output_json[row]['errors'],
                'weight': output_json[row]['weight']
            }
        if not TestOnlineData.objects.filter(
                test=test, name='aggregate_table').exists():
            online_data = TestOnlineData(
                test=test,
                name='aggregate_table',
                data=new_data)
            online_data.save()
        else:
            online_data = TestOnlineData.objects.get(
                test=test,
                name='aggregate_table'
            )
            old_data = online_data.data
            for k in new_data:
                if k not in old_data:
                    old_data[k] = {
                        'average': 0,
                        'maximum': 0,
                        'minimum': 0,
                        'count': 0,
                        'errors': 0,
                        'weight': 0
                    }
                maximum = (
                    new_data[k]['maximum']
                    if new_data[k]['maximum'] > old_data[k]['maximum']
                    else old_data[k]['maximum']
                )
                minimum = (
                    new_data[k]['minimum']
                    if new_data[k]['minimum'] < old_data[k]['minimum']
                    else old_data[k]['minimum']
                )
                old_data[k] = {
                    'average':
                        (old_data[k]['weight'] + new_data[k]['weight']) /
                        (old_data[k]['count'] + new_data[k]['count']),
                    'maximum': maximum,
                    'minimum': minimum,
                    'count':
                        old_data[k]['count'] + new_data[k]['count'],
                    'errors':
                        old_data[k]['errors'] + new_data[k]['errors'],
                    'weight':
                        old_data[k]['weight'] + new_data[k]['weight'],
                }
            online_data.data = old_data
            online_data.save()

    @classmethod
    def _merge_data_over_time(cls, test, df):
        """Merge 1-minute-bucket throughput/latency rows into
        'data_over_time'.

        NOTE(review): as in the original, only the *last* minute bucket of
        the parsed chunk survives the loop below (new_data is overwritten
        each iteration) — confirm whether every bucket should be stored.
        """
        update_df = pd.DataFrame()
        df_gr_by_ts = df.groupby(pd.Grouper(freq='1Min'))
        update_df['avg'] = df_gr_by_ts.response_time.mean()
        update_df['count'] = df_gr_by_ts.success.count()
        update_df['weight'] = df_gr_by_ts.response_time.sum()
        df_gr_by_ts_only_errors = df[(
            df.success == False)].groupby(pd.Grouper(freq='1Min'))
        update_df['errors'] = df_gr_by_ts_only_errors.success.count()
        new_data = {}
        output_json = json.loads(
            update_df.to_json(orient='index', date_format='iso'),
            object_pairs_hook=OrderedDict)
        for row in output_json:
            new_data = {
                'timestamp': row,
                'avg': output_json[row]['avg'],
                'count': output_json[row]['count'],
                'errors': output_json[row]['errors'],
                'weight': output_json[row]['weight'],
            }
        if not TestOnlineData.objects.filter(
                test=test, name='data_over_time').exists():
            online_data = TestOnlineData(
                test=test,
                name='data_over_time',
                data=new_data
            )
            online_data.save()
        else:
            data_over_time_data = TestOnlineData.objects.filter(
                test=test,
                name='data_over_time'
            ).values()
            update = False
            for d in data_over_time_data:
                if d['data']['timestamp'] == new_data['timestamp']:
                    d_id = d['id']
                    update = True
            if update:
                test_running_data = TestOnlineData.objects.get(id=d_id)
                old_data = test_running_data.data
                old_data['average'] = (
                    old_data['weight'] + new_data['weight']) / (
                    old_data['count'] + new_data['count'])
                old_data['count'] = old_data['count'] + new_data['count']
                # errors can be NaN->None after the groupby; treat as 0.
                old_errors = (
                    0 if old_data['errors'] is None
                    else old_data['errors']
                )
                new_errors = (
                    0 if new_data['errors'] is None
                    else new_data['errors']
                )
                old_data['errors'] = old_errors + new_errors
                old_data['weight'] = (
                    old_data['weight'] + new_data['weight']
                )
                test_running_data.data = old_data
                test_running_data.save()
            else:
                test_running_data = TestOnlineData(
                    test=test,
                    name='data_over_time',
                    data=new_data)
                test_running_data.save()
|
def order():
    """Return Stock.txt as a list of per-line, comma-split field lists."""
    with open("Stock.txt", "r") as stock_file:
        return [line.strip("\n").split(",") for line in stock_file]
# print(order())
|
22,799 | 2f08028c9f56bb6c7aaee31407ed072be45b972f | main = [1,2,3,4,5]
for side in main:
for x in main:
if side == x:
continue
else:
print(str(side) + ',' + str(x))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.