index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,000 | 3d004e0dfa6e9883bc4385fce24302d54d8ae1b4 | #coding:cp949
prompt = """
1.Add
2.Del
3.List
4.Quit
Enter number: """
# Menu loop: keep showing the prompt until the user selects option 4 (Quit).
number = 0
while number != 4:  # BUG FIX: original had 'While' (capital W), a SyntaxError
    print(prompt)
    # NOTE(review): int(input()) raises ValueError on non-numeric input —
    # confirm whether the menu should re-prompt instead of crashing.
    number = int(input())
|
991,001 | 6ae762e91429a8f986a336d72cd07aa9600af30d | import os
import dgl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import numpy as np
from sklearn.model_selection import KFold
import digital_patient
from digital_patient.conformal.base import RegressorAdapter
from digital_patient.conformal.icp import IcpRegressor
from digital_patient.conformal.nc import RegressorNc
from examples.load_data2 import load_physiology
def main():
    """Train a digital-patient graph model on cardiac physiology data.

    Reads ``cardiac-model/data/data.csv``, builds sliding-window samples,
    fits a ``DigitalPatient`` model on a CV split and saves one plot per
    variable into ``cardiac-model/results``.
    """
    # create directory to save results
    output_dir = 'cardiac-model'
    data_dir = os.path.join(output_dir, 'data')
    result_dir = os.path.join(output_dir, 'results')
    if not os.path.isdir(result_dir):
        os.makedirs(result_dir)
    # load data; column names look like "name unit" — keep only the name part
    df = pd.read_csv(os.path.join(data_dir, 'data.csv'), index_col=0)
    var_names = [name.split(' ')[0] for name in df.columns]
    x = df.values.astype('float32')
    # tile the whole series 10x along the time axis to enlarge the dataset
    reps = 10
    x = np.tile(x.T, reps=reps).T
    # # check
    # plt.figure()
    # plt.plot(x[:500, 0], x[:500, 1])
    # plt.show()
    # # scale data
    # scaler = StandardScaler()
    # scaler = scaler.fit(x)
    # x = scaler.transform(x)
    # create sample lists: each sample is a window of length window_size-2 and
    # its label is the window of the same length that immediately follows it
    samples = []
    labels = []
    window_size = 1000
    for batch in range(x.shape[0] - 2 * window_size):
        print(f"{batch} - {batch + window_size - 2} -> {batch + window_size - 1} - {batch + 2 * window_size - 3}")
        samples.append(x[batch:batch + window_size - 2])
        labels.append(x[batch + window_size - 1:batch + 2 * window_size - 3])
    samples = np.array(samples)
    labels = np.array(labels)
    # create CV splits: first fold gives (train+val) vs test; a second split of
    # the (train+val) indices gives train vs val
    skf = KFold(n_splits=5, shuffle=True)
    trainval_index, test_index = [split for split in skf.split(samples)][0]
    skf2 = KFold(n_splits=5, shuffle=True)
    train_index, val_index = [split for split in skf2.split(np.arange(trainval_index.size))][0]
    x_train, x_val = samples[trainval_index[train_index]], samples[trainval_index[val_index]]
    y_train, y_val = labels[trainval_index[train_index]], labels[trainval_index[val_index]]
    x_test, y_test = samples[test_index], labels[test_index]
    # create edge list: fully connected graph over all variables, self-loops included
    edge_list = []
    for i in range(df.shape[1]):
        for j in range(df.shape[1]):
            edge_list.append((i, j))
    # instantiate a digital patient model
    G = dgl.DGLGraph(edge_list)
    dp = digital_patient.DigitalPatient(G, epochs=20, lr=0.01, window_size=window_size-2)
    # # plot the graph corresponding to the digital patient
    # nx_G = dp.G.to_networkx()
    # pos = nx.circular_layout(nx_G)
    # node_labels = {}
    # for i, cn in enumerate(var_names):
    #     node_labels[i] = cn
    # plt.figure()
    # nx.draw(nx_G, pos, alpha=0.3)
    # nx.draw_networkx_labels(nx_G, pos, labels=node_labels)
    # plt.tight_layout()
    # plt.savefig(f'{result_dir}/graph.png')
    # plt.show()
    # instantiate the model, train and predict
    dp.fit(x_train, y_train)
    predictions = dp.predict(x_test)
    # plot the results — the inner `break` means only the first test sample
    # (j == 0) is plotted for each variable
    sns.set_style('whitegrid')
    for i, name in enumerate(var_names):
        for j in range(predictions.shape[0]):
            xi = y_test[j, :, i]
            pi = predictions[j, :, i]
            if name == 't':
                # skip the time variable itself
                continue
            ti = labels[0, :, 0]
            # tik = np.repeat(ti, pi.shape[0])
            # NOTE(review): this `pik` is immediately shadowed by the loop
            # variable below — confirm the hstack is still needed.
            pik = np.hstack(pi)
            plt.figure()
            plt.plot(ti, xi, label='true')
            for pik in pi:
                plt.plot(ti, pik, c='r', alpha=0.2)
            # sns.lineplot(tik, pik, alpha=0.2, ci=0.9)
            # plt.fill_between(ti, pi[:, 0], pi[:, 1], alpha=0.2, label='predicted')
            plt.title(name)
            plt.legend()
            # plt.ylabel(ylabel)
            plt.xlabel('time')
            plt.tight_layout()
            plt.savefig(f'{result_dir}/{name}_{j}.png')
            plt.show()
            break
    return
if __name__ == '__main__':
    main()
|
991,002 | 1ba39b03f10c1ab7498a80cfe80463d9626f5172 | n=int(input())
# Sum the squares of the decimal digits of n (read on the previous line)
# and print the total; n is consumed (reduced to 0) in the process.
total = 0
while n > 0:
    digit = n % 10       # least-significant digit
    total += digit * digit
    n //= 10             # drop that digit
print(total)
|
991,003 | 9b18b86e6f482514f11322490d15b59e94172355 | from Crypto.Util.number import size
from dateutil.parser import parser
__author__ = 'zanetworker'
from ConfigParser import SafeConfigParser
import utils.CommonUtils as CommonUtil
# Shared parser instance, populated once at import time so every load_*
# helper below reads from the same deployment.ini.
parser = SafeConfigParser()
try:
    config_file = CommonUtil.get_file_location('config', 'deployment.ini')
    parser.read(config_file)
except Exception as e:
    # Best-effort: a missing/unreadable config is only reported here; the
    # load_* helpers will fail later if the needed sections are absent.
    print e.message
def load_vipr_credentials():
    """Return ViPR connection settings from the [vipr] section.

    BUG FIX: the original called ``parser.get('vipr', 'PORT', 4443)`` — in
    Python 2's ConfigParser the third positional argument is the ``raw``
    flag, not a default value, so 4443 silently enabled raw interpolation
    and a missing PORT still raised NoOptionError. The intended default of
    4443 is now applied explicitly.
    """
    if parser.has_option('vipr', 'PORT'):
        vipr_port = parser.get('vipr', 'PORT')
    else:
        vipr_port = 4443
    results = {
        'vipr_host': parser.get('vipr', 'HOST'),
        'vipr_port': vipr_port,
        'cookie_path': parser.get('vipr', 'COOKIE_DIR_ABS_PATH')
    }
    return results
def load_smis_details():
    """Return SMI-S provider settings from the [smis] section as a dict."""
    fields = (
        ('ip_address', 'IP_ADDRESS'),
        ('name', 'NAME'),
        ('password', 'PASS'),
        ('port_number', 'PORT'),
        ('use_ssl', 'USE_SSL'),
        ('user_name', 'USER'),
    )
    return dict((result_key, parser.get('smis', option))
                for result_key, option in fields)
def load_vplex_details():
    """Return VPLEX connection settings from the [vplex] section as a dict."""
    fields = (
        ('ip_address', 'IP_ADDRESS'),
        ('name', 'NAME'),
        ('password', 'PASS'),
        ('port_number', 'PORT'),
        ('use_ssl', 'USE_SSL'),
        ('user_name', 'USER'),
        ('interface_type', 'INTERFACE_TYPE'),
    )
    return dict((result_key, parser.get('vplex', option))
                for result_key, option in fields)
def load_cmcne_details():
    """Return CMCNE (SMI-S) settings from the [cmcne] section as a dict."""
    fields = (
        ('name', 'NAME'),
        ('system_type', 'SYSTEM_TYPE'),
        ('smis_provider_ip', 'SMIS_IP'),
        ('smis_port_number', 'SMIS_PORT'),
        ('smis_user_name', 'SMIS_USER'),
        ('smis_password', 'SMIS_PASSWORD'),
        ('smis_use_ssl', 'USE_SSL'),
    )
    return dict((result_key, parser.get('cmcne', option))
                for result_key, option in fields)
def load_hosts_details():
    """Return a list of per-host setting dicts from the [hosts] section.

    Every option in the section holds a comma-separated column; row i across
    all columns describes host i.
    """
    def column(option):
        # One comma-separated column of per-host values.
        return parser.get('hosts', option).split(',')

    host_names = column('HOST_NAME')
    names = column('NAME')
    user_names = column('USER_NAME')
    passwords = column('PASSWORD')
    use_ssls = column('USE_SSL')
    types = column('TYPE')
    port_numbers = column('PORT_NO')
    # TODO - check that all columns are of the same size (an IndexError is
    # raised below if any column is shorter than HOST_NAME).
    results = []
    for idx in range(len(host_names)):
        results.append({
            "host_name": host_names[idx],
            "name": names[idx],
            "user_name": user_names[idx],
            "password": passwords[idx],
            "port_number": port_numbers[idx],
            "use_ssl": use_ssls[idx],
            "type": types[idx]
        })
    return results
991,004 | 035a97c756d61cdaf309336ecc495091ca0be97c | # -*- coding:utf-8 -*-
import time
import openpyxl
from config import SHEET_TITLE
def get_excel(data, field, file):
    """Write ``field`` headers and ``data`` rows into a new workbook saved to ``file``.

    NOTE(review): values in time/date columns are converted in place, so the
    caller's ``data`` is mutated — confirm callers do not rely on the raw
    timestamps. Returns the result of ``Workbook.save()``, which is ``None``.
    """
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = SHEET_TITLE
    # Header row (row 1): the first element of every field tuple.
    for col_idx, field_info in enumerate(field):
        sheet.cell(row=1, column=col_idx + 1, value=u'%s' % field_info[0])
    # Data rows start at row 2.
    for row_idx, record in enumerate(data):
        for col_idx, field_info in enumerate(field):
            if 'time' in field_info[0] or 'date' in field_info[0]:
                # Convert epoch seconds to a readable local timestamp
                # (mutates the record in place, as the original did).
                record[col_idx] = time.strftime(
                    "%Y-%m-%d %H:%M:%S", time.localtime(record[col_idx]))
            sheet.cell(row=row_idx + 2, column=col_idx + 1,
                       value=u'%s' % record[col_idx])
    # Workbook.save() returns None; kept for interface compatibility.
    return workbook.save(file)
|
991,005 | 9558fbb18cd7b9e37a5ab4fd33207a4f995975f7 | from utilities.file_operations import FileOperation
from utilities.read_configs import ReadConfig
from utilities.os_operations import OSOperations
import pytest
@pytest.mark.recon
@pytest.mark.findasset
@pytest.mark.gau_test_get_all_urls
def test_get_all_urls():
    """should return result of 'gau 42qwerty42.com'"""
    config = ReadConfig.get_all_settings()
    output_file_name = "gau-test_get_all_urls.txt"
    section_file_name = config["URLS_FILE"]
    try:
        # Design: gau tool path, extensions to skip, target domain.
        command = f'{config["GAU_TOOL_PATH"]} -b {config["GAU_SKIP_EXTENSIONS"]} {config["DOMAIN_TEXT"]}'
        # Execution and dumping of the tool's output.
        FileOperation.dump_all(output_file_name, OSOperations.execute_shell_command(command))
        config["LOGGER"].info(f"Result dumped into '{output_file_name}'")
        # Merge the fresh results into the shared URLs file.
        FileOperation.merge_both(output_file_name, section_file_name)
        config["LOGGER"].info(f"File '{output_file_name}' is now merged with '{section_file_name}'")
        assert True
    except Exception as e:
        config["LOGGER"].error(f"gau-test_get_all_urls -> {e}")
        assert False
@pytest.mark.custom
@pytest.mark.gau_do_custom
def test_do_custom():
    """should return result of 'X'"""
    config = ReadConfig.get_all_settings()
    output_file_name = "gau-test_do_custom.txt"
    try:
        # Design: the user-supplied custom gau command is run verbatim.
        command = f'{config["GAU_CUSTOM_COMMAND"]}'
        config["LOGGER"].info(f"'{command}' is executed!")
        # Execution and dumping of the command's output.
        FileOperation.dump_all(output_file_name, OSOperations.execute_shell_command(command))
        config["LOGGER"].info(f"Result dumped into '{output_file_name}'")
        assert True
    except Exception as e:
        config["LOGGER"].error(f"gau-test_do_custom -> {e}")
        assert False
|
991,006 | 5fbb77ca9933c141fe36ebfc9d1c7ebf2583f400 | # ํ
# Tetromino (Baekjoon 14500)
# https://www.acmicpc.net/problem/14500
# Hints
# 1. Brute force: https://www.acmicpc.net/board/view/61597
# 2. Use DFS and keep the largest sum reached while the shape grows to length (size) 4.
# 3. To avoid counting the same shape at the same position twice, limit_i and limit_j
#    anchor the search: new cells may not be added above row limit_i, or to the left
#    of column limit_j within row limit_i.
# 4. dp prevents a cell already selected for the current shape from being selected again.
def dfs(i, j, length):
    """Grow a tetromino-like shape of up to 4 cells from (i, j); return the
    best achievable sum of cell scores.

    Relies on module-level globals: N, M (board size), scores (board values),
    dp (cells used by the shape being grown), and limit_i / limit_j (the
    anchor cell set by solve() to deduplicate shapes).
    """
    # Out of board bounds.
    if i < 0 or i >= N or j < 0 or j >= M:
        return 0
    # Dedup: never extend into cells that precede the anchor (limit_i, limit_j)
    # in row-major order, so each shape/position pair is counted once.
    if i < limit_i or (i == limit_i and j < limit_j):
        return 0
    # Cell already part of the current shape.
    if dp[i][j]:
        return 0
    dp[i][j] = True
    ret = 0
    # Shape not yet size 4: try extending one cell in each direction and keep
    # the best resulting score.
    if length != 4:
        ret = max(ret, dfs(i + 1, j, length + 1))
        ret = max(ret, dfs(i, j + 1, length + 1))
        ret = max(ret, dfs(i - 1, j, length + 1))
        ret = max(ret, dfs(i, j - 1, length + 1))
    # Size 1: besides straight extensions, an L-corner is possible — one cell
    # below plus two to the right, or two below plus one to the right, e.g.:
    # 134   14
    #   4    3
    #   ,    4
    if length == 1:
        ret = max(ret, dfs(i + 1, j, length + 2) + dfs(i, j + 1, length + 3))
        ret = max(ret, dfs(i + 1, j, length + 3) + dfs(i, j + 1, length + 2))
    # Size 2: besides straight extensions, T-shaped branches are possible:
    # 124    1    1    1
    #   4   424  42   24
    #   ,    ,    4,   4 ,
    if length == 2:
        ret = max(ret, dfs(i + 1, j, length + 2) + dfs(i, j + 1, length + 2))
        ret = max(ret, dfs(i, j - 1, length + 2) + dfs(i, j + 1, length + 2))
        ret = max(ret, dfs(i, j - 1, length + 2) + dfs(i + 1, j, length + 2))
        # NOTE(review): this line duplicates the first length==2 branch above;
        # it is harmless (max of an identical value) but likely unintended.
        ret = max(ret, dfs(i + 1, j, length + 2) + dfs(i, j + 1, length + 2))
    # Backtrack: free the cell for other shapes, then add its score.
    dp[i][j] = False
    ret += scores[i][j]
    return ret
def solve():
    """Try every board cell as the shape anchor and record the best score."""
    global answer, limit_i, limit_j
    for row in range(N):
        for col in range(M):
            # Anchor the dedup limits at the starting cell before searching.
            limit_i, limit_j = row, col
            answer = max(answer, dfs(row, col, 1))
if __name__ == "__main__":
    # Board size (rows N, columns M), then N rows of M cell scores.
    N, M = map(int, input().split())
    scores = []
    # dp marks cells currently used by the shape being grown.
    dp = [[False] * M for _ in range(N)]
    for _ in range(N):
        scores.append(list(map(int, input().split())))
    answer, limit_i, limit_j = 0, 0, 0
    solve()
    print(answer)
|
991,007 | 3e9ce65efa0dc5e2ff060deda73e97f9b63765c5 | #-*- coding: utf-8 -*-
# Copyright (c) 2015 Blowmorph Team
from __future__ import unicode_literals
import wx
from launcher_frame import LauncherFrame
##############################################################################
class Launcher(wx.App):
    """wx application object for the Blowmorph launcher."""

    def __init__(self):
        super(Launcher, self).__init__()

    def OnInit(self):
        """Create the launcher frame, centre it on screen and show it."""
        frame = LauncherFrame()
        self.frame = frame
        frame.Center()
        frame.Show()
        self.SetTopWindow(frame)
        return True
if __name__ == "__main__":
    # Run the wx event loop until the launcher window is closed.
    Launcher().MainLoop()
|
991,008 | f46b02a5dd8c04a2a9f3389ce831193952175434 |
# BEE Structure stores data about the keystroke
class BufferEventElement(object):
    """A single keystroke event: which key, press or release, and when."""

    def __init__(self, key, action, time):
        # 'D' (down) or 'U' (up) action, plus the event timestamp.
        self.key, self.action, self.time = key, action, time
        # Flipped to True once the event has been consumed from the buffer.
        self.delete = False
class GroupingBuffer(object):
    """Sliding buffer of keystroke events used to extract hold-time samples.

    Events ('D' = key down, 'U' = key up) are appended as they arrive; once
    four down events are queued, the hold time of the second down key is
    extracted and recorded into ``holdkey_matrix``.
    """

    def __init__(self, holdkey_matrix):
        self.events = []
        # Failsafe counter kept from the original design: prevents the system
        # from hanging when a stuck key sits in the buffer.
        self.count_returns = 0
        self.num_downs = 0
        self.num_ups = 0
        self.holdkey_matrix = holdkey_matrix

    def get_event_offset(self, action, pos):
        """Return the pos-th event (1-based) with the given action, or None.

        Args:
            action: 'D' or 'U'
            pos: offset from the start of the queue
        """
        seen = 0
        for event in self.events:
            if event.action == action:
                seen += 1
                if seen == pos:
                    return event
        return None

    def get_event_key(self, action, key, start_time=None):
        """Return the first event matching (action, key), or None.

        Args:
            action: 'D' or 'U'
            key: the key for which the action is desired
            start_time: if given, only events strictly later than this match
        """
        for event in self.events:
            # 'and' instead of bitwise '&' — same result on bools, but
            # short-circuits and reads as intended.
            if event.key == key and event.action == action:
                if start_time is None:
                    return event
                if event.time > start_time:
                    return event
        return None

    def add_event(self, buffer_event):
        """Append a keystroke event and, when possible, extract a hold time."""
        if self.events:  # Check if the list is empty or not
            # Pop consumed or 'U' events from the head. BUG FIX: guard on the
            # list staying non-empty — the original raised IndexError once
            # every buffered event had been popped.
            while self.events and (self.events[0].delete or (self.events[0].action == 'U')):
                if self.events[0].action == 'D':
                    self.num_downs -= 1
                else:
                    self.num_ups -= 1
                self.events.pop(0)
        # Add the new event to the end of the buffer.
        self.events.append(buffer_event)
        if buffer_event.action == 'D':
            self.num_downs += 1
        else:
            self.num_ups += 1
        # Once 4 down events are buffered, try to extract a hold time.
        if self.num_downs >= 4:
            # Does the second down event have a corresponding up event?
            s_down = self.get_event_offset('D', 2)
            s_up = self.get_event_key('U', s_down.key)
            if s_up is not None and s_up.time < s_down.time:
                # Likely a double letter press — look for an up event that
                # occurs after this down event instead.
                s_up = self.get_event_key('U', s_down.key, s_down.time)
            if s_down is not None and s_up is not None:
                f_down = self.get_event_offset('D', 1)
                a_down = self.get_event_offset('D', 3)
                if a_down is None:
                    # Inconsistent buffer: dump it and drop the oldest down.
                    for e in self.events:
                        # BUG FIX: original passed the format string and args
                        # straight to print() without calling .format().
                        print("{}:{}".format(e.key, e.action))
                    f_down.delete = True
                else:
                    # BUG FIX: the original fell through to the extraction
                    # below even when a_down was None, raising AttributeError
                    # on a_down.key; the extraction is now guarded by else.
                    # Extract the 1,2,3 hold time, keyed by the keys before
                    # and after the middle key.
                    # NOTE(review): `vkconvert` is never imported in this file
                    # — this path raises NameError as written; confirm the
                    # intended module and import it at file level.
                    prior = vkconvert.convert(f_down.key)
                    key = vkconvert.convert(s_down.key)
                    post = vkconvert.convert(a_down.key)
                    if prior is not None and key is not None and post is not None:
                        # Record the hold time (in milliseconds).
                        self.holdkey_matrix.get_key_distribution(prior, key, post).add_timing(
                            1000 * (s_up.time - s_down.time))
            else:
                # Likely a stuck key: allow the buffer to grow until 6 down
                # events are queued, then pop the top event and mark the
                # second down event deleted to un-stick it.
                if self.num_downs >= 6:
                    print("Popping the top event and setting the delete flag on {} since it seems to be stuck".format(
                        s_down.key))
                    self.events[0].delete = True
                    s_down.delete = True
991,009 | 0ab1019c07d6e8fa2eaa724c20f8753c1c82b0bc |
# Pair each label with its number and print one "label+number" per line.
# NOTE(review): the label strings appear to be mojibake (presumably Chinese
# for tomorrow / day after / two days after) — left byte-identical since they
# are runtime output; confirm the source file's original encoding.
print(''+"\n".join((i[0]+str(i[1])) for i in zip(['ๆๅคฉ','ๅๅคฉ','ๅคงๅๅคฉ'],[1,2,3])))
def ss(a, b, **c):
    """Print the two positional arguments, then the keyword-argument dict."""
    for value in (a, b, c):
        print(value)
# Build a one-entry dict from a user-typed key/value pair, then forward it
# to ss() as keyword arguments.
s=dict()
o=input("key")
p=input("value")
s[o]=p
ss(1,2,**s)
991,010 | 93ee4ab43cb2eae61ef9473fdd5cb3e732317234 | from django.test import TestCase
from manifest_parser import ManifestParser, ParseError
from models import MediaFile, Manifest
valid_manifest_file = 'file_manager/test_data/valid.xml'
test_upload_manifest = 'file_manager/test_data/test_upload_manifest.xml'
test_upload_1 = 'file_manager/test_data/test_upload_1.txt'
class ManifestParserTestCase(TestCase):
    """Tests for ManifestParser parsing and persistence."""

    def setUp(self):
        # BUG FIX: the original leaked both file handles. addCleanup closes
        # them after every test, even when the test fails.
        self.valid_xml_file = open(valid_manifest_file, 'r')
        self.addCleanup(self.valid_xml_file.close)
        self.invalid_xml_file = open('file_manager/test_data/missing_attr.xml', 'r')
        self.addCleanup(self.invalid_xml_file.close)

    def test_parse_valid_and_save(self):
        """Parsing a valid manifest yields two media files and persists them."""
        mp = ManifestParser(self.valid_xml_file)
        mp.parse()
        self.assertEqual(len(mp.media_file_objs), 2)
        mp.save()
        self.assertEqual(MediaFile.objects.count(), 2)
        manifest = Manifest.objects.get(id=1)
        self.assertEqual(manifest.filename, valid_manifest_file)
        self.assertEqual(manifest.media_files.count(), 2)

    def test_parse_invalid(self):
        """A manifest with a missing attribute raises ParseError."""
        with self.assertRaises(ParseError):
            ManifestParser(self.invalid_xml_file).parse()
class MediaFileUploadTestCase(TestCase):
    """Tests for uploading media files against a saved manifest."""

    def setUp(self):
        # BUG FIX: the original opened the manifest file without ever closing
        # it; the context manager releases the handle once parsing is done.
        with open(test_upload_manifest, 'r') as manifest_file:
            mp = ManifestParser(manifest_file)
            mp.parse()
            self.manifest = mp.save()
        self.upload_url = '/upload/%d' % self.manifest.id

    def test_valid_file_upload(self):
        """Uploading a file listed in the manifest redirects back to the form."""
        with open(test_upload_1, 'r') as media_file:
            response = self.client.post(self.upload_url, {
                'file': media_file,
            })
        self.assertRedirects(response, self.upload_url)

    def test_wrong_file_upload(self):
        """Uploading a non-matching file reports an MD5 mismatch."""
        # Try uploading the manifest as a media file for kicks
        with open(test_upload_manifest, 'r') as media_file:
            response = self.client.post(self.upload_url, {
                'file': media_file,
            })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            'The MD5 checksum of the uploaded file does not match the manifest' in response.content
        )
991,011 | 3b49013c439fee3e686c4f19bf5b350e924bfe83 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 13:09:29 2021
@author: lalyor
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
import functions as f
######################################################################################################################
# Load the 8 baseline signal arrays. The context manager replaces the
# original manual open/close pair, so the handle is released even if
# np.load raises.
signal = []
window_size = 40
for i in range(0, 8):
    with open("C:/Users/lalyor/Documents/Masterarbeit/data/signal_"+str(i), "rb") as file:
        signal += [np.load(file)]
signal = np.array(signal)
######################################################################################################################
# Load the rename-attack recordings (two capture phases) with exception-safe
# file handling.
angriff_rename_attack_1 = []
angriff_rename_attack_2 = []
for i in range(0, 8):
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Rename_attack/signal_silver"+str(i), "rb") as file:
        angriff_rename_attack_1 += [np.load(file)]
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Rename_attack/signal_silver_process_on"+str(i), "rb") as file:
        angriff_rename_attack_2 += [np.load(file)]
angriff_rename_attack_1 = np.array(angriff_rename_attack_1)
angriff_rename_attack_2 = np.array(angriff_rename_attack_2)
# Ground-truth labels for the rename attack: 0 = normal, -1 = attack.
# (The original comments said "Replay attack" here — that was a copy-paste
# slip.) The attack starts at sample 113 in capture 1 and sample 267 in
# capture 2; each capture has 990 samples.
pred_rename_attack_1 = [0] * 113 + [-1] * (990 - 113)
pred_rename_attack_2 = [0] * 267 + [-1] * (990 - 267)
######################################################################################################################
# Load the replay-attack recordings (two capture phases) with exception-safe
# file handling.
angriff_replay_attack_1 = []
angriff_replay_attack_2 = []
for i in range(0, 8):
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Replay_attack/signal_silver"+str(i), "rb") as file:
        angriff_replay_attack_1 += [np.load(file)]
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Replay_attack/signal_silver_process_on"+str(i), "rb") as file:
        angriff_replay_attack_2 += [np.load(file)]
angriff_replay_attack_1 = np.array(angriff_replay_attack_1)
angriff_replay_attack_2 = np.array(angriff_replay_attack_2)
# Ground-truth labels for the replay attack: 0 = normal, -1 = attack.
# The attack starts at sample 395 in capture 1 and sample 476 in capture 2;
# each capture has 990 samples.
pred_replay_attack_1 = [0] * 395 + [-1] * (990 - 395)
pred_replay_attack_2 = [0] * 476 + [-1] * (990 - 476)
######################################################################################################################
# Load the SIS-attack recordings (two capture phases) with exception-safe
# file handling.
angriff_sis_attack_1 = []
angriff_sis_attack_2 = []
for i in range(0, 8):
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Sis_attack/signal_silver"+str(i), "rb") as file:
        angriff_sis_attack_1 += [np.load(file)]
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Sis_attack/signal_silver_process_on"+str(i), "rb") as file:
        angriff_sis_attack_2 += [np.load(file)]
angriff_sis_attack_1 = np.array(angriff_sis_attack_1)
angriff_sis_attack_2 = np.array(angriff_sis_attack_2)
# Every sample is labelled as attack (-1): per the original note, the SIS
# attack causes no change in the observable process behaviour.
pred_sis_attack = np.full(len(angriff_sis_attack_1[0]), -1, dtype=int)
######################################################################################################################
# Load the fake-attack recordings (silver and black runs) with exception-safe
# file handling.
angriff_fake_attack_1 = []
angriff_fake_attack_2 = []
for i in range(0, 8):
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Silver/signal_silver_"+str(i), "rb") as file:
        angriff_fake_attack_1 += [np.load(file)]
    with open("C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Black/signal_black_"+str(i), "rb") as file:
        angriff_fake_attack_2 += [np.load(file)]
angriff_fake_attack_1 = np.array(angriff_fake_attack_1)
angriff_fake_attack_2 = np.array(angriff_fake_attack_2)
# Ground-truth labels: 0 = normal, -1 = attack while the barrier ("Schranke")
# is down, i.e. samples 63..137 (21*3 to 46*3).
pred_fake_attack = np.zeros(len(angriff_fake_attack_1[0]), dtype=int)
pred_fake_attack[21*3:46*3] = -1
######################################################################################################################
# #Without scaling/normalizing
# results = [['Scaling Method','Accuracy Test','Accuracy Outliers']]
# result_excel_angriff = []
# result_excel_test = []
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_fake_attack_1), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=1.9, min_samples=3000, algorithm='auto')
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = len(angriff_fake_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = pred_fake_attack
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# true_pos = 0
# true_neg = 0
# false_pos = 0
# false_neg = 0
# for i in range(len(ground_truth)):
# if(outlier_pred[35000 + i] == -1):
# if(ground_truth[i] == -1):
# true_pos += 1
# else:
# false_pos += 1
# else:
# if(ground_truth[i] == -1):
# false_neg += 1
# else:
# true_neg += 1
# accuracy = []
# precision = []
# recall = []
# f1 = []
# accuracy += [(true_pos + true_neg)/len(ground_truth)]
# if (true_pos + false_pos) != 0:
# precision += [true_pos / (true_pos + false_pos)]
# else:
# precision += [0]
# if (true_pos + false_neg) != 0:
# recall += [true_pos / (true_pos + false_neg)]
# else:
# recall += [0]
# if (precision[0] + recall[0]) != 0:
# f1 += [2*precision[0]*recall[0] / (precision[0] + recall[0])]
# else:
# f1 += [0]
# excel_result = accuracy + precision + recall + f1
# print("No Scaler: ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# # results += [['No Scaler', result_test,result_outlier]]
# # result_excel_angriff += [result_outlier]
# # result_excel_test += [result_test]
# # labels = dbscan.labels_
# # # identify core samples
# # core_samples = np.zeros_like(labels, dtype=bool)
# # core_samples[dbscan.core_sample_indices_] = True
# # print(core_samples)
# # # declare the number of clusters
# # n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# # print(n_clusters)
# # print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # # visualize outputs
# # colors = dbscan.labels_
# # # for i in range(8):
# # # for j in range(8):
# # # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# MinMaxScaler
# Evaluate DBSCAN outlier detection on MinMax-scaled data: first on the clean
# test set, then on training data concatenated with the replay-attack capture.
x_train = signal[:,0:35000]
x_test = signal
x_test = np.transpose(x_test)
# Outlier set: first 35000 clean samples followed by the attack capture.
x_outliers = np.concatenate((x_train,angriff_replay_attack_1), axis = 1)
x_outliers = np.transpose(x_outliers)
# x_outliers = np.transpose(signal[35000:59400])
dbscan = DBSCAN(eps=0.9, min_samples=750, algorithm='auto')
x_test = MinMaxScaler().fit_transform(x_test)
test_pred = dbscan.fit_predict(x_test)
n_outliers = 0
# Clean data: everything should be labelled 0 (inlier); DBSCAN marks
# outliers as -1, so `test_pred < ground_truth` counts false alarms.
ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
n_errors = (test_pred < ground_truth).sum()
result_test = (len(x_test)-n_errors)/(len(x_test))
cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# Re-fit on the outlier set (note: scaler is fit separately on this set).
x_outliers = MinMaxScaler().fit_transform(x_outliers)
outlier_pred = dbscan.fit_predict(x_outliers)
n_outliers = len(angriff_fake_attack_1[0])
# ground_truth = np.zeros(35000, dtype=int)
# ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# Only the attack portion (samples from index 35000 on) is scored.
ground_truth = pred_replay_attack_1
n_errors = (ground_truth != outlier_pred[35000:]).sum()
# result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# Manual confusion counts over the attack portion: -1 is the positive
# (attack) class.
true_pos = 0
true_neg = 0
false_pos = 0
false_neg = 0
for i in range(len(ground_truth)):
    if(outlier_pred[35000 + i] == -1):
        if(ground_truth[i] == -1):
            true_pos += 1
        else:
            false_pos += 1
    else:
        if(ground_truth[i] == -1):
            false_neg += 1
        else:
            true_neg += 1
# Derived metrics, each guarded against a zero denominator.
accuracy = []
precision = []
recall = []
f1 = []
accuracy += [(true_pos + true_neg)/len(ground_truth)]
if (true_pos + false_pos) != 0:
    precision += [true_pos / (true_pos + false_pos)]
else:
    precision += [0]
if (true_pos + false_neg) != 0:
    recall += [true_pos / (true_pos + false_neg)]
else:
    recall += [0]
if (precision[0] + recall[0]) != 0:
    f1 += [2*precision[0]*recall[0] / (precision[0] + recall[0])]
else:
    f1 += [0]
# Row of [accuracy, precision, recall, f1] for the results spreadsheet.
excel_result = accuracy + precision + recall + f1
print("MinMaxScaler: ")
print("Accuracy test :", result_test)
print("Accuracy outliers:", result_outlier)
# results += [['MinMaxScaler', result_test,result_outlier]]
# result_excel_angriff += [result_outlier]
# result_excel_test += [result_test]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # StandardScaler
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_fake_attack_1), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=3, min_samples=20000, algorithm='auto')
# x_test = StandardScaler().fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# # cr_test = classification_report(ground_truth,test_pred)
# x_outliers = StandardScaler().fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = len(angriff_fake_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# # ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# ground_truth = pred_fake_attack
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# # cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# true_pos = 0
# true_neg = 0
# false_pos = 0
# false_neg = 0
# for i in range(len(ground_truth)):
# if(outlier_pred[35000 + i] == -1):
# if(ground_truth[i] == -1):
# true_pos += 1
# else:
# false_pos += 1
# else:
# if(ground_truth[i] == -1):
# false_neg += 1
# else:
# true_neg += 1
# accuracy = []
# precision = []
# recall = []
# f1 = []
# accuracy += [(true_pos + true_neg)/len(ground_truth)]
# if (true_pos + false_pos) != 0:
# precision += [true_pos / (true_pos + false_pos)]
# else:
# precision += [0]
# if (true_pos + false_neg) != 0:
# recall += [true_pos / (true_pos + false_neg)]
# else:
# recall += [0]
# if (precision[0] + recall[0]) != 0:
# f1 += [2*precision[0]*recall[0] / (precision[0] + recall[0])]
# else:
# f1 += [0]
# excel_result = accuracy + precision + recall + f1
# print("StandardScaler: ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# # results += [['StandardScaler', result_test,result_outlier]]
# # result_excel_angriff += [result_outlier]
# # result_excel_test += [result_test]
# # labels = dbscan.labels_
# # # identify core samples
# # core_samples = np.zeros_like(labels, dtype=bool)
# # core_samples[dbscan.core_sample_indices_] = True
# # print(core_samples)
# # # declare the number of clusters
# # n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# # print(n_clusters)
# # print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # # visualize outputs
# # colors = dbscan.labels_
# # # for i in range(8):
# # # for j in range(8):
# # # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # # BooleanScaler
# # threshold = 1
# # x_train = signal[:,0:35000]
# # x_test = signal
# # x_test = np.transpose(x_test)
# # x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# # x_outliers = np.transpose(x_outliers)
# # # x_outliers = np.transpose(signal[35000:59400])
# # dbscan = DBSCAN(eps=0.90, min_samples=12000, algorithm='auto')
# # x_test = f.booleanScaler(x_test, threshold)
# # test_pred = dbscan.fit_predict(x_test)
# # n_outliers = 0
# # ground_truth = np.zeros(len(x_test), dtype=int)
# # n_errors = (test_pred != ground_truth).sum()
# # result_test = (len(x_test)-n_errors)/(len(x_test))
# # cm_test = confusion_matrix(ground_truth,test_pred)
# # cr_test = classification_report(ground_truth,test_pred)
# # x_outliers = f.booleanScaler(x_outliers, threshold)
# # outlier_pred = dbscan.fit_predict(x_outliers)
# # n_outliers = 2*len(angriff_rename_attack_1[0])
# # # ground_truth = np.zeros(35000, dtype=int)
# # # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# # ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# # n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# # result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# # cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# # cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# # print("BooleanScaler: ")
# # print("Accuracy test :", result_test)
# # print("Accuracy outliers:", result_outlier)
# # results += [['BooleanScaler', result_test,result_outlier]]
# result_excel_angriff += [result_outlier]
# result_excel_test += [result_test]
# # labels = dbscan.labels_
# # # identify core samples
# # core_samples = np.zeros_like(labels, dtype=bool)
# # core_samples[dbscan.core_sample_indices_] = True
# # print(core_samples)
# # # declare the number of clusters
# # n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# # print(n_clusters)
# # # print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # # visualize outputs
# # colors = dbscan.labels_
# # # for i in range(8):
# # # for j in range(8):
# # # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # RobustScaler
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_replay_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=3, min_samples=20000, algorithm='auto')
# x_test = RobustScaler().fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = RobustScaler().fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# # ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# ground_truth = pred_replay_attack_2
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# true_pos = 0
# true_neg = 0
# false_pos = 0
# false_neg = 0
# for i in range(len(ground_truth)):
# if(outlier_pred[35000 + i] == -1):
# if(ground_truth[i] == -1):
# true_pos += 1
# else:
# false_pos += 1
# else:
# if(ground_truth[i] == -1):
# false_neg += 1
# else:
# true_neg += 1
# accuracy = []
# precision = []
# recall = []
# f1 = []
# accuracy += [(true_pos + true_neg)/len(ground_truth)]
# if (true_pos + false_pos) != 0:
# precision += [true_pos / (true_pos + false_pos)]
# else:
# precision += [0]
# if (true_pos + false_neg) != 0:
# recall += [true_pos / (true_pos + false_neg)]
# else:
# recall += [0]
# if (precision[0] + recall[0]) != 0:
# f1 += [2*precision[0]*recall[0] / (precision[0] + recall[0])]
# else:
# f1 += [0]
# excel_result = accuracy + precision + recall + f1
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("RobustScaler: ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# # results += [['RobustScaler', result_test,result_outlier]]
# # labels = dbscan.labels_
# # # identify core samples
# # core_samples = np.zeros_like(labels, dtype=bool)
# # core_samples[dbscan.core_sample_indices_] = True
# # print(core_samples)
# # # declare the number of clusters
# # n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# # print(n_clusters)
# # print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # # visualize outputs
# # colors = dbscan.labels_
# # # for i in range(8):
# # # for j in range(8):
# # # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # MaxAbsScaler
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=0.80, min_samples=2000, algorithm='auto')
# x_test = MaxAbsScaler().fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = MaxAbsScaler().fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = 2*len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("MaxAbsScaler: ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# results += [['MaxAbsScaler', result_test,result_outlier]]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # Normalizer
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=0.80, min_samples=2000, algorithm='auto')
# x_test = Normalizer().fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = Normalizer().fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = 2*len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("Normalizer: ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# results += [['Normalizer', result_test,result_outlier]]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # QuantileTransformer(output_distribution='uniform')
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=0.90, min_samples=2000, algorithm='auto')
# x_test = QuantileTransformer(output_distribution='uniform').fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = QuantileTransformer(output_distribution='uniform').fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = 2*len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("QuantileTransformer(output_distribution='uniform'): ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# results += [['QuantileTransformer(output_distribution=uniform)', result_test,result_outlier]]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # QuantileTransformer(output_distribution='normal')
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=1.50, min_samples=2000, algorithm='auto')
# x_test = QuantileTransformer(output_distribution='normal').fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = QuantileTransformer(output_distribution='normal').fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = 2*len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("QuantileTransformer(output_distribution='normal'): ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# results += [['QuantileTransformer(output_distribution=normal)', result_test,result_outlier]]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # PowerTransformer(method='yeo-johnson')
# x_train = signal[:,0:35000]
# x_test = signal
# x_test = np.transpose(x_test)
# x_outliers = np.concatenate((x_train,angriff_rename_attack_1,angriff_rename_attack_2), axis = 1)
# x_outliers = np.transpose(x_outliers)
# # x_outliers = np.transpose(signal[35000:59400])
# dbscan = DBSCAN(eps=1.50, min_samples=2000, algorithm='auto')
# x_test = PowerTransformer(method='yeo-johnson').fit_transform(x_test)
# test_pred = dbscan.fit_predict(x_test)
# n_outliers = 0
# ground_truth = np.zeros(len(x_test), dtype=int)
# n_errors = (test_pred != ground_truth).sum()
# result_test = (len(x_test)-n_errors)/(len(x_test))
# cm_test = confusion_matrix(ground_truth,test_pred)
# cr_test = classification_report(ground_truth,test_pred)
# x_outliers = PowerTransformer(method='yeo-johnson').fit_transform(x_outliers)
# outlier_pred = dbscan.fit_predict(x_outliers)
# n_outliers = 2*len(angriff_rename_attack_1[0])
# # ground_truth = np.zeros(35000, dtype=int)
# # ground_truth = np.concatenate((ground_truth,pred_sis_attack,pred_sis_attack), axis = 0)
# ground_truth = np.concatenate((pred_rename_attack_1,pred_rename_attack_2), axis = 0)
# n_errors = (ground_truth != outlier_pred[35000:]).sum()
# # result_outlier = (len(x_outliers) - n_errors)/(len(x_outliers))
# result_outlier = (len(ground_truth) - n_errors)/(len(ground_truth))
# cm_outlier = confusion_matrix(ground_truth,outlier_pred[35000:])
# cr_outlier = classification_report(ground_truth,outlier_pred[35000:])
# print("PowerTransformer(method='yeo-johnson'): ")
# print("Accuracy test :", result_test)
# print("Accuracy outliers:", result_outlier)
# results += [['PowerTransformer(method=yeo-johnson)', result_test,result_outlier]]
# labels = dbscan.labels_
# # identify core samples
# core_samples = np.zeros_like(labels, dtype=bool)
# core_samples[dbscan.core_sample_indices_] = True
# print(core_samples)
# # declare the number of clusters
# n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# print(n_clusters)
# print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_outliers, labels))
# # visualize outputs
# colors = dbscan.labels_
# # for i in range(8):
# # for j in range(8):
# # plt.scatter(x_outliers[:,i],x_outliers[:,j], c = colors)
# ######################################################################################################################
# # labels_true = ground_truth
# # X = x_outliers
# # core_samples_mask = np.zeros_like(dbscan.labels_, dtype=bool)
# # core_samples_mask[dbscan.core_sample_indices_] = True
# # # Number of clusters in labels, ignoring noise if present.
# # n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# # n_noise_ = list(labels).count(-1)
# # print('Estimated number of clusters: %d' % n_clusters_)
# # print('Estimated number of noise points: %d' % n_noise_)
# # print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
# # print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
# # print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
# # print("Adjusted Rand Index: %0.3f"
# # % metrics.adjusted_rand_score(labels_true, labels))
# # print("Adjusted Mutual Information: %0.3f"
# # % metrics.adjusted_mutual_info_score(labels_true, labels))
# # print("Silhouette Coefficient: %0.3f"
# # % metrics.silhouette_score(X, labels))
# # # #############################################################################
# # # Plot result
# # # Black removed and is used for noise instead.
# # unique_labels = set(labels)
# # colors = [plt.cm.Spectral(each)
# # for each in np.linspace(0, 1, len(unique_labels))]
# # for k, col in zip(unique_labels, colors):
# # if k == -1:
# # # Black used for noise.
# # col = [0, 0, 0, 1]
# # class_member_mask = (labels == k)
# # xy = X[class_member_mask & core_samples_mask]
# # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
# # markeredgecolor='k', markersize=14)
# # xy = X[class_member_mask & ~core_samples_mask]
# # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
# # markeredgecolor='k', markersize=6)
# # plt.title('Estimated number of clusters: %d' % n_clusters_)
# # plt.show() |
991,012 | 191d9b882647262b7b6908a363fc89c3f892627a | from django.apps import AppConfig
class StatustransitionConfig(AppConfig):
    """Django application configuration for the ``statustransition`` app."""

    # App label Django uses to register this application.
    name = 'statustransition'
|
991,013 | 98ff8ba6a0da02fce774846a48e23fdc01804162 | # Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines an iterator over batches, divided into epochs."""
from typing import Iterable
from programl.models.base_batch_builder import BaseBatchBuilder
from programl.models.batch_data import BatchData
def _LimitBatchesToTargetGraphCount(
    batches: Iterable[BatchData], target_graph_count: int
):
    """Yield batches until at least `target_graph_count` graphs have been seen.

    The batch that crosses the threshold is still yielded; iteration stops
    immediately after it.
    """
    remaining = target_graph_count
    for batch in batches:
        yield batch
        remaining -= batch.graph_count
        if remaining <= 0:
            return
def EpochBatchIterator(
    batch_builder: BaseBatchBuilder,
    target_graph_counts: Iterable[int],
    start_graph_count: int = 0,
):
    """Chunk the builder's batch stream into one epoch per target count.

    Args:
        batch_builder: A batch builder to draw batches from.
        target_graph_counts: Graph counts for the successive chunks.
        start_graph_count: Graph count already consumed before the first chunk.

    Returns:
        An iterable sequence of <target_graph_count, total_graph_count, batches>
        tuples, one per entry in `target_graph_counts`. The builder is stopped
        after the final chunk has been yielded.
    """
    running_total = start_graph_count
    for chunk_target in target_graph_counts:
        running_total += chunk_target
        chunk = _LimitBatchesToTargetGraphCount(batch_builder, chunk_target)
        yield chunk_target, running_total, chunk
    batch_builder.Stop()
|
991,014 | dbfcd8953f4fc7607748e1b3838719f0228fa1a2 | class User:
firstName=None;
lastName=None;
userName=None;
password=None;
salt=None;
def __init__(self,firstName,lastName,userName,password,salt):
print "User constructor";
self.firstName=firstName;
self.lastName=lastName;
self.userName=userName;
self.salt = salt;
self.password=password;
|
991,015 | 4e5b7c5cbca92232617d2e8f51ff82de2412372e | class Solution(object):
def areConnected(self, n, threshold, queries):
"""
:type n: int
:type threshold: int
:type queries: List[List[int]]
:rtype: List[bool]
"""
data = range(n + 1)
def find(x):
if data[x] != x:
data[x] = find(data[x])
return data[x]
def union(x, y):
rx = find(x)
ry = find(y)
if rx != ry:
data[max(rx, ry)] = min(rx, ry)
return ry
for divisor in xrange(threshold + 1, n + 1):
if data[divisor] != divisor:
continue
for multiple in xrange(2, n + 1):
if divisor * multiple > n:
break
union(divisor, divisor * multiple)
return [find(a) == find(b) for a, b in queries]
|
991,016 | 721993343fdb4c7b2dd7a79805dcb28ad1fd3ca3 | import json
from decimal import Decimal
import boto3
# Template for a restaurant's menu document: one category holding one
# placeholder item with pricing, options, and tax fields.
# NOTE(review): not referenced in this file's visible code; presumably a
# seed/default document -- confirm against the writer of the Restaurants table.
restaurantMenu = {
    "categories": [{
        "categoryName": "Mains",
        "timeServed": [""],
        "items": [{
            "itemName": "New Item",
            "itemDescription": "",
            "itemPrice": 0.0,
            "picture": "",
            "ingredients": "",
            "available": True,
            "options": [{
                "optionTitle": "",
                "optionList": [{
                    "optionName": "",
                    "optionPrice": 0.0
                }],
                "minimum": 0,
                "maximum": 1
            }],
            "typeTags": [""],
            "additionalHealthInfo": "",
            "cityTax": 0.0
        }]
    }],
    "defaultCityTax": 0.0
}
# Template for a restaurant's profile document (default record shape).
# NOTE(review): "satudayClosed" under saturdayHours is misspelled; it is a
# runtime dict key, so renaming it would break existing consumers -- confirm
# with readers of this schema before fixing.
restaurantProfile = {
    "id": 1,
    "restaurantInfo": {
        "restaurantName": "",
        "restaurantAddress": "",
        "hoursOfOperation": {
            "mondayHours": {
                "mondayOpen": "",
                "mondayClosed": ""
            },
            "tuesdayHours": {
                "tuesdayOpen": "",
                "tuesdayClosed": ""
            },
            "wednesdayHours": {
                "wednesdayOpen": "",
                "wednesdayClosed": ""
            },
            "thursdayHours": {
                "thursdayOpen": "",
                "thursdayClosed": ""
            },
            "fridayHours": {
                "fridayOpen": "",
                "fridayClosed": ""
            },
            "saturdayHours": {
                "saturdayOpen": "",
                "satudayClosed": ""
            },
            "sundayHours": {
                "sundayOpen": "",
                "sundayClosed": ""
            }
        },
        "restaurantType": {
            "typeTags": [
                "italian",
                "fastFood",
                "pizza",
                "burger"
            ],
            "food": True,
            "drink": True
        }
    },
    "deliveryOptions": {
        "flatFee": 0.00,
        "flatFeeRadius": 5.0,
        "additionalDeliveryFee": 0.0,
        "maxRadius": 10.0
    },
    "displayItem": {
        "itemName": "",
        "itemPicture": "",
        "itemCategory": ""
    }
}
# Template for a customer's profile document; create_new_user() writes this
# object as the 'customerInfo' attribute of newly provisioned records.
# NOTE(review): the same mutable module-level dict is shared by every call to
# create_new_user -- confirm nothing mutates it between users.
# NOTE(review): card numbers and CVVs appear to be stored in plain text in
# this schema; confirm compliance requirements before shipping.
customerProfile = {
    "id": "",
    "customerInfo": {
        "username": "",
        "picture": "",
        "customerName": {
            "firstName": "",
            "lastName": ""
        },
        "customerAddress": {
            "default": "",
            "savedAddresses": [
                {
                    "address": "",
                    "zip": "",
                    "state": ""
                },
                {
                    "address": "",
                    "zip": "",
                    "state": ""
                }
            ]
        },
        "contactInformation": {
            "emailAddress": "",
            "contactNumber": {
                "phoneNumber": "",
                "preferredContactMethod": [
                    "text",
                    "call"
                ]
            }
        }
    },
    "paymentOptions": {
        "default": {
            "cardType": "",
            "cardNumber": "",
            "nameOnCard": "",
            "cvv": "",
            "expirationMonth": 0,
            "expirationYear": 0
        },
        "cards": [
            {
                "cardType": "",
                "cardNumber": "",
                "nameOnCard": "",
                "cvv": "",
                "expirationMonth": "",
                "expirationYear": ""
            },
            {
                "cardType": "",
                "cardNumber": "",
                "nameOnCard": "",
                "cvv": "",
                "expirationMonth": "",
                "expirationYear": ""
            }
        ]
    },
    "favorites": []
}
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Reads the caller's Cognito id from ``event['incoming_Id']`` and returns
    the matching (or newly created) profile payload.
    """
    return get_user_id(event['incoming_Id'])
def get_user_id(cognito_id, dynamodb=None):
    """Resolve a Cognito id to a user profile via the userLookup table.

    Known ids dispatch to find_restaurant or find_customer depending on the
    stored isRestaurant flag; unknown ids get a fresh customer profile.

    :param cognito_id: Cognito identity id to look up.
    :param dynamodb: optional boto3 DynamoDB resource (created on demand).
    :return: JSON string describing the user.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    lookup = dynamodb.Table('userLookup').get_item(Key={'CognitoId': cognito_id})
    if 'Item' not in lookup:
        # First sighting of this Cognito id: provision a customer record.
        return create_new_user(cognito_id)
    entry = lookup['Item']
    if entry['isRestaurant']:
        return find_restaurant(entry['userId'])
    return find_customer(entry['userId'])
def find_customer(customer_id, dynamodb=None):
    """Fetch one customer record and wrap it in the API's JSON envelope.

    :param customer_id: primary key into the Customers table.
    :param dynamodb: optional boto3 DynamoDB resource (created on demand).
    :return: JSON string with userData / id / restaurant=False fields.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    record = dynamodb.Table('Customers').get_item(Key={'CustomerId': customer_id})
    envelope = {
        "userData": record['Item'],
        "id": customer_id,
        "restaurant": False,
    }
    return json.dumps(envelope)
def find_restaurant(restaurant_id, dynamodb=None):
    """Fetch one restaurant record and wrap it in the API's JSON envelope.

    :param restaurant_id: primary key into the Restaurants table.
    :param dynamodb: optional boto3 DynamoDB resource (created on demand).
    :return: JSON string with userData / id / restaurant=True fields.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    record = dynamodb.Table('Restaurants').get_item(Key={'RestaurantId': restaurant_id})
    envelope = {
        "userData": record['Item'],
        "id": restaurant_id,
        "restaurant": True,
    }
    return json.dumps(envelope)
import uuid
def create_new_user(cognito_id, dynamodb=None):
    """Provision a blank customer profile for a never-seen Cognito id.

    Writes the template profile under a fresh uuid1 id, records the
    cognito_id -> user_id mapping, and returns the new customer payload.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    user_id = str(uuid.uuid1())
    # NOTE(review): the module-level customerProfile template is passed by
    # reference; confirm nothing mutates it between users.
    dynamodb.Table('Customers').put_item(
        Item={
            'CustomerId': user_id,
            'customerInfo': customerProfile
        }
    )
    update_user_lookup(cognito_id, user_id)
    return find_customer(user_id)
def update_user_lookup(cognito_id, user_id, dynamodb=None):
    """Record the cognito_id -> user_id mapping (always flagged as customer).

    :return: the raw put_item response, as in the original implementation.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')
    mapping = {
        'CognitoId': cognito_id,
        'isRestaurant': False,
        'userId': user_id
    }
    return dynamodb.Table('userLookup').put_item(Item=mapping)
991,017 | b5d0c67946d1ba093ec79880049b5f1bd525dc46 | import problem
class Node():
    """A search-tree node wrapping a board position.

    Equality is position-only, so nodes compare equal regardless of parent
    or cost -- the frontier/explored membership tests rely on this.
    """

    def __init__(self, parent=None, position=None, cost=0):
        self.parent = parent        # predecessor Node, or None for the root
        self.position = position    # position key into the graph mapping
        self.c = cost               # cost of stepping onto this cell
        # Path-cost bookkeeping, filled in by the search:
        # g = cost from start, h = heuristic to goal, f = g + h.
        self.g = self.h = self.f = 0

    def __eq__(self, other):
        return self.position == other.position
def a_star(mygraph, start, end , isrobot = False , robotpos = None , butters = [] , current_butter = None):
    """A* search from `start` to `end` over `mygraph`.

    mygraph maps a position key to a list whose index 1 holds the cell's step
    cost and whose tuple entries name adjacent positions (first element of
    each tuple). When `isrobot` is False the search plans a butter push and
    prunes moves that problem.isDeadlock flags; when True it plans the
    robot's own walk and refuses to expand the square holding
    `current_butter`.

    Returns (path, total_cost, step_count) on success, (None, 0, 0) when the
    frontier empties, or False for negative coordinates.

    NOTE(review): `butters=[]` is a mutable default argument -- it is only
    read here, but confirm the problem.* helpers never mutate it.
    """
    sx = int(start[1])
    sy = int(start[0])
    ey = int(end[0])
    ex = int(end[1])
    # Reject positions with negative coordinates outright.
    if sx < 0 or sy < 0 or ex < 0 or ey < 0 :
        return False
    # Create start and end node
    start_node = Node(None, start , mygraph[start][1])
    start_node.g = start_node.h = start_node.f = 0
    end_node = Node(None, end , mygraph[end][1])
    end_node.g = end_node.h = end_node.f = 0
    # Initialize both open (frontier) and closed (explored) lists
    frontier = []
    explored = []
    current_node = None
    # Add the start node
    frontier.append(start_node)
    # Loop until you find the end
    while len(frontier) > 0:
        # Pick the frontier node with the lowest f = g + h (linear scan).
        current_node = frontier[0]
        current_index = 0
        for index, item in enumerate(frontier):
            if item.f < current_node.f:
                current_node = item
                current_index = index
        # Found the goal: walk parent links back to the start.
        if current_node == end_node:
            path = []
            current = current_node
            while current is not None:
                path.append(current.position)
                current = current.parent
            path.reverse()
            # Cost sums the cost of every cell entered (start cell excluded).
            cost = 0
            for i in range(len(path)-1):
                cost += int(mygraph[path[i+1]][1])
            return (path , cost , len(path) -1 ) # Return reversed path
        # The robot itself may never stand on the butter it is pushing.
        if isrobot is True :
            if current_node.position == current_butter :
                frontier.pop(current_index)
                continue
        # Pop current off open list, add to closed list
        frontier.pop(current_index)
        explored.append(current_node)
        # Generate children
        children = []
        for new_position in mygraph[current_node.position]: # Adjacent squares
            # Non-tuple entries of the adjacency list (e.g. the cost slot) are skipped.
            if type(new_position) != tuple :
                continue
            # Get node position
            node_position = new_position[0]
            # Create new node
            new_node = Node(current_node, node_position , mygraph[node_position][1])
            # Append
            children.append(new_node)
        # Loop through children
        for child in children:
            bummer = False  # set True to discard this child
            # Create the f, g, and h values (h = Manhattan distance to the goal)
            child.g = current_node.g + child.c
            child.h = abs((int(child.position[0]) - int(end_node.position[0]))) + abs((int(child.position[1]) - int(end_node.position[1])))
            child.f = child.g + child.h
            # Child is on the closed list
            for closed_child in explored:
                if child == closed_child :
                    bummer = True
            if bummer == True :
                continue
            # Skip if this position is already queued with a strictly cheaper path.
            for open_node in frontier:
                if child == open_node and child.g > open_node.g:
                    bummer = True
            if bummer == True :
                continue
            # Previous square: the robot's own position on the first step,
            # otherwise the grandparent position.
            robotp = None
            if current_node.parent is None :
                robotp = robotpos
            else:
                robotp = current_node.parent.position
            if not problem.checktwobefor(mygraph , child.position , butters) :
                bummer = True
            if bummer == True :
                continue
            # if isrobot is False :
            #     if not problem.deadlock(mygraph , current_node.position , child.position , robotp , "astar") :
            #         bummer = True
            # When pushing a butter, prune moves that push it into a deadlock.
            if isrobot is False :
                # tmp = mygraph[current_node.position][0]
                # mygraph[current_node.position][0] = 'x'
                direction = problem.whichDirection(current_node.position , child.position )
                if problem.isDeadlock(current_node.position ,robotp , "astar" , direction , mygraph , butters ) :
                    bummer = True
                # mygraph[current_node.position][0] = tmp
            if bummer == True :
                continue
            # Add the child to the open list
            frontier.append(child)
    # Frontier exhausted without reaching the goal.
    return (None , 0 , 0 )
|
991,018 | c9b94c02549bd036b45a0719ecc6d7a9a2cbd074 | from random import *
# --- Define your functions below! ---
def begin():
    """Print the chatbot's opening greeting."""
    intro = "Hi! I'm Chatbot. I love talking to people!"
    print(intro)
def name():
    """Ask for the user's name, greet them by it, and return it."""
    print("What is your name?")
    reply = input()
    print("Hello " + reply + "! Nice to meet you!")
    return reply
def color():
    """Ask for the user's favorite color and agree with whatever they say."""
    print("What's your favorite color?")
    favorite = input()
    print(favorite + " is my favorite too!")
def respond(answer, user_input):
    """Print a canned reply based on which trigger list `answer` falls in.

    :param answer: the user's raw reply string.
    :param user_input: the user's name. Kept for interface compatibility; it
        is no longer concatenated into the greeting triggers -- the original
        ``"hi" + user_input`` made the plain "hi" trigger unmatchable.
    """
    # NOTE: " hey" keeps its original leading space -- TODO confirm intended.
    greetings = ["hi", "hello", "howdy", " hey", "greetings"]
    joke_triggers = ["tell me a joke", "make a funny", "make me laugh", "lol", "be funny"]
    meme_triggers = ["owo", "uwu", "memes", "send memes", "meme", "me me"]
    greet_replies = ["'sup bro", "hey", "what's good fam", "*nods*", "hi to you too!"]
    joke_replies = ["my life", "memes", "uwu", "captialism", "communism"]  # sic: "captialism"
    number_triggers = ["tell me a number", "number", "give number", "give me a number"]
    a_random_number = randint(1, 999)
    # Draw an independent index for each reply list; the original reused one
    # variable, so the first draw was always overwritten by the second.
    greet_index = randint(0, len(greet_replies) - 1)
    joke_index = randint(0, len(joke_replies) - 1)
    if answer in greetings:
        print("\n" + greet_replies[greet_index])
    elif answer in joke_triggers:
        print("\n" + joke_replies[joke_index])
    elif answer in meme_triggers:
        print("\n" + "owo!!!" + "\n")
    elif answer in number_triggers:
        print(a_random_number)
def default():
    """Print the filler response used after every user reply."""
    filler = "Cool! Keep talking, I'm listening..."
    print("\n" + filler + "\n")
def processInput(answer):
    # NOTE(review): dead/unfinished code -- `sayGreeting` is not defined
    # anywhere in this file and the bare name is never called, so reaching
    # this branch raises NameError. processInput itself has no callers here.
    if answer == "hi":
        sayGreeting
# --- Put your main program below! ---
def main():
    """Run the chat session: greet, collect name and color, then loop forever."""
    begin()
    username = name()
    color()
    # Endless chat loop -- the program only stops when the user interrupts it.
    while True:
        reply = input("Reply: ")
        respond(reply, username)
        default()
# DON'T TOUCH! Setup code that runs your main() function.
if __name__ == "__main__":
    main()
|
991,019 | 16c27fffaa326a1ec3e84555f3dd3cfc21037b0f | import json
import logging
import os
import urllib
from string import Template
from typing import List, Dict
import networkx as nx
import requests
from rdflib import URIRef, RDF
from utils import ResourceDictionary, serialize, deserialize
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
def get_uris_of_class(repository: str, endpoint: str, sparql_file: str, class_name: str, endpoint_type: str,
                      limit: int = 1000) -> List[URIRef]:
    """
    Returns the list of uris of type class_name
    :param repository: The repository containing the RDF data
    :param endpoint: The SPARQL endpoint
    :param sparql_file: The file containing the SPARQL query
    :param class_name: The class_name to search
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: The list of uris of type class_name
    """
    uri_list = []
    # Fix: close the query file promptly instead of leaking the handle.
    with open(sparql_file) as query_file:
        uris_of_class_sparql_query = query_file.read()
    uris_of_class_template = Template(uris_of_class_sparql_query).substitute(class_name=class_name)
    uris_of_class_template = Template(uris_of_class_template + " limit $limit offset $offset ")
    for uri in get_sparql_results(uris_of_class_template, "uri", endpoint, repository,
                                  endpoint_type, limit):
        uri_list.append(uri)
        # Progress indicator every 1000 results (kept as print, as originally).
        if len(uri_list) % 1000 == 0:
            print(len(uri_list))
    return uri_list
def get_properties_dict(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,
                        limit: int = 1000) -> ResourceDictionary:
    """
    Return a ResourceDictionary with the list of properties in the ontology
    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: A ResourceDictionary with the list of properties in the ontology
    """
    # Serve the cached dictionary if it was serialized on a previous run.
    global_properties_dict = deserialize(serialized_file)
    if global_properties_dict:
        return global_properties_dict
    global_properties_dict = ResourceDictionary()
    # rdf:type is always tracked, even if the query does not return it.
    global_properties_dict.add(RDF.type)
    # Fix: close the query file instead of leaking the handle.
    with open(sparql_file) as query_file:
        properties_sparql_query = query_file.read()
    properties_sparql_query_template = Template(properties_sparql_query + " limit $limit offset $offset ")
    for rdf_property in get_sparql_results(properties_sparql_query_template, ["property"], endpoint, repository,
                                           endpoint_type, limit):
        # Each yielded row is a one-element list of URIRefs.
        global_properties_dict.add(rdf_property[0])
    serialize(global_properties_dict, serialized_file)
    return global_properties_dict
def get_classes_dict(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,
                     limit: int = 1000) -> ResourceDictionary:
    """
    Return a ResourceDictionary with the list of classes in the ontology
    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: A ResourceDictionary with the list of classes in the ontology
    """
    # Serve the cached dictionary if it was serialized on a previous run.
    classes_dictionary = deserialize(serialized_file)
    if classes_dictionary:
        return classes_dictionary
    classes_dictionary = ResourceDictionary()
    # Fix: close the query file instead of leaking the handle.
    with open(sparql_file) as query_file:
        classes_sparql_query = query_file.read()
    classes_sparql_query_template = Template(classes_sparql_query + " limit $limit offset $offset ")
    for class_uri in get_sparql_results(classes_sparql_query_template, ["class"], endpoint, repository,
                                        endpoint_type, limit):
        # Each yielded row is a one-element list of URIRefs.
        classes_dictionary.add(class_uri[0])
    serialize(classes_dictionary, serialized_file)
    return classes_dictionary
def get_properties_groups(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,
                          properties_dict: ResourceDictionary,
                          limit: int = 1000) -> Dict:
    """
    Return a dictionary containing the group ids for each property in the ontology (The group ids are determined via connected components)
    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param properties_dict: The ResourceDictionary containing the properties of the ontology
    :param limit: The sparql query limit
    :return: A dictionary containing the group ids for each property
    """
    # Serve the cached mapping if it was serialized on a previous run.
    if os.path.isfile(serialized_file):
        properties_groups = deserialize(serialized_file)
        return properties_groups
    encoding_dir = os.path.dirname(serialized_file)
    if not os.path.exists(encoding_dir):
        os.makedirs(encoding_dir)
    # Collect property2 -> [sub-properties] adjacency from the ontology.
    sub_properties_dict = {}
    # Fix: close the query file instead of leaking the handle.
    with open(sparql_file) as query_file:
        get_sub_properties_query = query_file.read()
    get_sub_properties_query_template = Template(get_sub_properties_query + " limit $limit offset $offset ")
    for (property1, property2) in get_sparql_results(get_sub_properties_query_template, ["property1", "property2"],
                                                     endpoint, repository,
                                                     endpoint_type, limit):
        if property2 not in sub_properties_dict:
            sub_properties_dict[property2] = []
        sub_properties_dict[property2].append(property1)
    # Build an undirected graph; properties with no sub-property relation
    # become isolated nodes and therefore singleton components.
    G = nx.Graph()
    for property1 in sub_properties_dict:
        for property2 in sub_properties_dict[property1]:
            G.add_edge(property1, property2)
    for property_uri in properties_dict:
        G.add_node(property_uri)
    # Each connected component gets one numeric group id.
    properties_connected_components = {}
    index = 0
    for c in nx.connected_components(G):
        for p in c:
            properties_connected_components[p] = index
        index += 1
    serialize(properties_connected_components, serialized_file)
    return properties_connected_components
def get_sparql_results(query_template, variables, endpoint, repository, endpoint_type, limit=1000):
    """Yield result rows for a paged SPARQL query.

    :param query_template: string.Template with $limit/$offset placeholders
    :param variables: list of SPARQL variable names to extract from each binding
    :param endpoint: the SPARQL endpoint base URL
    :param repository: the repository name (GraphDB only)
    :param endpoint_type: "GRAPHDB" or anything else for Virtuoso
    :param limit: page size; paging stops on the first short page
    :yields: one list of URIRef per result row, ordered like *variables*
    """
    more_results = True
    offset = 0
    try:
        while more_results:
            sparql_query = query_template.substitute(offset=str(offset), limit=limit)
            if endpoint_type == "GRAPHDB":
                sparql_results = graphdb_query(sparql_query, repository, endpoint)
            else:
                sparql_results = sparqlQuery(sparql_query, endpoint)
            if len(sparql_results) < limit:
                # A short page means the result set is exhausted.
                more_results = False
            for result in sparql_results:
                yield [URIRef(result[variable]['value']) for variable in variables]
            offset += limit
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt / GeneratorExit; catch only real errors.
        logging.error(
            "SPARQL query error. Please make sure the ontology is loaded in repository %s at %s or change config",
            repository, endpoint)
        exit()
def sparqlQuery(query, baseURL, format="application/json"):
    """POST *query* to a Virtuoso SPARQL endpoint and return the result bindings.

    :param query: the SPARQL query text
    :param baseURL: the endpoint URL
    :param format: response MIME type requested from the endpoint
    :return: the list under results.bindings of the JSON response
    """
    params = {
        "default-graph": "",
        "should-sponge": "soft",
        "query": query,
        "debug": "on",
        "timeout": "",
        "format": format,
        "save": "display",
        "fname": ""
    }
    querypart = urllib.parse.urlencode(params).encode("utf-8")
    # Fix: close the HTTP response instead of leaking the connection.
    with urllib.request.urlopen(baseURL, querypart) as response:
        body = response.read()
    json_response = json.loads(body)
    return json_response['results']['bindings']
def graphdb_query(query, repository, baseURL="http://localhost:7200/repositories/", limit=1000, debug=False, offset=0,
                  infer=True, sameAs=True,
                  verbatim=False):
    """POST a SPARQL query to a GraphDB repository and return the bindings.

    With debug=True the raw response text is echoed; with verbatim=True the
    raw response text is returned instead of the parsed bindings.
    """
    request_headers = {'Accept': 'application/json,*/*;q=0.9',
                       'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                       'X-Requested-With': 'XMLHttpRequest',
                       'X-GraphDB-Repository': repository}
    request_params = {
        "infer": infer,
        "offset": offset,
        "query": query,
        "limit": limit,
        "sameAs": sameAs,
    }
    resp = requests.post(baseURL + repository, headers=request_headers, params=request_params)
    if debug:
        print(resp.text)
    if verbatim:
        return resp.text
    return json.loads(resp.text)['results']['bindings']
def graphdb_ask(query, repository, baseURL="http://localhost:7200/repositories/", infer=True, sameAs=False,
                debug=False):
    """POST a SPARQL ASK query to a GraphDB repository and return its boolean.

    Fix: the original unconditionally printed the whole HTTP response to
    stdout (leftover debugging); that output is now gated behind the new
    backward-compatible ``debug`` keyword, matching graphdb_query's style.
    """
    headers = {'Accept': 'application/sparql-results+json,*/*;q=0.9',
               'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
               'X-Requested-With': 'XMLHttpRequest',
               'X-GraphDB-Repository': repository}
    params = {
        "infer": infer,
        "query": query,
        "sameAs": sameAs,
    }
    response = requests.post(baseURL + repository, headers=headers, params=params)
    if debug:
        print(response)
        print(response.text)
    json_response = json.loads(response.text)
    return bool(json_response['boolean'])
|
991,020 | 332f2fca01d224a329f6ab8f6e0a4e8895f142a2 | import cx_Freeze
# cx_Freeze build script: freezes JUJUJUL.py into a Windows GUI executable.
target = cx_Freeze.Executable(
    script="JUJUJUL.py",
    base = "Win32GUI",  # GUI subsystem: no console window on Windows
    icon=r"E:\putting 1s and 0s in the right order\Le J\Google Chrome.ico"
)
cx_Freeze.setup(
    name = "JUJUJUL",
    # Bundle the ctypes/winsound packages plus the image and audio assets.
    options = {"build_exe" : {"packages": ["ctypes", "winsound"], "include_files" : ["JUJUJUL.jpg" , "Jul - On Mappelle Lovni Clip Officiel 2016.wav"] } } ,
    description = "Le J",
    executables = [target],
    version = "6.9"
)
|
991,021 | a881a8c803ab64def311b52519308f02f26a335d | import os
from cartmigration.models.setup import Setup

# Bootstrap script: run the database setup only when the packaged config exists.
file_config = 'cartmigration/etc/config.ini'
if os.path.isfile(file_config):
    print("File config is exist, setup db with file config")
    print("----------------------------------")
    # NOTE(review): indentation was ambiguous in the source paste; assuming
    # the setup run belongs inside the config-exists branch — confirm.
    setup = Setup()
    setup.run()
991,022 | dbe5f393957540a6a1cab00a0650c3fc78f1ebf9 | __author__ = 'leif'
from ifind.search import EngineFactory
from example_utils import make_query, run_query, display_results

# Demo: query the Wikipedia engine for "retrievability" and print the results.
query_str = u'retrievability'
wiki_engine = EngineFactory('wikipedia')
query = make_query(query_str)
response = run_query(wiki_engine, query)
display_results(response)
991,023 | 8c187d9be36a49a6203b77995e39e83f677c3b0a | import random
# Print a pseudo-random float in [0.0, 1.0).
print(random.random())
# Print a pseudo-random integer N with 10 <= N <= 20 (both endpoints inclusive).
print(random.randint(10,20))
991,024 | 0b56a31c04740ea57ab77e9b294fb6c74dee37fb | from requests_html import HTMLSession
from bs4 import BeautifulSoup as BS
import shelve
import pandas as pd
import pprint
session = HTMLSession()
url = 'http://corganinet.com/_applications/locator_v1/view_officemap_01.cfm'
def make_dict():
    """Scrape every floor map and return {employee name: seat label}."""
    employees = {}
    floor_ids = {
        'First Floor': 10025,
        'Second Floor': 10026,
        'Third Floor': 10027,
        'Corgan East': 10019,
    }
    for map_id in floor_ids.values():
        page = session.get(url, params={'MAPID': map_id, 'EMID': '00015'})
        soup = BS(page.text, 'lxml')
        for marker in soup.find_all(attrs={'class': 'color_48'}):
            # title reads "<seat> | <Last, First>"; reverse both levels so the
            # key becomes "First Last" and the value the seat label.
            full_name, seat = marker['title'].split(' | ')[::-1]
            full_name = ' '.join(full_name.split(', ')[::-1])
            employees[full_name] = seat
    return employees
def get_team(row):
    """Map a roster row (needs 'Sector' and possibly 'Subsector') to a team label."""
    sector = row['Sector']
    if sector == 'Critical Facilities':
        # Red Studio is its own CF team; every other CF subsector is Blue.
        return 'CF Red' if row['Subsector'] == 'Red Studio' else 'CF Blue'
    if sector == 'Shared Services':
        return row['Subsector']
    return sector
if __name__ == '__main__':
    # The CSV-driven enrichment below is currently disabled; the script just
    # scrapes the floor maps and pretty-prints the name -> seat mapping.
    #file = 'emp_list.csv'
    #df = pd.read_csv(file)
    emp_dict = make_dict()
    pprint.pprint(emp_dict)
    """
    with shelve.open(EnterName) as emps:
        for row in [x[1] for x in df.iterrows()]:
            try:
                name = ' '.join(row['Name'].split(', ')[::-1])
                emps[name] = {'seat': emp_dict[name], 'team': get_team(row)}
            except KeyError as k:
                continue
    """
991,025 | 79aa4f67b3c8c625398368fd0736c3534ddcad45 | ### XLSX PARSER ###
from bisect import insort
from openpyxl import load_workbook
from typing import List, Dict, Iterable
import re
def list_column(spreadsheet: str, column: str) -> List[str]:
    ## Takes a path to an xlsx and a column and returns a list
    # NOTE(review): worksheet[column] returns openpyxl Cell objects (downstream
    # helpers read `.value`), so the List[str] return annotation overstates — confirm.
    wb = load_workbook(spreadsheet)
    worksheet = wb.active  # only the active sheet is ever read
    return worksheet[column]
def normalize_list(names: List[str]) -> List[str]:
    ## Takes a List of full names and normalizes list ##
    # NOTE(review): elements are accessed via `.value`, i.e. they are cell
    # objects from list_column rather than plain str — confirm callers.
    # Tokens to remove absolutely
    clean = str.maketrans(dict.fromkeys("(),_."))
    # Remove any Names containing Test
    # Remove any (<WORD>)
    # [A-Z]{2,} : Remove any caps words of two or more chars (i.e., MD,PHD,etc)
    # \b[A-Z]?-\S+ : Remove '-' and any following word
    # (\s)(?=\s) : Remove double whitespaces
    normalized: List[str] = [re.sub(
        r'(Test\s.*)|(\sHw)|(O\')|(^St\s)|[A-Z]{2,}|\b[A-Z]?-\S+|(\s)(?=\s)',
        '',
        str(n.value).translate(clean)
    ).strip() for n in names]
    return normalized
def split_list(names: List[str]) -> List[List[str]]:
    """Split each full name (header row skipped) into its name parts.

    A name with exactly first + last tokens gets a 'NULL' middle-name
    placeholder; empty tokens left by stray whitespace are dropped afterwards.
    """
    cleaned = normalize_list(names[1:])
    result: List[List[str]] = []
    for full_name in cleaned:
        parts = re.split(r'\s', full_name)
        if len(parts) == 2:
            parts.append('NULL')
        # Drop blank tokens produced by consecutive whitespace.
        result.append([token for token in parts if token])
    return result
def stringify(m_list: List[str]) -> List[str]:
    # Convert a column of cells (header row skipped) to stripped strings,
    # blanking the literal tokens CACTR*, FBCMD, OBNP, OBPA and "None",
    # then dropping any entries left empty.
    # NOTE(review): elements are read via `.value`, so m_list holds cell
    # objects despite the List[str] annotation — confirm callers.
    str_list: List[str] = [re.sub(
        r'(CACTR\S+)|(FBCMD)|(OBNP)|(OBPA)|(None)',
        '',
        str(e.value)).strip() for e in m_list[1:]]
    return list(filter(None,str_list))
991,026 | 66301785698cd29216cdcaf7d0b6db6677c00fda | import unittest
class SegmentTee:
    """Array-backed segment tree answering associative range queries
    (min / max / sum, ...) over a fixed input list in O(log n) per query."""

    def __init__(self, input_arr, query_func, no_overlap_default_value):
        """
        :param input_arr: an array that is an object of the query
        :param query_func: a two-argument associative function returning a number
        :param no_overlap_default_value: the default return value when the query
            interval does not overlap the interval from the tree; must be the
            identity element of query_func (inf for min, -inf for max, 0 for sum)
        """
        self.__input_size = len(input_arr)
        self.__query_func = query_func
        self.no_overlap_value = no_overlap_default_value
        self.seg_tree = self.__class__._create_empty_segment_tree(self.__input_size)
        self._build_segment_tree(input_arr, 0, len(input_arr) - 1, 0, query_func)

    def range_query(self, start_index, end_index):
        """ Apply the query over the inclusive range [start_index, end_index].

        :param start_index: the start index of the query
        :param end_index: the end index of the query
        :return: the result of the query (i.e. max, min, or sum of the segment elements)
        """
        return self._range_query(query_lo=start_index, query_hi=end_index, lo=0, hi=self.__input_size - 1, pos=0)

    def _range_query(self, query_lo, query_hi, lo, hi, pos):
        """ Recursive helper for range_query.

        :param query_lo: the lower index of the query
        :param query_hi: the higher index of the query
        :param lo: the current lower index
        :param hi: the current higher index
        :param pos: the current position in the SegmentTree
        :return: the result of the query
        """
        if query_lo <= lo and query_hi >= hi:
            # Total overlap: this node's value covers the whole interval.
            return self.seg_tree[pos]
        if query_lo > hi or query_hi < lo:
            # No overlap: contribute the identity element.
            return self.no_overlap_value
        # Partial overlap: combine results from both children.
        mid = (lo + hi) // 2  # integer midpoint (original used float division)
        left = self._range_query(query_lo, query_hi, lo, mid, 2 * pos + 1)
        right = self._range_query(query_lo, query_hi, mid + 1, hi, 2 * pos + 2)
        return self.__query_func(left, right)

    def _build_segment_tree(self, input_list, lo, hi, pos, query_func):
        """ Recursively fill self.seg_tree for the interval [lo, hi] at index pos.

        :param input_list: an array that is an object of the query
        :param lo: the lower bound of the current interval
        :param hi: the higher bound of the current interval
        :param pos: the current position in the segment tree array
        :param query_func: the two-argument combining function
        """
        if lo == hi:
            # Leaf node: a single input element.
            self.seg_tree[pos] = input_list[lo]
            return
        mid = (lo + hi) // 2
        self._build_segment_tree(input_list, lo, mid, 2 * pos + 1, query_func)
        self._build_segment_tree(input_list, mid + 1, hi, 2 * pos + 2, query_func)
        self.seg_tree[pos] = query_func(self.seg_tree[2 * pos + 1], self.seg_tree[2 * pos + 2])

    @staticmethod
    def _create_empty_segment_tree(size):
        """ Allocate the zero-filled backing array.

        The tree needs 2 * p - 1 slots, where p is the smallest power of two
        >= size. Bug fix: the original computed the exponent with
        math.ceil(math.log(size, 2)), whose floating-point rounding can be off
        by one near exact powers of two; (size - 1).bit_length() is exact.
        """
        next_pow_of_two = 1 if size <= 1 else 1 << (size - 1).bit_length()
        return [0] * (2 * next_pow_of_two - 1)
class TestSegmentTree(unittest.TestCase):
    """Unit tests covering min, max and sum range queries on SegmentTee."""

    def test_min_query(self):
        # Identity for min is +inf.
        min_st = SegmentTee(
            [-1, 2, 4, 0],
            query_func=lambda x, y: min(x, y),
            no_overlap_default_value=float('inf'))
        self.assertEqual(min_st.range_query(1, 3), 0)
        self.assertEqual(min_st.range_query(0, 3), -1)
        self.assertEqual(min_st.range_query(0, 2), -1)
        self.assertEqual(min_st.range_query(1, 2), 2)
        self.assertEqual(min_st.range_query(2, 2), 4)

    def test_max_query(self):
        # Identity for max is -inf.
        max_st = SegmentTee(
            [-1, 2, 4, 0],
            query_func=lambda x, y: max(x, y),
            no_overlap_default_value=float('-inf'))
        self.assertEqual(max_st.range_query(1, 3), 4)
        self.assertEqual(max_st.range_query(0, 3), 4)
        self.assertEqual(max_st.range_query(0, 2), 4)
        self.assertEqual(max_st.range_query(1, 2), 4)
        self.assertEqual(max_st.range_query(2, 2), 4)
        self.assertEqual(max_st.range_query(0, 1), 2)
        self.assertEqual(max_st.range_query(3, 3), 0)

    def test_sum_query(self):
        # Identity for sum is 0; also exercises a non-power-of-two input size.
        sum_st = SegmentTee(
            [1, 3, 5, 7, 9, 11],
            query_func=lambda x, y: x + y,
            no_overlap_default_value=0)
        self.assertEqual(sum_st.range_query(1, 4), 24)
        self.assertEqual(sum_st.range_query(0, 2), 9)
        self.assertEqual(sum_st.range_query(3, 5), 27)
        self.assertEqual(sum_st.range_query(2, 4), 21)
        self.assertEqual(sum_st.range_query(0, 5), 36)
        self.assertEqual(sum_st.range_query(3, 3), 7)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
|
991,027 | fac71d576dc9739bc24295a8e7f8dc1dcb5ed0d0 | #!/usr/bin/python
import os
import sys
import numpy as np
import logging
from create_dataset import genererate_dataframe
from preprocess_data import Preprocces
import pickle
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.layers import Embedding
from keras import layers
from keras import Input
from keras import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
import warnings
warnings.filterwarnings("ignore")
sys.path.append(os.path.realpath('../'))
sys.path.append(os.path.realpath('../../'))
logging.basicConfig(level=logging.INFO,
format='%(asctime)s[%(name)s][%(levelname)s] %(message)s',
datefmt='[%Y-%m-%d][%H:%M:%S]')
logger = logging.getLogger(__name__)
MAX_SEQUENCE_LENGTH = 250
EMBEDDING_DIM = 100
class Train:
    """Train a GloVe-embedding 1-D CNN text classifier from a raw data folder.

    Relies on pre-pickled train/test splits in ``../../models/`` and the
    glove.6B.100d vectors in ``../../glove/``. The best checkpoint is written
    to ``../../models/model.h5``.
    """

    def __init__(self, path):
        # path: data directory given on the command line.
        self.data_path = path
        self.path_models = "../../models/"
        logger.info("Initialized Class")

    def train_model(self):
        """Build vocabulary, embedding matrix and CNN, then train the same
        model across n_folds random train/validation splits. Returns True."""
        path = self.data_path
        df = genererate_dataframe(path)
        preprocces = Preprocces(df)
        df = preprocces.clean_dataframe()
        stats_words = preprocces.stast_df(df)
        MAX_NB_WORDS = len(stats_words['unique_words'])
        # NOTE(review): `nb_words` is the legacy Keras 1.x argument name
        # (newer Keras uses `num_words`) — confirm the installed version.
        tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
        tokenizer.fit_on_texts(df.Cleaned_text)
        word_index = tokenizer.word_index
        print('Found %s unique tokens.' % len(word_index))
        # Pre-computed train/test splits are loaded from disk, not rebuilt here.
        X_train_cnn = pickle.load(open(self.path_models+'X_train_cnn.pickle', 'rb'))
        X_test_cnn = pickle.load(open(self.path_models+'X_test_cnn.pickle', 'rb'))
        Y_train_cnn = pickle.load(open(self.path_models+'Y_train_cnn.pickle', 'rb'))
        Y_test_cnn = pickle.load(open(self.path_models+'Y_test_cnn.pickle', 'rb'))
        logger.info('Shape of X_train: ' + str(X_train_cnn.shape))
        logger.info('Shape of X_test :' + str(X_test_cnn.shape))
        # Parse the GloVe text file into {word: vector}; malformed lines are skipped.
        embeddings_index = {}
        f = open('../../glove/glove.6B.100d.txt', encoding='ISO-8859-1')
        for line in f:
            try:
                values = line.split()
                word = values[0]
                coefs = np.array(values[1:], dtype='float32')
                embeddings_index[word] = coefs
            except:
                pass
        f.close()
        logger.info('Found %s word vectors.' % len(embeddings_index))
        # Row i of the matrix is the GloVe vector for token id i.
        embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
        for word, i in word_index.items():
            embedding_vector = embeddings_index.get(word)
            if embedding_vector is not None:
                # words not found in embedding index will be all-zeros.
                embedding_matrix[i] = embedding_vector
        embedding_layer = Embedding(
            len(word_index) + 1,
            EMBEDDING_DIM,
            weights=[embedding_matrix],
            input_length=MAX_SEQUENCE_LENGTH,
            trainable=False  # GloVe vectors are frozen during training
        )
        pat = 5
        self.early_stopping = EarlyStopping(monitor='val_loss', patience=pat, verbose=1)
        self.model_checkpoint_cnn = ModelCheckpoint('../../models/model.h5', verbose=1, save_best_only=True)
        # Three Conv1D/pooling stages over the embedded token sequence,
        # then a dense softmax head sized by the one-hot label width.
        sequence_input = Input(shape=(None,), dtype="int64")
        embedded_sequences = embedding_layer(sequence_input)
        x = layers.Conv1D(128, 5, activation="relu")(embedded_sequences)
        x = layers.MaxPooling1D(5)(x)
        x = layers.Dropout(0.5)(x)
        x = layers.Conv1D(128, 5, activation="relu")(x)
        x = layers.MaxPooling1D(5)(x)
        x = layers.Dropout(0.5)(x)
        x = layers.Conv1D(128, 5, activation="relu")(x)
        x = layers.GlobalMaxPooling1D()(x)
        x = layers.Dense(128, activation="relu")(x)
        x = layers.Dropout(0.5)(x)
        preds = layers.Dense(len(Y_train_cnn[0]), activation="softmax")(x)
        self.model_cnn = Model(sequence_input, preds)
        self.model_cnn.compile(
            loss='categorical_crossentropy',
            optimizer='adam',
            metrics=['acc']
        )
        n_folds = 3
        epochs = 20
        batch_size = 128
        # save the model history in a list after fitting
        model_history_cnn = []
        for i in range(n_folds):
            print("Training on Fold: ", i+1)
            t_x, val_x, t_y, val_y = train_test_split(
                X_train_cnn, Y_train_cnn, test_size=0.1,
                random_state=np.random.randint(1, 1000, 1)[0])
            model_history_cnn.append(self.fit_and_evaluate_cnn(t_x, val_x, t_y, val_y, epochs, batch_size))
            print("======="*12, end="\n\n\n")
        logger.info('Training CNN model')
        logger.info("The model has been trained")
        logger.info("Saving model ")
        logger.info(f"Add this path for the next step classify: ../../models/model.h5")
        return True

    def fit_and_evaluate_cnn(self, t_x, val_x, t_y, val_y, EPOCHS=20, BATCH_SIZE=128):
        """Fit the shared CNN on one split and print its validation score.

        NOTE(review): every fold reuses (and keeps training) the same
        self.model_cnn instance — the weights are not reset between folds.
        """
        model = None
        model = self.model_cnn
        results = model.fit(t_x, t_y, epochs=EPOCHS, batch_size=BATCH_SIZE,
                            callbacks=[self.early_stopping, self.model_checkpoint_cnn],
                            verbose=1, validation_split=0.1)
        print("Val Score: ", model.evaluate(val_x, val_y))
        return results
if __name__ == "__main__":
    # Usage: train.py <data_path>
    logger.info(sys.argv[1])
    training_model = Train(sys.argv[1])
    training_model.train_model()
|
991,028 | c0c9dec78f0bae58898c1685f7a1e76f2dfbebbc | import copy
import math
import os
from typing import Dict, Type, Union, Tuple, Optional
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import source.evaluation_utils as eval_utils
from source.data_handlers import BaseDataLoader, FastDataLoader
from source.models import BaseModel
from source.optimizer_utils import get_optimizer
from source.similarity_utils import get_similarity_function
from source.weighting_utils import get_weighting_update_function
class BaseParticipant:
    """One federated-learning participant.

    Owns a local model, optimizer and LR schedule, trains on its own data
    loader, tracks the per-round parameter delta (per-tensor dict plus a
    flattened vector used for similarity computations), and logs per-round
    evaluation metrics to CSV.
    """

    def __init__(
        self,
        index,
        name,
        config,
        device,
        dataset_loader: Union[BaseDataLoader, FastDataLoader],
        model_class: Type[BaseModel],
        model_init_params: dict,
        class_weights: Optional[dict],
    ):
        # Whether this participant still accepts aggregated updates; flipped
        # off in get_updated_mixing_matrix when its own mixing weight dominates.
        self.accept_update = True
        self.index = index
        self.name = name
        self.config = config
        self.device = device
        self.dataset_loader = dataset_loader
        self._model = model_class(**model_init_params).to(self.device)
        # Snapshot of the model state taken at the start of each training round.
        self.model_state_beginning = {}
        self._optimizer = get_optimizer(self.config, self._model.parameters())
        self._lr_scheduler = StepLR(
            self._optimizer,
            int(self.config["n_rounds"] / self.config["decay_steps"]),
            self.config["decay_mult"],
        )
        # Per-round parameter change: per-tensor dict and flattened vector.
        self.delta = {}
        self.delta_flat = torch.Tensor()
        self._class_weights = None
        self._logging_stats = pd.DataFrame(
            columns=["name", "round", "n_samples"] + self.config["metrics"]
        )
        # set number of batches to length of dataset loader (= full epoch) or limit
        self.n_batches = len(self.dataset_loader.train_loader)
        if "n_batch_limit" in self.config and self.config["n_batch_limit"] is not None:
            # no looping (i.e. max 1 epoch)
            self.n_batches = min(self.n_batches, int(self.config["n_batch_limit"]))
            # allowed to loop
            # self.n_batches = int(self.config["n_batch_limit"])
        self._class_weights = torch.tensor(class_weights, dtype=torch.float).to(
            self.device, non_blocking=True
        )
        self._init_auxilliary_functions()

    def _init_auxilliary_functions(self):
        # Resolve the config-selected weighting-update and similarity callables once.
        self._weight_update_function = get_weighting_update_function(self.config)
        self._similarity_function = get_similarity_function(self.config)

    def get_similarity(self, other_participant):
        """Similarity between this participant and another (config-selected metric)."""
        return self._similarity_function(self, other_participant)

    def _local_model_batch_step(
        self, data, target, running_stats,
    ):
        """One optimizer step on a single batch; returns updated running stats.

        running_stats is the (loss_sum, targets, predictions, probas) tuple
        produced by _get_running_stats.
        """
        data, target = (
            data.to(self.device, non_blocking=True),
            target.to(self.device, non_blocking=True),
        )
        self._optimizer.zero_grad()
        output = self._model(data)
        # Class-weighted NLL summed over the batch (averaged later in _evaluate).
        pred_loss = F.nll_loss(
            output, target, weight=self._class_weights, reduction="sum",
        )
        reg_loss = self._get_regularization_loss()  # 0.0 in the base class
        loss = pred_loss + reg_loss
        loss.backward()
        self._optimizer.step()
        output = output.detach()
        (train_loss, train_targets, train_predictions, train_probas) = running_stats
        train_loss += loss
        train_targets = torch.cat([train_targets, target])
        train_predictions = torch.cat([train_predictions, output.argmax(dim=1)], dim=0)
        train_probas = torch.cat(
            [train_probas, torch.nn.functional.softmax(output, dim=1)], dim=0
        )
        return (train_loss, train_targets, train_predictions, train_probas)

    def _get_running_stats(self) -> Tuple:
        # Fresh accumulators: (loss_sum, targets, predictions, probabilities).
        train_loss = 0
        train_targets = torch.Tensor().to(self.device, non_blocking=True)
        train_predictions = torch.Tensor().to(self.device, non_blocking=True)
        train_probas = torch.Tensor().to(self.device, non_blocking=True)
        return (train_loss, train_targets, train_predictions, train_probas)

    def local_train_step(
        self, round_n, n_epochs: int = 1,
    ):
        """Train locally for n_epochs, record the round's delta, and return
        the training metrics computed by _evaluate."""
        self.model_state_beginning = copy.deepcopy(self._model.state_dict())
        self._model.train()
        running_stats = self._get_running_stats()
        for e in range(n_epochs):
            for batch_idx in range(self.n_batches):
                data, target = next(iter(self.dataset_loader.train_loader))
                running_stats = self._local_model_batch_step(
                    data, target, running_stats
                )
        self._lr_scheduler.step()
        self._compute_delta()
        return self._evaluate(running_stats)

    def _compute_delta(self):
        # Delta = state after training minus state at the start of the round.
        self.delta = {}
        self.delta_flat = torch.Tensor().to(self.device)
        for key in self._model.state_dict().keys():
            self.delta[key] = torch.sub(
                self._model.state_dict()[key], self.model_state_beginning[key]
            )
            self.delta_flat = torch.cat([self.delta_flat, self.delta[key].flatten()])

    def local_evaluate_step(self, round_n, loader=None):
        """Evaluate the current model (default: on the test loader), log the
        metrics for this round, and return them."""
        if loader is None:
            loader = self.dataset_loader.test_loader
        (
            test_loss,
            test_targets,
            test_predictions,
            test_probas,
        ) = self._get_running_stats()
        self._model.eval()
        with torch.no_grad():
            for data, target in loader:
                data, target = (
                    data.to(self.device, non_blocking=True),
                    target.to(self.device, non_blocking=True),
                )
                output = self._model(data)
                loss = F.nll_loss(
                    output, target, weight=self._class_weights, reduction="sum",
                )
                test_loss += loss
                test_targets = torch.cat([test_targets, target])
                test_predictions = torch.cat(
                    [test_predictions, output.argmax(dim=1)], dim=0
                )
                test_probas = torch.cat(
                    [test_probas, torch.nn.functional.softmax(output, dim=1)], dim=0
                )
        eval_stats = self._evaluate(
            (test_loss, test_targets, test_predictions, test_probas)
        )
        self._write_eval_stats(round_n, eval_stats)
        return eval_stats

    def set_local_model(self, new_state: Dict[str, torch.Tensor], round_i: int = 0):
        """Replace the local model parameters with an aggregated state dict."""
        self._model.load_state_dict(new_state)

    def get_local_model(self) -> Dict[str, torch.Tensor]:
        """Return the full current parameter state dict."""
        return self._model.state_dict()

    def get_model_at_beginning(self) -> Dict[str, torch.Tensor]:
        """Return the state snapshot taken at the start of the current round."""
        return self.model_state_beginning

    def get_updated_mixing_matrix(self, current_weights, participants):
        """Update this participant's row of the mixing matrix.

        If its own weight exceeds 3x the uniform weight, the participant stops
        accepting aggregated updates (accept_update = False).
        """
        new_weights = self._weight_update_function(self, current_weights, participants)
        if (new_weights[self.index]) > (3 * (1 / len(participants))):
            self.accept_update = False
        return new_weights

    def _evaluate(self, running_stats):
        # Compute each configured metric; "loss" is the mean of the summed loss.
        (loss_sum, target, predictions, probas) = running_stats
        true = target.cpu().numpy()
        pred = predictions.cpu().numpy()
        pred_probas = probas.cpu().numpy()
        metrics = []
        for metric in self.config["metrics"]:
            if metric == "loss":
                metrics.append(loss_sum.cpu().detach().numpy() / len(target))
            else:
                metrics.append(eval_utils.get_metric(metric, true, pred, pred_probas))
        return metrics

    def _get_regularization_loss(self):
        # Base class adds no regularization; subclasses may override.
        return 0.0

    def _write_eval_stats(self, round_n: int, stats: list):
        # Append one row per round and flush to CSV every 5 rounds.
        row = [self.name, round_n, self.dataset_loader.test_loader.n_samples] + stats
        self._logging_stats.loc[round_n] = row
        log_path = self.config["logdir"] + "logs/"
        if round_n <= 1:
            os.makedirs(log_path, exist_ok=True)
        if (round_n + 1) % 5 == 0:
            self._logging_stats.to_csv(
                log_path + str(self.name) + "-log.csv", index=False, sep=";",
            )
class SplitParticipant(BaseParticipant):
    """Participant that federates only part of its model.

    Layers listed in config["local_layers"] are kept personal: they are not
    shared, and after `personalized_update_start_round` they are advanced by a
    locally stored delta instead of the aggregated update. An optional
    regularizer pushes personal parameters toward sparse/unit values.
    """

    def __init__(self, **args):
        super(SplitParticipant, self).__init__(**args)
        assert self.config["mixing_init"] != "local"
        assert self.config["process"] == "base"
        # Delta of the personal (non-shared) layers from the last round.
        self._personalized_delta = {}
        # Parameters that participate in federation (everything not declared local).
        self._sharable_param_keys = [
            x
            for x in self._model.state_dict().keys()
            if x not in self.config["local_layers"]
        ]
        self.personalized_update_start_round = self.config[
            "personalized_update_start_round"
        ]
        if self.personalized_update_start_round < 1.0:
            # is a fraction of total rounds
            self.personalized_update_start_round = int(
                self.personalized_update_start_round * self.config["n_rounds"]
            )
        assert self.config["process"] == "base"
        assert self.config["weight_update_method"] == "fixed"

    def get_local_model(self) -> Dict[str, torch.Tensor]:
        """Return only the sharable subset of the current parameters."""
        sharable_params = {}
        for key in self._sharable_param_keys:
            sharable_params[key] = self._model.state_dict()[key]
        return sharable_params

    def get_model_at_beginning(self) -> Dict[str, torch.Tensor]:
        """Return only the sharable subset of the round-start snapshot."""
        sharable_params = {}
        for key in self._sharable_param_keys:
            sharable_params[key] = self.model_state_beginning[key]
        return sharable_params

    def set_local_model(self, new_state: Dict[str, torch.Tensor], round_i: int = 0):
        """Merge the aggregated sharable parameters with the personal layers.

        Personal layers keep their round-start value until
        personalized_update_start_round, after which they advance by the
        locally stored personalized delta.
        """
        combined_state = {}
        for key in self._model.state_dict().keys():
            if key in self._sharable_param_keys:
                combined_state[key] = new_state[key]
            else:
                if (
                    round_i > self.personalized_update_start_round
                    and key in self._personalized_delta
                ):
                    combined_state[key] = self.model_state_beginning[key] + (
                        self._personalized_delta[key]
                    )
                else:
                    if key in self.model_state_beginning.keys():
                        combined_state[key] = self.model_state_beginning[key]
                    else:
                        # First round: no snapshot yet, keep the current value.
                        combined_state[key] = self._model.state_dict()[key]
        self._model.load_state_dict(combined_state)

    def _compute_delta(self):
        # Sharable deltas go into delta/delta_flat (used for aggregation and
        # similarity); personal-layer deltas are kept separately.
        self.delta = {}
        self.delta_flat = torch.Tensor().to(self.device)
        self._personalized_delta = {}
        for key in self._model.state_dict().keys():
            if key in self._sharable_param_keys:
                self.delta[key] = torch.sub(
                    self._model.state_dict()[key], self.model_state_beginning[key]
                )
                self.delta_flat = torch.cat(
                    [self.delta_flat, self.delta[key].flatten()]
                )
            else:
                self._personalized_delta[key] = torch.sub(
                    self._model.state_dict()[key], self.model_state_beginning[key]
                )

    def _get_regularization_loss(self):
        """Regularization applied only to the personal (non-shared) layers.

        Three modes via config["reg_type"]: "norm" (p-norm), "gamma"
        (clipped gamma-shaped penalty) or the default custom curve. Parameters
        named ``*_w`` are regularized around 1 instead of 0.
        """
        if self.config["reg_multiplier"] == 0.0:
            return 0.0

        def _custom(w: torch.Tensor, exponent=4, stretch=10, increase=2):
            # Penalty curve that is large near 0, dips, then grows again;
            # clipped to [0, 1] and averaged over the elements.
            w = w.abs().clip(0.001)  # avoid division by zero
            l = (
                (1 / (w))
                - torch.pow(1 / (w * stretch), exponent)
                - 1
                + increase * (w - 1)
            ) / stretch
            return torch.nansum(torch.clip(l, min=0.0, max=1.0)) / torch.numel(
                l
            )  # torch.mean(torch.clip(l, min=0.0, max=1.0))

        def _gamma(w: torch.Tensor, a=2.0, b=2.0, stretch=4.0):
            # Gamma-distribution-shaped penalty, clipped to [0, 1] and averaged.
            assert a >= 2.0
            gamma = (
                np.power(b, a)
                * torch.pow(w, (a - 1))
                * torch.exp(-b * stretch * w)
                * stretch
            )
            return torch.mean(torch.clip(gamma, min=0.0, max=1.0))

        reg = Variable(torch.zeros(1, dtype=torch.float), requires_grad=True).to(
            self.device
        )
        for w in self._model.named_parameters():
            if self.config["reg_type"] == "norm":
                if w[0] not in self._sharable_param_keys:
                    if w[0].endswith("_w"):
                        # ``*_w`` parameters are pulled toward magnitude 1.
                        reg = reg + (w[1].abs() - 1).norm(
                            p=self.config["weight_reg_norm"]
                        )
                    else:
                        reg = reg + (w[1].abs()).norm(p=self.config["weight_reg_norm"])
            elif self.config["reg_type"] == "gamma":
                if w[0] not in self._sharable_param_keys:
                    if w[0].endswith("_w"):
                        reg = reg + _gamma((w[1].abs() - 1))
                    else:
                        reg = reg + _gamma(w[1].abs())
            else:
                if w[0] not in self._sharable_param_keys:
                    if w[0].endswith("_w"):
                        reg = reg + _custom((w[1].abs() - 1))
                    else:
                        reg = reg + _custom(w[1].abs())
        return self.config["reg_multiplier"] * reg.clip(0.0)
class APFLParticipant(BaseParticipant):
def __init__(self, model_class: Type[BaseModel], model_init_params: dict, **args):
super(APFLParticipant, self).__init__(
model_class=model_class, model_init_params=model_init_params, **args
)
assert self.config["optimizer"] == "SGD"
assert self.config["process"] == "apfl"
assert self.config["reg_multiplier"] == 0.0
assert self.config["mixing_init"] != "local"
# mixing parameter (low = local)
self.alpha = 0.5
# local model and optimizer (v)
self.personalized_model_state_beginning = {}
self._personalized_model = model_class(**model_init_params).to(self.device)
self._personalized_optimizer = get_optimizer(
self.config, self._personalized_model.parameters()
)
self._personalized_lr_scheduler = StepLR(
self._personalized_optimizer,
int(self.config["n_rounds"] / self.config["decay_steps"]),
self.config["decay_mult"],
)
self.personalized_delta = {}
self.personalized_delta_flat = torch.Tensor()
# global model parameters:
self.global_model_params = copy.deepcopy(self._model.state_dict())
self.personalized_model_params = copy.deepcopy(
self._personalized_model.state_dict()
)
self.mixed_model_params = copy.deepcopy(self._compute_mixed_model())
def local_train_step(
self, round_n, n_epochs: int = 1,
):
# set the models
self._model.load_state_dict(self.global_model_params)
self._personalized_model.load_state_dict(self.personalized_model_params)
# copy beginning states
self.model_state_beginning = copy.deepcopy(self._model.state_dict())
self.personalized_model_state_beginning = copy.deepcopy(
self._personalized_model.state_dict()
)
running_stats = self._get_running_stats()
# training of global and local model
for e in range(n_epochs):
for batch_idx in range(self.n_batches):
data, target = next(iter(self.dataset_loader.train_loader))
# global update step
self._model.train()
running_stats = self._local_model_batch_step(
data, target, running_stats
)
self._model.eval()
# personalized model update
self._personalized_model.train()
self._batch_step_apfl_local(
data, target,
)
self._personalized_model.eval()
self._lr_scheduler.step()
self._personalized_lr_scheduler.step()
# set global and local model to current state
self.global_model_params = copy.deepcopy(self._model.state_dict())
self.personalized_model_params = copy.deepcopy(
self._personalized_model.state_dict()
)
self._compute_delta()
self.alpha = self._alpha_update()
# compute and load the mixed model for evaluation
self.mixed_model_params = self._compute_mixed_model()
self._model.load_state_dict(self.mixed_model_params)
return self._evaluate(running_stats)
def _alpha_update(self):
    """Gradient step on the APFL mixing coefficient.

    Computes ``alpha - lr * <v_begin - w_begin, grad_mixed>`` using the
    flattened tensors prepared by ``_compute_delta`` and clips the result
    to [0, 1].

    Returns:
        torch.Tensor: the updated alpha, clipped to [0, 1].
    """
    lr = self.config["optimizer_params"]["lr"]
    # dot product between the (personalized - global) parameter difference
    # and the recovered gradient of the mixed model
    new_alpha = self.alpha - lr * (
        torch.dot(self.model_dif_flat, self.mixed_grad_flat)
    )
    return new_alpha.clip(0, 1)
def _compute_delta(self):
    """Compute per-round parameter deltas for both models.

    Populates:
      * ``self.delta`` / ``self.delta_flat``: change of the shared model over
        this round (end - beginning), per tensor and flattened.
      * ``self.personalized_delta`` / ``self.personalized_delta_flat``: the
        same for the personalized model.
      * ``self.model_dif_flat``: flattened difference between the personalized
        and shared weights at the *beginning* of the round.
      * ``self.mixed_grad_flat``: approximate gradient of the mixed model,
        recovered as ``-personalized_delta / lr``.
    """
    # deltas
    self.delta = {}
    self.delta_flat = torch.Tensor().to(self.device)
    self.personalized_delta = {}
    self.personalized_delta_flat = torch.Tensor().to(self.device)
    # custom
    self.model_dif_flat = torch.Tensor().to(self.device)
    for key in self._model.state_dict().keys():
        # for the shared model
        self.delta[key] = torch.sub(
            self._model.state_dict()[key], self.model_state_beginning[key]
        )
        self.delta_flat = torch.cat([self.delta_flat, self.delta[key].flatten()])
        # for the personalized model
        self.personalized_delta[key] = torch.sub(
            self._personalized_model.state_dict()[key],
            self.personalized_model_state_beginning[key],
        )
        self.personalized_delta_flat = torch.cat(
            [self.personalized_delta_flat, self.personalized_delta[key].flatten()]
        )
        # model difference
        self.model_dif_flat = torch.cat(
            [
                self.model_dif_flat,
                self.personalized_model_state_beginning[key].flatten()
                - self.model_state_beginning[key].flatten(),
            ]
        )
    # gradient of mixed model (negative scaled delta of personalized/local model)
    # NOTE(review): this assumes plain SGD (delta = -lr * grad); with momentum
    # or Adam the recovered "gradient" is only an approximation — confirm.
    self.mixed_grad_flat = (
        -self.personalized_delta_flat / self.config["optimizer_params"]["lr"]
    )
def set_local_model(self, new_state: Dict[str, torch.Tensor], round_i: int = 0):
    """Overwrite the stored global-model parameters with ``new_state``.

    Typically called by the server/aggregator at the start of a round;
    ``round_i`` is accepted for interface compatibility but not used.
    """
    self.global_model_params = new_state
def _compute_mixed_model(self):
    """Return the alpha-mixture of personalized (v) and global (w) weights.

    mixed[key] = alpha * v[key] + (1 - alpha) * w[key], tensor by tensor.

    Returns:
        dict: state-dict-like mapping of mixed parameter tensors.
    """
    mixed_model = {}
    w = self._model.state_dict()
    v = self._personalized_model.state_dict()
    for key in w.keys():
        mixed_model[key] = self.alpha * v[key] + (1 - self.alpha) * w[key]
    return mixed_model
def _batch_step_apfl_local(
    self, data, target,
):
    """One optimization step for the personalized model on a single batch.

    The loss is computed on the alpha-mixed output of the global and
    personalized models, but only the personalized optimizer steps; the
    global optimizer is merely zeroed so its gradients do not accumulate.

    Returns:
        True (no failure signaling is implemented).
    """
    data, target = (
        data.to(self.device, non_blocking=True),
        target.to(self.device, non_blocking=True),
    )
    self._optimizer.zero_grad()
    self._personalized_optimizer.zero_grad()
    output_global = self._model(data)
    output_personalized = self._personalized_model(data)
    output = self.alpha * output_personalized + (1 - self.alpha) * output_global
    # NOTE(review): nll_loss expects log-probabilities — confirm both models
    # end with log_softmax.
    loss = F.nll_loss(output, target, weight=self._class_weights, reduction="sum",)
    loss.backward()
    self._personalized_optimizer.step()
    return True
|
991,029 | 7903be6fc31dad03c65199597f33d9ee931e402a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import podium_api
from podium_api.asyncreq import get_json_header_token, make_request_custom_success
from podium_api.types.racestat import get_racestat_from_json
from podium_api.types.redirect import get_redirect_from_json
def make_racestat_get(
    token,
    endpoint,
    expand=False,
    quiet=None,
    success_callback=None,
    failure_callback=None,
    progress_callback=None,
    redirect_callback=None,
):
    """
    Fetch the Racestat found at the given URI.

    Issues a GET request whose successful response is parsed by
    **racestat_success_handler** into a PodiumRacestat.

    Args:
        token (PodiumToken): The authentication token for this session.
        endpoint (str): URI of the racestat to retrieve.

    Kwargs:
        expand (bool): Expand all objects in response output. Defaults to False.
        quiet (object): If not None HTML layout will not render endpoint
            description. Defaults to None.
        success_callback (function): Callback for a successful request,
            will have the signature:
                on_success(racestat (PodiumRacestat))
            Defaults to None.
        failure_callback (function): Callback for redirects, failures, and
            errors. Will have the signature:
                on_failure(failure_type (string), result (dict), data (dict))
            Values for failure type are: 'error', 'redirect', 'failure'.
            Defaults to None.
        progress_callback (function): Callback for progress updates,
            will have the signature:
                on_progress(current_size (int), total_size (int), data (dict))
            Defaults to None.
        redirect_callback (function): Callback for redirect,
            will have the signature:
                on_redirect(result (dict), data (dict))
            Defaults to None.

    Return:
        UrlRequest: The request being made.
    """
    params = {"expand": expand}
    if quiet is not None:
        params["quiet"] = quiet
    return make_request_custom_success(
        endpoint,
        racestat_success_handler,
        method="GET",
        success_callback=success_callback,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        redirect_callback=redirect_callback,
        params=params,
        header=get_json_header_token(token),
    )
# Attributes copied from each racestat dict into the form-encoded body of a
# bulk create request.
RACESTAT_CREATE_FIELDS = (
    "device_id",
    "comp_number",
    "comp_class",
    "total_laps",
    "last_lap_time",
    "position_overall",
    "position_in_class",
    "comp_number_ahead",
    "comp_number_behind",
    "gap_to_ahead",
    "gap_to_behind",
    "laps_to_ahead",
    "laps_to_behind",
    "fc_flag",
    "comp_flag",
)


def make_racestats_create(
    token,
    event_id,
    racestats,
    success_callback=None,
    failure_callback=None,
    progress_callback=None,
    redirect_callback=None,
):
    """
    Add a collection of racestats to the specified event id.

    Args:
        token (PodiumToken): The authentication token for this session.
        event_id: The id of the event to apply racestats to.
        racestats (list of dict): One dict per racestat; each must contain all
            keys listed in ``RACESTAT_CREATE_FIELDS``.

    Kwargs:
        success_callback (function): Callback for a successful request,
            will have the signature:
                on_success(result (dict), data (dict))
            Defaults to None.
        failure_callback (function): Callback for failures and errors,
            will have the signature:
                on_failure(failure_type (string), result (dict), data (dict))
            Defaults to None.
        progress_callback (function): Callback for progress updates,
            will have the signature:
                on_progress(current_size (int), total_size (int), data (dict))
            Defaults to None.
        redirect_callback (function): Callback for redirect,
            will have the signature:
                on_redirect(redirect_object (PodiumRedirect))
            Defaults to None.

    Return:
        UrlRequest: The request being made.
    """
    endpoint = "{}/api/v1/events/{}/racestats".format(podium_api.PODIUM_APP.podium_url, event_id)
    # Flatten the list of dicts into Rails-style indexed form parameters:
    # racestat[<index>][<field>] = value
    body = {}
    for index, racestat in enumerate(racestats):
        for field in RACESTAT_CREATE_FIELDS:
            body[f"racestat[{index}][{field}]"] = racestat[field]
    header = get_json_header_token(token)
    return make_request_custom_success(
        endpoint,
        None,
        method="POST",
        success_callback=success_callback,
        redirect_callback=create_racestat_redirect_handler,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        body=body,
        header=header,
        data={"_redirect_callback": redirect_callback},
    )
def make_racestat_create(
    token,
    event_id,
    device_id,
    comp_number,
    comp_class,
    total_laps,
    last_lap_time,
    position_overall,
    position_in_class,
    comp_number_ahead,
    comp_number_behind,
    gap_to_ahead,
    gap_to_behind,
    laps_to_ahead,
    laps_to_behind,
    fc_flag,
    comp_flag,
    success_callback=None,
    failure_callback=None,
    progress_callback=None,
    redirect_callback=None,
):
    """
    Add a racestat for the specified event_id / device_id.

    The URI for the newly created racestat will be provided to the
    redirect_callback, if one is provided, in the form of a PodiumRedirect.

    Args:
        token (PodiumToken): The authentication token for this session.

    Kwargs:
        success_callback (function): Callback for a successful request,
            will have the signature:
                on_success(result (dict), data (dict))
            Defaults to None.
        failure_callback (function): Callback for failures and errors,
            will have the signature:
                on_failure(failure_type (string), result (dict), data (dict))
            Values for failure type are: 'error', 'failure'. Defaults to None.
        redirect_callback (function): Callback for redirect,
            will have the signature:
                on_redirect(redirect_object (PodiumRedirect))
            Defaults to None.
        progress_callback (function): Callback for progress updates,
            will have the signature:
                on_progress(current_size (int), total_size (int), data (dict))
            Defaults to None.

    Return:
        UrlRequest: The request being made.
    """
    endpoint = "{}/api/v1/events/{}/devices/{}/racestat".format(podium_api.PODIUM_APP.podium_url, event_id, device_id)
    # Assemble the Rails-style form body: racestat[<field>] = value.
    field_values = {
        "comp_number": comp_number,
        "comp_class": comp_class,
        "total_laps": total_laps,
        "last_lap_time": last_lap_time,
        "position_overall": position_overall,
        "position_in_class": position_in_class,
        "comp_number_ahead": comp_number_ahead,
        "comp_number_behind": comp_number_behind,
        "gap_to_ahead": gap_to_ahead,
        "gap_to_behind": gap_to_behind,
        "laps_to_ahead": laps_to_ahead,
        "laps_to_behind": laps_to_behind,
        "fc_flag": fc_flag,
        "comp_flag": comp_flag,
    }
    body = {"racestat[{}]".format(name): value for name, value in field_values.items()}
    return make_request_custom_success(
        endpoint,
        None,
        method="POST",
        success_callback=success_callback,
        redirect_callback=create_racestat_redirect_handler,
        failure_callback=failure_callback,
        progress_callback=progress_callback,
        body=body,
        header=get_json_header_token(token),
        data={"_redirect_callback": redirect_callback},
    )
def create_racestat_redirect_handler(req, results, data):
    """
    Handles the success redirect of a **make_racestat_create** call.

    Wraps the URI of the newly created racestat in a PodiumRedirect and hands
    it to the '_redirect_callback' stored in ``data``. Called automatically by
    **make_racestat_create**; does nothing when no callback was registered.

    Args:
        req (UrlRequest): Instance of the request that was made.
        results (dict): Dict returned by the request.
        data (dict): Wildcard dict for containing data that needs to be passed
            to the various callbacks of a request. Will contain at least a
            'success_callback' key.

    Return:
        None, this function instead calls a callback.
    """
    redirect_cb = data["_redirect_callback"]
    if redirect_cb is None:
        return
    redirect_cb(get_redirect_from_json(results, "racestat"))
def racestat_success_handler(req, results, data):
    """
    Creates and returns a PodiumRacestat to the success_callback found in data.

    Called automatically by **make_racestat_get**. When the response carries
    no racestat, the failure_callback (if any) is invoked instead.

    Args:
        req (UrlRequest): Instance of the request that was made.
        results (dict): Dict returned by the request; must contain a
            'racestat' key.
        data (dict): Wildcard dict for containing data that needs to be passed
            to the various callbacks of a request. Will contain at least a
            'success_callback' key.

    Return:
        None, this function instead calls a callback.
    """
    # The payload is a racestat, not an account — the local name and docstring
    # previously said "account" / "make_account_request" by copy-paste.
    racestat = results["racestat"]
    if racestat is not None:
        if data["success_callback"] is not None:
            data["success_callback"](get_racestat_from_json(racestat))
    elif data["failure_callback"] is not None:
        data["failure_callback"]("None", results, data)
|
991,030 | 54aed481c901cca120906facb87d4dea01905ae7 | WTF_CSRF_ENABLED = True
# NOTE(review): placeholder secret — must be overridden with a real random
# value (e.g. from the environment) before any production deployment.
SECRET_KEY = 'changed-in-production'
|
991,031 | 42e6df731eab9360d1f0a0fcacb70c3f2af9b2a5 | #!/usr/bin/python
# Env: python3
# Rewrite by afei_0and1
'''
9ใๅฐๅ่กจไธญ็0่ฟ่กๅ็ฝฎ๏ผๅ่กจ้ๆ๏ผ
่พๅ
ฅไธไธชๆดๆฐๅ่กจ๏ผๅ่กจไธญ็ฉฟๆๅญๅ
ฅไบๅ
็ด 0ใ่ฆๆฑ็ผๅ็จๅบ๏ผๅฐๅ่กจไธญๆๆ็0ๅ็ฝฎ๏ผๅๆถไฟๆๅ
ถไปๅ
็ด ็้กบๅบไธๅใไพๅฆ๏ผ
่พๅ
ฅๅ่กจไธบ[1,3,0,2,0,5]๏ผๅ้่ฆ่ฟๅ[1,3,2,5,0,0]ใ
่งฃ้ขๆ่ทฏ๏ผ
ๆนๆณไธ๏ผๅๅปบไธไธชๆฐ็ๅ่กจ๏ผ้่ฟ้ๅๆง็ๅ่กจ๏ผๅฐๆงๅ่กจไธญ็ๅ
็ด ๆ็
งๆฐ็่งๅๆพๅ
ฅๆฐๅ่กจๅฎๆ้ๆ๏ผ่ฟ็ง้ๆๆนๅผ่ขซ็งฐไธบ
้ๅๅฐ้ๆ๏ผ
ๆนๆณไบ๏ผ้่ฟๅฏนๆงๅ่กจ็ๅ
็ด ่ฟ่กไบคๆขๅ็งปๅจๆไฝๅฎ็ฐ้ๆ๏ผ่ฟ็ง้ๆๆนๅผ่ขซ็งฐไธบๅๅฐ้ๆใๅๅฐ้ๆๅฏไปฅ่็ไธๅฟ
่ฆ็ๅ
ๅญ
็ฉบ้ด๏ผไฝๆฏ้่ฆ้่ฆ่ฟ่ก้ขๅค็ๅ
็ด ็งปๅจๆไฝ๏ผๅ ๆญคๅ
ถ็ฉบ้ดๆ็้ซไฝๆถ้ดๆ็ไฝใ
ๅจๅฎ้
ๅบ็จไธญ๏ผๆ นๆฎ้ๆฑๅฏไปฅ้ๆฉ็ฎๆณ่ฟ่ก้ๆใ
'''
#้ๅๅฐ้ๆ
# out-of-place rearrangement
def willbeListZeroPostition_1(l):
    """Return a new list with every zero of ``l`` moved to the end.

    The relative order of the non-zero elements is preserved and the input
    list is left unmodified (a new list is built).

    Args:
        l: list of integers, possibly containing zeros.

    Returns:
        A new list: non-zero elements in original order, then the zeros.
    """
    nonzeros = [x for x in l if x != 0]
    # pad the tail with as many zeros as were filtered out
    return nonzeros + [0] * (len(l) - len(nonzeros))
print(willbeListZeroPostition_1([1,3,0,2,0,5]))
'''
Output result๏ผ
[1, 3, 2, 5, 0, 0]
'''
#ๅๅฐ้ๆ
# in-place rearrangement
def willbeListZeroPostition_2(l):
    """Move every zero in ``l`` to the tail, in place, and return ``l``.

    Non-zero elements keep their relative order. Uses a single forward pass
    with a write pointer: each non-zero element is swapped into the next
    slot reserved for non-zeros, leaving all zeros at the end.
    """
    write = 0  # index of the next slot that should hold a non-zero element
    for read in range(len(l)):
        if l[read] != 0:
            l[write], l[read] = l[read], l[write]
            write += 1
    return l
print(willbeListZeroPostition_2([1,3,0,2,0,5]))
'''
Output result๏ผ
[1, 3, 2, 5, 0, 0]
''' |
991,032 | 7393a02fb05cd4b9eea564c5f08f8db28debcd79 | """
connection to database
"""
import datetime
import logging
import os
import sqlalchemy
from sqlalchemy import orm
from db.db_connect import TABLE_NAME, COLUMN_TYPES
from db.mysql_connect import create_mysql_session
from straddle.market_watcher_parser import getOptionMW
from straddle.strategy import Strike
# key fields of a strike
KEY_STRIKE_FIELDS = {
'underlying':'underlying',
'expiration':'expiration',
'strike':'strike',
'is_call':'is_call',
}
SELECT_LATEST = """
SELECT %s
FROM
(
SELECT underlying, expiration, strike, is_call, max(query_time) AS qt
FROM %s
GROUP BY underlying, expiration, strike, is_call
) r
INNER JOIN %s t
ON t.query_time = r.qt
AND t.strike=r.strike
AND t.underlying=r.underlying
AND t.is_call=r.is_call
AND t.expiration=r.expiration
WHERE t.underlying = '%s'
AND %s
AND t.expiration >= '%s' AND t.expiration <= '%s'
AND t.is_call IN (%s);
"""
def get_day_range(a, b, date_format='%Y-%m-%d'):
    """Return the pair [today + a days, today + b days].

    When ``date_format`` is None the two ``datetime.date`` objects are
    returned as-is; otherwise both are formatted with ``date_format``.
    """
    today = datetime.datetime.now().date()
    bounds = [today + datetime.timedelta(days=offset) for offset in (a, b)]
    if date_format is None:
        return bounds
    return [day.strftime(date_format) for day in bounds]
SELECT_QUERY_TIME = """
SELECT %s
FROM %s t
WHERE t.underlying = '%s'
AND %s
AND t.expiration >= '%s' AND t.expiration <= '%s'
AND t.is_call IN (%s)
AND t.query_time >= '%s' AND t.query_time < '%s';
"""
def get_query_latest(table_name='test_options',
                     underlying='spy',
                     k_list=None,
                     exps=None,
                     call_list=None,
                     strike_in=False,
                     query_time=None):
    """Build the SQL string selecting the latest strikes for given conditions.

    Args:
        table_name: options table to query.
        underlying: underlying symbol, e.g. 'spy'.
        k_list: strike filter; a [low, high] pair, or an explicit list of
            strikes when ``strike_in`` is True. A scalar is wrapped in a list.
        exps: expiration filter; a [start, end] pair of date strings, or int
            day-offsets converted via ``get_day_range``. A scalar is used for
            both bounds.
        call_list: list of is_call flags to include. Defaults to [True].
        strike_in: if True match strikes with ``IN (...)``; otherwise use the
            [low, high] range form.
        query_time: if not None, a str like '%Y-%m-%d %H:%M:%S' (PDT); the
            query then returns the data within +/- 15 minutes of that time
            instead of the latest row per strike.

    Returns:
        str: the SQL query.
    """
    # NOTE(review): the query is assembled with raw %-interpolation, so every
    # argument must come from trusted code (no SQL-injection protection here).
    if call_list is None:
        # avoid the former mutable default argument [True]; semantics unchanged
        call_list = [True]
    if not isinstance(k_list, list):
        k_list = [k_list]
    if not isinstance(exps, list):
        exps = [exps, exps]
    if isinstance(exps[0], int):
        exps = get_day_range(exps[0], exps[1])
    if not isinstance(call_list, list):
        call_list = [call_list]
    select_str = ','.join(['t.'+x for x in COLUMN_TYPES])
    if strike_in:
        strike_str = 't.strike IN (%s)' % ','.join([str(x) for x in k_list])
    else:
        strike_str = 't.strike >= %s AND t.strike <= %s' % (k_list[0], k_list[1])
    call_str = ','.join([str(x) for x in call_list])
    if query_time is None:
        query = SELECT_LATEST % (select_str, table_name, table_name, underlying,
                                 strike_str, exps[0], exps[1], call_str)
    else:
        # select rows in a 30-minute window centered on query_time
        base_time = datetime.datetime.strptime(query_time, '%Y-%m-%d %H:%M:%S')
        qt = [base_time - datetime.timedelta(minutes=15), base_time + datetime.timedelta(minutes=15)]
        qt = [datetime.datetime.strftime(x, '%Y-%m-%d %H:%M:%S') for x in qt]
        query = SELECT_QUERY_TIME % (select_str, table_name, underlying, strike_str,
                                     exps[0], exps[1], call_str, qt[0], qt[1])
    return query
def get_latest_strikes(table_name, underlying, k_list, exps, call_list, query_time):
    """Fetch the latest matching strikes from MySQL as Strike objects.

    See ``get_query_latest`` for the meaning of the filter arguments.

    Returns:
        list of Strike: one per row returned by the query.
    """
    query = get_query_latest(table_name,
                             underlying,
                             k_list,
                             exps,
                             call_list,
                             False,
                             query_time)
    conn = create_mysql_session()
    res = conn.execute(query)
    # map each row tuple onto the known column names, then wrap in a Strike
    # (replaces the former index-based range(len(...)) loop)
    return [Strike(dict(zip(COLUMN_TYPES, row))) for row in res]
def main():
    """Smoke test: fetch the latest SPY strikes around 260 and print them."""
    # query_time=None requests the most recent data. The original call was
    # missing this required positional argument and raised a TypeError.
    strikes = get_latest_strikes('test_options', 'spy', [260], [2, 10], True, None)
    for s in strikes:
        # parenthesized print keeps this line valid in both Python 2 and 3
        print(s.__json__())
if __name__ == '__main__':
main()
|
991,033 | d83e32b3a5431e0521b6e0a3d73a7c3836bb4265 | import unittest
from fizzbuzz import fizzbuzz, FizzBuzzInt
class FizzBuzzTestCase(unittest.TestCase):
    """Unit tests for the fizzbuzz function and the FizzBuzzInt wrapper.

    Covers: plain numbers pass through unchanged, multiples of 3 map to
    'fizz', multiples of 5 to 'buzz', multiples of 15 to 'fizzbuzz', and
    repr() of FizzBuzzInt matches repr() of the corresponding value.
    """

    def test_fizzbuzz_1_retorna_1(self):
        result = fizzbuzz(1)
        self.assertEqual(1, result)

    def test_fizzbuzz_2_retorna_2(self):
        result = fizzbuzz(2)
        self.assertEqual(2, result)

    def test_fizzbuzz_3_retorna_fizz(self):
        result = fizzbuzz(3)
        self.assertEqual('fizz', result)

    def test_fizzbuzz_4_retorna_4(self):
        result = fizzbuzz(4)
        self.assertEqual(4, result)

    def test_fizzbuzz_5_retorna_buzz(self):
        result = fizzbuzz(5)
        self.assertEqual('buzz', result)

    def test_fizzbuzz_6_retorna_fizz(self):
        result = fizzbuzz(6)
        self.assertEqual('fizz', result)

    def test_fizzbuzz_7_retorna_7(self):
        result = fizzbuzz(7)
        self.assertEqual(7, result)

    def test_fizzbuzz_9_retorna_fizz(self):
        result = fizzbuzz(9)
        self.assertEqual('fizz', result)

    def test_fizzbuzz_10_retorna_buzz(self):
        result = fizzbuzz(10)
        self.assertEqual('buzz', result)

    def test_fizzbuzz_15_retorna_fizzbuzz(self):
        result = fizzbuzz(15)
        self.assertEqual('fizzbuzz', result)

    def test_int_fizz_buzz_1_retorna_1(self):
        fizzbuzz_int = FizzBuzzInt(1)
        self.assertEqual(fizzbuzz_int.to_fizzbuzz(), 1)

    def test_int_fizz_buzz_2_retorna_2(self):
        fizzbuzz_int = FizzBuzzInt(2)
        self.assertEqual(fizzbuzz_int.to_fizzbuzz(), 2)

    def test_int_fizz_buzz_3_retorna_3(self):
        fizzbuzz_int = FizzBuzzInt(3)
        self.assertEqual(fizzbuzz_int.to_fizzbuzz(), 'fizz')

    def test_repr_fizz_buzz_int_3(self):
        fizzbuzz_int = FizzBuzzInt(3)
        self.assertEqual(repr(fizzbuzz_int), repr('fizz'))

    def test_repr_fizz_buzz_int_5(self):
        fizzbuzz_int = FizzBuzzInt(5)
        self.assertEqual(repr(fizzbuzz_int), repr('buzz'))

    def test_repr_fizz_buzz_int_13(self):
        fizzbuzz_int = FizzBuzzInt(13)
        self.assertEqual(repr(fizzbuzz_int), repr(13))
if __name__ == '__main__':
    # Run the suite only when executed as a script; the unguarded call used
    # to run (and sys.exit) the tests on a mere import of this module.
    unittest.main()
991,034 | 55796b97be2983a3ae5cd502c8bf6a7ba206b79e | import numpy as np
class BinaryHigherOrderModel:
    """Higher order (polynomial) model over binary/spin variables.

    Args:
        interactions (list): ``interactions[0]`` maps single variable labels
            to their linear coefficients; each ``interactions[k]`` (k >= 1)
            maps label tuples to the coefficient of that product term.
    """

    def __init__(self, interactions: list):
        self.interactions = interactions
        # collect every variable label appearing in any interaction term
        indices = set(self.interactions[0].keys())
        for coeff in self.interactions[1:]:
            for _inds in coeff.keys():
                indices = indices | set(_inds)
        self.indices = list(indices)
        # ensure every variable has a (possibly zero) linear coefficient
        for i in self.indices:
            if i not in self.interactions[0]:
                self.interactions[0][i] = 0.0

    def adj_dict(self):
        """adjacency list of each variables

        Returns:
            dict: key (variables key), value (list of [other labels, coefficient]
            pairs, one per higher-order term the variable participates in)
        """
        adj_dict = {i: [] for i in self.indices}
        for coeff in self.interactions[1:]:
            for _inds, value in coeff.items():
                for i in _inds:
                    _inds_list = list(_inds)
                    _inds_list.remove(i)
                    adj_dict[i].append([_inds_list, value])
        return adj_dict

    def energy(self, state):
        """calculate energy of state

        Args:
            state (list/array or dict): SPIN or BINARY values. A sequence is
                indexed directly by variable label; a dict is keyed by label.

        Returns:
            float: energy of state
        """
        energy = 0.0
        if isinstance(state, dict):
            # BUGFIX: the previous implementation re-ordered the dict values
            # by self.indices but then kept indexing with the raw variable
            # labels — wrong whenever the labels are not exactly 0..n-1 in
            # that set order. Translate labels to array positions instead.
            pos = {label: k for k, label in enumerate(self.indices)}
            values = np.array([state[label] for label in self.indices])
            for coeff in self.interactions[1:]:
                for _inds, value in coeff.items():
                    energy += value * np.prod(values[[pos[i] for i in _inds]])
            for i, hi in self.interactions[0].items():
                energy += hi * values[pos[i]]
            return energy
        state = np.array(state)
        for coeff in self.interactions[1:]:
            for _inds, value in coeff.items():
                energy += value * np.prod(state[list(_inds)])
        for i, hi in self.interactions[0].items():
            energy += hi * state[i]
        return energy

    def calc_energy(self, state):
        """alias of `energy`

        Args:
            state (list of int): list of SPIN or BINARY

        Returns:
            float: energy of state
        """
        return self.energy(state)
991,035 | 44ecea8b6b486dab3df130b6c7ccc154f9142742 | a = float(input())
# Print the difference between each input number and the previous one,
# two decimals each, stopping at the sentinel value 999.
# (``a`` holds the previously read number; the first value was read above.)
while True:
    n = float(input())
    if(n==999): break
    print('{:0.2f}'.format(n-a))
    a = n
991,036 | 3f2beb22acf9bfeac28e763fae22fe33186eec08 | import Tkinter as tk
from lxml import etree
element_to_view_map = {}
# this is a mixin class that is used to implement data binding between views
# and models; it shouldn't be instantiated on its own because it has no model
# without deriving a specific type of view from it
class ViewWithUpdate(object):
    """Mixin providing two-way binding between Tk variables and an lxml
    element tree.

    The deriving view class is expected to provide ``self.model`` (an lxml
    element) and the Tk widget machinery (``event_generate``).
    """

    def update_model(self, child_tag, var, *args):
        """Write the value of Tk variable ``var`` into subelement ``child_tag``
        of the model, creating missing elements (including 'a/b/c'-style
        nested paths) on the way.

        Extra ``*args`` absorb the trace-callback arguments Tk passes.
        """
        child = self.model.find(child_tag)
        if child is None:
            # handle updates to arbitrary subelements
            if '/' in child_tag:
                #import ipdb; ipdb.set_trace()
                path = child_tag.split('/')
                current_elem = self.model
                # walk the path, creating each missing intermediate element
                for path_child in path:
                    parent = current_elem.find(path_child)
                    if parent is None:
                        current_elem = etree.SubElement(current_elem, path_child)
                    else:
                        current_elem = parent
                current_elem.text = str(var.get())
            else:
                child = etree.SubElement(self.model, child_tag)
                child.text = str(var.get())
        else:
            child.text = str(var.get())

    def update_view(self, child_tag, var, value_type):
        """Copy the text of subelement ``child_tag`` into ``var``, converted
        with ``value_type``; a missing element leaves ``var`` untouched."""
        #print 'updating {} from element {}'.format(str(var), child_tag)
        child = self.model.find(child_tag)
        if child is not None:
            var.set(value_type(child.text))

    def update_view_attribute(self, attribute, var, value_type):
        """Copy the model's XML attribute into ``var`` (converted with
        ``value_type``); a missing attribute leaves ``var`` untouched."""
        value = self.model.attrib.get(attribute, None)
        if value is not None:
            var.set(value_type(value))

    def update_model_attribute(self, attribute, var, *args):
        """Write ``var`` into the model's XML attribute; an empty (stripped)
        value deletes the attribute. Fires <<ElemUpdate>> afterwards.

        NOTE(review): the delete path assumes the attribute currently exists —
        ``del`` raises KeyError otherwise; confirm callers guarantee this.
        """
        value = str(var.get()).strip()
        if value == '':
            del self.model.attrib[attribute]
        elif value is not None:
            # value is always a str here, so this branch runs for any
            # non-empty text
            self.model.attrib[attribute] = unicode(value)
        self.event_generate('<<ElemUpdate>>')
class ViewBase(tk.Frame):
    """Base Tk frame binding an XML model element to a view.

    Keyword arguments named in ``_passthrough_args`` are removed from
    ``kwargs`` (and kept in ``self.view_args``) before the remainder is
    forwarded to ``tk.Frame``.
    """

    _passthrough_args = ['attributes', 'children']

    def __init__(self, model, *args, **kwargs):
        view_args = {}
        for arg in self._passthrough_args:
            view_args[arg] = kwargs.pop(arg, None)
        self.view_args = view_args
        tk.Frame.__init__(self, *args, **kwargs)
        self.model = model
__all__ = ['decorators', 'generic', 'notice', 'paragraphs', 'element_to_view_map'] |
991,037 | c126000499f4647d070a77d2885586bbd14bdd8b | from flask import Flask
from flask import render_template
from twilio.rest import TwilioRestClient
import twilio.twiml
from flask import request
app = Flask(__name__)
def makePhoneCall():
    """Place an outbound Twilio call handled by the TwiML at server_url."""
    import os
    # SECURITY: credentials were hard-coded in source. Prefer the environment;
    # the old literals remain only as a fallback so behavior is unchanged —
    # rotate these leaked values and remove the fallbacks.
    account_sid = os.environ.get("TWILIO_ACCOUNT_SID",
                                 "ACc593f66f0b7dd76aefcc8dba0ad31361")
    auth_token = os.environ.get("TWILIO_AUTH_TOKEN",
                                "96aa5dffecbd672bfa957570da84d43b")
    client = TwilioRestClient(account_sid, auth_token)
    server_url = "https://341b598f.ngrok.com/"
    call = client.calls.create(to="+16095715366",  # Any phone number
                               from_="2153757024",  # Must be a valid Twilio number
                               url=server_url)
    # parenthesized print is valid in both Python 2 and 3
    print(call.sid)
@app.route("/", methods=['GET', 'POST'])
@app.route("/<headline>", methods=['GET', 'POST'])
def hello(headline=None):
    """Render the message page, optionally with a headline from the URL."""
    return render_template('message.html', headline=headline)
@app.route("/monkey", methods=['GET', 'POST'])
def hello_monkey():
    """Respond to incoming Twilio SMS by echoing the message body as TwiML."""
    body = request.values['Body']
    # parenthesized print is valid in both Python 2 and 3
    print(body)
    resp = twilio.twiml.Response()
    # makePhoneCall()
    resp.message("You said, " + body)
    return str(resp)
if __name__ == "__main__":
app.run(debug=True)
|
991,038 | 71e4fae2e9569aa807c12f0f32dac83cd76a278f | from src.tools import vector
class LineSprite:
    """Drawable line segment described by a displacement vector.

    Attributes:
        size: displacement of the line (``vector.zero`` when omitted/falsy).
        color: fill color name; defaults to "gray".
        arrow_type: optional arrow-head style marker.
        visible: whether the sprite should be drawn.
        sprite_type: constant tag used for renderer dispatch ("line").
        display_radius: magnitude of ``size``.
    """

    def __init__(self, size=None, color=None, arrow_type=None, visible=True):
        self.size = size if size else vector.zero
        self.color = color if color else "gray"
        self.arrow_type = arrow_type
        self.visible = visible
        self.sprite_type = "line"
        self.display_radius = abs(self.size)

    def __repr__(self):
        suffix = "" if self.visible else ", invisible"
        return "{{LineSprite: r={0}{1}}}".format(self.size, suffix)
991,039 | efa187620d712fe1e5dc1e6a4383cd72c3ec7d14 | from settings import DATABASES
import os
import sys
# Sanity check: dump the DATABASES configuration imported from settings.
print(DATABASES)
|
991,040 | ee79a65eaecb8adcbc0c4a97a40d9252f3ac7b78 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import logging
import os
import time
from functools import partial
import numpy as np
import ray
import torch
from ray import tune
from ray.tune import Experiment
from ray.tune.logger import JsonLogger, CSVLogger
from ray.tune.suggest.variant_generator import generate_variants
from torch import nn
from torchvision import transforms
from src.modules.base import get_block_model
from src.modules.utils import get_conv_out_size
from src.train.ignite_utils import _prepare_batch
from src.train.ray_training import TrainLLModel, OSTrainLLModel, \
convert_to_tune_search_space
from src.train.training import train, get_classic_dataloaders
from src.train.utils import set_optim_params, _load_datasets
from src.utils.misc import get_env_url, rename_class
from src.utils.plotting import plot_res_dataframe, plot_trajectory, \
list_top_archs, list_arch_scores, update_summary
logger = logging.getLogger(__name__)
class LifelongLearningModel(nn.Module, abc.ABC):
def __init__(self, n_hidden, n_convs, hidden_size, dropout_p, grid_params,
             ray_resources, channel_scaling, base_model=get_block_model,
             *args, **kwargs):
    """Base class for lifelong learners that keep one sub-model per task.

    Args:
        n_hidden: number of hidden layers; when given, ``hidden_size`` must
            be a single-element list that is replicated ``n_hidden`` times.
        n_convs: number of convolutional layers at the front of each model.
        hidden_size: list of hidden-layer widths.
        dropout_p: dropout probability; an int scalar is replicated per
            hidden layer, otherwise a per-layer list is expected.
        grid_params: hyper-parameter grid used to build the Tune search space.
        ray_resources: per-trial resource spec passed to Ray Tune.
        channel_scaling: channel scaling factor, stored for subclasses.
        base_model: factory used to build each task's network.
    """
    super(LifelongLearningModel, self).__init__(*args, **kwargs)
    self.models = nn.ModuleList([])
    self.grid_params = grid_params
    assert isinstance(hidden_size, list)
    if n_hidden is not None:
        # replicate the single given width n_hidden times
        assert len(hidden_size) == 1
        hidden_size = hidden_size * n_hidden
    else:
        assert n_hidden is None  # (always true in this branch)
    self.hidden_size = hidden_size
    self.channel_scaling = channel_scaling
    if isinstance(dropout_p, int):
        dropout_p = [dropout_p] * len(self.hidden_size)
    self._dropout_p = dropout_p
    # self.n_hidden = n_hidden
    self.n_convs = n_convs
    self.ray_resources = ray_resources
    self.base_model_func = partial(base_model, dropout_p=dropout_p, n_convs=self.n_convs)
# Per-layer hyper-parameter accessors. Each parameter may be a scalar shared
# by all layers or a per-layer tuple/list. Conv-only parameters return None
# past the conv stack. NOTE(review): self._k, self._stride, self._pad and
# self._pool_k are not set in this base class — subclasses are expected to
# define them; confirm.
def get_k(self, layer):
    """Kernel size of conv layer ``layer`` (None beyond the conv stack)."""
    if layer >= self.n_convs:
        return None
    elif isinstance(self._k, (tuple, list)):
        return self._k[layer]
    else:
        return self._k

def get_stride(self, layer):
    """Stride of conv layer ``layer`` (None beyond the conv stack)."""
    if layer >= self.n_convs:
        return None
    elif isinstance(self._stride, (tuple, list)):
        return self._stride[layer]
    else:
        return self._stride

def get_pad(self, layer):
    """Padding of conv layer ``layer`` (None beyond the conv stack)."""
    if layer >= self.n_convs:
        return None
    elif isinstance(self._pad, (tuple, list)):
        return self._pad[layer]
    else:
        return self._pad

def get_dropout_p(self, layer):
    """Dropout probability for ``layer`` (applies to all layers, not only convs)."""
    if isinstance(self._dropout_p, (tuple, list)):
        return self._dropout_p[layer]
    else:
        return self._dropout_p

def get_pool_k(self, layer):
    """Pooling kernel of conv layer ``layer`` (None beyond the conv stack)."""
    if layer >= self.n_convs:
        return None
    if isinstance(self._pool_k, (tuple, list)):
        return self._pool_k[layer]
    else:
        return self._pool_k
def get_model(self, task_id, **task_infos):
    """Return the model for ``task_id``, creating it for unseen tasks.

    A new task must have ``task_id == len(self.models)`` and supply at least
    ``x_dim`` and ``n_classes`` in ``task_infos``; for existing tasks the
    kwargs are ignored.
    """
    if task_id >= len(self.models):
        # this is a new task
        # New tasks should always give the x_dim and n_classes infos.
        assert 'x_dim' in task_infos and 'n_classes' in task_infos
        assert task_id == len(self.models)
        model = self._new_model(task_id=task_id, **task_infos)
        self.models.append(model)
    return self.models[task_id]
def get_sizes(self, x_dim, n_classes):
    """Compute layer dimensions [input, hidden..., output] for a new model.

    For each conv layer, the spatial map size after the convolution (and
    the optional pooling) is appended to the corresponding hidden entry,
    yielding entries of the form [channels, height, width].
    """
    if len(x_dim) == 1:
        x_dim = x_dim[0]
        assert self.n_convs == 0, 'Can\'t use convs on 1D inputs.'
    assert len(n_classes) == 1, 'Only supports single output'
    n_classes = n_classes[0]
    # Put all dimensions together for current model.
    model_dims = [x_dim, *self.hidden_size, n_classes]
    # if self.residual:
    #     return model_dims
    for i in range(self.n_convs):
        # Compute intermediate map sizes
        img_size = model_dims[i][1:]
        k = self.get_k(i)
        pad = self.get_pad(i)
        stride = self.get_stride(i)
        out_size = get_conv_out_size(img_size, k, pad, stride)
        pool_k = self.get_pool_k(i)
        if pool_k is not None:
            out_size = get_conv_out_size(out_size, pool_k, 0, pool_k)
        model_dims[i+1] = [model_dims[i+1], *out_size]
    return model_dims
def get_res_sizes(self, x_dim, n_classes):
    """Residual-network variant of ``get_sizes``.

    Uses the residual hyper-parameters (``_res_hidden_size``, ``_res_stride``,
    ``_res_pool_k``, ``n_res_blocks``) — NOTE(review): these are expected to
    be set by subclasses; confirm. Returns the dims as a numpy array.
    """
    if len(x_dim) == 1:
        x_dim = x_dim[0]
        assert self.n_convs == 0, 'Can\'t use convs on 1D inputs.'
    assert len(n_classes) == 1, 'Only supports single output'
    n_classes = n_classes[0]
    # Put all dimensions together for current model.
    model_dims = [x_dim, *self._res_hidden_size, n_classes]
    # if self.residual:
    #     return model_dims
    for i in range(self.n_res_blocks+1):
        # Compute intermediate map sizes
        img_size = model_dims[i][1:]
        k = self._k
        pad = self._pad
        stride = self._res_stride[i]
        out_size = get_conv_out_size(img_size, k, pad, stride)
        pool_k = self._res_pool_k[i]
        if pool_k is not None:
            out_size = get_conv_out_size(out_size, pool_k, 0, pool_k)
        model_dims[i+1] = [model_dims[i+1], *out_size]
    return np.array(model_dims)
@abc.abstractmethod
def _new_model(self, **kwargs):
    """Build and return the network for a new task (subclass hook)."""
    raise NotImplementedError
def n_params(self, t_id):
    """
    Return the total number of parameters used by the lifelong learner for
    models up to the task with id `t_id` (included).

    Parameters and buffers shared between task models are counted once,
    thanks to the set.

    :param t_id: index of the last task to include.
    :return: total element count over unique parameters and buffers.
    """
    all_params = set()
    for i in range(t_id+1):
        model = self.get_model(i)
        all_params.update(model.parameters())
        all_params.update(model.buffers())
    return sum(map(torch.numel, all_params))
def new_params(self, t_id):
    """Number of parameters introduced specifically by task ``t_id``.

    For t_id == 0, ``n_params(-1)`` iterates over no models and returns 0,
    so the full first-model size is reported.
    """
    return self.n_params(t_id) - self.n_params(t_id-1)
def get_search_space(self):
    """Convert ``self.grid_params`` into a Ray Tune search space."""
    # params = {k: tune.grid_search(v) for k, v in self.grid_params.items()}
    # return params
    return convert_to_tune_search_space(self.grid_params)
def finish_task(self, dataset, task_id, viz=None, path=None):
    """
    Use the dataset to perform required post-task operations and compute
    statistics to track. Default implementation is a no-op.

    :param dataset: training dataset of the finished task.
    :param task_id: id of the finished task.
    :param viz: optional visdom environment for plots.
    :param path: optional output directory for artifacts.
    :return: dict of statistics (empty by default).
    """
    return {}
def forward(self, *input):
    """Not supported: inference goes through the per-task models."""
    raise NotImplementedError
def train_model_on_task(self, task, task_viz, exp_dir, use_ray,
                        use_ray_logging, grace_period,
                        num_hp_samplings, local_mode,
                        redis_address, lca_n, **training_params):
    """Train this learner's model on one task.

    Either launches a Ray Tune hyper-parameter search (``use_ray``) and
    loads the best checkpoint, or samples a single configuration from the
    search space and trains in-process.

    Args:
        task: dict describing the task (id, x_dim, n_classes, loss_fn,
            split_names, descriptor, ...).
        task_viz: visdom environment used for plotting/logging.
        exp_dir: experiment directory (model inits, tune logs).
        use_ray: run the HP search with Ray Tune when True.
        use_ray_logging: select the Ray-logging trainable variant.
        grace_period, num_hp_samplings, local_mode, redis_address: Ray/Tune
            options (grace_period and local_mode are currently unused here).
        lca_n: number of initial steps over which the training accuracy is
            averaged (Learning Curve Area metric).
        **training_params: forwarded to the training loop; several entries
            (past_tasks, normalize, augment_data, batch_sizes, ...) are
            popped along the way.

    Returns:
        dict of statistics: task-specific stats from ``finish_task`` plus
        'duration', 'params' and 'lca'.
    """
    logger.info("Training dashboard: {}".format(get_env_url(task_viz)))
    t_id = task['id']
    trainable = self.get_trainable(use_ray_logging=use_ray_logging)
    past_tasks = training_params.pop('past_tasks')
    normalize = training_params.pop('normalize')
    augment_data = training_params.pop('augment_data')
    transformations = []
    if augment_data:
        transformations.extend([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor()
        ])
    # augmentation applies to the train split only (index 0)
    t_trans = [[] for _ in range(len(task['split_names']))]
    t_trans[0] = transformations
    datasets = trainable._load_datasets(task,
                                        task['loss_fn'],
                                        past_tasks, t_trans, normalize)
    train_loader, eval_loaders = get_classic_dataloaders(datasets,
                                                         training_params.pop(
                                                             'batch_sizes'))
    model = self.get_model(task_id=t_id, x_dim=task['x_dim'],
                           n_classes=task['n_classes'],
                           descriptor=task['descriptor'],
                           dataset=eval_loaders[:2])

    if use_ray:
        if not ray.is_initialized():
            ray.init(address=redis_address)

        scheduler = None

        # tune.function marks the callables as non-searchable config values
        training_params['loss_fn'] = tune.function(
            training_params['loss_fn'])
        training_params['optim_func'] = tune.function(self.optim_func)

        # persist the initialized model so every trial starts identically
        init_model_path = os.path.join(exp_dir, 'model_initializations')
        model_file_name = '{}_init.pth'.format(training_params['name'])
        model_path = os.path.join(init_model_path, model_file_name)
        torch.save(model, model_path)

        training_params['model_path'] = model_path
        config = {**self.get_search_space(),
                  'training-params': training_params}
        if use_ray_logging:
            stop_condition = {'training_iteration':
                                  training_params['n_it_max']}
            checkpoint_at_end = False
            keep_checkpoints_num = 1
            checkpoint_score_attr = 'min-Val nll'
        else:
            stop_condition = None
            # loggers = [JsonLogger, MyCSVLogger]
            checkpoint_at_end = False
            keep_checkpoints_num = None
            checkpoint_score_attr = None

        trainable = rename_class(trainable, training_params['name'])
        experiment = Experiment(
            name=training_params['name'],
            run=trainable,
            stop=stop_condition,
            config=config,
            resources_per_trial=self.ray_resources,
            num_samples=num_hp_samplings,
            local_dir=exp_dir,
            loggers=(JsonLogger, CSVLogger),
            checkpoint_at_end=checkpoint_at_end,
            keep_checkpoints_num=keep_checkpoints_num,
            checkpoint_score_attr=checkpoint_score_attr)

        analysis = tune.run(experiment,
                            scheduler=scheduler,
                            verbose=1,
                            raise_on_failed_trial=True,
                            # max_failures=-1,
                            # with_server=True,
                            # server_port=4321
                            )

        os.remove(model_path)
        logger.info("Training dashboard: {}".format(get_env_url(task_viz)))

        all_trials = {t.logdir: t for t in analysis.trials}
        best_logdir = analysis.get_best_logdir('Val nll', 'min')
        best_trial = all_trials[best_logdir]

        # picked_metric = 'accuracy_0'
        # metric_names = {s: '{} {}'.format(s, picked_metric) for s in
        #                 ['Train', 'Val', 'Test']}

        logger.info('Best trial: {}'.format(best_trial))
        best_res = best_trial.checkpoint.result
        best_point = (best_res['training_iteration'], best_res['Val nll'])

        # y_keys = ['mean_loss' if use_ray_logging else 'Val nll', 'train_loss']
        y_keys = ['Val nll', 'Train nll']

        epoch_key = 'training_epoch'
        it_key = 'training_iteration'
        plot_res_dataframe(analysis, training_params['name'], best_point,
                           task_viz, epoch_key, it_key, y_keys)
        if 'entropy' in next(iter(analysis.trial_dataframes.values())):
            plot_res_dataframe(analysis, training_params['name'], None,
                               task_viz, epoch_key, it_key, ['entropy'])
        best_model = self.get_model(task_id=t_id)
        best_model.load_state_dict(torch.load(best_trial.checkpoint.value))

        train_accs = analysis.trial_dataframes[best_logdir]['Train accuracy_0']
        best_t = best_res['training_iteration']
        t = best_trial.last_result['training_iteration']
    else:
        # single random draw from the search space, trained in-process
        search_space = self.get_search_space()
        rand_config = list(generate_variants(search_space))[0][1]
        learner_params = rand_config.pop('learner-params', {})
        optim_params = rand_config.pop('optim')

        split_optims = training_params.pop('split_optims')
        if hasattr(model, 'set_h_params'):
            model.set_h_params(**learner_params)
        if hasattr(model, 'train_loader_wrapper'):
            train_loader = model.train_loader_wrapper(train_loader)

        loss_fn = task['loss_fn']
        if hasattr(model, 'loss_wrapper'):
            loss_fn = model.loss_wrapper(task['loss_fn'])

        prepare_batch = _prepare_batch
        if hasattr(model, 'prepare_batch_wrapper'):
            prepare_batch = model.prepare_batch_wrapper(prepare_batch, t_id)

        optim_fact = partial(set_optim_params,
                             optim_func=self.optim_func,
                             optim_params=optim_params,
                             split_optims=split_optims)
        if hasattr(model, 'train_func'):
            # model provides its own training loop
            f = model.train_func
            t, metrics, b_state_dict = f(train_loader=train_loader,
                                         eval_loaders=eval_loaders,
                                         optim_fact=optim_fact,
                                         loss_fn=loss_fn,
                                         split_names=task['split_names'],
                                         viz=task_viz,
                                         prepare_batch=prepare_batch,
                                         **training_params)
        else:
            optim = optim_fact(model=model)
            t, metrics, b_state_dict = train(model=model,
                                             train_loader=train_loader,
                                             eval_loaders=eval_loaders,
                                             optimizer=optim,
                                             loss_fn=loss_fn,
                                             split_names=task['split_names'],
                                             viz=task_viz,
                                             prepare_batch=prepare_batch,
                                             **training_params)
        train_accs = metrics['Train accuracy_0']
        best_t = b_state_dict['iter']
        if 'training_archs' in metrics:
            # architecture-search models: plot trajectory and top archs
            plot_trajectory(model.ssn.graph, metrics['training_archs'],
                            model.ssn.stochastic_node_ids, task_viz)
            weights = model.arch_sampler().squeeze()
            archs = model.ssn.get_top_archs(weights, 5)
            list_top_archs(archs, task_viz)
            list_arch_scores(self.arch_scores[t_id], task_viz)
            update_summary(self.arch_scores[t_id], task_viz, 'scores')

    # Learning Curve Area: mean train accuracy over the first lca_n steps
    if len(train_accs) > lca_n:
        lca_accs = []
        for i in range(lca_n + 1):
            if i in train_accs:
                lca_accs.append(train_accs[i])
            else:
                logger.warning('Missing step for {}/{} for lca computation'
                               .format(i, lca_n))
        lca = np.mean(lca_accs)
    else:
        # BUGFIX: np.float was a deprecated alias removed in NumPy 1.24;
        # the builtin float produces the same NaN value.
        lca = float('nan')

    stats = {}
    start = time.time()
    # train_idx = task['split_names'].index('Train')
    # train_path = task['data_path'][train_idx]
    # train_dataset = _load_datasets([train_path])[0]
    train_dataset = _load_datasets(task, 'Train')[0]
    stats.update(self.finish_task(train_dataset, t_id, task_viz,
                                  path='drawings'))
    stats['duration'] = {'iterations': t,
                         'finish': time.time() - start,
                         'best_iterations': best_t}
    stats['params'] = {'total': self.n_params(t_id),
                       'new': self.new_params(t_id)}
    stats['lca'] = lca
    return stats
def get_trainable(self, use_ray_logging):
    """Return the trainable class to use: the Ray-logging variant when
    *use_ray_logging* is truthy, otherwise the plain OS one."""
    return TrainLLModel if use_ray_logging else OSTrainLLModel
|
991,041 | 0cce3781f3f2171f07a0a195d10f917debe5aff5 | import math
def golden_section(f, a, b, tol=1e-8):
    """Minimize a unimodal function over [a, b] by golden-section search.

    Args:
        f: objective function of a single real variable.
        a: left endpoint of the search interval.
        b: right endpoint of the search interval.
        tol: stop once the bracket width |b - a| falls below this value.
            BUG FIX: the original used ``math.exp(-8)`` (~3.35e-4) here while
            its comment promised a tolerance of 1e-8; the literal 1e-8 is now
            the default.

    Returns:
        The midpoint of the final bracket — an approximation of the minimizer.
    """
    # Golden-ratio section fractions: g ~ 0.382, gn ~ 0.618.
    g = (3 - math.sqrt(5)) / 2
    gn = 1 - g
    n = 0  # iteration counter
    while abs(b - a) >= tol:
        # Both interior probes are recomputed from the current bracket each
        # iteration (the original's in-loop probe re-assignments were dead
        # code, since they were overwritten here anyway — removed).
        l = a + g * (b - a)   # lower probe
        m = a + gn * (b - a)  # upper probe
        n = n + 1
        if f(l) < f(m):
            b = m  # minimum lies in [a, m]
        else:
            a = l  # minimum lies in [l, b]
    print("Number of iterations = {}".format(n))
    return (a + b) / 2
#############################
# Example                    #
#############################
def f(x):
    """Objective for the demo: exp(-x) + x^2."""
    return (math.exp(-x)) + (x ** 2)

a, b = 0, 100  # search interval
minimum = golden_section(f, a, b)  # locate the minimizer on [a, b]
print(f"Minimum point is {minimum}")
print(f"Minimum value of the function is {f(minimum)}")
|
991,042 | 480fd619eb11e348ea037e2d53a8e33f69132ab7 | n,h = map(int,input().split())
# Greedy monster-slaying script (reads n katana stats and monster HP h from
# stdin, prints the minimum number of attacks). Inferred from the original
# Japanese comments: A[i] is damage per swing (reusable), B[i] is damage when
# thrown (katana is lost) — verify against the problem statement.
A = []
B = []
ans = 0  # number of attacks performed
for i in range(n):
    a,b = map(int,input().split())
    A.append(a)
    B.append(b)
# Find the maximum swing damage a.
maxa = max(A)
# Among katanas with a == maxa, pick the one with the smallest throw damage b;
# remember its index s and remove it from the throw pool (it is kept to wield).
minb = 10**10
s = 0
for i in range(n):
    if A[i] == maxa and B[i] < minb:
        minb = B[i]
        s = i
B.pop(s)
# Sort throw damages descending and throw every katana whose b exceeds maxa
# (such a throw beats any single swing); stop as soon as the monster falls.
B.sort(reverse = True)
for i in range(len(B)):
    if B[i] > maxa:
        h = h - B[i]
        ans += 1
        if h <= 0:
            print(ans)
            exit()
    else:
        break
# Spend one attack worth minb with the kept katana, then finish with repeated
# maxa-damage swings (ceiling division). NOTE(review): this assumes counting
# the kept katana's throw first is optimal — confirm against the problem.
h = h - minb
ans += 1
if h <= 0:
    print(ans)
else:
    ans += (h+(maxa-1)) // maxa
    print(ans)
991,043 | 7dbeb00e94fc50419a9c277380dd0f7a0e25d357 | import math
import numpy as np
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import SpectralClustering
class NcutSegmenter:
    """Segment a point cloud with spectral clustering (normalized-cuts style).

    Attributes:
        values: array-like of shape (n_points, n_features). Columns 3..5 are
            treated as the point normal and column 7 as a boundary
            probability — assumed layout, TODO confirm against the upstream
            feature extractor.
        k: number of nearest neighbours used to build the affinity graph.
    """

    def __init__(self, values, k):
        self.values = values
        self.k = k

    def customNcuts(self):
        """Return labels from classic Ncuts over a distance-weighted k-NN graph.

        Each existing edge weight (k-NN distance) is scaled by the larger
        boundary probability of its two endpoints; 4 clusters are extracted.
        """
        # k-NN graph with pairwise distances as edge weights.
        A = kneighbors_graph(self.values, self.k, mode='distance',
                             include_self=False).toarray()
        # Visit only the existing edges instead of the dense n*n double loop
        # (distances are non-negative, so nonzero == "> 0"). The original
        # also computed the normal angle here but never used it — dead work,
        # removed.
        for i, j in zip(*np.nonzero(A)):
            A[i][j] = max(self.values[i][7], self.values[j][7]) * A[i][j]
        # init SpectralClustering and cluster on the precomputed affinity.
        sc = SpectralClustering(4, affinity='precomputed', n_init=10,
                                assign_labels='discretize')
        labels = sc.fit_predict(A)
        return labels

    def segment_func1(self):
        """Return SpectralClustering labels using normal-angle edge weights."""
        A = self.normal_graph()
        sc = SpectralClustering(3, affinity='precomputed', n_init=10,
                                assign_labels='discretize')
        labels = sc.fit_predict(A)
        return labels

    def normal_graph(self):
        """Build the k-NN graph weighted by the angle between point normals."""
        A = kneighbors_graph(self.values, self.k, mode='connectivity',
                             include_self=False).toarray()
        print(" creating affinity matrix A (Normal angles)")
        # Connectivity entries are exactly 1, so the nonzero mask selects them.
        for i, j in zip(*np.nonzero(A)):
            v1 = (self.values[i][3], self.values[i][4], self.values[i][5])
            v2 = (self.values[j][3], self.values[j][4], self.values[j][5])
            magnitude1 = np.sqrt(v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2])
            magnitude2 = np.sqrt(v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2])
            # Clamp the cosine into [-1, 1]: rounding error can push it just
            # outside the range and make arccos return NaN, which would
            # poison the affinity matrix.
            cos_ang = np.clip(np.dot(v1, v2) / (magnitude1 * magnitude2), -1.0, 1.0)
            A[i][j] = np.arccos(cos_ang)
        print(" Done.")
        return A

    def segment_func2(self):
        """Return SpectralClustering labels using boundary-probability weights."""
        A = self.boundaryprob_graph()
        sc = SpectralClustering(3, affinity='precomputed', n_init=10,
                                assign_labels='discretize')
        labels = sc.fit_predict(A)
        return labels

    def boundaryprob_graph(self):
        """Build the k-NN graph weighted by the larger boundary probability of
        each edge's two endpoints."""
        A = kneighbors_graph(self.values, self.k, mode='connectivity',
                             include_self=False).toarray()
        print(" creating affinity matrix A (boundary prob)")
        for i, j in zip(*np.nonzero(A)):
            A[i][j] = max(self.values[i][7], self.values[j][7])
        print(" Done.")
        return A
|
991,044 | b15fd43e94854b2c2337210631cb18974f0fa441 | # Generated by Django 3.1.5 on 2021-03-13 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``User.own_reviews``: a non-editable counter (default 0) of the
    number of reviews written by the user."""

    dependencies = [
        ('users', '0011_auto_20210122_1154'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='own_reviews',
            field=models.PositiveIntegerField(default=0, editable=False, verbose_name='Number of personal reviews'),
        ),
    ]
|
991,045 | 30a26b3d17faf9a0f585c0cc41dfb6ac7643eb9b | # -*- coding: utf-8 -*-
"""
controller GUI
========
"""
import traceback, sys
from eurotherm_reader.GUI.styles import style, img
from eurotherm_reader.controller.eurotherm_controller import CK20
from eurotherm_reader.controller.serial_ports import SerialPorts
from eurotherm_reader.analysis.cal_analysis import ThermocoupleStatistics
from eurotherm_reader.GUI.auxilary_classes import MainWindow, Canvas, ThreadSignals, NewThread
from eurotherm_reader.GUI.about_window import About
from eurotherm_reader.GUI.data_analysis_window import TCDataAnalysis
from eurotherm_reader.GUI.help_window import Help
from eurotherm_reader.GUI.settings_window import Thermocouple_settings
from eurotherm_reader import __VERSION__
from datetime import datetime
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import (QThreadPool, Qt)
from PyQt5.QtWidgets import QDialog, QFileDialog, QMessageBox
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from time import sleep
class Ui_MainWindow(QDialog):
    """Main controller window.

    Samples temperatures from up to two CK20/Eurotherm serial devices on a
    worker thread, shows them on LCD widgets and a matplotlib canvas, logs
    them to CSV, and hosts the About/Help/Settings/Analysis dialogs.

    NOTE(review): ``Thermocouple_settings`` is imported at the top of this
    module *and* redefined later in this same file; at runtime the local
    class definition wins — confirm which implementation is intended.
    """

    version = __VERSION__  # displayed in the footer of the main window

    def setupUi(self, MainWindow):
        """Create and lay out every widget, menu and action; wire all signals."""
        self.threadpool = QThreadPool()  # initiate ThreadPool
        plt.style.use('seaborn-whitegrid')  # style to be used by all of the plots drawn
        self.temp_log = [[], []]  # init temp log (one series per channel)
        self.temp_repr = Canvas(MainWindow, width=4.8, height=3.5, dpi=120)
        self.temp_repr.move(191, 131)
        self.toolbar = NavigationToolbar(self.temp_repr, self)
        ### CALL THE WINDOWS ###
        self.about = About()
        self.help = Help()
        self.tc_settings = Thermocouple_settings()
        self.tc_data_analysis = TCDataAnalysis()
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.WindowModal)
        MainWindow.resize(805, 602)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(805, 602))
        MainWindow.setMaximumSize(QtCore.QSize(805, 602))
        MainWindow.setFocusPolicy(QtCore.Qt.ClickFocus)
        MainWindow.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(img["icon"]), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        icon.Active  # NOTE(review): bare attribute access, no effect — likely leftover
        MainWindow.setWindowIcon(icon)
        MainWindow.setWindowOpacity(0.95)
        MainWindow.setStyleSheet(style["main_window"])
        MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonFollowStyle)
        MainWindow.setDocumentMode(True)
        MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
        MainWindow.setDockNestingEnabled(True)
        MainWindow.setUnifiedTitleAndToolBarOnMac(False)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.com_port_select = QtWidgets.QComboBox(self.centralwidget)
        self.com_port_select.setGeometry(QtCore.QRect(20, 60, 111, 21))
        self.com_port_select.setWhatsThis("")
        self.com_port_select.setObjectName("com_port_select")
        self.go_button = QtWidgets.QPushButton(self.centralwidget)
        self.go_button.setGeometry(QtCore.QRect(20, 190, 51, 21))
        self.go_button.setObjectName("go_button")
        # button functionality: start the measurement loop on a worker thread
        self.go_button.clicked.connect(lambda: self.MainWindow_exec_thread(self.temp_mainloop))
        self.com_port_select_label = QtWidgets.QLabel(self.centralwidget)
        self.com_port_select_label.setGeometry(QtCore.QRect(20, 40, 111, 16))
        self.com_port_select_label.setObjectName("com_port_select_label")
        self.baudrate_select = QtWidgets.QComboBox(self.centralwidget)
        self.baudrate_select.setGeometry(QtCore.QRect(20, 110, 111, 21))
        self.baudrate_select.setObjectName("bauderate_select")
        self.baudrate_select.addItem("")
        self.baudrate_select.addItem("")
        self.baude_rate_select = QtWidgets.QLabel(self.centralwidget)
        self.baude_rate_select.setGeometry(QtCore.QRect(20, 90, 111, 16))
        self.baude_rate_select.setObjectName("baude_rate_select")
        self.timeout_label = QtWidgets.QLabel(self.centralwidget)
        self.timeout_label.setGeometry(QtCore.QRect(20, 140, 111, 16))
        self.timeout_label.setObjectName("timeout_label")
        self.temp_lcd1 = QtWidgets.QLCDNumber(self.centralwidget)
        self.temp_lcd1.setGeometry(QtCore.QRect(585, 50, 181, 51))
        self.temp_lcd1.setStyleSheet(style["temp_lcd_style"])
        self.temp_lcd1.setObjectName("temp_lcd")
        self.temp_lcd1.display(None)
        self.temp_lcd2 = QtWidgets.QLCDNumber(self.centralwidget)
        self.temp_lcd2.setGeometry(QtCore.QRect(190, 50, 181, 51))
        self.temp_lcd2.setStyleSheet(style["temp_lcd_style"])
        self.temp_lcd2.setObjectName("temp_lcd")
        self.temp_lcd2.display(None)
        self.temp_lcds = [self.temp_lcd1, self.temp_lcd2]  # indexed per channel
        self.timeout = QtWidgets.QLineEdit(self.centralwidget)
        self.timeout.setGeometry(QtCore.QRect(20, 160, 113, 20))
        self.timeout.setObjectName("timeout [s]")
        # preset value:
        self.timeout.setText(str(2))
        self.log_dir_path = r"../data"
        self.stop_button = QtWidgets.QPushButton(self.centralwidget)
        self.stop_button.setGeometry(QtCore.QRect(80, 190, 51, 21))
        self.stop_button.setObjectName("stop_button")
        # event stop_button: clears the loop flag (default arg state=False)
        self.stop_button.clicked.connect(self.state_temp_loop)
        self.msg_box = QtWidgets.QTextEdit(self.centralwidget)
        self.msg_box.setGeometry(QtCore.QRect(10, 230, 151, 251))
        self.msg_box.setStyleSheet(style["msg_box_style"])
        self.msg_box.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
        self.msg_box.setObjectName("msg_box")
        self.prev_event = []  # previous log in the msg box (per device)
        self.output_dir = QtWidgets.QPushButton(self.centralwidget)
        self.output_dir.setGeometry(QtCore.QRect(10, 490, 151, 21))
        self.output_dir.setStyleSheet("")
        self.output_dir.setObjectName("output_dir")
        self.output_dir.clicked.connect(self.get_log_dir)
        self.icon = QtWidgets.QLabel(self.centralwidget)
        self.icon.setGeometry(QtCore.QRect(360, 0, 181, 31))
        self.icon.setText("")
        self.icon.setPixmap(QtGui.QPixmap(img["icon"]))
        self.icon.setScaledContents(True)
        self.icon.setObjectName("CISEMI")
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setGeometry(QtCore.QRect(-20, 520, 831, 20))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.line.sizePolicy().hasHeightForWidth())
        self.line.setSizePolicy(sizePolicy)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.status_label = QtWidgets.QLabel(self.centralwidget)
        self.status_label.setGeometry(QtCore.QRect(630, 540, 61, 16))
        self.status_label.setObjectName("status_label")
        self.led_light = QtWidgets.QLabel(self.centralwidget)
        self.led_light.setGeometry(QtCore.QRect(770, 540, 21, 21))
        self.led_light.setText("")
        self.led_light.setPixmap(QtGui.QPixmap(img["red_led"]))
        self.led_light.setScaledContents(True)
        self.led_light.setObjectName("led_light")
        self.connect_status = QtWidgets.QLabel(self.centralwidget)
        self.connect_status.setGeometry(QtCore.QRect(690, 540, 61, 21))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.connect_status.sizePolicy().hasHeightForWidth())
        self.connect_status.setSizePolicy(sizePolicy)
        self.connect_status.setText("")
        self.connect_status.setPixmap(QtGui.QPixmap(img["offline"]))
        self.connect_status.setScaledContents(True)
        self.connect_status.setObjectName("connect_status")
        self.line_3 = QtWidgets.QFrame(self.centralwidget)
        self.line_3.setGeometry(QtCore.QRect(0, 30, 811, 20))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.line_3.sizePolicy().hasHeightForWidth())
        self.line_3.setSizePolicy(sizePolicy)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.vers_label = QtWidgets.QLabel(self.centralwidget)
        self.vers_label.setGeometry(QtCore.QRect(10, 540, 161, 16))
        self.vers_label.setObjectName("vers_label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 805, 21))
        self.menubar.setObjectName("menubar")
        # headers in the toolbar
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuhelp = QtWidgets.QMenu(self.menubar)
        self.menuhelp.setObjectName("menuhelp")
        self.menusettings = QtWidgets.QMenu(self.menubar)
        self.menusettings.setObjectName("menusettings")
        MainWindow.setMenuBar(self.menubar)
        # actions in each toolbar header
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actioncom_port = QtWidgets.QAction(MainWindow)
        self.actioncom_port.setObjectName("actioncom_port")
        self.actionexit = QtWidgets.QAction(MainWindow)
        self.actionexit.setObjectName("actionexit")
        self.wipe_display = QtWidgets.QAction(MainWindow)
        self.wipe_display.setObjectName("wipe_display")
        self.actionabout = QtWidgets.QAction(MainWindow)
        self.actionabout.setObjectName("actionabout")
        self.actionhelp = QtWidgets.QAction(MainWindow)
        self.actionhelp.setObjectName("actionhelp")
        self.action_tc_settings = QtWidgets.QAction(MainWindow)
        self.action_tc_settings.setObjectName("TC settings")
        self.actionoutput = QtWidgets.QAction(MainWindow)
        self.actionoutput.setObjectName("actionoutput")
        self.menuFile.addAction(self.actioncom_port)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionexit)
        self.menuhelp.addAction(self.actionabout)
        self.menuhelp.addAction(self.actionhelp)
        self.tools = QtWidgets.QAction(MainWindow)
        self.tools.setObjectName("Tools")
        # Add additional tabs to the settings tab
        self.menusettings.addAction(self.actionoutput)
        self.menusettings.addAction(self.wipe_display)
        self.menusettings.addAction(self.action_tc_settings)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menusettings.menuAction())
        self.menubar.addAction(self.menuhelp.menuAction())
        self.MainWindow_exec_thread(self.chk_ports)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # mouse over tool tip text
        self.output_dir.setToolTip("Select output directory for log files")
        self.go_button.setToolTip("Start temperature measurement")
        self.stop_button.setToolTip("Stop measuring")
        # Special variables
        self.len_of_com_list = 0  # update whenever the comlist has a change of length

    def state_temp_loop(self, state=False):
        """Set the run flag polled by temp_mainloop; call with no args (False)
        to request a stop."""
        self.temp_loop_actiator = state  # NOTE: 'actiator' (sic) — name is referenced elsewhere, kept as-is

    def retranslateUi(self, MainWindow):
        """Set all user-visible texts, shortcuts and menu-action handlers."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "TC controller"))
        self.go_button.setText(_translate("MainWindow", "&Go"))
        self.com_port_select_label.setText(_translate("MainWindow",
            "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">Port</span></p></body></html>"))
        # NOTE(review): "9200" is an unusual baud rate — confirm it is not a typo (19200?)
        self.baudrate_select.setItemText(0, _translate("MainWindow", "9200"))
        self.baudrate_select.setItemText(1, _translate("MainWindow", "9600"))
        self.baude_rate_select.setText(_translate("MainWindow",
            "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt;\">baudrate</span></p></body></html>"))
        self.timeout_label.setText(
            _translate("MainWindow", "<html><head/><body><p align=\"center\">timeout [s]</p></body></html>"))
        self.stop_button.setText(_translate("MainWindow", "Stop"))
        self.output_dir.setText(_translate("MainWindow", "output directory"))
        self.status_label.setText(_translate("MainWindow", "status:"))
        self.vers_label.setText(
            _translate("MainWindow", f"<html><head/><body><p>{Ui_MainWindow.version}</p></body></html>"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuhelp.setTitle(_translate("MainWindow", "&Help"))
        self.menusettings.setTitle(_translate("MainWindow", "Settings"))
        self.actioncom_port.setText(_translate("MainWindow", "T&C Test analysis"))
        self.actionexit.setText(_translate("MainWindow", "Exit"))
        self.actionabout.setText(_translate("MainWindow", "A&bout"))
        self.actionhelp.setText(_translate("MainWindow", "Help"))
        self.actionoutput.setText(_translate("MainWindow", "Output to"))
        self.wipe_display.setText(_translate("MainWindow", "Clear display"))
        self.action_tc_settings.setText(_translate("MainWindow", "TC settings"))
        # shortcuts
        self.actionexit.setShortcut("Esc")
        self.actionoutput.setShortcut("Ctrl+F")
        self.action_tc_settings.setShortcut("Ctrl+Q")
        self.wipe_display.setShortcut("Ctrl+X")
        # actions :
        self.actionexit.triggered.connect(MainWindow.close)
        self.actionabout.triggered.connect(lambda: self.about.show())
        self.actionhelp.triggered.connect(lambda: self.help.show())
        self.action_tc_settings.triggered.connect(lambda: self.tc_settings.show())
        self.actionoutput.triggered.connect(self.get_log_dir)
        self.wipe_display.triggered.connect(self._wipe_log)
        self.actioncom_port.triggered.connect(lambda: self.tc_data_analysis.show())

    def get_log_dir(self):
        """Open a directory picker and remember the chosen log destination."""
        # Wrapper for function calling
        dir_selected = str(QFileDialog.getExistingDirectory(self, 'Select Directory', self.log_dir_path))
        if dir_selected != "":
            self.log_dir_path = dir_selected
            print(f"Log files will now be saved in: {self.log_dir_path}")

    def _wipe_log(self):
        """
        PRIVATE METHOD
        Wipe log of temperature from script's memory. The data is still available in the csv files
        """
        self.temp_log.clear()
        self.temp_log = [[], []]  # init a 2D array

    def find_ports(self):
        """Probe attached serial devices and refresh the port combo boxes."""
        # purpose is to to find serial devices attached
        # I have given a really dumb solution for a problem of recognizing non controller/ET devices
        # It will only update the list if the number of devices changes
        # Should be fairly bugless however something must be lurking about for such a dumb solution
        # Anyone reading this- my sincere apology and try looking for HKEY_LOCAL_MACHINE PYQT solution in Stackoverflow
        self.com = SerialPorts()
        coms = self.com.get_com_list()
        ET_coms = []  # list of eurotherm/ck devices attached
        if len(coms) != self.len_of_com_list:
            self.len_of_com_list = len(coms)
            for com in coms:
                try:
                    CK20_caller = CK20.connect_to_device(com, 1)
                    # read only to probe the device; the value itself is unused
                    is_et_controller = CK20_caller.unit_id
                except Exception:
                    print(f"com {com} isn't a CK/Eurotherm device")
                    pass
                else:
                    ET_coms.append(com)
            if len(coms) > 0:
                self.com_port_select.clear()
                self.com_port_select.addItems(ET_coms)
                self.connect_status.setPixmap(QtGui.QPixmap(img["online"]))
                self.connect_status.resize(21, 21)
            else:
                self.com_port_select.clear()
                self.connect_status.setPixmap(QtGui.QPixmap(img["offline"]))
                self.connect_status.resize(60, 20)
            # try reading the com ports in the main window, if not possible pass
            try:
                # First clear the content of the boxes
                self.tc_settings.com_port_select_1.clear()
                self.tc_settings.com_port_select_2.clear()
                all_items = [self.com_port_select.itemText(i) for i in range(self.com_port_select.count())]
                self.tc_settings.com_port_select_1.addItems(all_items)
                self.tc_settings.com_port_select_2.addItems(all_items)
            except Exception as e:
                pass

    def temp_mainloop(self, cycles_to_respond=1000):
        """
        TEMP MAINLOOP
        =============
        This is the function of the controller.
        It connects to the serial ports, by creating up to 2 controller objects.
        The related functions are:
        sample_devices- sample temp from each device
        _wait_for_temp
        _temp_events
        End result is to display the temperature of both channels in:
        lcd_displays
        Canvas object (the plot)
        in the automatically generated csv's
        :param cycles_to_respond: cycles to get a temp read
        """
        if self.com_port_select.count() > 0:
            report = "Begin measuring"
            self.go_button.setEnabled(False)  # don't allow to create new threads
            self.state_temp_loop(True)
            self.append_msg_box(report)
            while self.temp_loop_actiator:
                for num in range(self.com_port_select.count()):
                    # self.MainWindow_exec_thread(lambda: self.sample_devices(cycles_to_respond, num))
                    self.sample_devices(cycles_to_respond, num)
                self.temp_repr.plot_tc_temp(self.temp_log)
                timeo = float(self.timeout.text())  # timeout
                sleep(timeo)
            # -------- record data once in msg box --------
            self.go_button.setEnabled(True)
            self.led_light.setPixmap(QtGui.QPixmap(img["red_led"]))
        else:
            self.append_msg_box("No device is connected")

    def _wait_for_temp(self, device, cycles_to_respond):
        """ PRIVATE METHOD """
        # purpose to not crash if there's a momentary loss of connection with serial port
        wait_temp_response = 0
        try:
            temp = device.get_temp()  # get first read
        except Exception as e:
            temp = 0
        while temp == 0 and wait_temp_response < cycles_to_respond:
            temp = device.get_temp()
            wait_temp_response += 1
        return temp

    @staticmethod
    def check_ports_in_combo(tc_port_combo: "list", port: "str") -> "str":
        """
        Shameful method to help compare the current port in the loop with the tc_port_couple list.
        This could be done much more efficient and cleaner so TODO
        Args:
        tc_port_combo (*list): list from the tc_settings window
        port (*str): current port being used
        Return:
        (*str)
        """
        for combo in tc_port_combo:
            if port in combo:
                return combo
        return ''

    def sample_devices(self, cycles_to_respond, num):
        """Take one temperature sample from device *num*, reconnecting on error,
        then update the LCD/plot logs and the CSV file."""
        # --- dual channel ---
        # purpose to break sample_temp in size for easier debugging
        # make single measurement of temp
        try:
            port = str(self.com_port_select.itemText(num))
            temp = self._wait_for_temp(self.device[num], cycles_to_respond)
        except Exception as e:
            # NOTE(review): if itemText() itself raised, `port` is unbound here
            # and the next line raises NameError — confirm intended.
            device_settings = (port, 1)
            CK20_caller = CK20.connect_to_device(*(device_settings), MAX_ATTEMPTS=10000)
            if CK20_caller is None:
                self.state_temp_loop()
                msg = "connection fail"
                self.append_msg_box(msg)
                return
            if type(e) in (AttributeError, NameError):
                print(type(e))
                self.device = []
                self.device.append(CK20_caller)
            elif type(e) is IndexError:
                self.device.append(CK20_caller)
            else:
                print(type(e))
                self.led_light.setPixmap(QtGui.QPixmap(img["red_led"]))
                self.temp_lcds[num].display("ERROR")
                self.append_msg_box(f"{e.args}")
                return
            self.device[num].baudrate = int(self.baudrate_select.currentText())
            self.device[num].close_port_after_each_call = True
            temp = self._wait_for_temp(self.device[num], cycles_to_respond)
        else:
            # block intended to try to reconnect in case connection is lost for several cycles < 1000
            try:
                self._temp_events(temp, self.device[num], num)
            except UnboundLocalError:
                temp = 0
                self._temp_events(temp, self.device[num], num)
        finally:
            # search for the port-tc combo and add to the log files
            self.temp_log[num].append(temp)  # logging the temperature locally (could be directly to csv)
            com_port_pairs = self.tc_settings.tc_port_couple  # for clarity
            tc_port_pair = self.check_ports_in_combo(com_port_pairs, port)  # get '' or port-thermocouple pair str
            self.device[num].log_temp(temp, save_dir=self.log_dir_path, tc_port_couple=tc_port_pair)
            # purpose- updating a msg in the msg_box only once
            try:
                if self.device[num].event != self.prev_event[num] and self.device[num].event != None:
                    self.append_msg_box(self.device[num].event)
                    self.prev_event[num] = self.device[num].event
            except (NameError, AttributeError, IndexError):
                self.prev_event.append(None)

    def _temp_events(self, temp, device, num, TEMP_MAX=1700) -> None:
        """ PRIVATE METHOD """
        # this method applies for when the temperature measurement is on
        # deals with all related events
        # TODO- this method should be in eurotherm_controllers, not here- find way to implement it there
        if temp == 0:
            self.led_light.setPixmap(QtGui.QPixmap(img["red_led"]))
        elif temp > TEMP_MAX:
            device.event = f"TC not connected to device {device.unit_id}"
            self.led_light.setPixmap(QtGui.QPixmap(img["red_led"]))
        elif not self.temp_lcds[num].checkOverflow(temp):
            device.event = f"TC connected to device {device.unit_id}"
            self.temp_lcds[num].display(temp)
            self.led_light.setPixmap(QtGui.QPixmap(img["green_led"]))
        elif self.temp_lcds[num].checkOverflow(temp):
            self.temp_lcds[num].display("ERROR")
            device.event = "An unexpected error has occured"

    def print_output(self, s):
        """Thread-result callback: echo the worker's result to stdout."""
        print(s)

    def append_msg_box(self, msg, strf="%H:%M:%S"):
        """Append *msg* to the message box, prefixed with the current time."""
        # convenience function for appending msgs in the msg_box widget
        now = datetime.now()
        current_time = now.strftime(strf)
        self.msg_box.append(f"{current_time}: {msg}")

    def thread_complete(self, fn):
        """Thread-finished callback: report that *fn* has returned."""
        reprt = f"{fn.__name__} finished"
        print(reprt)
        self.append_msg_box(reprt)

    def MainWindow_exec_thread(self, fn):
        """
        PURPOSE:
        get any function and execute it with a new thread
        :arg: function to execute with a thread
        """
        temp_meas = NewThread(fn)  # Any other args, kwargs are passed to the run function
        temp_meas.signals.result.connect(self.print_output)
        temp_meas.signals.finished.connect(lambda: self.thread_complete(fn))
        # Execute
        self.threadpool.start(temp_meas)

    def chk_ports(self, s=0.1):
        """Background loop: re-scan the serial ports every *s* seconds, forever."""
        # Thread to check communication port at an interval s
        while True:
            sleep(s)
            self.find_ports()
class Thermocouple_settings(QtWidgets.QTableWidget):
    """
    Settings of thermocouples
    =========================
    Dialog for naming the two thermocouples and assigning them to serial ports;
    the validated result is exposed as ``self.tc_port_couple``.

    NOTE(review): this class shadows the ``Thermocouple_settings`` imported at
    the top of the module from ``settings_window`` — at runtime this local
    definition is the one used; confirm which is intended.

    Args:
        TC1 name (*str)
        TC2 name (*str)
        TC1 port (*str)
        TC2 port (*str)
    """

    def __init__(self, parent=None):
        super(Thermocouple_settings, self).__init__(parent)
        # icon
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(img["icon"]), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        icon.Active  # NOTE(review): bare attribute access, no effect — likely leftover
        self.setWindowIcon(icon)
        # set style sheet
        self.setStyleSheet(style["main_window"])
        self.setObjectName("TC Settings")
        self.resize(248, 214)
        self.gridLayout = QtWidgets.QGridLayout(self)
        self.gridLayout.setObjectName("gridLayout")
        self.tc_settings_label = QtWidgets.QLabel(self)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tc_settings_label.sizePolicy().hasHeightForWidth())
        self.tc_settings_label.setSizePolicy(sizePolicy)
        self.tc_settings_label.setObjectName("tc_settings_label")
        self.gridLayout.addWidget(self.tc_settings_label, 0, 0, 1, 2)
        self.line = QtWidgets.QFrame(self)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.gridLayout.addWidget(self.line, 1, 0, 1, 2)
        self.device_1_box_label = QtWidgets.QLabel(self)  # TC 1
        self.device_1_box_label.setObjectName("device_1_box_label")
        self.gridLayout.addWidget(self.device_1_box_label, 2, 0, 1, 1)
        self.device_1_box = QtWidgets.QLineEdit(self)
        self.device_1_box.setObjectName("device_1_box")
        self.gridLayout.addWidget(self.device_1_box, 2, 1, 1, 1)
        self.device_2_box_label = QtWidgets.QLabel(self)  # TC 2
        self.device_2_box_label.setObjectName("device_2_box_label")
        self.gridLayout.addWidget(self.device_2_box_label, 3, 0, 1, 1)
        self.device_2_box = QtWidgets.QLineEdit(self)
        self.device_2_box.setObjectName("device_2_box")
        self.gridLayout.addWidget(self.device_2_box, 3, 1, 1, 1)
        self.port_1_box_label = QtWidgets.QLabel(self)
        self.port_1_box_label.setObjectName("port_1_box_label")
        self.gridLayout.addWidget(self.port_1_box_label, 4, 0, 1, 1)
        self.com_port_select_1 = QtWidgets.QComboBox(self)
        self.com_port_select_1.setWhatsThis("")
        self.com_port_select_1.setObjectName("com_port_select")
        self.gridLayout.addWidget(self.com_port_select_1, 4, 1, 1, 1)
        self.device_2_port_label = QtWidgets.QLabel(self)
        self.device_2_port_label.setObjectName("device_2_port_label")
        self.gridLayout.addWidget(self.device_2_port_label, 5, 0, 1, 1)
        self.com_port_select_2 = QtWidgets.QComboBox(self)
        self.com_port_select_2.setWhatsThis("")
        self.com_port_select_2.setObjectName("com_port_select_2")
        self.gridLayout.addWidget(self.com_port_select_2, 5, 1, 1, 1)
        self.default_vals = QtWidgets.QCheckBox(self)
        self.default_vals.setObjectName("default_vals")
        self.gridLayout.addWidget(self.default_vals, 6, 0, 1, 2)
        # Setup for the finish dialog
        self.tc_dialog_box = QtWidgets.QDialogButtonBox(self)
        self.tc_dialog_box.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.tc_dialog_box.setObjectName("tc_dialog_box")
        # Exit dialog
        self.tc_dialog_box.rejected.connect(self.close)
        self.tc_dialog_box.accepted.connect(self.approve_changes)
        self.gridLayout.addWidget(self.tc_dialog_box, 7, 1, 1, 1)
        self.retranslateUi()
        QtCore.QMetaObject.connectSlotsByName(self)
        # init tc_port_couple- this is the combination of the port number and TC name picked
        self.tc_port_couple = ['', '']  # a 2 cell list of strings

    def retranslateUi(self):
        """Set all user-visible texts of the dialog."""
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate("Thermocouple settings", "TC Settings"))
        self.tc_settings_label.setText(_translate("Thermocouple",
            "<html><head/><body><p align=\"center\"><span style=\" font-size:12pt; font-weight:600;\">Thermocouple settings</span></p></body></html>"))
        self.device_1_box_label.setText(
            _translate("Form", "<html><head/><body><p align=\"center\">TC 1 name:</p></body></html>"))
        self.device_2_box_label.setText(
            _translate("Form", "<html><head/><body><p align=\"center\">TC 2 name:</p></body></html>"))
        self.port_1_box_label.setText(
            _translate("Form", "<html><head/><body><p align=\"center\">Port 1:</p></body></html>"))
        self.device_2_port_label.setText(
            _translate("Form", "<html><head/><body><p align=\"center\">Port 2:</p></body></html>"))
        self.default_vals.setText(_translate("Form", "use default values"))

    def approve_changes(self):
        """
        Approve changes in the settings window: validate names/ports, ask for
        confirmation, then store the "port-name-" pairs in ``tc_port_couple``.
        """
        name1 = self.device_1_box.displayText()
        name2 = self.device_2_box.displayText()
        port1 = self.com_port_select_1.currentText()
        port2 = self.com_port_select_2.currentText()
        # Checkbox not enabled
        if not self.default_vals.isChecked():
            if name1 == name2:
                msg_header = "Name error"
                if name1 == '':
                    msg = "Please name your thermocouples or use default values"
                else:
                    msg = f"Both thermocouples are named {name1}.\n" \
                          f"Please make sure they have different names"
                QMessageBox.critical(self, msg_header, msg, QMessageBox.Ok)
                return
            if port1 == port2:
                msg_header = f"Port Error"
                msg = f"Both thermocouples are assigned to the same port {port1}.\n" \
                      f"Please assign them different ports"
                if port1 == '':
                    msg = "No ports were assigned. Please connect a controller to the USB port"
                QMessageBox.critical(self, msg_header, msg, QMessageBox.Ok)
                return
        answer = QMessageBox.question(
            None, "Approve changes",
            "Are you sure you want to proceed with these changes?",
            QMessageBox.Ok | QMessageBox.Cancel
        )
        if answer & QMessageBox.Ok:
            if not self.default_vals.isChecked():
                name_and_port1 = f"{port1}-{name1}-"
                name_and_port2 = f"{port2}-{name2}-"
                self.tc_port_couple = [name_and_port1, name_and_port2]
            else:
                self.tc_port_couple = ['', '']
            print(self.tc_port_couple)
            self.close()
        elif answer & QMessageBox.Cancel:
            pass
def gui_main():
    """Entry point: build the dark-themed Qt application and run it."""
    app = QtWidgets.QApplication(sys.argv)
    app.setStyle("Fusion")
    # The Fusion style honours custom palettes, so a dark palette gives a
    # consistent dark theme on every platform.
    dark_palette = QPalette()
    for role, color in (
        (QPalette.Window, QColor(53, 53, 53)),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, QColor(53, 53, 53)),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, QColor(53, 53, 53)),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, QColor(42, 130, 218)),
        (QPalette.Highlight, QColor(42, 130, 218)),
        (QPalette.HighlightedText, Qt.black),
    ):
        dark_palette.setColor(role, color)
    app.setPalette(dark_palette)
    main_window = MainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # Launch the raw designer UI directly (without the dark theme that
    # gui_main applies).
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
991,046 | f30abb71fc7d65d784a82c458e5e85ee7d0b5af8 | from event import event
from event import subscriber
from event import publisher
class Ev(event.Event):
    """Concrete event named "event1111" with pass-through payload
    (de)serialization."""
    def __init__(self):
        super(Ev, self).__init__("event1111")
    def serialize_payload(self):
        # Payload is used as-is; no encoding step.
        return self.payload
    def deserialize_payload(self, payload):
        return payload
class Disp(subscriber.Subscriber):
    """Subscriber that simply prints every event it receives."""
    def __init__(self):
        super(Disp, self).__init__("disp")
    def receive_event(self, event_serialized):
        # Python 2 print statement: this module is Python 2 only.
        print "event occured", event_serialized
if __name__ == '__main__':
    # Wire a publisher to a printing subscriber and publish one event.
    ev = Ev()
    p = publisher.Publisher([ev])
    s = Disp()
    p.register(ev, s)
    ev.payload = "hello"
    p.publish(ev)
|
991,047 | d0905618c803b10443a39e1e3cc116d2c5f7c64c | __author__ = 'branw'
import os
import base64
import tempfile
from openstack import connection
import wsme
from wsme.rest import json as wsme_json
class Base(wsme.types.Base):
    """wsme.types.Base with dict <-> object conversion helpers."""

    def to_dict(self):
        """Serialize this object to a plain dict via wsme's JSON encoder."""
        return wsme_json.tojson(self.__class__, self)

    @classmethod
    def to_obj(cls, values):
        """Build an instance of *cls* from the mapping *values*.

        wsme_json.fromjson cannot be used here because it doesn't work
        correctly with read-only attributes (wsme raises exception that
        read-only property is violated when fromjson is used).
        """
        kwargs = {}
        for attr in wsme.types.list_attributes(cls):
            val = values.get(attr.name, wsme.types.Unset)
            # Recursively wrap nested complex wsme types.
            if val and wsme.types.iscomplex(attr.datatype):
                val = attr.datatype(**val)
            kwargs[attr.name] = val
        return cls(**kwargs)

    @classmethod
    def to_wsme_model(cls, obj):
        """Copy same-named attributes of *obj* into a new wsme instance."""
        kwargs = {attr.name: getattr(obj, attr.name, None)
                  for attr in wsme.types.list_attributes(cls)}
        return cls(**kwargs)
class Credentials(Base):
    """Keystone v3 credential set used to build an openstack Connection."""
    auth_url = wsme.wsattr(str, mandatory=True)
    username = wsme.wsattr(str, mandatory=True)
    password = wsme.wsattr(str, mandatory=True)
    project = wsme.wsattr(str, mandatory=True)
    insecure = wsme.wsattr(bool, mandatory=True)
    domain = wsme.wsattr(str, mandatory=False)
    cacert = wsme.wsattr(str, mandatory=False)  # base64-encoded PEM, optional
def get_cafile(cacert, prefix_str):
    """Write a base64-encoded CA certificate to a temporary file.

    :param cacert: base64-encoded certificate content, or a falsy value
        when no certificate is configured.
    :param prefix_str: prefix for the temporary file name.
    :returns: path of the temporary file, or None when *cacert* is falsy.
        The caller is responsible for deleting the file.
    """
    cafile = None
    if cacert:
        cert_content = base64.b64decode(cacert)
        # Fix: use the system default temp directory instead of the
        # hard-coded developer-machine path that was here before.
        fd, cafile = tempfile.mkstemp(prefix=prefix_str)
        os.write(fd, cert_content)
        os.close(fd)
    return cafile
def create_conn(credentials):
    """Build an openstack SDK Connection from a Credentials object.

    NOTE(review): `verify` is hard-coded to a developer-machine cert path;
    the intended insecure/cafile logic is kept commented out below.
    The cafile written by get_cafile is currently never used or removed.
    """
    cafile = get_cafile(credentials.cacert, "openstack_cert")
    try:
        print credentials
        conn = connection.Connection(auth_url=credentials.auth_url,
                                     username=credentials.username,
                                     password=credentials.password,
                                     project_name=credentials.project,
                                     verify= "/Users/branw/Documents/onesafe/pystudy/vio.crt",
                                     #verify= True,
                                     #verify= False if credentials.insecure else cafile,
                                     #cert=cafile,
                                     user_domain_name=credentials.domain,
                                     project_domain_name=credentials.domain)
    except Exception as e:
        raise e
    # finally:
    #     if cafile is not None:
    #         os.remove(cafile)
    return conn
# Hard-coded lab credentials used by authenticate() below.
credentials = Credentials()
credentials.auth_url = "http://10.111.88.103:35357/v3"
credentials.username = "admin"
credentials.password = "admin"
credentials.project = "admin"
credentials.insecure = False
# Base64-encoded CA certificate (single string built with line continuations).
credentials.cacert = "MIIDojCCAoqgAwIBAgIJAN+BgmWpSPTYMA0GCSqGSIb3DQEBCwUAMGYxCzAJBgNV\
BAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJUGFsbyBBbHRvMQ8wDQYDVQQK\
DAZWTXdhcmUxDDAKBgNVBAsMA1ZJTzEXMBUGA1UEAwwOMTAuMTExLjEwNi4xOTEw\
HhcNMTcwNDEyMDg1ODUyWhcNMjcwNDEwMDg1ODUyWjBmMQswCQYDVQQGEwJVUzEL\
MAkGA1UECAwCQ0ExEjAQBgNVBAcMCVBhbG8gQWx0bzEPMA0GA1UECgwGVk13YXJl\
MQwwCgYDVQQLDANWSU8xFzAVBgNVBAMMDjEwLjExMS4xMDYuMTkxMIIBIjANBgkq\
hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwvs4OU+OUFt1cflgTQEWkSBwsmgiTB21\
h+qOiFtOGpA6StLHvss/hBlc8XDIWE+3OxLyMNSv9I5SYDnND37dzMfKpU3NHMIN\
+F2ud0AELt2o/pALZySV6cU7rFFRPdYCpFf0V5Vn+qeJOQoXN7zdH8LDAuWVaCdf\
LQXeGay+J7NMneYpLPww3xiMfiVNTYxTGMZPvOklgt1Vk/4PnL7P/Juh7SgYaphC\
HptceFVTMYltBl+vFmJrzq+SHkkgk99YeF8YckJaTpTWvHFDLpECYh8s7u6B1fnB\
VN/09/bULSHHIwgF2MImPuFnkZrAcwjlWt3ksC8tx1Veoig+yb1bvwIDAQABo1Mw\
UTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBSQI0kKb0GtlzaEVWGKMe32NoPFHDAV\
BgNVHREEDjAMhwQKb2q/hwQKb1hnMAsGA1UdDwQEAwICjDANBgkqhkiG9w0BAQsF\
AAOCAQEAd7FGlOJ0hsT9TsqwJjkLVTefSmReDkc3nUf4it3up/PdxjXvdWPilbUT\
d75SdVZw3zQm++vv2BwWl3QZEX2AleHOgXKVdWBhXNrZYtw11MYzZ+Kb6ZSEyIfV\
FtE6krmz8CsB04cs1xwwJ1cUyku2fOLkFYeLAWCeIVEVHpk0QVj2E9NJC3ZLPpPf\
tvWHFDhnrPNhYQIS9zChHi4ggU/P4G6PvFgNUwvOXMerI6W4xyuEXtxjKsWGocZx\
7570mTN31VPrC9/vuJC2f0YKigmfM7316pt4bN31I4z4i+fLPdRmVtmzTd4ONabG\
oGnptHcbIUIsY0qACJ26G3VNUhXQyQ=="
def authenticate():
    """Authorize against Keystone and print the token, project id and all
    image names (Python 2 print statements)."""
    conn = create_conn(credentials)
    conn.authorize()
    token = conn.session.get_token()
    project_id = conn.session.get_project_id()
    print token
    print project_id
    print [img.name for img in conn.image.images()]
authenticate() |
991,048 | 47d7d679efb2415d741f3aee91db74e5c677b252 | import numpy as np
from downsample import *
import sys
sys.path.append('../') # import from folder one directory out
import util
import pandas as pd
import data_handler
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif', size=12)
# Load the star-light training set and sweep downsampling parameters:
# 100 reduction factors from 1 to 200, plus several shift offsets.
df = util.csv_to_df('../Datasets/star_light/train.csv')
reductions = np.linspace(1, 200, 100)
shifting = [0, 20, 40, 80, 200]
test_classification(df, reductions, shifting) |
991,049 | a7a95cf5276a5193eb586eaf63b0d97928a52186 | from .registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module
class VOCDataset(XMLDataset):
    """Pascal-VOC-format dataset with a project-specific label set.

    NOTE(review): the original file re-assigned CLASSES many times in a
    row, so only the last assignment ever took effect.  The superseded
    label sets are kept below as comments for easy switching; behavior is
    unchanged (the active tuple is the one that was last before).
    """

    # CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
    #            'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
    #            'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
    #            'tvmonitor')
    # CLASSES = ('umbrella', 'liquid', 'lighter', 'scissors', 'knife',
    #            'cellphone', 'battery', 'gun', 'none-liquid', 'pliers', 'wrench')
    # CLASSES = ('gun', 'dagger', 'knife', 'bottle', 'light_bottle', 'scissors')
    # CLASSES = ('hammer', 'scissors', 'knife', 'bottle', 'battery', 'firecracker',
    #            'gun', 'grenade', 'bullet', 'lighter', 'ppball', 'baton')
    # CLASSES = ('hammer', 'scissors', 'knife', 'bottle', 'battery', 'firecracker',
    #            'gun', 'grenade', 'bullet')
    # CLASSES = ('gun', 'knife', 'bottle', 'scissors', 'dagger')
    # CLASSES = ('hammer', 'scissors', 'knife', 'bottle', 'battery', 'firecracker',
    #            'gun', 'grenade', 'bullet', 'lighter', 'ppball', 'baton')
    CLASSES = ('hammer', 'scissors', 'knife', 'bottle', 'battery', 'firecracker',
               'gun', 'grenade')

    def __init__(self, **kwargs):
        """Infer the VOC release year (2007/2012) from the image prefix."""
        super(VOCDataset, self).__init__(**kwargs)
        if 'VOC2007' in self.img_prefix:
            self.year = 2007
        elif 'VOC2012' in self.img_prefix:
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')
|
991,050 | fd26947f7bc31f1f4f02891d74181b50b007635f | #!/usr/bin/env python
"""
nxosm was written to extract and build a road network using OSM data;
to build adjacency matrix with geolocation information for each node.
Citation Format:
Legara, E.F. (2014) nxosm source code (Version 2.0) [source code].
http://www.erikalegara.net
"""
__author__ = "Erika Fille Legara"
__date__ = "22 January 2014"
__programname__ = "nxosm.py"
__codeversion__ = "2.0"
__status__ = "Complete"
__datasource__ = "http://labs.geofabrik.de/haiyan/"
from itertools import tee, izip
from osmread import parse_file, Way, Node #https://github.com/dezhin/osmread
# import shapefile
import networkx as nx
# NOTE(review): `global` at module scope is a no-op; kept for fidelity.
global highway_types
'''
For more highway types, visit http://wiki.openstreetmap.org/wiki/Key:highway
'''
highway_types = ['secondary', 'secondary_link', 'primary', 'primary_link',\
'tertiary', 'tertiary_link', 'motorway','motorway_link','trunk','trunk_link',\
'residential','road','track','Track']
def load_osm_pbf():
    """Return an osmread entity iterator over the default 'latest.osm.pbf'."""
    return parse_file('latest.osm.pbf')
def load_osm(path):
    """Return an osmread entity iterator over the OSM file at *path*."""
    return parse_file(path)
# def load_intersection_nodes_file():
# '''
# The intersection file can be generated using QGIS.
# '''
# #shp = shapefile.Reader("small_set_intersection.shp")
# shp = shapefile.Reader("For HaiyanPH Paper.shp")
# shp_iter = shp.iterRecords()
# return shp_iter
# def load_road_shapefile():
# shp = shapefile.Reader("latest.shp/roads")
# fields = shp.fields
# return shp, fields
def pairwise(nodes):
    ''' From a list of nodes = [1,2,3,4,5],
    produce: (1,2), (2,3), (3,4), (4,5)'''
    # Python 2 idiom: two tees of the iterator, second advanced by one,
    # zipped lazily with izip.
    a, b = tee(nodes)
    next(b, None)
    return izip(a,b)
def build_road_network(pbf):
    """Build an undirected road graph from an osmread entity stream.

    Returns (G, node_locations) where G has one edge per consecutive
    node pair of each accepted highway Way (edge attr 'tipo' = highway
    type) and node_locations maps node id -> (lon, lat).

    NOTE(review): `G.edge[u][v]` is the networkx 1.x API.
    """
    G = nx.Graph()
    node_locations = {}
    for entity in pbf:
        if isinstance(entity, Way) and 'highway' in entity.tags:
            if entity.tags['highway'] in highway_types:
                nodes = list(entity.nodes)
                edges = pairwise(nodes)
                edges = [e for e in edges]
                G.add_edges_from(edges)
                for e in edges:
                    G.edge[e[0]][e[1]]['tipo'] = entity.tags['highway']
        elif isinstance(entity,Node):
            node_locations[entity.id] = (entity.lon, entity.lat)
    return G, node_locations
def build_road_network_2(fil):
    """Richer variant of build_road_network.

    Returns (G, node_dicts, node_locations, segments):
      - G: graph with per-edge 'highway_type' and 'status' attributes,
      - node_dicts: per-node dicts (id, lat/lon, barrier, way_part flag),
      - node_locations: node id -> (lat, lon)  (note: reversed order
        compared with build_road_network),
      - segments: per-edge dicts mirroring the edge attributes.

    NOTE(review): `ndes[n]['way_part'] = True` assumes every node of a Way
    was already seen as a Node entity (OSM files list nodes before ways);
    a Way referencing a missing node would raise KeyError — confirm input
    files guarantee this.
    """
    G = nx.Graph()
    ndes = {}
    segments = []
    node_locations = {}
    for entity in fil:
        if isinstance (entity, Way) and 'highway' in entity.tags:
            #print "highway", entity.tags['highway']
            if entity.tags['highway'] in highway_types:
                nodes = list(entity.nodes)
                for n in nodes:
                    # Set flag that node n does belong to the road network
                    ndes[n]['way_part'] = True
                edges = pairwise(nodes)
                edges = [e for e in edges]
                G.add_edges_from(edges)
                for e in edges:
                    segment = {}
                    segment['node_1'] = e[0]
                    segment['node_2'] = e[1]
                    segment['highway_type'] = entity.tags['highway']
                    segment['status'] = 'ok'
                    G.edge[e[0]][e[1]]['highway_type'] = entity.tags['highway']
                    G.edge[e[0]][e[1]]['status'] = 'ok'
                    # initialize road segment status tag such that we don't get a KeyError
                    if 'status' in entity.tags:
                        G.edge[e[0]][e[1]]['status'] = entity.tags['status']
                        segment['status'] = entity.tags['status']
                    segments.append(segment)
        elif isinstance(entity, Node):
            #print "Node!"
            #print entity.id. entity.lat, entity.lon
            node_locations[entity.id] = (entity.lat, entity.lon)
            #print node_locations[entity.id]
            nde = {}
            nde['nodeid'] = entity.id
            #print nde['nodeid']
            nde['latitude'] = entity.lat
            #print nde['latitude']
            nde['longitude'] = entity.lon
            #print nde['longitude']
            # Default node isn't a barrier so None. set otherwise
            nde['barrier'] = 'None'
            if 'barrier' in entity.tags:
                nde['barrier'] = entity.tags['barrier']
            #print nde['barrier']
            #print nde
            # Set flag that this node does not belong to the road network.
            # Since in an OSM file nodes are listed ahead of ways, the way-handling code
            # above will set to true.
            nde['way_part'] = False
            ndes[entity.id] = nde # check for bug later
    # Before returning, remove all nodes in ndes which are not in G.
    # nodes_in_graph = [ndes[entry] for entry in G.nodes()]
    #return G, nodes_in_graph, node_locations, segments
    return G, ndes.values(), node_locations, segments
def get_nodes_locations(G):
    """Re-scan the default .pbf and map each node id present in *G* to
    its (lon, lat).

    NOTE(review): `entity.id in nodes` tests membership in a list — O(n)
    per entity; slow on large files.
    """
    pbf = load_osm_pbf()
    nodes = G.nodes()
    node_attrib = {}
    for entity in pbf:
        if isinstance(entity, Node) and entity.id in nodes:
            node_attrib[entity.id] = (entity.lon, entity.lat)
    return node_attrib
def reduce_nodes(G):
    '''
    The reduce_nodes() function reduces the bigger and more complete
    graph G by identifying all nodes with degree 2 and removing them
    in the graph, and connecting their adjacent nodes to one another.
    Only chains whose two incident edges share the same 'tipo' (highway
    type) are collapsed.  Returns a reduced copy; G is not modified.
    '''
    g = G.copy()
    # All nodes that are simple pass-through points (exactly two edges).
    degree2 = [n for n in G.nodes() if G.degree(n) == 2]
    for v in degree2:
        edges = g.edges(v)
        try:
            first_edge = edges[0]
            last_edge = edges[-1]
            type1 = G[first_edge[1]][first_edge[0]]['tipo']
            type2 = G[last_edge[0]][last_edge[1]]['tipo']
            if type1 == type2:
                # Bypass v: connect its two neighbours directly, keeping
                # the shared highway type.
                g.add_edge(first_edge[1], last_edge[1], tipo=type1)
                g.remove_edge(first_edge[0], first_edge[1])
                g.remove_edge(last_edge[0], last_edge[1])
        except Exception:
            # Fix: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit).  Expected failures here are
            # KeyError (missing 'tipo') and IndexError (edges already
            # removed by an earlier collapse); skip such nodes.
            pass
    # Drop the now-isolated pass-through nodes.
    for v in degree2:
        if g.degree(v) == 0:
            g.remove_node(v)
    return g
if __name__ == "__main__":
    pbf = load_osm_pbf()
    G, node_locations = build_road_network(pbf)
    #shp = load_intersection_nodes_file()
    #node_attrib = get_nodes_locations(G)
    latlong_ids = {}
    for n in G.nodes():
        # node_locations stores (lon, lat), so index 1 is latitude.
        lat, lng = node_locations[n][1], node_locations[n][0]
        # NOTE(review): G.node[...] is the networkx 1.x API.
        G.node[n]['latitude'] = node_locations[n][1]
        G.node[n]['longitude'] = node_locations[n][0]
        latlong_ids[(lat,lng)] = n
    '''
    The reduce_nodes() function reduces the bigger and more complete
    graph G by identifying all nodes with degree 2 and removing them
    in the graph, and connecting their adjacent nodes.
    '''
    #small_g = reduce_nodes(G)
    '''
    Here, we are writing out the network data structures to .gexf format.
    '''
    #nx.write_gexf(G,'road_network_all_nodes.gexf')
    #nx.write_gexf(small_g,'road_network_reduced.gexf')
    #np.save("all_nodes.npy",G.nodes())
|
991,051 | d154c5ed0e45c496db12ce01909e0d25ded1d97d | import pandas as pd
from models import DATACONTRACT
from loguru import logger
class DraftParser:
    """Parse the draft-results table of a league page into a DataFrame."""

    def __init__(self):
        pass

    def parse_draft_info(self, draft_soup, info_dict):
        """Return a DataFrame with one row per drafted player.

        Each row merges *info_dict* (league/team metadata) with the
        per-player fields extracted from the "draft-team" table.
        """
        draft_section = draft_soup.find("section", {"id": "draft-team"})
        table_rows = draft_section.find_all('tr')
        records = []
        for table_row in table_rows[1:]:  # skip the header row
            record = dict(info_dict)
            record.update(self.parse_draft_row(table_row))
            logger.debug(record)
            records.append(record)
        logger.debug(records)
        return pd.DataFrame(records)

    def parse_draft_row(self, row_soup) -> dict:
        """Extract draft order, class order, player name and position
        from one <tr> of the draft table."""
        cells = row_soup.find_all('td')
        order = cells[0].contents[0]
        class_order = cells[1].contents[0].replace('(', '').replace(')', '')
        name = cells[2].find('a').contents[0]
        position = cells[3].contents[0]
        parsed = {DATACONTRACT.DRAFTORDER: order,
                  DATACONTRACT.CLASSORDER: class_order,
                  DATACONTRACT.PLAYERNAME: name,
                  DATACONTRACT.PLAYERPOS: position}
        logger.debug(parsed)
        return parsed
d1 = {DATACONTRACT.PLAYERPOS: 'WR', DATACONTRACT.PLAYERNAME: ['P1']}
d2 = {DATACONTRACT.PLAYERPOS: 'WR', DATACONTRACT.PLAYERNAME: ['P2']}
d3 = {DATACONTRACT.PLAYERPOS: ['QB'], DATACONTRACT.PLAYERNAME: ['P3']}
d4 = {DATACONTRACT.PLAYERPOS: ['QB'], DATACONTRACT.DRAFTORDER: ['5']}
l = [d1, d2, d3, d4]
df = pd.DataFrame(l)
df = pd.DataFrame.from_dict(d1)
df.append(pd.DataFrame.from_dict(d2))
d = {DATACONTRACT.PLAYERPOS: ['WR', 'WR', 'QB'], DATACONTRACT.PLAYERNAME: ['P1', 'P2', 'P3']}
df = pd.DataFrame.from_dict(d)
d1 = {'r1': 1, 'r2': 2}
d2 = {'r3': 3, 'r4': 4} |
991,052 | d4e44aae2049ab147eedd3a1c3669d2b13acf610 |
def area(largura, comprimento):
    """Return the area of a rectangular plot (width * length)."""
    return comprimento * largura
def divisor():
    """Print a decorative separator line."""
    linha = '-=' * 30
    print(linha)
def msg(msg):
    """Print *msg* to stdout.

    NOTE(review): the parameter shadows the function name itself; fine
    for this script, but confusing.
    """
    print(msg)
def lerteclado(msg):
    """Prompt the user with *msg* and return the raw input string."""
    return input(msg)
# Script entry: read the plot dimensions from the keyboard and report
# the computed area.
msg('Controle de Terrenos')
divisor()
largura = float(lerteclado('Largura (m): '))
comprimento = float(lerteclado('Comprimento (m): '))
divisor()
msg(f'A รกrea de um terreno {largura}x{comprimento} รฉ de {area(largura, comprimento)}')
|
991,053 | fc3a28c8193bdcc7fe7a8d93fc6e85a16e798c3f | import copy
import pathlib
import pickle
import time
from functools import partial, reduce
import numpy as np
import torch
from det3d.core.bbox import box_np_ops
from det3d.core.sampler import preprocess as prep
from det3d.utils.check import shape_mergeable
class DataBaseSamplerV2:
    """Ground-truth database sampler for 3D-detection gt-augmentation.

    Samples extra ground-truth boxes (and their pre-saved points) from a
    gt database and pastes them into the current scene after a BEV
    collision test.
    """
    def __init__(
        self,
        db_infos, # object/dbinfos_train.pkl
        groups, # [dict(Car=15,),],
        db_prepor=None, # filter_by_min_num_points, filter_by_difficulty
        rate=1.0, # rate=1.0
        global_rot_range=None, # [0, 0]
        logger=None, # logging.getLogger("build_dbsampler")
        gt_random_drop=-1.0,
        gt_aug_with_context=-1.0,
        gt_aug_similar_type=False,
    ):
        """Set up per-class batch samplers over the (filtered) database.

        NOTE(review): `logger` defaults to None but is used unguarded —
        callers must pass a logger.  `global_rot_range` is accepted but
        never used in this class.
        """
        # load all gt database here.
        for k, v in db_infos.items():
            logger.info(f"load {len(v)} {k} database infos")
        # preprocess: filter_by_min_num_points/difficulty.
        if db_prepor is not None:
            db_infos = db_prepor(db_infos)
            logger.info("After filter database:")
            for k, v in db_infos.items():
                logger.info(f"load {len(v)} {k} database infos")
        self.db_infos = db_infos
        self._rate = rate
        self._groups = groups
        self._group_db_infos = {}
        self._group_name_to_names = []
        self._sample_classes = []
        self._sample_max_nums = []
        self.gt_point_random_drop = gt_random_drop
        self.gt_aug_with_context = gt_aug_with_context
        # get group_name: Car and group_max_num: 15
        self._group_db_infos = self.db_infos # just use db_infos
        for group_info in groups:
            self._sample_classes += list(group_info.keys()) # ['Car']
            self._sample_max_nums += list(group_info.values()) # [15]
        # get sampler dict for each class like Car, Cyclist, Pedestrian...
        # this sampler can ensure batch samples selected randomly.
        self._sampler_dict = {}
        for k, v in self._group_db_infos.items():
            self._sampler_dict[k] = prep.BatchSampler(v, k)
        if gt_aug_similar_type:
            # Treat Van samples as additional Car candidates.
            self._sampler_dict["Car"] = prep.BatchSampler(self._group_db_infos["Car"] + self._group_db_infos["Van"], "Car")
    def sample_all(
        self,
        root_path,
        gt_boxes,
        gt_names,
        num_point_features,
        random_crop=False,
        gt_group_ids=None,
        calib=None,
        targeted_class_names=None,
        with_road_plane_cam=None,
    ):
        '''
        This func aims to sample some gt-boxes and corresponding points to perform gt augmentation;
        notice that the non-targeted gt boxes (like pedestrian) have been considered into collision test;
        notice that the points in corresponding gt-box are read from pre-saved gt database;
        Returns a dict (gt_names/difficulty/gt_boxes/points/gt_masks/group_ids)
        or None when nothing was sampled.
        '''
        # record the num of gt-aug samples with a dict and a list
        sampled_num_dict = {}
        sample_num_per_class = []
        for class_name, max_sample_num in zip(self._sample_classes, self._sample_max_nums): # actual only once for ['Car': 15]
            sampled_num = int(max_sample_num - np.sum([n == class_name for n in gt_names]))
            #sampled_num = int(max_sample_num - np.sum([name in targeted_class_names for name in gt_names]))
            sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
            sampled_num_dict[class_name] = sampled_num
            sample_num_per_class.append(sampled_num)
        sampled = []
        sampled_gt_boxes = []
        all_gt_boxes = gt_boxes
        # gt-augmentation: sample gt boxes and add them to current gt_boxes.
        # todo: we may sample box one by one to ensure num of gt-boxes is fulfilled.
        for class_name, sampled_num in zip(self._sample_classes, sample_num_per_class):
            # if sampled_num > 0:
            #     sampled_objects = self.sample_class_v2(class_name, sampled_num, all_gt_boxes)
            #     sampled += sampled_objects
            #     if len(sampled_objects) > 0:
            #         if len(sampled_objects) == 1:
            #             sampled_boxes = sampled_objects[0]["box3d_lidar"][np.newaxis, ...]
            #         else:
            #             sampled_boxes = np.stack([s["box3d_lidar"] for s in sampled_objects], axis=0)
            #         sampled_gt_boxes += [sampled_boxes]
            #         all_gt_boxes = np.concatenate([all_gt_boxes, sampled_boxes], axis=0)
            # ensure final num_boxes fulfill the num requirement after collision test.
            times = 0
            while sampled_num > 0 and times < 2:
                sampled_objects = self.sample_class_v2(class_name, sampled_num, all_gt_boxes)
                sampled += sampled_objects
                if len(sampled_objects) > 0:
                    if len(sampled_objects) == 1:
                        sampled_boxes = sampled_objects[0]["box3d_lidar"][np.newaxis, ...]
                    else:
                        sampled_boxes = np.stack([s["box3d_lidar"] for s in sampled_objects], axis=0)
                    sampled_gt_boxes += [sampled_boxes]
                    all_gt_boxes = np.concatenate([all_gt_boxes, sampled_boxes], axis=0)
                sampled_num -= len(sampled_objects)
                times += 1
        if len(sampled) > 0:
            ''' get points in sampled gt_boxes '''
            sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
            num_sampled = len(sampled)
            s_points_list = []
            # get points in sampled gt-boxes from pre-generated gt database.
            for info in sampled:
                try:
                    s_points = np.fromfile(str(pathlib.Path(root_path) / info["path"]), dtype=np.float32).reshape(-1, num_point_features)
                    # gt_points are saved with relative distance; so need to recover by adding box center.
                    s_points[:, :3] += info["box3d_lidar"][:3]
                    if with_road_plane_cam is not None:
                        # Snap the pasted box to the road plane (camera coords).
                        a, b, c, d = with_road_plane_cam
                        box3d_cam_center = info['box3d_cam'][0:3] # x,y,z with cam coord. bottom center.
                        cur_height_cam = (-d - a * box3d_cam_center[0] - c * box3d_cam_center[2]) / b
                        move_height_cam = info['box3d_cam'][1] - cur_height_cam # cal y dist, > 0: move up, < 0: move down.
                        s_points[:, 2] += move_height_cam
                        index = sampled.index(info)
                        sampled_gt_boxes[index, 2] += move_height_cam
                    # random drop points in gt_boxes to make model more robust.
                    if self.gt_point_random_drop > 0:
                        num_point = s_points.shape[0]
                        if num_point > 10:
                            drop_num = int(np.random.uniform(0, self.gt_point_random_drop) * num_point)
                            choice = np.random.choice(np.arange(num_point), num_point - drop_num, replace=False)
                            s_points = s_points[choice]
                    s_points_list.append(s_points)
                except Exception:
                    # Best-effort: skip samples whose point file is missing/corrupt.
                    print(info["path"])
                    continue
            '''todo: do something about random crop'''
            if random_crop: # False
                s_points_list_new = []
                assert calib is not None
                rect = calib["rect"]
                Trv2c = calib["Trv2c"]
                P2 = calib["P2"]
                gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect, Trv2c, P2)
                crop_frustums = prep.random_crop_frustum(gt_bboxes, rect, Trv2c, P2)
                for i in range(crop_frustums.shape[0]):
                    s_points = s_points_list[i]
                    mask = prep.mask_points_in_corners(s_points, crop_frustums[i : i + 1]).reshape(-1)
                    num_remove = np.sum(mask)
                    if num_remove > 0 and (s_points.shape[0] - num_remove) > 15:
                        s_points = s_points[np.logical_not(mask)]
                    s_points_list_new.append(s_points)
                s_points_list = s_points_list_new
            # NOTE(review): points are concatenated with torch.cat while
            # the items are numpy arrays — confirm upstream converts them.
            ret = {
                "gt_names": np.array([s["name"] for s in sampled]),
                "difficulty": np.array([s["difficulty"] for s in sampled]),
                "gt_boxes": sampled_gt_boxes,
                "points": torch.cat(s_points_list, axis=0),
                "gt_masks": np.ones((num_sampled,), dtype=np.bool_),
                "group_ids": np.arange(gt_boxes.shape[0], gt_boxes.shape[0] + len(sampled)),
            }
        else:
            ret = None
        return ret
    def sample_class_v2(self, name, num, gt_boxes):
        '''
        This func aims to select fixed number of gt boxes from gt database with collision (bev iou) test performed.
        '''
        # sample num gt_boxes from gt_database
        sampled = self._sampler_dict[name].sample(num)
        sampled = copy.deepcopy(sampled)
        num_sampled = len(sampled)
        num_gt = gt_boxes.shape[0]
        # get all boxes: gt_boxes + sp_boxes
        sp_boxes = np.stack([i["box3d_lidar"] for i in sampled], axis=0) # todo: need modification here
        boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()
        offset = [0.0, 0.0]
        if self.gt_aug_with_context > 0.0:
            # Enlarge sampled boxes so some context area is kept collision-free.
            offset = [self.gt_aug_with_context, self.gt_aug_with_context]
        # get all boxes_bev: gt_boxes_bev + sampled_boxes_bev
        sp_boxes_new = boxes[num_gt:]
        gt_boxes_bv = box_np_ops.center_to_corner_box2d(gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, -1])
        sp_boxes_bv = box_np_ops.center_to_corner_box2d(sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5] + offset, sp_boxes_new[:, -1])
        total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)
        # collision test on bev (stricter than 3d)
        coll_mat = prep.box_collision_test(total_bv, total_bv) # todo: too slow here
        diag = np.arange(total_bv.shape[0])
        coll_mat[diag, diag] = False
        # get valid samples
        valid_samples = []
        for i in range(num_gt, num_gt + num_sampled): # todo: the overall box num may not meet the requirement
            if coll_mat[i].any():
                # i-th sampled box is not considered into auged gt-boxes.
                coll_mat[i] = False
                coll_mat[:, i] = False
            else:
                # i-th sampled box is considered into auged gt-boxes.
                valid_samples.append(sampled[i - num_gt])
        return valid_samples
    def sample_class_v3(self, name, num, gt_boxes):
        '''
        This func aims to selected fixed number of gt boxes from gt database with collision test performed.
        NOTE(review): `iou3d` is never imported in this module — calling
        this method raises NameError; it appears to be dead/experimental.
        '''
        # sample num gt_boxes from gt_database
        sampled_objects = copy.deepcopy(self._sampler_dict[name].sample(num))
        sampled_boxes = np.stack([i["box3d_lidar"] for i in sampled_objects], axis=0) # todo: need modification here
        all_boxes = np.concatenate([gt_boxes, sampled_boxes], axis=0).copy()
        all_boxes_torch = torch.from_numpy(all_boxes).float()
        # collision test: prep.box_collision_test is still a little faster than boxes_iou_bev_cpu.
        iou_bev = iou3d.boxes_iou_bev_cpu(all_boxes_torch, all_boxes_torch).numpy()
        coll_mat = iou_bev > 0.
        diag = np.arange(all_boxes.shape[0])
        coll_mat[diag, diag] = False
        # get valid samples
        valid_samples = []
        num_gt = gt_boxes.shape[0]
        num_sampled = len(sampled_objects)
        for i in range(num_gt, num_gt + num_sampled): # todo: without multiple try times, sometimes, the overall box num may not meet the requirement
            if coll_mat[i].any():
                # i-th sampled box is not considered into auged gt-boxes.
                coll_mat[i] = False
                coll_mat[:, i] = False
            else:
                # i-th sampled box is considered into auged gt-boxes.
                valid_samples.append(sampled_objects[i - num_gt])
        return valid_samples
|
991,054 | 21541297a9acd6d12f2470d7327cafb575bc0b1b | #Importamos las librerรญas
import numpy as np
import matplotlib.pyplot as plt
import pyaudio
from pyaudio import PyAudio as pa
import math
import wave
import time
from scipy import signal
#%% Seteamos el rate
#def bitrate(rate):
# global BITRATE
# BITRATE=rate
BITRATE = 44100  # sample rate (Hz) shared by all emission/measurement helpers
def print_bitrate():
    """Print the module-wide sample rate."""
    print(BITRATE)
#%% Creamos funciones para comunicarnos con la placa de audio
#---------------Emision--------------------------
#---------------------------------------------------------------------------------------------------------------------------
#Tipos de ondas
def armonica(freq_arm,dur_arm): # harmonic signal: 256-bit amplitude (assumed 1.2V), frequency freq_arm, duration dur_arm
    """Build a sine wave as a string of chr() samples for PyAudio.

    NOTE(review): `silencios = cant_puntos % BITRATE` pads with the
    fractional-second remainder of samples — confirm this matches the
    intended duration handling.
    """
    onda=''
    cant_puntos = int(BITRATE * dur_arm)
    silencios = cant_puntos % BITRATE
    for x in range(cant_puntos):
        onda+=chr(int(math.sin(x / ((BITRATE / freq_arm) / math.pi)) * 126/2 + 128/2))
    # Pad the remaining duration with silence (mid-scale value 128).
    for x in range(silencios):
        onda+=chr(128)
    #test: plots a fragment of the emitted signal (kept disabled)
#    t = np.arange(0,dur_arm/10**3,1/BITRATE)
#    onda_plot=np.sin(t*freq_arm)*126/2+128/2
#    plt.figure(1)
#    plt.plot(t, onda_plot)
#    plt.xlabel('Tiempo')
#    plt.ylabel('Intensidad')
#    plt.show()
    # same plot but against sample count instead of time
#    #test: grafica un pedazo de la senal enviada (si no hay que hacer mucho zoom para ver)
#    #dom= np.array(range(cant_puntos/10**3))
#    onda_plot=np.sin(dom / ((BITRATE / freq_arm) / math.pi))*126/2+128/2
#    plt.figure(1)
#    plt.plot(dom, onda_plot)
#    plt.xlabel('Frame')
#    plt.ylabel('Intensidad')
#    plt.show()
#
    return onda
def armonica_2(amp,frec,dur): # amp is Vpp (peak-to-peak volts)
    """Build and plot a sine wave scaled to 8-bit values.

    NOTE(review): the chr()-string `senal` is built but never used, and
    the function returns the numpy array, not the string; the error
    branch returns a message string instead of raising — callers must
    type-check the result.
    """
    t = np.arange(0,dur,1/BITRATE)
    if amp <= 1.20:
        amp_bit= amp*256/1.20 # convert volts to bits
        armonica = (amp_bit/2)*(np.sin(2 * np.pi * frec * t) + 1)
        armonica_lista = list(armonica)
        senal = ''
        for x in range(len(t)):
            senal += chr(int(armonica_lista[x]))
        #test: plot a fragment of the emitted signal
        t_plot = np.arange(0,dur/10**3,1/BITRATE)
        plt.figure(2)
        plt.plot(t_plot, armonica[:len(t_plot)]*amp/amp_bit)
        plt.xlabel('Tiempo')
        plt.ylabel('Intensidad')
        plt.show()
        return armonica;
    else:
        return ('El voltaje debe ser menor a 1.20V')
def cuadrada(amp, frec, dur):
    """Build and plot a square wave scaled to 8-bit values.

    Same structure (and same caveats) as armonica_2: `senal` is unused,
    the numpy array is returned, and the error branch returns a string.
    """
    t = np.arange(0,dur,1/BITRATE)
    if amp <= 1.20:
        amp_bit= amp*256/1.20 # convert volts to bits
        cuadrada = (amp_bit/2)*(signal.square(2 * np.pi * frec * t) + 1)
        cuadrada_lista = list(cuadrada)
        senal = ''
        for x in range(len(t)):
            senal += chr(int(cuadrada_lista[x]))
        #test: plot a fragment of the emitted signal
        t_plot = np.arange(0,dur/10**3,1/BITRATE)
        plt.figure(2)
        plt.plot(t_plot, cuadrada[:len(t_plot)]*amp/amp_bit)
        plt.xlabel('Tiempo')
        plt.ylabel('Intensidad')
        plt.show()
        return cuadrada;
    else:
        return ('El voltaje debe ser menor a 1.20V')
def sawtooth(amp, frec, dur):
    """Build and plot a sawtooth wave scaled to 8-bit values.

    Same structure (and same caveats) as armonica_2: `senal` is unused,
    the numpy array is returned, and the error branch returns a string.
    """
    t = np.arange(0,dur,1/BITRATE)
    if amp <= 1.20:
        amp_bit= amp*256/1.20 # convert volts to bits
        sawtooth = (amp_bit/2)*(signal.sawtooth(2 * np.pi * frec * t) + 1) # passing 0.5 as signal.sawtooth's second arg yields a triangle wave
        sawtooth_lista = list(sawtooth)
        senal = ''
        for x in range(len(t)):
            senal += chr(int(sawtooth_lista[x]))
        #test: plot a fragment of the emitted signal
        t_plot = np.arange(0,dur/10**3,1/BITRATE)
        plt.figure(2)
        plt.plot(t_plot, sawtooth[:len(t_plot)]*amp/amp_bit)
        plt.xlabel('Tiempo')
        plt.ylabel('Intensidad')
        plt.show()
        return sawtooth;
    else:
        return ('El voltaje debe ser menor a 1.20V')
#---------------------------------------------------------------------------------------------------------------------------
#Ejecuciรณn de la senal de emisiรณn
def emitir(onda, callback=None):
    """Play *onda* through the default output device.

    :param onda: string of chr() samples, written in blocking mode.
    :param callback: optional PyAudio callback; when given the stream runs
        in callback mode and the callback supplies the data.
    """
    p = pa()
    # Fix: bind `stream` before the try block so the finally clause cannot
    # raise NameError when p.open() itself fails.
    stream = None
    try:
        if callback: # callback (non-blocking) mode
            print('modo callback')
            stream = p.open(
                format=p.get_format_from_width(1),
                channels=1,
                rate=BITRATE,
                output=True,
                stream_callback=callback
            )
            stream.start_stream()
            while stream.is_active():
                time.sleep(0.1)
        else: # blocking mode
            print('modo bloqueo')
            stream = p.open(
                format=p.get_format_from_width(1),
                channels=1,
                rate=BITRATE,
                output=True,
            )
            stream.write(onda)
    except Exception as e:
        print(e)
    finally:
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()
#---------------------------------------------------------------------------------------------------------------------------
#------------Medicion-------------------------------------
def medir(dur_med):
    """Record *dur_med* seconds from the default input device.

    The capture is round-tripped through a temporary .wav file (keeping
    the original implementation's approach) and returned as a 1-D int16
    numpy array of samples.
    """
    FORMAT = pyaudio.paInt16
    CHANNELS = 1 # 1 = mono (2 would be stereo)
    CHUNK = 1024 # buffer block size; the signal is read in these chunks
    nombre_arch = 'arch.wav'
    frames = []
    p = pa()
    # Start recording.
    stream = p.open(format=FORMAT, channels=CHANNELS,
                    rate=BITRATE, input=True,
                    frames_per_buffer=CHUNK)
    print('grabando...')
    for i in range(0, int(BITRATE / CHUNK * dur_med)):
        data = stream.read(CHUNK)
        frames.append(data)
    print('finalizando grabaciรณn...')
    # Stop recording.
    stream.stop_stream()
    stream.close()
    p.terminate()
    # Write a temporary .wav so the signal can be recovered as an array.
    waveFile = wave.open(nombre_arch, 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(p.get_sample_size(FORMAT))
    waveFile.setframerate(BITRATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()
    arch_temp = wave.open('arch.wav','r')
    # Extract an array from the wav signal.
    senal = arch_temp.readframes(-1)
    # Fix: np.fromstring is deprecated (removed for binary input in
    # recent numpy); frombuffer is the supported equivalent.
    senal = np.frombuffer(senal, dtype=np.int16)
    return senal
#emite y mide al mismo tiempo mientras esta activo el callback
def playrec(callback, dur_med):
    """Emit audio via *callback* while repeatedly recording blocks of
    *dur_med* seconds; returns the concatenated recording.

    :returns: 1-D numpy array of recorded samples (empty if nothing was
        captured).
    """
    p = pa()
    # Fix: bind `stream` before the try so finally cannot raise NameError.
    stream = None
    chunks = []
    try:
        # callback (non-blocking) mode
        print('modo callback')
        stream = p.open(
            format=p.get_format_from_width(1),
            channels=1,
            rate=BITRATE,
            output=True,
            stream_callback=callback
        )
        stream.start_stream()
        while stream.is_active():
            time.sleep(0.1)
            # Fix: np.append returns a new array — the original discarded
            # its result, so the function always returned an empty array.
            # Collect the measured blocks and join them at the end.
            chunks.append(np.asarray(medir(dur_med)))
    except Exception as e:
        print(e)
    finally:
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()
    data = np.concatenate(chunks) if chunks else np.empty(0)
    return data
#---------------------------------------------------------------------------------------------------------------------------
##If Stereo
#if spf.getnchannels() == 2: ---------esta sentencia impide ingresar dos canales. Chequear si es necesaria------
# print 'Just mono files'
# sys.exit(0)
# plt.figure(1)
# plt.title('Signal Wave...') -------El ploteo prefiero dejarlo fuera de la clase-------
# plt.plot(senal)
# plt.show()
#
#----------------------------------------------COMENTARIO IMPORTANTE----------------------------------------------
#queda chequear que este midiendo bien con el microfono y/o el cable del labo. Hay que ver si mide en bits (de 0 a 255), en cuyo
#caso agregar la siguiente linea: senal=senal*5/255 (suponiendo que la placa entrega de 0 a 5V)
|
991,055 | f59fa272ae3f44a1d8e6b2ba2f3db44bebbba6f4 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from urllib.request import urlopen
import smtplib
import config
class BikeScraper:
    """Scrape a Craigslist search for listings and optionally e-mail them.

    Builds a search URL from location, category code and keyword, loads it
    in a Chrome browser, and parses result rows into titles/prices/dates.
    """

    def __init__(self, location, category_code, keyword):
        self.location = location
        self.keyword = keyword
        self.category_code = category_code
        self.url = "https://" + location + ".craigslist.org/search/" + category_code + "?query=" + keyword
        self.driver = webdriver.Chrome('/Users/tlueders/Desktop/chromedriver')
        self.delay = 3  # seconds to wait for the search form

    def load_url(self):
        """Open the search URL and wait for the search form to appear."""
        self.driver.get(self.url)
        try:
            wait = WebDriverWait(self.driver, self.delay)
            wait.until(EC.presence_of_element_located((By.ID, "searchform")))
        except TimeoutException:
            print("Timed Out")

    def extract_data(self):
        """Parse visible result rows into parallel title/price/date lists."""
        # find_elements(By.CLASS_NAME, ...) replaces the legacy
        # find_elements_by_class_name, which was removed in Selenium 4.
        all_posts = self.driver.find_elements(By.CLASS_NAME, "result-row")
        dates = []
        titles = []
        prices = []
        for post in all_posts:
            # Row text contains "$<price>" and "<month> <day> <title>";
            # split on '$' first, then on newlines and spaces.
            title = post.text.split("$")
            if title[0] == '':
                title = title[1]
            else:
                title = title[0]
            title = title.split("\n")
            price = title[0]
            title = title[-1]
            title = title.split(" ")
            month = title[0]
            day = title[1]
            title = ' '.join(title[2:])
            date = month + " " + day
            titles.append(title)
            prices.append(price)
            dates.append(date)
        return titles, prices, dates

    def extract_urls(self):
        """Return the href of every result-title link on the search page."""
        url_list = []
        html_page = urlopen(self.url)
        soup = BeautifulSoup(html_page, "html.parser")
        for link in soup.findAll("a", {"class": "result-title"}):
            url_list.append(link["href"])
        return url_list

    def quit(self):
        """Close the browser window."""
        self.driver.close()

    def send_email(self, subject, msg):
        """Send *msg* to the configured address via Gmail SMTP."""
        try:
            server = smtplib.SMTP('smtp.gmail.com:587')
            server.ehlo()
            server.starttls()
            server.login(config.EMAIL_ADDRESS, config.PASSWORD)
            message = "Subject: {}\n\n{}".format(subject, msg)
            server.sendmail(config.EMAIL_ADDRESS, config.EMAIL_ADDRESS, message)
            server.quit()
            print("Success: Email sent.")
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the original
            # best-effort behavior while letting those propagate.
            print('Failed: Email was not sent.')

    def create_message(self):
        """Combine scraped data and URLs into printable post tuples."""
        titles, prices, dates = self.extract_data()
        url_list = self.extract_urls()
        posts = []
        for title, price, date, url in zip(titles, prices, dates, url_list):
            post = "Price: " + price, "Title: " + title, "Date: " + date, "URL: " + url
            posts.append(post)
            print("{} \n".format(post))
        return posts
# Initialize Scraper: search Bend, OR bike listings ("bia") for "specialized".
scraper = BikeScraper("bend", "bia", "specialized")
scraper.load_url()
# Create & Send Message (e-mailing is currently disabled below).
# subject = "Daily Craigslist Updates"
msg = scraper.create_message()
print(msg)
# scraper.send_email(subject, msg)
# Close Scraper
scraper.quit() |
991,056 | 89387ff9bbc8b069b79bcba773b734aa795c5544 | """
This code computes the refractive index of a protein,
using PaliwalTomarGupta2014 Uricase as a base.
We use the relation:
n^2 = A + B/(1-C/L^2) + D/(1-E/L^2)
n: real part of refraction index
L: wavelength
A=-97.26
B=95.053
C=0.016
D=0.0647
E=0.521
We assume imaginary part of refractive index=0.024 (constant)
"""
import numpy
# Dispersion coefficients for the relation n^2 = A + B/(1-C/L^2) + D/(1-E/L^2)
# (PaliwalTomarGupta2014 uricase).
A = -97.26
B = 95.053
C = 0.016
D = 0.0647
E = 0.521

# Sample N wavelengths across 0.515-0.535.
N = 41
wl = numpy.linspace(0.515, 0.535, N)

# Real part of the refractive index from the dispersion relation.
n2 = A + B / (1 - C / wl ** 2) + D / (1 - E / wl ** 2)
n = numpy.sqrt(n2)

# Imaginary part assumed constant at 0.024.
k = numpy.full(N, 0.024)

# Three columns: wavelength, real index n, imaginary index k.
data = numpy.column_stack((wl, n, k))
numpy.savetxt('molecule_PTG14', data)
|
991,057 | 76ef15fe0922bcedfea1d057e36e8f9b5cbf1df4 | from PyQt5.QtCore import QThread, pyqtSignal
class ThreadRefreshOnOrderStatus(QThread):
    """Background worker that reloads the list of unpaid orders from the
    orders collection and signals the UI when data is ready."""

    signal = pyqtSignal('PyQt_PyObject')

    def __init__(self, parent_class):
        super().__init__()
        self.output_list = []
        self.parent_class = parent_class

    def run(self):
        self.output_list.clear()
        # Imported here so the worker thread, not the UI thread, pays the cost.
        from pymongo.errors import AutoReconnect
        from errors import CustomerNotDoneYetError
        orders = self.parent_class.MW.DB.orders
        try:
            cursor = orders.find({'pay_done': False},
                                 {'_id': 1, 'name': 1, 'table_no': 1, 'done': 1, 'total': 1})
            self.output_list.extend(
                [doc['_id'], doc['name'], doc['table_no'], doc['done'], doc['total']]
                for doc in cursor)
            self.signal.emit(True)
        except AutoReconnect:
            self.parent_class.MW.mess('-->> Network Error <<--')
        except CustomerNotDoneYetError as ob:
            self.parent_class.MW.mess(str(ob))
        finally:
            # Always re-enable the refresh button, even on failure.
            self.parent_class.curr_wid.bt_refresh_on_order.setEnabled(True)
|
991,058 | e6a704e16e0285e7618e9947cf5be9034a6f8737 | import json
import os
from argparse import ArgumentParser
from typing import List
from tensorflow import keras
from calamari_ocr.ocr import SavedCalamariModel
from calamari_ocr.ocr.model.ensemblegraph import EnsembleGraph
from calamari_ocr.ocr.model.graph import Graph
def split(args):
    """Split a Calamari ensemble checkpoint into per-voter checkpoints.

    Loads the saved Keras model, extracts one sub-model per ensemble
    member (matching output names suffixed "_<i>"), and saves each with a
    copy of the checkpoint JSON whose ensemble fields are reset to -1.
    """
    ckpt = SavedCalamariModel(args.model)
    keras_model = keras.models.load_model(
        ckpt.ckpt_path,
        custom_objects={
            "Graph": Graph,
            "EnsembleGraph": EnsembleGraph,
            "VoterGraph": EnsembleGraph,
        },
    )

    def extract_keras_model(i):
        # Build a sub-model wired to the i-th member's outputs, or None
        # when member i does not exist (used as the loop stop condition).
        inputs = keras_model.input
        outputs = keras_model.output
        assert isinstance(outputs, dict)
        assert isinstance(inputs, dict)
        names_to_extract = [
            "blank_last_logits",
            "blank_last_softmax",
            "softmax",
            "decoded",
            "out_len",
        ]
        split_outputs = {}
        for name in names_to_extract:
            src_name = f"{name}_{i}"
            if src_name not in outputs:
                return None
            split_outputs[name] = outputs[src_name]
        return keras.Model(inputs=inputs, outputs=split_outputs)

    split_models: List[keras.Model] = []
    print("Starting to split models")
    while True:
        model = extract_keras_model(len(split_models))
        if model is None:
            break
        split_models.append(model)
    print(f"Split model into {len(split_models)}.")
    print(f"Saving models to {ckpt.dirname}/{ckpt.basename}_split_(i).ckpt")

    with open(ckpt.json_path) as f:
        ckpt_dict = json.load(f)
    # Mark the saved checkpoints as non-ensemble models.
    ckpt_dict["scenario_params"]["model_params"]["ensemble"] = -1
    ckpt_dict["scenario_params"]["data_params"]["ensemble_"] = -1

    for i, split_model in enumerate(split_models):
        path = os.path.join(ckpt.dirname, f"{ckpt.basename}_split_{i}.ckpt")
        with open(path + ".json", "w") as f:
            json.dump(ckpt_dict, f, indent=2)
        split_model.save(path)
        print(f"Saved {i + 1}/{len(split_models)}")
def main():
    """Command-line entry point: parse the sub-command and dispatch."""
    parser = ArgumentParser()
    subcommands = parser.add_subparsers(title="Program mode", required=True, dest="mode")
    split_cmd = subcommands.add_parser("split")
    split_cmd.add_argument("model")

    parsed = parser.parse_args()
    if parsed.mode == "split":
        split(parsed)


if __name__ == "__main__":
    main()
|
991,059 | c6a0c5cbaf443d7092a6f2c780ce58422eebd166 | #importing libraries
import tensorflow as tf
import numpy as np
# Wrap a NumPy matrix in a TensorFlow (TF1-style) constant tensor.
numpy_inputs = np.mat([[5,2,13],[7,9,0]], dtype = int)
inputs = tf.convert_to_tensor(value = numpy_inputs, dtype = tf.int8)
# session start: the `with` block closes the session automatically on
# exit, so the original trailing sess.close() was redundant and removed.
with tf.Session() as sess:
    print(sess.run(fetches = inputs))
    print(inputs)
# session end (handled by the context manager)
|
991,060 | d89fbd6eb51a7a79e69344d77408949b6b440a08 | from db import Column, Integer, ForeignKey, Boolean
from models.abstract_models import BaseModel
from models.clinical_story.medicine_type import MedicineTypeModel
class MedicineModel(BaseModel):
    """ORM model for table "medicaciones": a medication entry linked to a
    clinical story and a medicine type."""

    __tablename__ = "medicaciones"
    # FK to the medicine-type table.
    medicine_type_id = Column("medicamentos_id", Integer, ForeignKey(MedicineTypeModel.id))
    # FK to the owning clinical story.
    clinical_story_id = Column("historias_clinicas_id", Integer, ForeignKey('historias_clinicas.id'))
    # "antecedente": presumably flags a pre-existing/background medication
    # — TODO confirm against callers.
    background = Column("antecedente", Boolean)

    @classmethod
    def get_list(cls, query_params=None) -> "list[MedicineModel]":
        """Return all medication rows; *query_params* is currently ignored.

        (Return annotation fixed: was a copy-pasted "ExamTypeModel".)
        """
        query = cls.query
        return query.all()

    @classmethod
    def find_by_story_id(cls, story_id: str) -> "list[MedicineModel]":
        """Return all medications attached to clinical story *story_id*.

        (Return annotation fixed: was a copy-pasted "ExamResultModel".)
        """
        return cls.query.filter_by(clinical_story_id=story_id).all()
991,061 | 3b0233a4157595ea5706411522878e91d9c06684 | class DependencyNotFound(Exception):
pass
class DependencyResolvingException(Exception):
    """Signals a failure while resolving a dependency."""
    pass
|
991,062 | a4bea911faaf5f3e2df3c1c159df7f7ffb0aaf4b | # Copyright 2019 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Test cases for DabRadioStation class.
"""
from lib.dab_radio_station import DabRadioStation
from nose.tools import assert_raises
def test_DabRadioStation():
    # Basic station: hostname/topics are built from scids.sid.eid.ecc.
    p = DabRadioStation(name = 'BBC Radio 1', ecc = 'ce1', eid = 'ce15', sid = 'c221', scids = '0')
    assert p.get_hostname() == '0.c221.ce15.ce1.dab.radiodns.org'
    assert p.get_text_topic() == '/topic/dab/ce1/ce15/c221/0/text'
    assert p.get_image_topic() == '/topic/dab/ce1/ce15/c221/0/image'
    assert p.get_name() == 'BBC Radio 1'

def test_DabRadioStation_pa():
    # With a packet address (pa) it is prepended to the hostname and
    # appended to the topic path.
    p = DabRadioStation(name = 'BBC Radio 1', ecc = 'abc', eid = 'abcd', sid = 'abcd', scids = '0', pa = 1023)
    assert p.get_hostname() == '1023.0.abcd.abcd.abc.dab.radiodns.org'
    assert p.get_text_topic() == '/topic/dab/abc/abcd/abcd/0/1023/text'
    assert p.get_image_topic() == '/topic/dab/abc/abcd/abcd/0/1023/image'
    assert p.get_name() == 'BBC Radio 1'

def test_DabRadioStation_appty_uatype():
    # With appty/uatype, the pair is joined as "appty-uatype".
    p = DabRadioStation(name = 'BBC Radio 1', ecc = 'abc', eid = 'abcd', sid = 'abcd', scids = '0', appty = '01', uatype = '234')
    assert p.get_hostname() == '01-234.0.abcd.abcd.abc.dab.radiodns.org'
    assert p.get_text_topic() == '/topic/dab/abc/abcd/abcd/0/01-234/text'
    assert p.get_image_topic() == '/topic/dab/abc/abcd/abcd/0/01-234/image'
    assert p.get_name() == 'BBC Radio 1'

def test_DabRadioStation_appty_uatype_missing_uatype():
    # appty and uatype must be supplied together.
    assert_raises(ValueError, DabRadioStation, name = 'BBC Radio 1', ecc = 'abc', eid = 'abcd', sid = 'abcd', scids = '0', appty = '01')

def test_DabRadioStation_appty_uatype_missing_appty():
    assert_raises(ValueError, DabRadioStation, name = 'BBC Radio 1', ecc = 'abc', eid = 'abcd', sid = 'abcd', scids = '0', uatype = '234')

def test_DabRadioStation_both_pa_and_appty_uatype():
    # pa is mutually exclusive with appty/uatype.
    assert_raises(ValueError, DabRadioStation, name = 'BBC Radio 1', ecc = 'abc', eid = 'abcd', sid = 'abcd', scids = '0', pa = 1023, appty = '01', uatype = '234')
|
991,063 | 2bcb2d5fdf0374f7f5267538c67d96db7786c895 | import random
from indy import ledger
from perf_load.perf_req_gen import RequestGenerator
class RGConfigChangeState(RequestGenerator):
    """Load-test request generator producing pool-config change requests."""

    # Ledger request type codes this generator is registered for.
    _req_types = ["111"]

    def _rand_data(self):
        # Random numeric string used as request seed data.
        # NOTE(review): this value is not used by _gen_req below — confirm
        # whether that is intentional.
        return str(random.randint(0, 99999999))

    async def _gen_req(self, submit_did, req_data):
        # Positional args: writes=True, force=False.
        return await ledger.build_pool_config_request(submit_did, True, False)
|
991,064 | d80f4da69a92d9e131b26f64d9fe0dff5b75ac47 | #https://github.com/micropython/micropython-lib/tree/master/umqtt.simple
#https://github.com/micropython/micropython-lib/blob/master/umqtt.simple/example_pub.py
#https://github.com/micropython/micropython-lib/pull/91#issuecomment-239030008
# wlan = network.WLAN(network.STA_IF)
# while not wlan.isconnected():
# utime.sleep(1)
def conectar():
    """Poll an MFRC522 RFID reader forever and publish each detected card
    UID (hex, plus a trailing "0") to the MQTT topic /rfid/normal.

    Supports WiPy and ESP8266 boards; raises RuntimeError on others.
    """
    #import dht
    import mfrc522
    from os import uname
    import machine
    import time
    #d = dht.DHT11(machine.Pin(13))
    from umqtt.simple import MQTTClient
    #import micropython
    # Reader pin wiring differs per board.
    if uname()[0] == 'WiPy':
        rdr = mfrc522.MFRC522("GP14", "GP16", "GP15", "GP22", "GP17")
    elif uname()[0] == 'esp8266':
        rdr = mfrc522.MFRC522(0, 2, 4, 5, 14)
    else:
        raise RuntimeError("Unsupported platform")
    # MQTT broker settings.
    SERVER = "10.6.1.112"
    TOPIC = b"/rfid/normal"
    #TOPIC1 = b"/esp/dht/temp"
    #TOPIC2 = b"/esp/dht/hum"
    ID = "esp"
    USER = b"esp"
    PASSWORD = b"senhaesp"
    c = MQTTClient(ID, SERVER, user=USER, password=PASSWORD)
    #c.connect()
    while True:
        uid = ""
        # print("")
        # print("Place card before reader to read from address 0x08")
        # print("")
        (stat, tag_type) = rdr.request(rdr.REQIDL)
        if stat == rdr.OK:
            (stat, raw_uid) = rdr.anticoll()
            if stat == rdr.OK:
                #print("New card detected")
                #print(" - tag type: 0x%02x" % tag_type)
                #print("%02x%02x%02x%02x" % (raw_uid[0], raw_uid[1], raw_uid[2], raw_uid[3]))
                # Hex-encode the first 4 UID bytes, then append a "0".
                for i in range(0, 4):
                    uid = uid + "%02x" % raw_uid[i]
                uid = uid + "0"
                print(uid)
                # Connect/publish/disconnect per card read.
                c.connect()
                c.publish(TOPIC, b"%s" % uid)
                c.disconnect()
        # Legacy commented-out code below: tag block read/auth demo and
        # DHT temperature/humidity publishing.
        # if rdr.select_tag(raw_uid) == rdr.OK:
        #
        #     key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
        #
        #     if rdr.auth(rdr.AUTHENT1A, 8, key, raw_uid) == rdr.OK:
        #         print("Address 8 data: %s" % rdr.read(8))
        #         rdr.stop_crypto1()
        #     else:
        #         print("Authentication error")
        # else:
        #     print("Failed to select tag")
        #try:
            #c.connect()
            #d.measure()
            #temp = d.temperature()
            #hum = d.humidity()
            #print('Temp: %s' % temp)
            #print('Hum: %s' % hum)
            #c.publish(TOPIC, str(uid))
            #c.publish(TOPIC2, str(hum))
            #c.disconnect()
            #time.sleep(30) #30 seconds
        #finally:
            #c.disconnect()
|
991,065 | 5fe07a5d67c46d3850ca0e30fd59f27e4f4df9b7 | #!/usr/bin/env python2
from limiti import *
usage="""Generatore per "salti".
Parametri:
* N (primo numero in input)
Constraint:
* 1 <= N < %d
""" % MAXN
from sys import argv, exit, stderr
import os
from numpy.random import seed, random, randint
from random import choice, sample
if __name__ == "__main__":
    # Optional psyco JIT speed-up; silently skipped when unavailable.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass

    # Exactly one argument (N) is expected; otherwise show usage.
    if len(argv) != 2:
        print usage
        exit(1)

    N, = [int(x) for x in argv[1:]]
    # Enforce the documented constraint 1 <= N < MAXN.
    assert (1 <= N and N <= MAXN)
    print N
|
991,066 | 03748f954e72c92e8328a46cfa50c8cd0ac2a197 | # -*- coding:utf-8 -*-
import time
import cv2
t_start = time.time()
fps = 0
# ๅพ็่ฏๅซๆนๆณๅฐ่ฃ
# Face-detection helper for one camera frame.
def discern(img):
    """Detect faces in *img* with a Haar cascade, draw green boxes around
    them, overlay an FPS counter, and show the frame in the "Image" window.
    """
    global fps
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cap = cv2.CascadeClassifier(
        "Cascades/haarcascade_frontalface_default.xml"
    )
    faceRects = cap.detectMultiScale(
        gray, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50))
    if len(faceRects):
        for faceRect in faceRects:
            x, y, w, h = faceRect
            # BUG FIX: detectMultiScale yields (x, y, w, h); the opposite
            # corner is (x + w, y + h). The original swapped w and h,
            # drawing a wrong box for non-square detections.
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Compute and overlay the running FPS since program start.
    fps = fps + 1
    sfps = fps / (time.time() - t_start)
    cv2.putText(img, "FPS : " + str(int(sfps)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Image", img)
# ่ทๅๆๅๅคด0่กจ็คบ็ฌฌไธไธชๆๅๅคด
cap = cv2.VideoCapture(0)
while (1): # ้ๅธงๆพ็คบ
ret, img = cap.read()
# cv2.imshow("Image", img)
discern(img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release() # ้ๆพๆๅๅคด
cv2.destroyAllWindows() # ้ๆพ็ชๅฃ่ตๆบ |
991,067 | 31fff8ea911283dac3aa2a61892ab413c095aaf8 | # ไฝ1็ไธชๆฐ
# Count the 1-bits (Hamming weight) of an integer.
class Solution(object):
    def hammingWeight(self, n):
        """
        :type n: int
        :rtype: int
        """
        # One-liner alternative: return bin(n).count("1")
        # Kernighan's trick: n & (n - 1) clears the lowest set bit, so
        # the loop body runs exactly once per 1-bit.
        bits = 0
        while n:
            n &= n - 1
            bits += 1
        return bits
        # Shift-based alternative (slower: one iteration per bit position):
        # res = 0
        # while n != 0:
        #     res += n & 1
        #     n >>= 1
        # return res


print(Solution().hammingWeight(3))
|
991,068 | 894d3cdee4104276bb7de2d6cbbe0b0112530fe3 | # Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = 'replace with Web client ID'
|
991,069 | 5d3c53d1563bd4df71f90640b4b44b1399d1598c | print("Let's practice everything.")
print('You\'d need to know \'bout escapes with \\ that do \n newlines and \
t tabs.')
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print("-----------------")
print(poem)
print("-----------------")
five = 10 - 2 + 3 - 6
print("This should be five: {five}".format(five=five))
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print("With a starting point of: {sp}".format(sp=start_point))
print("We'd have {b} beans, {j} jars and {c} crates.".format(b=beans, j=jars, c=crates))
start_point = start_point / 10
print("We can also do that this way:")
t = secret_formula(start_point)
print("We'd have {b} beans, {j} jars and {c} crates.".format(b=t[0], j=t[1], c=t[2]))
|
991,070 | af8401e5fab460f20603afb5cee6a79e673622e9 | #!/usr/bin/python
# -*- coding: utf-8 -*-.
# Mapper Input
# The input is a 2-element list: [document_id, text],
# where document_id is a string representing a document identifier and text is a string representing the text of the document.
# The document text may have words in upper or lower case and may contain punctuation.
# You should treat each token as if it was a valid word; that is, you can just use value.split() to tokenize the string.
# Reducer Output
# The output should be a (word, document ID list) tuple where word is a String and document ID list is a list of Strings.
import MapReduce
import sys
"""
Inverted index in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    """Emit an intermediate (word, document_id) pair for every token.

    record: a 2-element list [document_id, text]; the text is tokenized
    with a plain whitespace split, so punctuation stays attached.
    """
    doc_id = record[0]
    text = record[1]
    for token in text.split():
        mr.emit_intermediate(token, doc_id)
def reducer(key, list_of_values):
    """Emit (word, list of unique document IDs) for one word.

    e.g. reducer("history", (12, 41, 123, ..., 121))
    """
    unique_ids = set(list_of_values)
    mr.emit((key, list(unique_ids)))
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # Usage: python <script> <input-file>; the framework streams the input
    # records through mapper and reducer.
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
|
991,071 | 12859a199d393efad1835e88389dab661d69265f | '''
A collection of commonly used regular expressions is listed below. It includes:
* Regular float number, like ``1.0``, ``0.9``
* Scientific float number, like ``1.0e4``
* Float number with percentage, like ``93.75%``
'''
import re
# Used to match regular float number, e.g. -0.1
# FIX: all patterns are now raw strings. Non-raw '\%', '\w', '\-' etc. are
# invalid string escapes (SyntaxWarning on modern Python, future error),
# and in reURL the non-raw '\\\w' collapsed to '\\' + literal 'w' instead
# of the intended escaped backslash followed by the \w word class.
reFloatNumber = re.compile(r'[-+]?[0-9]*\.?[0-9]*')
# Scientific notation, e.g. 1.0e4
reScientificFloatNumber = re.compile(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
# Percentage, e.g. 93.75%
rePercentageFloatNumber = re.compile(r'[0-9]*\.?[0-9]*\%')
# It's extremely ugly but very helpful.
reURL = re.compile(r"((([A-Za-z]{3,9}:(?:\/\/)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9\.\-]+|(?:www\.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9\.\-]+)((?:\/[\+~%\/\.\w\-_]*)?\??(?:[\-\+=&;%@\.\w_]*)#?(?:[\.\!\/\\\w]*))?)")
reEnglish = re.compile(r"[a-zA-Z][a-zA-Z\.,]*")
# Digits, English letters, and their mixtures (product/model codes).
reProductAndModel = re.compile(r"[0-9a-zA-Z][0-9a-zA-Z\.,-]*")
reDigit = re.compile(r"[0-9][0-9\.,]*%?")
reDigitTimeAndPercentage = re.compile(r"[0-9][0-9\.]*:[0-9][0-9\.]*")
from generalize import Generalize
|
991,072 | 31afe096a27986efd87208d78af3a2ebd7577d24 | ### Simple Array Sum - Solution
def simpleArraySum(arr):
    """Print and return the sum of *arr*.

    Still prints (the existing script relies on that) but now also
    returns the total so the function is usable programmatically.
    """
    total = sum(arr)
    print(total)
    return total
# Read the element count, then that many space-separated integers.
size = int(input())
arr = tuple(map(int, input().split()[:size]))
simpleArraySum(arr) |
991,073 | 08ab4e0219024e1392a477eb156297e62b56f721 | # Import Module --------------------------------------------------------------#
import doctest
# Test Suite Class Definition ------------------------------------------------#
def testToUpper(_in):
"""
>>> testToUpper('test')
'TEST'
"""
return _in.upper()
# Main -----------------------------------------------------------------------#
if __name__ == "__main__":
doctest.testmod()
|
991,074 | d59360f7aebc5885077834d95842ee0cc5085e29 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 28 17:07:19 2019
@author: Mark
"""
import numpy as np
import TicTacToeGame
from IBot import IBot
import pandas as pd
import random
import TQmodel_train
WIN_VALUE = 1.0 # type: float
DRAW_VALUE = 0.5 # type: float
LOSS_VALUE = 0.0 # type: float
def hash_value(board_state):
    """Return a 9-character string ID for a board.

    Positions 1..9 of *board_state* are concatenated (index 0 is unused);
    empty squares stay as spaces, e.g. " X O X   ".
    """
    return "".join(board_state[pos] for pos in range(1, 10))
def train_model(bot, nIterations, resolution = 50):
    """Train *bot* for *nIterations* games against randomly chosen opponents.

    bot: the bot to be trained.
    nIterations: number of training games (roughly 20'000 are needed to
        train against all 3 opponent bots).
    resolution: spacing (in games) between data points on the progress
        graph's game_counter axis.
    Returns the training-progress DataFrame *progress_data*.
    """
    # Track training progress
    progress_data = pd.DataFrame()
    # Opponent bots to train against; re-rolled every *resolution* games.
    modes = ["hard", "easy", "random"]
    selected_mode = random.choice(modes)
    # Train the bot
    for game_counter in range(nIterations):
        if game_counter % resolution == 0:
            print("Game #: ", game_counter)
            print("Mode:", selected_mode)
            selected_mode = random.choice(modes)
            nWins = 0
            nDraws = 0
            nLosses = 0
            # Benchmark: play 50 games vs. the "easy" bot with Q updates
            # disabled (updateQ=False), so measurement does not train.
            for measGame in range(50):
                result = TQmodel_train.trainTQbot("easy", bot, updateQ = False, print_progress = False)
                if result == "won":
                    nWins += 1
                elif result == "draw":
                    nDraws += 1
                else:
                    nLosses += 1
            # One progress row per measurement point.
            # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4
            # — confirm the pinned pandas version.
            progress_data = progress_data.append({"game_counter": game_counter,
                                                  "nWins": nWins,
                                                  "nDraws": nDraws,
                                                  "nLosses": nLosses,
                                                  "winPercent": nWins / (nWins + nDraws + nLosses),
                                                  "drawPercent": nDraws / (nWins + nDraws + nLosses),
                                                  "lossPercent": nLosses / (nWins + nDraws + nLosses)},
                                                 ignore_index = True)
        # One actual training game (Q updates enabled); the result value
        # itself is unused here.
        result = TQmodel_train.trainTQbot(selected_mode, bot, updateQ = True, print_progress = False)
    return progress_data
class botTQ(IBot):
    """Tabular Q-learning tic-tac-toe bot.

    Keeps a Q table ``self.q`` mapping a 9-character board hash (see
    hash_value) to a 10-element list of action values — index 0 is an
    unused placeholder so board positions 1-9 index directly — plus the
    move history of the current game for the end-of-game Q update.
    """
    def __init__(self, alpha = 0.9, gamma = 0.95, q_init = 0.6):
        # alpha: learning rate; gamma: value discount; q_init: optimistic
        # initial Q value for unseen (board, move) pairs.
        self.q = {}
        self.move_history = []
        self.learning_rate = alpha
        self.value_discount = gamma
        self.q_init_val = q_init
    def get_q(self, board_hash):
        """Return (creating on first sight) the Q values for *board_hash*."""
        if board_hash in self.q:
            qvals = self.q[board_hash]
        else:
            # Index 0 is a dummy entry so positions 1-9 map directly.
            qvals = [0] + [self.q_init_val for i in range(1,10)]
            self.q[board_hash] = qvals
        return qvals
    def GetMove(self, game, letter):
        # IBot interface adapter: drop the score, return only the move.
        (move, score) = self.get_move(game.getBoard(), letter)
        return move
    def get_move(self, board_state, letter):
        """Pick the legal move with the highest Q value.

        Illegal moves encountered by argmax get Q = -1.0 so they are never
        chosen again for this board; the chosen (board_hash, move) pair is
        appended to move_history for the later Q update.
        Returns (move, qvals).
        """
        board_hash = hash_value(board_state)
        qvals = self.get_q(board_hash)
        freeMoves = TicTacToeGame.getFreePositions(board_state)
        while True:
            bestMove = np.argmax(qvals) # type: int
            if bestMove in freeMoves:
                self.q[board_hash] = qvals
                self.move_history.append((board_hash, bestMove))
                return bestMove, qvals
            else:
                # Mark the illegal move so argmax skips it next iteration.
                qvals[bestMove] = -1.0
    def update_Qfunction(self, result):
        """Back-propagate the game outcome through this game's moves.

        Walks move_history in reverse. The final move receives the terminal
        value (WIN/DRAW/LOSS_VALUE) directly; earlier moves get
        Q(S,A) <- Q(S,A)*(1-lr) + lr*gamma*max_a Q(S',a).
        NOTE(review): a *result* outside {"won","draw","lost"} leaves
        final_value unbound and raises NameError — confirm callers.
        """
        if result == "won":
            final_value = WIN_VALUE
        elif result == "draw":
            final_value = DRAW_VALUE
        elif result == "lost":
            final_value = LOSS_VALUE
        self.move_history.reverse()
        next_max = -1.0  # negative sentinel marks the terminal move
        for h in self.move_history:
            qvals = self.get_q(h[0])
            m = h[1]
            if next_max < 0:
                qvals[m] = final_value
            else:
                qvals[m] = qvals[m] * (1.0 - self.learning_rate) + self.learning_rate * self.value_discount * next_max
            self.q[h[0]] = qvals
            next_max = max(qvals)
    def reset_move_history(self):
        # Clear the per-game move log.
        self.move_history = []
991,075 | 2a6d7b01d1b88f793425a5413fae77e48f4e9d44 | # uvicorn server:app --reload
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import pybullet
from concurrent.futures import ThreadPoolExecutor
from robot import NoSuchControlType, robot
import asyncio
from logger import Logger
from dtos import *
app = FastAPI()

@app.on_event("startup")
async def startup_event():
    # Run the robot's simulation stepping loop alongside the HTTP server.
    asyncio.create_task(robot.step_in_background())

@app.get("/robot/state")
async def get_robot_state():
    """Return the robot's full current state."""
    return robot.get_full_state()

@app.post("/robot/joints")
async def post_joints(new_state: JointsState):
    """Apply a joint-space setpoint; invalid values yield {"error": ...}."""
    try:
        robot.set_control(new_state.scaling, new_state.ts, new_state.positions)
    except ValueError as err:
        return {"error": err.args[0]}
    return robot.get_full_state()

@app.post("/robot/joint_traj_lin")
async def joint_traj(traj: JointTrajLin):
    """Start a linearly time-scaled joint-space trajectory."""
    try:
        robot.set_traj_control_lin(traj.scaling, traj.traj)
    except ValueError as err:
        return {"error": err.args[0]}
    return robot.get_full_state()

@app.post("/robot/joint_traj_interp")
async def joint_traj_interp(traj: JointTrajInterp):
    """Start an interpolated joint-space trajectory."""
    try:
        robot.set_traj_control_interp(traj.interpolation, traj.traj)
    except ValueError as err:
        return {"error": err.args[0]}
    return robot.get_full_state()

@app.post("/robot/cart_traj")
async def cart_traj(traj: CartesianTraj):
    """Start an interpolated Cartesian trajectory."""
    try:
        robot.set_cart_traj(traj.interpolation, traj.traj)
    except ValueError as err:
        return {"error": err.args[0]}
    return robot.get_full_state()

@app.post("/robot/cart_traj_p2p")
async def cart_traj_p2p(traj: CartesianTrajP2p):
    """Start a point-to-point Cartesian trajectory."""
    try:
        robot.set_cart_traj_p2p(traj.motion_type, traj.traj)
    except ValueError as err:
        return {"error": err.args[0]}
    return robot.get_full_state()

@app.post("/robot/compute_ik")
async def compute_ik(cart_state: CartesianState):
    """Compute inverse kinematics for the given Cartesian pose."""
    return robot.get_inverse_kinematics(cart_state.pos, cart_state.orient)

@app.on_event("shutdown")
async def shutdown_event():
    # Tear down the physics client and flush the logger on server exit.
    pybullet.disconnect()
    Logger.close()

@app.exception_handler(NoSuchControlType)
async def unicorn_exception_handler(request: Request, exc: NoSuchControlType):
    """Map NoSuchControlType errors to an HTTP 400 response."""
    return JSONResponse(
        status_code=400,
        content={"message": f"No such control type: {exc.args[0]}"},
    )
|
991,076 | aaed9f6d2c9aab3eeddd57478fb733f7c8fe160c | # 2022.12.27-Changed for EDSR-PyTorch
# Huawei Technologies Co., Ltd. <wangchengcheng11@huawei.com>
# Copyright 2022 Huawei Technologies Co., Ltd.
# Copyright 2018 sanghyun-son (https://github.com/sanghyun-son/EDSR-PyTorch).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import mindspore as ms
from mindspore import nn
import mindspore.common.initializer as init
from GhostSR.unsupported_model.PixelShuffle import PixelShuffle
def exponential_decay(t, _init=10, m=200, finish=1e-2):
    """Exponential decay equal to *_init* at t=0 and *finish* at t=m."""
    alpha = np.log(_init / finish) / m
    shift = -np.log(_init) / alpha
    return np.exp(-alpha * (t + shift))
def sample_gumbel(size, eps=1e-20):
    """Draw Gumbel(0, 1) noise of shape *size* via -log(-log(U))."""
    # print('size.dtype: ', size[0].dtype)
    uniform_real = ms.ops.UniformReal()
    U = uniform_real(size)
    return -ms.ops.log(-ms.ops.log(U + eps) + eps)

def gumbel_softmax(weights, epoch):
    """Straight-through hard selection of the largest-|w| entry.

    Gumbel noise is annealed by 0.97**(epoch-1) to add exploration early
    in training.
    """
    noise_temp = 0.97 ** (epoch - 1)
    noise = sample_gumbel(weights.shape) * noise_temp
    y = weights + noise
    y_abs = y.abs().view(1, -1)
    y_hard = ms.ops.zeros_like(y_abs)
    y_hard[0, ms.ops.Argmax()(y_abs)] = 1
    y_hard = y_hard.view(weights.shape)
    # Straight-through estimator: forward pass uses the one-hot y_hard,
    # gradients flow through *weights*.
    # ret = (y_hard - weights).detach() + weights
    ret = ms.ops.stop_gradient(y_hard - weights) + weights
    return ret

def hard_softmax(weights):
    """One-hot mask selecting the largest-|w| entry (no noise)."""
    y_abs = weights.abs().view(1, -1)
    y_hard = ms.ops.ZerosLike()(y_abs)
    y_hard[0, ms.ops.Argmax()(y_abs)] = 1
    y_hard = y_hard.view(weights.shape)
    return y_hard
# Learned 3x3 "shift" convolution: the kernel is forced to a one-hot mask
# (via gumbel/hard softmax), so the op shifts the feature map per channel.
class ShiftConvGeneral(nn.Cell):
    def __init__(self, act_channel, in_channels=1, out_channels=1, kernel_size=3, stride=1,
                 padding=1, groups=1,
                 bias=False):
        super(ShiftConvGeneral, self).__init__()
        self.stride = stride
        self.padding = padding
        self.bias = bias
        # Epoch counter consumed by gumbel_softmax's noise annealing.
        self.epoch = 1
        self.act_channel = act_channel
        # self.w_out_channels = in_channels // groups
        self.kernel_size = kernel_size
        # Single shared kernel; it is tiled over channels in construct().
        self.weight = ms.Parameter(
            ms.Tensor(shape=(out_channels, in_channels // groups, kernel_size, kernel_size),
                      dtype=ms.float16,
                      init=init.HeUniform(negative_slope=math.sqrt(5))), requires_grad=True)
        if bias:
            # NOTE(review): ms.ops.Zeros(act_channel) constructs an op, not
            # a tensor — confirm the bias path is actually exercised.
            self.b = ms.Parameter(ms.ops.Zeros(act_channel), requires_grad=True)
        # self.reset_parameters()

    def reset_parameters(self):
        init.HeUniform(self.weight, a=math.sqrt(5))

    def construct(self, x):
        assert x.shape[1] == self.act_channel
        # Training uses noisy straight-through selection; inference uses
        # the deterministic one-hot mask.
        if self.training:
            w = gumbel_softmax(self.weight, self.epoch)
        else:
            w = hard_softmax(self.weight)
        w = w.astype(x.dtype)
        # Tile the one-hot kernel across channels and apply a depthwise
        # (group = channel count) convolution.
        w = ms.numpy.tile(w, (x.shape[1], 1, 1, 1))
        out = ms.ops.Conv2D(self.act_channel, self.kernel_size, stride=self.stride,
                            pad=self.padding, dilation=1,
                            pad_mode='pad', group=x.shape[1])(x, w)
        if self.bias:
            out += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3)
        return out
# Shift applied in a unified direction; supports ratio (lambda) != 0.5.
class GhostModule(nn.Cell):
    """Ghost convolution: a primary conv produces ceil(oup*ratio) channels,
    and the remaining channels are generated cheaply by shifting them."""
    def __init__(self, inp, oup, kernel_size, dir_num, ratio=0.5, stride=1, bias=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        init_channels = math.ceil(oup * ratio)
        new_channels = oup - init_channels
        self.primary_conv = nn.Conv2d(inp, init_channels, kernel_size, stride,
                                      pad_mode='pad', padding=kernel_size // 2, has_bias=bias)
        self.cheap_conv = ShiftConvGeneral(new_channels, 1, 1, kernel_size=3, stride=1, padding=1,
                                           groups=1, bias=False)
        self.concat = ms.ops.Concat(axis=1)
        self.init_channels = init_channels
        self.new_channels = new_channels

    def construct(self, x):
        # Take as many primary channels as the cheap branch needs; when
        # there are too few, replicate them first.
        if self.init_channels > self.new_channels:
            x1 = self.primary_conv(x)
            x2 = self.cheap_conv(x1[:, :self.new_channels, :, :])
        elif self.init_channels == self.new_channels:
            x1 = self.primary_conv(x)
            x2 = self.cheap_conv(x1)
        # elif self.init_channels < self.new_channels:
        else:
            x1 = self.primary_conv(x)
            # NOTE(review): Tensor.repeat semantics differ between
            # frameworks — confirm this tiles the channel axis as intended.
            x1 = x1.repeat(1, 3, 1, 1)
            x2 = self.cheap_conv(x1[:, :self.new_channels, :, :])
        out = self.concat([x1, x2])
        # Trim in case concatenation overshoots the requested channels.
        return out[:, :self.oup, :, :]
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Factory for a 'same'-padded 2D convolution."""
    return nn.Conv2d(
        in_channels, out_channels, kernel_size,
        pad_mode='pad', padding=(kernel_size // 2), has_bias=bias)

def default_ghost(in_channels, out_channels, kernel_size, dir_num, bias=True):
    """Factory for a GhostModule, mirroring default_conv's signature."""
    return GhostModule(
        in_channels, out_channels, kernel_size, dir_num, bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that shifts RGB values by the dataset mean
    (sign=-1 subtracts, sign=+1 adds back) scaled by *rgb_range*/std."""
    def __init__(
            self, rgb_range,
            rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1, pad_mode='valid', has_bias=True)
        std = ms.Tensor(rgb_std)
        # Identity kernel divided by the per-channel std; bias carries the
        # signed mean shift.
        self.weight.set_data(ms.ops.eye(3, 3, ms.float32).view(3, 3, 1, 1) / std.view(3, 1, 1, 1))
        self.bias.set_data(sign * rgb_range * ms.Tensor(rgb_mean) / std)
        # Freeze: the shift is a fixed pre/post-processing step.
        for p in self.get_parameters():
            p.requires_grad = False
class GhostResBlock(nn.Cell):
    """EDSR-style residual block built from Ghost convolutions:
    conv -> (bn) -> act -> conv -> (bn), scaled by res_scale, plus skip."""
    def __init__(self, conv, n_feats, kernel_size, dir_num=1, bias=True, bn=False, act=nn.ReLU(),
                 res_scale=1):
        super(GhostResBlock, self).__init__()
        m = []
        for i in range(2):
            m.append(conv(n_feats, n_feats, kernel_size, dir_num=dir_num, bias=bias))
            if bn:
                m.append(nn.BatchNorm2d(n_feats, momentum=0.9))
            # Activation only after the first convolution.
            if i == 0:
                m.append(act)
        self.body = nn.SequentialCell(m)
        self.mul = ms.ops.Mul()
        self.res_scale = res_scale

    def construct(self, x):
        res = self.mul(self.body(x), self.res_scale)
        # res = self.body(x) * self.res_scale
        res += x
        return res
class ConvResBlock(nn.Cell):
    """EDSR-style residual block with plain convolutions:
    conv -> (bn) -> act -> conv -> (bn), scaled by res_scale, plus skip."""
    def __init__(self, conv, n_feats, kernel_size, bias=True, bn=False, act=nn.ReLU(), res_scale=1):
        super(ConvResBlock, self).__init__()
        m = []
        for i in range(2):
            m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                m.append(nn.BatchNorm2d(n_feats, momentum=0.9))
            # Activation only after the first convolution.
            if i == 0:
                m.append(act)
        self.body = nn.SequentialCell(m)
        # BUG FIX: the scale was stored as an int32 tensor, which truncates
        # fractional residual scales (e.g. 0.1 -> 0). Store it as float32,
        # consistent with GhostResBlock's untruncated res_scale.
        self.res_scale = ms.Tensor(res_scale, dtype=ms.float32)

    def construct(self, x):
        res = self.body(x).mul(self.res_scale)
        res += x
        return res
class Upsampler(nn.SequentialCell):
    """Sub-pixel (PixelShuffle) upsampling stack for scales 2^n and 3."""
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?
            # One conv+PixelShuffle(2) stage per factor of 2.
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(n_feats, momentum=0.9))
                if act == 'relu':
                    m.append(nn.ReLU())
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))
        elif scale == 3:
            # Single conv+PixelShuffle(3) stage.
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(n_feats, momentum=0.9))
            if act == 'relu':
                m.append(nn.ReLU())
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            # Other scales (e.g. 5) are unsupported.
            raise NotImplementedError
        super(Upsampler, self).__init__(m)
|
991,077 | 79efecaaa51cbfaad0450d319c71a6e530d4e719 | # coding: latin-1
'''
Voltage-dependent K+ channel demo (Brian simulator): compare a passive cell
with one carrying a voltage-gated K+ conductance.
'''
from brian import *
El=-70*mV
tau=20*ms
R=50*Mohm
C=tau/R # membrane capacitance
tauK=1*ms
va=-60*mV # half-activation voltage
k=3*mV # slope factor
#gK=1/R # maximal K+ conductance (here = leak conductance)
EK=-90*mV # reversal potential
eqs='''
dv/dt=(El-v)/tau + gK*m*(EK-v)/C : volt
#dv/dt=(El-v)/tau + gK*m*(EK-El)/C : volt
dm/dt=(minf-m)/tauK : 1 # ouverture du canal K+
minf=1/(1+exp((va-v)/k)) : 1
#minf=1/(1+exp((va-El)/k))+(v-El)* : 1
gK : siemens
'''
neurone=NeuronGroup(2,model=eqs)
neurone.v=El
neurone.m=neurone.minf
neurone.gK=[0/R,1/R]  # cell 0: channel off; cell 1: channel on
run(50*ms)  # settle to rest first
# NOTE(review): the monitors are created after the first run(), so the
# initial 50 ms is not recorded -- confirm this transient skip is intended.
Mv=StateMonitor(neurone,'v',record=True)
Mm=StateMonitor(neurone,'m',record=True)
run(10*ms)
neurone.v+=15*mV  # depolarizing voltage step
run(50*ms)
subplot(211)
plot(Mv.times/ms,Mv[0]/mV,'r')
plot(Mv.times/ms,Mv[1]/mV,'b')
ylabel('Vm (mV)')
subplot(212)
plot(Mm.times/ms,Mm[1])
ylabel('m')
xlabel('Temps (ms)')
show()
|
991,078 | f35d3dbfedeb135d05a4e92b768e78372334a560 | from collections import defaultdict
import numpy as np
import copy
import time
import sys
import json
import random
import csv
import socket
import math
from multiprocessing import Pool
from functools import partial
from help import send,receive
from sklearn import preprocessing
from phe import paillier
from phe import EncryptedNumber
import config
import random
'''
Modified methods:
find_min_dis_batch
'''
data_path = config._DATA_PATH
dim = -1  # feature dimensionality; set as a side effect of load_data()
def load_data():
    """Load the plaintext data matrix and record its dimensionality."""
    global dim
    points = np.loadtxt(data_path)
    dim = points.shape[1]
    return points
batch_size=config._BATCH_SIZE
K=None  # current cluster count; assigned in __main__ before each run
cpu_number=config._S1_CPU_NUMBER
class KMEANS:
    """Privacy-preserving k-means run by server S1 over Paillier-encrypted data.

    S1 holds the encrypted points and centers; sign tests and comparisons are
    outsourced over a socket to a second server S2, which is handed the
    private key at startup.
    """
    def __init__(self, n_cluster, epsilon=config._EQUAL_TOL, maxstep=config._MAX_STEP):
        self.n_cluster = n_cluster
        self.epsilon = epsilon  # convergence tolerance (not used by the loop below)
        self.maxstep = maxstep  # hard cap on update/reassign iterations
        self.N = None           # number of points; set in init_center
        self.centers = None     # centers: plaintext first, encrypted after initial_1
        self.server = None      # socket connection to S2
        self.pubkey, self.prikey = paillier.generate_paillier_keypair()
        self.cluster = defaultdict(list)  # label -> list of point indices
    def initial_0(self,data):
        # Connect to S2 and hand it the private key so it can decrypt for sign tests.
        t1_start = time.perf_counter()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ip = config._S2_IP
        port = config._S2_PORT
        server.connect((ip, port))
        self.server = server
        send(server, self.prikey)
        print("ๅๅๅฏ้ฅ็ๆถ้ด๏ผ")
        print(time.perf_counter() - t1_start)
        self.init_center(data)
        return
    def init_center(self,data):
        """Pick n_cluster random points as initial (plaintext) centers."""
        np.random.seed(1)  # fixed seed for reproducible initialization
        self.N = data.shape[0]
        random_ind = np.random.choice(self.N, size=self.n_cluster)
        # random_ind = [500, 1200, 2000, 2500]
        self.centers = [data[i] for i in random_ind]
        return
    def enc(self,number):
        # Paillier-encrypt one scalar (used as the worker for Pool.map).
        return self.pubkey.encrypt(number)
    def initial_1(self, data):
        """Set up the S2 link, then encrypt the centers and the full data set."""
        csh_start = time.perf_counter()
        self.initial_0(data)
        ci_enc_list = []
        pool = Pool(cpu_number)
        centers=np.array(self.centers).flatten()
        ci_enc_list=pool.map(self.enc, centers)
        Cdata_enc = np.array(ci_enc_list).reshape(K,dim)
        data=np.array(data).flatten()
        t3_start = time.perf_counter()
        # NOTE(review): if encryption fails, data_enc is undefined and the
        # return below raises -- the except only dumps the data for debugging.
        try:
            data_enc=pool.map(self.enc, data)
        except Exception as e:
            print(data)
        print("ๅ ๅฏๆถ้ด:")
        print(time.perf_counter() - t3_start)
        self.centers = Cdata_enc
        pool.close()
        pool.join()
        return np.array(data_enc).reshape(config._NUMBER, dim)
    def cal_dis1_one(self, data_enc_point):
        # NOTE(review): byte-identical duplicate of cal_dis1; appears unused here.
        Bi = data_enc_point - self.centers
        hx_Bi = np.empty((Bi.shape[0], Bi.shape[1]), dtype=EncryptedNumber)
        eps_Bi = np.empty((Bi.shape[0], Bi.shape[1]))
        # Bi----k*R
        for j in range(Bi.shape[0]):
            for k in range(Bi.shape[1]):
                r = [-1, 1]
                i = random.randint(0, 1)
                eps = r[i]
                eps_Bi[j, k] = eps
                hx_Bi[j, k] = eps * Bi[j, k]
        return hx_Bi, eps_Bi, Bi
    def cal_dis1(self, data_enc_point):
        """Mask each encrypted difference (point - center) with a random sign.

        Returns (masked differences, the masks, the raw encrypted differences).
        """
        Bi = data_enc_point - self.centers
        hx_Bi = np.empty((Bi.shape[0], Bi.shape[1]), dtype=EncryptedNumber)
        eps_Bi = np.empty((Bi.shape[0], Bi.shape[1]))
        # Bi----k*R
        for j in range(Bi.shape[0]):
            for k in range(Bi.shape[1]):
                r = [-1, 1]
                i = random.randint(0, 1)
                eps = r[i]
                eps_Bi[j, k] = eps
                hx_Bi[j, k] = eps * Bi[j, k]
        return hx_Bi, eps_Bi, Bi
    def cal_dis2(self, iter_data):
        """Recover |Bi| signs from S2's answers and accumulate an encrypted
        per-center distance.

        iter_data = (sign answers from S2, local eps masks, raw encrypted Bi).
        """
        res = np.empty((iter_data[0].shape[0], iter_data[0].shape[1]))
        d = []
        # dis_start = time.perf_counter()
        for j in range(iter_data[0].shape[0]):
            dj = self.pubkey.encrypt(0)
            for k in range(iter_data[0].shape[1]):
                # Undo the random-sign mask: combine S2's sign with our eps.
                if iter_data[1][j, k] == 1:
                    if iter_data[0][j, k] <= 0:
                        res[j, k] = -1
                    elif iter_data[0][j, k] == 1:
                        res[j, k] = 1
                else:
                    if iter_data[0][j, k] <= 0:
                        res[j, k] = 1
                    elif iter_data[0][j, k] == 1:
                        res[j, k] = -1
                dj += iter_data[2][j, k] * res[j, k]
            d.append(dj)
        return d
    def cal_dis(self, data_enc_point):
        """Encrypted distances from each point in the batch to every center,
        using one S2 round trip for the sign tests."""
        server = self.server
        pool = Pool(cpu_number)
        hx_Bi_eps_Bi_Bi = []
        cal_dis1_start=time.perf_counter()
        hx_Bi_eps_Bi_Bi.append(pool.map(self.cal_dis1, data_enc_point))
        pool.close()
        pool.join()
        hx_Bi = []
        eps_Bi = []
        Bi = []
        for i in hx_Bi_eps_Bi_Bi[0]:
            hx_Bi.append(i[0])
            eps_Bi.append(i[1])
            Bi.append(i[2])
        print("cal_dis,ciphertext_calculation_time:%f"%(time.perf_counter()-cal_dis1_start))
        # Ship the sign-masked Bi values to S2 for decryption / sign testing.
        tx_start=time.perf_counter()
        print('cal_dis,send/receive size:')
        send(server, hx_Bi)
        hx_Bi_sig = receive(server)
        print('cal_dis,socket_time:%f'%(time.perf_counter()-tx_start))
        cal_dis2_start=time.perf_counter()
        # Replace the masked values with S2's sign answers before unmasking.
        for i in range(len(hx_Bi_eps_Bi_Bi[0])):
            hx_Bi_eps_Bi_Bi[0][i] = list(hx_Bi_eps_Bi_Bi[0][i])
        for i in range(len(hx_Bi_eps_Bi_Bi[0])):
            hx_Bi_eps_Bi_Bi[0][i][0] = hx_Bi_sig[0][i]
        d_batch=[]
        pool=Pool(cpu_number)
        d_batch.append(pool.map(self.cal_dis2, hx_Bi_eps_Bi_Bi[0]))
        pool.close()
        # pool.terminate()
        pool.join()
        print("cal_dis,ciphertext_sign_time:%f"%(time.perf_counter() - cal_dis2_start))
        return d_batch[0]
    def compare(self, e1, e2):
        """Return the smaller of two encrypted values by asking S2 for the
        sign of the randomly sign-masked difference."""
        server = self.server
        ec = e1 - e2
        r = [-1, 1]
        i = random.randint(0, 1)
        eps = r[i]
        ec1 = eps * ec
        send(server, ec1)
        sig = receive(server)
        # NOTE(review): unlike the sig==1 branch, the sig==-1 branch ignores
        # eps -- looks asymmetric; verify against S2's answer convention.
        if sig==-1:
            res=e1
        elif sig==0:
            if eps==1:
                res=e1
            else:
                res=e2
        elif sig==1:
            if eps==1:
                res=e2
            else:
                res=e1
        return res
    def find_min_dis(self, d):
        """Sequentially reduce a list of encrypted distances to the minimum."""
        min_d = d[0]
        for i in range(1,len(d)):
            min_d=self.compare(min_d,d[i])
        return min_d
    def get_one(self,one):
        # Pairwise differences between one point's K distances plus random
        # sign masks (batched-comparison helper; appears unused here).
        sub_dis = []
        sub_eps = []
        list_r = [-1, 1]
        for j in range(K):
            sub_dis.append(np.array(one[j]) - np.array(one))
            sub_eps.append([random.choice(list_r) for i in range(K)])
        return sub_dis,sub_eps
    def find_min_dis_batch(self, d):
        """Column-wise minimum over the K distance columns for a whole batch,
        one S2 round trip per column.

        NOTE(review): the raw (unmasked) differences go to S2 here, unlike
        compare() which masks first -- confirm that is intended.
        """
        d = np.array(d)
        min_col = d[:,0]
        for cidx in range(1, K):
            start = time.perf_counter()
            current_col = d[:, cidx]
            diff_col = current_col - min_col
            socket_start = time.perf_counter()
            print('find_min_dist,l=%d,send/receive size:'%cidx)
            send(self.server, diff_col)
            diff_sign = np.array(receive(self.server))
            print('find_min_dist,l=%d,socket_time:%f'%(cidx,time.perf_counter()-socket_start))
            min_col[diff_sign<=0] = current_col[diff_sign<=0]
            print('find_min_dist,l=%d,time:%f'%(cidx,time.perf_counter()-start))
        return min_col
    def cfp(self, data):
        """Initial assignment: compute each batch's distances and drop each
        point index into its nearest center's cluster."""
        for ind in range(0,len(data),batch_size):
            print("batch index=%d"%(ind))
            dbatch = self.cal_dis(data[ind:ind+batch_size])
            min_d = self.find_min_dis_batch(dbatch)
            stage3_start=time.perf_counter()
            for i,d in enumerate(min_d):
                self.cluster[dbatch[i].index(d)].append(i+ind)
            print("reassign time:%f"%(time.perf_counter()-stage3_start))
    def updata_enc_center(self, enc_data):
        """Recompute each center as the (encrypted) mean of its member points."""
        start=time.perf_counter()
        for label, inds in self.cluster.items():
            if len(enc_data[inds]) !=0:
                self.centers[label] = np.mean(enc_data[inds], axis=0)
        print('update_enc_center time:%f'%(time.perf_counter()-start))
    def enc_divide(self,data):
        """Reassignment pass: re-evaluate every point against the new centers
        and move it between clusters when its nearest center changed."""
        tmp_cluster = copy.deepcopy(self.cluster)  # iterate a snapshot while mutating self.cluster
        for label, inds in tmp_cluster.items():
            data_label=data[inds]
            for ind in range(0, len(data_label), batch_size):
                print("batch index=%d"%(ind))
                dbatch = self.cal_dis(data_label[ind:ind + batch_size])
                min_d = self.find_min_dis_batch(dbatch)
                reassign_start = time.perf_counter()
                for i, d in enumerate(min_d):
                    new_label = dbatch[i].index(d)
                    if new_label == label: # label unchanged: nothing to move
                        continue
                    else:
                        self.cluster[label].remove(inds[i+ind])
                        self.cluster[new_label].append(inds[i+ind])
                print("reassign time:%f"%(time.perf_counter()-reassign_start))
    def enc_fit(self, data):
        """Full pipeline: key exchange + encryption, first assignment, then
        up to maxstep update/reassign iterations."""
        stage1_start=time.perf_counter()
        data_enc = self.initial_1(data)
        print("initialization time:%f"%(time.perf_counter()-stage1_start))
        stage2_start = time.perf_counter()
        self.cfp(data_enc)
        print("cfp time:%f"%(time.perf_counter() - stage2_start))
        step = 0
        while step < self.maxstep:
            step += 1
            print("step=%d"%step)
            self.updata_enc_center(data_enc)
            self.enc_divide(data_enc)
        self.server.close()
def test_enc(n):
    """Run encrypted k-means with n clusters on the module-level `data` and
    dump the predicted labels to a file next to the data set."""
    km = KMEANS(n)
    start=time.perf_counter()
    km.enc_fit(data)
    end=time.perf_counter()-start
    print(end)
    # Flatten the cluster membership into (point index, label) pairs ...
    prec_label = []
    for lb, inds in km.cluster.items():
        for i in inds:
            prec_label.append([i, lb])
    dic = {}
    for number in prec_label:
        key = number[0]
        dic[key] = number[1]
    # ... then sort by point index to recover the per-point label order.
    s = sorted(dic.items(), key=lambda x: x[0])
    y_pred = []
    for si in s:
        y_pred.append(si[1])
    print(y_pred)
    with open(config._DATA_PATH+"_%d_pred"%K, "w") as pred_out:
        pred_out.write(str(y_pred))
    print("end")
if __name__ == '__main__':
    fdata = load_data()
    data = np.array(fdata)
    # Clamp values below the tolerance to exactly zero before encryption.
    data[data<config._EQUAL_TOL] = 0.
    # One full clustering run (and timing file) per configured K.
    for k in config._K:
        K = k  # module-level cluster count used throughout KMEANS
        start=time.perf_counter()
        test_enc(k)
        cost_t = time.perf_counter()-start
        with open(config._DATA_PATH+"_%d_time"%k, 'w') as t_out:
            t_out.write(str(cost_t))
|
991,079 | 8bd13414208de63739288562a0f19e7788018e4c | import z2edit
from z2edit import Address
from z2edit.util import Tile, chr_clear, chr_copy, chr_swap
# Graphics arrangement (numbers in expressed in hex):
#
# Western Hyrule = bank 2/3
# Eastern Hyrule = bank 4/5
# P1 = bank 8/9
# P2 = bank a/b
# P3 = bank 12/13
# P4 = bank 14/15
# P5 = bank 16/17
# P6 = bank 18/19
# P7 = bank c/d
#
# The medicine is bank3, sprite $31 (chrs 30 and 31).
# In bank 5 (eastern hyrule), the Kid is in chrs 30 and 31.
# The magic container is in banks 3 and 5, sprite $83 (chrs 82 and 83).
# In the palace banks:
# chrs $9c,$9d are a comma and an empty space.
# chrs $b0,$b1 are a spike (never used).
# chrs $82,$83 are part of link-holding-up-an-item.
# In the overworkd banks:
# chrs $9c,$9d are a comma and a fragment of cave wall.
#
# Since the sprite IDs for the meds and MC are global, it wouild be best to
# make them the same in every bank. As such, we will move sprites thusly:
#
# Link holding up an item will replace the unused spike, and the magic container
# will replace that part of link.
#
# The medicine will overwrite the comma placement, and for overworlds, the
# cave wall will be moved into the medicine place.
def hack(config, edit, asm):
    """Relocate the medicine/kid/magic-container CHR tiles so their sprite IDs
    are consistent across every graphics bank (per the plan described in the
    file-header comment).

    Args:
        config: ROM configuration; returned unmodified.
        edit: ROM edit handle providing write/write_bytes (used with the
            chr_* helpers).
        asm: assembler handle (unused here).
    """
    # Clear a run of tiles in bank 0x11.
    for t in range(0x88, 0xc4):
        chr_clear(edit, Tile(0x11, t), True)
    # Swap the med/kid with comma/cave wall in overworld CHR banks.
    for bank in (3, 5):
        chr_swap(edit, Tile(bank, 0x9c), Tile(bank, 0x30))
        chr_swap(edit, Tile(bank, 0x9d), Tile(bank, 0x31))
    # Fix the overworld PRG banks after the cave wall move.
    edit.write(Address.prg(1, 0x8463), 0x31)
    edit.write(Address.prg(1, 0x846b), 0x31)
    edit.write(Address.prg(2, 0x8463), 0x31)
    edit.write(Address.prg(2, 0x846b), 0x31)
    # Copy the meds to the comma/blank in the palace banks.
    # Move tile $88 to $8d and clear tile $8b ($89,$8b,$8d appear unused).
    # This opens up tiles $88/$89 as location for an 8x16 sprite.
    # Copy link into $88 and over the spike ($b0), and the hc and mc over link.
    for bank in (0x09, 0x0b, 0x0d, 0x13, 0x15, 0x17, 0x19):
        chr_clear(edit, Tile(bank, 0x8b))
        chr_copy(edit, Tile(bank, 0x9c), Tile(3, 0x9c))
        chr_copy(edit, Tile(bank, 0x9d), Tile(3, 0x9d))
        chr_copy(edit, Tile(bank, 0x8d), Tile(bank, 0x88))
        chr_copy(edit, Tile(bank, 0x88), Tile(bank, 0x80))
        chr_copy(edit, Tile(bank, 0x89), Tile(bank, 0x81))
        chr_copy(edit, Tile(bank, 0xb0), Tile(bank, 0x82))
        chr_copy(edit, Tile(bank, 0xb1), Tile(bank, 0x83))
        chr_copy(edit, Tile(bank, 0x80), Tile(3, 0x80))
        chr_copy(edit, Tile(bank, 0x81), Tile(3, 0x81))
        chr_copy(edit, Tile(bank, 0x82), Tile(3, 0x82))
        chr_copy(edit, Tile(bank, 0x83), Tile(3, 0x83))
    # Fix the palace crystal statue after tile moves
    edit.write(Address.prg(4, 0x8391), 0x8d)
    edit.write(Address.prg(4, 0x83a5), 0x8d)
    # Rewrite the sprite table for the meds/kid:
    edit.write_bytes(Address.prg(-1, 0xeea9), bytes([0x9d, 0x9d]))
    edit.write_bytes(Address.prg(-1, 0xeea5), bytes([0x9d, 0x9d]))
    # Rewrite the sprite table for link-holding-up-item:
    edit.write_bytes(Address.prg(-1, 0xeb92), bytes([0x89, 0xb1]))
    # No config modification; just return the config back to caller.
    return config
|
991,080 | c762d426837882e75acadec666191436b33821aa | #AD1-1 - Questรฃo 1
# Main program: repeatedly read an integer until the user presses enter.
# Odd input  -> print the area and perimeter of a circle of that radius.
# Even input -> print every divisor of the number.
entrada = input("Digite um nรบmero inteiro ou tecle enter para sair: ")
pi = 3.1415
while entrada != "": # loop until the user enters an empty line
    raio = int(entrada)
    if raio %2 != 0:
        perimetro = 2*pi*raio
        area = pi*raio**2
        print(f"รrea e Perรญmetro do Cรญrculo de Raio {raio:d} sรฃo {area:.2f} e {perimetro:.2f}")
    elif raio %2 == 0:
        print(f"Divisores de {raio:d} sรฃo: ", end="")
        for divisao in range(1, raio + 1):
            if raio % divisao == 0:
                print(divisao, end=" ")
        print()
    entrada = input("\nDigite outro nรบmero inteiro ou tecle enter para sair: ")
991,081 | 17c8ebb74015e86a452a86702d8db207bf28d532 | ####################################################################################
#ๆณ1๏ผๅฉ็จๅ็ผ่กจ่พพๅผ่ฎก็ฎๆฅๅฎ็ฐ
#่พๅ
ฅ๏ผไปฅ็ฉบ็ฝไฝไธบๅ้
#่พๅบ๏ผไธไธช่ฎก็ฎ็ปๆ
###################################################################################
#line = input('Infix Expression: ')
line = '( ( 1 + 2 ) * ( 3 + 4 ) - 27 ) / ( 2 + 1 )'
value = suf_exp_evaluator(trans_infix_suffix(line))
print('ๆณ1๏ผ',value)
#ๆณ2๏ผ็ดๆฅ่ฎก็ฎ
#่ฟ็ฎ็ฌฆ็ไผๅ
็บง๏ผๆฌๅทไฝ็จ๏ผ็กฎๅฎๅฎๆๅ่ฟ็ฎ็ๆถๆบ๏ผๅๆไธช่ฟ็ฎ๏ผๆพๅฐๆญฃ็กฎ็่ฟ็ฎๅฏน่ฑก
#ๅฎไน็ๆๅจ
def tokens(text):
    """Yield the whitespace-separated tokens of *text*, one at a time."""
    yield from text.split()
# Operator precedence table and the set of recognized operator characters.
# '(' gets the lowest precedence so stacked operators never reduce past it.
priority = {'(': 1, '+': 3, '-': 3, '*': 5, '/': 5}  # precedence per operator
infix_operators = '+-*/()'  # characters treated as operators/parentheses
def infix_exp_evaluator(line):
    """Evaluate a space-separated infix expression with an operator stack and
    an operand list.

    NOTE(review): relies on `sstack` (a stack class) defined elsewhere in the
    original file, and on the sibling `caculate` helper.
    """
    st = sstack()
    exp = []
    # Cases for each token x:
    #   1. number   -> append to the operand list
    #   2. '('      -> push onto the operator stack
    #   3. ')'      -> reduce until the matching '(' is found
    #   4. operator -> first reduce any stacked operator of equal or higher
    #      precedence, then push x
    for x in tokens(line): # tokens is the generator defined above
        if x not in infix_operators:
            exp.append(float(x))
        elif st.is_empty() or x == '(': # '(' always enters the stack
            st.push(x)
        elif x == ')': # reduce back to the matching '('
            while not st.is_empty() and st.top() != '(':
                a = exp.pop()
                b = exp.pop()
                operator = st.pop()
                c = caculate(b,a,operator)
                exp.append(c)
            if st.is_empty(): # no '(' found: unmatched parenthesis
                raise SyntaxError("Missing '('.")
            st.pop() # drop the '('; the ')' never enters the stack
        else:
            # Stacked operators of >= precedence must run before x is pushed.
            while (not st.is_empty() and priority[st.top()] >= priority[x]):
                a = exp.pop()
                b = exp.pop()
                operator = st.pop()
                c = caculate(b,a,operator)
                exp.append(c)
            st.push(x)
    # Drain any remaining operators; a leftover '(' means unbalanced input.
    while not st.is_empty():
        if st.top() == '(':
            raise SyntaxError("Extra '('.")
        a = exp.pop()
        b = exp.pop()
        operator = st.pop()
        c = caculate(b,a,operator)
        exp.append(c)
    return exp.pop()
def caculate(b, a, operator):
    """Apply the binary *operator* ('+', '-', '*', '/') to operands b and a.

    Returns b <op> a. Raises ZeroDivisionError on division by zero and a
    generic Exception for an unrecognized operator.
    """
    if operator == '+':
        c = b + a
    elif operator == '-':
        c = b - a
    elif operator == '*':
        c = b * a
    elif operator == '/':
        if a == 0:
            # Bug fix: the original raised the undefined name
            # `zeroDivisionError`, which itself crashed with NameError
            # instead of signalling the intended division-by-zero error.
            raise ZeroDivisionError(str(b) + '/' + str(a))
        c = b / a
    else:
        raise Exception(str(b) + operator + str(a))
    return c
# Quick self-test of the direct (method 2) evaluator.
line = '( ( 1 + 2 ) * ( 3 + 4 ) - 27 ) / ( 2 + 1 )'
value = infix_exp_evaluator(line)
print('ๆณ2๏ผ', value) |
991,082 | 88974182893c6178d01505be789195f2fad0073b | # git commit -m "code: Solve boj 01107 ๋ฆฌ๋ชจ์ปจ (yoonbaek)"
# BOJ 01107 (broken remote control): reach the target channel with the fewest
# button presses, starting from channel 100. Brute force every dialable
# number; cost = digits typed + |target - number| (+/- presses afterwards).
if __name__ == "__main__":
    target_channel = input()
    length = len(target_channel)
    target_channel = int(target_channel)
    broken_num = int(input())
    broken_buttons = []
    if broken_num:
        broken_buttons = input().split()
    # Baseline: press only +/- from channel 100, typing no digits at all.
    worst = abs(target_channel-100)
    minimum = worst
    # One extra digit is always enough to overshoot the target from above.
    scope = 2*10**length
    scope = scope if scope < 1000000 else 1000000
    for num in range(scope):
        digits = str(num)
        # Skip any candidate that needs a broken button.
        for digit in digits:
            if digit in broken_buttons:
                break
        else:
            steps = len(digits) + abs(target_channel-num)
            minimum = minimum if steps > minimum else steps
    print(minimum)
991,083 | 00592e64fba09c36d70082301ee04500806e9115 | class Solution:
"""
@param nums: a list of integers.
@param k: length of window.
@return: the sum of the element inside the window at each moving.
"""
def winSum(self, nums, k):
if k == 0:
return []
n = len(nums)
firstIndex = 0
result = [sum(nums[firstIndex:firstIndex + k])]
if n <= k:
return result
lastIndex = k - 1
while lastIndex + 1 < n:
cur = result[-1] - nums[firstIndex] + nums[lastIndex + 1]
result.append(cur)
firstIndex += 1
lastIndex += 1
return result
# write your code here |
991,084 | af2e9659c006fe781e87ecf33ee000417526f22a | # Imports the Google Cloud client library
from google.cloud import language_v1
import math
# returns a tuple containing the normalized direction and magnitude of sentiment
# (out of five using gaussian model)
def sentimentDetection(tweet):
    """Score *tweet* with the Google Natural Language API and rescale the
    raw score and magnitude through Gaussian-shaped curves.

    The constants (3.7/.3 and 16.3/1.3) are hand-tuned normalization
    factors. Performs a network call and requires Google Cloud credentials.
    """
    client = language_v1.LanguageServiceClient()
    document = language_v1.Document(content=tweet, type_=language_v1.Document.Type.PLAIN_TEXT)
    sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment
    # Gaussian rescaling of the score (direction) and the magnitude.
    normDirection = (3.7 / (.3 * math.sqrt(2 * math.pi))) * math.e ** (-1 * (.5 * sentiment.score ** 2) / .09)
    normMagnitude = (16.3 / (1.3 * math.sqrt(2 * math.pi))) * math.e ** (-1 * (.5 * sentiment.magnitude ** 2) / 1.3 ** 2)
    # print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude))
    # print("Normalized Sentiment: {}, {}".format(normDirection, normMagnitude))
    return normDirection, normMagnitude
991,085 | 76c40e916d132590b56520b3d04e260748ea94d1 | import simplejson
import sys
reload(sys)  # Python 2 only: re-expose setdefaultencoding
sys.setdefaultencoding('utf-8')

# Extract every app id and name from a saved Steam "GetAppList" JSON dump
# and write them, one per line, to two text files.
a = "C:/Users/rafayk7/Documents/Steam_game_comparer/allgamedata.txt"
with open(a) as data:
    content = simplejson.load(data)

total_games = len(content["applist"]["apps"])

# Single pass over the app list instead of two separate index-based loops.
all_appids = []
all_appnames = []
for app in content["applist"]["apps"]:
    all_appids.append(app["appid"])
    all_appnames.append(app["name"])

# Bug fix: the output files were opened but never closed, so buffered data
# could be lost; context managers guarantee close/flush.
with open("allappnames.txt", 'w') as file_to_save2:
    for item in all_appnames:
        file_to_save2.write("%s\n" % item)

with open("allappids.txt", 'w') as file_to_save:
    for item in all_appids:
        file_to_save.write("%s\n" % item)
def getList(inputString):
    """Return the leading-whitespace width (indent) of each line in *inputString*."""
    # index(line.strip()) counts the characters before the stripped text,
    # i.e. the size of the leading indent (0 for blank lines).
    return [line.index(line.strip()) for line in inputString.split("\n")]
991,087 | f5af3b90dda5b1f6bc1ff1cbb36cc749dc3ac2bb | # Jason Shawn D Souza
# import numpy as np
# import math
# import os
# import time
#
# class KNN():
#
# def __init__(self):
# pass
#
# def splitarray(self,arr):
# return arr[:, :-1], arr[:, -1]
#
# def calculateDistances(self, training_data, test_instance):
# esubtract = np.subtract(training_data,test_instance)
# square = np.square(subtract)
# euclid_sum =np.sum(square,axis=1)
# euclidean_distances = np.sqrt(euclid_sum)
# return euclidean_distances, np.argsort(euclidean_distances)
#
# def prediction(self,training_data, euclidean_indices, knn_k_value):
# k_instance_class =training_data[euclidean_indices[:knn_k_value]][:, -1]
# if knn_k_value==1:
# if k_instance_class == 0:
# return 0
# if k_instance_class == 1:
# return 1
# if k_instance_class == 2:
# return 2
# else:
# class_0_count = len(k_instance_class[k_instance_class == 0])
# class_1_count = len(k_instance_class[k_instance_class == 1])
# class_2_count = len(k_instance_class[k_instance_class == 2])
# if class_2_count >= class_1_count:
# return 2
# if class_1_count >= class_0_count:
# return 1
# return 0
#
# def main():
# training_file = "data\\classification\\trainingData.csv"
# test_file = "data\\classification\\testData.csv"
# training_data = np.genfromtxt(training_file, delimiter=",")
# test_data = np.genfromtxt(test_file, delimiter=",")
# knn = KNN()
# training_data_values, training_data_class = knn.splitarray(training_data)
# test_data_values, test_data_class = knn.splitarray(test_data)
# test_ins = 0
# correct,incorrect =0,0
# knn_value = k =1
# for test_instance in test_data:
# test_instance_values = test_instance[:10]
# test_ins +=1
# distance,index = knn.calculateDistances(training_data_values,test_instance_values)
# prediction = knn.prediction(training_data,index,k)
# if prediction == test_instance[10]:
# correct +=1
# else:
# incorrect +=1
# print((correct/(correct+incorrect))*100)
# # print(test_ins)
#
# if __name__ == '__main__':
# main() |
991,088 | 3dd56b5003ecad39727335c963688f5ec306ff5a | from datetime import timedelta
from django.db import models
from django.utils import timezone
class Visit(models.Model):
    """A single logged HTTP request, with optional geo information."""
    datetime = models.DateTimeField()
    ip = models.CharField(max_length=15)
    method = models.CharField(max_length=6)
    url = models.CharField(max_length=1000)
    referer = models.CharField(max_length=1000, null=True, blank=True, default=None)
    querystring = models.TextField(null=True, blank=True, default=None)
    status = models.IntegerField()
    reason = models.CharField(max_length=64)
    country = models.CharField(max_length=100, null=True, default=None, blank=True)
    city = models.CharField(max_length=100, null=True, default=None, blank=True)

    def __unicode__(self):
        return self.ip

    def __str__(self):
        # Python 3 counterpart of __unicode__ (Django on py3 calls __str__).
        return self.ip

    @classmethod
    def get_views_today(cls):
        """Count all visits (page views, not unique) since midnight."""
        # Bug fix: also zero microseconds so the cutoff is exactly midnight.
        t_from = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
        return cls.objects.filter(datetime__gte=t_from).count()

    @classmethod
    def get_visitors_24(cls):
        """Count distinct visitor IPs seen in the last 24 hours."""
        # Bug fix: removed an unused `now` local variable.
        t_from = timezone.now() - timedelta(days=1)
        return cls.objects.filter(datetime__gte=t_from). \
            aggregate(models.Count('ip', distinct=True))['ip__count']

    @classmethod
    def get_visitors_today(cls):
        """Count distinct visitor IPs since midnight."""
        t_from = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
        return cls.objects.filter(datetime__gte=t_from). \
            aggregate(models.Count('ip', distinct=True))['ip__count']
991,089 | 7f554102f3dcfc0dc3df6a92b51ae736b11394da | """SQL database for the template_pattern.py file."""
import sqlite3

# Seed data: (salesperson, amount, year, model, new)
SALES_ROWS = [
    ('Tim', 16000, 2010, 'Honda Fit', 'true'),
    ('Eric', 7800, 2007, 'Ford Focus', 'false'),
    ('Chester', 15000, 2012, 'Opus X', 'true'),
    ('Kygo', 13000, 2011, 'Audi A5', 'true'),
    ('Prydz', 11000, 2009, 'Opel Astra', 'false'),
    ('Chris', 14000, 2005, 'Toyota Supra', 'true'),
    ('Armin', 13500, 2019, 'BMW X5', 'true'),
    ('Tim', 8000, 2004, 'Renault Symbol', 'false'),
    ('Eric', 14000, 2009, 'Ford Fiesta', 'true'),
]

# Bug fix: use the documented sqlite3.connect() factory instead of
# instantiating sqlite3.Connection directly, and guarantee the handle is
# closed even if an execute fails.
connection = sqlite3.connect("template_pattern.db")
try:
    connection.execute(
        "CREATE TABLE IF NOT EXISTS Sales (salesperson text, amt currency, year integer,"
        "model text, new boolean)"
    )
    # One parameterized bulk insert replaces nine duplicated INSERT strings.
    connection.executemany("INSERT INTO Sales VALUES (?, ?, ?, ?, ?)", SALES_ROWS)
    connection.commit()
finally:
    connection.close()
|
991,090 | d6e135d1d989b3b95b89bd34ef5c14ec2350a9b0 | """
@Time : 2021/3/30 22:19
@Author : Xiao Qinfeng
@Email : qfxiao@bjtu.edu.cn
@File : backbone_3d.py
@Software: PyCharm
@Desc :
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3x3(in_planes, out_planes, stride=1, bias=False):
    """3x3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv3d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=bias)
def conv1x3x3(in_planes, out_planes, stride=1, bias=False):
    """1x3x3 convolution (per-frame 2-D conv): no mixing along the first dim."""
    return nn.Conv3d(in_planes, out_planes,
                     kernel_size=(1, 3, 3),
                     stride=(1, stride, stride),
                     padding=(0, 1, 1),
                     bias=bias)
def downsample_basic_block(x, planes, stride):
    """Type-A shortcut: spatially downsample x and zero-pad channels to `planes`.

    Args:
        x: 5-D tensor (N, C, D, H, W).
        planes: target channel count (must be >= C).
        stride: stride for the 1x1x1 average pooling.
    Returns:
        Tensor of shape (N, planes, D', H', W') with the extra channels zero.
    """
    out = F.avg_pool3d(x, kernel_size=1, stride=stride)
    # Bug fix: the original allocated CPU float32 zeros and only moved them
    # to the GPU when the input was exactly torch.cuda.FloatTensor, breaking
    # for half precision / float64 / other devices. new_zeros matches both
    # the device and dtype of `out`.
    zero_pads = out.new_zeros(
        out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4))
    return torch.cat([out, zero_pads], dim=1)
class BasicBlock3d(nn.Module):
    """Standard 3-D residual block: two 3x3x3 conv/BN stages plus a skip path."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, track_running_stats=True, use_final_relu=True):
        super(BasicBlock3d, self).__init__()
        self.use_final_relu = use_final_relu
        self.conv1 = conv3x3x3(inplanes, planes, stride, bias=False)
        self.bn1 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes, bias=False)
        self.bn2 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the two conv/BN stages.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        if self.use_final_relu:
            y = self.relu(y)
        return y
class BasicBlock2d(nn.Module):
    """Per-frame (1x3x3) residual block: two 2-D conv/BN stages plus a skip path."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, track_running_stats=True, use_final_relu=True):
        super(BasicBlock2d, self).__init__()
        self.use_final_relu = use_final_relu
        self.conv1 = conv1x3x3(inplanes, planes, stride, bias=False)
        self.bn1 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv1x3x3(planes, planes, bias=False)
        self.bn2 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the two conv/BN stages.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        if self.use_final_relu:
            y = self.relu(y)
        return y
class Bottleneck3d(nn.Module):
    """3-D bottleneck residual block: 1x1x1 -> 3x3x3 (strided) -> 1x1x1 convs,
    expanding channels by `expansion` on the way out."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, track_running_stats=True, use_final_relu=True):
        super(Bottleneck3d, self).__init__()
        self.use_final_relu = use_final_relu
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * 4, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the three conv/BN stages.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        if self.use_final_relu:
            y = self.relu(y)
        return y
class Bottleneck2d(nn.Module):
    """Per-frame bottleneck block: 1x1x1 -> 1x3x3 (strided) -> 1x1x1 convs,
    expanding channels by `expansion` on the way out."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, track_running_stats=True, use_final_relu=True):
        super(Bottleneck2d, self).__init__()
        bias = False
        self.use_final_relu = use_final_relu
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=bias)
        self.bn1 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 3), stride=(1, stride, stride), padding=(0, 1, 1),
                               bias=bias)
        self.bn2 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
        self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=bias)
        self.bn3 = nn.BatchNorm3d(planes * 4, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        # Bug fix: the original guarded each BN call with `if self.batchnorm:`,
        # but `self.batchnorm` is never assigned anywhere in this file, so
        # every forward pass raised AttributeError. Apply batch norm
        # unconditionally, matching Bottleneck3d / BasicBlock2d.
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        if self.use_final_relu:
            out = self.relu(out)
        return out
class ResNet2d3d(nn.Module):
    """Mixed 2d/3d ResNet backbone.

    Early stages convolve per frame (BasicBlock2d); later stages mix across
    the first (temporal/depth) axis as well (BasicBlock3d). The constructor
    tracks the feature-map size analytically so the optional final FC head
    can be sized without a forward pass.
    """
    def __init__(self, input_channel, input_size, feature_dim, strides=None, block=None, layers=None, use_final_fc=True,
                 track_running_stats=True):
        super(ResNet2d3d, self).__init__()
        self.input_channel = input_channel
        self.input_size = list(input_size)  # (D, H, W); mutated below into the output size
        self.feature_dim = feature_dim
        self.use_final_fc = use_final_fc
        if strides is None:
            strides = [1, 1, 2, 2]
        if layers is None:
            layers = [2, 2, 2, 2]
        if block is None:
            block = [BasicBlock2d, BasicBlock2d, BasicBlock3d, BasicBlock3d]
        self.inplanes = 64
        self.track_running_stats = track_running_stats
        bias = False
        # Stem: per-frame 7x7 conv (spatial stride 2) + 3x3 max pool (stride 2).
        self.conv1 = nn.Conv3d(input_channel, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=bias)
        self.bn1 = nn.BatchNorm3d(64, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
        if not isinstance(block, list):
            block = [block] * 4
        self.layer1 = self._make_layer(block[0], 64, layers[0], stride=strides[0])
        self.layer2 = self._make_layer(block[1], 128, layers[1], stride=strides[1])
        self.layer3 = self._make_layer(block[2], 256, layers[2], stride=strides[2])
        self.layer4 = self._make_layer(block[3], 256, layers[3], stride=strides[3], is_final=True)
        # Size bookkeeping: stem + pool divide H and W by 4 ...
        self.input_size[1] = int(math.ceil(self.input_size[1] / 4))
        self.input_size[2] = int(math.ceil(self.input_size[2] / 4))
        # ... then each stage divides by its stride (2d blocks leave D alone).
        for idx, block_item in enumerate(block):
            if block_item == BasicBlock2d:
                self.input_size[1] = int(math.ceil(self.input_size[1] / strides[idx]))
                self.input_size[2] = int(math.ceil(self.input_size[2] / strides[idx]))
            elif block_item == BasicBlock3d:
                self.input_size[0] = int(math.ceil(self.input_size[0] / strides[idx]))
                self.input_size[1] = int(math.ceil(self.input_size[1] / strides[idx]))
                self.input_size[2] = int(math.ceil(self.input_size[2] / strides[idx]))
            else:
                # NOTE(review): only the two BasicBlock types are supported by
                # this bookkeeping; Bottleneck blocks would land here.
                raise ValueError
        if use_final_fc:
            self.final_fc = nn.Sequential(
                nn.ReLU(inplace=True),
                nn.Linear(self.input_size[0] * self.input_size[1] * self.input_size[2] * 256, feature_dim),
                nn.ReLU(inplace=True),
                nn.Linear(feature_dim, feature_dim)
            )
        # modify layer4 from exp=512 to exp=256
        # He initialization for convs; batch norms start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None: m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, is_final=False):
        """Stack `blocks` residual blocks; only the first one may downsample
        (and then gets a 1x1x1 projection shortcut)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # customized_stride to deal with 2d or 3d residual blocks
            if (block == Bottleneck2d) or (block == BasicBlock2d):
                customized_stride = (1, stride, stride)
            else:
                customized_stride = stride
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes, planes * block.expansion, kernel_size=1, stride=customized_stride, bias=False),
                nn.BatchNorm3d(planes * block.expansion, track_running_stats=self.track_running_stats)
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, track_running_stats=self.track_running_stats))
        self.inplanes = planes * block.expansion
        if is_final: # if is final block, no ReLU in the final output
            for i in range(1, blocks - 1):
                layers.append(block(self.inplanes, planes, track_running_stats=self.track_running_stats))
            layers.append(
                block(self.inplanes, planes, track_running_stats=self.track_running_stats, use_final_relu=False))
        else:
            for i in range(1, blocks):
                layers.append(block(self.inplanes, planes, track_running_stats=self.track_running_stats))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Residual stages
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Optional projection head to a fixed feature vector.
        if self.use_final_fc:
            x = x.view(x.size(0), -1)
            x = self.final_fc(x)
        return x
class Encoder3d(nn.Module):
    """Plain 3D-CNN encoder mapping a (N, C, T, H, W) volume to a feature vector.

    Args:
        input_size: (T, H, W) shape of one input volume, without channels.
        input_channel: number of input channels C.
        feature_dim: dimensionality of the output embedding.
        feature_mode: 'raw' halves the temporal axis at each of the four
            strided layers; 'freq' leaves the temporal axis untouched.
            The spatial axes are halved four times in either mode.

    Raises:
        ValueError: if ``feature_mode`` is neither 'raw' nor 'freq'.
    """

    def __init__(self, input_size, input_channel, feature_dim, feature_mode='raw'):
        super(Encoder3d, self).__init__()
        # Temporal strides of the four downsampling layers (heading conv,
        # maxpool, second conv group, third conv group); the spatial stride
        # at those layers is always 2.
        if feature_mode == 'raw':
            strides = (2, 2, 2, 2)
        elif feature_mode == 'freq':
            strides = (1, 1, 1, 1)
        else:
            raise ValueError('unknown feature_mode: {!r}'.format(feature_mode))
        self.features = nn.Sequential(
            # Heading conv layer
            nn.Conv3d(input_channel, 64, kernel_size=(3, 3, 3), stride=(strides[0], 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(strides[1], 2, 2), padding=(1, 1, 1)),
            # First group of conv layers (2d kernels, no stride)
            nn.Conv3d(64, 64, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
            nn.Conv3d(64, 128, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            # Second group of conv layers with stride (3d)
            nn.Conv3d(128, 128, kernel_size=3, stride=(strides[2], 2, 2), padding=1),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            # Third group of conv layers with stride (3d)
            nn.Conv3d(256, 256, kernel_size=3, stride=(strides[3], 2, 2), padding=1),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm3d(512),
            nn.ReLU(inplace=True)
        )
        # Track the output size after each of the four strided layers:
        # every stage halves H and W (stride 2, "same" padding -> ceil) and
        # divides T by the configured temporal stride, rounding up.
        # (Replaces the previous 12-line copy-pasted computation.)
        last_size = list(input_size)
        for temporal_stride in strides:
            last_size[0] = int(math.ceil(last_size[0] / temporal_stride))
            last_size[1] = int(math.ceil(last_size[1] / 2))
            last_size[2] = int(math.ceil(last_size[2] / 2))
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(512 * last_size[0] * last_size[1] * last_size[2], feature_dim),
            nn.BatchNorm1d(feature_dim),
            nn.ReLU(inplace=True),
            nn.Linear(feature_dim, feature_dim)
        )

    def forward(self, x):
        """Encode a batch of shape (N, C, T, H, W) into (N, feature_dim)."""
        out = self.features(x)
        out = out.view(out.shape[0], -1)
        out = self.fc(out)
        return out
if __name__ == '__main__':
    # Smoke test: run a frequency-mode encoder on a random dummy batch.
    encoder = Encoder3d(input_channel=1, input_size=(5, 32, 32), feature_dim=128, feature_mode='freq')
    features = encoder(torch.randn(32, 1, 5, 32, 32))
    print(features.shape)
|
991,091 | a3f9b1c38f64ced9d95a514b681d467d3c2f2c8e | import logging
# Component-wide constants.
DOMAIN = "tavos_water_outage"  # integration/component identifier
_LOGGER = logging.getLogger(__name__)  # module-level logger
# Config key; presumably the substring to watch for in outage notices --
# TODO confirm against the component's config schema.
MONITORED_STRING = "monitored_string"
|
991,092 | ecf86737420eb963c9844f74fbe9498331ae5ee0 | from bravado.client import SwaggerClient
from collections import Counter
from getGeneIdList import getGeneIdList
import xlsxwriter
# Connect to the cBioPortal REST API.
# Request/response validation is disabled because of incorrect error throwing
# observed with the service's Swagger spec.
cbioportal = SwaggerClient.from_url(
    'https://www.cbioportal.org/api/api-docs',
    config={"validate_requests": False, "validate_responses": False})

# Expose every resource under a lower-cased, underscored attribute name for
# easy access (e.g. `cbioportal.discrete_copy_number_alterations`).
for a in dir(cbioportal):
    cbioportal.__setattr__(a.replace(' ', '_').lower(), cbioportal.__getattr__(a))

# Studies to query: MSK-IMPACT 2017 is the largest query that contains
# correctly formatted categories.
studyIds = ["msk_impact_2017"]

# Entrez gene ids (from getGeneIdList.py) required for the API to work.
geneList = getGeneIdList()

# Fetch amplification and mutation events per study.
amplificationsInStudies = []
mutationsInStudies = []
for studyId in studyIds:
    print(studyId + "_all")
    print(studyId + "_cna")
    amplificationsInStudies.append(
        cbioportal.discrete_copy_number_alterations.fetchDiscreteCopyNumbersInMolecularProfileUsingPOST(
            discreteCopyNumberEventType="AMP",  # amplification events only
            discreteCopyNumberFilter={"entrezGeneIds": geneList, "sampleListId": studyId + "_all"},
            molecularProfileId=studyId + "_cna",  # GISTIC copy-number profile
            projection="DETAILED"  # include gene details for later reporting
        ).result())
    mutationsInStudies.append(
        cbioportal.mutations.fetchMutationsInMolecularProfileUsingPOST(
            mutationFilter={"entrezGeneIds": geneList, "sampleListId": studyId + "_all"},
            molecularProfileId=studyId + "_mutations",
            projection="DETAILED"
        ).result())

# Tally mutation events per gene across all queried studies.
mutation_counts = Counter()
for mutations in mutationsInStudies:
    mutation_counts += Counter([m.gene.hugoGeneSymbol for m in mutations])
print(mutation_counts.most_common(6))

# Tally amplification events per gene across all queried studies.
amplification_counts = Counter()
for amplifications in amplificationsInStudies:
    # BUG FIX: accumulate with += -- the previous plain assignment discarded
    # counts from all but the last study.
    amplification_counts += Counter([a.gene.hugoGeneSymbol for a in amplifications])
print(amplification_counts.most_common(5))

# Write genes with at least 10 events to an Excel workbook, one sheet per
# event type.
outWorkBook = xlsxwriter.Workbook("rnf_output.xlsx")
mutSheet = outWorkBook.add_worksheet("Mutations")
ampSheet = outWorkBook.add_worksheet("Amplifications")
mutSheet.write("A1", "Gene")
mutSheet.write("B1", "Mutation")
ampSheet.write("A1", "Gene")
ampSheet.write("B1", "Amplification")

# Print summary to terminal and fill the worksheets.
geneCount = 0
print("\nGenes with Substantial Mutations:\n")
for gene, tally in mutation_counts.most_common():
    if tally < 10:
        break
    print(gene, tally)
    mutSheet.write(geneCount + 1, 0, gene)
    mutSheet.write(geneCount + 1, 1, tally)
    geneCount += 1

geneCount = 0
print("\nGenes with Substantial Amplification:\n")
for gene, tally in amplification_counts.most_common():
    if tally < 10:
        break
    print(gene, tally)
    ampSheet.write(geneCount + 1, 0, gene)
    ampSheet.write(geneCount + 1, 1, tally)
    geneCount += 1

outWorkBook.close()
991,093 | 24a3c385c5b88fe8d6c34f9b43943b696b5564db | from flask import render_template
from app import app, headers
from flask import request
import requests, os
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/notify', methods=['POST'])
def notify():
    """Relay a push notification to a device via Firebase Cloud Messaging.

    Expects form fields:
        fcm_token -- the target device's FCM registration token.
        One of 'url', 'created' or 'closed', selecting the message type.

    Returns the raw FCM response body.
    """
    fcm_token = request.form['fcm_token']
    data = {}
    if 'url' in request.form:
        # New grading assignment: pass the submission URL along.
        data['url'] = request.form['url']
        data['message'] = 'You have been assigned to grade a new submission!'
        data['type'] = 'assignment'
    elif 'created' in request.form:
        data['message'] = 'Request created. Push notification, queue enquiry activated'
        data['type'] = 'activation'
    elif 'closed' in request.form:
        data['message'] = 'Request closed. Push notification, queue enquiry deactivated'
        data['type'] = 'deactivation'
    # NOTE(review): if none of the expected keys is present, an empty `data`
    # payload is still posted to FCM -- confirm whether that is intended.
    payload = {
        'to': fcm_token,
        'data': data
    }
    response = requests.post('https://fcm.googleapis.com/fcm/send', json=payload, headers=headers)
    return response.text
@app.route('/register', methods=['POST'])
def register():
    """Send a registration message pairing an FCM token with a Udacity token.

    Expects form fields 'fcm_token' and 'udacity_token'; returns the raw
    FCM response body.
    """
    fcm_token = request.form['fcm_token']
    udacity_token = request.form['udacity_token']
    payload = {
        'to': fcm_token,
        'data': {
            'type': 'registration',
            'token': udacity_token
        }
    }
    response = requests.post('https://fcm.googleapis.com/fcm/send', json=payload, headers=headers)
    return response.text
# Platform-assigned port (e.g. from a PaaS environment), defaulting to 5000.
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
    # Dev-server entry point; threaded=True allows concurrent requests.
    app.run(debug=True, host='0.0.0.0', port=int(port), threaded=True)
991,094 | f576769cd09868e815de6021d7094adb3f7dc5e0 | #-- coding: utf-8 --
# ******
# __author__ = 'chaoneng'
# __time__ =2017/4/25 9:59
# ******* |
991,095 | b9a9ac163f02e2b631047ef87a4a84752f2e082a | import os
import subprocess

# Install the Mosquitto MQTT broker, its command-line clients and the Python
# client library, then run the local configuration script.
# (-y added to the first apt-get for consistency with the second, so the
# script does not stall on an interactive prompt.)
os.system("sudo apt-get install -y mosquitto")
os.system("sudo apt-get install mosquitto-clients -y")
os.system("pip3 install paho-mqtt")
os.system("./configure.sh")

# Create the broker password file for a new user.
print("Username:")
username = input()
# SECURITY FIX: pass the username as a separate argv element (shell=False)
# so shell metacharacters in the input cannot be executed -- the previous
# version built a shell command string by concatenation.
subprocess.run(["mosquitto_passwd", "-c", "/etc/mosquitto/pwfile", username])

# Show whether the broker service is running.
os.system("sudo systemctl status mosquitto")
991,096 | 3c14c503f7b8d5000af42eec27f9505afe280164 | #!/usr/bin/python
import fileinput
import itertools
def first_repeated_frequency(changes):
    """Return the first cumulative sum reached twice while cycling `changes`.

    Advent of Code 2018 day 1, part 2.  BUG FIX: the starting frequency 0
    now counts as already seen -- the previous version never added it, so a
    first repeat at exactly 0 was missed.
    """
    seen = {0}
    total = 0
    for delta in itertools.cycle(changes):
        total += delta
        if total in seen:
            return total
        seen.add(total)


if __name__ == '__main__':
    nums = [int(ln) for ln in fileinput.input()]
    print(first_repeated_frequency(nums))
991,097 | b8a66e4cbf3b9c8c9822a5aa8be26ac0ecf79a77 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils.http import urlunquote
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.volumes.volumes \
import tables as volume_tables
from openstack_dashboard.test import helpers as test
# Volumes index URL, resolved once at import time.
INDEX_URL = reverse('horizon:project:volumes:index')
class VolumeAndSnapshotsAndBackupsTests(test.TestCase):
    """View tests for the project "Volumes" index page.

    Each test stubs the cinder/nova API calls with mox expectations,
    replays them, issues a GET against the index URL and checks the
    rendered template and table contents.
    """
    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_list',
                                     'volume_list_paged',
                                     'volume_snapshot_list',
                                     'volume_backup_supported',
                                     'volume_backup_list_paged',
                                     ),
                        api.nova: ('server_list',)})
    def test_index(self, instanceless_volumes=False):
        """Index renders; optionally with every volume detached."""
        vol_snaps = self.cinder_volume_snapshots.list()
        volumes = self.cinder_volumes.list()
        if instanceless_volumes:
            for volume in volumes:
                volume.attachments = []
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(False)
        api.cinder.volume_list_paged(
            IsA(http.HttpRequest), marker=None, search_opts=None,
            sort_dir='desc', paginate=True).\
            AndReturn([volumes, False, False])
        # server_list is only expected when volumes still have attachments
        if not instanceless_volumes:
            api.nova.server_list(IsA(http.HttpRequest), search_opts=None,
                                 detailed=False).\
                AndReturn([self.servers.list(), False])
        api.cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(vol_snaps)
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
    def test_index_no_volume_attachments(self):
        """Same as test_index, but with every volume detached."""
        self.test_index(instanceless_volumes=True)
    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_list_paged',
                                     'volume_backup_supported',
                                     'volume_snapshot_list'),
                        api.nova: ('server_list',)})
    def _test_index_paginated(self, marker, sort_dir, volumes, url,
                              has_more, has_prev):
        """Helper (not collected as a test): GET `url` with pagination stubs.

        Sets up expectations for one page request and returns the response
        so callers can inspect the rendered table.
        """
        backup_supported = True
        vol_snaps = self.cinder_volume_snapshots.list()
        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_list_paged(IsA(http.HttpRequest), marker=marker,
                                     sort_dir=sort_dir, search_opts=None,
                                     paginate=True).\
            AndReturn([volumes, has_more, has_prev])
        api.cinder.volume_snapshot_list(
            IsA(http.HttpRequest), search_opts=None).AndReturn(vol_snaps)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None,
                             detailed=False).\
            AndReturn([self.servers.list(), False])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
            AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(urlunquote(url))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        # reset stubs so this helper can be called repeatedly in one test
        self.mox.UnsetStubs()
        return res
    def ensure_attachments_exist(self, volumes):
        """Return a copy of `volumes` where every volume has an attachment."""
        volumes = copy.copy(volumes)
        for volume in volumes:
            if not volume.attachments:
                volume.attachments.append({
                    "id": "1", "server_id": '1', "device": "/dev/hda"})
        return volumes
    @override_settings(API_RESULT_PAGE_SIZE=2)
    def test_index_paginated(self):
        """Walk forward through the first/second/last pages of the table."""
        mox_volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
        size = settings.API_RESULT_PAGE_SIZE
        # get first page
        expected_volumes = mox_volumes[:size]
        url = INDEX_URL
        res = self._test_index_paginated(marker=None, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=False)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)
        # get second page
        expected_volumes = mox_volumes[size:2 * size]
        marker = expected_volumes[0].id
        next = volume_tables.VolumesTable._meta.pagination_param
        url = "?".join([INDEX_URL, "=".join([next, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)
        # get last page
        expected_volumes = mox_volumes[-size:]
        marker = expected_volumes[0].id
        next = volume_tables.VolumesTable._meta.pagination_param
        url = "?".join([INDEX_URL, "=".join([next, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=False, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)
    @override_settings(API_RESULT_PAGE_SIZE=2)
    def test_index_paginated_prev_page(self):
        """Walk backward ("previous" links) through the paginated table."""
        mox_volumes = self.ensure_attachments_exist(self.cinder_volumes.list())
        size = settings.API_RESULT_PAGE_SIZE
        # prev from some page
        expected_volumes = mox_volumes[size:2 * size]
        marker = expected_volumes[0].id
        prev = volume_tables.VolumesTable._meta.prev_pagination_param
        url = "?".join([INDEX_URL, "=".join([prev, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="asc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)
        # back to first page
        expected_volumes = mox_volumes[:size]
        marker = expected_volumes[0].id
        prev = volume_tables.VolumesTable._meta.prev_pagination_param
        url = "?".join([INDEX_URL, "=".join([prev, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="asc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=False)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)
|
991,098 | 4dc55b4fd56627cc54e648ea872e085d9065fc47 | magicians = ['alice','david','carolina']
# For-loop over a list; by convention the list name is plural and the loop
# variable is its singular form.
for magician in magicians:
    print(magician)

magicians = ['alice', 'david', 'carolina']
for magician in magicians:
    print(magician.title() + ", that was a great trick!")
    print("I can't wait to see your next trick, " + magician.title() + ".\n")
print("Thank you, everyone. That was a great magic show!")

# Exercises
# practice 1
pizzas = ['tomato', 'cheese', 'seafood']
for pizza in pizzas:
    # BUG FIX: space before "pizza!" (was "I like tomatopizza!")
    print("I like " + pizza + " pizza!\n")
print("I really like pizza!")

# practice 2
animals = ['cat', 'dog', 'deer']
for animal in animals:
    print(animal)
    # BUG FIX: space after "A" (was "Acat would make a great pet!")
    print("A " + animal + " would make a great pet!\n")
print("All of these animals would make a great pet!")
991,099 | d378d778c4d9bf9d5085feeb9e0dfb3be191484f | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# A script to update an XML file in order to:
# - Include a correct path to the DTD file
# - Include a correct path to some LTL-to-Buechi tool
import os
import sys
import resource
import subprocess
import signal
import xml.dom.minidom
# =====================================================
# Function for printing usage information for the tool
# =====================================================
def usage():
    """Print usage/help information for the tool and exit with status 1."""
    print "updatePathsInXML.py - A tool for updating an Unbeast specificiation XML file in order to contain correct paths to the syntax validation DTD file and the LTL-to-Bรผchi tool."
    print "(C) 2010 by Ruediger Ehlers."
    print ""
    print "Usage:"
    print " /path/to/updatePathsInXML.py <path/to/XMLFile> <path/to/ltl-to-buechitool/tool> <ltl-to-buechitool parameters>"
    print ""
    print "For details, please see the README file enclosed in the Unbeast distribution."
    sys.exit(1)
# Require at least the XML file and the LTL-to-Buechi tool arguments.
if len(sys.argv)<3:
    usage()
    sys.exit(1)
# =====================================================
# Try to read XML file
# =====================================================
filename = sys.argv[1]
try:
    xmlFile = xml.dom.minidom.parse(filename)
except IOError:
    print "Error: Failed to read the XML file '"+filename+"' - probably it does not exist or you do not have sufficient rights."
    sys.exit(1)
except xml.parsers.expat.ExpatError:
    # NOTE(review): unlike the IOError branch, this branch does not exit, so
    # the script continues with xmlFile undefined and will crash later --
    # confirm whether a sys.exit(1) is missing here.
    print "Error: Could not parse the XML file '"+filename+"' - probably it is not a valid XML file?"
# =====================================================
# Find the DTD file on disk
# =====================================================
# Build an absolute path to this script, then derive the DTD location by
# replacing the script filename with SynSpec.dtd in the same directory.
if (sys.argv[0][0]<>os.sep):
    sys.argv[0] = os.sep+sys.argv[0]
absolutePath = os.getcwd()+sys.argv[0]
pos = absolutePath.rfind("updatePathsInXML.py")
absolutePath = absolutePath[0:pos]+"SynSpec.dtd"
if not os.path.exists(absolutePath):
    print "Error: The file '"+absolutePath+"' could not be found. However, the Unbeast distribution contains the file 'SynSpec.dtd' in the same directory as this script. As this file name has been produced by concatenating the script location and this string, this means that you have moved this script or deleted that file. Please undo this for proper operation."
    sys.exit(1)
# Now change the data: point the DOCTYPE's system identifier at the DTD.
if xmlFile.doctype == None:
    print "Error: The file '"+filename+"' does not contain a DocType tag. Have a look at the README file of the Unbeast distribution for a valid input example"
    sys.exit(1)
xmlFile.doctype.systemId = absolutePath
# =====================================================
# Now try the LTL-to-Buechi converter
# =====================================================
# Run the converter once on a trivial formula and check that it prints a
# SPIN-style never claim ("never { ... }") on stdout.
cmdLine = " ".join(sys.argv[2:])+" '[] <> p' "
print "Trying to execute the LTL-to-Bรผchi converter: "+cmdLine
# Execute the converter
p = subprocess.Popen(cmdLine, shell=True,stdout=subprocess.PIPE,bufsize=1000000)
p.wait()
# Output & check result
outputOpened = False
outputClosed = False
for line in p.stdout:
    sys.stdout.write("O: "+line)
    if line.startswith("never {"):
        outputOpened = True
    if line.startswith("}"):
        outputClosed = True
if (not outputOpened) or (not outputClosed):
    print "Error: the LTL-to-Bรผchi converter specified does not yield a valid never claim. Note that both ltl2ba and spot need some additional parameters to run (which you need to supply):"
    print " - ltl2ba needs the parameter '-f'"
    print " - spot needs the paramter '-N' - you should also specify some simplification options, e.g., '-N -R3 -r7'"
    sys.exit(1)
# Alright? Then replace LTL-to-Buechi tool in XML file.
# Walk the top-level SynthesisProblem element and overwrite the text content
# of its PathToLTLCompiler child with the converter command line.
# NOTE(review): replacedPath is set but never checked afterwards, so a
# missing PathToLTLCompiler node goes unreported -- confirm intended.
replacedPath = False
for baseNode in xmlFile.childNodes:
    if baseNode.nodeType==baseNode.ELEMENT_NODE:
        if baseNode.nodeName<>"SynthesisProblem":
            print "Error: XML file seems invalid. Found a top-level '"+baseNode.nodeName+"' node."
            sys.exit(1)
        for basicNode in baseNode.childNodes:
            if basicNode.nodeType == basicNode.ELEMENT_NODE:
                if basicNode.nodeName=="PathToLTLCompiler":
                    # Search for a text node
                    if len(basicNode.childNodes)<>1:
                        print "Error: The PathToLTLCompiler XML node should only contain a single piece of text (no comments, etc.)"
                        sys.exit(1)
                    if basicNode.childNodes[0].nodeType!=basicNode.TEXT_NODE:
                        print "Error: The PathToLTLCompiler XML node should only contain a single piece of text (no comments, etc.)"
                        sys.exit(1)
                    replacedPath = True
                    basicNode.childNodes[0].nodeValue = " ".join(sys.argv[2:])
# =====================================================
# Write XML to file
# =====================================================
print "Writing back xml file..."
try:
    f = open(filename,'w')
except IOError:
    print "Error: Could not open '"+filename+"' for writing. Probably the file has the wrong permissions?"
    sys.exit(1)
try:
    f.write(xmlFile.toxml())
except IOError:
    print "Error: Writing to the XML file failed. Probably the disk is full?"
    # NOTE(review): this branch falls through, so the script still exits
    # with status 0 after a failed write -- confirm intended.
f.close()
sys.exit(0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.