Schema (column | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_* | same shape as the max_stars_repo_* block (path, name, head_hexsha, event datetimes: string; licenses: list; max_issues_count: int64) |
| max_forks_repo_* | same shape as the max_stars_repo_* block (max_forks_count: int64) |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |

Quality-signal columns, all suffixed `_quality_signal` and typed float64 unless noted, in schema order: qsc_code_num_words (int64), qsc_code_num_chars, qsc_code_mean_word_length, qsc_code_frac_words_unique, qsc_code_frac_chars_top_{2,3,4}grams, qsc_code_frac_chars_dupe_{5,6,7,8,9,10}grams, qsc_code_frac_chars_replacement_symbols, qsc_code_frac_chars_digital, qsc_code_frac_chars_whitespace, qsc_code_size_file_byte, qsc_code_num_lines, qsc_code_num_chars_line_max, qsc_code_num_chars_line_mean, qsc_code_frac_chars_alphabet, qsc_code_frac_chars_comments, qsc_code_cate_xml_start, qsc_code_frac_lines_dupe_lines, qsc_code_cate_autogen, qsc_code_frac_lines_long_string, qsc_code_frac_chars_string_length, qsc_code_frac_chars_long_word_length, qsc_code_frac_lines_string_concat, qsc_code_cate_encoded_data, qsc_code_frac_chars_hex_words, qsc_code_frac_lines_prompt_comments, qsc_code_frac_lines_assert, qsc_codepython_cate_ast, qsc_codepython_frac_lines_func_ratio, qsc_codepython_cate_var_zero (bool), qsc_codepython_frac_lines_pass, qsc_codepython_frac_lines_import, qsc_codepython_frac_lines_simplefunc, qsc_codepython_score_lines_no_logic, qsc_codepython_frac_lines_print.

The same 41 names appear again without the `_quality_signal` suffix as int64 columns, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null). Two trailing columns close the schema: effective (string) and hits (int64).
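A minimal loading sketch for a dump with this schema, assuming it is available as a Parquet file (the file name below is hypothetical, not part of this record):

```python
import pandas as pd

# Hypothetical file name; any slice with the schema above works the same way.
df = pd.read_parquet("code_dataset_slice.parquet")

# Each row is one source file plus its metadata and quality signals.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # the verbatim file contents
```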
hexsha: 1111178ffeca17f97dbc94edf513c3e6554c30c4 | size: 5,179 | ext: py | lang: Python
max_stars: myfunc.py @ dedo94/GUIGG (be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d) | licenses: ["MIT"] | count: 1 | event min/max: 2019-02-15T22:38:40.000Z / 2019-02-15T22:38:40.000Z
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: null | event min/max: null / null
content:
```python
import os
import platform
import sys
from os.path import relpath

sys.path.append('/usr/local/bin/dot')
sys.path.append('/usr/bin/dot')
from graphviz import Digraph

# data structure
class node:
    def __init__(self, id, istruction, *nxt_node):
        self.id = id
        self.ist = istruction
        self.next_node = []
        for i in nxt_node:
            self.next_node.append(i)
        # if the instruction carries the information in the expected format, split it up
        if str(istruction).count("->") == str(istruction).count(":") == 1:
            strz = str(istruction).split("->")  # parse istruction and split it into
            self.snd = strz[0].strip()          # sender
            strz = strz[1].split(":")
            self.recv = strz[0].strip()         # recipient
            self.msg = strz[1].strip()          # message
        else:
            self.snd = self.recv = self.msg = "null"

# builds the relative path
def pathfy(filepath):
    prgpath = os.path.dirname(os.path.abspath(__file__))
    pathz = relpath(filepath, prgpath)
    return pathz

# given a data structure and an id, returns the position of the node within it, if present
def id_to_pos(str_gr, id_node):
    for x in range(len(str_gr)):
        if str_gr[x].id == id_node:
            return x

# given a data structure and an instruction, returns the position of the node within it, if present
def ist_to_pos(str_gr, ist):
    for x in range(len(str_gr)):
        if str_gr[x].ist == ist:
            return x

# given a data structure and an instruction, returns the id of the associated node
def ist_to_id(str_gr, ist):
    for x in range(len(str_gr)):
        if str_gr[x].ist == ist:
            return str_gr[x].id

# given a data structure and a starting number, reassigns all ids starting from the given number
def reassign_id(str_gr, start_id):
    new_str_gr = []
    for el in range(len(str_gr)):
        if str_gr[el].ist != "start":  # skip the "start" node
            new_str_gr.append(node(int(str_gr[el].id) + start_id, str_gr[el].ist))
            for ele in range(len(str_gr[el].next_node)):
                new_str_gr[-1].next_node.append(int(str_gr[el].next_node[ele]) + start_id)
    return new_str_gr

# given a structure and an instruction, returns the nodes that precede the given one
def prec_node(graph, node_ist):
    graph_gr = graph
    pred = []
    id_nodo = -1
    for z in range(len(graph_gr)):
        if node_ist == graph_gr[z].ist:
            id_nodo = graph_gr[z].id
    if id_nodo == -1:
        print("Can't find node in that graph")
    else:
        for x in range(len(graph_gr)):
            for y in range(len(graph_gr[x].next_node)):
                if graph_gr[x].next_node[y] == id_nodo:
                    pred.append(graph_gr[x].id)
    return pred

# returns one past the maximum id contained in a structure
def max_id(str_gr):
    max = 0
    for x in range(len(str_gr)):
        if int(str_gr[x].id) > max:
            max = int(str_gr[x].id)
    max += 1
    return max

# prints a structure
def print_str(struct_gr, space):
    for k in range(len(struct_gr)):
        if space == 1:
            print("---")
        print(struct_gr[k].id)
        print(struct_gr[k].ist)
        print(struct_gr[k].next_node)

# given a structure and an id, returns the position
def find_pos(gr, id):
    for el in range(len(gr)):
        if int(gr[el].id) == int(id):
            return el
```
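A minimal usage sketch for the graph helpers above, assuming the file is importable as `myfunc` and the `graphviz` package is installed (its import runs at module load):

```python
from myfunc import node, prec_node, max_id

a = node(0, "A -> B: hello", 1)  # id 0, edge to node 1
b = node(1, "B -> A: world")

print(a.snd, a.recv, a.msg)                # A B hello
print(prec_node([a, b], "B -> A: world"))  # [0] - node 0 points at node 1
print(max_id([a, b]))                      # 2 (one past the largest id)
```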
avg_line_length: 43.158333 | max_line_length: 181 | alphanum_fraction: 0.448542
qsc_*_quality_signal values (schema order): 576 | 5,179 | 3.814236 | 0.230903 | 0.056896 | 0.02731 | 0.032772 | 0.28721 | 0.208921 | 0.187528 | 0.17888 | 0.166591 | 0.166591 | 0 | 0.004012 | 0.470554 | 5,179 | 119 | 182 | 43.521008 | 0.797228 | 0.189033 | 0 | 0.126582 | 0 | 0 | 0.018461 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126582 | false | 0 | 0.063291 | 0 | 0.303797 | 0.075949
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 1111834ed10ea00b3973a4e7b45b84a2fd41c455 | size: 2,466 | ext: py | lang: Python
max_stars: EllucianEthosPythonClient/ChangeNotificationUtils.py @ rmetcalf9/EllucainEthosPythonClient (6913322b1e583f655f67399f2baa763833583c27) | licenses: ["MIT"] | count: 1 | event min/max: 2021-02-09T22:05:50.000Z / 2021-02-09T22:05:50.000Z
max_issues: same path/repo/head/licenses | count: 1 | event min/max: 2020-07-02T11:44:54.000Z / 2020-07-02T11:45:38.000Z
max_forks: same path/repo/head/licenses | count: 1 | event min/max: 2021-01-13T21:35:11.000Z / 2021-01-13T21:35:11.000Z
content:
```python
from .ChangeNotificationMessage import ChangeNotificationMessage
import json

def requestBatchOfPagesAndReturnRemainingCountLib(
    pageLimit,
    lastProcessedID,
    clientAPIInstance,
    loginSession,
    processIndividualMessage
):
    params = {
        "limit": str(pageLimit)
    }
    if lastProcessedID is not None:
        params["lastProcessedID"] = lastProcessedID
    result = clientAPIInstance.sendGetRequest(
        url="/consume",
        params=params,
        loginSession=loginSession,
        injectHeadersFn=None
    )
    if result.status_code != 200:
        clientAPIInstance.raiseResponseException(result)
    remainingMessages = int(result.headers["x-remaining"])
    resultDict = json.loads(result.content)
    for curResult in resultDict:
        changeNotification = ChangeNotificationMessage(dict=curResult, clientAPIInstance=clientAPIInstance)
        processIndividualMessage(changeNotification=changeNotification)
    return remainingMessages

class ChangeNotificationIterator:
    clientAPIInstance = None
    loginSession = None
    pageLimit = None
    maxRequests = None
    requestsRemaining = None
    curIdx = None
    curResultList = None

    def __init__(self, loginSession, pageLimit, maxRequests, clientAPIInstance):
        self.clientAPIInstance = clientAPIInstance
        self.loginSession = loginSession
        self.pageLimit = pageLimit
        self.maxRequests = maxRequests
        self.requestsRemaining = self.maxRequests
        self.curIdx = 0
        self.curResultList = []

    def __iter__(self):
        self.requestsRemaining = self.maxRequests
        self.curIdx = 0
        self.curResultList = []
        return self

    def loadNewPageOfResults(self):
        self.curIdx = 0
        self.curResultList = []
        def processIndividualMessage(changeNotification):
            self.curResultList.append(changeNotification)
        requestBatchOfPagesAndReturnRemainingCountLib(
            pageLimit=self.pageLimit,
            clientAPIInstance=self.clientAPIInstance,
            loginSession=self.loginSession,
            processIndividualMessage=processIndividualMessage,
            lastProcessedID=None
        )

    def __next__(self):
        if self.curIdx >= len(self.curResultList):
            if self.requestsRemaining == 0:
                raise StopIteration
            self.requestsRemaining -= 1
            self.loadNewPageOfResults()
            if self.curIdx >= len(self.curResultList):
                # We tried getting a new page but there are still no results,
                # so terminate
                raise StopIteration
        retVal = self.curResultList[self.curIdx]
        self.curIdx += 1
        return retVal
```
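A minimal usage sketch for `ChangeNotificationIterator` with a stubbed client, assuming the package above (including its `ChangeNotificationMessage` class) is importable; the stub fakes only the pieces of the client API that `requestBatchOfPagesAndReturnRemainingCountLib` touches (`sendGetRequest` returning an object with `status_code`, `headers`, and `content`):

```python
import json

class StubResponse:
    status_code = 200
    headers = {"x-remaining": "0"}
    content = json.dumps([{"id": "1"}, {"id": "2"}])

class StubClient:
    def sendGetRequest(self, url, params, loginSession, injectHeadersFn):
        return StubResponse()

# With maxRequests=1, one page is fetched, its two messages are yielded, then
# the iterator stops.
it = ChangeNotificationIterator(loginSession=None, pageLimit=10,
                                maxRequests=1, clientAPIInstance=StubClient())
for message in it:
    print(message)
```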
avg_line_length: 27.098901 | max_line_length: 103 | alphanum_fraction: 0.744931
qsc_*_quality_signal values (schema order): 209 | 2,466 | 8.727273 | 0.344498 | 0.038377 | 0.018092 | 0.024671 | 0.123904 | 0.123904 | 0.070175 | 0.070175 | 0.070175 | 0 | 0 | 0.004467 | 0.182887 | 2,466 | 90 | 104 | 27.4 | 0.900744 | 0.029197 | 0 | 0.166667 | 0 | 0 | 0.016311 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.027778 | 0 | 0.263889 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 1112b034f98c7dc617526ab156487a42f2db45b6 | size: 1,821 | ext: py | lang: Python
max_stars: schafkopf/players/models/evaluate_calssifier.py @ Taschee/schafkopf (96c5b9199d9260b4fdd74de8a6e54805b407407b) | licenses: ["MIT"] | count: 10 | event min/max: 2018-07-30T14:02:25.000Z / 2022-01-19T23:48:31.000Z
max_issues: same path @ TimiH/schafkopf-1 (deafaa28d6cba866d097b4347dd84ce37b3b594d) | licenses: ["MIT"] | count: 1 | event min/max: 2018-08-12T07:25:51.000Z / 2018-08-27T21:04:04.000Z
max_forks: same path @ Taschee/schafkopf (96c5b9199d9260b4fdd74de8a6e54805b407407b) | licenses: ["MIT"] | count: 2 | event min/max: 2019-01-23T10:02:57.000Z / 2019-08-26T22:05:52.000Z
content:
```python
import keras
import numpy as np
from schafkopf.players.data.load_data import load_data_bidding
from schafkopf.players.data.encodings import decode_on_hot_hand
import matplotlib.pyplot as plt

x_test, y_test = load_data_bidding('../data/test_data.p')
x_train, y_train = load_data_bidding('../data/train_data.p')

modelpath = "bigger_classifier50.hdf5"
model = keras.models.load_model(modelpath)
predictions = model.predict_classes(x_test)

false_pred_list = []
pairs = [(i, j) for i in range(9) for j in range(9)]
false_counts = {pair: 0 for pair in pairs}

for pred, x, y in zip(predictions, x_test, y_test):
    y_ind = np.where(y == 1)[0][0]
    if pred != y_ind:
        false_pred_list.append((pred, y_ind))
        print('Predicted {} instead of {}'.format(pred, y_ind))
        print('Hand : ', decode_on_hot_hand(x))

num_false = len(false_pred_list)
print('Number of false predictions : ', num_false)

for pair in false_pred_list:
    false_counts[pair] += 1

fig, ax = plt.subplots(1, 1)
tick_labels = ['No game', 'Partner, bells', 'Partner, Leaves', 'Partner, Acorns',
               'Wenz', 'Solo, Bells', 'Solo, Hearts', 'Solo, Leaves', 'Solo, Acorns']
for y_pred, y_true in pairs:
    plt.scatter(y_pred, y_true, s=3*false_counts[(y_pred, y_true)], c='blue', alpha=0.6)
ax.set_xticks(np.arange(0, 9, 1))
ax.set_xticklabels(tick_labels, rotation='vertical', fontsize=11)
ax.set_yticks(np.arange(0, 9, 1))
ax.set_yticklabels(tick_labels, rotation='horizontal', fontsize=11)
ax.set_xlabel('Bidding network', fontsize=13)
ax.set_ylabel('Human player', fontsize=13)
ax.axis('equal')
plt.tight_layout()
plt.show()

test_scores = model.evaluate(x_test, y_test)
val_scores = model.evaluate(x_train, y_train)
print('Total Test accuracy : ', test_scores[1])
print('Total Train accuracy : ', val_scores[1])
```
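Note that `Sequential.predict_classes`, used above, was removed from Keras in TensorFlow 2.6; on newer versions the equivalent is an argmax over the predicted probabilities:

```python
import numpy as np

# Equivalent of model.predict_classes(x_test) on Keras versions that removed it.
predictions = np.argmax(model.predict(x_test), axis=-1)
```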
avg_line_length: 29.852459 | max_line_length: 88 | alphanum_fraction: 0.713344
qsc_*_quality_signal values (schema order): 298 | 1,821 | 4.137584 | 0.365772 | 0.024331 | 0.042174 | 0.024331 | 0.025953 | 0.025953 | 0.025953 | 0 | 0 | 0 | 0 | 0.019796 | 0.140033 | 1,821 | 60 | 89 | 30.35 | 0.767561 | 0 | 0 | 0 | 0 | 0 | 0.180617 | 0.013216 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121951 | 0 | 0.121951 | 0.121951
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 111a060dfd860a5ffaba0f5cb789e1d77010aef4 | size: 1,742 | ext: py | lang: Python
max_stars: PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py @ AsherVo/depthai-gui (f6d5da7c00f09239d07ff77dd2e4433d40e43633) | licenses: ["Apache-2.0"] | count: 46 | event min/max: 2021-01-05T13:41:54.000Z / 2022-03-29T09:47:20.000Z
max_issues: same path/repo/head/licenses | count: 7 | event min/max: 2021-01-29T22:26:05.000Z / 2022-02-24T10:16:35.000Z
max_forks: same path/repo/head/licenses | count: 10 | event min/max: 2021-03-11T15:00:40.000Z / 2022-03-24T02:28:39.000Z
content:
```python
from pathlib import Path

from common import DeviceNode, get_property_value
from PyFlow.Core.Common import *
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper

class NeuralNetworkNode(DeviceNode):
    def __init__(self, name):
        super(NeuralNetworkNode, self).__init__(name)
        self.input = self.createInputPin('in', 'AnyPin')
        self.blob = self.createInputPin('blob', 'StringPin')
        self.out = self.createOutputPin('out', 'NeuralTensorPin')
        self.blob.setInputWidgetVariant("FilePathWidget")
        self.input.enableOptions(PinOptions.AllowAny)
        self.input.enableOptions(PinOptions.AllowMultipleConnections)
        self.out.enableOptions(PinOptions.AllowMultipleConnections)

    @staticmethod
    def pinTypeHints():
        helper = NodePinsSuggestionsHelper()
        helper.addInputDataType('AnyPin')
        helper.addInputDataType('StringPin')
        helper.addOutputDataType('NeuralTensorPin')
        helper.addInputStruct(StructureType.Multi)
        helper.addOutputStruct(StructureType.Multi)
        return helper

    @staticmethod
    def category():
        return 'NeuralNetwork'

    @staticmethod
    def keywords():
        return []

    @staticmethod
    def description():
        return "Description in rst format."

    def build_pipeline(self, pipeline):
        detection_nn = pipeline.createNeuralNetwork()
        path = get_property_value(self, "blob")
        if path is None or len(path) == 0:
            raise RuntimeError(f"Blob file path must be set in the {self.name} node")
        detection_nn.setBlobPath(str(Path(path).resolve().absolute()))
        self.connection_map["out"] = detection_nn.out
        self.connection_map["in"] = detection_nn.input
```
avg_line_length: 35.55102 | max_line_length: 85 | alphanum_fraction: 0.695178
qsc_*_quality_signal values (schema order): 171 | 1,742 | 6.97076 | 0.450292 | 0.050336 | 0.026846 | 0.053691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000723 | 0.206085 | 1,742 | 48 | 86 | 36.291667 | 0.861171 | 0 | 0 | 0.1 | 0 | 0 | 0.103904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0.075 | 0.375 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 111d1cfa9500d15ba56748062cc1aaac7850fbbb | size: 1,800 | ext: py | lang: Python
max_stars: tests/unit/test_flask_app/test_boot.py @ andersoncontreira/flask-skeleton-python (4a3087cf94f387830850dc438338251da86c3cfb) | licenses: ["MIT"] | count: 1 | event min/max: 2021-08-11T21:29:50.000Z / 2021-08-11T21:29:50.000Z
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: null | event min/max: null / null
content:
```python
import os
import unittest

from flask_app.boot import load_dot_env, reset, is_loaded, load_env
from tests.unit.testutils import BaseUnitTestCase, get_function_name
from unittest_data_provider import data_provider

def get_env():
    return (None, True), ('dev', True), ('development', True), ('integration', True), ('staging', True), (
        'production', True)

def get_load_dot_env():
    return (None, True), ('dev', True), ('development', True), ('integration', False), ('staging', False), (
        'production', False)

class BootTestCase(BaseUnitTestCase):
    @data_provider(get_env)
    def test_load_env(self, env, expected):
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Chalice':
            reset()
            load_env(env)
            self.assertEqual(is_loaded(), expected)
        else:
            self.skipTest('test_load_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))

    @data_provider(get_load_dot_env)
    def test_load_dot_env(self, env, expected):
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Flask':
            # AWS Image condition
            if 'ENVIRONMENT_NAME' in os.environ:
                if env == os.environ['ENVIRONMENT_NAME']:
                    expected = True
            reset()
            load_dot_env(env)
            self.assertEqual(is_loaded(), expected)
        else:
            self.skipTest('test_load_dot_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))

if __name__ == '__main__':
    unittest.main()
```
avg_line_length: 34.615385 | max_line_length: 108 | alphanum_fraction: 0.632778
qsc_*_quality_signal values (schema order): 225 | 1,800 | 4.746667 | 0.257778 | 0.09176 | 0.05618 | 0.059925 | 0.533708 | 0.533708 | 0.531835 | 0.531835 | 0.531835 | 0.363296 | 0 | 0 | 0.235 | 1,800 | 51 | 109 | 35.294118 | 0.775599 | 0.010556 | 0 | 0.307692 | 0 | 0 | 0.177628 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 1 | 0.102564 | false | 0 | 0.128205 | 0.051282 | 0.307692 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 111dff9ff8122d79f8e2380fe35f3b04555c5059 | size: 805 | ext: py | lang: Python
max_stars: quadratic.py @ Varanasi-Software-Junction/Python-repository-for-basics (01128ccb91866cb1abb6d8abf035213f722f5750) | licenses: ["MIT"] | count: 2 | event min/max: 2021-07-14T11:01:58.000Z / 2021-07-14T11:02:01.000Z
max_issues: quadratic.py @ Maurya232Abhishek/Python-repository-for-basics (3dcec5c529a0847df07c9dcc1424675754ce6376) | licenses: ["MIT"] | count: 4 | event min/max: 2021-04-09T10:14:06.000Z / 2021-04-13T10:25:58.000Z
max_forks: quadratic.py @ Maurya232Abhishek/Python-repository-for-basics (3dcec5c529a0847df07c9dcc1424675754ce6376) | licenses: ["MIT"] | count: 2 | event min/max: 2021-07-11T08:17:30.000Z / 2021-07-14T11:10:58.000Z
content:
```python
def QuadraticRegression(px, py):
    # Least-squares fit of y = a + b*x + c*x**2 via the normal equations.
    sumy = 0
    sumx1 = 0
    sumx2 = 0
    sumx3 = 0
    sumx4 = 0
    sumxy = 0
    sum2y = 0
    n = len(px)
    for i in range(n):
        x = px[i]
        y = py[i]
        sumx1 += x
        sumy += y
        sumx2 += x*x
        sumx3 += x*x*x
        sumx4 += x*x*x*x
        sumxy += x*y    # sum of x*y (this accumulation was missing in the original)
        sum2y += x*x*y  # sum of x^2*y (this accumulation was missing in the original)
    # The second factor below read (n * sumx2 - sumx2 * sumy) in the original;
    # the normal equations need sum2y = sum of x^2*y there.
    p = (n * sumxy - sumx1 * sumy) * (n * sumx3 - sumx2 * sumx1) - (n * sum2y - sumx2 * sumy) * (n * sumx2 - sumx1 * sumx1)
    q = (n * sumx3 - sumx1 * sumx2) * (n * sumx3 - sumx2 * sumx1) - (n * sumx4 - sumx2 * sumx2) * (n * sumx2 - sumx1 * sumx1)
    c = (p/q)
    b = ((n*sumxy - sumx1*sumy) - c*(n*sumx3 - sumx1*sumx2))/(n*sumx2 - sumx1*sumx1)
    a = (sumy - b*sumx1 - c*sumx2)/n
    return a, b, c

x = [0, 1, 2, 3, 4]
y = [1, 1.8, 1.3, 2.5, 6.3]
print(QuadraticRegression(x, y))
```
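A quick cross-check of the repaired routine against NumPy's polynomial fit (a sketch; `numpy` is the only assumption beyond the file above). `np.polyfit(x, y, 2)` returns coefficients highest degree first, so it is reversed before comparing:

```python
import numpy as np

x = [0, 1, 2, 3, 4]
y = [1, 1.8, 1.3, 2.5, 6.3]

a, b, c = QuadraticRegression(x, y)  # from quadratic.py above
np.testing.assert_allclose([a, b, c], np.polyfit(x, y, 2)[::-1])
print(a, b, c)  # ~1.42, -1.07, 0.55 for this data
```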
avg_line_length: 25.967742 | max_line_length: 129 | alphanum_fraction: 0.468323
qsc_*_quality_signal values (schema order): 130 | 805 | 2.9 | 0.261538 | 0.03183 | 0.023873 | 0.127321 | 0.27321 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114943 | 0.351553 | 805 | 30 | 130 | 26.833333 | 0.60728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0 | 0 | 0.076923 | 0.038462
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 111e4289c2fc2ba12c2caeb52c1314824fc19de1 | size: 5,591 | ext: py | lang: Python
max_stars: config/atcoder-tools/custom_code_generator.py @ ay65535/dotfiles-sei40kr (32a930b0b3f08b15038c28f14e11b5f4ccf367fd) | licenses: ["MIT"] | count: null | event min/max: null / null
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: null | event min/max: null / null
content:
```python
from typing import Any, Dict, Optional

from atcodertools.codegen.code_style_config import CodeStyleConfig
from atcodertools.codegen.models.code_gen_args import CodeGenArgs
from atcodertools.codegen.template_engine import render
from atcodertools.fmtprediction.models.format import (Format, ParallelPattern,
                                                      Pattern, SingularPattern,
                                                      TwoDimensionalPattern)
from atcodertools.fmtprediction.models.type import Type
from atcodertools.fmtprediction.models.variable import Variable

class RustCodeGenerator:
    def __init__(self,
                 format_: Optional[Format[Variable]],
                 config: CodeStyleConfig) -> None:
        self._format = format_
        self._config = config

    def generate_parameters(self) -> Dict[str, Any]:
        if self._format is None:
            return dict(prediction_success=False)
        return dict(formal_arguments=self._formal_arguments(),
                    actual_arguments=self._actual_arguments(),
                    input_part=self._input_part(),
                    prediction_success=True)

    def _input_part(self):
        lines = []
        for pattern in self._format.sequence:
            var = pattern.all_vars()[0]
            if isinstance(pattern, SingularPattern):
                lines.append(self._generate_value_type_annotation(var))
            elif isinstance(pattern, ParallelPattern):
                lines.append(self._generate_array_type_annotation(var))
            elif isinstance(pattern, TwoDimensionalPattern):
                lines.append(self._generate_2darray_type_annotation(var))
            else:
                raise NotImplementedError
        lines.append('')
        lines.append('input! {')
        for pattern in self._format.sequence:
            var = pattern.all_vars()[0]
            input_indent = ' '
            if isinstance(pattern, SingularPattern):
                lines.append(input_indent + self._generate_value_input(var))
            elif isinstance(pattern, ParallelPattern):
                lines.append(input_indent + self._generate_array_input(var))
            elif isinstance(pattern, TwoDimensionalPattern):
                lines.append(input_indent + self._generate_2darray_input(var))
            else:
                raise NotImplementedError
        lines.append('};')
        return "\n".join(lines)

    def _generate_value_type_annotation(self, var: Variable):
        return 'let {name}: {type};'.format(
            type=self._to_rust_type(var.type),
            name=self._var_name(var.name, True))

    def _generate_array_type_annotation(self, var: Variable):
        return 'let {name}: Vec<{type}>;'.format(
            name=self._var_name(var.name, False),
            type=self._to_rust_type(var.type))

    def _generate_2darray_type_annotation(self, var: Variable):
        return 'let {name}: Vec<Vec<{type}>>;'.format(
            name=self._var_name(var.name, False),
            type=self._to_rust_type(var.type))

    def _to_rust_type(self, type_: Type):
        if type_ == Type.int:
            return 'i64'
        if type_ == Type.float:
            return 'f64'
        if type_ == Type.str:
            return 'Vec<char>'
        else:
            raise NotImplementedError

    def _generate_value_input(self, var: Variable):
        return '{name}: {type},'.format(
            name=self._var_name(var.name, True),
            type=self._to_input_type(var.type))

    def _generate_array_input(self, var: Variable):
        length = var.first_index.get_length()
        return '{name}: [{type}; {num_rows} as usize],'.format(
            name=self._var_name(var.name, False),
            type=self._to_input_type(var.type),
            num_rows=self._var_name(str(length), True) if length.is_variable_node() else length)

    def _generate_2darray_input(self, var: Variable):
        second_length = var.second_index.get_length()
        first_length = var.first_index.get_length()
        return '{name}: [[{type}; {num_cols} as usize]; {num_rows} as usize],'.format(
            name=self._var_name(var.name, False),
            type=self._to_input_type(var.type),
            num_cols=self._var_name(str(second_length), True) if second_length.is_variable_node() else second_length,
            num_rows=self._var_name(str(first_length), True) if first_length.is_variable_node() else first_length)

    def _to_input_type(self, type_: Type):
        if type_ == Type.int:
            return 'i64'
        if type_ == Type.float:
            return 'f64'
        if type_ == Type.str:
            return 'chars'
        else:
            raise NotImplementedError

    def _var_name(self, name: str, singular: bool) -> str:
        if singular:
            return name.lower()
        # `as` is a reserved word
        if name == 'A':
            return 'As'
        return name.lower() + 's'

    def _actual_arguments(self) -> str:
        """
        :return always empty string
        """
        return ''

    def _formal_arguments(self) -> str:
        """
        :return always empty string
        """
        return ''

class NoPredictionResultGiven(Exception):
    pass

def main(args: CodeGenArgs) -> str:
    code_parameters = RustCodeGenerator(
        args.format, args.config).generate_parameters()
    return render(
        args.template,
        mod=args.constants.mod,
        yes_str=args.constants.yes_str,
        no_str=args.constants.no_str,
        **code_parameters
    )
```
avg_line_length: 34.94375 | max_line_length: 117 | alphanum_fraction: 0.609909
qsc_*_quality_signal values (schema order): 618 | 5,591 | 5.247573 | 0.176375 | 0.034536 | 0.030527 | 0.027752 | 0.48967 | 0.458526 | 0.362936 | 0.272587 | 0.222017 | 0.19519 | 0 | 0.003518 | 0.288142 | 5,591 | 159 | 118 | 35.163522 | 0.811307 | 0.014309 | 0 | 0.319328 | 0 | 0 | 0.042522 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12605 | false | 0.008403 | 0.058824 | 0.033613 | 0.378151 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 111f8861f8a268462a9c22cdca35e2db764c3102 | size: 16,873 | ext: py | lang: Python
max_stars: crypto_futures_py/binance_futures.py @ bear2u/CryptoFuturesPy (9cfbf5f3a32b35a8a7cd53c2a3ded55d7b3c78d0) | licenses: ["MIT"] | count: 7 | event min/max: 2020-08-23T19:02:33.000Z / 2022-03-24T15:48:18.000Z
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: 1 | event min/max: 2021-09-15T04:17:04.000Z / 2021-09-15T04:17:04.000Z
content:
"""
This module contains an implementation for Binance Futures (BinanceFuturesExchangeHandler)
"""
from __future__ import annotations
import pandas as pd
import typing
import json
import logging
import pandas as pd
from datetime import datetime
from dataclasses import dataclass
from . import futurespy as fp
from . import AbstractExchangeHandler
class BinanceFuturesExchangeHandler(AbstractExchangeHandler):
exchange_information = fp.MarketData().exchange_info()
def __init__(self, public_key, private_key):
super().__init__(public_key, private_key)
self._client = fp.Client(
testnet=False, api_key=self._public_key, sec_key=self._private_key
)
self._orderId_dict = {}
self._clOrderId_dict = {}
self.logger = logging.Logger(__name__)
def get_symbols_data(self) -> typing.Dict[str, AbstractExchangeHandler.SymbolData]:
symbols_dict = {}
exchange_symbols_data = BinanceFuturesExchangeHandler.exchange_information[
"symbols"
]
for symbol_data in exchange_symbols_data:
min_volume = float(symbol_data["filters"][1]["minQty"])
max_volume = float(symbol_data["filters"][1]["maxQty"])
step_size = float(symbol_data["filters"][1]["stepSize"])
symbols_dict[symbol_data["symbol"]] = self.SymbolData(
min_volume=min_volume, max_volume=max_volume, step_size=step_size
)
return symbols_dict
def start_kline_socket(
self,
on_update: typing.Callable[[AbstractExchangeHandler.KlineCallback], None],
candle_type: str,
pair_name: str,
) -> None:
def _on_update(message):
candle = message["k"]
on_update(
self.KlineCallback(
time=pd.to_datetime(candle["t"], unit="ms"),
open=float(candle["o"]),
high=float(candle["h"]),
low=float(candle["l"]),
close=float(candle["c"]),
volume=float(candle["v"]),
final=candle["x"],
message=message,
)
)
ws = fp.WebsocketMarket(
symbol=pair_name,
on_message=lambda _, message: _on_update(message),
on_close=lambda _: self.start_kline_socket(
on_update, candle_type, pair_name
),
interval=candle_type,
)
ws.candle_socket()
def start_price_socket(
self,
on_update: typing.Callable[[AbstractExchangeHandler.PriceCallback], None],
pair_name: str,
) -> None:
def _on_update(message):
on_update(self.PriceCallback(float(message["p"])))
ws = fp.WebsocketMarket(
symbol=pair_name,
on_message=lambda _, message: _on_update(message),
on_close=lambda _: self.start_price_socket(on_update, pair_name),
)
ws.mark_price_socket()
def start_user_update_socket(
self, on_update: typing.Callable[[AbstractExchangeHandler.UserUpdate], None]
) -> None:
super().start_user_update_socket(on_update)
for data in self._client.balance():
on_update(self.BalanceUpdate(balance=data["balance"], symbol=data["asset"]))
for event in self._client.current_open_orders():
order_data = dict(
orderID=str(event["orderId"]),
client_orderID=str(event["clientOrderId"]),
status=event["status"],
symbol=event["symbol"],
price=float(event["price"]),
average_price=float(event["avgPrice"]),
fee=float(event["n"]) if "n" in event else 0,
fee_asset=event["N"] if "N" in event else "",
volume=float(event["origQty"]),
volume_realized=float(event["executedQty"]),
time=pd.to_datetime(event["time"], unit="ms"),
message=event,
)
self._register_order_data(order_data)
on_update(self.OrderUpdate(**order_data))
for position in self._client.position_info():
on_update(
self.PositionUpdate(
symbol=position["symbol"],
size=float(position["positionAmt"]),
value=float(position["positionAmt"])
* float(position["entryPrice"]),
entry_price=float(position["entryPrice"]),
liquidation_price=float(position["liquidationPrice"]),
)
)
def _on_update_recieved(message: typing.Dict[str, typing.Any]) -> None:
if message["e"] == "ACCOUNT_UPDATE":
for balance in message["a"]["B"]:
on_update(
self.BalanceUpdate(balance=balance["wb"], symbol=balance["a"])
)
for position in message["a"]["P"]:
on_update(
self.PositionUpdate(
symbol=position["s"],
size=float(position["pa"]),
value=float(position["pa"]) * float(position["ep"]),
entry_price=float(position["ep"]),
liquidation_price=float("nan"), # TODO
)
)
elif message["e"] == "ORDER_TRADE_UPDATE":
event = message["o"]
order_data = dict(
orderID=str(event["i"]),
client_orderID=str(event["c"]),
status=event["X"],
symbol=event["s"],
price=float(event["p"]),
average_price=float(event["ap"]),
fee=float(event["n"]) if "n" in event else 0,
fee_asset=event["N"] if "N" in event else "",
volume=float(event["q"]),
volume_realized=float(event["z"]),
time=pd.to_datetime(event["T"], unit="ms"),
message=message,
)
self._register_order_data(order_data)
on_update(self.OrderUpdate(**order_data))
self._client.user_update_socket(
on_message=lambda ws, message: _on_update_recieved(json.loads(message)),
on_close=lambda x: self.start_user_update_socket(on_update),
)
def _round_price(
self, symbol: str, price: typing.Optional[float]
) -> typing.Optional[float]:
for d in self.exchange_information["symbols"]:
if d["symbol"] == symbol:
price_precision = d["pricePrecision"]
break
else:
raise ValueError(f"{symbol} is not in exchange info")
return None if price is None else round(price, price_precision)
_T = typing.TypeVar("_T", float, None)
def _round_volume(self, symbol: str, volume: _T) -> _T:
for d in self.exchange_information["symbols"]:
if d["symbol"] == symbol:
quantity_precision = d["quantityPrecision"]
break
else:
raise ValueError(f"{symbol} is not in exchange info")
if (
not isinstance(volume, float)
and not isinstance(volume, int)
and volume is not None
):
raise ValueError
return (
None
if volume is None
else round(typing.cast(float, volume), quantity_precision)
)
@staticmethod
def get_pairs_list() -> typing.List[str]:
"""get_pairs_list Returns all available pairs on exchange
Returns:
typing.List[str]: The list of symbol strings
"""
return [
pair["symbol"]
for pair in BinanceFuturesExchangeHandler.exchange_information["symbols"]
]
async def load_historical_data(
self, symbol: str, candle_type: str, amount: int
) -> pd.DataFrame:
"""load_historical_data Loads historical klines from exchange
Args:
symbol (str): Pair name
candle_type (str): Exchange specific type of candles ("1m" for example)
amount (int): Number of klines to load
Returns:
pd.DataFrame: Dataframe with columns: Date, Open, High, Low, Close, Volume
"""
marketDataLoader = fp.MarketData(
symbol=symbol, interval=candle_type, testnet=False
)
data = marketDataLoader.load_historical_candles(count=amount).iloc[:-1]
data = data[["Date", "Open", "High", "Low", "Close", "Volume"]]
return data
async def create_order(
self,
symbol: str,
side: str,
price: typing.Optional[float],
volume: float,
client_ordID: typing.Optional[str] = None,
) -> AbstractExchangeHandler.NewOrderData:
"""create_order Place one limit or market order
Args:
symbol (str): Pair name, for which to place an order
side (str): "Buy" or "Sell"
price (typing.Optional[float]): If the price is set,
the price for limit order. Else - market order.
volume (float): The volume of the order
client_ordID (typing.Optional[str], optional): Client order_id.
Could be generated using generate_client_order_id(). Defaults to None.
Returns:
AbstractExchangeHandler.NewOrderData: Data of the resulting order.
"""
if client_ordID is None:
if price is not None:
result = self._client.new_order(
symbol=symbol,
side=side.upper(),
orderType="LIMIT",
quantity=self._round_volume(symbol, volume),
price=self._round_price(symbol, price),
timeInForce="GTX", # POST ONLY
)
else:
result = self._client.new_order(
symbol=symbol,
side=side.upper(),
quantity=self._round_volume(symbol, volume),
orderType="MARKET",
)
else:
self._user_update_pending(
client_ordID,
self._round_price(symbol, price),
self._round_volume(symbol, volume),
symbol,
side.upper(),
)
if price is not None:
result = self._client.new_order(
newClientOrderId=client_ordID,
symbol=symbol,
side=side.upper(),
orderType="LIMIT",
quantity=self._round_volume(symbol, volume),
price=self._round_price(symbol, price),
timeInForce="GTX", # POST ONLY
)
else:
result = self._client.new_order(
newClientOrderId=client_ordID,
symbol=symbol,
quantity=self._round_volume(symbol, volume),
side=side.upper(),
orderType="MARKET",
)
try:
return AbstractExchangeHandler.NewOrderData(
orderID=result["orderId"], client_orderID=result["clientOrderId"]
)
except:
if client_ordID is not None:
self._user_update_failed(client_ordID)
return AbstractExchangeHandler.NewOrderData(
orderID="", client_orderID=client_ordID
)
else:
raise
async def create_orders(
self,
symbol: str,
data: typing.List[typing.Tuple[str, float, float, typing.Optional[str]]],
) -> typing.List[AbstractExchangeHandler.NewOrderData]:
"""create_orders Create a lot of orders from one request (if the exchange supports it)
If the exchange does not support it, should create a parallel http requests, but it should be warned in docstring.
Args:
symbol (str): Pair name, for which to place orders
data (typing.List[typing.Tuple[str, float, float, typing.Optional[str]]]): The list of tuple params like in
create_order() - (side, price, volume, client_ordID), except price should not be None.
Returns:
typing.List[AbstractExchangeHandler.NewOrderData]: List of results
"""
orders: typing.List[typing.Dict[str, typing.Union[str, float]]] = [
{
"symbol": symbol,
"side": order_data[0].upper(),
"type": "LIMIT",
"quantity": self._round_volume(symbol, order_data[2]),
"price": typing.cast(float, self._round_price(symbol, order_data[1])),
# "timeInForce" : "GTX" # POST ONLY
}
if len(order_data) == 3 or order_data[3] is None
else {
"clOrdID": order_data[3],
"symbol": symbol,
"side": order_data[0].upper(),
"type": "LIMIT",
"quantity": self._round_volume(symbol, order_data[2]),
"price": typing.cast(float, self._round_price(symbol, order_data[1])),
# "timeInForce" : "GTX" # POST ONLY
}
for order_data in data
]
for order in orders:
self._user_update_pending(
client_orderID=str(order["clOrdID"]),
price=float(order["price"]),
volume=float(order["quantity"]),
symbol=str(order["symbol"]),
side=str(order["side"]),
)
results = []
orders_list = self._split_list(lst=orders, size=5)
for tmp_orders_list in orders_list:
results.append(self._client.place_multiple_orders(tmp_orders_list))
return [
AbstractExchangeHandler.NewOrderData(
orderID=result["orderID"], client_orderID=result["clOrdID"]
)
for result in results
]
async def cancel_order(
self,
order_id: typing.Optional[str] = None,
client_orderID: typing.Optional[str] = None,
) -> None:
"""cancel_order Cancel one order via order_id or client_orderID
Either order_id or client_orderID should be sent.
If both are sent, will use order_id.
Args:
order_id (typing.Optional[str], optional): Server's order id. Defaults to None.
client_orderID (typing.Optional[str], optional): Client's order id. Defaults to None.
"""
self._user_update_pending_cancel(
order_id=order_id, client_orderID=client_orderID
)
if order_id is not None and order_id in self._order_table_id:
self._client.cancel_order(
symbol=self._order_table_id[order_id]["symbol"], orderId=order_id
)
elif client_orderID is not None and client_orderID in self._order_table_clid:
self._client.cancel_order(
symbol=self._order_table_clid[client_orderID]["symbol"],
orderId=client_orderID,
clientID=True,
)
else:
raise ValueError(
"Either order_id of client_orderID should be sent, but both are None"
)
@staticmethod
def _split_list(lst, size):
return [lst[i : i + size] for i in range(0, len(lst), size)]
async def cancel_orders(self, orders: typing.List[str]) -> None:
"""cancel_orders Cancels a lot of orders in one requets
If the exchange does not support it, should create a parallel http requests, but it should be warned in docstring.
Args:
orders (typing.List[str]): The list of server's order_ids.
"""
for order_id in orders:
self._user_update_pending_cancel(order_id=order_id)
to_cancel_dict: typing.Dict[str, typing.List[str]] = {}
for order in orders:
order_symbol: str = self._order_table_id[order]["symbol"]
if order_symbol not in to_cancel_dict:
to_cancel_dict[order_symbol] = []
to_cancel_dict[order_symbol].append(order)
results = []
for symbol in to_cancel_dict.keys():
tmp_list = self._split_list(to_cancel_dict[symbol], 10)
for lst in tmp_list:
result = self._client.cancel_multiple_orders(
symbol=symbol, orderIdList=lst
)
results.append(result)
avg_line_length: 37.412417 | max_line_length: 122 | alphanum_fraction: 0.551295
qsc_*_quality_signal values (schema order): 1,759 | 16,873 | 5.08357 | 0.159181 | 0.018788 | 0.010736 | 0.016439 | 0.393872 | 0.327108 | 0.264147 | 0.245694 | 0.229143 | 0.194476 | 0 | 0.001819 | 0.348367 | 16,873 | 450 | 123 | 37.495556 | 0.81146 | 0.017543 | 0 | 0.319527 | 0 | 0 | 0.048647 | 0 | 0 | 0 | 0 | 0.002222 | 0 | 1 | 0.035503 | false | 0 | 0.029586 | 0.002959 | 0.100592 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 11207abb79cc5820b7575449526dbb965b477f82 | size: 1,971 | ext: py | lang: Python
max_stars: repsim/kernels/kernel_base.py @ wrongu/representational-similarity (adca614053973def176044437e6a064c04943ce0) | licenses: ["MIT"] | count: 2 | event min/max: 2022-03-23T21:24:21.000Z / 2022-03-24T04:18:30.000Z
max_issues: same path/repo/head/licenses | count: 3 | event min/max: 2022-03-23T19:35:58.000Z / 2022-03-24T04:20:29.000Z
max_forks: same path/repo/head/licenses | count: 1 | event min/max: 2022-03-23T19:16:19.000Z / 2022-03-23T19:16:19.000Z
content:
```python
import torch
from typing import Union, Iterable

def center(k: torch.Tensor) -> torch.Tensor:
    """Center features of a kernel by pre- and post-multiplying by the centering matrix H.

    In other words, if k_ij is dot(x_i, x_j), the result will be dot(x_i - mu_x, x_j - mu_x).

    :param k: a n by n Gram matrix of inner products between xs
    :return: a n by n centered matrix
    """
    n = k.size()[0]
    if k.size() != (n, n):
        raise ValueError(
            f"Expected k to be nxn square matrix, but it has size {k.size()}"
        )
    H = (
        torch.eye(n, device=k.device, dtype=k.dtype)
        - torch.ones((n, n), device=k.device, dtype=k.dtype) / n
    )
    return H @ k @ H

class Kernel(object):
    def __call__(
        self, x: torch.Tensor, y: Union[None, torch.Tensor] = None
    ) -> torch.Tensor:
        if y is None:
            y = x
        if x.size()[0] != y.size()[0]:
            raise ValueError("Mismatch in first dimension of x and y")
        return self._call_impl(x, y)

    def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError("Kernel._call_impl must be implemented by a subclass")

    def string_id(self):
        raise NotImplementedError("Kernel.name must be implemented by a subclass")

class SumKernel(Kernel):
    def __init__(self, kernels: Iterable[Kernel], weights=None):
        super(SumKernel, self).__init__()
        self.kernels = list(kernels)
        self.weights = (
            torch.tensor(weights)
            if weights is not None
            else torch.ones(len(self.kernels))
        )

    def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        tot = self.weights[0] * self.kernels[0](x, y)
        for w, k in zip(self.weights[1:], self.kernels[1:]):
            tot += k(x, y) * w
        return tot

    def string_id(self):
        return f"SumKernel[{'+'.join(k.string_id() for k in self.kernels)}]"
```
avg_line_length: 31.790323 | max_line_length: 93 | alphanum_fraction: 0.598681
qsc_*_quality_signal values (schema order): 297 | 1,971 | 3.872054 | 0.316498 | 0.114783 | 0.041739 | 0.057391 | 0.193913 | 0.17913 | 0.130435 | 0.086957 | 0.086957 | 0.086957 | 0 | 0.004899 | 0.274987 | 1,971 | 61 | 94 | 32.311475 | 0.79986 | 0.136479 | 0 | 0.095238 | 0 | 0 | 0.151281 | 0.019655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.047619 | 0.02381 | 0.357143 | 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 11207edc04bb4169510f36f55f71d608452b5ac2 | size: 6,253 | ext: py | lang: Python
max_stars: add_sensors.py @ gve-sw/gve_devnet_meraki_sensor_deployment (7add073bf3e2728f811ea8f5da80c138e3067af7) | licenses: ["RSA-MD"] | count: null | event min/max: null / null
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: null | event min/max: null / null
content:
```python
#!/usr/bin/env python3
"""Copyright (c) 2020 Cisco and/or its affiliates.

This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at

https://developer.cisco.com/docs/licenses

All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied."""
import csv
import sys
from meraki_functions import *
from env import *

sensors_file = open("sensors.csv", "r")
csv_reader = csv.DictReader(sensors_file)
networks = {}  # dictionary will map network names to network ids
networks_to_organizations = {}  # dictionary will map networks to their organizations
for row in csv_reader:
    org_id = getOrgID(base_url, headers, row["organization"])
    if org_id is None:
        # the {} placeholder was missing in the original format string
        print("No organization exists with the name {}".format(row["organization"]))
        sys.exit(1)
    net_id = getNetworkID(base_url, headers, org_id, row["network"])
    if net_id is None:
        print("No network exists in the organization with ID {} with the name {}".format(org_id, row["network"]))
        sys.exit(1)
    networks[row["network"]] = net_id  # the key row["network"] is the network name
    networks_to_organizations[net_id] = org_id

    serial = row["serial"]
    status_code = claimDevicesToNetwork(base_url, headers, net_id, [serial])
    if status_code != 200:
        print("{} Error".format(status_code))
        print("There was an error adding the device: {} to the network with ID {}.".format(serial, net_id))
        sys.exit(1)

    sensor_details = {
        "name": row["name"],
        "address": row["location"]
    }
    status_code = editDeviceDetails(base_url, headers, serial, sensor_details)
    if status_code != 200:
        print("{} Error".format(status_code))
        print("There was an error editing the device {} with these details: {}".format(serial, sensor_details))
        sys.exit(1)

    print("Sensor {} was added to network {}".format(serial, row["network"]))
sensors_file.close()

sensor_profile_file = open("sensors_to_profiles.csv", "r")
csv_reader = csv.DictReader(sensor_profile_file)
sensors_to_profiles = {}  # dictionary will map the sensors to the alert profiles they need
for row in csv_reader:
    alert_profile = row["alert_profile"]
    serial = row["sensor_serial"]
    if alert_profile in sensors_to_profiles.keys():
        # alert profile already in the dictionary: just add another sensor to the list
        sensors_to_profiles[alert_profile].append(serial)
    else:
        # new alert profile: create a key and assign it a new list with this
        # serial number as the first value
        sensors_to_profiles[alert_profile] = [serial]
sensor_profile_file.close()

alert_recipients_file = open("alert_recipients.csv", "r")
csv_reader = csv.DictReader(alert_recipients_file)
profiles_to_recipients = {}  # maps alert profiles to their recipients - a nested dictionary
'''
Data structure example
profiles_to_recipients = {
    "network name": {
        "alert profile": ["email", "email", "email"],
        "alert profile": ["email", "email", "email"]
    }
}
'''
for row in csv_reader:
    profile_name = row["alert_profile"]
    net_name = row["network"]
    recipient = row["email"]  # the recipient is defined by an email address
    if net_name in profiles_to_recipients.keys():
        # network already present: check whether the alert profile has also been seen
        if profile_name in profiles_to_recipients[net_name].keys():
            # alert profile already present: just add the recipient to its list
            profiles_to_recipients[net_name][profile_name].append(recipient)
        else:
            # new alert profile for this network: start its recipient list
            profiles_to_recipients[net_name][profile_name] = [recipient]
    else:
        # new network: add it with this profile name mapped to a fresh recipient list
        profiles_to_recipients[net_name] = {
            profile_name: [recipient]
        }
alert_recipients_file.close()

alert_profile_file = open("alert_profiles.csv", "r")
csv_reader = csv.DictReader(alert_profile_file)
for row in csv_reader:
    temp_threshold = row["temp_threshold"]
    temp_duration = row["temp_duration"]
    profile_name = row["name"]
    net_name = row["network"]
    net_id = networks[net_name]
    org_id = networks_to_organizations[net_id]
    serials = sensors_to_profiles[profile_name]
    alert_profile_details = {
        "name": profile_name,
        "scheduleId": "",
        "conditions": [
            {
                "type": "temperature",
                "unit": "fahrenheit",
                "direction": "+",
                "threshold": temp_threshold,
                "duration": temp_duration
            }
        ],
        "recipients": {
            "emails": profiles_to_recipients[net_name][profile_name],
            "snmp": False,
            "allAdmins": False,
            "smsNumbers": [],
            "httpServerIds": [],
            "pushUserIds": []
        },
        "serials": serials
    }
    status_code = createAlertProfile(base_url, headers, net_id, alert_profile_details)
    if status_code != 201:
        print("Error {}".format(status_code))
        print("There was an issue creating the alert profile: {} to the network with ID {}".format(alert_profile_details, net_id))
    print("Alert profile {} was added to the network {}".format(profile_name, net_name))
alert_profile_file.close()
```
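The four CSV inputs are not part of this record; their headers can be inferred from the `row[...]` lookups above. An illustrative sketch (headers only, inferred rather than taken from the repository):

```
sensors.csv:             organization,network,serial,name,location
sensors_to_profiles.csv: alert_profile,sensor_serial
alert_recipients.csv:    alert_profile,network,email
alert_profiles.csv:      name,network,temp_threshold,temp_duration
```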
avg_line_length: 40.869281 | max_line_length: 277 | alphanum_fraction: 0.689909
qsc_*_quality_signal values (schema order): 876 | 6,253 | 4.767123 | 0.22032 | 0.066092 | 0.038314 | 0.020354 | 0.342433 | 0.255508 | 0.228448 | 0.178161 | 0.171456 | 0.152059 | 0 | 0.004072 | 0.214457 | 6,253 | 152 | 278 | 41.138158 | 0.846091 | 0.31185 | 0 | 0.166667 | 0 | 0 | 0.196285 | 0.005622 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.039216 | 0 | 0.039216 | 0.098039
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 1120e9e47c16ba9929729ce5750b83aea2535437 | size: 663 | ext: py | lang: Python
max_stars: BasicOperations/01_01_PyQt4/tableDoubleClicked.py @ UpSea/midProjects (ed6086e74f68b1b89f725abe0b270e67cf8993a8) | licenses: ["MIT"] | count: 1 | event min/max: 2018-07-02T13:54:49.000Z / 2018-07-02T13:54:49.000Z
max_issues: same path/repo/head/licenses | count: null | event min/max: null / null
max_forks: same path/repo/head/licenses | count: 3 | event min/max: 2016-05-28T15:13:02.000Z / 2021-04-10T06:04:25.000Z
content:
```python
from PyQt4.QtGui import *
from PyQt4.QtCore import *

class MyTabView(QTableView):
    def __init__(self, parent=None):
        super(MyTabView, self).__init__(parent)
        self.model = QStandardItemModel(4, 2)
        self.setModel(self.model)

    def mouseDoubleClickEvent(self, event):
        QTableView.mouseDoubleClickEvent(self, event)
        pos = event.pos()
        item = self.indexAt(pos)
        # indexAt() always returns a QModelIndex, so test validity rather than truthiness
        if item.isValid():
            print("item clicked at ", item.row(), " ", item.column())

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    w = QWidget()
    w.resize(1024, 768)
    v = MyTabView(w)
    w.show()
    app.exec_()
```
avg_line_length: 25.5 | max_line_length: 70 | alphanum_fraction: 0.618401
qsc_*_quality_signal values (schema order): 77 | 663 | 5.103896 | 0.571429 | 0.045802 | 0.152672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022267 | 0.254902 | 663 | 26 | 71 | 25.5 | 0.773279 | 0 | 0 | 0 | 0 | 0 | 0.037651 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.285714 | 0.047619
raw qsc_code_* / qsc_codepython_* columns: all 0 (null for frac_words_unique and frac_lines_string_concat) | effective: 1 | hits: 0
hexsha: 1121313e46cf1f2e2cb2bc3065f395b37613a84b | size: 19,860 | ext: py | lang: Python
max_stars: legacy/glucose-insulin.py @ IllumiNate411/SBINNs (37e68ce97a997090d17a3d487de77aa9059bfc91) | licenses: ["Apache-2.0"] | count: 23 | event min/max: 2020-07-15T07:41:15.000Z / 2022-02-10T23:09:03.000Z
max_issues: same path/repo/head/licenses | count: 2 | event min/max: 2021-06-20T20:41:52.000Z / 2022-02-09T19:26:10.000Z
max_forks: same path/repo/head/licenses | count: 21 | event min/max: 2020-07-15T07:41:17.000Z / 2022-03-03T12:01:37.000Z
content:
import tensorflow as tf
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
import seaborn as sns
import time
from utilities import neural_net, fwd_gradients, heaviside, \
tf_session, mean_squared_error, relative_error
class HiddenPathways:
# Initialize the class
def __init__(self, t_data, S_data, t_eqns, layers, meal_tq):
self.D = S_data.shape[1]
self.t_min = t_data.min(0)
self.t_max = t_data.max(0)
# self.S_scale = tf.Variable(np.array(self.D*[1.0]), dtype=tf.float32, trainable=False)
self.S_scale = S_data.std(0)
# data on all the species (only some are used as input)
self.t_data, self.S_data = t_data, S_data
self.t_eqns = t_eqns
# layers
self.layers = layers
self.mt = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
self.mq = meal_tq[1]
# self.k = tf.Variable(1.0/120.0, dtype=tf.float32, trainable=False)
self.Rm = tf.Variable(209.0/100.0, dtype=tf.float32, trainable=False)
self.Vg = tf.Variable(10.0, dtype=tf.float32, trainable=False)
self.C1 = tf.Variable(300.0/100.0, dtype=tf.float32, trainable=False)
self.a1 = tf.Variable(6.6, dtype=tf.float32, trainable=False)
# self.Ub = tf.Variable(72.0/100.0, dtype=tf.float32, trainable=False)
# self.C2 = tf.Variable(144.0/100.0, dtype=tf.float32, trainable=False)
# self.U0 = tf.Variable(4.0/100.0, dtype=tf.float32, trainable=False)
# self.Um = tf.Variable(90.0/100.0, dtype=tf.float32, trainable=False)
# self.C3 = tf.Variable(100.0/100.0, dtype=tf.float32, trainable=False)
# self.C4 = tf.Variable(80.0/100.0, dtype=tf.float32, trainable=False)
self.Vi = tf.Variable(11.0, dtype=tf.float32, trainable=False)
self.E = tf.Variable(0.2, dtype=tf.float32, trainable=False)
self.ti = tf.Variable(100.0, dtype=tf.float32, trainable=False)
# self.beta = tf.Variable(1.772, dtype=tf.float32, trainable=False)
# self.Rg = tf.Variable(180.0/100.0, dtype=tf.float32, trainable=False)
# self.alpha = tf.Variable(7.5, dtype=tf.float32, trainable=False)
self.Vp = tf.Variable(3.0, dtype=tf.float32, trainable=False)
# self.C5 = tf.Variable(26.0/100.0, dtype=tf.float32, trainable=False)
self.tp = tf.Variable(6.0, dtype=tf.float32, trainable=False)
# self.td = tf.Variable(12.0, dtype=tf.float32, trainable=False)
self.logk = tf.Variable(-6.0, dtype=tf.float32, trainable=True)
# self.logRm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logVg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logC1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.loga1 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logUb = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logC2 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logU0 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logUm = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logC3 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logC4 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logVi = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logE = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logti = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logbeta = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logRg = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logalpha = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logVp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logC5 = tf.Variable(0.0, dtype=tf.float32, trainable=True)
# self.logtp = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.logtd = tf.Variable(0.0, dtype=tf.float32, trainable=True)
self.var_list_eqns = [self.logk, self.logUb,
self.logC2, self.logU0, self.logUm, self.logC3, self.logC4,
self.logbeta, self.logRg, self.logalpha, self.logC5,
self.logtd]
self.k = tf.exp(self.logk)
# self.Rm = tf.exp(self.logRm)
# self.Vg = tf.exp(self.logVg)
# self.C1 = tf.exp(self.logC1)
# self.a1 = tf.exp(self.loga1)
self.Ub = tf.exp(self.logUb)
self.C2 = tf.exp(self.logC2)
self.U0 = tf.exp(self.logU0)
self.Um = tf.exp(self.logUm)
self.C3 = tf.exp(self.logC3)
self.C4 = tf.exp(self.logC4)
# self.Vi = tf.exp(self.logVi)
# self.E = tf.exp(self.logE)
# self.ti = tf.exp(self.logti)
self.beta = tf.exp(self.logbeta)
self.Rg = tf.exp(self.logRg)
self.alpha = tf.exp(self.logalpha)
# self.Vp = tf.exp(self.logVp)
self.C5 = tf.exp(self.logC5)
# self.tp = tf.exp(self.logtp)
self.td = tf.exp(self.logtd)
# placeholders for data
self.t_data_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.S_data_tf = tf.placeholder(tf.float32, shape=[None, self.D])
self.t_eqns_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.mt_tf = tf.placeholder(tf.float32, shape=[None, self.mt.shape[1]])
self.mq_tf = tf.placeholder(tf.float32, shape=[None, self.mq.shape[1]])
self.learning_rate = tf.placeholder(tf.float32, shape=[])
# physics uninformed neural networks
self.net_sysbio = neural_net(layers=self.layers)
self.H_data = 2.0*(self.t_data_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
self.S_data_pred = self.S_data[0,:] + self.S_scale*(self.H_data+1.0)*self.net_sysbio(self.H_data)
# physics informed neural networks
self.H_eqns = 2.0*(self.t_eqns_tf - self.t_min)/(self.t_max - self.t_min) - 1.0
self.S_eqns_pred = self.S_data[0,:] + self.S_scale*(self.H_eqns+1.0)*self.net_sysbio(self.H_eqns)
self.E_eqns_pred, self.IG = self.SysODE(self.S_eqns_pred, self.t_eqns_tf,
self.H_eqns, self.mt_tf, self.mq_tf)
# Adaptive S_scale
# self.S_scale = 0.9*self.S_scale + 0.1*tf.math.reduce_std(self.S_eqns_pred, 0)
# scale_list = tf.unstack(self.S_scale)
# scale_list[2] = self.S_data.std(0)[2]
# self.S_scale = tf.stack(scale_list)
# loss
self.loss_data = mean_squared_error(self.S_data_tf[:,2:3]/self.S_scale[2:3], self.S_data_pred[:,2:3]/self.S_scale[2:3])
self.loss_eqns = mean_squared_error(0.0, self.E_eqns_pred/self.S_scale)
self.loss_auxl = mean_squared_error(self.S_data_tf[-1,:]/self.S_scale, self.S_data_pred[-1,:]/self.S_scale)
self.loss = 0.99*self.loss_data + 0.01*self.loss_eqns + 0.01*self.loss_auxl
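        # Weighted composite objective: the glucose data-fit term dominates
        # (0.99), while the ODE residual and the end-point auxiliary term act
        # as soft physics/consistency regularizers (0.01 each).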
# optimizers
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.optimizer_para = tf.train.AdamOptimizer(learning_rate=0.001)
self.train_op = self.optimizer.minimize(self.loss,
var_list=[self.net_sysbio.weights,
self.net_sysbio.biases,
self.net_sysbio.gammas])
self.trainpara_op = self.optimizer_para.minimize(self.loss,
var_list=self.var_list_eqns)
self.sess = tf_session()
def SysODE(self, S, t, H, mt, mq):
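        """Physics residual of the six-state glucose-insulin ODE system.

        Builds the exogenous meal intake IG from the normalized meal times mt
        and quantities mq, evaluates the right-hand sides F0..F5, and returns
        the residual E = dS/dt - F (time derivatives via fwd_gradients)
        together with IG.
        """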
intake = self.k * mq * heaviside(H-mt) * tf.exp(self.k*(mt-H)*(self.t_max-self.t_min)/2.0)
IG = tf.reduce_sum(intake, axis=1, keepdims=True)
kappa = 1.0/self.Vi + 1.0/(self.E*self.ti)
f1 = self.Rm * tf.sigmoid(S[:,2:3]/(self.Vg*self.C1) - self.a1)
f2 = self.Ub * (1.0 - tf.exp(-S[:,2:3]/(self.Vg*self.C2)))
safe_log = tf.where(S[:,1:2] <= 0.0, tf.ones_like(S[:,1:2]), S[:,1:2])
f3 = (self.U0 + self.Um*tf.sigmoid(self.beta*tf.log(kappa*safe_log/self.C4))) / (self.Vg*self.C3)
f4 = self.Rg * tf.sigmoid(-self.alpha*(S[:,5:6]/(self.Vp*self.C5)-1.0))
F0 = f1 - self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,0:1]/self.tp
F1 = self.E*(S[:,0:1]/self.Vp-S[:,1:2]/self.Vi) - S[:,1:2]/self.ti
F2 = f4 + IG - f2 - f3*S[:,2:3]
F3 = (S[:,0:1] - S[:,3:4]) / self.td
F4 = (S[:,3:4] - S[:,4:5]) / self.td
F5 = (S[:,4:5] - S[:,5:6]) / self.td
F = tf.concat([F0, F1, F2, F3, F4, F5], 1)
S_t = fwd_gradients(S, t)
E = S_t - F
return E, IG
def train(self, num_epochs, batch_size, learning_rate):
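        """Jointly train the network weights and the log-space ODE parameters
        with Adam, always pinning the first and last data points in each
        data mini-batch."""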
N_data = self.t_data.shape[0]
N_eqns = self.t_eqns.shape[0]
for epoch in range(num_epochs):
start_time = time.time()
for it in range(N_eqns//batch_size):
idx_data = np.concatenate([np.array([0]),
np.random.choice(np.arange(1, N_data-1), min(batch_size, N_data)-2),
np.array([N_data-1])])
idx_eqns = np.random.choice(N_eqns, batch_size)
t_data_batch, S_data_batch = self.t_data[idx_data,:], self.S_data[idx_data,:]
t_eqns_batch = self.t_eqns[idx_eqns,:]
mt_batch, mq_batch = self.mt[idx_eqns,:], self.mq[idx_eqns,:]
tf_dict = {self.t_data_tf: t_data_batch,
self.S_data_tf: S_data_batch,
self.t_eqns_tf: t_eqns_batch,
self.mt_tf: mt_batch, self.mq_tf: mq_batch,
self.learning_rate: learning_rate}
self.sess.run([self.train_op, self.trainpara_op], tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
[loss_data_value,
loss_eqns_value,
loss_auxl_value,
learning_rate_value] = self.sess.run([self.loss_data,
self.loss_eqns,
self.loss_auxl,
self.learning_rate], tf_dict)
print('Epoch: %d, It: %d, Loss Data: %.3e, Loss Eqns: %.3e, Loss Aux: %.3e, Time: %.3f, Learning Rate: %.1e'
%(epoch, it, loss_data_value, loss_eqns_value, loss_auxl_value, elapsed, learning_rate_value))
start_time = time.time()
def predict(self, t_star, meal_tq):
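        """Predict the states at times t_star, appending the inferred meal
        intake IG as a final column."""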
meal_tq[0] = 2.0*(meal_tq[0] - self.t_min)/(self.t_max - self.t_min) - 1.0
tf_dict = {self.t_eqns_tf: t_star,
self.mt_tf: meal_tq[0], self.mq_tf: meal_tq[1]}
S_star, IG = self.sess.run([self.S_eqns_pred, self.IG], tf_dict)
S_star = np.append(S_star[:,:], IG[:], axis=1)
return S_star
if __name__ == "__main__":
layers = [1] + 6*[6*30] + [6]
meal_t = [300., 650., 1100., 2000.]
meal_q = [60e3, 40e3, 50e3, 100e3]
def intake(tn, k):
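        """Total exogenous glucose intake at time tn: each meal (tj, mj) is
        gated by a Heaviside step at tj and then decays exponentially at
        rate k."""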
def s(mjtj):
return k*mjtj[1]*np.heaviside(tn-mjtj[0], 0.5)*np.exp(k*(mjtj[0]-tn))
IG = np.array([s(mjtj) for mjtj in list(zip(meal_t, meal_q))]).sum()
return IG
# function that returns dx/dt
def f(x, t): # x is 6 x 1
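        """Ground-truth dynamics dx/dt with the nominal parameter values,
        used to generate the synthetic training data."""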
k = 1./120.
Rm = 209.
Vg = 10.
C1 = 300.
a1 = 6.6
Ub = 72.
C2 = 144.
U0 = 4.
Um = 90.
C3 = 100.
C4 = 80.
Vi = 11.
E = 0.2
ti = 100.
beta = 1.772
Rg = 180.
alpha = 7.5
Vp = 3.
C5 = 26.
tp = 6.
td = 12.
kappa = 1.0/Vi + 1.0/E/ti
f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
x2 = f4 + intake(t, k) - f2 - f3*x[2]
x3 = (x[0] - x[3]) / td
x4 = (x[3] - x[4]) / td
x5 = (x[4] - x[5]) / td
X = np.array([x0, x1, x2, x3, x4, x5])
return X
# function that returns dx/dt
def f_pred(x, t): # x is 6 x 1
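        """Same dynamics as f, but with parameter values inferred by the
        trained model; used only by the commented-out forecast below."""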
k = 0.007751
Rm = 73.858517
Vg = 10.000000
C1 = 319.160032
a1 = 6.253946
Ub = 86.824888
C2 = 152.637362
U0 = 19.412358
Um = 141.051173
C3 = 235.955381
C4 = 251.580667
Vi = 2.689281
E = 0.147199
ti = 36.766254
beta = 2.475349
Rg = 212.777472
alpha = 7.182466
Vp = 0.707807
C5 = 101.811242
tp = 139.384628
td = 7.417875
kappa = 1.0/Vi + 1.0/E/ti
f1 = Rm / (1.0 + np.exp(-x[2]/Vg/C1 + a1))
f2 = Ub * (1.0 - np.exp(-x[2]/Vg/C2))
f3 = (U0 + Um/(1.0+np.exp(-beta*np.log(kappa*x[1]/C4)))) / Vg / C3
f4 = Rg / (1.0 + np.exp(alpha*(x[5]/Vp/C5-1.0)))
x0 = f1 - E*(x[0]/Vp-x[1]/Vi) - x[0]/tp
x1 = E*(x[0]/Vp-x[1]/Vi) - x[1]/ti
x2 = f4 + intake(t, k) - f2 - f3*x[2]
x3 = (x[0] - x[3]) / td
x4 = (x[3] - x[4]) / td
x5 = (x[4] - x[5]) / td
X = np.array([x0, x1, x2, x3, x4, x5])
return X
def plotting(t_star, S_star, S_pred, perm, Vg2, forecast=False):
sns.set()
fig, ax = newfig(2.0, 0.7)
gs0 = gridspec.GridSpec(1, 1)
gs0.update(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
ax = plt.subplot(gs0[0:1, 0:1])
ax.plot(t_star,S_star[:,2],'C1',linewidth=2,label='input data')
ax.scatter(t_star[perm],S_star[perm,2],marker='o',s=40,label='sampled input')
        ax.set_xlabel(r'$t\ (min)$', fontsize=18)
        ax.set_ylabel(r'$G\ (mg/dl)$', fontsize=18)
ax.legend(fontsize='large')
####################################
fig, ax = newfig(1.8, 0.75)
gs1 = gridspec.GridSpec(1, 2)
gs1.update(top=0.85, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
ax = plt.subplot(gs1[0:1, 0:1])
ax.plot(t_star,S_star[:,0]*Vg2,'C1',linewidth=2,label='exact')
ax.plot(t_star,S_pred[:,0]*Vg2,'g-.',linewidth=3,label='learned')
        ax.set_xlabel(r'$t\ (min)$', fontsize=18)
        ax.set_ylabel(r'$I_p\ (\mu U/ml)$', fontsize=18)
ax.legend(fontsize='large')
ax = plt.subplot(gs1[0:1, 1:2])
ax.plot(t_star,S_star[:,1]*Vg2,'C1',linewidth=2)
ax.plot(t_star,S_pred[:,1]*Vg2,'g-.',linewidth=3)
        ax.set_xlabel(r'$t\ (min)$', fontsize=18)
        ax.set_ylabel(r'$I_i\ (\mu U/ml)$', fontsize=18)
fig, ax = newfig(1.8, 0.75)
gs2 = gridspec.GridSpec(1, 2)
gs2.update(top=0.85, bottom=0.15, left=0.1, right=0.95, hspace=0.3, wspace=0.3)
ax = plt.subplot(gs2[0:1, 0:1])
if not forecast:
ax.scatter(t_star[perm],S_star[perm,2],marker='o',c='C1',s=30)
else:
ax.plot(t_star,S_star[:,2],'C1',linewidth=2)
ax.plot(t_star,S_pred[:,2],'g-.',linewidth=3)
        ax.set_xlabel(r'$t\ (min)$', fontsize=18)
        ax.set_ylabel(r'$G\ (mg/dl)$', fontsize=18)
ax = plt.subplot(gs2[0:1, 1:2])
ax.plot(t_star,IG_star*Vg2,'C1',linewidth=2)
ax.plot(t_star,S_pred[:,6]*Vg2,'g-.',linewidth=3)
        ax.set_xlabel(r'$t\ (min)$', fontsize=18)
        ax.set_ylabel(r'$I_G\ (mg/min)$', fontsize=18)
# time points
t_star = np.arange(0, 3000, 1.0)
N = t_star.shape[0]
N_eqns = N
N_data = N // 5
k = 1./120.
Vp = 3.0
Vi = 11.0
Vg2 = 10.0*10.0
S0 = 12.0*Vp
S1 = 4.0*Vi
S2 = 110.0*Vg2
S3 = 0.0
S4 = 0.0
S5 = 0.0
# initial condition
x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()
# solve ODE
S_star = odeint(f, x0, t_star)
S_star /= Vg2 # scaling by Vg^2
IG_star = np.array([intake(t, k) for t in t_star]) / Vg2
t_train = t_star[:,None]
S_train = S_star
    # data at two time points must be given for all the species:
    # the initial condition at t = 0 plus at least one more point in (0, T]
    N0 = 0
    N1 = N - 1
    idx_data = np.concatenate([np.array([N0]),
                               np.random.choice(np.arange(1, N-1), size=N_data, replace=False),
                               np.array([N1])])
idx_eqns = np.concatenate([np.array([N0]),
np.random.choice(np.arange(1, N-1), size=N_eqns-2, replace=False),
np.array([N-1])])
meal_tq = [np.array([N_eqns*[x] for x in meal_t]).T,
np.array([N_eqns*[x/Vg2] for x in meal_q]).T]
model = HiddenPathways(t_train[idx_data],
S_train[idx_data,:],
t_train[idx_eqns],
layers,
meal_tq)
model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-3)
model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-4)
model.train(num_epochs=10000, batch_size=N_eqns, learning_rate=1e-5)
# NN prediction
meal_tq = [np.array([N*[x] for x in meal_t]).T,
np.array([N*[x/Vg2] for x in meal_q]).T]
S_pred = model.predict(t_star[:,None], meal_tq)
plotting(t_star, S_star, S_pred, idx_data, Vg2)
print('k = %.6f' % ( model.sess.run(model.k) ) )
print('Rm = %.6f' % ( model.sess.run(model.Rm)*Vg2 ) )
print('Vg = %.6f' % ( model.sess.run(model.Vg) ) )
print('C1 = %.6f' % ( model.sess.run(model.C1)*Vg2 ) )
print('a1 = %.6f' % ( model.sess.run(model.a1) ) )
print('Ub = %.6f' % ( model.sess.run(model.Ub)*Vg2 ) )
print('C2 = %.6f' % ( model.sess.run(model.C2)*Vg2 ) )
print('U0 = %.6f' % ( model.sess.run(model.U0)*Vg2 ) )
print('Um = %.6f' % ( model.sess.run(model.Um)*Vg2 ) )
print('C3 = %.6f' % ( model.sess.run(model.C3)*Vg2 ) )
print('C4 = %.6f' % ( model.sess.run(model.C4)*Vg2 ) )
print('Vi = %.6f' % ( model.sess.run(model.Vi) ) )
print('E = %.6f' % ( model.sess.run(model.E) ) )
print('ti = %.6f' % ( model.sess.run(model.ti) ) )
print('beta = %.6f' % ( model.sess.run(model.beta) ) )
print('Rg = %.6f' % ( model.sess.run(model.Rg)*Vg2 ) )
print('alpha = %.6f' % ( model.sess.run(model.alpha) ) )
print('Vp = %.6f' % ( model.sess.run(model.Vp) ) )
print('C5 = %.6f' % ( model.sess.run(model.C5)*Vg2 ) )
print('tp = %.6f' % ( model.sess.run(model.tp) ) )
print('td = %.6f' % ( model.sess.run(model.td) ) )
# Prediction based on inferred parameters
# k = 0.007751
# Vp = 0.707807
# Vi = 2.689281
# S0 = 12.0*Vp
# S1 = 4.0*Vi
# S2 = 110.0*Vg2
# S3 = 0.0
# S4 = 0.0
# S5 = 0.0
# x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()
# S_pred = odeint(f_pred, x0, t_star)
# S_pred /= Vg2
# IG_pred = np.array([intake(t, k) for t in t_star]) / Vg2
# S_pred = np.append(S_pred[:,:], IG_pred[:,None], axis=1)
# plotting(t_star, S_star, S_pred, idx_data, Vg2, forecast=True)
# savefig('./figures/Glycolytic', crop = False)
| [row footer: avg_line_length 42.801724 | max_line_length 128 | alphanum_fraction 0.533938 | 19,860 bytes over 464 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 112717e60382646da056e160e879beb3deb10306 | size 4,123 | ext py | lang Python | path word_ladder/utilities.py | repo RacingTadpole/boggle | head 2185da9e204e2d1ed686ccaac76d0d73396408fb | licenses ["MIT"] | stars/issues/forks null
from typing import Dict, FrozenSet, Iterable, Iterator, List, Sequence, Optional, Tuple
from word_ladder.types import WordDict
from word_ladder.rung import Rung
def get_word_with_letter_missing(word: str, position: int) -> str:
"""
>>> get_word_with_letter_missing('dog', 0)
'?og'
>>> get_word_with_letter_missing('dog', 1)
'd?g'
>>> get_word_with_letter_missing('dog', 2)
'do?'
"""
if position == 0:
return f'?{word[1:]}'
if position == len(word) - 1:
return f'{word[:-1]}?'
return f'{word[:position]}?{word[position + 1:]}'
def get_neighbors(word: str, words: WordDict) -> FrozenSet[str]:
"""
>>> words = {'?og': ['dog', 'log', 'fog'], 'd?g': ['dog', 'dig'], 'do?': ['dog'], 'l?g': ['log'], 'lo?': ['log']}
>>> sorted(get_neighbors('dig', words))
['dig', 'dog']
>>> sorted(get_neighbors('fog', words))
['dog', 'fog', 'log']
"""
return frozenset(
neighbor
for position in range(len(word))
for neighbor in words.get(get_word_with_letter_missing(word, position), [])
)
def get_all_previous_words(rung: Rung) -> Tuple[str, ...]:
"""
>>> rung_0 = Rung(None, ['dig'], {})
>>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin')}
>>> words = ['dob', 'don', 'dug', 'fin', 'fog', 'log']
>>> rung_1 = Rung(rung_0, words, path)
>>> sorted(get_all_previous_words(rung_1))
['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
"""
return tuple(rung.words) + (get_all_previous_words(rung.previous) if rung.previous else ())
def get_next_rung(previous_rung: Rung, words: WordDict) -> Rung:
"""
>>> from word_ladder.compile_words import add_to_words_dict
>>> words = {}
>>> for w in ['dog', 'log', 'fog', 'dig', 'dug', 'dim', 'don', 'dob', 'lug', 'fin']:
... words = add_to_words_dict(words, w)
>>> rung = Rung(None, ['dog', 'fig'], {})
>>> next_rung = get_next_rung(rung, words)
>>> {k: sorted(v) for k,v in next_rung.path.items()}
{'dog': ['dig', 'dob', 'don', 'dug', 'fog', 'log'], 'fig': ['dig', 'fin', 'fog']}
>>> sorted(next_rung.words)
['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
"""
previous_words = get_all_previous_words(previous_rung)
path = {
source_word: tuple(w for w in get_neighbors(source_word, words) if w not in previous_words)
for source_word in previous_rung.words
}
word_soup = frozenset(w for these_words in path.values() for w in these_words)
return Rung(previous_rung, word_soup, path)
def keys_for_value(d: Dict[str, Iterable[str]], value: str) -> Iterator[str]:
"""
>>> d = {'a': ['x', 'y', 'z'], 'b': ['l', 'm', 'z'], 'c': ['t', 'u']}
>>> list(keys_for_value(d, 'y'))
['a']
>>> list(keys_for_value(d, 'u'))
['c']
>>> list(keys_for_value(d, 'z'))
['a', 'b']
"""
for key, values in d.items():
if value in values:
yield key
def get_ladders(rung: Rung, word: str) -> Sequence[List[str]]:
"""
>>> rung_0 = Rung(None, ['dig'], {})
>>> rung_1 = Rung(rung_0, ['dog', 'log', 'fig', 'din'], {'dig': ('dog', 'log', 'fig', 'din')})
>>> words = ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log', 'din']
>>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin'), 'din': ('dig', 'fin')}
>>> rung_2 = Rung(rung_1, words, path)
>>> get_ladders(rung_2, 'fin')
[['dig', 'fig', 'fin'], ['dig', 'din', 'fin']]
"""
if not rung.previous:
return [[word]]
return [
ladder + [word]
for previous_word in keys_for_value(rung.path, word)
for ladder in get_ladders(rung.previous, previous_word)
]
def build_rungs(start_word: str, target_word: str, words: WordDict) -> Rung:
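    """Grow rungs from start_word until target_word appears in the current
    rung or no unused words remain, returning the final rung."""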
rung = Rung(None, [start_word], {})
counter = 1
while target_word not in rung.words and len(rung.words) > 0:
rung = get_next_rung(rung, words)
counter += 1
if rung.words:
print(f'Round {counter}: {len(rung.words):3} possible words, eg. {", ".join(sorted(rung.words)[:6])}')
return rung
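# Example usage (a sketch; assumes a WordDict built with
# word_ladder.compile_words.add_to_words_dict, as in the doctests above):
#
#     rung = build_rungs('dog', 'fin', words)
#     if 'fin' in rung.words:
#         print(get_ladders(rung, 'fin'))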
| [row footer: avg_line_length 37.825688 | max_line_length 119 | alphanum_fraction 0.551298 | 4,123 bytes over 108 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 11290c9f84712a0ff71c67c6213ef09024350d23 | size 923 | ext py | lang Python | path apps/plea/migrations/0015_datavalidation.py | repo uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | head 4c625b13fa2826bdde083a0270dcea1791f6dc18 | licenses ["MIT"] | stars 3 (2015-12-22T16:37:14Z to 2018-01-22T18:44:38Z) | issues 145 (2015-03-04T11:17:50Z to 2022-03-21T12:10:13Z) | forks 3 (2015-12-29T14:59:12Z to 2021-04-11T06:24:11Z)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plea', '0014_auto_20151119_1136'),
]
operations = [
migrations.CreateModel(
name='DataValidation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_entered', models.DateTimeField(auto_created=True)),
('urn_entered', models.CharField(max_length=50)),
('urn_standardised', models.CharField(max_length=50)),
('urn_formatted', models.CharField(max_length=50)),
('case_match_count', models.PositiveIntegerField(default=0)),
('case_match', models.ForeignKey(blank=True, to='plea.Case', null=True)),
],
),
]
| [row footer: avg_line_length 34.185185 | max_line_length 114 | alphanum_fraction 0.600217 | 923 bytes over 26 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 112b2b33af67e21e163e5b4e0d7e900b33eee428 | size 1,407 | ext py | lang Python | path day_2/day_2.py | repo DillonHirth/advent_of_code | head 3af280134757945958f816c5c1522c8b7178c290 | licenses ["MIT"] | stars/issues/forks null
# PART 1
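# (Advent of Code 2021, day 2 "Dive!": parse "forward/down/up <n>" commands
# and report the product of the final horizontal position and depth.)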
with open('input.txt') as input_file:
x_pos = 0
y_pos = 0
for line in input_file:
direction = line.split(' ')[0]
distance = int(line.split(' ')[1])
if direction == "forward":
x_pos += distance
elif direction == "down":
y_pos += distance
        else:  # "up"
y_pos -= distance
print(x_pos * y_pos)
# PART 2
with open('input.txt') as input_file:
x_pos = 0
y_pos = 0
aim_vector = 0
for line in input_file:
direction = line.split(' ')[0]
distance = int(line.split(' ')[1])
print("*******************************************")
print("direction:", direction)
print("distance:", distance)
print("old_aim:", aim_vector)
print("old_x_pos:", x_pos)
print("old_y_pos:", y_pos)
print("---------------------------")
if direction == "forward":
x_pos += distance
y_pos += aim_vector * distance
elif direction == "down":
aim_vector += distance
        else:  # "up"
aim_vector -= distance
print("direction:", direction)
print("distance:", distance)
print("aim:", aim_vector)
print("x_pos:", x_pos)
print("y_pos:", y_pos)
print("*******************************************")
print("x_pos:", x_pos)
print("y_pos:", y_pos)
print("x*y:", x_pos * y_pos)
| [row footer: avg_line_length 30.586957 | max_line_length 60 | alphanum_fraction 0.484009 | 1,407 bytes over 46 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 112f282e3098cdc6a98d1c6bbec33fdd6b4350c1 | size 23,588 | ext py | lang Python | path test2.py | repo juanmed/singleshot6Dpose | head a32d5159d557451ac3ed710ca7d4da6f7c64ff52 | licenses ["MIT"] | stars 5 (2019-03-27T08:40:07Z to 2021-01-08T05:44:46Z) | issues null | forks 1 (2019-07-11T09:20:25Z to 2019-07-11T09:20:25Z)
# import support libraries
import os
import time
import numpy as np
# import main working libraries
import cv2
import torch
from torch.autograd import Variable
from torchvision import transforms
from PIL import Image
# import app libraries
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
class Line():
    def __init__(self, p1, p2):
        # slope (None marks a vertical line)
        if (p2[0] - p1[0]) == 0.0:
            self.m = None
        else:
            self.m = (p2[1] - p1[1])/(p2[0] - p1[0])
        # intercept (undefined for vertical lines)
        if self.m is None:
            self.b = None
        else:
            self.b = -1.0*self.m*p1[0] + p1[1]
        self.p = p1  # store one sample point
    def eval(self, x):
        if self.m is None:
            raise ValueError('cannot evaluate a vertical line at a given x')
        return x*self.m + self.b
def find_intersection(l1, l2):
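    """Intersection point of two lines; assumes both are non-vertical
    (m is defined) and non-parallel (l1.m != l2.m)."""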
x = (l2.b - l1.b)/(l1.m - l2.m) # x coord of intersection point
y = l1.eval(x) # y coord of intersection point
return x,y
# estimate bounding box
#@torch.no_grad
def test(datacfg, cfgfile, weightfile, imgfile):
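    """Run a single-image 6D pose test: load the mesh and network config,
    forward the image through Darknet, keep the highest-confidence 9-point
    prediction, recover the pose via PnP, and draw the predicted bounding
    cube and estimated flyable area."""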
# ******************************************#
# PARAMETERS PREPARATION #
# ******************************************#
#parse configuration files
options = read_data_cfg(datacfg)
meshname = options['mesh']
name = options['name']
#Parameters for the network
seed = int(time.time())
gpus = '0' # define gpus to use
test_width = 608 # define test image size
test_height = 608
torch.manual_seed(seed) # seed torch random
use_cuda = True
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed) # seed cuda random
conf_thresh = 0.1
num_classes = 1
# Read object 3D model, get 3D Bounding box corners
mesh = MeshPly(meshname)
vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
#print("Vertices are:\n {} Shape: {} Type: {}".format(vertices,vertices.shape, type(vertices)))
corners3D = get_3D_corners(vertices)
feet_cm = 30.48 # 1 ft = 30.48 cm
corners3D[0] = np.array([-11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0])
corners3D[1] = np.array([-feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0, -feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0])
corners3D[2] = np.array([-11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0])
#print("3D Corners are:\n {} Shape: {} Type: {}".format(corners3D,corners3D.shape, type(corners3D)))
diam = float(options['diam'])
# now configure camera intrinsics
internal_calibration = get_camera_intrinsic()
# ******************************************#
# NETWORK CREATION #
# ******************************************#
# Create the network based on cfg file
model = Darknet(cfgfile)
#model.print_network()
model.load_weights(weightfile)
model.cuda()
model.eval()
# ******************************************#
# INPUT IMAGE PREPARATION FOR NN #
# ******************************************#
# Now prepare image: convert to RGB, resize, transform to Tensor
# use cuda,
img = Image.open(imgfile).convert('RGB')
ori_size = img.size # store original size
img = img.resize((test_width, test_height))
t1 = time.time()
img = transforms.Compose([transforms.ToTensor(),])(img)#.float()
img = Variable(img, requires_grad = True)
img = img.unsqueeze(0)
img = img.cuda()
# ******************************************#
# PASS IT TO NETWORK AND GET PREDICTION #
# ******************************************#
# Forward pass
output = model(img).data
#print("Output Size: {}".format(output.size(0)))
t2 = time.time()
# ******************************************#
# EXTRACT PREDICTIONS #
# ******************************************#
# Using confidence threshold, eliminate low-confidence predictions
# and get only boxes over the confidence threshold
all_boxes = get_region_boxes(output, conf_thresh, num_classes)
boxes = all_boxes[0]
# iterate through boxes to find the one with highest confidence
best_conf_est = -1
best_box_index = -1
for j in range(len(boxes)):
# the confidence is in index = 18
if( boxes[j][18] > best_conf_est):
box_pr = boxes[j] # get bounding box
best_conf_est = boxes[j][18]
best_box_index = j
#print("Best box is: {} and 2D prediction is {}".format(best_box_index,box_pr))
#print("Confidence is: {}".format(best_conf_est))
print(best_conf_est.item(),type(best_conf_est.item()))
# Denormalize the corner predictions
    # These are the predicted 2D points with which a bounding cube can be drawn
corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
corners2D_pr[:, 0] = corners2D_pr[:, 0] * ori_size[0] # Width
corners2D_pr[:, 1] = corners2D_pr[:, 1] * ori_size[1] # Height
t3 = time.time()
# **********************************************#
# GET OBJECT POSE ESTIMATION #
# Remember the problem in 6D Pose estimation #
# is exactly to estimate the pose - position #
# and orientation of the object of interest #
# with reference to a camera frame. That is #
# why although the 2D projection of the 3D #
# bounding cube are ready, we still need to #
# compute the rotation matrix -orientation- #
# and a translation vector -position- for the #
# object #
# #
# **********************************************#
# get rotation matrix and transform
R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(internal_calibration, dtype='float32'))
t4 = time.time()
# ******************************************#
# DISPLAY IMAGE WITH BOUNDING CUBE #
# ******************************************#
# Reload Original img
img = cv2.imread(imgfile)
# create a window to display image
wname = "Prediction"
cv2.namedWindow(wname)
# draw each predicted 2D point
for i, (x,y) in enumerate(corners2D_pr):
# get colors to draw the lines
col1 = 28*i
col2 = 255 - (28*i)
col3 = np.random.randint(0,256)
        cv2.circle(img, (int(x), int(y)), 3, (col1, col2, col3), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
# Get each predicted point and the centroid
p1 = corners2D_pr[1]
p2 = corners2D_pr[2]
p3 = corners2D_pr[3]
p4 = corners2D_pr[4]
p5 = corners2D_pr[5]
p6 = corners2D_pr[6]
p7 = corners2D_pr[7]
p8 = corners2D_pr[8]
center = corners2D_pr[0]
# Draw cube lines around detected object
# draw front face
line_point = 2
cv2.line(img,(p1[0],p1[1]),(p2[0],p2[1]), (0,255,0),line_point)
cv2.line(img,(p2[0],p2[1]),(p4[0],p4[1]), (0,255,0),line_point)
cv2.line(img,(p4[0],p4[1]),(p3[0],p3[1]), (0,255,0),line_point)
cv2.line(img,(p3[0],p3[1]),(p1[0],p1[1]), (0,255,0),line_point)
# draw back face
cv2.line(img,(p5[0],p5[1]),(p6[0],p6[1]), (0,255,0),line_point)
cv2.line(img,(p7[0],p7[1]),(p8[0],p8[1]), (0,255,0),line_point)
cv2.line(img,(p6[0],p6[1]),(p8[0],p8[1]), (0,255,0),line_point)
cv2.line(img,(p5[0],p5[1]),(p7[0],p7[1]), (0,255,0),line_point)
# draw right face
cv2.line(img,(p2[0],p2[1]),(p6[0],p6[1]), (0,255,0),line_point)
cv2.line(img,(p1[0],p1[1]),(p5[0],p5[1]), (0,255,0),line_point)
# draw left face
cv2.line(img,(p3[0],p3[1]),(p7[0],p7[1]), (0,255,0),line_point)
cv2.line(img,(p4[0],p4[1]),(p8[0],p8[1]), (0,255,0),line_point)
# Calculate gate dimensions
    min_x = np.min(corners3D[0,:]) # these are the gate outermost corners
max_x = np.max(corners3D[0,:])
min_y = np.min(corners3D[1,:])
max_y = np.max(corners3D[1,:])
min_z = np.min(corners3D[2,:])
max_z = np.max(corners3D[2,:])
gate_dim_z = max_z - min_z
gate_dim_x = max_x - min_x
gate_dim_y = max_y - min_y
############################################################
# PREDICT FLYABLE AREA BASED ON ESTIMATED 2D PROJECTIONS
############################################################
    # Calculate the flyable area from an offset applied to the predicted 2D
    # projection
    flyarea_side = 243.84 # cm (8 ft)
offset_z = (gate_dim_z - flyarea_side)/2.0
offset_x = (gate_dim_x - flyarea_side)/2.0
offset_z_ratio = (offset_z/gate_dim_z) # calculate as ratio wrt side, to use with pixels later
offset_x_ratio = (offset_x/gate_dim_x)
#print("Offset X ratio: {}, Offset Z ratio: {}".format(offset_x_ratio,offset_z_ratio))
# GATE FRONT
#
# array to store all 4 points
flyarea_corners_front = np.zeros((4,2), dtype = 'float32')
# corner 1
flyarea_corners_front[0][0] = p4[0] + int((p2[0]-p4[0])*offset_x_ratio)
flyarea_corners_front[0][1] = p4[1] + int((p3[1]-p4[1])*offset_z_ratio)
# corner 2
flyarea_corners_front[1][0] = p2[0] + int((p4[0]-p2[0])*offset_x_ratio)
flyarea_corners_front[1][1] = p2[1] + int((p1[1]-p2[1])*offset_x_ratio)
# corner 3
flyarea_corners_front[2][0] = p1[0] + int((p3[0]-p1[0])*offset_x_ratio)
flyarea_corners_front[2][1] = p1[1] + int((p2[1]-p1[1])*offset_x_ratio)
# corner 4
flyarea_corners_front[3][0] = p3[0] + int((p1[0]-p3[0])*offset_x_ratio)
flyarea_corners_front[3][1] = p3[1] + int((p4[1]-p3[1])*offset_x_ratio)
#print("Front points: {}".format(flyarea_corners_front))
# draw front gate area
fa_p1_f = flyarea_corners_front[0]
fa_p2_f = flyarea_corners_front[1]
fa_p3_f = flyarea_corners_front[2]
fa_p4_f = flyarea_corners_front[3]
"""
cv2.line(img,(fa_p1_f[0],fa_p1_f[1]),(fa_p2_f[0],fa_p2_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p2_f[0],fa_p2_f[1]),(fa_p3_f[0],fa_p3_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p4_f[0],fa_p4_f[1]),(fa_p1_f[0],fa_p1_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p3_f[0],fa_p3_f[1]),(fa_p4_f[0],fa_p4_f[1]), (255,0,255),line_point)
"""
# GATE BACK
#
# array to store all 4 points
flyarea_corners_back = np.zeros((4,2), dtype = 'float32')
# corner 1
flyarea_corners_back[0][0] = p8[0] + int((p6[0]-p8[0])*offset_x_ratio)
flyarea_corners_back[0][1] = p8[1] + int((p7[1]-p8[1])*offset_z_ratio)
# corner 2
flyarea_corners_back[1][0] = p6[0] + int((p8[0]-p6[0])*offset_x_ratio)
flyarea_corners_back[1][1] = p6[1] + int((p5[1]-p6[1])*offset_x_ratio)
# corner 3
flyarea_corners_back[2][0] = p5[0] + int((p7[0]-p5[0])*offset_x_ratio)
flyarea_corners_back[2][1] = p5[1] + int((p6[1]-p5[1])*offset_x_ratio)
# corner 4
flyarea_corners_back[3][0] = p7[0] + int((p5[0]-p7[0])*offset_x_ratio)
flyarea_corners_back[3][1] = p7[1] + int((p8[1]-p7[1])*offset_x_ratio)
#print("Back points: {}".format(flyarea_corners_back))
# draw back gate area
fa_p1_b = flyarea_corners_back[0]
fa_p2_b = flyarea_corners_back[1]
fa_p3_b = flyarea_corners_back[2]
fa_p4_b = flyarea_corners_back[3]
"""
cv2.line(img,(fa_p1_b[0],fa_p1_b[1]),(fa_p2_b[0],fa_p2_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p2_b[0],fa_p2_b[1]),(fa_p3_b[0],fa_p3_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p4_b[0],fa_p4_b[1]),(fa_p1_b[0],fa_p1_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p3_b[0],fa_p3_b[1]),(fa_p4_b[0],fa_p4_b[1]), (255,0,255),line_point)
"""
"""
# draw each predicted 2D point
for i, (x,y) in enumerate(flyarea_corners_front):
# get colors to draw the lines
col1 = 0#np.random.randint(0,256)
col2 = 0#np.random.randint(0,256)
col3 = 255#np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
# draw each predicted 2D point
for i, (x,y) in enumerate(flyarea_corners_back):
# get colors to draw the lines
col1 = 0#np.random.randint(0,256)
col2 = 0#np.random.randint(0,256)
col3 = 255#np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i+4), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
"""
# GATE ALL FRONT AND BACK
# LINES
# FRONT
front_up = Line(flyarea_corners_front[0],flyarea_corners_front[1])
front_right = Line(flyarea_corners_front[1],flyarea_corners_front[2])
front_down = Line(flyarea_corners_front[2],flyarea_corners_front[3])
front_left = Line(flyarea_corners_front[3],flyarea_corners_front[0])
#print("Front Up Line: m {:.4f} b{:.4f}".format(front_up.m, front_up.b))
#print("Front Right Line: m {:.4f} b{:.4f}".format(front_right.m, front_right.b))
#print("Front Down Line: m {:.4f} b{:.4f}".format(front_down.m, front_down.b))
#print("Front Left Line: m {:.4f} b{:.4f}".format(front_left.m, front_left.b))
# BACK
back_up = Line(flyarea_corners_back[0],flyarea_corners_back[1])
back_right = Line(flyarea_corners_back[1],flyarea_corners_back[2])
back_down = Line(flyarea_corners_back[2],flyarea_corners_back[3])
back_left = Line(flyarea_corners_back[3],flyarea_corners_back[0])
#print("Back Up Line: m {:.4f} b{:.4f}".format(back_up.m, back_up.b))
#print("Back Right Line: m {:.4f} b{:.4f}".format(back_right.m, back_right.b))
#print("Back Down Line: m {:.4f} b{:.4f}".format(back_down.m, back_down.b))
#print("Back Left Line: m {:.4f} b{:.4f}".format(back_left.m, back_left.b))
# Intersections
intersections = np.zeros((8,2))
# store in an structure that makes looping easy
front_lines = [[front_right,front_left],[front_right,front_left],[front_up,front_down],[front_up,front_down]]
back_lines = [back_up,back_down,back_right,back_left]
# compare back line with corresponding front lines
for k, (back_line, front_line_pair) in enumerate(zip(back_lines, front_lines)):
for j, front_line in enumerate(front_line_pair):
x_i = (back_line.b - front_line.b)/(front_line.m - back_line.m) # x coord of intersection point
y_i = back_line.eval(x_i) # y coord of intersection point
intersections[k*2+j][0] = x_i
intersections[k*2+j][1] = y_i
#print("Intersections: ")
#print(intersections)
# draw each intersection point
#for i, (x,y) in enumerate(intersections):
#cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
#cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
# group all points
points = np.concatenate((flyarea_corners_front,flyarea_corners_back, intersections), axis = 0)
# the corners of the flyable area is composed of the 4 points with the
# shortest distance to the centroid
points_sorted = [(np.linalg.norm(points[i]-center),points[i]) for i in range(points.shape[0])]
points_sorted.sort()
#print(points_sorted)
flyarea_corners = np.zeros((4,2), dtype = 'float32')
"""
for k in range(4):
#print(k)
point = points_sorted[k][1]
#print(point)
flyarea_corners[k] = point
x = point[0]
y = point[1]
cv2.circle(img, (int(x),int(y)), 10, (0,255,255), -1)
cv2.putText(img, str(k), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
"""
# corner 1
x1,y1 = find_intersection(front_up,back_left)
dummy1 = np.array([x1,y1])
x1,y1 = find_intersection(back_up,front_left)
dummy2 = np.array([x1,y1])
c_points = np.stack((flyarea_corners_front[0],flyarea_corners_back[0],dummy1,dummy2))
points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
points_sorted.sort()
flyarea_corners[0]=points_sorted[0][1] # extract the point with shortest distance to centroid
"""
# draw each intersection point
for i, (x,y) in enumerate(c_points):
cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
"""
# corner 2
x1,y1 = find_intersection(front_up,back_right)
dummy1 = np.array([x1,y1])
x1,y1 = find_intersection(back_up,front_right)
dummy2 = np.array([x1,y1])
c_points = np.stack((flyarea_corners_front[1],flyarea_corners_back[1],dummy1,dummy2))
points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
points_sorted.sort()
flyarea_corners[1]=points_sorted[0][1] # extract the point with shortest distance to centroid
"""
# draw each intersection point
for i, (x,y) in enumerate(c_points):
cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
"""
# corner 3
x1,y1 = find_intersection(front_down,back_right)
dummy1 = np.array([x1,y1])
x1,y1 = find_intersection(back_down,front_right)
dummy2 = np.array([x1,y1])
c_points = np.stack((flyarea_corners_front[2],flyarea_corners_back[2],dummy1,dummy2))
points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
points_sorted.sort()
flyarea_corners[2]=points_sorted[0][1] # extract the point with shortest distance to centroid
"""
# draw each intersection point
for i, (x,y) in enumerate(c_points):
cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
"""
# corner 4
x1,y1 = find_intersection(front_down,back_left)
dummy1 = np.array([x1,y1])
x1,y1 = find_intersection(back_down,front_left)
dummy2 = np.array([x1,y1])
c_points = np.stack((flyarea_corners_front[3],flyarea_corners_back[3],dummy1,dummy2))
points_sorted = [(np.linalg.norm(c_points[i]-center),c_points[i]) for i in range(c_points.shape[0])]
points_sorted.sort()
flyarea_corners[3]=points_sorted[0][1] # extract the point with shortest distance to centroid
"""
# draw each intersection point
for i, (x,y) in enumerate(c_points):
cv2.circle(img, (int(x),int(y)), 3, (0,255,255), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)
"""
fa_p1 = flyarea_corners[0]
fa_p2 = flyarea_corners[1]
fa_p3 = flyarea_corners[2]
fa_p4 = flyarea_corners[3]
"""
cv2.line(img,(fa_p1[0],fa_p1[1]),(fa_p2[0],fa_p2[1]), (0,0,255),line_point)
cv2.line(img,(fa_p2[0],fa_p2[1]),(fa_p3[0],fa_p3[1]), (0,0,255),line_point)
cv2.line(img,(fa_p4[0],fa_p4[1]),(fa_p1[0],fa_p1[1]), (0,0,255),line_point)
cv2.line(img,(fa_p3[0],fa_p3[1]),(fa_p4[0],fa_p4[1]), (0,0,255),line_point)
"""
    # YET ANOTHER METHOD: for each side of the gate keep the innermost of the
    # front/back edges, then intersect adjacent edges to get the corners of
    # the visible flyable area.
if( back_up.p[1] > front_up.p[1]):
up_line = back_up
else:
up_line = front_up
if( back_down.p[1] < front_down.p[1]):
down_line = back_down
else:
down_line = front_down
if( back_right.p[0] < front_right.p[0]):
right_line = back_right
else:
right_line = front_right
if( back_left.p[0] > front_left.p[0] ):
left_line = back_left
else:
left_line = front_left
x1,y1 = find_intersection(up_line,left_line)
dummy1 = np.array([x1,y1])
flyarea_corners[0] = dummy1
x1,y1 = find_intersection(up_line,right_line)
dummy1 = np.array([x1,y1])
flyarea_corners[1] = dummy1
x1,y1 = find_intersection(down_line,right_line)
dummy1 = np.array([x1,y1])
flyarea_corners[2] = dummy1
x1,y1 = find_intersection(down_line,left_line)
dummy1 = np.array([x1,y1])
flyarea_corners[3] = dummy1
fa_p1 = flyarea_corners[0]
fa_p2 = flyarea_corners[1]
fa_p3 = flyarea_corners[2]
fa_p4 = flyarea_corners[3]
cv2.line(img,(fa_p1[0],fa_p1[1]),(fa_p2[0],fa_p2[1]), (0,0,255),line_point)
cv2.line(img,(fa_p2[0],fa_p2[1]),(fa_p3[0],fa_p3[1]), (0,0,255),line_point)
cv2.line(img,(fa_p4[0],fa_p4[1]),(fa_p1[0],fa_p1[1]), (0,0,255),line_point)
cv2.line(img,(fa_p3[0],fa_p3[1]),(fa_p4[0],fa_p4[1]), (0,0,255),line_point)
"""
############################################################
# PREDICT FLYABLE AREA BASED ON ESTIMATED POSE
############################################################
offset = 0.0 # flyable area corners are at an offset from outermost corners
y = min_y # and they are over a plane
p1 = np.array([[min_x+offset],[y],[min_z+offset]])
p2 = np.array([[min_x+offset],[y],[max_z-offset]])
p3 = np.array([[max_x-offset],[y],[min_z+offset]])
p4 = np.array([[max_x-offset],[y],[max_z-offset]])
# These are 4 points defining the square of the flyable area
flyarea_3Dpoints = np.concatenate((p1,p2,p3,p4), axis = 1)
flyarea_3Dpoints = np.concatenate((flyarea_3Dpoints, np.ones((1,4))), axis = 0)
print("Gate Flyable Area 3D:\n{}".format(flyarea_3Dpoints))
# get transform
Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
flyarea_2Dpoints = compute_projection(flyarea_3Dpoints, Rt_pr, internal_calibration)
print("Gate Flyable Area 2D projection:\n{}".format(flyarea_2Dpoints))
for i,(x,y) in enumerate(flyarea_2Dpoints.T):
col1 = 0#np.random.randint(0,256)
col2 = 0#np.random.randint(0,256)
col3 = 255#np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
p1_2d = np.array([ flyarea_2Dpoints[0][0], flyarea_2Dpoints[1][0]])
p2_2d = np.array([ flyarea_2Dpoints[0][1], flyarea_2Dpoints[1][1]])
p3_2d = np.array([ flyarea_2Dpoints[0][2], flyarea_2Dpoints[1][2]])
p4_2d = np.array([ flyarea_2Dpoints[0][3], flyarea_2Dpoints[1][3]])
"""
# Show the image and wait key press
cv2.imshow(wname, img)
cv2.waitKey()
print("Rotation: {}".format(R_pr))
print("Translation: {}".format(t_pr))
print(" Predict time: {}".format(t2 - t1))
print(" 2D Points extraction time: {}".format(t3- t2))
    print(" Pose calculation time: {}".format(t4 - t3))
print(" Total time: {}".format(t4-t1))
print("Press any key to close.")
if __name__ == '__main__':
import sys
if (len(sys.argv) == 5):
datacfg_file = sys.argv[1] # data file
cfgfile_file = sys.argv[2] # yolo network file
        weightfile_file = sys.argv[3] # weights file
imgfile_file = sys.argv[4] # image file
test(datacfg_file, cfgfile_file, weightfile_file, imgfile_file)
else:
print('Usage:')
        print('    python test2.py datacfg cfgfile weightfile imagefile')
| [row footer: avg_line_length 40.115646 | max_line_length 187 | alphanum_fraction 0.598186 | 23,588 bytes over 588 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 112fbb6bacb51a637008a37470e77beab2c5a20e | size 4,028 | ext py | lang Python | path nba/model/utils.py | repo mattdhart/GBling | head ed868dccfcaf7588e7a1297f2294fd62b62e43be | licenses ["Apache-2.0"] | stars/issues/forks null
team_abbr_lookup = {
"Toronto Raptors": "TOR",
"Brooklyn Nets": "BRK",
"New York Knicks": "NYK",
"Boston Celtics": "BOS",
"Philadelphia 76ers": "PHI",
"Indiana Pacers": "IND",
"Chicago Bulls": "CHI",
"Cleveland Cavaliers": "CLE",
"Detroit Pistons": "DET",
"Milwaukee Bucks": "MIL",
"Miami Heat": "MIA",
"Washington Wizards": "WAS",
"Charlotte Bobcats": "CHA",
"Charlotte Hornets": "CHA",
"Atlanta Hawks": "ATL",
"Orlando Magic": "ORL",
"Oklahoma City Thunder": "OKC",
"Portland Trail Blazers": "POR",
"Minnesota Timberwolves": "MIN",
"Denver Nuggets": "DEN",
"Utah Jazz": "UTA",
"Los Angeles Clippers": "LAC",
"Golden State Warriors": "GSW",
"Phoenix Suns": "PHO",
"Sacramento Kings": "SAC",
"Los Angeles Lakers": "LAL",
"San Antonio Spurs": "SAS",
"Houston Rockets": "HOU",
"Memphis Grizzlies": "MEM",
"Dallas Mavericks": "DAL",
"New Orleans Pelicans": "NOP"
}
abbr_team_lookup = {
"TOR": "Toronto Raptors",
"BRK": "Brooklyn Nets",
"NYK": "New York Knicks",
"BOS": "Boston Celtics",
"PHI": "Philadelphia 76ers",
"IND": "Indiana Pacers",
"CHI": "Chicago Bulls",
"CLE": "Cleveland Cavaliers",
"DET": "Detroit Pistons",
"MIL": "Milwaukee Bucks",
"MIA": "Miami Heat",
"WAS": "Washington Wizards",
"CHA": "Charlotte Hornets",
"ATL": "Atlanta Hawks",
"ORL": "Orlando Magic",
"OKC": "Oklahoma City Thunder",
"POR": "Portland Trail Blazers",
"MIN": "Minnesota Timberwolves",
"DEN": "Denver Nuggets",
"UTA": "Utah Jazz",
"LAC": "Los Angeles Clippers",
"GSW": "Golden State Warriors",
"PHO": "Phoenix Suns",
"SAC": "Sacramento Kings",
"LAL": "Los Angeles Lakers",
"SAS": "San Antonio Spurs",
"HOU": "Houston Rockets",
"MEM": "Memphis Grizzlies",
"DAL": "Dallas Mavericks",
"NOP": "New Orleans Pelicans"
}
oddsshark_team_id_lookup = {
"Toronto Raptors": 20742,
"Brooklyn Nets": 20749,
"New York Knicks": 20747,
"Boston Celtics": 20722,
"Philadelphia 76ers": 20731,
"Indiana Pacers": 20737,
"Chicago Bulls": 20732,
"Cleveland Cavaliers": 20735,
"Detroit Pistons": 20743,
"Milwaukee Bucks": 20725,
"Miami Heat": 20726,
"Washington Wizards": 20746,
"Charlotte Bobcats": 20751,
"Atlanta Hawks": 20734,
"Orlando Magic": 20750,
"Oklahoma City Thunder": 20728,
"Portland Trail Blazers": 20748,
"Minnesota Timberwolves": 20744,
"Denver Nuggets": 20723,
"Utah Jazz": 20738,
"Los Angeles Clippers": 20736,
"Golden State Warriors": 20741,
"Phoenix Suns": 20730,
"Sacramento Kings": 20745,
"Los Angeles Lakers": 20739,
"San Antonio Spurs": 20724,
"Houston Rockets": 20740,
"Memphis Grizzlies": 20729,
"Dallas Mavericks": 20727,
"New Orleans Pelicans": 20733
}
oddsshark_city_lookup = {
"Toronto": "Toronto Raptors",
"Brooklyn": "Brooklyn Nets",
"New York": "New York Knicks",
"Boston": "Boston Celtics",
"Philadelphia": "Philadelphia 76ers",
"Indiana": "Indiana Pacers",
"Chicago": "Chicago Bulls",
"Cleveland": "Cleveland Cavaliers",
"Detroit": "Detroit Pistons",
"Milwaukee": "Milwaukee Bucks",
"Miami": "Miami Heat",
"Washington": "Washington Wizards",
"Charlotte": "Charlotte Hornets",
"Atlanta": "Atlanta Hawks",
"Orlando": "Orlando Magic",
"Oklahoma City": "Oklahoma City Thunder",
"Portland": "Portland Trail Blazers",
"Minnesota": "Minnesota Timberwolves",
"Denver": "Denver Nuggets",
"Utah": "Utah Jazz",
"LA Clippers": "Los Angeles Clippers",
"Golden State": "Golden State Warriors",
"Phoenix": "Phoenix Suns",
"Sacramento": "Sacramento Kings",
"LA Lakers": "Los Angeles Lakers",
"San Antonio": "San Antonio Spurs",
"Houston": "Houston Rockets",
"Memphis": "Memphis Grizzlies",
"Dallas": "Dallas Mavericks",
"New Orleans": "New Orleans Pelicans"
}
| [row footer: avg_line_length 29.188406 | max_line_length 45 | alphanum_fraction 0.606753 | 4,028 bytes over 137 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 11309186f51ff34a8ed70944cd3fd480bd97b840 | size 335 | ext py | lang Python | path FCMDemo_server/main.py | repo charsyam/AndroidFCMDemo | head 67e3bb2fbbdd1bb7ba5e194d064b9f9fc62d5697 | licenses ["MIT"] | stars/issues/forks null
from flask import Flask, request
import redis
app = Flask(__name__)
rconn = redis.StrictRedis()
def keygen(key):
return "token:{key}".format(key=key)
@app.route('/api/register', methods=["POST"])
def register_token():
userid = request.form['userid']
token = request.form['token']
    rconn.set(keygen(userid), token)
    return "OK"
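# Minimal local entry point (a sketch; assumes a Redis server reachable at
# localhost:6379, the StrictRedis() default used above):
if __name__ == "__main__":
    app.run(debug=True)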
| [row footer: avg_line_length 18.611111 | max_line_length 45 | alphanum_fraction 0.683582 | 335 bytes over 17 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 1131872ab4a0cec6debce22fccdd6997732871ab | size 3,975 | ext py | lang Python | path src/tfx_helper/local.py | repo dlabsai/tfx-helper | head 74a05ffeaa14fdc0866d063e36114f7d654a5ae9 | licenses ["MIT"] | stars/issues/forks null
import os.path
from typing import Any, Iterable, Mapping, Optional, Tuple
import tfx.v1 as tfx
from absl import logging
from ml_metadata.proto import metadata_store_pb2
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.types.channel import Channel
from .base import BasePipelineHelper
from .interface import DEFAULT_CUSTOM_CONFIG, Resources
class LocalPipelineHelper(BasePipelineHelper, arbitrary_types_allowed=True):
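    """Pipeline helper that runs TFX pipelines locally with the
    LocalDagRunner, storing ML Metadata in a local SQLite database."""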
model_push_destination: tfx.proto.PushDestination
def construct_trainer(
self,
*,
examples: Optional[Channel] = None,
transform_graph: Optional[Channel] = None,
schema: Optional[Channel] = None,
base_model: Optional[Channel] = None,
hyperparameters: Optional[Channel] = None,
run_fn: str,
train_args: Optional[tfx.proto.TrainArgs] = None,
eval_args: Optional[tfx.proto.EvalArgs] = None,
custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
) -> BaseComponent:
return tfx.components.Trainer(
examples=examples,
transform_graph=transform_graph,
schema=schema,
base_model=base_model,
hyperparameters=hyperparameters,
run_fn=run_fn,
train_args=train_args,
eval_args=eval_args,
custom_config=dict(custom_config),
)
def construct_tuner(
self,
*,
examples: Channel,
schema: Optional[Channel] = None,
transform_graph: Optional[Channel] = None,
base_model: Optional[Channel] = None,
tuner_fn: str,
train_args: Optional[tfx.proto.TrainArgs] = None,
eval_args: Optional[tfx.proto.EvalArgs] = None,
custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
) -> BaseComponent:
return tfx.components.Tuner(
examples=examples,
schema=schema,
transform_graph=transform_graph,
base_model=base_model,
tuner_fn=tuner_fn,
train_args=train_args,
eval_args=eval_args,
custom_config=dict(custom_config),
)
def construct_pusher(
self,
*,
model: Optional[Channel] = None,
model_blessing: Optional[Channel] = None,
infra_blessing: Optional[Channel] = None,
custom_config: Mapping[str, Any] = DEFAULT_CUSTOM_CONFIG,
) -> BaseComponent:
return tfx.components.Pusher(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=self.model_push_destination,
custom_config=dict(custom_config),
)
def get_metadata_connection_config(self) -> metadata_store_pb2.ConnectionConfig:
metadata_path = os.path.join(
self.output_dir, "tfx_metadata", self.pipeline_name, "metadata.db"
)
logging.info("Pipeline will store metadata in %r", metadata_path)
return tfx.orchestration.metadata.sqlite_metadata_connection_config(
metadata_path
)
def create_and_run_pipeline(
self,
components: Iterable[BaseComponent],
enable_cache: bool = False,
) -> None:
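        """Assemble the components into a tfx.dsl.Pipeline and execute it
        synchronously with the local DAG runner."""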
logging.info(
"Creating local pipeline name=%r, root=%r, enable_cache=%r",
self.pipeline_name,
self.pipeline_root,
enable_cache,
)
metadata_connection_config = self.get_metadata_connection_config()
pipeline = tfx.dsl.Pipeline(
pipeline_name=self.pipeline_name,
pipeline_root=self.pipeline_root,
components=list(components),
enable_cache=enable_cache,
metadata_connection_config=metadata_connection_config,
)
logging.info("Runnig pipeline using local DAG runner")
tfx.orchestration.LocalDagRunner().run(pipeline)
logging.info("Pipeline run finished")
| [row footer: avg_line_length 35.176991 | max_line_length 84 | alphanum_fraction 0.646289 | 3,975 bytes over 112 lines | qsc_* quality-signal columns (mostly 0/null)]
| [next row] hexsha 113207da50dd87a4fba010e2037a5449d9f802b7 | size 7,956 | ext py | lang Python | path flash_services/utils.py | repo textbook/flash_services | head 9422f48f62dd0cbef4ad5e593513de357496ed72 | licenses ["0BSD"] | stars 2 (2016-05-05T20:09:45Z to 2017-09-29T08:52:56Z) | issues 27 (2016-04-18T08:32:47Z to 2021-11-25T11:05:15Z) | forks null
"""Useful utility functions for services."""
import logging
import re
from datetime import datetime, timezone
from inspect import Parameter, Signature
from dateutil.parser import parse
from humanize import naturaldelta, naturaltime
logger = logging.getLogger(__name__)
WORDS = {'1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
'6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '10': 'ten'}
NUMBERS = re.compile(r'\b([1-9]|10)\b')
class Outcome:
"""Possible outcomes for a CI build."""
WORKING = 'working'
PASSED = 'passed'
CANCELLED = 'cancelled'
FAILED = 'failed'
CRASHED = 'crashed'
def _numeric_words(text):
"""Replace numbers 1-10 with words.
Arguments:
text (:py:class:`str`): The text to replace numbers in.
Returns:
:py:class:`str`: The new text containing words.
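    Example:
        >>> _numeric_words('3 new commits')
        'three new commits'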
"""
return NUMBERS.sub(lambda m: WORDS[m.group()], text)
def friendlier(func):
"""Replace numbers to make functions friendlier.
Arguments:
func: The function to wrap.
Returns:
A wrapper function applying :py:func:`_numeric_words`.
"""
def wrapper(*args, **kwargs):
"""Wrapper function to apply _numeric_words."""
result = func(*args, **kwargs)
try:
return _numeric_words(result)
except TypeError:
return result
return wrapper
naturaldelta = friendlier(naturaldelta) # pylint: disable=invalid-name
naturaltime = friendlier(naturaltime) # pylint: disable=invalid-name
def elapsed_time(start, end):
"""Calculate the elapsed time for a service activity.
Arguments:
start (:py:class:`str`): The activity start time.
end (:py:class:`str`): The activity end time.
Returns:
:py:class:`tuple`: The start and end times and humanized elapsed
time.
"""
start_time = safe_parse(start)
end_time = safe_parse(end)
if start_time is None or end_time is None:
logger.warning('failed to generate elapsed time')
text = 'elapsed time not available'
else:
text = 'took {}'.format(naturaldelta(parse(end) - parse(start)))
return to_utc_timestamp(start_time), to_utc_timestamp(end_time), text
def to_utc_timestamp(date_time):
"""Convert a naive or timezone-aware datetime to UTC timestamp.
Arguments:
date_time (:py:class:`datetime.datetime`): The datetime to
convert.
Returns:
:py:class:`int`: The timestamp (in seconds).
"""
if date_time is None:
return
if date_time.tzname() is None:
timestamp = date_time.replace(tzinfo=timezone.utc).timestamp()
else:
timestamp = date_time.timestamp()
return int(round(timestamp, 0))
def safe_parse(time):
"""Parse a string without throwing an error.
Arguments:
time (:py:class:`str`): The string to parse.
Returns:
:py:class:`datetime.datetime`: The parsed datetime.
"""
if time is None:
return
try:
return parse(time)
except (OverflowError, ValueError):
pass
def occurred(at_):
"""Calculate when a service event occurred.
Arguments:
at_ (:py:class:`str`): When the event occurred.
Returns:
:py:class:`str`: The humanized occurrence time.
"""
try:
occurred_at = parse(at_)
except (TypeError, ValueError):
logger.warning('failed to parse occurrence time %r', at_)
return 'time not available'
utc_now = datetime.now(tz=timezone.utc)
try:
return naturaltime((utc_now - occurred_at).total_seconds())
except TypeError: # at_ is a naive datetime
return naturaltime((datetime.now() - occurred_at).total_seconds())
def health_summary(builds):
"""Summarise the health of a project based on builds.
Arguments:
builds (:py:class:`list`): List of builds.
Returns:
:py:class:`str`: The health summary.
"""
for build in builds:
if build['outcome'] in {Outcome.PASSED}:
return 'ok'
elif build['outcome'] in {Outcome.CRASHED, Outcome.FAILED}:
return 'error'
return 'neutral'
def estimate_time(builds):
"""Update the working build with an estimated completion time.
Takes a simple average over the previous builds, using those
whose outcome is ``'passed'``.
Arguments:
builds (:py:class:`list`): All builds.
"""
try:
index, current = next(
(index, build) for index, build in enumerate(builds[:4])
if build['outcome'] == Outcome.WORKING
)
except StopIteration:
return # no in-progress builds
if current.get('started_at') is None:
current['elapsed'] = 'estimate not available'
return
usable = [
build for build in builds[index + 1:]
if build['outcome'] == Outcome.PASSED and build['duration'] is not None
]
if not usable:
current['elapsed'] = 'estimate not available'
return
average_duration = int(sum(build['duration'] for build in usable) /
float(len(usable)))
finish = current['started_at'] + average_duration
remaining = (datetime.fromtimestamp(finish) -
datetime.now()).total_seconds()
if remaining >= 0:
current['elapsed'] = '{} left'.format(naturaldelta(remaining))
else:
current['elapsed'] = 'nearly done'
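# Worked example (hedged, hypothetical numbers): if the working build started
# at t0 and the usable passed builds took 300s and 420s, the average is 360s,
# so finish = t0 + 360 and 'elapsed' reads e.g. 'six minutes left' (via the
# friendlier() wrapper) while that moment is still in the future.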
GITHUB_ISSUE = re.compile(r'''
(?: # one of:
fix(?:e(?:s|d))? # fix, fixes or fixed
| close(?:s|d)? # close, closes or closed
| resolve(?:s|d)? # resolve, resolves or resolved
)\s*(?:[^/]+/[^#]+)? # the account and repository name
\#\d+ # the issue number
''', re.IGNORECASE + re.VERBOSE)
"""Pattern for commit comment issue ID format, per `GitHub documentation`_.
.. _GitHub documentation: https://help.github.com/articles/closing-issues-via-commit-messages/
"""
TRACKER_STORY = re.compile(r'''
\[(?:
(?:
finish(?:e(?:s|d))? # finish, finishes or finished
| complete(?:s|d)? # complete, completes or completed
| fix(?:e(?:s|d))? # fix, fixes or fixed
)?
\s*\#\d+\s* # the story ID
)+\]
''', re.IGNORECASE + re.VERBOSE)
"""Pattern for commit hook story ID format, per `Tracker documentation`_.
.. _Tracker documentation: https://www.pivotaltracker.com/help/api/rest/v5#Source_Commits
"""
def remove_tags(commit_message):
"""Remove issue/tracker tags from a commit message.
Note:
Currently implemented for :py:class:`~.Tracker` and
:py:class:`~.GitHub` commit messages.
Arguments:
commit_message (:py:class:`str`): The commit message.
Returns:
:py:class:`str`: The message with tags removed.
"""
for remove in [GITHUB_ISSUE, TRACKER_STORY]:
commit_message = remove.sub('', commit_message)
return commit_message.strip()
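# Illustration (hedged; doctest-style, the commit messages are hypothetical):
#
#   >>> remove_tags('Fixes #42 improve logging')
#   'improve logging'
#   >>> remove_tags('[Finishes #1234] add retry to uploads')
#   'add retry to uploads'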
def required_args(attrs):
"""Extract the required arguments from a class's attrs.
Arguments:
attrs (:py:class:`dict`): The attributes of a class.
Returns:
:py:class:`set`: The required arguments.
"""
init_args, attr_args = set(), set()
if '__init__' in attrs:
sig = Signature.from_callable(attrs['__init__'])
init_args = set(
name
for name, param in sig.parameters.items()
if param.kind == Parameter.KEYWORD_ONLY
and param.default is Signature.empty
)
if 'REQUIRED' in attrs:
attr_args = attrs['REQUIRED']
return set.union(attr_args, init_args)
def provided_args(attrs):
"""Extract the provided arguments from a class's attrs.
Arguments:
attrs (:py:class:`dict`): The attributes of a class.
Returns:
:py:class:`set`: The provided arguments.
"""
return attrs.get('PROVIDED', set())
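# Sketch of the intended use (hedged; the class below is hypothetical):
#
#   class Service:
#       REQUIRED = {'url'}
#       PROVIDED = {'status'}
#       def __init__(self, *, token, retries=3):
#           ...
#
#   required_args(vars(Service))  # -> {'token', 'url'}
#   provided_args(vars(Service))  # -> {'status'}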
| 27.434483
| 94
| 0.616013
| 960
| 7,956
| 5.015625
| 0.28125
| 0.031983
| 0.020768
| 0.024299
| 0.135826
| 0.078505
| 0.06189
| 0.046521
| 0.037799
| 0.037799
| 0
| 0.003886
| 0.256159
| 7,956
| 289
| 95
| 27.529412
| 0.809733
| 0.289844
| 0
| 0.147059
| 0
| 0
| 0.205841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0.029412
| 0.044118
| 0
| 0.330882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11320bf37db22b6bcf70d050353d8c49a441efd2
| 1,084
|
py
|
Python
|
Python3/0407-Trapping-Rain-Water-II/soln-2.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0407-Trapping-Rain-Water-II/soln-2.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0407-Trapping-Rain-Water-II/soln-2.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
import heapq
from typing import List

class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
if not any(heightMap):
return 0
m, n = len(heightMap), len(heightMap[0])
pq = []
visited = set()
for j in range(n):
pq.append((heightMap[0][j], 0, j))
pq.append((heightMap[m - 1][j], m - 1, j))
visited.add((0, j))
visited.add((m - 1, j))
for i in range(1, m - 1):
pq.append((heightMap[i][0], i, 0))
pq.append((heightMap[i][n - 1], i, n - 1))
visited.add((i, 0))
visited.add((i, n - 1))
heapq.heapify(pq)
water = 0
while pq:
level, i, j = heapq.heappop(pq)
for ni, nj in (i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1):
if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
visited.add((ni, nj))
water += max(level - heightMap[ni][nj], 0)
heapq.heappush(pq, (max(level, heightMap[ni][nj]), ni, nj))
return water
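# Sanity check (hedged, added for illustration): the classic example from
# LeetCode 407 should trap 4 units of water with this heap-based flood fill.
if __name__ == '__main__':
    grid = [[1, 4, 3, 1, 3, 2],
            [3, 2, 1, 3, 2, 4],
            [2, 3, 3, 2, 3, 1]]
    print(Solution().trapRainWater(grid))  # expected: 4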
| 38.714286
| 79
| 0.439114
| 154
| 1,084
| 3.090909
| 0.246753
| 0.05042
| 0.142857
| 0.07563
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036309
| 0.390221
| 1,084
| 27
| 80
| 40.148148
| 0.683812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11327c7421ed7b895a1170478e90b2ac25d66a3a
| 1,233
|
py
|
Python
|
d16.py
|
JasperGeurtz/aoc-2020
|
976b54016364e24fdf827b6e60edae82e9458277
|
[
"MIT"
] | 1
|
2021-01-03T12:08:39.000Z
|
2021-01-03T12:08:39.000Z
|
d16.py
|
JasperGeurtz/aoc-2020
|
976b54016364e24fdf827b6e60edae82e9458277
|
[
"MIT"
] | null | null | null |
d16.py
|
JasperGeurtz/aoc-2020
|
976b54016364e24fdf827b6e60edae82e9458277
|
[
"MIT"
] | null | null | null |
import utils
m = utils.opener.raw("input/16.txt")
rm, tm, om = m.split("\n\n")
rules = {}
for line in rm.split("\n"):
name, expr = line.split(": ")
rules[name] = [[int(q) for q in x.split("-")] for x in expr.split(" or ")]
myticket = [int(x) for x in tm.split("\n")[1].split(",")]
tickets = [[int(q) for q in x.split(",")] for x in tm.split("\n")[1:] + om.split("\n")[1:-1]]
s1 = 0
for t in tickets[:]:
for v in t:
if not any([r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1] for r in rules.values()]):
s1 += v
tickets.remove(t)
print("1:", s1)
possible = {}
for rule in rules:
possible[rule] = set(range(len(myticket)))
for t in tickets:
for i, v in enumerate(t):
for rname, r in rules.items():
if not (r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1]):
if i in possible[rname]:
possible[rname].remove(i)
found = {}
while possible:
k, v = min(possible.items(), key=lambda item: len(item[1]))  # fewest candidates
found[k] = list(v)[0]
del possible[k]
for val in possible.values():
val.remove(found[k])
s2 = 1
for k, v in found.items():
if k.startswith("departure"):
s2 *= myticket[v]
print("2:", s2)
| 25.6875
| 97
| 0.518248
| 215
| 1,233
| 2.972093
| 0.27907
| 0.046948
| 0.018779
| 0.025039
| 0.206573
| 0.156495
| 0.156495
| 0.118936
| 0.118936
| 0.118936
| 0
| 0.037322
| 0.261152
| 1,233
| 47
| 98
| 26.234043
| 0.664105
| 0
| 0
| 0
| 0
| 0
| 0.037307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.027027
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11331886bdb42648eba47c6e484600231ff9a470
| 4,931
|
py
|
Python
|
run_portfolio.py
|
drewvolpe/vc_modeling
|
5ba33e41e3c1ffad212d1a0a1abb585b2c384221
|
[
"MIT"
] | 1
|
2020-07-12T09:16:37.000Z
|
2020-07-12T09:16:37.000Z
|
run_portfolio.py
|
drewvolpe/vc_modeling
|
5ba33e41e3c1ffad212d1a0a1abb585b2c384221
|
[
"MIT"
] | null | null | null |
run_portfolio.py
|
drewvolpe/vc_modeling
|
5ba33e41e3c1ffad212d1a0a1abb585b2c384221
|
[
"MIT"
] | null | null | null |
from collections import Counter
import random
import math
###
# Parameters of assumptions
###
# How many initial investments and avg check size
num_seed_rounds = 50
invested_per_seed_round = 0.5
# Probabilities of different outcomes (prob, outcome multiple)
outcome_probs_seed = [ [0.01, 100], # N% chance of Mx return
[0.03, 20],
[0.03, 10],
[0.03, 6],
[0.25, 1],
[0.65, 0]]
follow_on_pct = 0.5 # % of deals in which fund invests in next round
invested_per_follow_on = 1.0 # avg size of follow-on investment
outcome_probs_follow = [ [0.02, 30],
[0.06, 15],
[0.06, 8],
[0.06, 4],
[0.30, 1],
[0.50, 0]]
# number of simulated portfolios to generate
num_simulations = 10000
# constants
fund_size = (num_seed_rounds * invested_per_seed_round) +\
(num_seed_rounds * follow_on_pct * invested_per_follow_on)
###
# Classes
###
class Investment:
def __init__(self, amt_in, outcome, is_seed=True):
self.is_seed = is_seed
self.amt_in = amt_in
self.outcome = outcome
@property
def amt_out(self):
return (self.outcome * self.amt_in)
class Portfolio:
def __init__(self, investments):
self.investments = investments
@property
def total_invested(self):
return sum([i.amt_in for i in self.investments])
@property
def total_returned(self):
return sum([i.amt_out for i in self.investments])
@property
def return_multiple(self):
return ((self.total_returned*1.0) / self.total_invested)
def __str__(self):
l = ['invested: %s' % self.total_invested,
'returned: %s' % self.total_returned,
'return_multiple %s' % self.return_multiple,
'num_deals_total %s' % len(self.investments),
'num_deals_seed %s' % len([i for i in self.investments if i.is_seed]),
'num_deals_follow %s' % len([i for i in self.investments if not i.is_seed]),
]
return '%s' % l
###
# Funcs
##
def validate_params():
# Float sums are rarely exactly 1.0, so compare with a small tolerance.
if abs(sum(x[0] for x in outcome_probs_seed) - 1.0) > 1e-9:
raise Exception("Seed probabilities don't add to 1!")
if abs(sum(x[0] for x in outcome_probs_follow) - 1.0) > 1e-9:
raise Exception("Follow on probabilities don't add to 1!")
def create_portfolio():
investments = []
# Seed rounds
for i in range(0, num_seed_rounds):
r = random.random()
prob_sum = 0
for (cur_prob, cur_outcome) in outcome_probs_seed:
prob_sum += cur_prob
if (r <= prob_sum):
investments.append(Investment(invested_per_seed_round, cur_outcome))
break
# Follow on
for i in range(0, num_seed_rounds):
if (random.random() > follow_on_pct):
continue # did not follow on
r = random.random() # for now, make them uncorrelated
prob_sum = 0
for (cur_prob, cur_outcome) in outcome_probs_follow:
prob_sum += cur_prob
if (r <= prob_sum):
investments.append(Investment(invested_per_follow_on, cur_outcome, is_seed=False))
return Portfolio(investments)
def run_simulations(num_iters):
portfolios = []
for i in range(0, num_iters):
cur_portfolio = create_portfolio()
portfolios.append(cur_portfolio)
# print a few, for debugging
print('Sample portfolios:')
for p in portfolios[0:10]:
print(' P: %s' % p)
print('# of portfolios with different multiple returns')
returns_counter = Counter([math.floor(p.return_multiple) for p in portfolios])
for (ret, cnt) in sorted(returns_counter.items()):
pct = 100 * ((cnt*1.0) / num_iters)
print(' %sx - %s (%0.0f%%)' % (ret, cnt, pct))
print('# of portfolios with different multiple returns (to 0.1x)')
returns_counter = Counter([round(p.return_multiple,1) for p in portfolios])
cum_pct = 0
for (ret, cnt) in sorted(returns_counter.items()):
pct = 100 * ((cnt*1.0) / num_iters)
cum_pct += pct
stars = '*' * int(pct*10)
print(' %sx - %s (%0.0f%%) (%0.0f%%) %s' % (ret, cnt, pct, cum_pct, stars))
###
# main()
###
if __name__ == "__main__":
# for dev
# random.seed(31331)
print('starting...')
print('validating params...')
validate_params()
print('Parameters')
print(' $%0.0fm fund which makes %s $%sm seed investments.' %\
( fund_size, num_seed_rounds, invested_per_seed_round))
print(' Follows on with $%sm, %s of the time.' % (invested_per_follow_on, follow_on_pct))
print('')
print('Running portfolio simulation...')
run_simulations(num_simulations)
print('done.')
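# Quick expected-value check (hedged helper, not part of the original script):
def expected_multiple(outcome_probs):
    """Expected return multiple implied by a (probability, multiple) table."""
    return sum(prob * multiple for prob, multiple in outcome_probs)
# expected_multiple(outcome_probs_seed) == 2.33 and
# expected_multiple(outcome_probs_follow) == 2.52, so simulated portfolio
# multiples should centre between the two, weighted by dollars per stage.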
| 28.33908
| 98
| 0.589536
| 652
| 4,931
| 4.239264
| 0.230061
| 0.034732
| 0.015195
| 0.028944
| 0.299204
| 0.278944
| 0.256874
| 0.201158
| 0.183068
| 0.115051
| 0
| 0.032553
| 0.289799
| 4,931
| 173
| 99
| 28.50289
| 0.75671
| 0.089231
| 0
| 0.165138
| 0
| 0
| 0.12098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091743
| false
| 0
| 0.027523
| 0.036697
| 0.192661
| 0.12844
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11344bfdd8f3f077e971333f0359d4844c75765b
| 611
|
py
|
Python
|
tests/__init__.py
|
rhit-goldmate/lab-1
|
4f9f606f24c783495a246c13bde1f24a44bcf247
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
rhit-goldmate/lab-1
|
4f9f606f24c783495a246c13bde1f24a44bcf247
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
rhit-goldmate/lab-1
|
4f9f606f24c783495a246c13bde1f24a44bcf247
|
[
"MIT"
] | 1
|
2021-09-13T14:47:48.000Z
|
2021-09-13T14:47:48.000Z
|
import os
from flask import Blueprint, Flask
def create_app(opts=None):  # avoid a mutable default argument
app = Flask(__name__)
# We will learn how to store our secrets properly in a few short weeks.
# In the meantime, we'll use this:
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY') or "Don't ever store secrets in your actual code"
# For local debugging purposes. Not ideal for production environments:
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
from .simple_photo_processor import spp as spp_blueprint
app.register_blueprint(spp_blueprint)
return app
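# Minimal usage sketch (hedged; assumes the blueprint import above resolves):
#
#   app = create_app()
#   app.run(debug=True)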
| 35.941176
| 104
| 0.728314
| 92
| 611
| 4.641304
| 0.706522
| 0.063232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002024
| 0.191489
| 611
| 17
| 105
| 35.941176
| 0.862348
| 0.283142
| 0
| 0
| 0
| 0
| 0.252874
| 0.105747
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
113a13cfc94224ffc2876a0d52f150f295d86f1c
| 20,820
|
py
|
Python
|
jscodestyle/main.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
jscodestyle/main.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
jscodestyle/main.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 The JsCodeStyle Authors.
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
from __future__ import print_function
import argparse
import sys
import time
import os
import glob
import re
import multiprocessing
import errno
from itertools import tee
from functools import partial
from jscodestyle.errorrecord import check_path, fix_path
from jscodestyle.error_check import STRICT_DOC, JSLINT_ERROR_DOC
from jscodestyle.error_fixer import ErrorFixer
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary', '--quiet']
# Comment - Below are all the arguments from gjslint. There are way
# too many, we should think what is really useful and cull some.
# Perhaps we should rely more on a config file for advance setups
class JsCodeStyle(object):
"""This class is a front end that parses arguments and flags."""
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'paths',
help='the files to check',
type=str,
nargs='*',
default=sys.stdin)
parser.add_argument(
'-u', '--unix_mode',
help='emit warnings in standard unix format e.g. for Emacs',
action='store_true')
parser.add_argument(
'-b', '--beep',
help='do not beep when errors are found',
action='store_false')
parser.add_argument(
'-t', '--time',
help='emit timing statistics',
action='store_true')
parser.add_argument(
'-c', '--check_html',
help='check javascript in html files',
action='store_true')
parser.add_argument(
'-s', '--summary',
help='show an error count summary',
action='store_true')
parser.add_argument(
'-q', '--quiet',
help=('minimize logged messages. '
'Most useful for per-file linting, such as that '
'performed by the presubmit linter service.'),
action='store_true')
parser.add_argument(
'-p', '--singleprocess',
help=('disable parallelised linting using the '
'multiprocessing module; this may make debugging easier.'),
action='store_true')
parser.add_argument(
'-a', '--additional_extensions',
help=('Additional file extensions (not js) that should '
'be treated as JavaScript files e.g. es, es6 or ts.'),
metavar='ext',
nargs='+')
parser.add_argument(
'-r', '--recurse',
help=('recurse in to the subdirectories of the given path'),
action='append',
nargs='+',
metavar='dir')
parser.add_argument(
'-e', '--exclude_directories',
help=('exclude the specified directories '
'(only applicable along with -r)'),
type=str,
action='append',
nargs='+',
metavar='dir')
parser.add_argument(
'-x', '--exclude_files',
type=str,
nargs='*',
help='exclude the specified files',
action='append',
metavar='file')
parser.add_argument(
'--limited_doc_files',
help=('List of files with relaxed documentation checks. Will not '
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.'),
action='append',
nargs='*',
metavar="filename")
parser.add_argument(
'--error_trace',
help='show error exceptions.',
action='store_true')
parser.add_argument(
'--closurized_namespaces',
help=('namespace prefixes, used for testing of '
'goog.provide/require'),
action='append',
nargs='*',
metavar="prefix")
parser.add_argument(
'--ignored_extra_namespaces',
help=('Fully qualified namespaces that should be not be reported '
'as extra by the linter.'),
action='append',
nargs='*',
metavar="namespace")
parser.add_argument(
'--custom_jsdoc_tags',
help=('extra jsdoc tags to allow'),
action='append',
nargs='*',
metavar="tagname")
parser.add_argument(
'--dot_on_next_line',
help=('Require dots to be '
'placed on the next line for wrapped expressions'),
action='store_true')
parser.add_argument(
'--check_trailing_comma',
help=('check trailing commas '
'(ES3, not needed from ES5 onwards)'),
action='store_true')
parser.add_argument(
'--debug_indentation',
help='print debugging information for indentation',
action='store_true')
# Comment - watch this one: the flag's sense is inverted internally
# compared to before
parser.add_argument(
'--jsdoc',
help='disable reporting errors for missing JsDoc.',
action='store_true')
# Comment - this should change to named errors
parser.add_argument(
'--disable',
help=('Disable specific error. Usage Ex.: gjslint --disable 1 '
'0011 foo.js.'),
action='append',
nargs='*',
metavar='error_num')
# Comment - old version checked for minimum of N=1,
# so maybe check for negative later
parser.add_argument(
'--max_line_length',
type=int,
help=('Maximum line length allowed '
'without warning (default 80).'),
metavar='N',
default=80)
parser.add_argument(
'--strict',
help=STRICT_DOC,
action='store_true')
parser.add_argument(
'--jslint_error',
help=JSLINT_ERROR_DOC,
action='append',
nargs='+')
parser.add_argument(
'--dry_run',
help='(fixjscodestyle) do not modify the file, only print it.',
action='store_true')
parser.add_argument(
'--disable_indentation_fixing',
help='(fixjscodestyle) disable automatic fixing of indentation.',
action='store_true')
parser.add_argument(
'--fix_error_codes',
help=('(fixjscodestyle) list of specific error codes to '
'fix. Defaults to all supported error codes when empty. '
'See errors.py for a list of error codes.'),
action='append',
nargs='+',
metavar='error_num')
self.args = parser.parse_args()
# Emacs sets the environment variable INSIDE_EMACS in the subshell.
# Request Unix mode as emacs will expect output to be in Unix format
# for integration.
# See https://www.gnu.org/software/emacs/manual/html_node/emacs/
# Interactive-Shell.html
if 'INSIDE_EMACS' in os.environ:
self.args.unix_mode = True
self.suffixes = ['.js']
if self.args.additional_extensions:
self.suffixes += ['.%s' % ext for ext in self.args.additional_extensions]
if self.args.check_html:
self.suffixes += ['.html', '.htm']
self.paths = None
self._get_paths()
self.start_time = time.time()
def matches_suffixes(self, filename):
"""Returns whether the given filename matches one of the given suffixes.
Args:
filename: Filename to check.
Returns:
Whether the given filename matches one of the given suffixes.
"""
suffix = filename[filename.rfind('.'):]
return suffix in self.suffixes
def get_user_specified_files(self):
"""Returns files to be linted, specified directly on the command line.
Can handle the '*' wildcard in filenames, but no other wildcards.
Returns:
A sequence of files to be linted.
"""
all_files = []
lint_files = []
# Perform any necessary globs.
for filename in self.args.paths:
if filename.find('*') != -1:
for result in glob.glob(filename):
all_files.append(result)
else:
all_files.append(filename)
for filename in all_files:
if self.matches_suffixes(filename):
lint_files.append(filename)
return lint_files
def get_recursive_files(self):
"""Returns files to be checked specified by the --recurse flag.
Returns:
A list of files to be checked.
"""
lint_files = []
# Perform any request recursion
if self.args.recurse:
for start in self.args.recurse:
for root, _, files in os.walk(start):
for filename in files:
if self.matches_suffixes(filename):
lint_files.append(os.path.join(root, filename))
return lint_files
def filter_files(self, files):
"""Filters the list of files to be linted be removing any excluded files.
Filters out files excluded using --exclude_files and --exclude_directories.
Args:
files: Sequence of files that needs filtering.
Returns:
Filtered list of files to be linted.
"""
num_files = len(files)
ignore_dirs_regexs = []
excluded_dirs = (self.args.exclude_directories if
self.args.exclude_directories else [])
excluded_files = (self.args.exclude_files if
self.args.exclude_files else [])
for ignore in excluded_dirs:
ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
result_files = []
for filename in files:
add_file = True
for exclude in excluded_files:
if filename.endswith('/' + exclude) or filename == exclude:
add_file = False
break
for ignore in ignore_dirs_regexs:
if ignore.search(filename):
# Mark the file as excluded so it is not appended to the
# filtered files below.
add_file = False
break
if add_file:
# Convert everything to absolute paths so we can easily remove duplicates
# using a set.
result_files.append(os.path.abspath(filename))
skipped = num_files - len(result_files)
if skipped:
print('Skipping %d file(s).' % skipped)
self.paths = set(result_files)
def _get_paths(self):
"""Finds all files specified by the user on the commandline."""
files = self.get_user_specified_files()
if self.args.recurse:
files += self.get_recursive_files()
self.filter_files(files)
def _multiprocess_check_paths(self, check_fn):
"""Run _check_path over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
path_results = pool.imap(check_fn, self.paths)
for results in path_results:
for result in results:
yield result
# Force destruct before returning, as this can sometimes raise spurious
# "interrupted system call" (EINTR), which we can ignore.
try:
pool.close()
pool.join()
del pool
except OSError as err:
if err.errno != errno.EINTR:
raise err
def _check_paths(self, check_fn):
"""Run _check_path on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in self.paths:
results = check_fn(path)
for record in results:
yield record
def _print_file_summary(self, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(self.paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print('%s: %d' % (path, len(path_errors)))
@staticmethod
def _print_file_separator(path):
print('----- FILE : %s -----' % path)
def _print_error_records(self, error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not self.args.unix_mode:
self._print_file_separator(current_path)
print(record.error_string)
def _print_summary(self, paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
if error_count == 0:
print('%d files checked, no errors found.' % all_paths_count)
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if (error_count or new_error_count) and not self.args.quiet:
error_noun = 'error' if error_count == 1 else 'errors'
new_error_noun = 'error' if new_error_count == 1 else 'errors'
error_file_noun = 'file' if error_paths_count == 1 else 'files'
ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
print('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
(error_count,
error_noun,
new_error_count,
new_error_noun,
error_paths_count,
error_file_noun,
no_error_paths_count,
ok_file_noun))
@staticmethod
def _format_time(duration):
"""Formats a duration as a human-readable string.
Args:
duration: A duration in seconds.
Returns:
A formatted duration string.
"""
if duration < 1:
return '%dms' % round(duration * 1000)
return '%.2fs' % duration
def check(self):
"""Check the JavaScript files for style."""
check_path_p = partial(
check_path,
unix_mode=self.args.unix_mode,
limited_doc_files=self.args.limited_doc_files,
error_trace=self.args.error_trace,
closurized_namespaces=self.args.closurized_namespaces,
ignored_extra_namespaces=self.args.ignored_extra_namespaces,
custom_jsdoc_tags=self.args.custom_jsdoc_tags,
dot_on_next_line=self.args.dot_on_next_line,
check_trailing_comma=self.args.check_trailing_comma,
debug_indentation=self.args.debug_indentation,
jslint_error=self.args.jslint_error,
strict=self.args.strict,
jsdoc=self.args.jsdoc,
disable=self.args.disable,
max_line_length=self.args.max_line_length)
if self.args.singleprocess:
records_iter = self._check_paths(check_path_p)
else:
records_iter = self._multiprocess_check_paths(check_path_p)
records_iter, records_iter_copy = tee(records_iter, 2)
self._print_error_records(records_iter_copy)
error_records = list(records_iter)
self._print_summary(self.paths, error_records)
exit_code = 0
# If there are any errors
if error_records:
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if self.args.summary:
self._print_file_summary(error_records)
if self.args.beep:
# Make a beep noise.
sys.stdout.write(chr(7))
# Write out instructions for using fixjsstyle script to fix some of the
# reported errors.
fix_args = []
for flag in sys.argv[1:]:
for go_flag in GJSLINT_ONLY_FLAGS:
if flag.startswith(go_flag):
break
else:
fix_args.append(flag)
if not self.args.quiet:
print("""
Some of the errors reported by GJsLint may be auto-fixable using the
command fixjsstyle. Please double check any changes it makes and report
any bugs. The command can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args))
if self.args.time:
print('Done in %s.' % self._format_time(time.time() -
self.start_time))
sys.exit(exit_code)
def fix(self):
"""Fix the code style of the JavaScript files."""
fixer = ErrorFixer(
dry_run=self.args.dry_run,
disable_indentation_fixing=self.args.disable_indentation_fixing,
fix_error_codes=self.args.fix_error_codes)
# Check the list of files.
for path in self.paths:
fix_path(
path,
fixer,
None,
limited_doc_files=self.args.limited_doc_files,
error_trace=self.args.error_trace,
closurized_namespaces=self.args.closurized_namespaces,
ignored_extra_namespaces=self.args.ignored_extra_namespaces,
custom_jsdoc_tags=self.args.custom_jsdoc_tags,
dot_on_next_line=self.args.dot_on_next_line,
check_trailing_comma=self.args.check_trailing_comma,
debug_indentation=self.args.debug_indentation,
jslint_error=self.args.jslint_error,
strict=self.args.strict,
jsdoc=self.args.jsdoc,
disable=self.args.disable,
max_line_length=self.args.max_line_length)
def fix():
"""Automatically fix simple style guide violations."""
style_checker = JsCodeStyle()
style_checker.fix()
def main():
"""Used when called as a command line script."""
style_checker = JsCodeStyle()
style_checker.check()
if __name__ == '__main__':
main()
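# Example invocations (hedged; flag names taken from the parser above):
#
#   gjslint --recurse src --exclude_directories vendor --summary
#   gjslint --max_line_length 100 --strict foo.js bar.js
#   fixjsstyle --dry_run foo.js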
| 33.365385
| 89
| 0.579443
| 2,404
| 20,820
| 4.851082
| 0.219218
| 0.034299
| 0.040816
| 0.021609
| 0.213171
| 0.18676
| 0.12605
| 0.12605
| 0.112845
| 0.097925
| 0
| 0.003245
| 0.333958
| 20,820
| 623
| 90
| 33.418941
| 0.837744
| 0.210711
| 0
| 0.30829
| 0
| 0
| 0.178934
| 0.00895
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044041
| false
| 0
| 0.036269
| 0
| 0.095855
| 0.049223
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1140d660290898ce8ff771db41de2f9db2a0fbed
| 350
|
py
|
Python
|
tests/test_helpers.py
|
jlmcgehee21/disterminal
|
0517483960459d81f2f7361e53c91bd12c12130b
|
[
"MIT"
] | 10
|
2018-03-25T19:14:21.000Z
|
2018-05-20T04:04:27.000Z
|
tests/test_helpers.py
|
jlmcgehee21/disterminal
|
0517483960459d81f2f7361e53c91bd12c12130b
|
[
"MIT"
] | 1
|
2018-04-06T17:33:45.000Z
|
2018-04-06T17:33:45.000Z
|
tests/test_helpers.py
|
jlmcgehee21/disterminal
|
0517483960459d81f2f7361e53c91bd12c12130b
|
[
"MIT"
] | null | null | null |
import pytest
from disterminal import helpers
import numpy as np
def main_call(x):
out = np.zeros(x.shape)
out[1] = 0.1
out[-1] = 0.1
return out
def test_autorange():
x = helpers.autorange(main_call, '')
assert x.shape == (100,)
assert x.min() == pytest.approx(-9999.95)
assert x.max() == pytest.approx(9999.95)
| 17.5
| 45
| 0.625714
| 55
| 350
| 3.927273
| 0.490909
| 0.097222
| 0.046296
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077491
| 0.225714
| 350
| 19
| 46
| 18.421053
| 0.719557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.153846
| false
| 0
| 0.230769
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114113c2327e984853bcfe3d2bdb8fbe4a9538bc
| 4,149
|
py
|
Python
|
tests/test_lookups.py
|
gluk-w/python-tuple-lookup
|
b0c44bb8fb9c94925c97b54b02ffc8abeb570914
|
[
"MIT"
] | null | null | null |
tests/test_lookups.py
|
gluk-w/python-tuple-lookup
|
b0c44bb8fb9c94925c97b54b02ffc8abeb570914
|
[
"MIT"
] | null | null | null |
tests/test_lookups.py
|
gluk-w/python-tuple-lookup
|
b0c44bb8fb9c94925c97b54b02ffc8abeb570914
|
[
"MIT"
] | null | null | null |
import pytest
from listlookup import ListLookup
sample_list = [
{"id": 1, "country": "us", "name": "Atlanta"},
{"id": 2, "country": "us", "name": "Miami"},
{"id": 3, "country": "uk", "name": "Britain"},
{"id": 5, "country": "uk", "name": "Bermingham"},
{"id": 4, "country": "ca", "name": "Barrie"},
]
def test_lookups():
cities = ListLookup(sample_list)
cities.index("id", lambda d: d['id'], unique=True)
cities.index("country", lambda d: d['country'])
assert list(cities.lookup(id=1, preserve_order=True)) == [
{"id": 1, "country": "us", "name": "Atlanta"}
]
assert list(cities.lookup(id=1, country="us", preserve_order=False)) == [
{"id": 1, "country": "us", "name": "Atlanta"}
]
assert list(cities.lookup(country="us", preserve_order=True)) == [
{"id": 1, "country": "us", "name": "Atlanta"},
{"id": 2, "country": "us", "name": "Miami"}
]
assert list(cities.lookup(id=2, country="uk")) == []
def test_callable_lookup():
cities = ListLookup(sample_list)
cities.index('country', lambda d: d['country'])
cities.index('name', lambda d: d['name'])
callback_call_count = 0
def lookup_starts_with(v):
nonlocal callback_call_count
callback_call_count += 1
return v.startswith('B')
result = list(cities.lookup(country='uk', name=lookup_starts_with))
assert len(result) == 2
assert result[0]['name'].startswith('B')
assert result[1]['name'].startswith('B')
assert callback_call_count == 2
def test_index_multiple():
cities = ListLookup(sample_list)
cities.index('country', lambda d: [d['country'], "%s_a" % d['country'], "%s_b" % d['country']], multiple=True)
cities.index('name', lambda d: [d['name'], "1%s" % d['name'], "2%s" % d['name']], multiple=True)
result = list(cities.lookup(country='uk', name=lambda term: term.startswith('B')))
assert len(result) == 2
assert result[0]['name'].startswith('B')
assert result[1]['name'].startswith('B')
result = list(cities.lookup(country='uk_a', name="2Bermingham"))
assert len(result) == 1
assert result[0]['country'] == 'uk'
assert result[0]['name'] == 'Bermingham'
def test_unique_index_multiple():
cities = ListLookup(sample_list)
cities.index("id", lambda d: [d['id'], d['id'] * 10], unique=True, multiple=True)
assert list(cities.lookup(id=1, preserve_order=True)) == [
{"id": 1, "country": "us", "name": "Atlanta"}
]
assert list(cities.lookup(id=10, preserve_order=True)) == [
{"id": 1, "country": "us", "name": "Atlanta"}
]
assert list(cities.lookup(id=2, preserve_order=True))[0]['id'] != 1
def test_lookup_terminated():
cities = ListLookup(sample_list)
cities.index("id", lambda d: d['id'])
cities.index("country", lambda d: d['country'])
result = list(cities.lookup(id=2, country="xx"))
assert len(result) == 0
def test_lookup_nothing_found():
cities = ListLookup(sample_list)
cities.index("id", lambda d: d['id'])
cities.index("country", lambda d: d['country'])
cities.index("name", lambda d: d['name'])
result = list(cities.lookup(country="xx"))
assert len(result) == 0
result = list(cities.lookup(country='us', name='DC'))
assert len(result) == 0
def test_lookup_does_not_modify_indexes():
"""
There was a bug that modified index after lookup
"""
cities = ListLookup(sample_list)
cities.index("country", lambda d: d['country'])
cities.index("name", lambda d: d['name'])
result = list(cities.lookup(country='us', name='Miami'))
assert len(result) == 1
second_res = list(cities.lookup(country='us', name='Atlanta'))
assert len(second_res) == 1
def test_validation():
cities = ListLookup(sample_list)
cities.index("country", lambda d: d['country'])
with pytest.raises(ValueError):
cities.index("country", lambda d: d['name'])
with pytest.raises(ValueError):
cities.index("preserve_order", lambda d: d['name'])
with pytest.raises(ValueError):
list(cities.lookup(dummy_index='us'))
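# Quick illustration of the API under test (hedged): an index maps precomputed
# keys to rows, so a lookup intersects candidate sets instead of scanning.
#
#   cities = ListLookup(sample_list)
#   cities.index('country', lambda d: d['country'])
#   list(cities.lookup(country='ca'))
#   # -> [{'id': 4, 'country': 'ca', 'name': 'Barrie'}]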
| 31.195489
| 114
| 0.614124
| 547
| 4,149
| 4.561243
| 0.140768
| 0.096192
| 0.054509
| 0.083367
| 0.703407
| 0.69018
| 0.624449
| 0.54509
| 0.47495
| 0.468136
| 0
| 0.012767
| 0.188238
| 4,149
| 133
| 115
| 31.195489
| 0.728029
| 0.011569
| 0
| 0.423913
| 0
| 0
| 0.134851
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 1
| 0.097826
| false
| 0
| 0.021739
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1143cbb13d91eca82341ad8a60ceba57b21e31ee
| 13,697
|
py
|
Python
|
ImagePipeline_utils.py
|
titsitits/image-restoration
|
7434917c8e14c9c78cd1a9aa06ff1a058368543b
|
[
"Apache-2.0"
] | 18
|
2019-07-24T15:58:11.000Z
|
2022-02-16T04:14:15.000Z
|
ImagePipeline_utils.py
|
titsitits/image-restoration
|
7434917c8e14c9c78cd1a9aa06ff1a058368543b
|
[
"Apache-2.0"
] | 2
|
2020-09-15T10:26:31.000Z
|
2021-02-23T16:52:50.000Z
|
ImagePipeline_utils.py
|
titsitits/image-restoration
|
7434917c8e14c9c78cd1a9aa06ff1a058368543b
|
[
"Apache-2.0"
] | 7
|
2019-10-01T07:28:58.000Z
|
2022-01-08T12:45:01.000Z
|
import time
import numpy as np
import os, sys, shutil
from contextlib import contextmanager
from numba import cuda as ncuda
import PIL
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import cv2
import contextlib
from copy import deepcopy
import subprocess
from glob import glob
from os import path as osp
from os import path
utilspath = os.path.join(os.getcwd(), 'utils/')
@contextmanager
def timing(description: str) -> None:
start = time.time()
yield
elapsed_time = time.time() - start
print( description + ': finished in ' + f"{elapsed_time:.4f}" + ' s' )
class Quiet:
def __init__(self):
#Store initial stdout in this variable
self._stdout = sys.stdout
def __del__(self):
sys.stdout = self._stdout
@contextmanager
def suppress_stdout(self, raising = False):
with open(os.devnull, "w") as devnull:
error_raised = False
error = "there was an error"
sys.stdout = devnull
try:
yield
except Exception as e:
error_raised = True
error = e
sys.stdout = self._stdout
print(e)
finally:
finished = True
sys.stdout = self._stdout
sys.stdout = self._stdout
if error_raised:
if raising:
raise(error)
else:
print(error)
#Mute stdout inside this context
@contextmanager
def quiet_and_timeit(self, description = "Process running", raising = False, quiet = True):
print(description+"...", end = '')
start = time.time()
try:
if quiet:
#with suppress_stdout(raising):
sys.stdout = open(os.devnull, "w")
yield
if quiet:
sys.stdout = self._stdout
except Exception as e:
if quiet:
sys.stdout = self._stdout
if raising:
sys.stdout = self._stdout
raise(e)
else:
sys.stdout = self._stdout
print(e)
elapsed_time = time.time() - start
sys.stdout = self._stdout
print(': finished in ' + f"{elapsed_time:.4f}" + ' s' )
#Force printing in stdout, regardless of the context (such as the one defined above)
def force_print(self, value):
prev_stdout = sys.stdout
sys.stdout = self._stdout
print(value)
sys.stdout = prev_stdout
def duplicatedir(src,dst):
if not os.path.exists(src):
print('ImagePipeline_utils.duplicatedir: Source directory does not exist!')
return
if src != dst:
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src=src,dst=dst)
def createdir_ifnotexists(directory):
#create directory, recursively if needed, and do nothing if directory already exists
os.makedirs(directory, exist_ok=True)
def initdir(directory):
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
def to_RGB(image):
return image.convert('RGB')
def to_grayscale(image):
return image.convert('L')
def split_RGB_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = to_RGB(im)
#split channels
r, g, b = Image.Image.split(im)
r = to_RGB(r)
g = to_RGB(g)
b = to_RGB(b)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
r.save(f+"_red.png")
g.save(f+"_green.png")
b.save(f+"_blue.png")
except Exception as e:
print(e)
def unsplit_RGB_images(input_dir):
imname = '*_red.png'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
substring = orig[:-8]
r = to_grayscale(Image.open(substring+'_red.png'))
g = to_grayscale(Image.open(substring+'_green.png'))
b = to_grayscale(Image.open(substring+'_blue.png'))
im = Image.merge('RGB', (r,g,b) )
#save as png (and remove monochannel images)
os.remove(substring+'_red.png')
os.remove(substring+'_green.png')
os.remove(substring+'_blue.png')
im.save(substring+".png")
except Exception as e:
print(e)
def preprocess(input_dir, gray = True, resize = True, size = (1000,1000)):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = to_RGB(im)
#convert to grayscale
if gray:
im = to_grayscale(im)
#resize
if resize:
width, height = im.size
#resize only if larger than limit
if width > size[0] or height > size[1]:
im.thumbnail(size,Image.ANTIALIAS)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def filtering(input_dir, median = True, median_winsize = 5, mean = True, mean_winsize = 5):
with timing("Filtering (median) with PIL (consider using filtering_opencv for faster processing)"):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#median blur
if median:
im = im.filter(ImageFilter.MedianFilter(median_winsize))
#mean blur - note: PIL has no MeanFilter/Meanfilter class; BoxFilter gives
#a box (mean) blur, using the window size as its radius here
if mean:
im = im.filter(ImageFilter.BoxFilter(mean_winsize))
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def filtering_opencv(input_dir, median = True, median_winsize = 5, gaussian = True, gaussian_x = 5, gaussian_y = 5, gaussian_std = 0, mean = True, mean_winsize = 3):
with timing("Filtering (median) with opencv"):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
print(orig)
try:
im = cv2.imread(orig, cv2.IMREAD_COLOR)
#median blur
if median:
im = cv2.medianBlur(im,median_winsize)
if gaussian:
im = cv2.GaussianBlur(im,(gaussian_x,gaussian_y),gaussian_std)
#mean blur
if mean:
im = cv2.blur(im,(mean_winsize,mean_winsize))
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
cv2.imwrite(f+".png", im)
except Exception as e:
print(e)
def rotate_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = im.transpose(Image.ROTATE_90)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def unrotate_images(input_dir):
imname = '*'
orignames = glob(os.path.join(input_dir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#remove alpha component
im = im.transpose(Image.ROTATE_270)
#save as png (and remove previous version)
f, e = os.path.splitext(orig)
os.remove(orig)
im.save(f+".png")
except Exception as e:
print(e)
def reset_gpu(device = 0):
ncuda.select_device(device)
ncuda.close()
import os, time, datetime
#import PIL.Image as Image
import numpy as np
from skimage.measure import compare_psnr, compare_ssim
from skimage.io import imread, imsave
def to_tensor(img):
if img.ndim == 2:
return img[np.newaxis,...,np.newaxis]
elif img.ndim == 3:
return np.moveaxis(img,2,0)[...,np.newaxis]
def from_tensor(img):
return np.squeeze(np.moveaxis(img[...,0],0,-1))
def save_result(result,path):
path = path if path.find('.') != -1 else path+'.png'
ext = os.path.splitext(path)[-1]
if ext in ('.txt','.dlm'):
np.savetxt(path,result,fmt='%2.4f')
else:
imsave(path,np.clip(result,0,1))
fontfile = os.path.join(utilspath,"arial.ttf")
def addnoise(im, sigma = 10, imagetype = 'L', add_label = False):
x = np.array(im)
y = x + np.random.normal(0, sigma, x.shape)
y=np.clip(y, 0, 255)
im = PIL.Image.fromarray(y.astype('uint8'), imagetype)
if add_label:
d = ImageDraw.Draw(im)
fnt = ImageFont.truetype(fontfile, 40)
if imagetype == 'L':
fill = 240
elif imagetype == 'RGB':
fill = (255, 0, 0)
elif imagetype == 'RGBA':
fill = (255,0,0,0)
d.text((10,10), "sigma = %s" % sigma, font = fnt, fill = fill)
return im
utilspath = os.path.join(os.getcwd(), 'utils/')
fontfile = os.path.join(utilspath,"arial.ttf")
def concat_images(img_list, labels = [], imagetype = None, sameheight = True, imagewidth = None, imageheight = None, labelsize = 30, labelpos = (10,10), labelcolor = None):
"""
imagetype: allow to convert all images to a PIL.Image.mode (L = grayscale, RGB, RGBA, ...)
sameheight: put all images to same height (size of smallest image of the list, or imageheight if not None)
imageheight: if not None, force all images to have this height (keep aspect ratio). Force sameheight to True
imagewidth: if not None, force all images to have this width (keep aspect ratio if sameheight=False and imageheight=None)
"""
images = deepcopy(img_list)
if imagetype is None:
imagetype = 'RGB'
images = [im.convert(imagetype) for im in images]
#force all image to imageheight (keep aspect ratio)
if imageheight is not None:
sameheight = True
widths, heights = zip(*(i.size for i in images))
#resize needed ?
if ( (len(set(heights)) > 1) & sameheight ) or (imageheight is not None) or (imagewidth is not None):
if imageheight is None:
imageheight = min(heights)
#force all images to same width
if imagewidth is not None:
if sameheight: #force width and height
images = [im.resize( (int(imagewidth),int(imageheight)),PIL.Image.ANTIALIAS ) for im in images]
else: #force width (keep aspect ratio)
images = [im.resize( (int(imagewidth),int(im.height*imagewidth/im.width)),PIL.Image.ANTIALIAS ) for im in images]
else: #force height (keep aspect ratio)
images = [im.resize( (int(im.width*imageheight/im.height), imageheight) ,PIL.Image.ANTIALIAS) for im in images]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = PIL.Image.new(imagetype, (total_width, max_height))
#add labels to images
if len(labels) == len(images):
fnt = ImageFont.truetype(fontfile, labelsize)
if imagetype == 'L':
fill = 240
elif imagetype == 'RGB':
fill = (176,196,222)
elif imagetype == 'RGBA':
fill = (176,196,222,0)
if labelcolor is not None:
fill = labelcolor
for i in range(len(labels)):
ImageDraw.Draw(images[i]).text(labelpos, labels[i], font = fnt, fill = fill)
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
return new_im
def display_images(im_list, labels = [], **kwargs):
display(concat_images(im_list, labels, **kwargs))
def get_filepaths(directory):
files = [os.path.join(directory, file) for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
return files
def get_filenames(directory):
files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
return files
def display_folder(directory, limit = 10, **kwargs):
files = get_filepaths(directory)
files.sort()
if len(files) > limit:
files = files[:limit]
display_images([PIL.Image.open(f) for f in files], [os.path.split(f)[1] for f in files], **kwargs)
def compare_folders(dirs, labels = [], **kwargs):
if type(dirs) is list:
#dirs is a list of folders containings processed images to compare
dirlist = dirs
elif type(dirs) is str:
#dirs if parent folder of subfolders containings processed images to compare
dirlist = glob(os.path.join(dirs,'*'))
dirlist = [d for d in dirlist if os.path.isdir(d)]
first_dir = dirlist[0]
names = get_filenames(first_dir)
names.sort()
for n in names:
paths = [glob(os.path.join(d,osp.splitext(n)[0]+'*'))[0] for d in dirlist]
display_images([PIL.Image.open(p) for p in paths], [os.path.split(d)[1] for d in dirlist], **kwargs)
def clone_git(url, dir_name = None, tag = None, reclone = False):
"""
url: url of the git repository to clone
dir_name: name of the folder to give to the repository. If not given, the git repository name is used
tag: allows to checkout a specific commit if given
reclone: overwrite existing repo
"""
old_dir = os.getcwd()
if dir_name is None:
dir_name = os.path.split(url)[1] #use git repo name
dir_name = os.path.splitext(dir_name)[0] #remove ".git" if present
if reclone and os.path.exists(dir_name):
shutil.rmtree(dir_name)
if not os.path.exists(dir_name):
command = "git clone %s %s" % (url, dir_name)
subprocess.run(command, shell = True)
os.chdir(dir_name)
if tag is not None:
command = "git checkout %s" % tag
subprocess.run(command, shell = True)
git_path = os.path.join(os.getcwd())
os.chdir(old_dir)
return git_path
def download_gdrive(file_id):
subprocess.run("wget https://raw.githubusercontent.com/GitHub30/gdrive.sh/master/gdrive.sh", shell = True)
subprocess.run("curl gdrive.sh | bash -s %s" % file_id, shell = True)
subprocess.run("rm gdrive.sh", shell = True)
def image_average(imlist, weights):
assert len(imlist)==len(weights), "Input lists should have same size."
weights = np.array(weights)
weights = weights/np.sum(weights)
# Assuming all images are the same size, get dimensions of first image
w,h=Image.open(imlist[0]).convert("RGB").size
# Create a numpy array of floats to store the average (assume RGB images)
arr = np.zeros((h, w, 3), float)
# Build up the weighted average, casting each image as an array of floats
# (the weights were normalised above, so values stay within [0, 255])
for im, weight in zip(imlist, weights):
imarr = np.array(Image.open(im).convert("RGB"), dtype=float)
arr = arr + imarr * weight
# Round values in array and cast as 8-bit integer
arr=np.array(np.round(arr),dtype=np.uint8)
# Generate, save and preview final image
out=Image.fromarray(arr,mode="RGB")
return out
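# Usage sketch (hedged; file names are placeholders):
#
#   blended = image_average(['frame1.png', 'frame2.png'], [3, 1])  # 75/25 mix
#   blended.save('blend.png')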
| 24.768535
| 172
| 0.678032
| 2,074
| 13,697
| 4.40405
| 0.187078
| 0.023648
| 0.018612
| 0.020801
| 0.340158
| 0.266915
| 0.239654
| 0.210204
| 0.189621
| 0.158091
| 0
| 0.010962
| 0.19413
| 13,697
| 552
| 173
| 24.813406
| 0.816543
| 0.154851
| 0
| 0.361516
| 0
| 0
| 0.059253
| 0
| 0
| 0
| 0
| 0
| 0.002915
| 1
| 0.093294
| false
| 0
| 0.052478
| 0.008746
| 0.183673
| 0.049563
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1144dfe3b0de92ac50325fd69bcff937bffb9527
| 371
|
py
|
Python
|
py_tea_code/2.mypro_io/test_os/my05.py
|
qq4215279/study_python
|
b0eb9dedfc4abb2fd6c024a599e7375869c3d77a
|
[
"Apache-2.0"
] | null | null | null |
py_tea_code/2.mypro_io/test_os/my05.py
|
qq4215279/study_python
|
b0eb9dedfc4abb2fd6c024a599e7375869c3d77a
|
[
"Apache-2.0"
] | null | null | null |
py_tea_code/2.mypro_io/test_os/my05.py
|
qq4215279/study_python
|
b0eb9dedfc4abb2fd6c024a599e7375869c3d77a
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf-8
# Test os.walk(): recursively traverse all subdirectories and files
import os
all_files = []
path = os.getcwd()
list_files = os.walk(path)
for dirpath,dirnames,filenames in list_files:
for dir in dirnames:
all_files.append(os.path.join(dirpath,dir))
for file in filenames:
all_files.append(os.path.join(dirpath,file))
# Print all subdirectories and files
for file in all_files:
print(file)
| 20.611111
| 52
| 0.71159
| 57
| 371
| 4.526316
| 0.421053
| 0.124031
| 0.108527
| 0.124031
| 0.24031
| 0.24031
| 0.24031
| 0
| 0
| 0
| 0
| 0.003257
| 0.172507
| 371
| 18
| 53
| 20.611111
| 0.837134
| 0.132075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1144ebed87008c80403fadd34329c7f64e53da5b
| 2,801
|
py
|
Python
|
lib_drl/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
lib_drl/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
lib_drl/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from model.bbox_transform import bbox_transform_inv, clip_boxes
from model.nms_wrapper import nms
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride,
anchors):
"""A simplified version compared to fast/er RCNN
For details please see the technical report
"""
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, cfg.NBR_ANCHORS:]
rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
scores = scores.reshape((-1, 1))
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Pick the top region proposals
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# Non-maximal suppression
keep = nms(np.hstack((proposals, scores)), nms_thresh)
# Pick the top region proposals after NMS
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob, scores
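# Shape sketch (hedged): with H*W*A anchors, `scores` flattens to (H*W*A, 1)
# and `proposals` to (H*W*A, 4); after top-K selection and NMS the returned
# blob is (n_kept, 5), where column 0 is the (all-zero) batch index.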
def proposal_layer_all(rpn_bbox_pred, im_info, anchors):
"""
Simply returns every single RoI; GRP-HAI later decides
which are forwarded to the class-specific module.
"""
# Get the bounding boxes
batch_sz, height, width = rpn_bbox_pred.shape[0: 3]
rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Create initial (all-zeros) observation RoI volume
roi_obs_vol = np.zeros((batch_sz, height, width, cfg.NBR_ANCHORS),
dtype=np.int32)
not_keep_ids = np.zeros((1, 1), dtype=np.int32)
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
rois_all = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return rois_all, roi_obs_vol, not_keep_ids
| 35.455696
| 80
| 0.670118
| 399
| 2,801
| 4.441103
| 0.353383
| 0.035553
| 0.055869
| 0.020316
| 0.310384
| 0.291196
| 0.269752
| 0.269752
| 0.269752
| 0.234763
| 0
| 0.014166
| 0.193502
| 2,801
| 79
| 81
| 35.455696
| 0.770252
| 0.246341
| 0
| 0.186047
| 0
| 0
| 0.002424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.162791
| 0
| 0.255814
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1145f38136a9b2f21e2507449a336cde84624ed4
| 14,999
|
py
|
Python
|
tools/verification/trt_verify.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
tools/verification/trt_verify.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
tools/verification/trt_verify.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import time
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import cv2
import mmcv
from tqdm import tqdm
import pickle as pkl
from vis_util import show_corners
from tools.model_zoo import model_zoo as zoo
TRT_LOGGER = trt.Logger()
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
output_names = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(
engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
print('binding:{}, size:{}, dtype:{}'.format(binding, size, dtype))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
output_names.append(binding)
return inputs, outputs, output_names, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(
batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine(builder):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with builder.create_network() as network, trt.OnnxParser(
network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 27  # 128 MB (1 << 27 bytes; the old "1GB" comment was wrong)
builder.max_batch_size = 1
print('max workspace size: {:.2f} MB'.format(
builder.max_workspace_size / 1024 / 1024))
tic = time.time()
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please generate it.'.format(
onnx_file_path))
                exit(1)  # exit non-zero: a missing ONNX file is an error
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
parser.parse(model.read())
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.
format(onnx_file_path))
engine = builder.build_cuda_engine(network)
if engine is None:
raise Exception('build engine failed')
else:
print('Completed! time cost: {:.1f}s'.format(time.time() -
tic))
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
with trt.Builder(TRT_LOGGER) as builder:
if builder.platform_has_fast_fp16:
print('enable fp16 mode!')
builder.fp16_mode = True
builder.strict_type_constraints = True
engine_file_path = engine_file_path.replace('.trt', '_fp16.trt')
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path,
"rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine(builder)
def preprocess(image, use_rgb=True):
image = image.astype(np.float32)
mean_rgb = np.array([123.675, 116.28, 103.53])
std_rgb = np.array([58.395, 57.12, 57.375])
if use_rgb:
image = image[..., [2, 1, 0]]
image -= mean_rgb
image /= std_rgb
else:
mean_bgr = mean_rgb[[2, 1, 0]]
std_bgr = std_rgb[[2, 1, 0]]
image -= mean_bgr
image /= std_bgr
image = np.transpose(image, [2, 0, 1])
return np.ascontiguousarray(image)
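# Hedged sketch: preprocess converts an HWC uint8 BGR image into a CHW
# float32 tensor with per-channel mean/std normalization.
def _sketch_preprocess():
    dummy_bgr = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
    chw = preprocess(dummy_bgr)
    assert chw.shape == (3, 4, 4) and chw.dtype == np.float32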
def postprocess_2d(wh_feats, reg_offsets, heatmaps, heatmap_indexs, rf):
    score_thresh = 0.01  # note: rf and score_thresh are currently unused here
h, w = heatmaps.shape[-2:]
batch, topk = heatmap_indexs.shape
heatmaps = heatmaps.reshape((batch, -1))
scores = np.take(heatmaps, heatmap_indexs)
labels = (heatmap_indexs // (h * w)).astype(int)
spatial_idx = heatmap_indexs % (h * w)
offsetx = np.take(reg_offsets[:, 0, ...].reshape((batch, -1)), spatial_idx)
offsety = np.take(reg_offsets[:, 1, ...].reshape((batch, -1)), spatial_idx)
pred_w = np.take(wh_feats[:, 0, ...].reshape((batch, -1)), spatial_idx)
pred_h = np.take(wh_feats[:, 1, ...].reshape((batch, -1)), spatial_idx)
cx = spatial_idx % w + offsetx
cy = spatial_idx // w + offsety
x1 = cx - pred_w / 2
y1 = cy - pred_h / 2
x2 = cx + pred_w / 2
y2 = cy + pred_h / 2
bboxes = np.stack([x1, y1, x2, y2], axis=2) * pool_scale
return bboxes, labels, scores
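# Hedged sketch of the decoding above: one peak at spatial index 3 of a
# 2x2 map (x=1, y=1), +0.5 offsets and a 2x2 box give center (1.5, 1.5);
# corners are then scaled by the module-level pool_scale.
def _sketch_postprocess_2d():
    h = w = 2
    heatmaps = np.zeros((1, 1, h, w), dtype=np.float32)
    heatmaps[0, 0, 1, 1] = 0.9
    idxs = np.array([[3]])
    reg = np.full((1, 2, h, w), 0.5, dtype=np.float32)
    wh = np.full((1, 2, h, w), 2.0, dtype=np.float32)
    bboxes, labels, scores = postprocess_2d(wh, reg, heatmaps, idxs, None)
    assert np.allclose(bboxes[0, 0], np.array([0.5, 0.5, 2.5, 2.5]) * pool_scale)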
def show_results_2d(img, outputs, output_image_path, class_names):
# Run the post-processing algorithms on the TensorRT outputs and get the bounding box details of detected objects
bboxes, labels, scores = postprocess_2d(*outputs)
scores = scores[..., np.newaxis]
bboxes = np.concatenate((bboxes, scores), axis=2)
mmcv.imshow_det_bboxes(
img,
bboxes[0],
labels[0],
class_names=class_names,
score_thr=0.35,
show=output_image_path is None,
out_file=output_image_path)
# Draw the bounding boxes onto the original input image and save it as a PNG file
# obj_detected_img = draw_bboxes(resized_image, boxes[0], scores[0], classes[0])
#
# cv2.imwrite(output_image_path, obj_detected_img)
# print('Saved image with bounding boxes of detected objects to {}.'.format(
# output_image_path))
def postprocess_3d(heatmaps, height_feats, reg_xoffsets, reg_yoffsets, poses,
heatmap_indexs):
batch, _, h, w = heatmaps.shape
results = []
for i in range(batch):
idxs = heatmap_indexs[i]
scores = heatmaps[i].reshape(-1)[idxs]
labels = (idxs // (h * w)).astype(int)
idxs = idxs % (h * w)
x_idxs = idxs % w
y_idxs = idxs // w
offsetx = reg_xoffsets[i, :, y_idxs, x_idxs]
offsety = reg_yoffsets[i, :, y_idxs, x_idxs]
height = height_feats[i, :, y_idxs, x_idxs]
        pose_i = poses[i, y_idxs, x_idxs]  # new name: rebinding `poses` clobbered the batch tensor on later iterations
cx = x_idxs[:, np.newaxis] + offsetx
cy = y_idxs[:, np.newaxis] + offsety
cy1 = cy - height / 2
cy2 = cy + height / 2
corners = np.stack([np.hstack([cx, cx]), np.hstack([cy1, cy2])])
corners = np.transpose(corners, [1, 2, 0]) * pool_scale
        pose_scores = np.zeros_like(pose_i)
        results.append([corners, labels, scores, pose_i, pose_scores])
return results
def show_results_3d(img, outputs, output_image_path, class_names):
height_feats, reg_xoffsets, reg_yoffsets, poses, heatmaps, heatmap_indexs, _ = outputs
results = postprocess_3d(heatmaps, height_feats, reg_xoffsets,
reg_yoffsets, poses, heatmap_indexs)
score_thresh = 0.35
with open('/private/ningqingqun/undistort.pkl', 'wb') as f:
pkl.dump(results[0][0][0], f)
show_corners(
img,
results[0],
class_names,
score_thr=score_thresh,
out_file=output_image_path,
pad=0)
def get_images2():
im_list = [
'/private/ningqingqun/datasets/undist_img.png'
# '/private/ningqingqun/datasets/outsource/201910110946_00000187_1570758388348.jpg'
]
return im_list
def get_images():
# input_image_path = '/private/ningqingqun/bags/truck1_2019_07_24_14_54_43_26.msg/front_right/201907241454/201907241454_00000000_1563951283641.jpg'
# input_image_path = '/private/ningqingqun/bags/truck2_2019_07_26_17_02_47_1.msg/front_right/201907261702/201907261702_00000002_1564131768223.jpg'
# input_image_path = '/private/ningqingqun/bags/truck1_2019_09_06_13_48_14_19.msg/front_right/201909061348/201909061348_00000005_1567748895027.jpg'
# data_dir = '/private/ningqingqun/bags/crane/howo1_2019_12_04_09_14_54_0.msg/front_left/201912040915'
# data_dir = '/private/ningqingqun/bags/howo1_2019_12_11_08_59_10_6.msg/head_right/201912110859'
# data_dir = '/private/ningqingqun/bags/jinlv4_2019_10_18_09_18_50_6.msg/head_right/201910180919'
data_dir = '/private/ningqingqun/bags/howo1_2019_12_24_17_49_48_2.msg/front_left/201912241750'
# data_dir = '//private/ningqingqun/datasets/outsource/mine/truck2/front_right/20191220'
im_list = [
os.path.join(data_dir, f) for f in os.listdir(data_dir)
if f.endswith('.jpg')
]
return im_list
# Output shapes expected by the post-processor
version = 'v5.5.2'
if 'cm' in version:
num_fg = 12
else:
num_fg = 7
topk = 50
input_h, input_w = (800, 1280)
out_channels = 64
pool_scale = 4
output_h = int(input_h / pool_scale)
output_w = int(input_w / pool_scale)
onnx_files = {
'v4_fp16':
'/private/ningqingqun/torch/centernet/r34_fp16_epoch_16_iter_60000.onnx',
'v5.1.16':
'/private/ningqingqun/mmdet/outputs/v5.1.16/centernet_r18_ignore_1017_1915_gpu12/epoch_35_iter_3675.onnx',
'v5.tmp':
'work_dirs/debug/centernet_r18_ignore_1105_1118_desktop/epoch_1_iter_500.onnx',
'cm-v0.1':
'work_dirs/debug/centernet_r18_no_1119_1954_desktop/epoch_35_iter_4305.onnx',
'cm-v0.2':
'work_dirs/debug/centernet_r18_no_1120_1157_desktop/epoch_40_iter_4920.onnx',
'cm-v0.6':
'/private/ningqingqun/mmdet/outputs/no31_36/centernet_r18_adam_no_crop_1129_1920_gpu9/epoch_10_iter_2000.onnx',
'cm-v0.8':
'/work/work_dirs/v5.3.3/centernet_r18_finetune_large_1207_1707_desktop/epoch_20_iter_1160.onnx'
}
name2shape = {
'heatmap': (1, num_fg, output_h, output_w),
'height_feats': (1, 3, output_h, output_w),
'reg_xoffset': (1, 3, output_h, output_w),
'reg_yoffset': (1, 3, output_h, output_w),
'pose': (1, output_h, output_w),
'raw_features': (1, output_h, output_w, out_channels),
'heatmap_indexs': (1, topk),
'wh_feats': (1, 2, output_h, output_w),
'reg_offset': (1, 2, output_h, output_w),
}
def main():
"""Create a TensorRT engine for ONNX-based centernet and run inference."""
try:
cuda.init()
major, minor = cuda.Device(0).compute_capability()
    except Exception:
raise Exception("failed to get gpu compute capability")
onnx_file_path = zoo[version]['model_file'].replace('.pth', '.onnx')
new_ext = '-{}.{}.trt'.format(major, minor)
engine_file_path = onnx_file_path.replace('.onnx', new_ext)
# engine_file_path ='/private/ningqingqun/torch/centernet/vision_detector_fabu_v4.0.0-5.1.5.0-6.1.trt'
    # Gather the input images to run through the engine
image_list = get_images()
out_dir = '/private/ningqingqun/results/trt_results/' + version + '_20191220_mining'
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# Do inference with TensorRT
trt_outputs = []
with get_engine(onnx_file_path, engine_file_path
) as engine, engine.create_execution_context() as context:
inputs, outputs, output_names, bindings, stream = allocate_buffers(
engine)
# Do inference
# print('Running inference on image {}...'.format(input_image_path))
# Set host input to the image.
# The common.do_inference function will copy the input to the GPU
# before executing.
for input_image_path in tqdm(image_list):
# input_h, input_w = (input_h // 32 * 32, input_w // 32 * 32)
im = cv2.imread(input_image_path)
resized_image = cv2.resize(im, (input_w, input_h))
input_image = preprocess(resized_image)
inputs[0].host = input_image
# tic = time.time()
trt_outputs = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream)
# print('inference time cost: {:.1f}ms'.format(
# (time.time() - tic) * 1000))
# Before doing post-processing, we need to reshape the outputs as the common.do_inference will give us flat arrays.
trt_outputs = [
output.reshape(name2shape[name])
for output, name in zip(trt_outputs, output_names)
]
class_names = [
'car', 'bus', 'truck', 'person', 'bicycle', 'tricycle', 'block'
]
out_file = os.path.join(out_dir,
os.path.basename(input_image_path))
if 'v5' in version:
show_results_3d(resized_image.copy(), trt_outputs, out_file,
class_names)
elif 'cm' in version:
class_names = [
'right20',
'right40',
'right45',
'left20',
'left40',
'left45',
'NO31',
'NO32',
'NO33',
'NO34',
'NO35',
'NO36',
]
show_results_2d(resized_image.copy(), trt_outputs, out_file,
class_names)
else:
show_results_2d(resized_image.copy(), trt_outputs, out_file,
class_names)
if __name__ == '__main__':
main()
| 37.876263
| 151
| 0.626775
| 1,986
| 14,999
| 4.481873
| 0.250252
| 0.017976
| 0.015729
| 0.012583
| 0.158746
| 0.125716
| 0.081901
| 0.06078
| 0.033592
| 0.028761
| 0
| 0.063529
| 0.263284
| 14,999
| 395
| 152
| 37.972152
| 0.741991
| 0.191079
| 0
| 0.065744
| 0
| 0
| 0.126222
| 0.066092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051903
| false
| 0
| 0.044983
| 0.00692
| 0.141869
| 0.038062
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1146252ac942d4c9ff4deece36ba6f7c91187e06
| 1,741
|
py
|
Python
|
Main.py
|
0ne0rZer0/Mon-T-Python
|
c263ed540d811a8bc238b859f03a52cc1151779c
|
[
"MIT"
] | null | null | null |
Main.py
|
0ne0rZer0/Mon-T-Python
|
c263ed540d811a8bc238b859f03a52cc1151779c
|
[
"MIT"
] | null | null | null |
Main.py
|
0ne0rZer0/Mon-T-Python
|
c263ed540d811a8bc238b859f03a52cc1151779c
|
[
"MIT"
] | null | null | null |
import os, time, sys, hashlib
# Python Recreation of MonitorSauraus Rex.
# Originally Developed by Luke Barlow, Dayan Patel, Rob Shire, Sian Skiggs.
# Aims:
# - Detect Rapid File Changes
# - Cut Wifi Connections
# - Create Logs for running processes at time of trigger, find source infection file.
# - Create "Nest" Safe folder , with encryption and new file types. ".egg" type?
# - Create Notification for a user/admin? Connect to a database?
# - kill running processes in aim to kill attack.
# Getting MD5 Hash of a string:
# print (hashlib.md5("Your String".encode('utf-8')).hexdigest())
origHashList = []
# Getting MD5 Hash of a file:
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
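# Hedged sketch: the chunked reader above produces the same digest as
# hashing the whole payload at once (the tempfile path is illustrative).
def _sketch_md5():
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"Your String")
        name = tmp.name
    assert md5(name) == hashlib.md5(b"Your String").hexdigest()
    os.remove(name)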
# Shows Correct Hash Changes Upon File Alteration.
def getOrigMd5():
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
time.sleep(3) # For Testing
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
updateOrigHashText(origHashList)
# Appends the collected hashes to the log file.
def updateOrigHashText(origList):
ohl = open("/home/barlowl3/test/test.txt", "a")
for hash in origList:
ohl.write(hash)
ohl.write('\n')
    ohl.close()
# Main Method
def main():
getOrigMd5()
main()
#Use checksumdir python package available for calculating checksum/hash of directory. It's available at https://pypi.python.org/pypi/checksumdir/1.0.5
#Usage :
#import checksumdir
#hash = checksumdir.dirhash("c:\\temp")
#print hash
| 27.634921
| 151
| 0.66054
| 227
| 1,741
| 5.052863
| 0.572687
| 0.015693
| 0.041848
| 0.05231
| 0.150828
| 0.101133
| 0.101133
| 0.101133
| 0.101133
| 0.101133
| 0
| 0.020849
| 0.228604
| 1,741
| 62
| 152
| 28.080645
| 0.833209
| 0.529581
| 0
| 0.166667
| 0
| 0
| 0.121253
| 0.114441
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.041667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1148e9602cf3ea5d501cac86ca50ffbe359518e0
| 4,444
|
py
|
Python
|
src/Competition/4.25.com3.py
|
Peefy/PeefyLeetCode
|
92156e4b48ba19e3f02e4286b9f733e9769a1dee
|
[
"Apache-2.0"
] | 2
|
2018-05-03T07:50:03.000Z
|
2018-06-17T04:32:13.000Z
|
src/Competition/4.25.com3.py
|
Peefy/PeefyLeetCode
|
92156e4b48ba19e3f02e4286b9f733e9769a1dee
|
[
"Apache-2.0"
] | null | null | null |
src/Competition/4.25.com3.py
|
Peefy/PeefyLeetCode
|
92156e4b48ba19e3f02e4286b9f733e9769a1dee
|
[
"Apache-2.0"
] | 3
|
2018-11-09T14:18:11.000Z
|
2021-11-17T15:23:52.000Z
|
import math
class Solution(object):
    def bfs(self, maze, i, j, fx, fy, m, n):
        # Breadth-first search over the grid. Always returns the full cost
        # matrix (callers index into it), so the old early `return 0` for
        # i == fx and j == fy is gone; bfscost[i][j] is 0 by construction.
        # fx, fy are kept for signature compatibility but unused.
        bfsqueue = []
        bfsvisit = [[0 for _ in range(n)] for _ in range(m)]
        bfscost = [[math.inf for _ in range(n)] for _ in range(m)]
        bfsvisit[i][j] = 2
        bfscost[i][j] = 0
        bfsqueue.append((i, j))
        while len(bfsqueue) != 0:
            (i, j) = bfsqueue.pop(0)
            # the four duplicated direction blocks collapsed into one loop
            for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
                nexti, nextj = i + di, j + dj
                if 0 <= nexti < m and 0 <= nextj < n and maze[nexti][nextj] != '#':
                    if bfsvisit[nexti][nextj] == 0:
                        bfsqueue.append((nexti, nextj))
                        bfsvisit[nexti][nextj] = 2
                        bfscost[nexti][nextj] = bfscost[i][j] + 1
            bfsvisit[i][j] = 1
        return bfscost
def minStep(self, maze, sx, sy, fx, fy, m, n):
return self.bfs(maze, sx, sy, fx, fy, m, n)
def minimalSteps(self, maze):
m = len(maze)
n = len(maze[0])
s_index_x = -1
s_index_y = -1
o_index_x = -1
o_index_y = -1
t_index_x = -1
t_index_y = -1
Mcount = 0
Ocount = 0
cost = 0
Mlist = []
Olist = []
StoOcost = []
OtoMcost = []
MtoTcost = []
for i in range(m):
for j in range(n):
if maze[i][j] == 'S':
s_index_x = i
s_index_y = j
if maze[i][j] == 'O':
Ocount += 1
Olist.append((i, j))
if maze[i][j] == 'T':
t_index_x = i
t_index_y = j
if maze[i][j] == 'M':
Mcount += 1
Mlist.append((i, j))
if s_index_x == -1 or t_index_x == -1:
return -1
if Mcount == 0:
dis = self.minStep(maze, s_index_x, s_index_y, t_index_x, t_index_y, m, n)[t_index_x][t_index_y]
if dis == math.inf:
return -1
return dis
StoOcost = self.minStep(maze, s_index_x, s_index_y, o_index_x, o_index_y, m, n)
cost = 0
mcost = math.inf
for oindex in Olist:
o_index_x, o_index_y = oindex
stoocost = StoOcost[o_index_x][o_index_y]
if stoocost == math.inf:
continue
OtoMcost.clear()
MtoTcost.clear()
for index in Mlist:
m_index_x, m_index_y = index
otomcost = self.minStep(maze, o_index_x, o_index_y, m_index_x, m_index_y, m, n)
mtotcost = self.minStep(maze, t_index_x, t_index_y, m_index_x, m_index_y, m, n)
OtoMcost.append(otomcost[m_index_x][m_index_y])
MtoTcost.append(mtotcost[m_index_x][m_index_y])
OtoMcostTwoSum = sum(OtoMcost) * 2
for i in range(Mcount):
mcost = min(mcost, stoocost + OtoMcostTwoSum - OtoMcost[i] + MtoTcost[i])
if mcost == math.inf:
return -1
return mcost
if __name__ == "__main__":
solution = Solution()
print(solution.minimalSteps(["S#O", "M..", "M.T"]))
print(solution.minimalSteps(["S#O", "M.#", "M.T"]))
print(solution.minimalSteps(["S#O", "M.T", "M.."]))
| 37.982906
| 108
| 0.464446
| 587
| 4,444
| 3.366269
| 0.112436
| 0.121457
| 0.072874
| 0.020243
| 0.56832
| 0.531377
| 0.488866
| 0.436235
| 0.436235
| 0.406883
| 0
| 0.020045
| 0.405041
| 4,444
| 116
| 109
| 38.310345
| 0.727307
| 0
| 0
| 0.229358
| 0
| 0
| 0.009683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.009174
| 0.009174
| 0.119266
| 0.027523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11499a7441906f3bce3d215812d969fa784411f0
| 3,836
|
py
|
Python
|
coinextAPI.py
|
R-Mascarenhas/CryptoTrade
|
491a7a2e562694312843fbc58a003904d3d97000
|
[
"Apache-2.0"
] | 1
|
2021-05-28T15:31:53.000Z
|
2021-05-28T15:31:53.000Z
|
coinextAPI.py
|
R-Mascarenhas/CryptoTrade
|
491a7a2e562694312843fbc58a003904d3d97000
|
[
"Apache-2.0"
] | null | null | null |
coinextAPI.py
|
R-Mascarenhas/CryptoTrade
|
491a7a2e562694312843fbc58a003904d3d97000
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from datetime import date, datetime, timedelta
class Coinext:
def __init__(self, ativo):
self.ativo = ativo
self.urlCoinext = 'https://api.coinext.com.br:8443/AP/'
    def service_url(self, service_name):
        # `self` was missing, so self.service_url(...) passed self as service_name
        return 'https://api.coinext.com.br:8443/AP/%s' % service_name
def call_get(self, service_name):
res = requests.get(self.service_url(service_name))
return json.loads(res.content)
def call_post(self, service_name, payload):
res = requests.post(self.service_url(service_name), data=json.dumps(payload))
return json.loads(res.content)
def obterBooks(self):
payload = {
'OMSId': 1,
'AccountId': 1,
'InstrumentId': 1,
'Depth': 1
}
return self.call_post('GetL2Snapshot', payload)
#return self.executarRequestCoinext('GET', payload, 'GetL2Snapshot')
    def obterSaldo(self):
        # NOTE: executarRequestBrasilBTC is used throughout but is not defined in this class
        return self.executarRequestBrasilBTC('GET', '', '/api/get_balance')
def obterOrdemPorId(self, idOrdem):
return self.executarRequestBrasilBTC('GET', '', 'api/check_order/{}'.format(idOrdem))
def enviarOrdemCompra(self, quantity, tipoOrdem, precoCompra):
        # object that will be POSTed to the endpoint
payload = {
'coin_pair': 'BRL{}'.format(self.ativo),
'order_type': tipoOrdem,
'type': 'buy',
'amount': quantity,
'price': precoCompra
}
        # without serializing the payload (json.dumps), the API returns a "currency not found" error
retorno = self.executarRequestBrasilBTC('POST', json.dumps(payload), 'api/create_order')
return retorno
def enviarOrdemVenda(self, quantity, tipoOrdem, precoVenda):
        # object that will be POSTed to the endpoint
payload = {
'coin_pair': 'BRL{}'.format(self.ativo),
'order_type': tipoOrdem,
'type': 'sell',
'amount': quantity,
'price': precoVenda
}
        # without serializing the payload (json.dumps), the API returns a "currency not found" error
retorno = self.executarRequestBrasilBTC('POST', json.dumps(payload), 'api/create_order')
return retorno
def TransferirCrypto(self, quantity):
        config = Util.obterCredenciais()  # NOTE: Util is not imported in this file
        # object that will be POSTed to the endpoint
payload = {
'coin': self.ativo,
'amount': quantity,
'address': config["MercadoBitcoin"]["Address"],
'priority': 'medium'
}
        # without serializing the payload (json.dumps), the API returns a "currency not found" error
return self.executarRequestBrasilBTC('POST', json.dumps(payload), '/api/send')
def cancelarOrdem(self, idOrdem):
return self.executarRequestBrasilBTC('GET', '', 'api/remove_order/{}'.format(idOrdem))
def obterOrdensAbertas(self):
return self.executarRequestBrasilBTC('GET', '','/api/my_orders')
def obterDadosUsuario(self):
return self.executarRequestCoinext('POST', '', 'GetUserInfo')
def obterToken(self):
config = Util.obterCredenciais()
res = requests.get(self.urlCoinext+'authenticate', auth=(config['Coinext']['Login'], config['Coinext']['Password']))
auth = json.loads(res.text.encode('utf8'))
if auth['Authenticated']:
return auth['Token']
def executarRequestCoinext(self, requestMethod, payload, endpoint):
headers ={
'aptoken': self.obterToken(),
'Content-type': 'application/json'
}
        # basic request using the requests module
res = requests.request(requestMethod, self.urlCoinext+endpoint, headers=headers, data=payload)
return json.loads(res.text.encode('utf8'))
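# Hedged sketch (no network traffic): the URL builder simply appends the
# service name to the fixed Coinext base URL.
def _sketch_service_url():
    c = Coinext('BTC')
    assert c.service_url('GetL2Snapshot') == 'https://api.coinext.com.br:8443/AP/GetL2Snapshot'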
| 36.188679
| 124
| 0.618352
| 390
| 3,836
| 6.015385
| 0.3
| 0.034101
| 0.072464
| 0.063086
| 0.461637
| 0.42029
| 0.336743
| 0.249361
| 0.249361
| 0.230605
| 0
| 0.005624
| 0.258342
| 3,836
| 106
| 125
| 36.188679
| 0.818981
| 0.122002
| 0
| 0.253333
| 0
| 0
| 0.148765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.013333
| 0.04
| 0.08
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11499dc46efd3a0f04d31a58e295c03134ec2637
| 469
|
py
|
Python
|
example/soft_spi_example.py
|
amaork/raspi-io
|
aaea4532569010a64f3c54036b9db7eb81515d1a
|
[
"MIT"
] | 8
|
2018-02-28T16:02:36.000Z
|
2021-08-06T12:57:39.000Z
|
example/soft_spi_example.py
|
amaork/raspi-io
|
aaea4532569010a64f3c54036b9db7eb81515d1a
|
[
"MIT"
] | null | null | null |
example/soft_spi_example.py
|
amaork/raspi-io
|
aaea4532569010a64f3c54036b9db7eb81515d1a
|
[
"MIT"
] | 1
|
2019-05-08T06:50:33.000Z
|
2019-05-08T06:50:33.000Z
|
from raspi_io import SoftSPI, GPIO
import raspi_io.utility as utility
if __name__ == "__main__":
address = utility.scan_server(0.05)[0]
cpld = SoftSPI(address, GPIO.BCM, cs=7, clk=11, mosi=10, miso=9, bits_per_word=10)
flash = SoftSPI(address, GPIO.BCM, cs=8, clk=11, mosi=10, miso=9, bits_per_word=8)
cpld.write([0x0])
cpld.write([0x10])
cpld.write([0x30])
cpld.write([0x80])
data = flash.xfer([0x9f], 3)
flash.print_binary(data)
| 31.266667
| 86
| 0.66951
| 77
| 469
| 3.87013
| 0.545455
| 0.120805
| 0.120805
| 0.14094
| 0.33557
| 0.181208
| 0.181208
| 0.181208
| 0.181208
| 0
| 0
| 0.085052
| 0.172708
| 469
| 14
| 87
| 33.5
| 0.68299
| 0
| 0
| 0
| 0
| 0
| 0.017058
| 0
| 0
| 0
| 0.040512
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1149ea534c3710b9c8fba988306c661b296e5d6e
| 342
|
py
|
Python
|
Python3/0678-Valid-Parenthesis-String/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0678-Valid-Parenthesis-String/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0678-Valid-Parenthesis-String/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
class Solution:
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
cmin = cmax = 0
for ch in s:
cmax = cmax - 1 if ch == ')' else cmax + 1
cmin = cmin + 1 if ch == '(' else max(cmin - 1, 0)
if cmax < 0: return False
return cmin == 0
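# Hedged sanity checks for the greedy min/max open-count tracking above:
# cmax treats '*' as '(', cmin treats '*' as ')' (floored at 0).
if __name__ == '__main__':
    s = Solution()
    assert s.checkValidString("(*))") is True
    assert s.checkValidString(")(") is False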
| 28.5
| 62
| 0.432749
| 44
| 342
| 3.363636
| 0.5
| 0.067568
| 0.067568
| 0.121622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042328
| 0.447368
| 342
| 12
| 63
| 28.5
| 0.740741
| 0.073099
| 0
| 0
| 0
| 0
| 0.006993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114a920b441f7acbb102aa82afab60cd9f2a194e
| 2,527
|
py
|
Python
|
video/train_vqvae_lstm.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
video/train_vqvae_lstm.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
video/train_vqvae_lstm.py
|
arash-safari/vp
|
377e0172112157b79690b32349481a17e7590063
|
[
"MIT"
] | null | null | null |
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
def get_optimizer(model, lr):
return optim.Adam(model.parameters(), lr=lr)
def _to_one_hot(y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
    # NOTE: this permute yields (B, C, W, H); H and W end up swapped unless frames are square
    return zeros.scatter(scatter_dim, y_tensor, 1).permute(0, 3, 2, 1)
def one_hot_to_int(y):
y_trans = y.permute(0, 2, 3, 1)
y_trans = y_trans.argmax(dim=-1)
return y_trans
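# Hedged sketch of the helpers above; because _to_one_hot's output is
# (B, C, W, H), the round trip recovers the transposed frame.
def _sketch_one_hot_roundtrip():
    y = torch.randint(0, 4, (2, 5, 3))
    oh = _to_one_hot(y, 4)
    assert oh.shape == (2, 4, 3, 5)
    assert torch.equal(one_hot_to_int(oh), y.transpose(1, 2))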
def train(model, input_channel, loader, callback, epoch_num, device, lr, run_num, ):
image_samples = 10
writer_path = 'vqvae_videomnist_1_00099_lstm'
optimizer = get_optimizer(model, lr)
model = model.to(device)
model = nn.DataParallel(model)
criterion = nn.MSELoss()
writer = SummaryWriter(log_dir='logs/{}_{}'.format(*[writer_path, run_num]))
for epoch in range(epoch_num):
loader = tqdm(loader)
mse_sum = 0
mse_n = 0
for iter, (frames, video_inds, frame_inds) in enumerate(loader):
model.zero_grad()
for i in range(frames.shape[1] - 1):
input_ = _to_one_hot(frames[:, i, :, :], input_channel).float()
output = _to_one_hot(frames[:, i + 1, :, :], input_channel).float()
input_ = input_.to(device)
output = output.to(device)
cell_states = model(input_)
pred , cell_state = cell_states[-1]
loss = criterion(pred, output)
loss.backward()
optimizer.step()
mse_sum += loss.item() * input_.shape[0]
mse_n += input_.shape[0]
lr = optimizer.param_groups[0]['lr']
            if iter % 200 == 0:  # "is" identity checks on ints are unreliable; use ==
loader.set_description(
(
                    f'iter: {iter + 1}; mse: {loss.item():.5f}; '
f'avg mse: {mse_sum / mse_n:.5f}; '
f'lr: {lr:.5f}'
)
)
            if iter == 0 and epoch > 0:
writer.add_scalar('Loss/train', mse_sum / mse_n, epoch_num)
sample = pred[:image_samples, :, :, :]
sample = one_hot_to_int(sample)
callback(sample, video_inds[i], epoch)
torch.save(model.state_dict(),
'../video/checkpoints/videomnist/vqvae-lstm/{}/{}.pt'.format(*[run_num, str(epoch).zfill(5)]))
| 35.591549
| 117
| 0.550455
| 322
| 2,527
| 4.099379
| 0.341615
| 0.022727
| 0.018182
| 0.028788
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022635
| 0.318164
| 2,527
| 70
| 118
| 36.1
| 0.743471
| 0
| 0
| 0
| 0
| 0
| 0.074397
| 0.031658
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.070175
| 0.017544
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114baac9b0ba0fd601c9c440b172f038a36ec799
| 307
|
py
|
Python
|
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula069/zip_e_zip_longest.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
"""
Zip - Unindo iteráveis
Zip_longest _ Itertools
"""
from itertools import zip_longest, count
index = count()  # unused below; count() yields an infinite 0, 1, 2, ... iterator
cidades = ['Sao Paulo', 'Belo Horizonte', 'Salvador', 'Monte Belo']
estados = ['SP', 'MG', 'BA']
cidades_estados = zip_longest(cidades, estados)
for valor in cidades_estados:
print(valor)
| 20.466667
| 67
| 0.70684
| 39
| 307
| 5.410256
| 0.615385
| 0.14218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153094
| 307
| 15
| 68
| 20.466667
| 0.811538
| 0.149837
| 0
| 0
| 0
| 0
| 0.185039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114bfce52e4cd09b2cceb92b610dc1db5f94447b
| 7,087
|
py
|
Python
|
VoiceAssistant/speechrecognition/neuralnet/train.py
|
Reyansh0667/A-Programmer-AI-Voice-Assistant
|
7350050515fe333627c9c27b17d1e98d99b8a5c2
|
[
"MIT"
] | 575
|
2020-05-29T07:31:40.000Z
|
2022-03-31T16:06:48.000Z
|
VoiceAssistant/speechrecognition/neuralnet/train.py
|
Reyansh0667/A-Programmer-AI-Voice-Assistant
|
7350050515fe333627c9c27b17d1e98d99b8a5c2
|
[
"MIT"
] | 67
|
2020-08-05T16:17:28.000Z
|
2022-03-12T09:04:33.000Z
|
VoiceAssistant/speechrecognition/neuralnet/train.py
|
Reyansh0667/A-Programmer-AI-Voice-Assistant
|
7350050515fe333627c9c27b17d1e98d99b8a5c2
|
[
"MIT"
] | 259
|
2020-05-30T15:04:59.000Z
|
2022-03-30T02:56:03.000Z
|
import os
import ast
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer
from argparse import ArgumentParser
from model import SpeechRecognition
from dataset import Data, collate_fn_padd
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
class SpeechModule(LightningModule):
def __init__(self, model, args):
super(SpeechModule, self).__init__()
self.model = model
self.criterion = nn.CTCLoss(blank=28, zero_infinity=True)
self.args = args
def forward(self, x, hidden):
return self.model(x, hidden)
def configure_optimizers(self):
self.optimizer = optim.AdamW(self.model.parameters(), self.args.learning_rate)
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, mode='min',
factor=0.50, patience=6)
return [self.optimizer], [self.scheduler]
def step(self, batch):
spectrograms, labels, input_lengths, label_lengths = batch
bs = spectrograms.shape[0]
hidden = self.model._init_hidden(bs)
hn, c0 = hidden[0].to(self.device), hidden[1].to(self.device)
output, _ = self(spectrograms, (hn, c0))
output = F.log_softmax(output, dim=2)
loss = self.criterion(output, labels, input_lengths, label_lengths)
return loss
def training_step(self, batch, batch_idx):
loss = self.step(batch)
logs = {'loss': loss, 'lr': self.optimizer.param_groups[0]['lr'] }
return {'loss': loss, 'log': logs}
def train_dataloader(self):
d_params = Data.parameters
d_params.update(self.args.dparams_override)
train_dataset = Data(json_path=self.args.train_file, **d_params)
return DataLoader(dataset=train_dataset,
batch_size=self.args.batch_size,
num_workers=self.args.data_workers,
pin_memory=True,
collate_fn=collate_fn_padd)
def validation_step(self, batch, batch_idx):
loss = self.step(batch)
return {'val_loss': loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.scheduler.step(avg_loss)
tensorboard_logs = {'val_loss': avg_loss}
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def val_dataloader(self):
d_params = Data.parameters
d_params.update(self.args.dparams_override)
test_dataset = Data(json_path=self.args.valid_file, **d_params, valid=True)
return DataLoader(dataset=test_dataset,
batch_size=self.args.batch_size,
num_workers=self.args.data_workers,
collate_fn=collate_fn_padd,
pin_memory=True)
def checkpoint_callback(args):
return ModelCheckpoint(
filepath=args.save_model_path,
        save_top_k=1,  # expects an int; the original `True` was implicitly treated as 1
verbose=True,
monitor='val_loss',
mode='min',
prefix=''
)
def main(args):
h_params = SpeechRecognition.hyper_parameters
h_params.update(args.hparams_override)
model = SpeechRecognition(**h_params)
if args.load_model_from:
speech_module = SpeechModule.load_from_checkpoint(args.load_model_from, model=model, args=args)
else:
speech_module = SpeechModule(model, args)
logger = TensorBoardLogger(args.logdir, name='speech_recognition')
    trainer = Trainer(
max_epochs=args.epochs, gpus=args.gpus,
num_nodes=args.nodes, distributed_backend=None,
logger=logger, gradient_clip_val=1.0,
val_check_interval=args.valid_every,
checkpoint_callback=checkpoint_callback(args),
resume_from_checkpoint=args.resume_from_checkpoint
)
trainer.fit(speech_module)
if __name__ == "__main__":
parser = ArgumentParser()
# distributed training setup
    parser.add_argument('-n', '--nodes', default=1, type=int, help='number of nodes for distributed training')
parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
parser.add_argument('-w', '--data_workers', default=0, type=int,
help='n data loading workers, default 0 = main process only')
parser.add_argument('-db', '--dist_backend', default='ddp', type=str,
                        help='which distributed backend to use. default: ddp')
# train and valid
parser.add_argument('--train_file', default=None, required=True, type=str,
help='json file to load training data')
parser.add_argument('--valid_file', default=None, required=True, type=str,
help='json file to load testing data')
parser.add_argument('--valid_every', default=1000, required=False, type=int,
help='valid after every N iteration')
# dir and path for models and logs
parser.add_argument('--save_model_path', default=None, required=True, type=str,
help='path to save model')
parser.add_argument('--load_model_from', default=None, required=False, type=str,
help='path to load a pretrain model to continue training')
parser.add_argument('--resume_from_checkpoint', default=None, required=False, type=str,
help='check path to resume from')
parser.add_argument('--logdir', default='tb_logs', required=False, type=str,
help='path to save logs')
# general
parser.add_argument('--epochs', default=10, type=int, help='number of total epochs to run')
parser.add_argument('--batch_size', default=64, type=int, help='size of batch')
parser.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--pct_start', default=0.3, type=float, help='percentage of growth phase in one cycle')
parser.add_argument('--div_factor', default=100, type=int, help='div factor for one cycle')
parser.add_argument("--hparams_override", default="{}", type=str, required=False,
help='override the hyper parameters, should be in form of dict. ie. {"attention_layers": 16 }')
parser.add_argument("--dparams_override", default="{}", type=str, required=False,
help='override the data parameters, should be in form of dict. ie. {"sample_rate": 8000 }')
args = parser.parse_args()
args.hparams_override = ast.literal_eval(args.hparams_override)
args.dparams_override = ast.literal_eval(args.dparams_override)
if args.save_model_path:
if not os.path.isdir(os.path.dirname(args.save_model_path)):
raise Exception("the directory for path {} does not exist".format(args.save_model_path))
main(args)
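# Example invocation (hypothetical paths, for illustration only):
#   python train.py --train_file data/train.json --valid_file data/valid.json \
#       --save_model_path checkpoints/speech/model.ckpt --epochs 20 --batch_size 32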
| 43.478528
| 111
| 0.658389
| 892
| 7,087
| 5.039238
| 0.258969
| 0.03604
| 0.068076
| 0.015128
| 0.255617
| 0.193993
| 0.178865
| 0.135706
| 0.121468
| 0.082314
| 0
| 0.007349
| 0.231974
| 7,087
| 163
| 112
| 43.478528
| 0.818482
| 0.011712
| 0
| 0.075188
| 0
| 0
| 0.147
| 0.003429
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082707
| false
| 0
| 0.105263
| 0.015038
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114f19bb66b60d61b441f7697a5eae83b5d30c4e
| 596
|
py
|
Python
|
DRL/models/oct/18-argon/session1/reward.py
|
EXYNOS-999/AWS_JPL_DRL
|
ea9df7f293058b0ca2dc63753e68182fcc5380f5
|
[
"Apache-2.0"
] | null | null | null |
DRL/models/oct/18-argon/session1/reward.py
|
EXYNOS-999/AWS_JPL_DRL
|
ea9df7f293058b0ca2dc63753e68182fcc5380f5
|
[
"Apache-2.0"
] | 1
|
2020-01-08T06:52:03.000Z
|
2020-01-08T07:05:44.000Z
|
DRL/models/oct/18-argon/session1a/reward.py
|
EXYNOS-999/AWS_JPL_DRL
|
ea9df7f293058b0ca2dc63753e68182fcc5380f5
|
[
"Apache-2.0"
] | null | null | null |
"""
AWS DeepRacer reward function using only progress
"""
#===============================================================================
#
# REWARD
#
#===============================================================================
def reward_function(params):
# Skipping the explanation and verbose math here...
baseline = 102
motivator = -1
distance_to_goal = 100.0 - params['progress']
reward = baseline + \
motivator + \
-distance_to_goal
    # 1e-8 signals a crash, so the returned reward must always stay above that
return float(max(reward, 1e-3))
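# Hedged sanity check of the shaping above: progress 100 -> 102 - 1 - 0 = 101,
# progress 0 -> 102 - 1 - 100 = 1; both clear the 1e-3 floor.
if __name__ == '__main__':
    assert reward_function({'progress': 100.0}) == 101.0
    assert reward_function({'progress': 0.0}) == 1.0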
| 27.090909
| 80
| 0.458054
| 56
| 596
| 4.785714
| 0.75
| 0.104478
| 0.104478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02537
| 0.206376
| 596
| 21
| 81
| 28.380952
| 0.541226
| 0.540268
| 0
| 0
| 0
| 0
| 0.030769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
114fdc8df483131a51698126243a63c5be6a6a0e
| 579
|
py
|
Python
|
djcelery_model/tests/testapp/tasks.py
|
idanshimon/django-celery-model
|
0127bdf7a30ca97a2f0054413c7892477bd03d2f
|
[
"MIT"
] | null | null | null |
djcelery_model/tests/testapp/tasks.py
|
idanshimon/django-celery-model
|
0127bdf7a30ca97a2f0054413c7892477bd03d2f
|
[
"MIT"
] | 5
|
2020-07-13T17:33:29.000Z
|
2020-09-11T16:21:54.000Z
|
djcelery_model/tests/testapp/tasks.py
|
idanshimon/django-celery-model
|
0127bdf7a30ca97a2f0054413c7892477bd03d2f
|
[
"MIT"
] | 1
|
2020-12-07T13:27:02.000Z
|
2020-12-07T13:27:02.000Z
|
from __future__ import absolute_import, unicode_literals
from hashlib import sha1
from time import sleep
from celery import shared_task
from .models import JPEGFile
@shared_task
def calculate_etag(pk):
jpeg = JPEGFile.objects.get(pk=pk)
jpeg.etag = sha1(jpeg.file.read()).hexdigest()
sleep(5)
jpeg.save()
@shared_task(bind=True)
def forced_failure(self):
raise Exception('forced failure')
@shared_task(bind=True, max_retries=None)
def retry_forever(self):
self.retry(countdown=5)
@shared_task(bind=True)
def sleep_for_success(self):
sleep(5)
| 19.3
| 56
| 0.749568
| 85
| 579
| 4.905882
| 0.494118
| 0.119904
| 0.100719
| 0.129496
| 0.100719
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.145078
| 579
| 29
| 57
| 19.965517
| 0.832323
| 0
| 0
| 0.2
| 0
| 0
| 0.02418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.25
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1157f9d0f3382897cf392138bb21e63963ec687a
| 1,311
|
py
|
Python
|
backtesting/__init__.py
|
mhconradt/research-tools
|
b60f42bcce571665d918c1637f532a5a9f5caf4b
|
[
"MIT"
] | null | null | null |
backtesting/__init__.py
|
mhconradt/research-tools
|
b60f42bcce571665d918c1637f532a5a9f5caf4b
|
[
"MIT"
] | null | null | null |
backtesting/__init__.py
|
mhconradt/research-tools
|
b60f42bcce571665d918c1637f532a5a9f5caf4b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from backtesting.analysis import plot_cost_proceeds, plot_holdings, \
plot_performance
from backtesting.report import Report
from backtesting.simulation import simulate
def main() -> None:
from string import ascii_uppercase
np.random.seed(42)
markets = list(ascii_uppercase[:5])
m = len(markets)
n = 86400
fee = 0.0001
expiration = 10
times = pd.date_range('2000-01-01', freq='S', periods=n)
bf_ = (1 - np.random.rand(n, m) ** (1 / 30))
buy_fraction = pd.DataFrame(bf_, index=times, columns=markets)
sf_ = 1 - (1 - np.random.rand(n, m)) ** (1 / 300)
sell_fraction = pd.DataFrame(sf_, index=times, columns=markets)
_prices = np.random.lognormal(1e-7, 1e-4, size=(n, m)).cumprod(axis=0)
price = pd.DataFrame(_prices, index=times, columns=markets)
drop = np.random.permutation(np.arange(price.size).reshape(*price.shape))
price[drop % 7 == 0] = np.nan
_report = simulate(100_000., buy_fraction, sell_fraction, price, fee,
expiration, expiration, single_trade=True)
plot_holdings(_report)
import matplotlib.pyplot as plt
plt.show()
if __name__ == '__main__':
main()
__all__ = ['simulate', 'plot_holdings', 'plot_cost_proceeds',
'plot_performance']
| 33.615385
| 77
| 0.670481
| 181
| 1,311
| 4.646409
| 0.469613
| 0.047562
| 0.060642
| 0.085612
| 0.03805
| 0.03805
| 0.03805
| 0
| 0
| 0
| 0
| 0.043768
| 0.198322
| 1,311
| 38
| 78
| 34.5
| 0.756422
| 0
| 0
| 0
| 0
| 0
| 0.056445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.21875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11582c4c142efc6bf040a2f6c49882faa3503209
| 24,681
|
py
|
Python
|
relation_extraction/data/preprocess.py
|
geetickachauhan/relation-extraction
|
aa920449b20c7127954eaaaa05244e7fc379e018
|
[
"MIT"
] | 19
|
2019-06-24T18:33:36.000Z
|
2022-01-21T03:16:12.000Z
|
relation_extraction/data/preprocess.py
|
geetickachauhan/relation-extraction
|
aa920449b20c7127954eaaaa05244e7fc379e018
|
[
"MIT"
] | null | null | null |
relation_extraction/data/preprocess.py
|
geetickachauhan/relation-extraction
|
aa920449b20c7127954eaaaa05244e7fc379e018
|
[
"MIT"
] | 11
|
2019-06-02T08:59:16.000Z
|
2021-08-23T04:31:07.000Z
|
'''
Author: Geeticka Chauhan
Performs pre-processing on a csv file independent of the dataset (once converters have been applied).
Refer to notebooks/Data-Preprocessing for more details. The methods are specifically used in the
non-"_original" notebooks for all datasets.
'''
import os, pandas as pd, numpy as np
import nltk
import spacy
from spacy.tokens import Doc
# important global variables for identifying the location of entities
entity1 = 'E'
entity2 = 'EOTHER'
entity_either = 'EEITHER'
'''
The methods below are for the preprocessing type 1
'''
# separate the indexes of entity 1 and entity 2 by what is intersecting
# and what is not
def get_common_and_separate_entities(e1_indexes, e2_indexes):
e1_indexes = set(e1_indexes)
e2_indexes = set(e2_indexes)
common_indexes = e1_indexes.intersection(e2_indexes)
only_e1_indexes = list(e1_indexes.difference(common_indexes))
only_e2_indexes = list(e2_indexes.difference(common_indexes))
return only_e1_indexes, only_e2_indexes, list(common_indexes)
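# Hedged example: index tuples shared by both entities land in the common
# bucket; the rest are split out per entity.
def _sketch_entity_split():
    only_e1, only_e2, common = get_common_and_separate_entities(
        [(0, 0), (2, 2)], [(2, 2), (4, 5)])
    assert only_e1 == [(0, 0)] and only_e2 == [(4, 5)] and common == [(2, 2)]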
# given an entity replacement dictionary like {'0:0': 'entity1'}
# provide more information related to the location of the entity
def entity_replacement_dict_with_entity_location(entity_replacement_dict,
only_e1_indexes, only_e2_indexes, common_indexes):
def update_dict_with_indexes(new_entity_replacement_dict, only_indexes, start, end):
for i in only_indexes:
key = str(i[0]) + ':' + str(i[-1])
new_entity_replacement_dict[key]['start'] = start
new_entity_replacement_dict[key]['end'] = end
return new_entity_replacement_dict
new_entity_replacement_dict = {}
    # below is just for initialization; when start and end are None, it means we do not
    # insert anything before or after those words in the sentence
for key in entity_replacement_dict.keys():
new_entity_replacement_dict[key] = {'replace_by': entity_replacement_dict[key],
'start': None, 'end': None}
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e1_indexes,
entity1 + 'START', entity1 + 'END')
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, only_e2_indexes,
entity2 + 'START', entity2 + 'END')
new_entity_replacement_dict = update_dict_with_indexes(new_entity_replacement_dict, common_indexes,
entity_either + 'START', entity_either + 'END')
return new_entity_replacement_dict
###
### Helper functions
###
# given a string like '12:30', return (12, 30) as a tuple of ints
def parse_position(position):
positions = position.split(':')
return int(positions[0]), int(positions[1])
def sort_position_keys(entity_replacement_dict):
positions = list(entity_replacement_dict.keys())
sorted_positions = sorted(positions, key=lambda x: int(x.split(':')[0]))
return sorted_positions
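# Hedged example: keys sort numerically by their start token, so '10:12'
# correctly follows '2:3' (plain string sorting would invert them).
def _sketch_sort_position_keys():
    assert sort_position_keys({'10:12': 'a', '2:3': 'b'}) == ['2:3', '10:12']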
# remove any additional whitespace within a line
def remove_whitespace(line):
return str(" ".join(line.split()).strip())
def list_to_string(sentence):
return " ".join(sentence)
# adapted from tag_sentence method in converter_ddi
# note that white spaces are added in the new sentence on purpose
def replace_with_concept(row):
sentence = row.tokenized_sentence.split()
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index'] # assuming that within the same entity indexes, no overlap
new_sentence = ''
only_e1_indexes, only_e2_indexes, common_indexes = \
get_common_and_separate_entities(e1_indexes, e2_indexes)
entity_replacement_dict = row.metadata['entity_replacement'] # assuming no overlaps in replacement
new_entity_replacement_dict = entity_replacement_dict_with_entity_location(entity_replacement_dict,
only_e1_indexes, only_e2_indexes,
common_indexes)
repl_dict = new_entity_replacement_dict # just using proxy because names are long
sorted_positions = sort_position_keys(new_entity_replacement_dict)
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = parse_position(curr_pos)
start_replace = '' if repl_dict[curr_pos]['start'] is None else repl_dict[curr_pos]['start'].upper()
end_replace = '' if repl_dict[curr_pos]['end'] is None else repl_dict[curr_pos]['end'].upper()
between_replace = repl_dict[curr_pos]['replace_by'].upper() # between the entity replacement
if i == 0:
new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
between_replace + ' ' + end_replace + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = parse_position(prev_pos)
middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos]) # refers to middle between prev segment and the
# current segment
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_replace + ' ' + between_replace + ' ' + end_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
new_sentence += ' ' + list_to_string(sentence[curr_end_pos+1:])
new_sentence = remove_whitespace(new_sentence)
return new_sentence
'''
Preprocessing Type 2: removal of stop words and punctuation, and the replacement of digits
'''
# gives a dictionary signifying the location of the different entities in the sentence
def get_entity_location_dict(only_e1_indexes, only_e2_indexes, common_indexes):
entity_location_dict = {}
def update_dict_with_indexes(entity_location_dict, only_indexes, start, end):
for i in only_indexes:
key = str(i[0]) + ':' + str(i[-1])
entity_location_dict[key] = {'start': start, 'end': end}
return entity_location_dict
entity_location_dict = update_dict_with_indexes(entity_location_dict, only_e1_indexes,
entity1 + 'START', entity1 + 'END')
entity_location_dict = update_dict_with_indexes(entity_location_dict, only_e2_indexes,
entity2 + 'START', entity2 + 'END')
entity_location_dict = update_dict_with_indexes(entity_location_dict, common_indexes,
entity_either + 'START', entity_either + 'END')
return entity_location_dict
# given the index information of the entities, return the sentence with
# tags ESTART EEND etc to signify the location of the entities
def get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes):
new_sentence = ''
only_e1_indexes, only_e2_indexes, common_indexes = \
get_common_and_separate_entities(e1_indexes, e2_indexes)
entity_loc_dict = get_entity_location_dict(only_e1_indexes, only_e2_indexes, common_indexes)
sorted_positions = sort_position_keys(entity_loc_dict)
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = parse_position(curr_pos)
start_replace = entity_loc_dict[curr_pos]['start']
end_replace = entity_loc_dict[curr_pos]['end']
if i == 0:
new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_replace + ' ' + \
list_to_string(sentence[curr_start_pos : curr_end_pos + 1]) + ' ' + end_replace + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = parse_position(prev_pos)
middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_replace + ' ' + \
list_to_string(sentence[curr_start_pos: curr_end_pos+1]) + ' ' + end_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
new_sentence += ' ' + list_to_string(sentence[curr_end_pos+1:])
new_sentence = remove_whitespace(new_sentence)
# TODO write some code to do the replacement
return new_sentence
# preprocessing 2: remove the stop words and punctuation from the data
# and replace all digits
# TODO: might be nice to give an option to specify whether to remove the stop words or not
# this is a low priority part though
def replace_digit_punctuation_stop_word(row, stop_word_removal=True):
nlp = spacy.load('en_core_web_lg')
sentence = row.tokenized_sentence.split()
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index']
sentence = get_new_sentence_with_entity_replacement(sentence, e1_indexes, e2_indexes)
# detection of stop words, punctuations and digits
index_to_keep_dict = {} # index: {keep that token or not, replace_with}
tokenizedSentence = sentence.lower().split()
doc = Doc(nlp.vocab, words=tokenizedSentence)
nlp.tagger(doc)
nlp.parser(doc)
for token in doc:
word_index = token.i
stop_word = token.is_stop
punct = token.is_punct
num = token.like_num
if (stop_word_removal and (stop_word or punct)) or (not stop_word_removal and punct):
index_to_keep_dict[word_index] = {'keep': False, 'replace_with': None}
elif num:
index_to_keep_dict[word_index] = {'keep': True, 'replace_with': 'NUMBER'}
else:
index_to_keep_dict[word_index] = {'keep': True, 'replace_with': None}
# generation of the new sentence based on the above findings
sentence = sentence.split()
new_sentence = []
for i in range(len(sentence)):
word = sentence[i]
if word.endswith('END') or word.endswith('START'):
new_sentence.append(word)
continue
if not index_to_keep_dict[i]['keep']:
continue # don't append when it is a stop word or punctuation
if index_to_keep_dict[i]['replace_with'] is not None:
new_sentence.append(index_to_keep_dict[i]['replace_with'])
continue
new_sentence.append(word)
return list_to_string(new_sentence)
'''
Preprocessing Type 3 part 1: NER
'''
# a method to check for overlap between the ner_dict that is created
def check_for_overlap(ner_dict):
def expand_key(string): # a string that looks like '2:2' to [2]
start = int(string.split(':')[0])
end = int(string.split(':')[1])
return list(range(start, end+1))
expanded_keys = [expand_key(key) for key in ner_dict.keys()]
for i1, item in enumerate(expanded_keys):
for i2 in range(i1 + 1, len(expanded_keys)):
if set(item).intersection(expanded_keys[i2]):
return True # overlap is true
for i2 in range(0, i1):
if set(item).intersection(expanded_keys[i2]):
return True
return False
###
### Helper functions for the NER replacement
###
def overlap_index(index1, index2):
def expand(index):
start = int(index[0])
end = int(index[1])
return list(range(start, end+1))
expand_index1 = expand(index1)
expand_index2 = expand(index2)
if set(expand_index1).intersection(set(expand_index2)):
return True
else: return False
# for indexes that look like (1,1) and (2,2) check if the left is fully included in the right
def fully_included(index1, index2):
if int(index1[0]) >= int(index2[0]) and int(index1[1]) <= int(index2[1]): return True
else: return False
def beginning_overlap(index1, index2): # this is tricky when (1,1) and (2,2) are there
if int(index1[0]) < int(index2[0]) and int(index1[1]) <= int(index2[1]): return True
else: return False
def end_overlap(index1, index2): # this is tricky
if int(index1[0]) >= int(index2[0]) and int(index1[1]) > int(index2[1]): return True
else: return False
def beginning_and_end_overlap(index1, index2):
if int(index1[0]) < int(index2[0]) and int(index1[1]) > int(index2[1]): return True
else:
return False
#else there is no overlap
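# Hedged examples of the span predicates above (indexes are inclusive):
def _sketch_overlap_predicates():
    assert fully_included((2, 3), (1, 4))
    assert beginning_overlap((1, 3), (2, 4))
    assert end_overlap((3, 5), (2, 4))
    assert beginning_and_end_overlap((1, 5), (2, 4))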
# taken from https://stackoverflow.com/questions/46548902/converting-elements-of-list-of-nested-lists-from-string-to-integer-in-python
def list_to_int(lists):
    # recurse with list_to_int itself; the original called an undefined convert_to_int
    return [int(el) if not isinstance(el, list) else list_to_int(el) for el in lists]
def correct_entity_indexes_with_ner(ner_dict, e_index):
new_e_index = []
for i in range(len(e_index)): # we are reading tuples here
for key in ner_dict.keys():
indexes = e_index[i]
index2 = indexes
index1 = parse_position(key) # checking if ner is fully included etc
if not overlap_index(index1, index2): # don't do below if there is no overlap
continue
if beginning_overlap(index1, index2):
e_index[i] = (index1[0], e_index[i][1])
elif end_overlap(index1, index2):
e_index[i] = (e_index[i][0], index1[1])
elif beginning_and_end_overlap(index1, index2):
e_index[i] = (index1[0], index1[1]) # else you don't change or do anything
return e_index
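# Hedged example: an NER span that starts before an entity pulls the
# entity's start index back to the NER start (Type-1 overlap handling).
def _sketch_ner_boundary_fix():
    assert correct_entity_indexes_with_ner({'1:2': 'ORG'}, [(2, 3)]) == [(1, 3)]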
# given all of these dictionaries, return the ner replacement dictionary
def get_ner_replacement_dictionary(only_e1_index, only_e2_index, common_indexes, ner_dict):
def update_dict_with_entity(e_index, ner_repl_dict, entity_name):
for indexes in e_index:
key1 = str(indexes[0]) + ':' + str(indexes[0]) + ':' + entity_name + 'START'
ner_repl_dict[key1] = {'replace_by': None, 'insert': entity_name + 'START'}
key2 = str(int(indexes[-1]) + 1) + ':' + str(int(indexes[-1]) + 1) + ':' + entity_name + 'END'
ner_repl_dict[key2] = {'replace_by': None, 'insert': entity_name + 'END'}
return ner_repl_dict
# we are going to do something different: only spans for NER will be counted, but
# for the ENTITYSTART and ENTITYEND, we will keep the span as what token to insert before
ner_repl_dict = {}
for key in ner_dict:
ner_repl_dict[key] = {'replace_by': ner_dict[key], 'insert': None}
ner_repl_dict = update_dict_with_entity(only_e1_index, ner_repl_dict, entity1)
ner_repl_dict = update_dict_with_entity(only_e2_index, ner_repl_dict, entity2)
ner_repl_dict = update_dict_with_entity(common_indexes, ner_repl_dict, entity_either)
return ner_repl_dict
# this function is different from the sort_position_keys because
# we care about sorting not just by the beginning token, but also by the length that the span contains
def ner_sort_position_keys(ner_repl_dict): # this can potentially replace sort_position_keys
# but only if the application of this function does not change the preprocessed CSVs generated
def len_key(key):
pos = parse_position(key)
return pos[1] - pos[0] + 1
def start_or_end(key):
# handle the case where the ending tag of the entity is in the same place as the
#starting tag of another entity - this happens when two entities are next to each other
if len(key.split(':')) <= 2: # means that this is a named entity
return 3
start_or_end = key.split(':')[2]
if start_or_end.endswith('END'): # ending spans should get priority
return 1
elif start_or_end.endswith('START'):
return 2
positions = list(ner_repl_dict.keys())
sorted_positions = sorted(positions, key=lambda x: (parse_position(x)[0], len_key(x), start_or_end(x)))
return sorted_positions
# given a splitted sentence - make sure that the sentence is in list form
def get_ner_dict(sentence, nlp):
#nlp = spacy.load(spacy_model_name)
tokenizedSentence = sentence # in this case lowercasing is not helpful
doc = Doc(nlp.vocab, words=tokenizedSentence)
nlp.tagger(doc)
nlp.parser(doc)
nlp.entity(doc) # run NER
ner_dict = {} # first test for overlaps within ner
for ent in doc.ents:
key = str(ent.start) + ':' + str(ent.end - 1)
ner_dict[key] = ent.label_
return ner_dict
def convert_indexes_to_int(e_idx):
new_e_idx = []
for indexes in e_idx:
t = (int(indexes[0]), int(indexes[1]))
new_e_idx.append(t)
return new_e_idx
def replace_ner(row, nlp, check_ner_overlap=False): # similar to concept_replace, with some caveats
sentence = row.tokenized_sentence.split()
e1_indexes = row.metadata['e1']['word_index']
e2_indexes = row.metadata['e2']['word_index']
e1_indexes = convert_indexes_to_int(e1_indexes)
e2_indexes = convert_indexes_to_int(e2_indexes)
only_e1_indexes, only_e2_indexes, common_indexes = \
get_common_and_separate_entities(e1_indexes, e2_indexes)
ner_dict = get_ner_dict(sentence, nlp)
if check_ner_overlap and check_for_overlap(ner_dict):
print("There is overlap", ner_dict) # only need to check this once
    # the code below works only if there is no overlap within ner_dict, so make sure of that first
    # overlaps between an NER label and the e1/e2 indexes are a problem,
    # and they come in two types:
    # Type 1: the NER span overlaps with e1 or e2 at its beginning or end.
    #   Here we keep the NER label the same but extend the e1/e2 index to the
    #   beginning or end of the NER span.
    # Type 2: the NER span is completely inside the entity. It is then enough
    #   to record at which token to insert ENTITYSTART and ENTITYEND.
    # Type 1 is a problem, but Type 2 is easy to handle while the new sentence is being created
only_e1_indexes = correct_entity_indexes_with_ner(ner_dict, only_e1_indexes)
only_e2_indexes = correct_entity_indexes_with_ner(ner_dict, only_e2_indexes)
common_indexes = correct_entity_indexes_with_ner(ner_dict, common_indexes)
# below needs to be done in case there was again a shift that might have caused both e1 and e2 to have
# the same spans
only_e1_indexes, only_e2_indexes, common_indexes2 = \
get_common_and_separate_entities(only_e1_indexes, only_e2_indexes)
common_indexes.extend(common_indexes2)
ner_repl_dict = get_ner_replacement_dictionary(only_e1_indexes, only_e2_indexes, common_indexes,
ner_dict)
sorted_positions = ner_sort_position_keys(ner_repl_dict)
    new_sentence = ''  # NOTE: the reconstruction below is known to be buggy in some edge cases and shouldn't be too hard to fix
for i in range(len(sorted_positions)):
curr_pos = sorted_positions[i]
curr_start_pos, curr_end_pos = parse_position(curr_pos)
curr_dict = ner_repl_dict[curr_pos]
start_insert = '' if curr_dict['insert'] is None else curr_dict['insert'].upper()
between_replace = '' if curr_dict['replace_by'] is None else curr_dict['replace_by']
if i == 0:
new_sentence += list_to_string(sentence[:curr_start_pos]) + ' ' + start_insert + ' ' + \
between_replace + ' '
else:
prev_pos = sorted_positions[i-1]
_, prev_end_pos = parse_position(prev_pos)
if ner_repl_dict[prev_pos]['insert'] is None: # means middle will be starting from prev_pos + 1
middle = list_to_string(sentence[prev_end_pos+1 : curr_start_pos])
else: # means middle needs to start from the prev_pos
middle = list_to_string(sentence[prev_end_pos: curr_start_pos])
if middle == '':
middle = ' '
new_sentence += middle + ' ' + start_insert + ' ' + between_replace + ' '
if i == len(sorted_positions) - 1 and curr_end_pos < len(sentence) - 1:
position = curr_end_pos + 1 if curr_dict['insert'] is None else curr_end_pos
new_sentence += ' ' + list_to_string(sentence[position:])
new_sentence = remove_whitespace(new_sentence)
return new_sentence
'''
The methods below do entity detection from tagged sentences, i.e. sentences that
contain tags such as ENTITYSTART and ENTITYEND: they use the tags to detect the
locations of the respective entities, then remove the tags from the sentence to
return something clean.
'''
# below is taken directly from the ddi converter; it removes the first
# occurrence of the start and end tags and reports their locations
def get_entity_start_and_end(entity_start, entity_end, tokens):
e_start = tokens.index(entity_start)
e_end = tokens.index(entity_end) - 2 # 2 tags will be eliminated
between_tags = 0
for index in range(e_start + 1, e_end + 2):
        # we want to check between the start and end for occurrences of other tags
if tokens[index].endswith('START') or tokens[index].endswith('END'):
between_tags += 1
e_end -= between_tags
    # only eliminate the first occurrence of the entity_start and entity_end
new_tokens = []
entity_start_seen = 0
entity_end_seen = 0
for x in tokens:
if x == entity_start:
entity_start_seen += 1
if x == entity_end:
entity_end_seen += 1
if x == entity_start and entity_start_seen == 1:
continue
if x == entity_end and entity_end_seen == 1:
continue
new_tokens.append(x)
return (e_start, e_end), new_tokens
# based upon the method in converter for DDI, this will do removal of the entity tags and keep
# track of where they are located in the sentence
def get_entity_positions_and_replacement_sentence(tokens):
e1_idx = []
e2_idx = []
tokens_for_indexing = tokens
for token in tokens:
if token.endswith('START'):
ending_token = token[:-5] + 'END'
e_idx, tokens_for_indexing = get_entity_start_and_end(token, ending_token, tokens_for_indexing)
if token == entity1 + 'START' or token == entity_either + 'START':
e1_idx.append(e_idx)
if token == entity2 + 'START' or token == entity_either + 'START':
e2_idx.append(e_idx)
return e1_idx, e2_idx, tokens_for_indexing
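# Hedged worked example: assuming entity1 == 'DRUG' and entity2 == 'DISEASE'
# (the real constants are defined earlier in this module), the tagged tokens
#   ['DRUGSTART', 'aspirin', 'DRUGEND', 'treats', 'DISEASESTART', 'pain', 'DISEASEEND']
# yield e1_idx == [(0, 0)], e2_idx == [(2, 2)] and the clean token list
# ['aspirin', 'treats', 'pain'].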
#TODO unify the preprocessing code with actually writing to a dataframe so that experiments can be started
# Read the original dataframe, generate the replacement sentence, and then
# call get_entity_positions_and_replacement_sentence on the result.
# It might be good to have a single method for this, since the tasks are similar -
# just different methods to call for preprocessing 1 vs 2
'''
Returns the dataframe after doing the preprocessing
'''
# update the metadata and the sentence with the preprocessed version
def update_metadata_sentence(row):
tagged_sentence = row.tagged_sentence
e1_idx, e2_idx, tokens_for_indexing = get_entity_positions_and_replacement_sentence(tagged_sentence.split())
new_sentence = list_to_string(tokens_for_indexing)
metadata = row.metadata
metadata['e1']['word_index'] = e1_idx
metadata['e2']['word_index'] = e2_idx
metadata.pop('entity_replacement', None) # remove the entity replacement dictionary from metadata
row.tokenized_sentence = new_sentence
row.metadata = metadata
return row
# give this preprocessing function a method to read the dataframe, and the location of the original
# dataframe to read so that it can do the preprocessing
# whether to do type 1 vs type 2 of the preprocessing
# 1: replace with all concepts in the sentence, 2: replace the stop words, punctuations and digits
# 3: replace only punctuations and digits
def preprocess(read_dataframe, df_directory, nlp, type_to_do=1):
df = read_dataframe(df_directory)
if type_to_do == 1:
df['tagged_sentence'] = df.apply(replace_with_concept, axis=1) # along the column axis
elif type_to_do == 2:
df['tagged_sentence'] = df.apply(replace_digit_punctuation_stop_word, args=(True,), axis=1)
elif type_to_do == 3:
df['tagged_sentence'] = df.apply(replace_digit_punctuation_stop_word, args=(False,), axis=1)
elif type_to_do == 4:
df['tagged_sentence'] = df.apply(replace_ner, args=(nlp, False), axis=1)
df = df.apply(update_metadata_sentence, axis=1)
#df = df.rename({'tokenized_sentence': 'preprocessed_sentence'}, axis=1)
df = df.drop(['tagged_sentence'], axis=1)
return df
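# Hedged usage sketch: `pd.read_pickle` and the paths/model name below are
# illustrative stand-ins, not confirmed by this module.
# if __name__ == '__main__':
#     import spacy
#     nlp = spacy.load('en')  # spaCy v1-era model, matching get_ner_dict above
#     df = preprocess(pd.read_pickle, 'data/train.pkl', nlp, type_to_do=4)
#     df.to_pickle('data/train_preprocessed.pkl')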
| 48.680473
| 134
| 0.677485
| 3,489
| 24,681
| 4.532531
| 0.124391
| 0.022954
| 0.033198
| 0.024282
| 0.438093
| 0.371443
| 0.320476
| 0.290945
| 0.259011
| 0.231504
| 0
| 0.01663
| 0.234999
| 24,681
| 506
| 135
| 48.77668
| 0.820931
| 0.233378
| 0
| 0.309192
| 0
| 0
| 0.035615
| 0
| 0
| 0
| 0
| 0.003953
| 0
| 1
| 0.094708
| false
| 0
| 0.011142
| 0.008357
| 0.203343
| 0.002786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
115918a7f0ed81b2789ef7c2542b4e40e41471f5
| 9,868
|
py
|
Python
|
SWAPLINEmain.py
|
ernforslab/Hu-et-al._GBMlineage2022
|
508744307746f357c75c1b1e92d9739a11d76870
|
[
"BSD-3-Clause"
] | 1
|
2022-03-01T23:51:26.000Z
|
2022-03-01T23:51:26.000Z
|
SWAPLINEmain.py
|
ernforslab/Hu-et-al._GBMlineage2022
|
508744307746f357c75c1b1e92d9739a11d76870
|
[
"BSD-3-Clause"
] | null | null | null |
SWAPLINEmain.py
|
ernforslab/Hu-et-al._GBMlineage2022
|
508744307746f357c75c1b1e92d9739a11d76870
|
[
"BSD-3-Clause"
] | 3
|
2022-03-01T23:53:20.000Z
|
2022-03-28T08:01:07.000Z
|
import datetime
import random
import sys
import pickle as pickle
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from collections import defaultdict
from scipy.sparse import issparse, coo_matrix
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import StratifiedShuffleSplit
def prediction(mwanted_order, mclasses_names, mprotogruop,
mdf_train_set,mtrain_index,mreorder_ix,
mcolor_dict,net,learninggroup="train"):
#mwanted_order = mwanted_order, mclasses_names = mclasses_names, mprotogruop = dfpfcclus.loc["Cluster"].values,
#mdf_train_set = mdf_train_set, figsizeV = 18, mtrain_index = mtrain_index, net = net, mreorder_ix = mreorder_ix,
#mcolor_dict = refcolor_dict, learninggroup = "test"
if learninggroup=="train":
mreorder_ix = [list(mclasses_names).index(i) for i in mwanted_order]
mbool00 = np.in1d( mclasses_names[mtrain_index], mwanted_order )
        if sum(mcolor_dict.index.isin(mwanted_order)) != len(mwanted_order):
            mcolor_dict = {}
            for item in mwanted_order:
                mcolor_dict[item] = random.sample(range(0, 255), 3)
            mcolor_dict = pd.Series(mcolor_dict)  # a plain dict has no .map, so wrap it as a Series
            mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y / 255., x)))
#mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y/255., x)))
#rcParams['savefig.dpi'] = 500
#mnewcolors = array(list(mcolor_dict[mprotogruop].values))
normalizer = 0.9*mdf_train_set.values.max(1)[:,np.newaxis]
refdataLR=net.predict_proba((mdf_train_set.values/ normalizer).T)
todaytime=f"{datetime.datetime.now():%Y%m%d%I%M%p}"
dataRef= refdataLR[:,mreorder_ix]
mreordername=[]
for i in mreorder_ix:
mreordername.append(list(mclasses_names)[i])
dfprobCL=pd.DataFrame(dataRef*100, index=mdf_train_set.columns,columns=mreordername)
#dfnewcl=pd.DataFrame(array([xtest,ytest]).T, index=mdf_train_set.columns)
return mreordername, dfprobCL, mcolor_dict, refdataLR, mreorder_ix
elif learninggroup=="test":
#mreorder_ix = [list(mwanted_order).index(i) for i in mwanted_order]
        if sum(mcolor_dict.index.isin(mwanted_order)) != len(mwanted_order):
            mcolor_dict = {}
            for item in mwanted_order:
                mcolor_dict[item] = random.sample(range(0, 255), 3)
            mcolor_dict = pd.Series(mcolor_dict)  # a plain dict has no .map, so wrap it as a Series
            mcolor_dict = mcolor_dict.map(lambda x: list(map(lambda y: y / 255., x)))
#mnewcolors = array(list(mcolor_dict[mprotogruop].values))
normalizerTest=mdf_train_set.max(1)-mdf_train_set.min(1)
normalizedValue=(mdf_train_set.sub(mdf_train_set.min(1),0).div(normalizerTest,0).fillna(0).values).T
dataRef=net.predict_proba( normalizedValue)
mreordername=[]
for i in mreorder_ix:
mreordername.append(list(mclasses_names)[i])
dfprobCL=pd.DataFrame(dataRef*100, index=mdf_train_set.columns,columns=mreordername)
#dfnewcl=pd.DataFrame(array([xtest,ytest]).T, index=mdf_train_set.columns)
return mreordername, dfprobCL, mcolor_dict, dataRef
def permutationTest(mdf_train_set,net, dfprobRef,mreorder_ix,num):
test = mdf_train_set.values.reshape((len(mdf_train_set.columns) * len(mdf_train_set.index)))
test = np.random.permutation(test)
test = test.reshape((len(mdf_train_set.index), len(mdf_train_set.columns)))
dftest = pd.DataFrame(test).astype(float)
xp = dftest.values
xp -= xp.min()
xp /= xp.ptp()
test0 = net.predict_proba((xp).T)[:, mreorder_ix]
for i in range(0, num):
test = mdf_train_set.values.reshape((len(mdf_train_set.columns) * len(mdf_train_set.index)))
test = np.random.permutation(test)
test = test.reshape((len(mdf_train_set.index), len(mdf_train_set.columns)))
dftest = pd.DataFrame(test).astype(float)
xp = dftest.values
xp -= xp.min()
xp /= xp.ptp()
dataRef2 = net.predict_proba((xp).T)[:, mreorder_ix]
test0 = np.append(test0, dataRef2, axis=0)
# test0=test0+dataRef2
thresholdlist = []
temp = []
for threshold in np.arange(0.0, 1.0, 0.01):
thresholdlist.append("Prob_%s%%" % int(threshold * 100))
temp.append((np.sum(test0 > threshold, axis=0) / test0.shape[0]))
ratiodf = pd.DataFrame(temp)
ratiodf.index = thresholdlist
ratiodf.columns = dfprobRef.columns
dftest0 = pd.DataFrame(test0 * 100, columns=dfprobRef.columns)
return dftest0, ratiodf
def indices_distancesDensematrix(D, n_neighbors):
sample_range = np.arange(D.shape[0])[:, None]
indices = np.argpartition(D, n_neighbors-1, axis=1)[:, :n_neighbors]
indices = indices[sample_range, np.argsort(D[sample_range, indices])]
distances = D[sample_range, indices]
return indices, distances
def sparse_matrixindicesDistances(indices, distances, n_obs, n_neighbors):
n_nonzero = n_obs * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
D = scipy.sparse.csr_matrix((distances.copy().ravel(), # copy the data, otherwise strange behavior here
indices.copy().ravel(),
indptr),
shape=(n_obs, n_obs))
D.eliminate_zeros()
return D
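# Hedged usage sketch (toy sizes, illustrative only): the two helpers above
# turn a dense pairwise-distance matrix into kNN indices/distances and a
# sparse CSR kNN graph.
# D = pairwise_distances(np.random.rand(5, 3))
# idx, dist = indices_distancesDensematrix(D, n_neighbors=2)
# knn = sparse_matrixindicesDistances(idx, dist, n_obs=5, n_neighbors=2)
# knn.shape  # (5, 5); note each point's 2 nearest neighbours include itself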
def SWAPLINE_dist(dfnn, n_neighbors, dfposi, metric = 'euclidean', n_pcs=30, TopN=30):
#n_pcs = 30, n_neighbors = len(dfnn.index), metric = 'euclidean'
X = dfnn
pca_ = PCA(n_components=n_pcs, svd_solver='arpack', random_state=0)
X_pca = pca_.fit_transform(X)
PariDistances = pairwise_distances(X_pca, metric=metric)
knn_indices, knn_distances = indices_distancesDensematrix(PariDistances, n_neighbors)
    # only the neighbour indices are used below; the distances and the sparse
    # matrix built by sparse_matrixindicesDistances are not needed here
dftestindex = pd.DataFrame(knn_indices)
# dfnn=df.T
# dfnn.shape
dftestindex.index = dfnn.index
umap1AllCluster = []
umap2AllCluster = []
clusternames = list(set(dfposi["Cluster"]))
sys.stdout.write("[%s]" % "Processing")
sys.stdout.flush()
sys.stdout.write("\b" * (50 + 1)) # return to start of line, after '['
perc = len(clusternames)
for item in clusternames:
# toolbar_width = len(clusternames)
itemindex = clusternames.index(item)
# setup toolbar
sys.stdout.write("-%s%%-" % int(itemindex*100 / perc))
sys.stdout.flush()
umap1cluster = []
umap2cluster = []
clustemp = dfposi.loc[dfposi["Cluster"] == item]["Index"]
for i in range(len(dftestindex.index)):
nearestvalue = dftestindex.iloc[i, :].loc[dftestindex.iloc[i, :].isin(clustemp)][:TopN].tolist()
umap1cluster.append(
(dfposi.iloc[nearestvalue, 1].astype(float).mean() + dfposi.iloc[nearestvalue[0], 1]) / 2)
umap2cluster.append(
(dfposi.iloc[nearestvalue, 0].astype(float).mean() + dfposi.iloc[nearestvalue[0], 0]) / 2)
umap1AllCluster.append(umap1cluster + np.random.uniform(-0.075, 0.075, size=len(umap1cluster)))
umap2AllCluster.append(umap2cluster + np.random.uniform(-0.075, 0.075, size=len(umap2cluster)))
dfcellclusumap1 = pd.DataFrame(umap1AllCluster, index=clusternames, columns=dftestindex.index).T
dfcellclusumap2 = pd.DataFrame(umap2AllCluster, index=clusternames, columns=dftestindex.index).T
sys.stdout.write("]\n")
return dfcellclusumap1, dfcellclusumap2
def SWAPLINE_assign(dfprobCL, negtest, n, dfcellclusumap1,dfcellclusumap2,nodelist):
#n= len(set(dfposi["Cluster"]))
#nodelist=[['Neural_crest', 'Neural_tube', 'Ectoderm'],['Neural_crest','Pericyte/SMC', 'VLMC'],['Neural_crest', 'Ectoderm','VLMC'],
#['Rgl','Neural_tube', 'Ectoderm'],['Rgl','Neural_tube', 'Glia'],['Rgl','Neural_tube', 'OPCs'],['Rgl','Neural_tube', 'Neuron'],
#['Rgl','OPCs', 'Neuron'],['Rgl','Glia', 'Neuron'],['Rgl','Glia', 'OPCs']]
dffinalprob = dfprobCL - negtest
dffinalprob[dffinalprob < 0] = 0
dfrank2 = dffinalprob.T
# dfrank.shape
sumlist = []
for testx in range(len(dfrank2.columns)):
dftempnn = dfprobCL.T.loc[dfrank2.nlargest(n, dfrank2.columns[testx]).iloc[:n, :].index, dfrank2.columns[testx]]
sumlist.append(np.sum(dftempnn))
dfsumnew = dfprobCL.T
dfsumnew.loc["sum_nn"] = sumlist
indexlist = dfsumnew.T.loc[dfsumnew.loc["sum_nn"] > 1].index
dfrank = dffinalprob.T
newumap1 = []
newumap2 = []
for testx in dfrank.columns:
nodeprob = []
for item in nodelist:
nodeprob.append(dfrank[testx].loc[item].sum())
nodename = nodelist[np.array(nodeprob).argmax(axis=0)]
# dftempnn=dfrank.nlargest(n,testx)[testx][:n]
dftempnn = dfrank.loc[nodename, testx]
newumap1.append(np.sum(dfcellclusumap1.loc[testx, dftempnn.index] * (dftempnn / np.sum(dftempnn))))
newumap2.append(np.sum(dfcellclusumap2.loc[testx, dftempnn.index] * (dftempnn / np.sum(dftempnn))))
dfnewumap = pd.DataFrame([newumap2, newumap1], columns=dffinalprob.index)
dfnewumap=dfnewumap.T
return dfnewumap
| 48.851485
| 138
| 0.667511
| 1,212
| 9,868
| 5.292079
| 0.212046
| 0.029935
| 0.04116
| 0.022451
| 0.339726
| 0.335048
| 0.322264
| 0.281727
| 0.268631
| 0.259277
| 0
| 0.019483
| 0.204195
| 9,868
| 202
| 139
| 48.851485
| 0.797275
| 0.143697
| 0
| 0.259259
| 0
| 0
| 0.016049
| 0.00462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.135802
| 0
| 0.216049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1159ace76695ba7ee79a54fb2dfd624cc5d70bce
| 1,988
|
py
|
Python
|
main.py
|
b0kch01/ColorfulValorant
|
9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2
|
[
"MIT"
] | 1
|
2021-06-07T13:52:48.000Z
|
2021-06-07T13:52:48.000Z
|
main.py
|
B0kCh01/ColorfulValorant
|
9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2
|
[
"MIT"
] | 1
|
2021-09-26T10:49:16.000Z
|
2021-09-27T03:27:55.000Z
|
main.py
|
b0kch01/ColorfulValorant
|
9fdbcc6ca4626fc3d7f0349eb7564ffac1fc26c2
|
[
"MIT"
] | null | null | null |
# Colorful VALORANT by b0kch01
import os, ctypes
# Disable quick-edit mode (pauses bot)
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), 128)
from pyfiglet import Figlet
from termcolor import cprint, colored
import colorama
import keyboard
import time
# Fix legacy console color
colorama.init()
cprint("Setting up...")
cprint(" - [¤] Windows", "green")
cprint(" - [¤] Imported Modules", "green")
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
cprint(" - [x] Please run as administrator", "red")
input("[ ENTER ] to quit")
exit(0)
def clear():
os.system("cls")
# User Interface
f = Figlet(font="ogre")
bgs = ["on_red", "on_yellow", "on_green", "on_blue", "on_magenta"]
CACHED_TITLESCREEN = f"""
{ "".join([colored(" " + "COLORFUL"[i] + " ", "grey", bgs[i % 4]) for i in range(8)]) }
{ colored(f.renderText("Valorant"), "red") }
{ colored(" Created with ♥ by b0kch01! ", "grey", "on_white") }
{ colored(" USE AT YOUR OWN RISK ", "grey", "on_yellow") }
"""
i = 0
colors = [
"<enemy>",
"<team>",
"<system>",
"<notification>",
"<warning>"
]
colorMap = [
"red",
"blue",
"yellow",
"green",
"magenta"
]
def goUp():
global i
i += 1
render()
def goDown():
global i
i -= 1
render()
def makeColor():
time.sleep(0.05)
keyboard.send("home")
keyboard.write(colors[i % 5])
keyboard.send("end")
keyboard.send("backspace")
keyboard.write("</>")
keyboard.send("\n")
def render():
global i
clear()
print(CACHED_TITLESCREEN)
print("Color: " + colored(colors[i % 5], "white", "on_" + colorMap[i % 5]))
keyboard.add_hotkey("\\", makeColor)
keyboard.add_hotkey("up", goUp)
keyboard.add_hotkey("down", goDown)
try:
render()
print("Instructions are on https://github.com/b0kch01/ColorfulValorant")
print("\nEnjoy! :)")
keyboard.wait("up + down")
except KeyboardInterrupt:
exit(0)
| 20.708333
| 89
| 0.607646
| 243
| 1,988
| 4.930041
| 0.514403
| 0.040067
| 0.042571
| 0.015025
| 0.03005
| 0.03005
| 0
| 0
| 0
| 0
| 0
| 0.024188
| 0.209759
| 1,988
| 96
| 90
| 20.708333
| 0.736474
| 0.052817
| 0
| 0.111111
| 0
| 0.013889
| 0.344332
| 0.017563
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069444
| false
| 0
| 0.097222
| 0
| 0.166667
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
115e6da0adc887e907135e22cea5b992136e5b12
| 791
|
py
|
Python
|
typus/chars.py
|
byashimov/typus
|
b0576d6065163cc46a171b90027f2e3321ae7615
|
[
"BSD-3-Clause"
] | 65
|
2016-06-15T08:44:58.000Z
|
2021-02-02T10:42:23.000Z
|
typus/chars.py
|
byashimov/typus
|
b0576d6065163cc46a171b90027f2e3321ae7615
|
[
"BSD-3-Clause"
] | 4
|
2018-11-15T17:10:05.000Z
|
2020-01-09T19:44:39.000Z
|
typus/chars.py
|
byashimov/typus
|
b0576d6065163cc46a171b90027f2e3321ae7615
|
[
"BSD-3-Clause"
] | 6
|
2017-10-20T16:28:45.000Z
|
2021-11-11T18:41:21.000Z
|
__all__ = (
'ANYSP',
'DLQUO',
'DPRIME',
'LAQUO',
'LDQUO',
'LSQUO',
'MDASH',
'MDASH_PAIR',
'MINUS',
'NBSP',
'NDASH',
'NNBSP',
'RAQUO',
'RDQUO',
'RSQUO',
'SPRIME',
'THNSP',
'TIMES',
'WHSP',
)
NBSP = '\u00A0'
NNBSP = '\u202F'
THNSP = '\u2009'
WHSP = ' '
ANYSP = r'[{}{}{}{}]'.format(WHSP, NBSP, NNBSP, THNSP)
NDASH = '–'
MDASH = '—'
MDASH_PAIR = NNBSP + MDASH + THNSP
HYPHEN = ''
MINUS = '−'
TIMES = '×'
LSQUO = '‘'  # left single curly quote mark
RSQUO = '’'  # right single curly quote mark/apostrophe
LDQUO = '“'  # left double curly quote mark
RDQUO = '”'  # right double curly quote mark
DLQUO = '„'  # double low curly quote mark
LAQUO = '«'  # left angle quote mark
RAQUO = '»'  # right angle quote mark
SPRIME = '′'
DPRIME = '″'
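# Hedged usage sketch: ANYSP is a character class over the four space
# variants above, so mixed spaces can be normalised in one pass:
# >>> import re
# >>> re.sub(ANYSP, WHSP, 'a\u00A0b\u202Fc')
# 'a b c'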
| 16.829787
| 54
| 0.525917
| 93
| 791
| 4.516129
| 0.483871
| 0.119048
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017361
| 0.271808
| 791
| 46
| 55
| 17.195652
| 0.694444
| 0.223767
| 0
| 0
| 0
| 0
| 0.234323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1160107f399496c19ae30848738f2468e25e6508
| 5,259
|
py
|
Python
|
src/wagtail_live/models.py
|
Stormheg/wagtail-live
|
a5eb79024d44c060079ae7d4707d6220ea66ff5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/wagtail_live/models.py
|
Stormheg/wagtail-live
|
a5eb79024d44c060079ae7d4707d6220ea66ff5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/wagtail_live/models.py
|
Stormheg/wagtail-live
|
a5eb79024d44c060079ae7d4707d6220ea66ff5b
|
[
"BSD-3-Clause"
] | null | null | null |
""" Wagtail Live models."""
from django.db import models
from django.utils.timezone import now
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.fields import StreamField
from .blocks import LivePostBlock
class LivePageMixin(models.Model):
"""A helper class for pages using Wagtail Live.
Attributes:
channel_id (str):
Id of the corresponding channel in a messaging app.
live_posts (StreamField):
StreamField containing all the posts/messages published
respectively on this page/channel.
"""
channel_id = models.CharField(
help_text="Channel ID",
max_length=255,
blank=True,
unique=True,
)
live_posts = StreamField(
[
("live_post", LivePostBlock()),
],
blank=True,
)
panels = [
FieldPanel("channel_id"),
StreamFieldPanel("live_posts"),
]
@property
def last_update_timestamp(self):
"""Timestamp of the last update of this page."""
return self.latest_revision_created_at.timestamp()
def _get_live_post_index(self, message_id):
"""Retrieves the index of a live post.
Args:
message_id (str):
ID of the message corresponding to a live post.
Returns:
            (int) Index of the live post if found, else None.
"""
for i, post in enumerate(self.live_posts):
if post.value["message_id"] == message_id:
return i
        return None
def get_live_post_index(self, message_id):
"""Retrieves index of a livepost."""
return self._get_live_post_index(message_id=message_id)
def get_live_post_by_index(self, live_post_index):
"""Retrieves a live post by its index.
Args:
live_post_index (str): Index of the live post to look for.
Returns:
(LivePostBlock) The live post instance
Raises:
(IndexError) if a live post with the given index doesn't exist.
"""
return self.live_posts[live_post_index]
def get_live_post_by_message_id(self, message_id):
"""Retrieves a live post by its ID.
Args:
message_id (str):
ID of the message corresponding to a live post.
Returns:
(LivePostBlock) The live post instance
Raises:
(KeyError) if a live post with the given ID doesn't exist.
"""
live_post_index = self.get_live_post_index(message_id=message_id)
if live_post_index is None:
raise KeyError
return self.get_live_post_by_index(live_post_index)
def add_live_post(self, live_post):
"""Adds a new live post to live page.
Args:
live_post (LivePostBlock):
live post to add
"""
posts = self.live_posts
lp_index = 0
post_created_at = live_post["created"]
while lp_index < len(posts):
if posts[lp_index].value["created"] < post_created_at:
break
lp_index += 1
# Insert to keep posts sorted by time
self.live_posts.insert(lp_index, ("live_post", live_post))
self.save_revision().publish()
def delete_live_post(self, message_id):
"""Deletes the live post corresponding to message_id.
Args:
message_id (str):
ID of the message corresponding to a live post.
Raises:
(KeyError) if live post containing message with message_id doesn't exist.
"""
live_post_index = self.get_live_post_index(message_id=message_id)
if live_post_index is None:
raise KeyError
del self.live_posts[live_post_index]
self.save_revision().publish()
def update_live_post(self, live_post):
"""Updates a live post when it has been edited.
Args:
live_post (livePostBlock): Live post to update.
"""
live_post.value["modified"] = now()
self.save_revision().publish()
def get_updates_since(self, last_update_ts):
"""Retrieves new updates since a given timestamp value.
Args:
last_update_ts (DateTime):
Timestamp of the last update.
Returns:
(list, dict) a tuple containing the current live posts
and the updated posts since last_update_ts.
"""
current_posts, updated_posts = [], {}
for post in self.live_posts:
if not post.value["show"]:
continue
post_id = post.id
current_posts.append(post_id)
created = post.value["created"]
if created > last_update_ts: # This is a new post
updated_posts[post_id] = post.render(context={"block_id": post_id})
continue
last_modified = post.value["modified"]
if last_modified and last_modified > last_update_ts:
# This is an edited post
updated_posts[post_id] = post.render(context={"block_id": post_id})
return (updated_posts, current_posts)
class Meta:
abstract = True
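# Hedged usage sketch: LivePageMixin is abstract, so a concrete page model
# mixes it into a Wagtail Page subclass; `BlogLivePage` is an illustrative
# name, not part of this module.
# from wagtail.core.models import Page
#
# class BlogLivePage(LivePageMixin, Page):
#     content_panels = Page.content_panels + LivePageMixin.panels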
| 28.895604
| 85
| 0.601065
| 652
| 5,259
| 4.630368
| 0.219325
| 0.124545
| 0.060285
| 0.026499
| 0.403445
| 0.297118
| 0.264657
| 0.198741
| 0.198741
| 0.158993
| 0
| 0.001679
| 0.320403
| 5,259
| 181
| 86
| 29.055249
| 0.843033
| 0.338087
| 0
| 0.2
| 0
| 0
| 0.037606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.066667
| 0
| 0.346667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1161293fb1e28e5788a7aa124f039306bb2b8a3e
| 2,291
|
py
|
Python
|
python/test_inprod_analytic.py
|
solepomies/MAOOAM
|
3a30c4030da384a9c4a8510a628c5c1f8ff511cc
|
[
"MIT"
] | 18
|
2016-04-21T08:45:15.000Z
|
2021-11-30T11:21:40.000Z
|
python/test_inprod_analytic.py
|
solepomies/MAOOAM
|
3a30c4030da384a9c4a8510a628c5c1f8ff511cc
|
[
"MIT"
] | 1
|
2019-07-15T13:01:21.000Z
|
2019-07-15T13:01:21.000Z
|
python/test_inprod_analytic.py
|
solepomies/MAOOAM
|
3a30c4030da384a9c4a8510a628c5c1f8ff511cc
|
[
"MIT"
] | 15
|
2016-05-12T12:09:51.000Z
|
2021-12-17T18:43:07.000Z
|
import numpy as np
from inprod_analytic import *
from params_maooam import natm, noc
init_inprod()
real_eps = 2.2204460492503131e-16
"""This module print the coefficients computed in the inprod_analytic module"""
for i in range(0, natm):
for j in range(0, natm):
if(abs(atmos.a[i, j]) >= real_eps):
print ("a["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.a[i, j])
if(abs(atmos.c[i, j]) >= real_eps):
print ("c["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.c[i, j])
for k in range(0, natm):
if(abs(atmos.b[i, j, k]) >= real_eps):
print (
"b["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] =%.5E"
% atmos.b[i, j, k])
if(abs(atmos.g[i, j, k]) >= real_eps):
print (
"g["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
% atmos.g[i, j, k])
for i in range(0, natm):
for j in range(0, noc):
if(abs(atmos.d[i, j]) >= real_eps):
print ("d["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.d[i, j])
if(abs(atmos.s[i, j]) >= real_eps):
print ("s["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % atmos.s[i, j])
for i in range(0, noc):
for j in range(0, noc):
if(abs(ocean.M[i, j]) >= real_eps):
print ("M["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.M[i, j])
if(abs(ocean.N[i, j]) >= real_eps):
print ("N["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E" % ocean.N[i, j])
for k in range(0, noc):
if(abs(ocean.O[i, j, k]) >= real_eps):
print (
"O["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
% ocean.O[i, j, k])
if(abs(ocean.C[i, j, k]) >= real_eps):
print (
"C["+str(i+1)+"]["+str(j+1)+"]["+str(k+1)+"] = % .5E"
% ocean.C[i, j, k])
for j in range(0, natm):
if(abs(ocean.K[i, j]) >= real_eps):
print (
"K["+str(i+1)+"]"+"["+str(j+1)+"] = % .5E"
% ocean.K[i, j])
if(abs(ocean.W[i, j]) >= real_eps):
print (
"W["+str(i+1)+"]" + "["+str(j+1)+"] = % .5E"
% ocean.W[i, j])
| 38.830508
| 79
| 0.395024
| 358
| 2,291
| 2.480447
| 0.131285
| 0.054054
| 0.162162
| 0.108108
| 0.762387
| 0.537162
| 0.483108
| 0.40991
| 0.202703
| 0.202703
| 0
| 0.044914
| 0.339153
| 2,291
| 58
| 80
| 39.5
| 0.541612
| 0
| 0
| 0.24
| 0
| 0
| 0.07327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06
| 0
| 0.06
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
11618053ba49ca083edd95cb07327f86424a2f0d
| 849
|
py
|
Python
|
public/views/fallback.py
|
jgarber623/openstates.org
|
0c514c955f7ffbe079c77c3ec00345b20818ad04
|
[
"MIT"
] | null | null | null |
public/views/fallback.py
|
jgarber623/openstates.org
|
0c514c955f7ffbe079c77c3ec00345b20818ad04
|
[
"MIT"
] | null | null | null |
public/views/fallback.py
|
jgarber623/openstates.org
|
0c514c955f7ffbe079c77c3ec00345b20818ad04
|
[
"MIT"
] | null | null | null |
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
import boto3
from botocore.errorfactory import ClientError
from ..models import PersonProxy
def fallback(request):
BUCKET_NAME = "legacy.openstates.org"
key = request.path.lstrip("/") + "index.html"
s3 = boto3.client("s3")
try:
obj = s3.get_object(Bucket=BUCKET_NAME, Key=key)
return HttpResponse(obj["Body"].read())
except ClientError:
raise Http404(request.path + "index.html")
def legislator_fallback(request, legislator_id):
try:
p = PersonProxy.objects.get(
identifiers__scheme="legacy_openstates",
identifiers__identifier=legislator_id,
)
return redirect(p.pretty_url(), permanent=True)
except PersonProxy.DoesNotExist:
return fallback(request)
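# Hedged wiring sketch: these views are typically mounted as catch-all URL
# patterns; the routes below are illustrative, not taken from this repo.
# from django.urls import re_path
# urlpatterns = [
#     re_path(r'^legislators/(?P<legislator_id>\w+)/$', legislator_fallback),
#     re_path(r'', fallback),
# ]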
| 28.3
| 56
| 0.693757
| 95
| 849
| 6.073684
| 0.526316
| 0.07799
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016345
| 0.207303
| 849
| 29
| 57
| 29.275862
| 0.84101
| 0
| 0
| 0.086957
| 0
| 0
| 0.076561
| 0.024735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.217391
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
116638e98b91db5181f4b52e40fed58dce87a1e3
| 1,038
|
py
|
Python
|
aws_tests/aws_mlops_scripts/sagemaker_trigger.py
|
Chronicles-of-AI/archives
|
23b978a709c785ff00ec90487039944b8ab8f4fb
|
[
"MIT"
] | null | null | null |
aws_tests/aws_mlops_scripts/sagemaker_trigger.py
|
Chronicles-of-AI/archives
|
23b978a709c785ff00ec90487039944b8ab8f4fb
|
[
"MIT"
] | null | null | null |
aws_tests/aws_mlops_scripts/sagemaker_trigger.py
|
Chronicles-of-AI/archives
|
23b978a709c785ff00ec90487039944b8ab8f4fb
|
[
"MIT"
] | null | null | null |
import os
import sagemaker
from sagemaker import get_execution_role
from sagemaker.tensorflow.estimator import TensorFlow
sagemaker_session = sagemaker.Session()
# role = get_execution_role()
region = sagemaker_session.boto_session.region_name
training_input_path = "s3://intel-edge-poc/mask_dataset_datagen/train/"
validation_input_path = "s3://intel-edge-poc/mask_dataset_datagen/val/"
hyperparam = {
"save_model_dir": "s3://intel-edge-poc/saved/",
"batch_size": 32,
"epochs": 2,
"optimizer": "adam",
"learning_rate": 1e-3,
}
#'train_dir': 'mask_dataset_datagen/train/',
#'val_dir': 'mask_dataset_datagen/val/'
#'bucket' : 'intel-edge-poc',
tf_estimator = TensorFlow(
entry_point="TrainingJob.py",
role="intel-edge-poc-role",
instance_count=1,
instance_type="ml.c4.xlarge",
framework_version="2.3",
py_version="py37",
hyperparameters=hyperparam,
script_mode=True,
)
# tf_estimator.fit()
tf_estimator.fit({"training": training_input_path, "validation": validation_input_path})
| 25.95
| 88
| 0.735067
| 135
| 1,038
| 5.362963
| 0.466667
| 0.062155
| 0.082873
| 0.058011
| 0.11326
| 0.11326
| 0.11326
| 0.11326
| 0.11326
| 0
| 0
| 0.015368
| 0.122351
| 1,038
| 39
| 89
| 26.615385
| 0.779363
| 0.149326
| 0
| 0
| 0
| 0
| 0.278221
| 0.13455
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
116ab6cd1db9f2f070145181b5804b80b331c8fe
| 2,040
|
py
|
Python
|
script2.py
|
joshigarvitgh/image-processing
|
70e3ca093882904d5d995153ca079d000996a240
|
[
"Apache-2.0"
] | null | null | null |
script2.py
|
joshigarvitgh/image-processing
|
70e3ca093882904d5d995153ca079d000996a240
|
[
"Apache-2.0"
] | null | null | null |
script2.py
|
joshigarvitgh/image-processing
|
70e3ca093882904d5d995153ca079d000996a240
|
[
"Apache-2.0"
] | null | null | null |
from pyimagesearch.shapedetector import ShapeDetector
from pyimagesearch.colorlabeler import ColorLabeler
import argparse
import imutils
import numpy as np
import cv2
import argparse
import imutils
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
if face_cascade.empty(): raise Exception("your face_cascade is empty; are you sure the path is correct?")
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
if eye_cascade.empty(): raise Exception("your eye_cascade is empty; are you sure the path is correct?")
video = cv2.VideoCapture(0)
while(video.isOpened()):
ret, frame = video.read()
if frame is not None:
resized = imutils.resize(frame,width=600)
ratio=frame.shape[0] / float(resized.shape[0])
blurred = cv2.GaussianBlur(resized, (5, 5), 0)
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
sd = ShapeDetector()
cl = ColorLabeler()
# loop over the contours
for c in cnts:
# compute the center of the contour, then detect the name of the
# shape using only the contour
M = cv2.moments(c)
#cX = int((M["m10"] / M["m00"]) * ratio)
#cY = int((M["m01"] / M["m00"]) * ratio)
shape = sd.detect(c)
color = cl.label(lab, c)
print(shape)
print(color)
# multiply the contour (x, y)-coordinates by the resize ratio,
# then draw the contours and the name of the shape on the image
c = c.astype("float")
c *= ratio
c = c.astype("int")
cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)
#cv2.putText(frame, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow('Video',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
cv2.destroyAllWindows()
| 35.789474
| 107
| 0.698039
| 303
| 2,040
| 4.640264
| 0.419142
| 0.023471
| 0.02845
| 0.038407
| 0.16074
| 0.056899
| 0.056899
| 0.056899
| 0.056899
| 0.056899
| 0
| 0.041592
| 0.175
| 2,040
| 56
| 108
| 36.428571
| 0.793821
| 0.232353
| 0
| 0.097561
| 0
| 0
| 0.124116
| 0.022508
| 0
| 0
| 0.002572
| 0
| 0
| 1
| 0
| false
| 0
| 0.195122
| 0
| 0.195122
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
116b7b4ac4b9d4a7f8c63237f875c149f4bb08e0
| 2,016
|
py
|
Python
|
qiskit_code/DeutschJozsa.py
|
OccumRazor/implement-quantum-algotirhms-with-qiskit
|
8574b6505fc34f12eb63e1791e969099d56e3974
|
[
"MIT"
] | 3
|
2020-11-03T01:21:48.000Z
|
2021-09-23T18:53:40.000Z
|
qiskit_code/DeutschJozsa.py
|
OccumRazor/implement-quantum-algotirhms-with-qiskit
|
8574b6505fc34f12eb63e1791e969099d56e3974
|
[
"MIT"
] | null | null | null |
qiskit_code/DeutschJozsa.py
|
OccumRazor/implement-quantum-algotirhms-with-qiskit
|
8574b6505fc34f12eb63e1791e969099d56e3974
|
[
"MIT"
] | null | null | null |
from qiskit import QuantumRegister,QuantumCircuit
from qiskit.aqua.operators import StateFn
from qiskit.aqua.operators import I
from qiskit_code.quantumMethod import add,ini
from qiskit_code.classicalMethod import Dec2Bi
def DeutschJozsa(l,method):
# Deutsch, D. and Jozsa, R., 1992. Rapid solution of problems by quantum computation.
# Proceedings of the Royal Society of London. Series A: Mathematical and Physical Sciences,
# 439(1907), pp.553-558.
# The input 'l' is the equivalent to the 'N' in the original paper of
# David Deutsch and Richard Jozsa, and 'method' denotes the 'unknown'
# function, if you input 'balanced' then it will be balanced and otherwise
# it will be constant.
qr0=QuantumRegister(l)
qr1=QuantumRegister(l+1)
# One qubit larger to carry.
ac=QuantumRegister(l) # Ancilla.
t0=QuantumRegister(1)
circ=QuantumCircuit(qr0,qr1,ac,t0)
circ.h(qr0)
if method=='balanced':
print('balanced oracle')
ini(circ,qr1,Dec2Bi(2**(l-1)))
else:
print('constant oracle')
ini(circ,qr1,Dec2Bi(0))
lst=range(l)
QIN1=[qr0[i] for i in lst]+[qr1[i] for i in range(l+1)]+[ac[i] for i in lst]
ADD=add(qr0,qr1,ac,l)
circ.append(ADD,QIN1)# Role of the U unitary
circ.cx(qr1[l],t0)# Role of the U unitary
circ.z(t0)# The S unitary.
circ.cx(qr1[l],t0)# Role of the U unitary
circ.append(ADD.inverse(),QIN1)# Role of the U unitary
psi=StateFn(circ)
phiReg0=QuantumRegister(l)
phiReg1=QuantumRegister(l+1)
phiReg2=QuantumRegister(l)
t1=QuantumRegister(1)
phiCirc=QuantumCircuit(phiReg0,phiReg1,phiReg2,t1)
phiCirc.h(phiReg0)
if method=='balanced':
ini(circ,qr1,Dec2Bi(2**(l-1)))
else:
ini(circ,qr1,Dec2Bi(0))
phi=StateFn(phiCirc)
operator=I.tensorpower(3*l+2)
expectation_value=(~psi@operator@phi).eval()
print(expectation_value)
# example invocations (the first argument l is illustrative):
#DeutschJozsa(3, 'constant')
#DeutschJozsa(3, 'balanced')
| 38.037736
| 92
| 0.671131
| 293
| 2,016
| 4.604096
| 0.37884
| 0.071164
| 0.029652
| 0.047443
| 0.211268
| 0.119348
| 0.093403
| 0.093403
| 0.059303
| 0.059303
| 0
| 0.042006
| 0.208829
| 2,016
| 52
| 93
| 38.769231
| 0.803762
| 0.304563
| 0
| 0.243902
| 0
| 0
| 0.034509
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.121951
| 0
| 0.146341
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
116bf2691d7781b16c90385ce38a0af9b3dfe37f
| 480
|
py
|
Python
|
web/products-manager/solve.py
|
cclauss/fbctf-2019-challenges
|
4353c2ce588cf097ac6ca9bcf7b943a99742ac75
|
[
"MIT"
] | 213
|
2019-06-14T18:28:40.000Z
|
2021-12-27T14:44:45.000Z
|
web/products-manager/solve.py
|
cclauss/fbctf-2019-challenges
|
4353c2ce588cf097ac6ca9bcf7b943a99742ac75
|
[
"MIT"
] | 2
|
2020-06-05T21:14:51.000Z
|
2021-06-10T21:34:03.000Z
|
web/products-manager/solve.py
|
cclauss/fbctf-2019-challenges
|
4353c2ce588cf097ac6ca9bcf7b943a99742ac75
|
[
"MIT"
] | 59
|
2019-06-17T17:35:29.000Z
|
2021-12-04T22:26:37.000Z
|
import requests
import random, string
x = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(16))
URL = "http://localhost/"
secret = "aA11111111" + x
# Registering a user
requests.post(url = "%s/add.php" % URL, data = {
'name': 'facebook' + ' '*64 + 'abc',
'secret': secret,
'description': 'desc',
})
r = requests.post(url = "%s/view.php" % URL, data = {
'name': 'facebook',
'secret': secret,
})
print(r.text)
| 21.818182
| 110
| 0.63125
| 62
| 480
| 4.83871
| 0.612903
| 0.073333
| 0.1
| 0.106667
| 0.146667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030227
| 0.172917
| 480
| 21
| 111
| 22.857143
| 0.725441
| 0.0375
| 0
| 0.266667
| 0
| 0
| 0.223913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
feba32dda1863dbf22b57f349bb7f5c4d2450b8d
| 737
|
py
|
Python
|
app/__main__.py
|
sabuj073/Pyqt
|
fd316ca81b57cf45c4b02661ae32d3e87da86643
|
[
"MIT"
] | 15
|
2019-07-17T04:35:43.000Z
|
2022-03-06T10:56:57.000Z
|
app/__main__.py
|
SadeghShabestani/pyqt-gui-template
|
7b0be93b28519fecef061ae6fd257b5e1414f609
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
SadeghShabestani/pyqt-gui-template
|
7b0be93b28519fecef061ae6fd257b5e1414f609
|
[
"MIT"
] | 7
|
2019-11-02T05:03:01.000Z
|
2022-01-22T07:16:35.000Z
|
import argparse
import sys
import traceback
from .app import Application
def new_excepthook(type, value, tb):
# by default, Qt does not seem to output any errors, this prevents that
traceback.print_exception(type, value, tb)
sys.excepthook = new_excepthook
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--no-gui', action='store_true')
args = parser.parse_args()
app = Application()
if args.no_gui:
app.calculation(3)
else:
from PyQt5.QtWidgets import QApplication
from .gui import MainWindow
qapp = QApplication(sys.argv)
gui = MainWindow(app)
gui.show()
sys.exit(qapp.exec_())
if __name__ == '__main__':
main()
| 20.472222
| 75
| 0.663501
| 92
| 737
| 5.141304
| 0.586957
| 0.054968
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003559
| 0.237449
| 737
| 35
| 76
| 21.057143
| 0.838078
| 0.093623
| 0
| 0
| 0
| 0
| 0.039039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.26087
| 0
| 0.347826
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
febafd98c2edf8a650a93925007e3f317d57cdc1
| 848
|
py
|
Python
|
test/test_1030.py
|
ralphribeiro/uri-projecteuler
|
7151d86e014aea9c56026cc88f50b4e940117dd8
|
[
"MIT"
] | null | null | null |
test/test_1030.py
|
ralphribeiro/uri-projecteuler
|
7151d86e014aea9c56026cc88f50b4e940117dd8
|
[
"MIT"
] | null | null | null |
test/test_1030.py
|
ralphribeiro/uri-projecteuler
|
7151d86e014aea9c56026cc88f50b4e940117dd8
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from exercicios.ex1030 import calcula_suicidio
class TestEx1030(TestCase):
def test_saida_com_erro_para_entradas_fora_do_intervalo(self):
chamada = [(0, 10), (10, 0), (10001, 10), (10, 1001)]
esperado = ("Case 1: entrada inválida\n"
"Case 2: entrada inválida\n"
"Case 3: entrada inválida\n"
"Case 4: entrada inválida\n")
retorno = calcula_suicidio(chamada)
self.assertEqual(esperado, retorno)
def test_saida_deve_retornar_case_1_3_para_entrada_5_2(self):
chamada = [(5, 2), (6, 3), (1234, 233)]
esperado = ("Case 1: 3\n"
"Case 2: 1\n"
"Case 3: 25\n")
retorno = calcula_suicidio(chamada)
self.assertEqual(esperado, retorno)
| 29.241379
| 66
| 0.59434
| 105
| 848
| 4.6
| 0.428571
| 0.05176
| 0.132505
| 0.124224
| 0.248447
| 0.248447
| 0.248447
| 0.248447
| 0.248447
| 0
| 0
| 0.089226
| 0.299528
| 848
| 28
| 67
| 30.285714
| 0.723906
| 0
| 0
| 0.210526
| 0
| 0
| 0.162736
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
febd1a039c30d408c01acbf196e318f0a33735b0
| 2,177
|
py
|
Python
|
src/messageHandler.py
|
lorandcheng/ee250-final-project
|
e99da9b0221b4f3fdf4737814b9fa4b9152e15d6
|
[
"MIT"
] | null | null | null |
src/messageHandler.py
|
lorandcheng/ee250-final-project
|
e99da9b0221b4f3fdf4737814b9fa4b9152e15d6
|
[
"MIT"
] | null | null | null |
src/messageHandler.py
|
lorandcheng/ee250-final-project
|
e99da9b0221b4f3fdf4737814b9fa4b9152e15d6
|
[
"MIT"
] | null | null | null |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Author: Lorand Cheng https://github.com/lorandcheng
# Date: Nov 15, 2020
# Project: USC EE250 Final Project, Morse Code Translator and Messenger
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import json
import requests
from datetime import datetime
from pprint import pprint
class messageHandler:
def __init__(self, name, serverAddress):
"""
Summary: Class that manages the HTTP interactions with the messaging server
Args:
name (string): Name of node
serverAddress (string): Target server to connect to in format ip_addr:port
"""
self.name = name
self.serverAddress = serverAddress
def sendMessage(self, message):
"""
Summary: Sends a POST message to the server
Args:
message (string): Content of the message
"""
headers = {
'Content-Type': 'application/json',
'Authorization': None # not using HTTP secure
}
payload = {
'sender': self.name,
'message': message,
'timestamp': datetime.now()
}
response = requests.post("http://{}/send-message".format(self.serverAddress),
headers=headers,
data=json.dumps(payload, indent=4, sort_keys=True, default=str))
if response.status_code == 200:
pprint(response.json())
return 1
else:
return 0
def getMessages(self,lastRead):
"""
Summary: Sends a GET message to the server
"""
params = {
'sender': self.name,
'lastRead': lastRead
}
return requests.get("http://{}/get-messages".format(self.serverAddress), params=params)
def getMessageHistory(self):
"""
Summary: Sends a GET message to the server
"""
params = {
'sender': self.name
}
return requests.get("http://{}/history".format(self.serverAddress), params=params)
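# Hedged usage sketch: the server address is illustrative; the endpoints are
# the /send-message and /get-messages routes used above.
# if __name__ == '__main__':
#     handler = messageHandler('node1', '127.0.0.1:5000')
#     if handler.sendMessage('hello'):
#         print(handler.getMessages(lastRead=0).json())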
| 28.272727
| 97
| 0.519982
| 203
| 2,177
| 5.541872
| 0.477833
| 0.035556
| 0.034667
| 0.048
| 0.158222
| 0.096
| 0.096
| 0.096
| 0.096
| 0.096
| 0
| 0.010608
| 0.350482
| 2,177
| 76
| 98
| 28.644737
| 0.785007
| 0.284796
| 0
| 0.108108
| 0
| 0
| 0.107543
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.108108
| 0
| 0.351351
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
febdebe28a0eb11da7fb60e489e4b8faec751e19
| 1,898
|
py
|
Python
|
data_loader.py
|
isLinXu/AIToodlBox
|
bacdea77b35e370f728c9fd170ad15c0dd112a09
|
[
"MIT"
] | 3
|
2021-09-15T02:24:45.000Z
|
2021-09-16T03:27:58.000Z
|
data_loader.py
|
isLinXu/AIToodlBox
|
bacdea77b35e370f728c9fd170ad15c0dd112a09
|
[
"MIT"
] | null | null | null |
data_loader.py
|
isLinXu/AIToodlBox
|
bacdea77b35e370f728c9fd170ad15c0dd112a09
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
class Dataset():
def __init__(self, images, labels):
# convert from [0, 255] -> [0.0, 1.0]
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
@property # getter
def images(self):
return self._images
@property
def labels(self):
return self._labels
def extract_images(image_dir, name):
files = open(os.path.join(image_dir, name), 'rb')
files.read(16)
buf = files.read(28 * 28 * 60000)
images = np.frombuffer(buf, dtype=np.uint8)
# images = images.reshape(-1, 784)
images = images.reshape(-1, 1, 28, 28)
return images
def extract_labels(image_dir, name):
files = open(os.path.join(image_dir, name), 'rb')
files.read(8)
buf = files.read(28 * 28 * 10000)
labels = np.frombuffer(buf, dtype=np.uint8)
return labels
def read_data_sets(image_dir):
class DataSets():
pass
data_sets = DataSets()
TRAIN_IMAGES = 'train-images-idx3-ubyte'
TRAIN_LABELS = 'train-labels-idx1-ubyte'
TEST_IMAGES = 't10k-images-idx3-ubyte'
TEST_LABELS = 't10k-labels-idx1-ubyte'
VALIDATION_SIZE = 5000
train_images = extract_images(image_dir, TRAIN_IMAGES)
train_labels = extract_labels(image_dir, TRAIN_LABELS)
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
test_images = extract_images(image_dir, TEST_IMAGES)
test_labels = extract_labels(image_dir, TEST_LABELS)
data_sets.train = Dataset(train_images, train_labels)
data_sets.validation = Dataset(validation_images, validation_labels)
data_sets.test = Dataset(test_images, test_labels)
return data_sets
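# Hedged usage sketch: assumes the four MNIST idx files named above sit in
# `./mnist` (the path is illustrative). Shapes follow the reshapes and the
# VALIDATION_SIZE split above.
# if __name__ == '__main__':
#     data = read_data_sets('./mnist')
#     print(data.train.images.shape)       # (55000, 1, 28, 28)
#     print(data.validation.labels.shape)  # (5000,)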
| 26.361111
| 72
| 0.682824
| 255
| 1,898
| 4.843137
| 0.223529
| 0.0583
| 0.038866
| 0.051012
| 0.344939
| 0.131174
| 0.087449
| 0.087449
| 0.087449
| 0.087449
| 0
| 0.04064
| 0.209168
| 1,898
| 72
| 73
| 26.361111
| 0.782145
| 0.039515
| 0
| 0.083333
| 0
| 0
| 0.051648
| 0.049451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.020833
| 0.041667
| 0.041667
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
febfb7afb944937a4daedbf45bdc05b9348c3b75
| 305
|
py
|
Python
|
scripts/pdutil.py
|
travisdowns/sort-bench
|
97e18e08a5c43dec337f01ac7e3c55e5acb37507
|
[
"MIT"
] | 50
|
2019-05-23T23:17:19.000Z
|
2022-02-19T05:17:00.000Z
|
scripts/pdutil.py
|
travisdowns/sort-bench
|
97e18e08a5c43dec337f01ac7e3c55e5acb37507
|
[
"MIT"
] | 1
|
2021-04-11T09:38:44.000Z
|
2021-04-22T15:14:32.000Z
|
scripts/pdutil.py
|
travisdowns/sort-bench
|
97e18e08a5c43dec337f01ac7e3c55e5acb37507
|
[
"MIT"
] | 4
|
2019-05-23T23:08:05.000Z
|
2021-10-02T21:49:24.000Z
|
# renames duplicate columns by suffixing _1, _2 etc
class renamer():
def __init__(self):
self.d = dict()
def __call__(self, x):
if x not in self.d:
self.d[x] = 0
return x
else:
self.d[x] += 1
return "%s_%d" % (x, self.d[x])
| 23.461538
| 51
| 0.481967
| 44
| 305
| 3.090909
| 0.545455
| 0.183824
| 0.132353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.390164
| 305
| 12
| 52
| 25.416667
| 0.709677
| 0.160656
| 0
| 0
| 0
| 0
| 0.019685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec02f47aa5ff13585413d302b592d2cd4c27b9a
| 6,111
|
py
|
Python
|
sbc_ngs/pathway.py
|
UoMMIB/SequenceGenie
|
65fce1df487afd2de32e9d3ebc487874e71436bc
|
[
"MIT"
] | 5
|
2019-11-01T19:38:09.000Z
|
2021-03-29T16:13:56.000Z
|
sbc_ngs/pathway.py
|
UoMMIB/SequenceGenie
|
65fce1df487afd2de32e9d3ebc487874e71436bc
|
[
"MIT"
] | null | null | null |
sbc_ngs/pathway.py
|
UoMMIB/SequenceGenie
|
65fce1df487afd2de32e9d3ebc487874e71436bc
|
[
"MIT"
] | 3
|
2021-05-05T20:01:24.000Z
|
2022-03-11T15:20:51.000Z
|
'''
sbc-ngs (c) University of Manchester 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=no-member
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=unused-argument
# pylint: disable=wrong-import-order
from __future__ import division
import os
import subprocess
import sys
import uuid
import multiprocessing as mp
import pandas as pd
from sbc_ngs import demultiplex, results, utils, vcf_utils
class PathwayAligner():
'''Class to align NGS data to pathways.'''
def __init__(self, out_dir, in_dir, seq_files, min_length, max_read_files):
# Initialise project directory:
self.__out_dir = out_dir
if not os.path.exists(self.__out_dir):
os.makedirs(self.__out_dir)
self.__in_dir = in_dir
self.__seq_files = seq_files
self.__min_length = min_length
self.__max_read_files = max_read_files
self.__barcodes, self.__barcodes_df = \
demultiplex.get_barcodes(os.path.join(in_dir, 'barcodes.csv'))
# Backwards compatibility:
self.__barcodes_df.rename(columns={'actual_ice_id': 'known_seq_id'},
inplace=True)
self.__barcodes_df['known_seq_id'] = \
self.__barcodes_df['known_seq_id'].astype(str)
# Index sequence / template files:
for templ_filename in self.__seq_files.values():
subprocess.call(['bwa', 'index', templ_filename])
def score_alignments(self, tolerance, num_threads):
'''Score alignments.'''
num_threads = num_threads if num_threads > 0 else mp.cpu_count()
print('Running pathway with %d threads' % num_threads)
barcode_reads = demultiplex.demultiplex(self.__barcodes,
self.__in_dir,
self.__min_length,
self.__max_read_files,
self.__out_dir,
tolerance=tolerance,
num_threads=num_threads)
write_queue = mp.Manager().Queue()
results_thread = results.ResultsThread(sorted(self.__seq_files.keys()),
self.__barcodes_df,
write_queue)
results_thread.start()
for barcodes, reads_filename in barcode_reads.items():
_score_alignment(self.__out_dir,
barcodes,
reads_filename,
self.__get_seq_files(barcodes),
num_threads,
write_queue)
# Update summary:
results_thread.close()
results_thread.write(self.__out_dir)
def __get_seq_files(self, barcodes):
'''Get appropriate sequence files.'''
try:
seq_id = self.__barcodes_df.loc[barcodes, 'known_seq_id']
if seq_id:
return {seq_id: self.__seq_files[seq_id]}
except KeyError:
print('Unexpected barcodes: ' + str(barcodes))
return {}
return self.__seq_files
def _get_barcode_seq(barcode_seq_filename):
'''Get barcode seq dict.'''
barcode_seq = pd.read_csv(barcode_seq_filename,
dtype={'barcode': str, 'seq_id': str}) \
if barcode_seq_filename else None
return barcode_seq.set_index('barcode')['seq_id'].to_dict()
def _score_alignment(dir_name, barcodes, reads_filename, seq_files,
num_threads, write_queue):
'''Score an alignment.'''
for seq_id, seq_filename in seq_files.items():
barcode_dir_name = utils.get_dir(dir_name, barcodes, seq_id)
bam_filename = os.path.join(barcode_dir_name, '%s.bam' % barcodes[2])
vcf_filename = bam_filename.replace('.bam', '.vcf')
prc = subprocess.Popen(('bwa', 'mem',
'-x', 'ont2d',
'-O', '6',
'-t', str(num_threads),
seq_filename, reads_filename),
stdout=subprocess.PIPE)
subprocess.check_output(('samtools', 'sort',
'-@%i' % num_threads,
'-o', bam_filename, '-'),
stdin=prc.stdout)
prc.wait()
# Generate and analyse variants file:
prc = subprocess.Popen(['samtools',
'mpileup',
'-uvf',
seq_filename,
'-t', 'DP',
'-o', vcf_filename,
bam_filename])
prc.communicate()
vcf_utils.analyse(vcf_filename, seq_id, barcodes, write_queue)
print('Scored: %s against %s' % (reads_filename, seq_id))
def _get_seq_files(filename):
'''Get seq files.'''
seq_files = {}
if os.path.isdir(filename):
for fle in os.listdir(filename):
name, ext = os.path.splitext(os.path.basename(fle))
if ext == '.fasta':
seq_files[name] = os.path.join(filename, fle)
else:
seq_files[os.path.splitext(os.path.basename(filename))[0]] = filename
return seq_files
def main(args):
'''main method.'''
seq_files = {}
for seq_file in args[6:]:
seq_files.update(_get_seq_files(seq_file))
aligner = PathwayAligner(out_dir=os.path.join(args[0], str(uuid.uuid4())),
in_dir=args[1],
seq_files=seq_files,
min_length=int(args[2]),
max_read_files=int(args[3]))
aligner.score_alignments(int(args[4]), num_threads=int(args[5]))
if __name__ == '__main__':
main(sys.argv[1:])
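# Hedged CLI sketch (argument order per main() above; all paths/values illustrative):
# python pathway.py out_parent/ in_dir/ 200 8 12 4 templates.fasta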
| 34.139665
| 79
| 0.540992
| 655
| 6,111
| 4.699237
| 0.28855
| 0.05718
| 0.022742
| 0.015595
| 0.056205
| 0.050032
| 0
| 0
| 0
| 0
| 0
| 0.004578
| 0.35657
| 6,111
| 178
| 80
| 34.331461
| 0.77823
| 0.097693
| 0
| 0.035714
| 0
| 0
| 0.049441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.071429
| 0
| 0.1875
| 0.026786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec152e2fa033df2f5583f6a022b052c96a15f0b
| 877
|
py
|
Python
|
src/problem12.py
|
aitc-h/euler
|
6fc07c741c31a632ce6f11f65c11007cd6c7eb29
|
[
"MIT"
] | null | null | null |
src/problem12.py
|
aitc-h/euler
|
6fc07c741c31a632ce6f11f65c11007cd6c7eb29
|
[
"MIT"
] | null | null | null |
src/problem12.py
|
aitc-h/euler
|
6fc07c741c31a632ce6f11f65c11007cd6c7eb29
|
[
"MIT"
] | null | null | null |
"""
Problem 12
Highly divisible triangular number
"""
from utility.decorators import timeit, printit
from utility.math_f import sum_naturals_to_n, get_divisors
from math import ceil, sqrt
def div_count(n):
# Returns the count of divisors of a number
total = 0
for i in range(1, int(ceil(sqrt(n)))+1): # Check up to sqrt(n)
if n % i == 0: # If i is a factor then n/i is also a factor
total += 2
if i*i == n: # If i is the sqrt(n) then it is a square (only one factor)
total -= 1
return total
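# Why run() below splits on parity: the n-th triangular number is
# T_n = n*(n+1)/2, and gcd(n, n+1) == 1, so the divisor count is
# multiplicative across the two coprime factors once the division by 2 is
# absorbed into whichever of n and n+1 is even.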
@printit
@timeit
def run(m):
for n in range(1, 1000000):
if n % 2 == 0:
cnt = div_count(n/2) * div_count(n+1)
else:
cnt = div_count(n) * div_count((n+1)//2) # n and (n+1)//2 are coprime
if cnt >= m:
return sum_naturals_to_n(n)
if __name__ == "__main__":
n = 500
run(n)
| 23.078947
| 81
| 0.575827
| 147
| 877
| 3.292517
| 0.394558
| 0.082645
| 0.092975
| 0.057851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041736
| 0.31699
| 877
| 37
| 82
| 23.702703
| 0.766277
| 0.238312
| 0
| 0
| 0
| 0
| 0.012327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.291667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec193e201ee4720e007a3de6a116f0b7db806c8
| 469
|
py
|
Python
|
atcoder/abc183D_water_heater.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 31
|
2020-05-13T01:07:55.000Z
|
2021-07-13T07:53:26.000Z
|
atcoder/abc183D_water_heater.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 10
|
2020-05-20T07:22:09.000Z
|
2021-07-19T03:52:13.000Z
|
atcoder/abc183D_water_heater.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 14
|
2020-05-11T05:58:36.000Z
|
2021-12-07T03:20:43.000Z
|
# uninhm
# https://atcoder.jp/contests/abc183/tasks/abc183_d
# data structures, sorting
n, w = map(int, input().split())
needed = []
for _ in range(n):
s, t, p = map(int, input().split())
needed.append((s, p))
needed.append((t, -p))
needed.sort()
cum = 0 # running total of power drawn by intervals covering the current time
for i in range(len(needed)):
cum += needed[i][1]
# apply every event at this timestamp before checking the capacity
if i != len(needed)-1 and needed[i+1][0] == needed[i][0]:
continue
if cum > w:
print("No")
quit()
print("Yes")
| 18.038462
| 61
| 0.558635
| 74
| 469
| 3.513514
| 0.513514
| 0.080769
| 0.084615
| 0.123077
| 0.169231
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03352
| 0.236674
| 469
| 25
| 62
| 18.76
| 0.692737
| 0.172708
| 0
| 0
| 0
| 0
| 0.013021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec1c9c0fc7bf9b096e6c493b061466eec3c8572
| 635
|
py
|
Python
|
inc/ReiSlack.py
|
REI-Systems/REISystems-OGPS-NYC-heartbeat
|
126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0
|
[
"Apache-2.0"
] | null | null | null |
inc/ReiSlack.py
|
REI-Systems/REISystems-OGPS-NYC-heartbeat
|
126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0
|
[
"Apache-2.0"
] | null | null | null |
inc/ReiSlack.py
|
REI-Systems/REISystems-OGPS-NYC-heartbeat
|
126ffd4ee2e80f346b00c3b2241d30c6ce7d93c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
from slackclient import SlackClient
def send(msg="no msg", rsp="ok"):
channel = os.environ['SLACK_CHANNEL']
if "ok" == rsp:
if 'SKIP_OK_MESSAGES' in os.environ and os.environ['SKIP_OK_MESSAGES']:
return
if 'SLACK_OK_CHANNEL' in os.environ and os.environ['SLACK_OK_CHANNEL']:
channel = os.environ['SLACK_OK_CHANNEL']
msg = ":white_check_mark: " + msg
else:
msg = ":bomb: " + msg
sc = SlackClient(token=os.environ['SLACK_API_TOKEN'])
sc.api_call(
"chat.postMessage",
channel=channel,
text=msg
)
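# Minimal usage sketch (assumes SLACK_CHANNEL and SLACK_API_TOKEN are set in
# the environment; the messages are hypothetical):
# send(msg="heartbeat failed for NYC", rsp="error") # prefixed with :bomb:
# send(msg="all checks passed") # default rsp="ok" -> :white_check_mark: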
| 24.423077
| 79
| 0.609449
| 84
| 635
| 4.416667
| 0.416667
| 0.169811
| 0.150943
| 0.113208
| 0.22372
| 0.123989
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261417
| 635
| 25
| 80
| 25.4
| 0.791045
| 0.031496
| 0
| 0
| 0
| 0
| 0.260586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec30b1306550fa1e0b5402e2443b04d91d4ab0b
| 678
|
py
|
Python
|
examples/human.py
|
VetoProjects/AudioPython
|
18f5e2c10158bf8cfd15fceb84240a420bf9c677
|
[
"MIT"
] | 8
|
2015-04-28T15:31:44.000Z
|
2017-02-24T22:57:37.000Z
|
examples/human.py
|
VetoProjects/AudioPython
|
18f5e2c10158bf8cfd15fceb84240a420bf9c677
|
[
"MIT"
] | null | null | null |
examples/human.py
|
VetoProjects/AudioPython
|
18f5e2c10158bf8cfd15fceb84240a420bf9c677
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Idea taken from www.wavepot.com
import math
from AudioPython import *
from AudioPython.dsp import *
def bass_osc(n):
tri = triangle_wave(frequency=n, amplitude=0.24)
sine = sine_wave(frequency=n*32, amplitude=0.052)
while True:
yield next(tri) + next(sine)
def sub(gen, amp):
c = 0
tau = 2 * math.pi
while True:
c += 0.000014
yield math.sin(next(gen) * (1 + math.sin(1.1337 * c * tau)) * (2 + (1 +
math.sin(0.42 * c * tau)) * 15) + tau * c) * amp
n = 44100 / 500
channels = ((sub(bass_osc(n), 0.3),),)
samples = compute_samples(channels)
write_wavefile("temp.wav", samples)
| 21.870968
| 79
| 0.59587
| 105
| 678
| 3.790476
| 0.52381
| 0.052764
| 0.040201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080868
| 0.252212
| 678
| 30
| 80
| 22.6
| 0.704142
| 0.076696
| 0
| 0.105263
| 0
| 0
| 0.012821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec6c828f7c2c56e87c8344597efe1d8c44178c3
| 986
|
py
|
Python
|
hood/urls.py
|
virginiah894/Hood-alert
|
9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873
|
[
"MIT"
] | 1
|
2020-03-10T18:01:51.000Z
|
2020-03-10T18:01:51.000Z
|
hood/urls.py
|
virginiah894/Hood-alert
|
9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873
|
[
"MIT"
] | 4
|
2020-06-06T01:09:13.000Z
|
2021-09-08T01:36:28.000Z
|
hood/urls.py
|
virginiah894/Hood-alert
|
9c00ca7e4bec3d8c46ff4b9b74f2f770f1c60873
|
[
"MIT"
] | null | null | null |
from django.urls import path , include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', views.home, name='home'),
path('profile/', views.profile, name='profile'),
path('update_profile/', views.update_profile, name='update'),
path('updates/', views.updates, name='updates'),
path('new/update', views.new_update, name='newUpdate'),
path('posts', views.post, name='post'),
path('new/post', views.new_post, name='newPost'),
path('health', views.hosy, name='hosy'),
path('search', views.search_results, name='search_results'),
path('adminst', views.administration, name='admin'),
path('business', views.local_biz, name='biz'),
path('new/business', views.new_biz, name='newBiz'),
path('create/profile', views.create_profile, name='createProfile'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 30.8125
| 81
| 0.684584
| 125
| 986
| 5.304
| 0.32
| 0.045249
| 0.042232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143002
| 986
| 32
| 81
| 30.8125
| 0.784615
| 0
| 0
| 0
| 0
| 0
| 0.198582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.190476
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec70c2989068076b5623aeccec1da14a757918e
| 962
|
py
|
Python
|
base/client/TargetTracker.py
|
marlamade/generals-bot
|
b485e416a2c4fc307e7d015ecdb70e278c4c1417
|
[
"MIT"
] | null | null | null |
base/client/TargetTracker.py
|
marlamade/generals-bot
|
b485e416a2c4fc307e7d015ecdb70e278c4c1417
|
[
"MIT"
] | null | null | null |
base/client/TargetTracker.py
|
marlamade/generals-bot
|
b485e416a2c4fc307e7d015ecdb70e278c4c1417
|
[
"MIT"
] | null | null | null |
from typing import List
from .tile import Tile
class TargetTracker(list):
"""
Track the targets that might be good to attack/explore
"""
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
self.turn_last_updated: int = 0
def update_list(self, target_list: List[Tile], current_turn):
self.extend(target_list)
self.turn_last_updated = current_turn
def get_target(self, turn):
# print(self)
if turn > 1.5 * self.turn_last_updated + 30:
# the list is stale: the current turn is well past 1.5x the turn of the last
# update (plus a grace period), so throw it away
while self:
self.pop()
while self:
target = self[-1]
if target.is_city or target.is_mountain or target.is_basic or target.is_general:
self.pop()
continue
return target
return None
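# Minimal usage sketch (hypothetical tiles; assumes Tile exposes the boolean
# flags checked above):
# tracker = TargetTracker()
# tracker.update_list([city_tile, plain_tile], current_turn=10)
# tracker.get_target(turn=12) # pops special tiles, returns a plain one or None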
| 29.151515
| 92
| 0.591476
| 130
| 962
| 4.192308
| 0.515385
| 0.058716
| 0.066055
| 0.104587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009274
| 0.327443
| 962
| 32
| 93
| 30.0625
| 0.833076
| 0.182952
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fec96362f67167dcf46b5bbb0c6f46d9d1526eeb
| 368
|
py
|
Python
|
6/max_average_subarray1.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 4
|
2018-03-07T02:56:03.000Z
|
2021-06-15T05:43:31.000Z
|
6/max_average_subarray1.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | null | null | null |
6/max_average_subarray1.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:05:15.000Z
|
2021-09-02T12:05:15.000Z
|
from typing import List
class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
pre_sum = sum(nums[0:k])
max_sum = pre_sum
for i in range(len(nums)-k):
next_sum = pre_sum - nums[i] + nums[i + k]
if next_sum > max_sum:
max_sum = next_sum
pre_sum = next_sum
return max_sum/k
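# Example (the standard LeetCode 643 case): the best length-4 window in
# [1, 12, -5, -6, 50, 3] is [12, -5, -6, 50], so
# Solution().findMaxAverage([1, 12, -5, -6, 50, 3], 4) == 51 / 4 == 12.75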
| 26.285714
| 63
| 0.516304
| 54
| 368
| 3.296296
| 0.407407
| 0.134831
| 0.151685
| 0.146067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004386
| 0.380435
| 368
| 13
| 64
| 28.307692
| 0.776316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fece96dc896e75a634255768c6898114b3c6f1c0
| 9,568
|
py
|
Python
|
maps/foliumMaps.py
|
selinerguncu/Yelp-Spatial-Analysis
|
befbcb927ef225bda9ffaea0fd41a88344f9693c
|
[
"MIT"
] | null | null | null |
maps/foliumMaps.py
|
selinerguncu/Yelp-Spatial-Analysis
|
befbcb927ef225bda9ffaea0fd41a88344f9693c
|
[
"MIT"
] | null | null | null |
maps/foliumMaps.py
|
selinerguncu/Yelp-Spatial-Analysis
|
befbcb927ef225bda9ffaea0fd41a88344f9693c
|
[
"MIT"
] | null | null | null |
import folium
from folium import plugins
import numpy as np
import sqlite3 as sqlite
import os
import sys
import pandas as pd
#extract data from yelp DB and clean it:
DB_PATH = "/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/data/yelpCleanDB.sqlite"
conn = sqlite.connect(DB_PATH)
#######################################
############ organize data ############
#######################################
def organizeData(mapParameters):
business = str(mapParameters['business'])
region = str(mapParameters['region'])
price = str(mapParameters['price'])
rating = float(mapParameters['rating'])
print('mapParameters', mapParameters)
# if 'zipcode' in mapParameters.keys():
# zipcode = str(mapParameters['zipcode'])
# sql = "SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count FROM Business WHERE query_category = '%s' AND city = '%s' AND zip_code = '%s' AND price = '%s' AND rating = '%r'" % (business, city, zipcode, price, rating)
# coordinates = pd.read_sql_query(sql, conn)
# else:
# sql = "SELECT longitude, latitude, query_latitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count FROM Business WHERE query_category = '%s' AND city = '%s' AND price = '%s' AND rating = '%r'" % (business, city, price, rating)
# coordinates = pd.read_sql_query(sql, conn)
# print('here')
# NB: select query_longitude (not query_latitude twice) so the None fallback
# below can use it, and quote every interpolated value consistently.
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, region)
if region == 'Bay Area':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city != '%s' ''' % (business, price, rating, 'San Francisco')
elif region == 'Peninsula':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city != '%s' AND city != '%s' AND city != '%s' ''' % (business, price, rating, 'San Francisco', 'San Francisco - Downtown', 'San Francisco - Outer')
elif region == 'San Francisco':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = '%s' ''' % (business, price, rating, 'San Francisco')
elif region == 'Downtown SF':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = '%s' ''' % (business, price, rating, 'San Francisco - Downtown')
elif region == 'Outer SF':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND city = '%s' ''' % (business, price, rating, 'San Francisco - Outer')
elif region == 'East Bay':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, 'eastBay')
elif region == 'North Bay':
sql = '''SELECT longitude, latitude, query_longitude, query_latitude, query_category, query_price, city, zip_code, price, rating, review_count, region
FROM CleanBusinessData
WHERE query_category = '%s' AND price = '%s' AND rating = '%r' AND region = '%s' ''' % (business, price, rating, 'northBay')
coordinates = pd.read_sql_query(sql, conn)
if len(coordinates) <= 1860:
for i in range(len(coordinates)):
if coordinates["longitude"][i] == None:
coordinates["longitude"][i] = coordinates["query_longitude"][i]
if coordinates["latitude"][i] == None:
coordinates["latitude"][i] = coordinates["query_latitude"][i]
# coordinates = []
# for i in range(len(coords)): #max ~1860 coordinates
# coordinate = []
# coordinate.append(coords["latitude"][i])
# coordinate.append(coords["longitude"][i])
# coordinates.append(coordinate)
# # convert list of lists to list of tuples
# coordinates = [tuple([i[0],i[1]]) for i in coordinates]
# # print(coordinates[0:10])
return coordinates
# else:
# print("Too many data points; cannot be mapped!")
#######################################
##### visualize the coordinates #######
#######################################
def makeMarkerMap(coordinates):
# # get center of map
# meanlat = np.mean([float(i[0]) for i in coordinates])
# meanlon = np.mean([float(i[1]) for i in coordinates])
print('coordinates', len(coordinates))
meanlat = np.mean(coordinates['latitude'])
meanlon = np.mean(coordinates['longitude'])
#Initialize map
mapa = folium.Map(location=[meanlat, meanlon],
tiles='Cartodb Positron', zoom_start=10)
# add markers
for i in range(len(coordinates)):
# create popup on click
html="""
Rating: {}<br>
Popularity: {}<br>
Price: {}<br>
"""
html = html.format(coordinates["rating"][i],\
coordinates["review_count"][i],\
coordinates["price"][i])
iframe = folium.IFrame(html=html, width=150, height=100) # render the popup HTML in an IFrame
popup = folium.Popup(iframe, max_width=2650)
# add marker to map
folium.Marker(tuple([coordinates['latitude'][i],coordinates['longitude'][i]]), popup=popup,).add_to(mapa)
return mapa.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumMarkers.html")
#######################################
####### cluster nearby points #########
#######################################
def makeClusterMap(coordinates):
from folium.plugins import MarkerCluster # for marker clusters
meanlat = np.mean(coordinates['latitude'])
meanlon = np.mean(coordinates['longitude'])
# initialize map
mapa = folium.Map(location=[meanlat, meanlon],
tiles='Cartodb Positron', zoom_start=10)
coordinatesFinal = []
for i in range(len(coordinates)):
# add marker clusters
coordinate = []
coordinate.append(coordinates["latitude"][i])
coordinate.append(coordinates["longitude"][i])
coordinatesFinal.append(coordinate)
# convert list of lists to list of tuples
coordinatesFinal = [tuple([i[0],i[1]]) for i in coordinatesFinal]
# print('coordinatesFinal', len(coordinatesFinal))
mapa.add_child(MarkerCluster(locations=coordinatesFinal))
return mapa.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumCluster.html")
#######################################
####### generate a heat map ###########
#######################################
def makeHeatmapMap(coordinates):
from folium.plugins import HeatMap
meanlat = np.mean(coordinates['latitude'])
meanlon = np.mean(coordinates['longitude'])
# initialize map
mapa = folium.Map(location=[meanlat, meanlon],
tiles='Cartodb Positron', zoom_start=10) #tiles='OpenStreetMap'
coordinatesFinal = []
if len(coordinates) > 1090: #max len is 1090 for the Heat Map
for i in range(1090):
coordinate = []
coordinate.append(coordinates["latitude"][i])
coordinate.append(coordinates["longitude"][i])
coordinatesFinal.append(coordinate)
else:
for i in range(len(coordinates)):
coordinate = []
coordinate.append(coordinates["latitude"][i])
coordinate.append(coordinates["longitude"][i])
coordinatesFinal.append(coordinate)
# convert list of lists to list of tuples
coordinatesFinal = [tuple([i[0],i[1]]) for i in coordinatesFinal]
# add heat
mapa.add_child(HeatMap(coordinatesFinal))
# mapa.add_child(HeatMap((tuple([coordinates['latitude'][i],coordinates['longitude'][i]]))))
return mapa.save("/Users/selinerguncu/Desktop/PythonProjects/Fun Projects/Yelp/yelp/static/foliumHeatmap.html")
# saving the map as an image doesn't seem to work
# import os
# import time
# from selenium import webdriver
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# # for different tiles: https://github.com/python-visualization/folium
# delay=5
# fn='foliumHeatmap.html'
# tmpurl='file:///Users/selinerguncu/Desktop/PythonProjects/Fun%20Projects/Yelp%20Project/Simulation/foliumHeatmap.html'.format(path=os.getcwd(),mapfile=fn)
# mapa.save(fn)
# firefox_capabilities = DesiredCapabilities.FIREFOX
# firefox_capabilities['marionette'] = True
# browser = webdriver.Firefox(capabilities=firefox_capabilities, executable_path='/Users/selinerguncu/Downloads/geckodriver')
# browser.get(tmpurl)
# #Give the map tiles some time to load
# time.sleep(delay)
# browser.save_screenshot('mynewmap.png')
# browser.quit()
| 44.502326
| 302
| 0.666074
| 1,121
| 9,568
| 5.599465
| 0.183765
| 0.062132
| 0.066911
| 0.082842
| 0.616537
| 0.59025
| 0.573044
| 0.536721
| 0.518241
| 0.494982
| 0
| 0.006633
| 0.164925
| 9,568
| 214
| 303
| 44.71028
| 0.778974
| 0.277069
| 0
| 0.436364
| 0
| 0.172727
| 0.46508
| 0.054543
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.081818
| 0
| 0.154545
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed030e5255f1c16fe14660b2bdc69ee621a5da4
| 706
|
py
|
Python
|
app/integrations/opsgenie.py
|
cds-snc/sre-bot
|
b34cdaba357fccbcdbaac1e1ac70ebbe408d7316
|
[
"MIT"
] | null | null | null |
app/integrations/opsgenie.py
|
cds-snc/sre-bot
|
b34cdaba357fccbcdbaac1e1ac70ebbe408d7316
|
[
"MIT"
] | 12
|
2022-02-21T18:57:07.000Z
|
2022-03-31T03:06:48.000Z
|
app/integrations/opsgenie.py
|
cds-snc/sre-bot
|
b34cdaba357fccbcdbaac1e1ac70ebbe408d7316
|
[
"MIT"
] | null | null | null |
import json
import os
from urllib.request import Request, urlopen
OPSGENIE_KEY = os.getenv("OPSGENIE_KEY", None)
def get_on_call_users(schedule):
content = api_get_request(
f"https://api.opsgenie.com/v2/schedules/{schedule}/on-calls",
{"name": "GenieKey", "token": OPSGENIE_KEY},
)
try:
data = json.loads(content)
return list(map(lambda x: x["name"], data["data"]["onCallParticipants"]))
except Exception:
return []
def api_get_request(url, auth):
req = Request(url)
req.add_header("Authorization", f"{auth['name']} {auth['token']}")
conn = urlopen(req) # nosec - Scheme is hardcoded to https
return conn.read().decode("utf-8")
| 28.24
| 81
| 0.651558
| 93
| 706
| 4.827957
| 0.591398
| 0.073497
| 0.057906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003515
| 0.194051
| 706
| 24
| 82
| 29.416667
| 0.785589
| 0.050992
| 0
| 0
| 0
| 0
| 0.239521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed4560e0eada1a8875a46b508b9927cb620d08a
| 8,991
|
py
|
Python
|
jenkinsapi_tests/unittests/test_nodes.py
|
kkpattern/jenkinsapi
|
6b0091c5f44e4473c0a3d5addbfdc416bc6515ca
|
[
"MIT"
] | 556
|
2016-07-27T03:42:48.000Z
|
2022-03-31T15:05:19.000Z
|
jenkinsapi_tests/unittests/test_nodes.py
|
kkpattern/jenkinsapi
|
6b0091c5f44e4473c0a3d5addbfdc416bc6515ca
|
[
"MIT"
] | 366
|
2016-07-24T02:51:45.000Z
|
2022-03-24T17:02:45.000Z
|
jenkinsapi_tests/unittests/test_nodes.py
|
kkpattern/jenkinsapi
|
6b0091c5f44e4473c0a3d5addbfdc416bc6515ca
|
[
"MIT"
] | 308
|
2016-08-01T03:35:45.000Z
|
2022-03-31T01:06:57.000Z
|
import pytest
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.nodes import Nodes
from jenkinsapi.node import Node
DATA0 = {
'assignedLabels': [{}],
'description': None,
'jobs': [],
'mode': 'NORMAL',
'nodeDescription': 'the master Jenkins node',
'nodeName': '',
'numExecutors': 2,
'overallLoad': {},
'primaryView': {'name': 'All', 'url': 'http://halob:8080/'},
'quietingDown': False,
'slaveAgentPort': 0,
'unlabeledLoad': {},
'useCrumbs': False,
'useSecurity': False,
'views': [
{'name': 'All', 'url': 'http://halob:8080/'},
{'name': 'FodFanFo', 'url': 'http://halob:8080/view/FodFanFo/'}
]
}
DATA1 = {
'busyExecutors': 0,
'computer': [
{
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671924924416
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 3174686720,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671924924416
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'bobnit',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 4261},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/home/sal/jenkins',
'size': 169784860672
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 29},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 4570710016,
'availableSwapSpace': 12195983360,
'totalPhysicalMemory': 8374497280,
'totalSwapSpace': 12195983360
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 249737277440
}
},
'numExecutors': 1,
'offline': True,
'offlineCause': {},
'oneOffExecutors': [],
'temporarilyOffline': False
},
{
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None
},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
],
'displayName': 'nodes',
'totalExecutors': 2
}
DATA2 = {
'actions': [],
'displayName': 'master',
'executors': [{}, {}],
'icon': 'computer.png',
'idle': True,
'jnlpAgent': False,
'launchSupported': True,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': 'Linux (amd64)',
'hudson.node_monitors.ClockMonitor': {'diff': 0},
'hudson.node_monitors.DiskSpaceMonitor': {
'path': '/var/lib/jenkins',
'size': 671942561792
},
'hudson.node_monitors.ResponseTimeMonitor': {'average': 0},
'hudson.node_monitors.SwapSpaceMonitor': {
'availablePhysicalMemory': 2989916160,
'availableSwapSpace': 17163087872,
'totalPhysicalMemory': 16810180608,
'totalSwapSpace': 17163087872
},
'hudson.node_monitors.TemporarySpaceMonitor': {
'path': '/tmp',
'size': 671942561792
}
},
'numExecutors': 2,
'offline': False,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
DATA3 = {
'actions': [],
'displayName': 'halob',
'executors': [{}],
'icon': 'computer-x.png',
'idle': True,
'jnlpAgent': True,
'launchSupported': False,
'loadStatistics': {},
'manualLaunchAllowed': True,
'monitorData': {
'hudson.node_monitors.ArchitectureMonitor': None,
'hudson.node_monitors.ClockMonitor': None,
'hudson.node_monitors.DiskSpaceMonitor': None,
'hudson.node_monitors.ResponseTimeMonitor': None,
'hudson.node_monitors.SwapSpaceMonitor': None,
'hudson.node_monitors.TemporarySpaceMonitor': None},
'numExecutors': 1,
'offline': True,
'offlineCause': None,
'oneOffExecutors': [],
'temporarilyOffline': False
}
@pytest.fixture(scope='function')
def nodes(monkeypatch):
def fake_jenkins_poll(cls, tree=None): # pylint: disable=unused-argument
return DATA0
monkeypatch.setattr(Jenkins, '_poll', fake_jenkins_poll)
def fake_nodes_poll(cls, tree=None): # pylint: disable=unused-argument
return DATA1
monkeypatch.setattr(Nodes, '_poll', fake_nodes_poll)
jenkins = Jenkins('http://foo:8080')
return jenkins.get_nodes()
def fake_node_poll(self, tree=None): # pylint: disable=unused-argument
"""
Fakes a poll of data by returning the correct section of the DATA1 test block.
"""
for node_poll in DATA1['computer']:
if node_poll['displayName'] == self.name:
return node_poll
return DATA2
def test_repr(nodes):
# Can we produce a repr string for this object
repr(nodes)
def test_baseurl(nodes):
assert nodes.baseurl == 'http://foo:8080/computer'
def test_get_master_node(nodes, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
node = nodes['master']
assert isinstance(node, Node)
def test_get_nonmaster_node(nodes, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
node = nodes['halob']
assert isinstance(node, Node)
def test_iterkeys(nodes):
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set([n for n in nodes.iterkeys()])
assert actual_names == expected_names
def test_keys(nodes):
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set(nodes.keys())
assert actual_names == expected_names
def items_test_case(nodes_method, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set()
for name, node in nodes_method():
assert name == node.name
assert isinstance(node, Node)
actual_names.add(name)
assert actual_names == expected_names
def test_iteritems(nodes, monkeypatch):
items_test_case(nodes.iteritems, monkeypatch)
def test_items(nodes, monkeypatch):
items_test_case(nodes.items, monkeypatch)
def values_test_case(nodes_method, monkeypatch):
monkeypatch.setattr(Node, '_poll', fake_node_poll)
expected_names = set(['master', 'bobnit', 'halob'])
actual_names = set()
for node in nodes_method():
assert isinstance(node, Node)
actual_names.add(node.name)
assert actual_names == expected_names
def test_itervalues(nodes, monkeypatch):
values_test_case(nodes.itervalues, monkeypatch)
def test_values(nodes, monkeypatch):
values_test_case(nodes.values, monkeypatch)
| 30.686007
| 82
| 0.571015
| 753
| 8,991
| 6.677291
| 0.205843
| 0.059666
| 0.107399
| 0.043755
| 0.730509
| 0.676611
| 0.614161
| 0.581742
| 0.561058
| 0.510541
| 0
| 0.04
| 0.293738
| 8,991
| 292
| 83
| 30.791096
| 0.751811
| 0.024469
| 0
| 0.547325
| 0
| 0
| 0.341522
| 0.138711
| 0
| 0
| 0
| 0
| 0.041152
| 1
| 0.065844
| false
| 0
| 0.016461
| 0.00823
| 0.102881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed6388f5baf349f9563436e423b3f0bfd27a9e9
| 790
|
py
|
Python
|
message_gen/legacy/messages/ClientGetCloudHostResponse.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 2
|
2020-04-15T11:20:59.000Z
|
2021-05-12T13:01:36.000Z
|
message_gen/legacy/messages/ClientGetCloudHostResponse.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 1
|
2018-06-05T04:48:56.000Z
|
2018-06-05T04:48:56.000Z
|
message_gen/legacy/messages/ClientGetCloudHostResponse.py
|
zadjii/nebula
|
50c4ec019c9f7eb15fe105a6c53a8a12880e281c
|
[
"MIT"
] | 1
|
2018-08-15T06:45:46.000Z
|
2018-08-15T06:45:46.000Z
|
from messages.SessionMessage import SessionMessage
from msg_codes import CLIENT_GET_CLOUD_HOST_RESPONSE
__author__ = 'Mike'
class ClientGetCloudHostResponse(SessionMessage):
def __init__(self, session_id=None, cname=None, ip=None, port=None, wsport=None):
super(ClientGetCloudHostResponse, self).__init__(session_id)
self.type = CLIENT_GET_CLOUD_HOST_RESPONSE
self.cname = cname
self.ip = ip
self.port = port
self.wsport = wsport
@staticmethod
def deserialize(json_dict):
msg = SessionMessage.deserialize(json_dict)
msg.cname = json_dict['cname']
msg.ip = json_dict['ip']
msg.port = json_dict['port']
msg.wsport = json_dict['wsport']
return msg
| 34.347826
| 86
| 0.698734
| 96
| 790
| 5.40625
| 0.34375
| 0.092486
| 0.080925
| 0.104046
| 0.150289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21519
| 790
| 22
| 87
| 35.909091
| 0.837097
| 0
| 0
| 0
| 0
| 0
| 0.026582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed71aa40e24235555d670228f89196c28a60884
| 8,072
|
py
|
Python
|
research/route_diversity/timeline_from_csv.py
|
jweckstr/journey-diversity-scripts
|
7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae
|
[
"MIT"
] | null | null | null |
research/route_diversity/timeline_from_csv.py
|
jweckstr/journey-diversity-scripts
|
7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae
|
[
"MIT"
] | null | null | null |
research/route_diversity/timeline_from_csv.py
|
jweckstr/journey-diversity-scripts
|
7b754c5f47a77ee1d630a0b26d8ec5cf6be202ae
|
[
"MIT"
] | null | null | null |
"""
PSEUDOCODE:
Load csv to pandas
csv will be of form: city, event type, event name, year, theme_A, theme_B, theme_C...
City can contain multiple cities, separated by TBD?
Check min and max year
Open figure,
Deal with events in same year, offset a little bit?
For city in cities:
for event in events
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import OrderedDict
from numpy import cos, sin, deg2rad, arange
from matplotlib import gridspec
from pylab import Circle
def clean_years(year):
if isinstance(year, str):
if len(year) > 4:
year = year[:4]
if year == "?":
return year
return int(year)
def split_to_separate_rows(df, column, split_key):
s = df[column].str.split(split_key, expand=True).stack()
i = s.index.get_level_values(0)
df2 = df.loc[i].copy()
df2[column] = s.values
return df2
def slot_location(n_slots, which_slot):
if n_slots == 1:
return (0, 0)
else:
coord_list = []
for i in range(0, n_slots):
angle = (360 / n_slots) * i
coord_list.append((offset * sin(deg2rad(angle)), offset * cos(deg2rad(angle))))
return coord_list[which_slot]
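# Example: with offset = 0.15 (defined below), two items sharing a (city, year)
# cell get slot_location(2, 0) == (0.0, 0.15) and slot_location(2, 1) close to
# (0.0, -0.15): evenly spaced points on a small circle around the nominal spot.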
base_path = "/home/clepe/route_diversity/data/plannord_tables/"
themes_path = base_path + "themes.csv"
events_path = base_path + "events.csv"
year_length = 1
city_height = 1
size = 0.1
theme_length = 0.5
theme_width = 1
offset = 0.15
event_offset = 0.15
start_year = 2000
end_year = 2024
color_dict = {"Land use or infrastructure planning": "#66c2a5",
"Service level analysis or definitions": "#fc8d62",
"PTN plan or comparison": "#8da0cb",
"PT strategy": "#e78ac3",
"Transport system plan or strategy": "#a6d854",
'Other': "k"}
type_dict = {"Conference procedings": "Other",
'PTS whitepaper': "Other",
'Replies from hearing': "Other",
'PT authority strategy': "Other",
'PTS white paper': "Other",
'PT "product characterization"': "Other",
'Other': "Other",
"Infrastructure analysis or plan": "Land use or infrastructure planning",
"Master planning": "Land use or infrastructure planning",
"PT service level analysis": "Service level analysis or definitions",
"PT service level definitions": "Service level analysis or definitions",
"PTN comparison": "PTN plan or comparison",
"PTS plan": "PTN plan or comparison",
"PTS strategy": "PT strategy",
"Transport system plan": "Transport system plan or strategy",
"Transport system strategy": "Transport system plan or strategy"}
event_offsets = {"LRT/tram": event_offset,
"BHLS or large route overhaul": 0,
"BRT/superbus": -1 * event_offset}
event_colors = {"LRT/tram": "g",
"BHLS or large route overhaul": "#0042FF",
"BRT/superbus": "#001C6E"}
theme_angles = {"through_routes": 0, "network_simplicity": 120, "trunk_network": 240}
themes_df = pd.read_csv(themes_path)
events_df = pd.read_csv(events_path)
themes_df = themes_df[pd.notnull(themes_df['year'])]
events_df = events_df[pd.notnull(events_df['year'])]
themes_df["year"] = themes_df.apply(lambda x: clean_years(x.year), axis=1)
events_df["year"] = events_df.apply(lambda x: clean_years(x.year), axis=1)
themes_df = split_to_separate_rows(themes_df, "city", "/")
themes_df.loc[themes_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
events_df.loc[events_df['city'] == "Fredrikstad-Sarpsborg", 'city'] = "F:stad-S:borg"
themes_df.loc[themes_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
events_df.loc[events_df['city'] == "Porsgrunn-Skien", 'city'] = "P:grunn-Skien"
city_year_slots = {}
for i, row in themes_df[["city", "year"]].append(events_df[["city", "year"]]).iterrows():
if (row.city, row.year) in city_year_slots.keys():
city_year_slots[(row.city, row.year)] += 1
else:
city_year_slots[(row.city, row.year)] = 1
city_year_cur_slot = {key: 0 for key, value in city_year_slots.items()}
cities = [x for x in set(themes_df.city.dropna().tolist()) if "/" not in x]
cities.sort(reverse=True)
themes_df["type"] = themes_df.apply(lambda row: type_dict[row.type], axis=1)
types = [x for x in set(themes_df.type.dropna().tolist())]
fig = plt.figure()
ax1 = plt.subplot(111)
#gs = gridspec.GridSpec(1, 2, width_ratios=[1, 9])
#ax1 = plt.subplot(gs[1])
#ax2 = plt.subplot(gs[0], sharey=ax1)
"""
gs1 = gridspec.GridSpec(3, 3)
gs1.update(right=.7, wspace=0.05)
ax1 = plt.subplot(gs1[:-1, :])
ax2 = plt.subplot(gs1[-1, :-1])
ax3 = plt.subplot(gs1[-1, -1])
"""
groups = themes_df.groupby('type')
for i, row in events_df.iterrows():
e_offset = event_offsets[row.type]
c = event_colors[row.type]
y = city_height * cities.index(row.city) + e_offset
x = row.year
ax1.plot([row.year, end_year+1], [y, y], c=c, marker='o', label=row.type, zorder=2, markersize=3)
for name, group in groups:
for i, row in group.iterrows():
n_slots = city_year_slots[(row.city, row.year)]
cur_slot = city_year_cur_slot[(row.city, row.year)]
city_year_cur_slot[(row.city, row.year)] += 1
slot_offset = slot_location(n_slots, cur_slot)
y = city_height * cities.index(row.city) + slot_offset[0]
x = row.year + slot_offset[1]
if row.year < start_year:
continue
#circle = Circle((x, y), color=color_dict[name], radius=size, label=name, zorder=5)
ax1.scatter(x, y, color=color_dict[name], s=5, label=name, zorder=5) #add_patch(circle)
for theme, angle in theme_angles.items():
if pd.notnull(row[theme]):
ax1.plot([x, x + theme_length * sin(deg2rad(angle))], [y, y + theme_length * cos(deg2rad(angle))],
c=color_dict[name], zorder=10, linewidth=theme_width)
handles, labels = ax1.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
#ax1.legend(by_label.values(), by_label.keys())
# TODO: add year for GTFS feed as vertical line
#ax2 = fig.add_subplot(121, sharey=ax1)
for city in cities:
y = city_height * cities.index(city)
x = end_year
ax1.text(x, y, city, horizontalalignment='left', verticalalignment='center', fontsize=10) #, bbox=dict(boxstyle="square", facecolor='white', alpha=0.5, edgecolor='white'))
ax1.plot([start_year-1, end_year+1], [y, y], c="grey", alpha=0.5, linewidth=0.1, zorder=1)
ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
ax1.set_yticks([])
ax1.set_yticklabels([])
#ax2.axis('off')
ax1.set_xlim(start_year, end_year)
ax1.set_aspect("equal")
plt.xticks(arange(start_year, end_year, 5))
plt.savefig(base_path+'timeline.pdf', format="pdf", dpi=300, bbox_inches='tight')
fig = plt.figure()
ax2 = plt.subplot(111)
ax2.legend(by_label.values(), by_label.keys(), loc='center', #bbox_to_anchor=(0.5, -0.05),
fancybox=True, shadow=True, ncol=2)
ax2.axis('off')
plt.savefig(base_path+'legend.pdf', format="pdf", dpi=300, bbox_inches='tight')
#plt.show()
# create legend for themes in a separate figure
fig = plt.figure()
ax3 = plt.subplot(111)
x = 0
y = 0
circle = Circle((x, y), color="black", radius=size, zorder=5)
ax3.add_patch(circle)
for theme, angle in theme_angles.items():
x1 = x + theme_length * sin(deg2rad(angle))
y1 = y + theme_length * cos(deg2rad(angle))
x2 = x + theme_length * sin(deg2rad(angle)) * 1.2
y2 = y + theme_length * cos(deg2rad(angle)) * 1.2
ax3.annotate(theme.capitalize().replace("_", " "), (x1, y1), (x2, y2), horizontalalignment='center',
verticalalignment='center', color="red", zorder=10, size=15)
ax3.plot([x, x1], [y, y1], c="black",
linewidth=10*theme_width, zorder=1)
ax3.set_aspect("equal")
ax3.axis('off')
plt.savefig(base_path+'timeline_themes.pdf', format="pdf", dpi=300, bbox_inches='tight')
| 35.559471
| 175
| 0.650768
| 1,192
| 8,072
| 4.262584
| 0.250839
| 0.028341
| 0.015351
| 0.016532
| 0.301712
| 0.205471
| 0.138162
| 0.102539
| 0.046841
| 0.031096
| 0
| 0.032179
| 0.195367
| 8,072
| 226
| 176
| 35.716814
| 0.750115
| 0.101834
| 0
| 0.058442
| 0
| 0
| 0.189748
| 0.012886
| 0
| 0
| 0
| 0.004425
| 0
| 1
| 0.019481
| false
| 0
| 0.045455
| 0
| 0.097403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed896e00f41aed0c3e19962de5fce02825adb90
| 2,408
|
py
|
Python
|
api/ops/tasks/detection/core/detectionTypes/valueThreshold.py
|
LeiSoft/CueObserve
|
cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e
|
[
"Apache-2.0"
] | 149
|
2021-07-16T13:37:30.000Z
|
2022-03-21T10:13:15.000Z
|
api/ops/tasks/detection/core/detectionTypes/valueThreshold.py
|
LeiSoft/CueObserve
|
cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e
|
[
"Apache-2.0"
] | 61
|
2021-07-15T06:39:05.000Z
|
2021-12-27T06:58:10.000Z
|
api/ops/tasks/detection/core/detectionTypes/valueThreshold.py
|
LeiSoft/CueObserve
|
cc5254df7d0cb817a8b3ec427f5cb54a1d420f7e
|
[
"Apache-2.0"
] | 22
|
2021-07-19T07:20:49.000Z
|
2022-03-21T10:13:16.000Z
|
import dateutil.parser as dp
from dateutil.relativedelta import relativedelta
import pandas as pd, datetime as dt
def checkLatestAnomaly(df, operationCheckStr):
"""
Looks up latest anomaly in dataframe
"""
anomalies = df[df["anomaly"] == 15]
if anomalies.shape[0] > 0:
lastAnomalyRow = anomalies.iloc[-1]
anomalyTime = lastAnomalyRow["ds"]
return {
"operationCheck": operationCheckStr,
"value": float(lastAnomalyRow["y"]),
"anomalyTimeISO": dp.parse(anomalyTime).isoformat(),
"anomalyTime": dp.parse(anomalyTime).timestamp() * 1000,
}
return {}
def valueThresholdDetect(df, granularity, operator, value1, value2):
"""
Method to perform anomaly detection on given dataframe
"""
value1 = int(value1)
lowerVal = value1
upperVal = value1
if value2 != "null":
value2 = int(value2)
lowerVal = min(value1, value2)
upperVal = max(value1, value2)
operationStrDict = {
"greater": f'greater than {value1}',
"lesser": f'lesser than {value1}',
"!greater": f'not greater than {value1}',
"!lesser": f'not lesser than {value1}',
"between": f'between {lowerVal} and {upperVal}',
"!between": f'not between {lowerVal} and {upperVal}'
}
operationDict = {
"greater": '(df["y"] > value1) * 14 + 1',
"lesser": '(df["y"] < value1) * 14 + 1',
"!greater": '(df["y"] <= value1) * 14 + 1',
"!lesser": '(df["y"] >= value1) * 14 + 1',
"between": '((df["y"] >= lowerVal) & (df["y"] <= upperVal)) * 14 + 1',
"!between": '((df["y"] < lowerVal) | (df["y"] > upperVal)) * 14 + 1'
}
today = dt.datetime.now()
df["ds"] = pd.to_datetime(df["ds"])
df = df.sort_values("ds")
df["ds"] = df["ds"].apply(lambda date: date.isoformat()[:19])
todayISO = today.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).isoformat()[:19]
df = df[df["ds"] < todayISO]
df["anomaly"] = eval(operationDict[operator])
anomalyLatest = checkLatestAnomaly(df, operationStrDict[operator])
df = df[["ds", "y", "anomaly"]]
numActual = 45 if granularity == "day" else 24 * 7
output = {
"anomalyData": {
"actual": df[-numActual:].to_dict("records")
},
"anomalyLatest": anomalyLatest
}
return output
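# Minimal usage sketch (hypothetical data, shaped like the "ds"/"y" frame the
# function expects):
# df = pd.DataFrame({"ds": ["2021-01-01", "2021-01-02"], "y": [5, 20]})
# out = valueThresholdDetect(df, "day", "greater", "10", "null")
# out["anomalyLatest"]["operationCheck"] == "greater than 10"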
| 35.411765
| 101
| 0.572674
| 259
| 2,408
| 5.312741
| 0.374517
| 0.017442
| 0.026163
| 0.031977
| 0.135174
| 0.100291
| 0.100291
| 0.100291
| 0.100291
| 0.100291
| 0
| 0.03404
| 0.255814
| 2,408
| 68
| 102
| 35.411765
| 0.733817
| 0.037791
| 0
| 0
| 0
| 0.035714
| 0.259292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.053571
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fed8e9ad56ccf5ea28b13fbec8dee05b0037dc77
| 343
|
py
|
Python
|
src/chapter8/exercise6.py
|
group7BSE1/BSE-2021
|
2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0
|
[
"MIT"
] | null | null | null |
src/chapter8/exercise6.py
|
group7BSE1/BSE-2021
|
2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0
|
[
"MIT"
] | null | null | null |
src/chapter8/exercise6.py
|
group7BSE1/BSE-2021
|
2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0
|
[
"MIT"
] | 1
|
2021-04-07T14:49:04.000Z
|
2021-04-07T14:49:04.000Z
|
numbers = [] # avoid shadowing the built-in name 'list'
while True:
input_num = input('Enter a number: ')
if input_num == 'done':
break
try:
number = float(input_num)
except ValueError:
print('Invalid input')
quit()
numbers.append(number) # store the parsed float, not the raw string
if numbers:
print('Maximum: ', max(numbers))
print('Minimum: ', min(numbers))
| 22.866667
| 41
| 0.559767
| 45
| 343
| 4.177778
| 0.555556
| 0.170213
| 0.106383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.300292
| 343
| 15
| 42
| 22.866667
| 0.775
| 0
| 0
| 0
| 0
| 0
| 0.148256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fedb6c7eea105f52852855900c26c30796b4a06e
| 5,654
|
py
|
Python
|
preprocess/sketch_generation.py
|
code-gen/exploration
|
c83d79745df9566c5f1a82e581008e0984fcc319
|
[
"MIT"
] | null | null | null |
preprocess/sketch_generation.py
|
code-gen/exploration
|
c83d79745df9566c5f1a82e581008e0984fcc319
|
[
"MIT"
] | 1
|
2019-05-11T14:49:58.000Z
|
2019-05-24T15:02:54.000Z
|
preprocess/sketch_generation.py
|
code-gen/exploration
|
c83d79745df9566c5f1a82e581008e0984fcc319
|
[
"MIT"
] | null | null | null |
"""
Sketch (similar to Coarse-to-Fine)
- keep Python keywords as is
- strip off arguments and variable names
- substitute tokens with types: `NUMBER`, `STRING`
- specialize `NAME` token:
- for functions: `FUNC#<num_args>`
# Examples
x = 1 if True else 0
NAME = NUMBER if True else NUMBER
result = SomeFunc(1, 2, 'y', arg)
NAME = FUNC#4 ( NUMBER , NUMBER , STRING , NAME )
result = [x for x in DoWork(xs) if x % 2 == 0]
NAME = [ NAME for NAME in FUNC#1 ( NAME ) if NAME % NUMBER == NUMBER ]
"""
import ast
import builtins
import io
import sys
import token
from collections import defaultdict
from tokenize import TokenInfo, tokenize
import astpretty
from termcolor import colored
class ASTVisitor(ast.NodeVisitor):
def __init__(self):
self.functions = {} # map function name -> num args
@staticmethod
def name_by_type(node):
if isinstance(node, ast.Attribute):
return node.attr
if isinstance(node, ast.Name):
return node.id
if isinstance(node, ast.Subscript):
try:
return node.slice.value.id
except AttributeError:
return node.slice.value
return None
def visit_Call(self, node: ast.Call):
if isinstance(node.func, ast.Call):
self.visit_Call(node.func)
else:
func_name = self.name_by_type(node.func)
self.functions[func_name] = len(node.args)
if hasattr(node, 'keywords'):
self.functions[func_name] += len(node.keywords)
for arg in node.args:
if isinstance(arg, ast.Call):
self.visit_Call(arg)
else:
self.generic_visit(arg)
class SketchVocab:
NAME_ID = "NAME"
FUNC_ID = "FUNC"
STR_LITERAL_ID = "STRING"
NUM_LITERAL_ID = "NUMBER"
# RESERVED_ID = "<reserved>"
# ACCESSOR_ID = "<accessor>"
# ASSIGN_ID = "<assign>"
# ARITHMETIC_ID = "<arithmetic>"
# OP_ID = "<op>"
class Sketch:
def __init__(self, code_snippet: str, verbose=False):
self.code_snippet = code_snippet
self.names = defaultdict(lambda: [])
self.keywords = defaultdict(lambda: [])
self.literals = defaultdict(lambda: [])
self.operators = defaultdict(lambda: [])
self.ordered = []
if verbose:
print(colored(" * tokenizing [%s]" % code_snippet, 'yellow'))
self.tok_list = list(tokenize(io.BytesIO(self.code_snippet.encode('utf-8')).readline))
# AST
self.ast_visitor = ASTVisitor()
self.ast = None
try:
self.ast = self.ast_visitor.visit(ast.parse(self.code_snippet))
except SyntaxError:
if verbose:
print(colored(" * skipping ast generation for [%s]" % code_snippet, 'red'))
def refine_name(self, tok: TokenInfo):
if self.is_reserved_keyword(tok.string):
self.keywords[tok.string].append(tok.start[1])
self.ordered.append(tok.string)
else:
self.names[tok.string].append(tok.start[1])
if tok.string in self.ast_visitor.functions:
self.ordered.append(SketchVocab.FUNC_ID + "#%d" % self.ast_visitor.functions[tok.string])
else:
self.ordered.append(SketchVocab.NAME_ID)
def generate(self):
for tok in self.tok_list:
tok_type = token.tok_name[tok.type]
if tok_type == 'NAME':
self.refine_name(tok)
elif tok_type == 'STRING':
self.literals[tok.string].append(tok.start[1])
self.ordered.append(SketchVocab.STR_LITERAL_ID)
elif tok_type == 'NUMBER':
self.literals[tok.string].append(tok.start[1])
self.ordered.append(SketchVocab.NUM_LITERAL_ID)
elif tok_type == 'OP':
self.operators[tok.string].append(tok.start[1])
self.ordered.append(tok.string)
else:
assert tok_type in ['ENCODING', 'NEWLINE', 'ENDMARKER', 'ERRORTOKEN'], "%s" % tok_type
return self
def details(self):
return "names: %s\nkeywords: %s\nliterals: %s\noperators: %s" % (
str(list(self.names.keys())),
str(list(self.keywords.keys())),
str(list(self.literals.keys())),
str(list(self.operators.keys()))
)
def split(self, delim=' '):
return str(self).split(delim)
def __str__(self):
return ' '.join(self.ordered)
def __repr__(self):
return str(self)
def __len__(self):
return len(self.ordered)
@staticmethod
def is_reserved_keyword(name):
RESERVED_KEYWORDS = set(dir(builtins) + [
"and", "assert", "break", "class", "continue", "def", "del", "elif",
"else", "except", "exec", "finally", "for", "from", "global", "if",
"import", "in", "is", "lambda", "not", "or", "pass", "print", "raise",
"return", "try", "while", "yield", "None", "self"
]) # len = 182
return name in RESERVED_KEYWORDS
def main():
# v = ASTVisitor()
# t = v.visit(ast.parse('x = SomeFunc(2, 3, y, "test")'))
# print(v.functions)
# astpretty.pprint(tree.body[0], indent=' ' * 4)
# exec(compile(tree, filename="<ast>", mode="exec"))
code_snippet = sys.argv[1]
astpretty.pprint(ast.parse(code_snippet).body[0], indent=' ' * 4)
sketch = Sketch(code_snippet, verbose=True).generate()
# print(sketch.details())
print(sketch)
if __name__ == '__main__':
main()
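# Example, matching the module docstring:
# Sketch("x = 1 if True else 0").generate() renders via str() as
# "NAME = NUMBER if True else NUMBER"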
| 29.447917
| 105
| 0.579413
| 674
| 5,654
| 4.728487
| 0.252226
| 0.034515
| 0.032005
| 0.02824
| 0.124255
| 0.099153
| 0.074051
| 0.074051
| 0.074051
| 0.074051
| 0
| 0.005941
| 0.285462
| 5,654
| 191
| 106
| 29.602094
| 0.782921
| 0.155642
| 0
| 0.127119
| 0
| 0
| 0.073638
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 1
| 0.110169
| false
| 0.008475
| 0.084746
| 0.042373
| 0.355932
| 0.042373
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fedbf772bab9d4ac688fa0669b5207dce247b24c
| 8,538
|
py
|
Python
|
LPBv2/tests/game/test_player.py
|
TierynnB/LeaguePyBot
|
2e96230b9dc24d185ddc0c6086d79f7d01e7a643
|
[
"MIT"
] | 45
|
2020-11-28T04:45:45.000Z
|
2022-03-31T05:53:37.000Z
|
LPBv2/tests/game/test_player.py
|
TierynnB/LeaguePyBot
|
2e96230b9dc24d185ddc0c6086d79f7d01e7a643
|
[
"MIT"
] | 13
|
2021-01-15T00:50:10.000Z
|
2022-02-02T15:16:49.000Z
|
LPBv2/tests/game/test_player.py
|
TierynnB/LeaguePyBot
|
2e96230b9dc24d185ddc0c6086d79f7d01e7a643
|
[
"MIT"
] | 14
|
2020-12-21T10:03:31.000Z
|
2021-11-22T04:03:03.000Z
|
import pytest
from LPBv2.common import (
InventoryItem,
PlayerInfo,
PlayerScore,
PlayerStats,
TeamMember,
MinimapZone,
merge_dicts,
)
from LPBv2.game import Player
update_data = {
"abilities": {
"E": {
"abilityLevel": 0,
"displayName": "\u9b42\u306e\u8a66\u7df4",
"id": "IllaoiE",
"rawDescription": "GeneratedTip_Spell_IllaoiE_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiE_DisplayName",
},
"Passive": {
"displayName": "\u65e7\u795e\u306e\u9810\u8a00\u8005",
"id": "IllaoiPassive",
"rawDescription": "GeneratedTip_Passive_IllaoiPassive_Description",
"rawDisplayName": "GeneratedTip_Passive_IllaoiPassive_DisplayName",
},
"Q": {
"abilityLevel": 0,
"displayName": "\u89e6\u624b\u306e\u9244\u69cc",
"id": "IllaoiQ",
"rawDescription": "GeneratedTip_Spell_IllaoiQ_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiQ_DisplayName",
},
"R": {
"abilityLevel": 0,
"displayName": "\u4fe1\u4ef0\u9707",
"id": "IllaoiR",
"rawDescription": "GeneratedTip_Spell_IllaoiR_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiR_DisplayName",
},
"W": {
"abilityLevel": 0,
"displayName": "\u904e\u9177\u306a\u308b\u6559\u8a13",
"id": "IllaoiW",
"rawDescription": "GeneratedTip_Spell_IllaoiW_Description",
"rawDisplayName": "GeneratedTip_Spell_IllaoiW_DisplayName",
},
},
"championStats": {
"abilityHaste": 0.0,
"abilityPower": 0.0,
"armor": 41.0,
"armorPenetrationFlat": 0.0,
"armorPenetrationPercent": 1.0,
"attackDamage": 73.4000015258789,
"attackRange": 125.0,
"attackSpeed": 0.5709999799728394,
"bonusArmorPenetrationPercent": 1.0,
"bonusMagicPenetrationPercent": 1.0,
"cooldownReduction": 0.0,
"critChance": 0.0,
"critDamage": 175.0,
"currentHealth": 601.0,
"healthRegenRate": 1.899999976158142,
"lifeSteal": 0.0,
"magicLethality": 0.0,
"magicPenetrationFlat": 0.0,
"magicPenetrationPercent": 1.0,
"magicResist": 32.0,
"maxHealth": 601.0,
"moveSpeed": 340.0,
"physicalLethality": 0.0,
"resourceMax": 300.0,
"resourceRegenRate": 1.5,
"resourceType": "MANA",
"resourceValue": 300.0,
"spellVamp": 0.0,
"tenacity": 0.0,
},
"currentGold": 888.6270751953125,
"level": 1,
"summonerName": "Supername",
"championName": "\u30a4\u30e9\u30aa\u30a4",
"isBot": False,
"isDead": False,
"items": [
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u92fc\u306e\u30b7\u30e7\u30eb\u30c0\u30fc\u30ac\u30fc\u30c9",
"itemID": 3854,
"price": 400,
"rawDescription": "GeneratedTip_Item_3854_Description",
"rawDisplayName": "Item_3854_Name",
"slot": 0,
},
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u30d7\u30ec\u30fc\u30c8 \u30b9\u30c1\u30fc\u30eb\u30ad\u30e3\u30c3\u30d7",
"itemID": 3047,
"price": 500,
"rawDescription": "GeneratedTip_Item_3047_Description",
"rawDisplayName": "Item_3047_Name",
"slot": 1,
},
{
"canUse": False,
"consumable": False,
"count": 1,
"displayName": "\u30ad\u30f3\u30c9\u30eb\u30b8\u30a7\u30e0",
"itemID": 3067,
"price": 400,
"rawDescription": "GeneratedTip_Item_3067_Description",
"rawDisplayName": "Item_3067_Name",
"slot": 2,
},
{
"canUse": True,
"consumable": False,
"count": 1,
"displayName": "\u30b9\u30c6\u30eb\u30b9 \u30ef\u30fc\u30c9",
"itemID": 3340,
"price": 0,
"rawDescription": "GeneratedTip_Item_3340_Description",
"rawDisplayName": "Item_3340_Name",
"slot": 6,
},
],
"position": "",
"rawChampionName": "game_character_displayname_Illaoi",
"respawnTimer": 0.0,
"runes": {
"keystone": {
"displayName": "\u4e0d\u6b7b\u8005\u306e\u63e1\u6483",
"id": 8437,
"rawDescription": "perk_tooltip_GraspOfTheUndying",
"rawDisplayName": "perk_displayname_GraspOfTheUndying",
},
"primaryRuneTree": {
"displayName": "\u4e0d\u6ec5",
"id": 8400,
"rawDescription": "perkstyle_tooltip_7204",
"rawDisplayName": "perkstyle_displayname_7204",
},
"secondaryRuneTree": {
"displayName": "\u9b54\u9053",
"id": 8200,
"rawDescription": "perkstyle_tooltip_7202",
"rawDisplayName": "perkstyle_displayname_7202",
},
},
"scores": {
"assists": 0,
"creepScore": 100,
"deaths": 0,
"kills": 0,
"wardScore": 0.0,
},
"skinID": 0,
"summonerSpells": {
"summonerSpellOne": {
"displayName": "\u30af\u30ec\u30f3\u30ba",
"rawDescription": "GeneratedTip_SummonerSpell_SummonerBoost_Description",
"rawDisplayName": "GeneratedTip_SummonerSpell_SummonerBoost_DisplayName",
},
"summonerSpellTwo": {
"displayName": "\u30a4\u30b0\u30be\u30fc\u30b9\u30c8",
"rawDescription": "GeneratedTip_SummonerSpell_SummonerExhaust_Description",
"rawDisplayName": "GeneratedTip_SummonerSpell_SummonerExhaust_DisplayName",
},
},
"team": "ORDER",
}
test_zone = MinimapZone(x=90, y=90, name="TestZone")
test_member = TeamMember(x=100, y=100, zone=test_zone)
@pytest.fixture
def get_player():
return Player()
def test_player_init(get_player):
assert get_player
assert isinstance(get_player.info, PlayerInfo)
assert isinstance(get_player.stats, PlayerStats)
assert isinstance(get_player.score, PlayerScore)
assert isinstance(get_player.inventory, list)
assert isinstance(get_player.location, str)
assert isinstance(get_player, Player)
@pytest.mark.asyncio
async def test_player_update_info(get_player):
await get_player.update_info(update_data)
assert get_player.info.name == "Supername"
assert get_player.info.level == 1
assert isinstance(get_player.info, PlayerInfo)
@pytest.mark.asyncio
async def test_player_update_stats(get_player):
await get_player.update_stats(update_data)
assert get_player.stats.maxHealth == 601.0
assert isinstance(get_player.stats, PlayerStats)
@pytest.mark.asyncio
async def test_player_update_score(get_player):
await get_player.update_score(update_data)
assert get_player.score.creepScore == 100
assert isinstance(get_player.score, PlayerScore)
@pytest.mark.asyncio
async def test_player_update_inventory(get_player):
await get_player.update_inventory(update_data)
assert isinstance(get_player.inventory, list)
assert len(get_player.inventory) > 0
assert isinstance(get_player.inventory[0], InventoryItem)
assert get_player.inventory[0].itemID == 3854
@pytest.mark.asyncio
async def test_player_update_location(get_player):
await get_player.update_location(test_member)
assert get_player.info.x == 100
assert get_player.info.y == 100
assert get_player.info.zone == test_zone
assert isinstance(get_player.info.zone, MinimapZone)
assert isinstance(get_player.info, PlayerInfo)
@pytest.mark.asyncio
async def test_player_update(get_player):
await get_player.update(update_data)
assert get_player.info.name == "Supername"
assert get_player.info.level == 1
assert isinstance(get_player.info, PlayerInfo)
assert get_player.stats.maxHealth == 601.0
assert isinstance(get_player.stats, PlayerStats)
assert get_player.score.creepScore == 100
assert isinstance(get_player.score, PlayerScore)
assert isinstance(get_player.inventory, list)
assert len(get_player.inventory) > 0
assert isinstance(get_player.inventory[0], InventoryItem)
assert get_player.inventory[0].itemID == 3854
| 33.093023
| 103
| 0.613844
| 805
| 8,538
| 6.310559
| 0.281988
| 0.085039
| 0.067323
| 0.088583
| 0.346457
| 0.30689
| 0.272638
| 0.231496
| 0.199213
| 0.199213
| 0
| 0.082487
| 0.261654
| 8,538
| 257
| 104
| 33.22179
| 0.72335
| 0
| 0
| 0.213675
| 0
| 0.004274
| 0.353127
| 0.173694
| 0
| 0
| 0
| 0
| 0.145299
| 1
| 0.008547
| false
| 0.017094
| 0.012821
| 0.004274
| 0.025641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee0850f728247adf6624bff53382da94eff6965
| 1,199
|
py
|
Python
|
tests/test_negate_with_undo.py
|
robobeaver6/hier_config
|
efd413ef709d462effe8bfd11ef0520c1d62eb33
|
[
"MIT"
] | null | null | null |
tests/test_negate_with_undo.py
|
robobeaver6/hier_config
|
efd413ef709d462effe8bfd11ef0520c1d62eb33
|
[
"MIT"
] | null | null | null |
tests/test_negate_with_undo.py
|
robobeaver6/hier_config
|
efd413ef709d462effe8bfd11ef0520c1d62eb33
|
[
"MIT"
] | null | null | null |
import unittest
import tempfile
import os
import yaml
import types
from hier_config import HConfig
from hier_config.host import Host
class TestNegateWithUndo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.os = 'comware5'
cls.options_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'files',
'test_options_negate_with_undo.yml',
)
cls.running_cfg = 'test_for_undo\nundo test_for_redo\n'
cls.compiled_cfg = 'undo test_for_undo\ntest_for_redo\n'
cls.remediation = 'undo test_for_undo\ntest_for_redo\n'
with open(cls.options_file) as f:
cls.options = yaml.safe_load(f.read())
cls.host_a = Host('example1.rtr', cls.os, cls.options)
def test_merge(self):
self.host_a.load_config_from(config_type="running", name=self.running_cfg, load_file=False)
self.host_a.load_config_from(config_type="compiled", name=self.compiled_cfg, load_file=False)
self.host_a.load_remediation()
self.assertEqual(self.remediation, self.host_a.facts['remediation_config_raw'])
if __name__ == "__main__":
unittest.main(failfast=True)
| 30.74359
| 101
| 0.692244
| 167
| 1,199
| 4.646707
| 0.365269
| 0.032216
| 0.046392
| 0.050258
| 0.215206
| 0.215206
| 0.215206
| 0.215206
| 0
| 0
| 0
| 0.00209
| 0.201835
| 1,199
| 38
| 102
| 31.552632
| 0.808777
| 0
| 0
| 0
| 0
| 0
| 0.173478
| 0.095913
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.068966
| false
| 0
| 0.241379
| 0
| 0.344828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee18a5b11572b38d902059c0db310b2cf42cd2d
| 6,984
|
py
|
Python
|
code/gauss_legendre.py
|
MarkusLohmayer/master-thesis-code
|
b107d1b582064daf9ad4414e1c9f332ef0be8660
|
[
"MIT"
] | 1
|
2020-11-14T15:56:07.000Z
|
2020-11-14T15:56:07.000Z
|
code/gauss_legendre.py
|
MarkusLohmayer/master-thesis-code
|
b107d1b582064daf9ad4414e1c9f332ef0be8660
|
[
"MIT"
] | null | null | null |
code/gauss_legendre.py
|
MarkusLohmayer/master-thesis-code
|
b107d1b582064daf9ad4414e1c9f332ef0be8660
|
[
"MIT"
] | null | null | null |
"""Gauss-Legendre collocation methods for port-Hamiltonian systems"""
import sympy
import numpy
import math
from newton import newton_raphson, DidNotConvergeError
from symbolic import eval_expr
def butcher(s):
"""Compute the Butcher tableau for a Gauss-Legendre collocation method.
Parameters
----------
s : int
Number of stages of the collocation method.
The resulting method is of order 2s.
Returns
-------
a : numpy.ndarray
        Coefficients a_{ij}, i.e. the j-th Lagrange polynomial integrated on (0, c_i).
    b : numpy.ndarray
        Coefficients b_j, i.e. the j-th Lagrange polynomial integrated on (0, 1).
c : numpy.ndarray
Coefficients c_i, i.e. the collocation points.
"""
from sympy.abc import tau, x
# shifted Legendre polynomial of order s
P = (x ** s * (x - 1) ** s).diff(x, s)
# roots of P
C = sympy.solve(P)
C.sort()
c = numpy.array([float(c_i) for c_i in C])
# Lagrange basis polynomials at nodes C
L = []
for i in range(s):
l = 1
for j in range(s):
if j != i:
l = (l * (tau - C[j]) / (C[i] - C[j])).simplify()
L.append(l)
# integrals of Lagrange polynomials
A = [[sympy.integrate(l, (tau, 0, c_i)) for l in L] for c_i in C]
a = numpy.array([[float(a_ij) for a_ij in row] for row in A])
B = [sympy.integrate(l, (tau, 0, 1)) for l in L]
b = numpy.array([float(b_j) for b_j in B])
return a, b, c
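# A minimal sanity check (an illustrative sketch, not part of the original file):
# for s = 1 the tableau reduces to the implicit midpoint rule,
#
#     >>> a, b, c = butcher(1)
#     >>> (a, b, c)
#     (array([[0.5]]), array([1.]), array([0.5]))
#
# since the shifted Legendre polynomial of order 1 is 2*x - 1, whose root 1/2 is
# the single collocation point.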
def gauss_legendre(
x,
xdot,
x_0,
t_f,
dt,
s=1,
functionals={},
params={},
tol=1e-9,
logger=None,
constraints=[],
):
"""Integrate a port-Hamiltonian system in time
based on a Gauss-Legendre collocation method.
Parameters
----------
x : sympy.Matrix
vector of symbols for state-space coordinates
xdot : List[sympy.Expr]
        The right-hand sides of the differential equations
which have to hold at each collocation point.
x_0 : numpy.ndarray
Initial conditions.
t_f : float
Length of time interval.
dt : float
Desired time step.
s : int
Number of stages of the collocation method.
The resulting method is of order 2s.
functionals : Dict[sympy.Symbol, sympy.Expr]
Functionals on which xdot may depend.
params : Dict[sympy.Symbol, Union[sympy.Expr, float]]
Parameters on which the system may depend.
logger : Optional[Logger]
        Logger object which is passed through to the Newton-Raphson solver.
constraints : List[sympy.Expr]
Additional algebraic equations which have to hold
at each collocation point.
"""
# number of steps
K = int(t_f // dt)
# accurate time step
dt = t_f / K
# dimension of state space
N = len(x)
# Butcher tableau (multiplied with time step)
a, b, c = butcher(s)
a *= dt
b *= dt
c *= dt
# generate code for evaluating residuals vector and Jacobian matrix
code = _generate_code(x, xdot, N, a, s, functionals, params, constraints)
# print(code)
# return None, None
ldict = {}
exec(code, None, ldict)
compute_residuals = ldict["compute_residuals"]
compute_jacobian = ldict["compute_jacobian"]
del code, ldict
# array for storing time at every step
time = numpy.empty(K + 1, dtype=float)
time[0] = t_0 = 0.0
# array for storing the state at every step
solution = numpy.empty((K + 1, N), dtype=float)
solution[0] = x_0
# flows / unknowns (reused at every step)
f = numpy.zeros(s * N, dtype=float)
fmat = f.view()
fmat.shape = (s, N)
# residuals vector (reused at every step)
residuals = numpy.empty(s * (N + len(constraints)), dtype=float)
# jacobian matrix (reused at every step)
jacobian = numpy.empty((s * (N + len(constraints)), s * N), dtype=float)
for k in range(1, K + 1):
try:
newton_raphson(
f,
residuals,
lambda residuals, unknowns: compute_residuals(residuals, unknowns, x_0),
jacobian,
lambda jacobian, unknowns: compute_jacobian(jacobian, unknowns, x_0),
tol=tol,
iterations=500,
logger=logger,
)
except DidNotConvergeError:
print(f"Did not converge at step {k}.")
break
time[k] = t_0 = t_0 + dt
solution[k] = x_0 = x_0 - b @ fmat
return time, solution
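# Illustrative call pattern (a hedged sketch, not executed here; it assumes the
# `newton` and `symbolic` helper modules behave as they are used above, e.g. that
# eval_expr passes plain sympy expressions through for empty functionals):
#
#     x = sympy.Matrix([sympy.Symbol('x0')])
#     t, sol = gauss_legendre(x, [-x[0]], numpy.array([1.0]), 1.0, 0.1, s=1)
#
# This integrates dx/dt = -x with the implicit midpoint rule (s = 1), so
# sol[-1] should approximate exp(-1) ~ 0.368.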
def _generate_code(x, xdot, N, a, s, functionals, params, constraints):
"""Generate code for the two methods compute_residuals and compute_jacobian"""
# dynamics
xdot = [eval_expr(f, functionals) for f in xdot]
# algebraic constraints
constraints = [eval_expr(c, functionals) for c in constraints]
# symbols for Butcher coefficients a_{ij} multiplied by time step h
asym = [[sympy.Symbol(f"a{i}{j}") for j in range(s)] for i in range(s)]
# symbols for old state
osym = [sympy.Symbol(f"o[{n}]") for n in range(N)]
# symbols for unknowns (flow vector)
fsym = [[sympy.Symbol(f"f[{i},{n}]") for n in range(N)] for i in range(s)]
# polynomial approximation of the numerical solution at the collocation points
xc = [
[
(x[n], osym[n] - sum(asym[i][j] * fsym[j][n] for j in range(s)))
for n in range(N)
]
for i in range(s)
]
# expressions for the residuals vector
residuals = [
fsym[i][n] + xdot[n].subs(xc[i]) for i in range(s) for n in range(N)
] + [c.subs(xc[i]) for c in constraints for i in range(s)]
# expressions for the Jacobian matrix
jacobian = [[residual.diff(d) for r in fsym for d in r] for residual in residuals]
printer = sympy.printing.lambdarepr.PythonCodePrinter()
dim = s * N + s * len(constraints)
code = "def compute_residuals(residuals, f, o):\n"
code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
# code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
for i in range(dim):
code += f"\tresiduals[{i}] = {printer.doprint(eval_expr(residuals[i], params=params).evalf())}\n"
# code += f"\tresiduals[{i}] = {printer.doprint(residuals[i])}\n"
code += "\n\ndef compute_jacobian(jacobian, f, o):\n"
code += f"\tf = f.view()\n\tf.shape = ({s}, {N})\n"
code += "".join(f"\ta{i}{j} = {a[i,j]}\n" for i in range(s) for j in range(s))
# code += "".join(f"\t{symbol} = {printer.doprint(value)}\n" for symbol, value in params.items())
for i in range(dim):
for j in range(s * N):
code += f"\tjacobian[{i},{j}] = {printer.doprint(eval_expr(jacobian[i][j], params=params).evalf())}\n"
# code += f"\tjacobian[{i},{j}] = {printer.doprint(jacobian[i][j])}\n"
return code
| 31.459459
| 114
| 0.593643
| 1,026
| 6,984
| 3.993177
| 0.197856
| 0.03588
| 0.027337
| 0.026849
| 0.299976
| 0.275812
| 0.194777
| 0.194777
| 0.161582
| 0.139126
| 0
| 0.006726
| 0.276203
| 6,984
| 221
| 115
| 31.60181
| 0.803759
| 0.384307
| 0
| 0.056075
| 0
| 0.018692
| 0.115027
| 0.046011
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028037
| false
| 0
| 0.056075
| 0
| 0.11215
| 0.037383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee307cf09fb64ad8f6da891a9a28954c9a3eeae
| 3,026
|
py
|
Python
|
teraserver/python/opentera/db/models/TeraDeviceParticipant.py
|
introlab/opentera
|
bfc4de672c9de40b7c9a659be2138731e7ee4e94
|
[
"Apache-2.0"
] | 10
|
2020-03-16T14:46:06.000Z
|
2022-02-11T16:07:38.000Z
|
teraserver/python/opentera/db/models/TeraDeviceParticipant.py
|
introlab/opentera
|
bfc4de672c9de40b7c9a659be2138731e7ee4e94
|
[
"Apache-2.0"
] | 114
|
2019-09-16T13:02:50.000Z
|
2022-03-22T19:17:36.000Z
|
teraserver/python/opentera/db/models/TeraDeviceParticipant.py
|
introlab/opentera
|
bfc4de672c9de40b7c9a659be2138731e7ee4e94
|
[
"Apache-2.0"
] | null | null | null |
from opentera.db.Base import db, BaseModel
class TeraDeviceParticipant(db.Model, BaseModel):
__tablename__ = 't_devices_participants'
id_device_participant = db.Column(db.Integer, db.Sequence('id_device_participant_sequence'), primary_key=True,
autoincrement=True)
id_device = db.Column(db.Integer, db.ForeignKey("t_devices.id_device"), nullable=False)
id_participant = db.Column(db.Integer, db.ForeignKey("t_participants.id_participant", ondelete='cascade'),
nullable=False)
device_participant_participant = db.relationship("TeraParticipant")
device_participant_device = db.relationship("TeraDevice")
def to_json(self, ignore_fields=[], minimal=False):
ignore_fields.extend(['device_participant_participant', 'device_participant_device'])
if minimal:
ignore_fields.extend([])
rval = super().to_json(ignore_fields=ignore_fields)
return rval
@staticmethod
def create_defaults(test=False):
if test:
from opentera.db.models.TeraParticipant import TeraParticipant
from opentera.db.models.TeraDevice import TeraDevice
participant1 = TeraParticipant.get_participant_by_id(1)
participant2 = TeraParticipant.get_participant_by_id(2)
device1 = TeraDevice.get_device_by_name('Apple Watch #W05P1')
device2 = TeraDevice.get_device_by_name('Kit Télé #1')
device3 = TeraDevice.get_device_by_name('Robot A')
dev_participant = TeraDeviceParticipant()
dev_participant.device_participant_device = device1
dev_participant.device_participant_participant = participant1
db.session.add(dev_participant)
dev_participant = TeraDeviceParticipant()
dev_participant.device_participant_device = device1
dev_participant.device_participant_participant = participant2
db.session.add(dev_participant)
dev_participant = TeraDeviceParticipant()
dev_participant.device_participant_device = device2
dev_participant.device_participant_participant = participant2
db.session.add(dev_participant)
db.session.commit()
@staticmethod
def get_device_participant_by_id(device_participant_id: int):
return TeraDeviceParticipant.query.filter_by(id_device_participant=device_participant_id).first()
@staticmethod
def query_devices_for_participant(participant_id: int):
return TeraDeviceParticipant.query.filter_by(id_participant=participant_id).all()
@staticmethod
def query_participants_for_device(device_id: int):
return TeraDeviceParticipant.query.filter_by(id_device=device_id).all()
@staticmethod
def query_device_participant_for_participant_device(device_id: int, participant_id: int):
return TeraDeviceParticipant.query.filter_by(id_device=device_id, id_participant=participant_id).first()
| 44.5
| 114
| 0.718771
| 322
| 3,026
| 6.413043
| 0.232919
| 0.139952
| 0.108475
| 0.090073
| 0.485714
| 0.394189
| 0.374334
| 0.345278
| 0.345278
| 0.317191
| 0
| 0.007054
| 0.203569
| 3,026
| 67
| 115
| 45.164179
| 0.849793
| 0
| 0
| 0.294118
| 0
| 0
| 0.073695
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0.078431
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee39b66b3b2ef9dd7dd901d2d89a2d3c684442c
| 11,043
|
py
|
Python
|
leetcode_python/Linked_list/split-linked-list-in-parts.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Linked_list/split-linked-list-in-parts.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Linked_list/split-linked-list-in-parts.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
"""
725. Split Linked List in Parts
Medium
0Given the head of a singly linked list and an integer k, split the linked list into k consecutive linked list parts.
The length of each part should be as equal as possible: no two parts should have a size differing by more than one. This may lead to some parts being null.
The parts should be in the order of occurrence in the input list, and parts occurring earlier should always have a size greater than or equal to parts occurring later.
Return an array of the k parts.
Example 1:
Input: head = [1,2,3], k = 5
Output: [[1],[2],[3],[],[]]
Explanation:
The first element output[0] has output[0].val = 1, output[0].next = null.
The last element output[4] is null, but its string representation as a ListNode is [].
Example 2:
Input: head = [1,2,3,4,5,6,7,8,9,10], k = 3
Output: [[1,2,3,4],[5,6,7],[8,9,10]]
Explanation:
The input has been split into consecutive parts with size difference at most 1, and earlier parts are a larger size than the later parts.
Constraints:
The number of nodes in the list is in the range [0, 1000].
0 <= Node.val <= 1000
1 <= k <= 50
"""
# V0
# IDEA : LINKED LIST OP + mod op
class Solution(object):
def splitListToParts(self, head, k):
# NO need to deal with edge case !!!
# get linked list length
_len = 0
_head = cur = head
while _head:
_len += 1
_head = _head.next
# init res
res = [None] * k
### NOTE : we loop over k
for i in range(k):
"""
2 cases
            case 1) i < (_len % k) : the "remainder" (_len % k) is not yet used up, so we add an extra 1
                -> _cnt_elem = (_len // k) + 1
            case 2) i >= (_len % k) : the "remainder" is used up
                -> _cnt_elem = (_len // k)
"""
# NOTE THIS !!!
_cnt_elem = (_len // k) + (1 if i < (_len % k) else 0)
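            # e.g. _len = 10, k = 3 -> remainder 1, so the part sizes are 4, 3, 3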
### NOTE : we loop over _cnt_elem (length of each "split" linkedlist)
for j in range(_cnt_elem):
"""
3 cases
1) j == 0 (begin of sub linked list)
2) j == _cnt_elem - 1 (end of sub linked list)
3) 0 < j < _cnt_elem - 1 (middle within sub linked list)
"""
# NOTE THIS !!!
                # NOTE we need to keep the if / else in the BELOW ORDER !!
# -> j == 0, j == _cnt_elem - 1, else
if j == 0:
res[i] = cur
### NOTE this !!! :
# -> IF (but not elif)
# -> since we also need to deal with j == 0 and j == _cnt_elem - 1 case
if j == _cnt_elem - 1: # note this !!!
# get next first
tmp = cur.next
# point cur.next to None
cur.next = None
# move cur to next (tmp) for op in next i (for i in range(k))
cur = tmp
else:
cur = cur.next
#print ("res = " + str(res))
return res
# V0'
class Solution(object):
def splitListToParts(self, head, k):
# NO need to deal with edge case !!!
# get len
root = cur = head
_len = 0
while root:
root = root.next
_len += 1
res = [None] * k
for i in range(k):
tmp_cnt = (_len // k) + (1 if i < (_len % k) else 0)
for j in range(tmp_cnt):
# 3 cases
# j == 0
if j == 0:
res[i] = cur
# IF !!!! j == tmp_cnt - 1 !!!
if j == tmp_cnt-1:
_next = cur.next
cur.next = None
cur = _next
# 0 < j < tmp_cnt
else:
cur = cur.next
print ("res = " + str(res))
return res
# V0'
# IDEA : LINKED LIST OP
class Solution:
def splitListToParts(self, root, k):
def get_length(root):
ans = 0
while root is not None:
root = root.next
ans += 1
return ans
ans = [None]*k
cur = root
length = get_length(root)
for i in range(k):
no_elems = (length // k) + (1 if i < (length % k) else 0)
for j in range(no_elems):
if j == 0:
ans[i] = cur
if j == no_elems - 1:
temp = cur.next
cur.next = None
cur = temp
else:
cur = cur.next
return ans
# V1
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/109284/Elegant-Python-with-Explanation-45ms
class Solution(object):
def splitListToParts(self, root, k):
# Count the length of the linked list
curr, length = root, 0
while curr:
curr, length = curr.next, length + 1
# Determine the length of each chunk
chunk_size, longer_chunks = length // k, length % k
res = [chunk_size + 1] * longer_chunks + [chunk_size] * (k - longer_chunks)
# Split up the list
prev, curr = None, root
for index, num in enumerate(res):
if prev:
prev.next = None
res[index] = curr
for i in range(num):
prev, curr = curr, curr.next
return res
### Test case : dev
# V1'
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/139360/Simple-pythonic-solution.-Beats-100
def get_length(root):
ans = 0
while root is not None:
root = root.next
ans += 1
return ans
class Solution:
def splitListToParts(self, root, k):
ans = [None]*k
cur = root
length = get_length(root)
for i in range(k):
no_elems = (length // k) + (1 if i < (length % k) else 0)
for j in range(no_elems):
if j == 0:
ans[i] = cur
if j == no_elems - 1:
temp = cur.next
cur.next = None
cur = temp
else:
cur = cur.next
return ans
# V1''
# https://leetcode.com/problems/split-linked-list-in-parts/discuss/237516/python-solution-beat-100
class Solution:
def splitListToParts(self, root: 'ListNode', k: 'int') -> 'List[ListNode]':
n, p, res = 0, root, []
while p:
n, p = n + 1, p.next
a, m, start = n // k, n % k, root
for _ in range(k):
if not start: res.append(None)
else:
end = start
for _ in range(a + (1 if m else 0) - 1):
end = end.next
if m > 0: m -= 1
res.append(start)
start = end.next
end.next = None
return res
# V1'''
# http://bookshadow.com/weblog/2017/11/13/leetcode-split-linked-list-in-parts/
class Solution(object):
def splitNum(self, m, n):
q, r = m / n, m % n
if r > 0: return [q + 1] * r + [q] * (n - r)
if r < 0: return [q] * (n + r) + [q - 1] * -r
return [q] * n
def listLength(self, root):
ans = 0
while root:
ans += 1
root = root.next
return ans
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
ans = []
s = self.listLength(root)
for p in self.splitNum(s, k):
if not p:
ans.append(None)
continue
node = root
for x in range(int(p) - 1):
node = node.next
ans.append(root)
if root:
root = node.next
node.next = None
return ans
# V1''''
# https://blog.csdn.net/fuxuemingzhu/article/details/79543931
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
nodes = []
counts = 0
each = root
while each:
counts += 1
each = each.next
num = int(counts / k)
rem = int(counts % k)
for i in range(k):
head = ListNode(0)
each = head
for j in range(num):
node = ListNode(root.val)
each.next = node
each = each.next
root = root.next
if rem and root:
rmnode = ListNode(root.val)
each.next = rmnode
if root:
root = root.next
rem -= 1
nodes.append(head.next)
return nodes
# V1'''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : CREATE NEW LISTS
# time complexity : O(N+K)
# space complexity : O(N, K)
class Solution(object):
def splitListToParts(self, root, k):
cur = root
for N in range(1001):
if not cur: break
cur = cur.next
width, remainder = divmod(N, k)
ans = []
cur = root
for i in range(k):
head = write = ListNode(None)
for j in range(width + (i < remainder)):
write.next = write = ListNode(cur.val)
if cur: cur = cur.next
ans.append(head.next)
return ans
# V1''''''
# https://leetcode.com/problems/split-linked-list-in-parts/solution/
# IDEA : SPLIT INPUT LIST
# time complexity : O(N+K)
# space complexity : O(K)
class Solution(object):
def splitListToParts(self, root, k):
cur = root
for N in range(1001):
if not cur: break
cur = cur.next
width, remainder = divmod(N, k)
ans = []
cur = root
for i in range(k):
head = cur
for j in range(width + (i < remainder) - 1):
if cur: cur = cur.next
if cur:
cur.next, cur = None, cur.next
ans.append(head)
return ans
# V2
# Time: O(n + k)
# Space: O(1)
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
curr = root
while curr:
            curr = curr.next
n += 1
width, remainder = divmod(n, k)
result = []
curr = root
for i in range(k):
head = curr
for j in range(width-1+int(i < remainder)):
if curr:
                    curr = curr.next
if curr:
curr.next, curr = None, curr.next
result.append(head)
return result
| 29.845946
| 167
| 0.472698
| 1,410
| 11,043
| 3.652482
| 0.162411
| 0.031262
| 0.049126
| 0.021359
| 0.456505
| 0.382718
| 0.358252
| 0.326214
| 0.30466
| 0.298058
| 0
| 0.02926
| 0.421262
| 11,043
| 370
| 168
| 29.845946
| 0.776561
| 0.261614
| 0
| 0.584821
| 0
| 0
| 0.004183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066964
| false
| 0
| 0
| 0
| 0.183036
| 0.004464
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee526d6327eadfd2a1c6fc5732f854eab5a5bb2
| 1,645
|
py
|
Python
|
carl/charts.py
|
zaratec/carl
|
9d655c2cb75d90ddc6b2d101073248a2fc3c252e
|
[
"MIT"
] | null | null | null |
carl/charts.py
|
zaratec/carl
|
9d655c2cb75d90ddc6b2d101073248a2fc3c252e
|
[
"MIT"
] | null | null | null |
carl/charts.py
|
zaratec/carl
|
9d655c2cb75d90ddc6b2d101073248a2fc3c252e
|
[
"MIT"
] | 1
|
2020-11-19T23:41:28.000Z
|
2020-11-19T23:41:28.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
"""
def ecdf(sorted_views):
for view, data in sorted_views.iteritems():
yvals = np.arange(len(data))/float(len(data))
plt.plot(data, yvals, label=view)
plt.grid(True)
plt.xlabel('jaccard')
plt.ylabel('CDF')
lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.savefig("ecdf.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
clear()
"""
#def ecdf_polished(sorted_views):
def ecdf(sorted_views):
view_to_label = {
"priv": "root domain",
"netloc": "fqdn",
"path": "full path"}
    for view, data in sorted_views.items():
if view in view_to_label.keys():
yvals = np.arange(len(data))/float(len(data))
plt.plot(data, yvals, label=view_to_label[view])
matplotlib.rcParams.update({'font.size': 22})
plt.grid(True)
plt.xlabel('jaccard index')
plt.ylabel('CDF')
lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.savefig("ecdf.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
clear()
def density(sorted_views):
    for view, data in sorted_views.items():
xvals = range(len(data))
plt.plot(xvals, data, label=view)
plt.grid(True)
plt.xlabel('site')
plt.ylabel('jaccard')
lgnd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.savefig("stack.png", bbox_extra_artists=(lgnd, ), bbox_inches='tight')
clear()
def clear():
plt.clf()
plt.cla()
plt.close()
| 25.703125
| 78
| 0.6231
| 231
| 1,645
| 4.311688
| 0.307359
| 0.077309
| 0.033133
| 0.039157
| 0.682731
| 0.682731
| 0.648594
| 0.566265
| 0.566265
| 0.477912
| 0
| 0.015456
| 0.213374
| 1,645
| 63
| 79
| 26.111111
| 0.75425
| 0.019453
| 0
| 0.285714
| 0
| 0
| 0.084874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.085714
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee67822f155f266cc796b6f601f1860ad8b8823
| 4,760
|
py
|
Python
|
examples/Kane1985/Chapter5/Ex10.10.py
|
nouiz/pydy
|
20c8ca9fc521208ae2144b5b453c14ed4a22a0ec
|
[
"BSD-3-Clause"
] | 298
|
2015-01-31T11:43:22.000Z
|
2022-03-15T02:18:21.000Z
|
examples/Kane1985/Chapter5/Ex10.10.py
|
nouiz/pydy
|
20c8ca9fc521208ae2144b5b453c14ed4a22a0ec
|
[
"BSD-3-Clause"
] | 359
|
2015-01-17T16:56:42.000Z
|
2022-02-08T05:27:08.000Z
|
examples/Kane1985/Chapter5/Ex10.10.py
|
nouiz/pydy
|
20c8ca9fc521208ae2144b5b453c14ed4a22a0ec
|
[
"BSD-3-Clause"
] | 109
|
2015-02-03T13:02:45.000Z
|
2021-12-21T12:57:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 10.10 from Kane 1985."""
from __future__ import division
from sympy import expand, solve, symbols, sin, cos, S
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces, partial_velocities
from util import potential_energy
# Define generalized coordinates, speeds, and constants:
q0, q1, q2 = dynamicsymbols('q0:3')
q0d, q1d, q2d = dynamicsymbols('q0:3', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')
LA, LB, LP = symbols('LA LB LP')
p1, p2, p3 = symbols('p1:4')
A1, A2, A3 = symbols('A1:4')
B1, B2, B3 = symbols('B1:4')
C1, C2, C3 = symbols('C1:4')
D11, D22, D33, D12, D23, D31 = symbols('D11 D22 D33 D12 D23 D31')
g, mA, mB, mC, mD, t = symbols('g mA mB mC mD t')
TA_star, TB_star, TC_star, TD_star = symbols('TA* TB* TC* TD*')
## --- reference frames ---
E = ReferenceFrame('E')
A = E.orientnew('A', 'Axis', [q0, E.x])
B = A.orientnew('B', 'Axis', [q1, A.y])
C = B.orientnew('C', 'Axis', [0, B.x])
D = C.orientnew('D', 'Axis', [0, C.x])
## --- points and their velocities ---
pO = Point('O')
pA_star = pO.locatenew('A*', LA * A.z)
pP = pO.locatenew('P', LP * A.z)
pB_star = pP.locatenew('B*', LB * B.z)
pC_star = pB_star.locatenew('C*', q2 * B.z)
pD_star = pC_star.locatenew('D*', p1 * B.x + p2 * B.y + p3 * B.z)
pO.set_vel(E, 0) # Point O is fixed in Reference Frame E
pA_star.v2pt_theory(pO, E, A) # Point A* is fixed in Reference Frame A
pP.v2pt_theory(pO, E, A) # Point P is fixed in Reference Frame A
pB_star.v2pt_theory(pP, E, B) # Point B* is fixed in Reference Frame B
# Point C* is moving in Reference Frame B
pC_star.set_vel(B, pC_star.pos_from(pB_star).diff(t, B))
pC_star.v1pt_theory(pB_star, E, B)
pD_star.set_vel(B, pC_star.vel(B)) # Point D* is fixed rel to Point C* in B
pD_star.v1pt_theory(pB_star, E, B) # Point D* is moving in Reference Frame B
# --- define central inertias and rigid bodies ---
IA = inertia(A, A1, A2, A3)
IB = inertia(B, B1, B2, B3)
IC = inertia(B, C1, C2, C3)
ID = inertia(B, D11, D22, D33, D12, D23, D31)
# inertia[0] is defined to be the central inertia for each rigid body
rbA = RigidBody('rbA', pA_star, A, mA, (IA, pA_star))
rbB = RigidBody('rbB', pB_star, B, mB, (IB, pB_star))
rbC = RigidBody('rbC', pC_star, C, mC, (IC, pC_star))
rbD = RigidBody('rbD', pD_star, D, mD, (ID, pD_star))
bodies = [rbA, rbB, rbC, rbD]
## --- generalized speeds ---
kde = [u1 - dot(A.ang_vel_in(E), A.x),
u2 - dot(B.ang_vel_in(A), B.y),
u3 - dot(pC_star.vel(B), B.z)]
kde_map = solve(kde, [q0d, q1d, q2d])
for k, v in kde_map.items():
kde_map[k.diff(t)] = v.diff(t)
# kinetic energy of robot arm E
K = sum(rb.kinetic_energy(E) for rb in bodies).subs(kde_map)
print('K = {0}'.format(msprint(K)))
# find potential energy contribution of the set of gravitational forces
forces = [(pA_star, -mA*g*E.x), (pB_star, -mB*g*E.x),
(pC_star, -mC*g*E.x), (pD_star, -mD*g*E.x)]
## --- define partial velocities ---
partials = partial_velocities([f[0] for f in forces],
[u1, u2, u3], E, kde_map)
## -- calculate generalized active forces ---
Fr, _ = generalized_active_forces(partials, forces)
V = potential_energy(Fr, [q0, q1, q2], [u1, u2, u3], kde_map)
#print('V = {0}'.format(msprint(V)))
print('\nSetting C = g*mD*p1, α1, α2, α3 = 0')
V = V.subs(dict(zip(symbols('C α1 α2 α3'), [g*mD*p1, 0, 0, 0] )))
print('V = {0}'.format(msprint(V)))
Z1 = u1 * cos(q1)
Z2 = u1 * sin(q1)
Z3 = -Z2 * u2
Z4 = Z1 * u2
Z5 = -LA * u1
Z6 = -(LP + LB*cos(q1))
Z7 = u2 * LB
Z8 = Z6 * u1
Z9 = LB + q2
Z10 = Z6 - q2*cos(q1)
Z11 = u2 * Z9
Z12 = Z10 * u1
Z13 = -sin(q1) * p2
Z14 = Z9 + p3
Z15 = Z10 + sin(q1)*p1 - cos(q1)*p3
Z16 = cos(q1) * p2
Z17 = Z13*u1 + Z14*u2
Z18 = Z15 * u1
Z19 = Z16*u1 - u2*p1 + u3
Z20 = u1 * Z5
Z21 = LB * sin(q1) * u2
Z22 = -Z2 * Z8
Z23 = Z21*u1 + Z2*Z7
Z24 = Z1*Z8 - u2*Z7
Z25 = Z21 - u3*cos(q1) + q2*sin(q1)*u2
Z26 = 2*u2*u3 - Z2*Z12
Z27 = Z25*u1 + Z2*Z11 - Z1*u3
Z28 = Z1*Z12 - u2*Z11
Z29 = -Z16 * u2
Z30 = Z25 + u2*(cos(q1)*p1 + sin(q1)*p3)
Z31 = Z13 * u2
Z32 = Z29*u1 + u2*(u3 + Z19) - Z2*Z18
Z33 = Z30*u1 + Z2*Z17 - Z1*Z19
Z34 = Z31*u1 + Z1*Z18 - u2*Z17
K_expected = S(1)/2*(A1*u1**2 + (B1 + C1)*Z1**2 + (B2 + C2)*u2**2 +
(B3 + C3)*Z2**2 + Z1*(D11*Z1 + D12*u2 + D31*Z2) +
u2*(D12*Z1 + D22*u2 + D23*Z2) +
Z2*(D31*Z1 + D23*u2 + D33*Z2) + mA*Z5**2 +
mB*(Z7**2 + Z8**2) + mC*(Z11**2 + Z12**2 + u3**2) +
mD*(Z17**2 + Z18**2 + Z19**2))
V_expected = g*((mB*LB + mC*Z9 + mD*Z14)*sin(q1) + mD*p1*cos(q1))
assert expand(K - K_expected) == 0
assert expand(V - V_expected) == 0
| 33.521127
| 76
| 0.602731
| 895
| 4,760
| 3.126257
| 0.234637
| 0.021444
| 0.03431
| 0.025733
| 0.15654
| 0.117941
| 0.015726
| 0
| 0
| 0
| 0
| 0.106832
| 0.203571
| 4,760
| 141
| 77
| 33.758865
| 0.631232
| 0.171849
| 0
| 0
| 0
| 0
| 0.049335
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.057143
| 0.038095
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fee67e3507fde627d604b24556de9fa5e1ddebf0
| 1,179
|
py
|
Python
|
src/test/test_pairwiseView.py
|
SensorDX/rainqc
|
d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd
|
[
"Apache-2.0"
] | 1
|
2022-02-16T01:24:17.000Z
|
2022-02-16T01:24:17.000Z
|
src/test/test_pairwiseView.py
|
SensorDX/rainqc
|
d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd
|
[
"Apache-2.0"
] | null | null | null |
src/test/test_pairwiseView.py
|
SensorDX/rainqc
|
d957705e0f1e2e05b3bf23c5b6fd77a135ac69cd
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from src.view import PairwiseView
import numpy as np
class TestPairwiseView(TestCase):
def setUp(self):
self.num_stations = 4
self.n = 200
self.stations = np.random.randn(self.n, self.num_stations)
self.pv = PairwiseView(variable='pr')
def test_make_view(self):
## Slicing in this way preserves the dimension of the array
#ref: https://stackoverflow.com/questions/3551242/numpy-index-slice-without-losing-dimension-information
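        # e.g. with a = np.zeros((200, 4)): a[:, 0].shape == (200,) while a[:, 0:1].shape == (200, 1)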
X = self.pv.make_view(self.stations[:, 0:1:], [self.stations[:, 1:2:]]).x
self.assertEqual(X.shape[0], self.n)
y = self.pv.make_view(self.stations[:,0:1:],[self.stations[:, 1:2:],
self.stations[:, 2:3:],self.stations[:, 3:4:]]).x
self.assertEqual(y.shape, (self.n, self.num_stations-1))
self.assertIsNone(self.pv.label)
## Test multiple pairwise _views
for i in range(self.num_stations-1):
vw = self.pv.make_view(self.stations[:, 0:1:],
[self.stations[:, (i+1):(i+2):]]).x
self.assertEqual(vw.shape[0], self.n )
| 35.727273
| 112
| 0.603053
| 162
| 1,179
| 4.32716
| 0.395062
| 0.154066
| 0.085592
| 0.059914
| 0.233952
| 0.17689
| 0.17689
| 0.17689
| 0.17689
| 0.17689
| 0
| 0.03491
| 0.246819
| 1,179
| 32
| 113
| 36.84375
| 0.754505
| 0.160305
| 0
| 0
| 0
| 0
| 0.002041
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
feea04b5b8f70213610fd5b8726978dd6e62c7f1
| 1,013
|
py
|
Python
|
bmi.py
|
blorincz1/bmi-tool
|
b49e66bac422ab1fe411642937bd0679862b7042
|
[
"MIT"
] | null | null | null |
bmi.py
|
blorincz1/bmi-tool
|
b49e66bac422ab1fe411642937bd0679862b7042
|
[
"MIT"
] | null | null | null |
bmi.py
|
blorincz1/bmi-tool
|
b49e66bac422ab1fe411642937bd0679862b7042
|
[
"MIT"
] | null | null | null |
# prompt user to enter how much they weigh in pounds
weight = int(input ("How much do you weigh (in pounds)? "))
# prompt user to enter their height in inches
height = int(input ("What is your height (in inches)? "))
# this converts weight to kilograms
weight_in_kg = weight / 2.2
# this converts height to meters
height_in_meter = height * 2.54 / 100
# this calculates BMI
bmi = round(weight_in_kg / (height_in_meter ** 2), 1)
if bmi <= 18.5:
    print("Oh no, your BMI is", bmi, "which means you are underweight. Eat some food!")
elif bmi > 18.5 and bmi < 25:
    print('Congratulations! Your BMI is', bmi, 'which means you are in the normal range. Keep up the good work!')
elif bmi >= 25 and bmi < 30:
    print('Uh oh, your BMI is', bmi, 'which means you are overweight. Make healthy choices and exercise!')
elif bmi >= 30:
    print('Oh boy, your BMI is', bmi, 'which means you are obese. GO SEE YOUR DOCTOR~')
else:
    print('Uh oh, something went wrong.')
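# A reusable sketch of the same computation (a hypothetical helper, not part of
# the original script): BMI = weight_kg / height_m ** 2.
def bmi_from_imperial(pounds, inches):
    """Return the body-mass index, given weight in pounds and height in inches."""
    kg = pounds / 2.2              # pounds -> kilograms
    meters = inches * 2.54 / 100   # inches -> meters
    return round(kg / meters ** 2, 1)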
| 31.65625
| 115
| 0.664363
| 167
| 1,013
| 3.982036
| 0.443114
| 0.04812
| 0.054135
| 0.07218
| 0.168421
| 0.168421
| 0.168421
| 0.168421
| 0
| 0
| 0
| 0.030848
| 0.231984
| 1,013
| 31
| 116
| 32.677419
| 0.823907
| 0.181639
| 0
| 0
| 0
| 0
| 0.508861
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
feee07121fe76d5736e52eb5411adc869715e8db
| 7,031
|
py
|
Python
|
day92021.py
|
GeirOwe/adventOfCode
|
fee1420cb8ecce8b7aaf9d48472364be191ca2a2
|
[
"MIT"
] | 1
|
2021-12-20T11:10:59.000Z
|
2021-12-20T11:10:59.000Z
|
day92021.py
|
GeirOwe/adventOfCode
|
fee1420cb8ecce8b7aaf9d48472364be191ca2a2
|
[
"MIT"
] | null | null | null |
day92021.py
|
GeirOwe/adventOfCode
|
fee1420cb8ecce8b7aaf9d48472364be191ca2a2
|
[
"MIT"
] | 1
|
2021-12-02T14:40:12.000Z
|
2021-12-02T14:40:12.000Z
|
# Day9 - 2021 Advent of code
# source: https://adventofcode.com/2021/day/9
import os
import numpy as np
def clear_console():
os.system('clear')
print('< .... AoC 2021 Day 9, part 1 .... >')
print()
return
def find_low_points(the_map, numOfRows, numOfCols):
low_points_list = []
row = 0
lastRow = numOfRows -1 #since we start at zero
while row < numOfRows:
col = 0
while col < numOfCols:
if row == 0:
#process first row
if col == 0:
#process first col
if (the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
elif col == (numOfCols -1):
#process last col
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
else:
#process the other cols
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
elif row == lastRow:
#process last row
if col == 0:
#process first col
if (the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row-1, col] > the_map[row, col]): # signal above
low_points_list.append(the_map[row, col])
elif col == (numOfCols -1):
#process last col
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row-1, col] > the_map[row, col]): # signal above
low_points_list.append(the_map[row, col])
else:
#process the other cols
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row-1, col] > the_map[row, col]): # signal above
low_points_list.append(the_map[row, col])
else:
#process the other rows
if col == 0:
#process first col
if (the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row-1, col] > the_map[row, col] and # signal above
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
elif col == (numOfCols -1):
#process last col
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row-1, col] > the_map[row, col] and # signal above
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
else:
#process the other cols
if (the_map[row, col-1] > the_map[row, col] and #signal to the left
the_map[row, col+1] > the_map[row, col] and #signal to the right
the_map[row-1, col] > the_map[row, col] and # signal above
the_map[row+1, col] > the_map[row, col]): #signal below
low_points_list.append(the_map[row, col])
col += 1
row += 1
return low_points_list
def summarize_risk(low_points_list):
sumRiskLowPoints = 0
for element in low_points_list:
# The risk level of a low point is 1 plus its height.
sumRiskLowPoints = sumRiskLowPoints + element + 1
return sumRiskLowPoints
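# e.g. summarize_risk([1, 2, 3]) -> (1 + 1) + (2 + 1) + (3 + 1) = 9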
def process_the_data(the_map, numOfRows, numOfCols):
sumRiskLowPoints = 0
# Your first goal is to find the low points - the locations that are lower than any of its adjacent locations.
low_points_list = find_low_points(the_map, numOfRows, numOfCols)
print('\nthe low points -> ', low_points_list,'\n')
# What is the sum of the risk levels of all low points on your heightmap
sumRiskLowPoints = summarize_risk(low_points_list)
return sumRiskLowPoints
def build_map(theData):
numOfRows = len(theData)
map_list = []
for row in theData:
numOfCols = len(row)
i = 0 #the positio in the row
while i < numOfCols:
#add numbers on the borad to along list - it will be reshaped into a 5x5 board in numpy array
map_list.append(int(row[i]))
i += 1
#move them into numpy arrays - to make it easier to process
    # reshape the flat list into a 2-D numpy array: (rows, columns)
the_map = np.array(map_list, dtype = "int").reshape(numOfRows, numOfCols)
return the_map, numOfRows, numOfCols
def get_the_data():
#read the test puzzle input
#theData = open('day92021_test_puzzle_input.txt', 'r')
#read the puzzle input
theData = open('day92021_puzzle_input.txt', 'r')
#move data into a list - read a line and remove lineshift
data_list = []
for element in theData:
elementTrimmed = element.strip()
data_list.append(elementTrimmed)
return data_list
def start_the_engine():
#get the data and read them into a list
theData = get_the_data()
the_map, numOfRows, numOfCols = build_map(theData)
#process the data and return the answer
valueX = process_the_data(the_map, numOfRows, numOfCols)
# Next, you need to find the largest basins. The size of a basin is the number
# of locations within the basin, including the low point.
# Find the three largest basins and multiply their sizes together.
#find adjacent cells:
#def adj_finder(matrix, position):
#adj = []
#for dx in range(-1, 2):
# for dy in range(-1, 2):
# rangeX = range(0, matrix.shape[0]) # X bounds
# rangeY = range(0, matrix.shape[1]) # Y bounds
#
# (newX, newY) = (position[0]+dx, position[1]+dy) # adjacent cell
#
# if (newX in rangeX) and (newY in rangeY) and (dx, dy) != (0, 0):
# adj.append((newX, newY))
#
#return adj
print('\nthe sum of the risk levels of all low points -> ', valueX,'\n')
return
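# A more compact sketch of the same low-point scan (illustrative only; it assumes
# the same 2-D numpy `the_map` layout as above and is not called by this script):
def find_low_points_compact(the_map):
    rows, cols = the_map.shape
    lows = []
    for r in range(rows):
        for c in range(cols):
            # keep only the up/down/left/right neighbours that are inside the map
            neighbours = [the_map[nr, nc]
                          for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1))
                          if 0 <= nr < rows and 0 <= nc < cols]
            if all(n > the_map[r, c] for n in neighbours):
                lows.append(the_map[r, c])
    return lows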
#let's start
if __name__ == '__main__':
clear_console()
start_the_engine()
| 43.94375
| 114
| 0.54345
| 938
| 7,031
| 3.926439
| 0.185501
| 0.104263
| 0.139289
| 0.14662
| 0.50801
| 0.4776
| 0.469183
| 0.428455
| 0.428455
| 0.411078
| 0
| 0.018275
| 0.361826
| 7,031
| 160
| 115
| 43.94375
| 0.802541
| 0.286872
| 0
| 0.49
| 0
| 0
| 0.030763
| 0.00506
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07
| false
| 0
| 0.02
| 0
| 0.16
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
feee0df189f0b37958204462a48904755aa19b63
| 7,420
|
py
|
Python
|
cogs/Console.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
cogs/Console.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
cogs/Console.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from datetime import date, datetime
# Class handles commands related to console players
class ConsoleCommands(commands.Cog, name="Console Commands"):
"""Console Commands"""
def __init__(self, bot):
self.bot = bot
# Returns a list of embeds of console players so they can store their Paladins id's in the bot
@commands.command(name='console', pass_context=True, ignore_extra=False, aliases=["Console"])
@commands.cooldown(3, 30, commands.BucketType.user)
async def console(self, ctx, player_name, platform: str):
async with ctx.channel.typing():
platform = platform.lower()
if platform == "xbox":
platform = "10"
elif platform == "ps4":
platform = "9"
elif platform == "switch":
platform = "22"
else:
await ctx.send("```Invalid platform name. Valid platform names are:\n1. Xbox\n2. PS4\n3. Switch```")
return None
# players = paladinsAPI.getPlayerId(player_name, "steam")
# players = paladinsAPI.getPlayerId(player_name, platform)
players = self.bot.paladinsAPI.searchPlayers(player_name)
if not players:
await ctx.send("Found `0` players with the name `{}`.".format(player_name))
return None
# Hi-Rez endpoint down.
if players is None:
await ctx.send("A Hi-Rez endpoint is down meaning this command won't work. "
"Please don't try again for a while and give Hi-Rez a few hours to get the "
"endpoint online again.")
return None
players = [player for player in players if player.playerName.lower() == player_name.lower() and
player['portal_id'] == platform]
num_players = len(players)
if num_players > 20: # Too many players...we must match case exactly
await ctx.send("Found `{}` players with the name `{}`. Switching to case sensitive mode..."
.format(num_players, player_name))
players = [player for player in players if player.playerName == player_name and
player['portal_id'] == platform]
num_players = len(players)
await ctx.send("Found `{}` players with the name `{}`."
.format(num_players, player_name))
if num_players > 20:
await ctx.send("```There are too many players with the name {}:\n\nPlease look on PaladinsGuru to "
"find the Player ID```https://paladins.guru/search?term={}&type=Player"
.format(player_name, player_name))
return None
ss = ""
recent_player = []
for player in players:
ss += str(player) + "\n"
player = self.bot.paladinsAPI.getPlayer(player=player.playerId)
current_date = date.today()
current_time = datetime.min.time()
today = datetime.combine(current_date, current_time)
last_seen = player.lastLoginDatetime
last_seen = (today - last_seen).days
# only add players seen in the last 90 days
if last_seen <= 90:
recent_player.append(player)
await ctx.send("Found `{}` recent player(s) `(seen in the last 90 days)`".format(len(recent_player)))
for player in recent_player:
current_date = date.today()
current_time = datetime.min.time()
today = datetime.combine(current_date, current_time)
last_seen = player.lastLoginDatetime
last_seen = (today - last_seen).days
if last_seen <= 0:
last_seen = "Today"
else:
last_seen = "{} days ago".format(last_seen)
embed = discord.Embed(
title=player.playerName,
description="↓↓↓ Player ID ↓↓↓```fix\n{}```".format(player.playerId),
colour=discord.colour.Color.dark_teal(),
)
embed.add_field(name='Last Seen:', value=last_seen, inline=True)
embed.add_field(name='Account Level:', value=player.accountLevel, inline=True)
embed.add_field(name='Hours Played:', value=player.hoursPlayed, inline=True)
embed.add_field(name='Account Created:', value=player.createdDatetime, inline=True)
await ctx.send(embed=embed)
# Returns an embed of how to format a console name
@commands.command(name='console_name')
async def usage(self, ctx):
embed = discord.Embed(
title="How to format your console name in PaladinsAssistant.",
colour=discord.Color.dark_teal(),
description="\u200b"
)
embed.add_field(name="To use a console name you must provide your name and platform surrounded in quotes.",
value="So for example a console player with the name `zombie killer` who plays on the "
"`Switch` would type their name as follows in the stats command.\n\n"
"`>>stats \"Zombie Killer Switch\"`\n\u200b", inline=False)
embed.add_field(
name="Now if you want to make your life easier I would recommend storing/linking your name to the "
"PaladinsAssistant.",
value="You can do this by using the `>>console` command to look up your Paladins `player_id` and then"
"using the `>>store` command by doing `>>store your_player_id`. Then in commands you can just use "
"the word `me` in place of your console name and platform.\n\u200b", inline=False)
embed.add_field(name="Below are the 3 steps (`with a picture`) of what you need to do if you are directed"
" to use Guru's site to find a console `player_id from the console command.`",
value="```md\n"
"1. Use the link generated from the command or go to https://paladins.guru/ and type "
"in the console player's name and then search.\n"
"2. Locate the account that you want and click on the name.\n"
"3. Then copy the number right before the player name.\n"
"4. Congrats you now have the console's players magical number.\n```", inline=False)
embed.set_thumbnail(
url="https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Androxus.png")
embed.set_image(
url="https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Console.png")
embed.set_footer(text="If you still have questions feel free to message me @ FeistyJalapeno#9045. "
"I am a very busy but will try to respond when I can.")
await ctx.send(embed=embed)
# Add this class to the cog list
def setup(bot):
bot.add_cog(ConsoleCommands(bot))
| 51.172414
| 119
| 0.568329
| 876
| 7,420
| 4.744292
| 0.302511
| 0.025024
| 0.025987
| 0.028633
| 0.281521
| 0.215351
| 0.199711
| 0.183349
| 0.150144
| 0.069297
| 0
| 0.009154
| 0.337466
| 7,420
| 144
| 120
| 51.527778
| 0.835028
| 0.062399
| 0
| 0.25
| 0
| 0.017857
| 0.322195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0.008929
| 0.026786
| 0
| 0.089286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
feeebbc5a748ddb1157bf558ba36f40a432ef1a6
| 666
|
py
|
Python
|
documentation/demonstrations/abfFromWks.py
|
swharden/PyOriginTools
|
536fb8e11234ffdc27e26b1800e0358179ca7d26
|
[
"MIT"
] | 11
|
2018-04-22T20:34:53.000Z
|
2022-03-12T12:02:47.000Z
|
documentation/demonstrations/abfFromWks.py
|
swharden/PyOriginTools
|
536fb8e11234ffdc27e26b1800e0358179ca7d26
|
[
"MIT"
] | 3
|
2018-01-11T14:54:46.000Z
|
2018-04-26T13:45:18.000Z
|
documentation/demonstrations/abfFromWks.py
|
swharden/PyOriginTools
|
536fb8e11234ffdc27e26b1800e0358179ca7d26
|
[
"MIT"
] | 3
|
2019-05-14T13:36:14.000Z
|
2020-09-02T16:13:57.000Z
|
R"""
try to get the worksheet name from a worksheet
run -pyf C:\Users\swharden\Documents\GitHub\PyOriginTools\documentation\demonstrations\abfFromWks.py
"""
import sys
if False:
# this code block will NEVER actually run
sys.path.append('../') # helps my IDE autocomplete
sys.path.append('../../') # helps my IDE autocomplete
sys.path.append('../../../') # helps my IDE autocomplete
import PyOriginTools as OR
import PyOrigin
if __name__=="__main__":
bookName,sheetName=OR.activeBookAndSheet()
worksheetPage=PyOrigin.WorksheetPages(bookName)
print(worksheetPage[0])
# for item in worksheetPage:
# print(item)
print("DONE")
| 30.272727
| 100
| 0.711712
| 82
| 666
| 5.682927
| 0.634146
| 0.045064
| 0.083691
| 0.11588
| 0.225322
| 0.225322
| 0.225322
| 0.225322
| 0.225322
| 0.225322
| 0
| 0.001789
| 0.160661
| 666
| 22
| 101
| 30.272727
| 0.831843
| 0.472973
| 0
| 0
| 0
| 0
| 0.088496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef10be702d297731f0eada02c3e9a2ec0107a0f
| 5,932
|
py
|
Python
|
traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py
|
lzzppp/DERT
|
e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6
|
[
"MIT"
] | 7
|
2020-08-21T02:19:15.000Z
|
2021-12-30T02:02:40.000Z
|
traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py
|
lzzppp/DERT
|
e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6
|
[
"MIT"
] | 1
|
2021-04-21T13:50:53.000Z
|
2021-04-25T02:34:48.000Z
|
traj_er/t2vec_experience/classify_exp/tested_feature_extractor.py
|
lzzppp/DERT
|
e1f9ee2489f76e2ed741d6637fd2b1e8bb225fb6
|
[
"MIT"
] | 1
|
2020-12-02T07:15:13.000Z
|
2020-12-02T07:15:13.000Z
|
import numpy as np
import h5py
from datetime import datetime
from geopy.distance import distance
import argparse
import pickle
import json
import os
class TestedFeatureExtractor:
driving_time_norm = 1
def __init__(self, selected_feature, norm_param):
self.selected_feature = selected_feature
self._set_norm_param(norm_param)
def _set_norm_param(self, norm_param):
self.norm_driving_time = norm_param["driving_time"]
self.norm_driving_distance = norm_param["driving_distance"]
self.norm_speed = norm_param["speed"]
# def _set_param_dims(self):
# dim = 0
# self.norm_feature_dim = {}
# if 'time_of_day' in self.selected_feature:
# dim += 12
# if 'day_week' in self.selected_feature:
# dim += 7
# for feature in ['trip_time']:
def extract_from_h5(self, h5_path, save_path, number='all'):
f = h5py.File(h5_path, 'r')
traj_nums = f.attrs['traj_nums']
func = self.spatial_temporal_features_func(f)
if number == 'all':
out = np.array(list(map(func, range(traj_nums))))
elif isinstance(number, int):
out = np.array(list(map(func, range(number))))
else:
raise Exception("number of needed trajectories should be set properly")
f.close()
np.save(save_path, out)
def spatial_temporal_features_func(self, f):
def norma_traj(point):
"""If traj point in normal range. (lon, lat)"""
return point[0] >= -180 and point[0] <= 180 and point[1] >= -90 and point[1] <= 90
def func(tid):
trajs = np.array(f['trips/%d' % tid])
times = np.array(f['timestamps/%d' % tid])
trajs = np.array(list(filter(norma_traj, trajs)))
# time of day, day of week
out_feature = []
if 'time_of_day' in self.selected_feature or 'day_of_week' in self.selected_feature:
day_hour, date_week = self.unix_to_weekday_and_hour(times[0])
if 'time_of_day' in self.selected_feature:
out_feature += self.one_hot(day_hour, 12)
if 'day_of_week' in self.selected_feature:
out_feature += self.one_hot(date_week, 7)
if 'trip_time' in self.selected_feature:
out_feature.append(self._normalize(times[-1] - times[0], self.norm_driving_time))
            if 'avg_speed' in self.selected_feature or 'max_speed' in self.selected_feature or 'driving_distance' in self.selected_feature:
out_feature += self.driving_feature(trajs, times, len(trajs) == 0)
return out_feature
return func
def driving_feature(self, trips, times, abnormal=False):
distances = [coord_distance(coords) for coords in zip(trips[1:], trips[:-1])]
seg_times = times[1:] - times[:-1]
speeds = [distances[i] / seg_times[i] if seg_times[i] != 0.0 else 0.0 for i in range(len(distances))]
out_feature = []
if 'driving_distance' in self.selected_feature:
if not abnormal:
out_feature.append(self._normalize(sum(distances), self.norm_driving_distance))
else:
out_feature.append(0.0)
if 'avg_speed' in self.selected_feature:
if not abnormal:
out_feature.append(self._normalize(np.mean(speeds), self.norm_speed))
else:
out_feature.append(0.0)
if 'max_speed' in self.selected_feature:
if not abnormal:
out_feature.append(self._normalize(np.max(speeds), self.norm_speed))
else:
out_feature.append(0.0)
return out_feature
def unix_to_weekday_and_hour(self, unix_time):
"""Get hour and day of the week
For hour of day, it will be divided to 12 parts
Return:
[day_part, day_of_week]
"""
date = datetime.fromtimestamp(unix_time)
return [date.hour // 2, date.weekday()]
def one_hot(self, id, len):
return [0 if i != id else 1 for i in range(len)]
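    # e.g. one_hot(2, 5) -> [0, 0, 1, 0, 0]  (illustrative)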
def _normalize(self, value, max_value):
        # In this case, all values will be above 0, and we want to normalize them to [-1, 1];
        # normalizing a whole column at once would be more efficient
if value > max_value:
return 1.0
else:
return value / max_value * 2 - 1.0
def coord_distance(coords):
"""return distance between two points
geopy.distance.distance accept [lat, lon] input, while this dataset is [lon, lat]
"""
return distance((coords[0][1], coords[0][0]), (coords[1][1], coords[1][0])).meters
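# e.g. coord_distance(((-8.61, 41.14), (-8.62, 41.15))) returns the geodesic
# separation in meters of two illustrative (lon, lat) points near Porto.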
def get_saved_path(city_name, train_or_test):
data_root = '/data3/zhuzheng/trajecotry/feature'
return os.path.join(data_root, city_name + '_' + train_or_test)
parser = argparse.ArgumentParser(description="extral trajectory's temporal related feature")
parser.add_argument("-region_name", type=str, default="region_porto_top100", help="")
args = parser.parse_args()
if __name__ == "__main__":
selected_feature = ['time_of_day', 'day_of_week', 'avg_speed', 'max_speed', 'trip_distance', 'trip_time']
with open('../hyper-parameters.json', 'r') as f:
hyper_param = json.loads(f.read())
with open('normalize_param.json', 'r') as f:
norm_param = json.loads(f.read())
feature_extractor = TestedFeatureExtractor(selected_feature, norm_param[args.region_name])
train_h5_path = hyper_param[args.region_name]['filepath']
test_h5_path = hyper_param[args.region_name]['testpath']
feature_extractor.extract_from_h5(train_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'train'))
feature_extractor.extract_from_h5(test_h5_path, get_saved_path(hyper_param[args.region_name]['cityname'], 'test'))
| 39.546667
| 134
| 0.630142
| 810
| 5,932
| 4.364198
| 0.22716
| 0.076379
| 0.080622
| 0.077228
| 0.334371
| 0.254314
| 0.233946
| 0.153324
| 0.132108
| 0.108911
| 0
| 0.017265
| 0.257923
| 5,932
| 150
| 135
| 39.546667
| 0.785779
| 0.110418
| 0
| 0.151515
| 0
| 0
| 0.097153
| 0.011158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.080808
| 0.010101
| 0.323232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef114610ec0d475191a1220ffe83885004935bc
| 2,545
|
py
|
Python
|
psystem/plot.py
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null |
psystem/plot.py
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null |
psystem/plot.py
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null |
from clawpack.petclaw.solution import Solution
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pl
from matplotlib import rc
import numpy as np
import os
def plot_q(frame,
file_prefix='claw',
path='./_output/',
xShift=0.0,
xlimits=None,
ylimits=None,
name=None,
plot_strain=True,
plot_path='',
ylabel='$\sigma$',
plot_title=True,
follow=False,
follow_window=50,
X=None, Eps=None, Vel=None, Sigma=None):
import sys
sys.path.append('.')
import psystem
sol=Solution(frame,file_format='petsc',read_aux=False,path=path,file_prefix=file_prefix)
x=sol.state.grid.x.centers
eps=sol.state.q[0,:]
# get stress
sigma = psystem.stress(sol.state)
    # zero-pad the frame number to name the file
    str_frame = str(frame).zfill(4)
# create the figure and plot the solution
pl.figure(figsize=(15,5))
if plot_strain:
pl.plot(x+xShift,eps,'-r',lw=1)
pl.plot(x+xShift,sigma,'-k',lw=3)
# format the plot
if plot_title:
pl.title("t= "+str(sol.state.t),fontsize=25)
#
if ylabel is not None:
pl.ylabel(ylabel,fontsize=30)
#
pl.xticks(size=25); pl.yticks(size=25)
if follow is True:
amax = sigma.argmax()
xmax = x[amax]
xlimits = [0,0]
xlimits[0] = xmax-follow_window
xlimits[1] = xmax+follow_window
if xlimits is not None:
xlim=[xlimits[0], xlimits[1]]
else:
xlim=[np.min(x),np.max(x)]
if ylimits is not None:
ylim=[ylimits[0], ylimits[1]]
else:
ylim=[np.min(sigma),np.max(sigma)]
pl.tight_layout()
pl.axis([xlim[0]+xShift,xlim[1]+xShift,ylim[0],ylim[1]])
pl.gca().ticklabel_format(useOffset=False)
if name is None:
if follow:
pl.savefig('./_plots_follow'+plot_path+'/sigma_'+str_frame+'.png',bbox_inches="tight")
else:
pl.savefig('./_plots'+plot_path+'/sigma_'+str_frame+'.png',bbox_inches="tight")
else:
pl.savefig(name+'.png',bbox_inches="tight")
#
pl.close()
# save the data
if X is not None:
X.append(x)
Eps.append(eps)
Vel.append(sol.state.q[1,:])
Sigma.append(sigma)
#
return x,eps,sigma
#
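A minimal driver sketch for plot_q, assuming PETSc output frames exist under ./_output/ and the ./_plots directory has been created; the frame count is a placeholder:

# hypothetical driver: plot the first few frames and collect the data
X, Eps, Vel, Sigma = [], [], [], []
for frame in range(5):  # placeholder frame count
    plot_q(frame, path='./_output/', plot_path='', X=X, Eps=Eps, Vel=Vel, Sigma=Sigma)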
| 27.074468
| 98
| 0.574853
| 356
| 2,545
| 4.016854
| 0.30618
| 0.055944
| 0.025175
| 0.037762
| 0.072727
| 0.072727
| 0.072727
| 0.072727
| 0.072727
| 0.072727
| 0
| 0.025219
| 0.283301
| 2,545
| 93
| 99
| 27.365591
| 0.758772
| 0.044794
| 0
| 0.065789
| 0
| 0
| 0.044628
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| false
| 0
| 0.105263
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef15a29a302098c87559c64e7c95311ad1af7bc
| 2,285
|
py
|
Python
|
deepl/layers/utils.py
|
akamnev/deepl
|
392c757e21dec7bdd72cb0f71298389ef0d13968
|
[
"MIT"
] | 1
|
2020-06-08T14:06:36.000Z
|
2020-06-08T14:06:36.000Z
|
deepl/layers/utils.py
|
akamnev/deepl
|
392c757e21dec7bdd72cb0f71298389ef0d13968
|
[
"MIT"
] | null | null | null |
deepl/layers/utils.py
|
akamnev/deepl
|
392c757e21dec7bdd72cb0f71298389ef0d13968
|
[
"MIT"
] | null | null | null |
import torch
from typing import List
def get_min_value(tensor):
if tensor.dtype == torch.float16:
min_value = -1e4
elif tensor.dtype == torch.float32:
min_value = -1e9
else:
raise ValueError("{} not recognized. `dtype` "
"should be set to either `torch.float32` "
"or `torch.float16`".format(tensor.dtype))
return min_value
def get_attention_mask(input_ids):
max_length = max([len(x) for x in input_ids])
attention_mask = [[1.0] * len(x) + [0.0] * (max_length - len(x))
for x in input_ids]
return attention_mask
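A quick illustration of the padding mask on a made-up ragged batch:

# hypothetical ragged batch of token ids
input_ids = [[5, 7, 9], [3, 4]]
print(get_attention_mask(input_ids))  # [[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]]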
def get_vector_attention_mask(input_ids):
max_length = max([len(x) for x in input_ids])
attention_mask = [[1.0] * (len(x) + 1) +
[0.0] * (max_length - len(x))
for x in input_ids]
return attention_mask
def prune_input_sequence(input_ids, max_length):
fval = []
for ids in input_ids:
if len(ids) > max_length:
ids = ids[:max_length]
fval.append(ids)
return fval
def kl_div(mu, sigma):
"""
KL-divergence between a diagonal multivariate normal,
and a standard normal distribution (with zero mean and unit variance)
"""
sigma_2 = sigma * sigma
kld = 0.5 * torch.mean(mu * mu + sigma_2 - torch.log(sigma_2) - 1.0)
return kld
def kld_gaussian(mu, log_sigma, nu=0.0, rho=1.0):
"""
KL-divergence between a diagonal multivariate normal,
and a standard normal distribution
"""
device = mu.device
nu = torch.as_tensor(nu, device=device)
rho = torch.as_tensor(rho, device=device)
delta_variance = 2.0 * (log_sigma - torch.log(rho))
variance_term = torch.sum(torch.exp(delta_variance) - delta_variance)
mean_term = torch.sum((mu - nu) ** 2 / rho ** 2)  # scale by the variance rho**2, not the std rho
return 0.5 * (mean_term + variance_term - mu.numel())  # the -1 constant applies once per dimension
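A small sanity check of the corrected formula above: against the standard-normal default prior (nu=0, rho=1), a standard-normal posterior should give zero divergence:

mu = torch.zeros(4)
log_sigma = torch.zeros(4)  # sigma = 1
print(kld_gaussian(mu, log_sigma))  # tensor(0.)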
def rand_epanechnikov_trig(shape: List[int], device: torch.device, dtype: torch.dtype = torch.float32):
# https://stats.stackexchange.com/questions/6643/what-is-the-closed-form-solution-for-the-inverse-cdf-for-epanechnikov
xi = torch.rand(shape,
dtype=dtype,
device=device)
xi = 2 * torch.sin(torch.asin(2 * xi - 1) / 3)
return xi
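The inverse-CDF trick above can be checked empirically: the Epanechnikov density on [-1, 1] has mean 0 and variance 1/5, so a large sample should come close (a rough standalone check, not part of the original module):

xi = rand_epanechnikov_trig([100_000], device=torch.device('cpu'))
print(xi.mean().item(), xi.var().item())  # approximately 0.0 and 0.2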
| 31.736111
| 122
| 0.617068
| 325
| 2,285
| 4.187692
| 0.310769
| 0.047024
| 0.044085
| 0.023512
| 0.289493
| 0.289493
| 0.289493
| 0.289493
| 0.289493
| 0.289493
| 0
| 0.029167
| 0.26477
| 2,285
| 71
| 123
| 32.183099
| 0.780952
| 0.14442
| 0
| 0.125
| 0
| 0
| 0.044363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.041667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef388e9c0a8cc5d31503d18e82095b931d385f7
| 13,762
|
py
|
Python
|
main.py
|
ooshyun/filterdesign
|
59dbea191b8cd44aa9f2d02d3787b5805d486ae2
|
[
"MIT"
] | 1
|
2021-12-27T00:38:32.000Z
|
2021-12-27T00:38:32.000Z
|
main.py
|
ooshyun/FilterDesign
|
7162ccad8e1ae8aebca370da56be56603b9e8b24
|
[
"MIT"
] | null | null | null |
main.py
|
ooshyun/FilterDesign
|
7162ccad8e1ae8aebca370da56be56603b9e8b24
|
[
"MIT"
] | null | null | null |
import os
import json
import numpy as np
from numpy import log10, pi, sqrt
import scipy.io.wavfile as wav
from scipy.fftpack import *
from src import (
FilterAnalyzePlot,
WaveProcessor,
ParametricEqualizer,
GraphicalEqualizer,
cvt_char2num,
maker_logger,
DEBUG,
)
if DEBUG:
    PRINTER = maker_logger()
else:
    # fall back to a plain logger so PRINTER is always defined (it is used unconditionally below)
    import logging
    PRINTER = logging.getLogger(__name__)
LIBRARY_PATH = "./"  # First of all, set the library (i.e. this project) path
def filter_plot():
from src import lowpass, highpass, bandpass, notch, peaking, shelf, allpass
data_path = os.path.join(LIBRARY_PATH, "test/data/wav/")
file_name = "White Noise.wav"
result_path = ""
infile_path = os.path.join(data_path, file_name)
fs, data = wav.read(infile_path)
fft_size = 256
fft_band = np.arange(1, fft_size / 2 + 1) * fs / fft_size
# fc_band = np.arange(30, 22060, 10)
fc_band = np.array([100, 1000, 2000, 3000, 5000])
ploter = FilterAnalyzePlot(sample_rate=fs)
"""Plot the several filters
"""
fc = 1033.59375
gain = 6
Q = 1 / np.sqrt(2)
name = "Shelf Filter"
lowpass_filter = lowpass(Wn=2 * fc / fs, Q=Q)
highpass_filter = highpass(Wn=2 * fc / fs, Q=Q)
bandpass_filter = bandpass(Wn=2 * fc / fs, Q=Q)
notch_filter = notch(Wn=2 * fc / fs, Q=Q)
peak_filter = peaking(Wn=2 * fc / fs, Q=Q, dBgain=gain)
shelf_filter = shelf(Wn=2 * fc / fs, Q=Q, dBgain=gain)
allpass_filter = allpass(Wn=2 * fc / fs, Q=Q)
ploter.filters = peak_filter
ploter.plot(type=["freq", "phase", "pole"], save_path=None, name=name)
def filter_process():
"""Comparison between time domain and frequency domain using WavProcessor class
"""
from src import peaking, shelf
data_path = LIBRARY_PATH + "/test/data/wav/"
file_name = "White Noise.wav"
outfile_path = LIBRARY_PATH + "/test/result/wav/"
infile_path = os.path.join(data_path, file_name)
fs, data = wav.read(infile_path)
gain = 6
fc = 1033.59375
# time
wave_processor = WaveProcessor(wavfile_path=infile_path)
outfile_name = "White Noise_peak_time_domain.wav"
peak_filter = peaking(Wn=2 * fc / fs, Q=1 / np.sqrt(2), dBgain=gain)
wave_processor.filter_time_domain_list = peak_filter
wave_processor.run(savefile_path=outfile_path + outfile_name)
if len(wave_processor.time_filter_time) != 0:
print(
sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
)
# frequency
wave_processor = WaveProcessor(wavfile_path=infile_path)
outfile_name = "White Noise_peaking_freq_domain.wav"
fft_size = 256  # the FFT size must be fixed before running
fft_band = np.arange(1, fft_size // 2 + 1) * fs / fft_size
coeff_frequency = np.ones(shape=(fft_size // 2 + 1,))
coeff_frequency[np.argwhere(fft_band == fc)] = 10 ** (gain / 20)
wave_processor.filter_freq_domain_list = coeff_frequency
wave_processor.run(savefile_path=outfile_path + outfile_name)
if len(wave_processor.time_filter_freq) != 0:
print(
sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
)
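The frequency-domain path applies the gain only at the bin that equals fc, after converting from dB to a linear factor; a quick standalone check of that conversion:

gain_db = 6
linear = 10 ** (gain_db / 20)
print(round(linear, 3))  # 1.995 -- +6 dB roughly doubles the amplitude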
def serial_equalizer_plot():
"""Test frequency response for IIR filter cascade
"""
from src import peaking
data_path = LIBRARY_PATH + "/test/data/wav/"
infile_path = os.path.join(data_path, "White Noise.wav")
fs, _ = wav.read(infile_path)
ploter = FilterAnalyzePlot()
parametric_filter = ParametricEqualizer(fs)
fc_band = np.array([1000, 4000, 8000])
for f in fc_band:
peak_filter = peaking(Wn=2 * f / fs, dBgain=6, Q=4)
parametric_filter.coeff = peak_filter
ploter.filters = parametric_filter
ploter.plot(type=["freq", "phase", "pole"])
def serial_equalizer_process():
"""Test processing to wav for IIR filter cascade
"""
from src import peaking
data_path = LIBRARY_PATH + "/test/data/wav/"
result_path = LIBRARY_PATH + "/test/result/wav/"
infile_path = os.path.join(data_path, "White Noise.wav")
fs, _ = wav.read(infile_path)
wave_processor = WaveProcessor(wavfile_path=infile_path)
fc_band = np.array([1000, 4000, 8000])
for f in fc_band:
peak_filter = peaking(Wn=2 * f / fs, dBgain=12, Q=4)
b, a = peak_filter
wave_processor.filter_time_domain_list = b, a
# wave_processor.graphical_equalizer = True
wave_processor.run(
savefile_path=result_path + "/whitenoise_3peak_250_2000_8000.wav"
)
if len(wave_processor.time_filter_freq) != 0:
print(
sum(wave_processor.time_filter_freq) / len(wave_processor.time_filter_freq)
)
if len(wave_processor.time_filter_time) != 0:
print(
sum(wave_processor.time_filter_time) / len(wave_processor.time_filter_time)
)
def generator_test_vector_graphical_equalizer():
"""Generate test vectors for the parallel-structure equalizer, also called a graphical equalizer
"""
sample_rate = 44100
# cut-off frequency, case 1
cutoff_frequency = np.array(
(
20,
25,
31.5,
40,
50,
63,
80,
100,
125,
160,
200,
250,
315,
400,
500,
630,
800,
1000,
1250,
1600,
2000,
2500,
3150,
4000,
5000,
6300,
8000,
10000,
12500,
16000,
20000,
)
)
# gain
num_case = 5
test_gain_list = np.zeros(shape=(num_case, len(cutoff_frequency)))
# case 1
test_gain_list[0, :] = np.array(
[
12,
12,
10,
8,
4,
1,
0.5,
0,
0,
6,
6,
12,
6,
6,
-12,
12,
-12,
-12,
-12,
-12,
0,
0,
0,
0,
-3,
-6,
-9,
-12,
0,
0,
0,
]
)
# case 2
test_gain_list[1, 0::2] = 12
test_gain_list[1, 1::2] = -12
# case 3
test_gain_list[2, np.where(cutoff_frequency == 2000)] = 12
# case 4
test_gain_list[3, :] = np.ones_like(cutoff_frequency) * 12
# case 5
test_gain_list[4, 0::3] = 0
test_gain_list[4, 1::3] = 0
test_gain_list[4, 2::3] = 12
# cut-off frequency case 2: cutoff frequencies with bandwidth
f_bandwidth = np.array(
[
2.3,
2.9,
3.6,
4.6,
5.8,
7.3,
9.3,
11.6,
14.5,
18.5,
23.0,
28.9,
36.5,
46.3,
57.9,
72.9,
92.6,
116,
145,
185,
232,
290,
365,
463,
579,
730,
926,
1158,
1447,
1853,
2316,
]
)
f_upperband = np.array(
[
22.4,
28.2,
35.5,
44.7,
56.2,
70.8,
89.1,
112,
141,
178,
224,
282,
355,
447,
562,
708,
891,
1120,
1410,
1780,
2240,
2820,
3550,
4470,
5620,
7080,
8910,
11200,
14100,
17800,
22050,
]
)
f_lowerband = np.zeros_like(f_upperband)
f_lowerband[0] = 17.5
f_lowerband[1:] = f_upperband[:-1]
cutoff_frequency_bandwidth = np.zeros((2, len(cutoff_frequency)))
cutoff_frequency_bandwidth[0, :] = np.append(10, f_upperband[:-1])
cutoff_frequency_bandwidth[1, :] = cutoff_frequency
cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.reshape(
(cutoff_frequency_bandwidth.shape[0] * cutoff_frequency_bandwidth.shape[1],),
order="F",
)
test_gain_bandwidth_list = np.zeros(
shape=(num_case, cutoff_frequency_bandwidth.shape[0])
)
for id_test_gain, test_gain in enumerate(test_gain_list):
buf_test_gain = np.zeros((2, len(cutoff_frequency)))
buf_test_gain[0, :] = test_gain
buf_test_gain[1, :] = test_gain
buf_test_gain = buf_test_gain.reshape(
(buf_test_gain.shape[0] * buf_test_gain.shape[1],), order="F"
)
buf_test_gain[1:] = buf_test_gain[:-1]
buf_test_gain[0] = 0
test_gain_bandwidth_list[id_test_gain, :] = buf_test_gain[:]
cutoff_frequency = cutoff_frequency.tolist()
test_gain_list = test_gain_list.tolist()
cutoff_frequency_bandwidth = cutoff_frequency_bandwidth.tolist()
test_gain_bandwidth_list = test_gain_bandwidth_list.tolist()
test_vector_graphical_equalizer = json.dumps(
{
"1": {
"sample_rate": sample_rate,
"cutoff_frequency": cutoff_frequency,
"test_gain": test_gain_list,
},
"2": {
"sample_rate": sample_rate,
"cutoff_frequency": cutoff_frequency_bandwidth,
"test_gain": test_gain_bandwidth_list,
},
},
indent=4,
)
with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "w") as f:
f.write(test_vector_graphical_equalizer)
def parallel_equalizer_plot():
with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
test_case = json.load(f)
fs, fc, gain = (
test_case["2"]["sample_rate"],
test_case["2"]["cutoff_frequency"],
test_case["2"]["test_gain"][1],
)
fs = int(fs)
fc = np.array(fc)
gain = np.array(gain)
eq = GraphicalEqualizer(fs, fc, gain)
w, h = eq.freqz(show=True)
file = "/test/data/txt/test_graphical_equalizer.txt"
eq.write_to_file(f"{LIBRARY_PATH}/{file}")
def parallel_equalizer_wav_process():
with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
test_case = json.load(f)
fs, fc, gain = (
test_case["2"]["sample_rate"],
test_case["2"]["cutoff_frequency"],
test_case["2"]["test_gain"][1],
)
fs = int(fs)
fc = np.array(fc)
gain = np.array(gain)
eq = GraphicalEqualizer(fs, fc, gain)
# w, h = eq.freqz(show=True)
txt_file = "/test/data/txt/test_graphical_equalizer.txt"
eq.write_to_file(f"{LIBRARY_PATH}/{txt_file}")
"""Test wav file processing of parallel structure of iir filter
"""
data_path = LIBRARY_PATH + "/test/data/wav/"
result_path = LIBRARY_PATH + "/test/result/wav/"
wav_file = "White Noise.wav"
out_file = "White Noise_graphical_equalizer.wav"
infile_path = os.path.join(data_path, wav_file)
outfile_path = os.path.join(result_path, out_file)
coeff_text = open(f"{LIBRARY_PATH}/{txt_file}").read()
coeff_text = coeff_text.split("\n")[:-1]
coeff_text = [text.split(" ") for text in coeff_text]
cvt_char2num(coeff_text)
coeff_text, bias = np.array(coeff_text[:-1]), np.array(coeff_text[-1])
wave_processor = WaveProcessor(wavfile_path=infile_path)
wave_processor.graphical_equalizer = True
wave_processor.filter_time_domain_list = coeff_text
wave_processor.bias = bias
outresult_path = outfile_path
PRINTER.info(f"target file {outresult_path} is processing......")
wave_processor.run(savefile_path=outresult_path)
def analyze_filter():
from src import highpass, notch
fs = 44100
# """ Custom filter analysis"""
ploter = FilterAnalyzePlot(sample_rate=44100)
fc = 1000
filter_custom = highpass(Wn=2 * fc / fs, Q=1 / np.sqrt(2))
ploter.filters = filter_custom
ploter.plot(type=["freq", "phase", "pole"])
del filter_custom
""" Parametric filter analysis, serial structure"""
ploter = FilterAnalyzePlot()
fc = np.array([500, 4000])
peq = ParametricEqualizer(fs)
filter_custom = notch(Wn=2 * fc[0] / fs, Q=1 / np.sqrt(2))
peq.coeff = filter_custom
filter_custom = notch(Wn=2 * fc[1] / fs, Q=1 / np.sqrt(2))
peq.coeff = filter_custom
ploter.filters = peq
ploter.plot(type=["freq", "phase", "pole"])
del peq
""" Graphical filter analysis, parallel structure"""
with open(LIBRARY_PATH + "/test/data/json/test_graphical_equalizer.json", "r") as f:
test_case = json.load(f)
fs, fc, gain = (
test_case["1"]["sample_rate"],
test_case["1"]["cutoff_frequency"],
test_case["1"]["test_gain"][0],
)
fs = int(fs)
fc = np.array(fc)
gain = np.array(gain)
ploter = FilterAnalyzePlot()
geq = GraphicalEqualizer(fs, fc, gain)
ploter.filters = geq
ploter.plot(type=["freq", "phase", "pole"])
del geq
del ploter
if __name__ == "__main__":
PRINTER.info("Hello Digital Signal Processing World!")
"""Single filter design"""
filter_plot()
filter_process()
"""Serial structure of filters design"""
serial_equalizer_plot()
serial_equalizer_process()
"""Parallel structure of filters design"""
generator_test_vector_graphical_equalizer()
parallel_equalizer_plot()
parallel_equalizer_wav_process()
""" Analyze filter"""
analyze_filter()
pass
| 26.113852
| 88
| 0.575861
| 1,732
| 13,762
| 4.332564
| 0.172055
| 0.041578
| 0.020789
| 0.03678
| 0.5004
| 0.446562
| 0.38193
| 0.330091
| 0.303038
| 0.298774
| 0
| 0.063013
| 0.309257
| 13,762
| 526
| 89
| 26.163498
| 0.726383
| 0.046723
| 0
| 0.267677
| 0
| 0
| 0.082156
| 0.035782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020202
| false
| 0.020202
| 0.030303
| 0
| 0.050505
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef4b3fa8786cd370700430b9b9414a5a831d2bf
| 3,322
|
py
|
Python
|
time_transfer.py
|
EternityNull/alfred_scripts-TimeTransfer
|
d7c24c977d174d0b71b9903193ce8225a5538c7c
|
[
"MIT"
] | null | null | null |
time_transfer.py
|
EternityNull/alfred_scripts-TimeTransfer
|
d7c24c977d174d0b71b9903193ce8225a5538c7c
|
[
"MIT"
] | null | null | null |
time_transfer.py
|
EternityNull/alfred_scripts-TimeTransfer
|
d7c24c977d174d0b71b9903193ce8225a5538c7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
import json
from datetime import datetime
from alfred import *
TIMESTAMP_SEC_RE = r'^\d{10}$' # 1643372599
TIMESTAMP_MSEC_RE = r'^\d{13}$' # 1643372599000
# 2022-01-28 10:00:00
DATETIME_LONG_STR = r'^[1-9]\d{3}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$'
DATETIME_SHORT_STR = r'^[1-9]\d{13}$' # 20220128100000
def judge_input(input_arg: str):
date_now = input_datetime = datetime.now()
title_to_display = result_to_display = str()
if re.match(TIMESTAMP_SEC_RE, input_arg):
input_datetime = datetime.fromtimestamp(float(input_arg))
result_to_display = input_datetime.strftime("%Y-%m-%d %H:%M:%S")
title_to_display = "日期时间: %s" % result_to_display
elif re.match(TIMESTAMP_MSEC_RE, input_arg):
input_datetime = datetime.fromtimestamp(float(int(input_arg) / 1000))
result_to_display = input_datetime.strftime("%Y-%m-%d %H:%M:%S")
title_to_display = "日期时间: %s" % result_to_display
elif re.match(DATETIME_SHORT_STR, input_arg):
input_datetime = datetime.strptime(input_arg, "%Y%m%d%H%M%S")
result_to_display = int(input_datetime.timestamp())
title_to_display = "时间戳: %s" % result_to_display
elif re.match(DATETIME_LONG_STR, input_arg):
input_datetime = datetime.strptime(input_arg, "%Y-%m-%d %H:%M:%S")
result_to_display = int(input_datetime.timestamp())
title_to_display = "时间戳: %s" % result_to_display
else:
exit(1)
prefix = "前"
diff_days = (date_now - input_datetime).days
diff_secs = (date_now - input_datetime).seconds
if date_now < input_datetime:
prefix = "后"
diff_days = (input_datetime - date_now).days
diff_secs = (input_datetime - date_now).seconds
subtitle_to_display = "距离当前时间 %s [%s] 天 + [%s] 秒" % (
prefix,
diff_days,
diff_secs)
return Alfred(title_to_display, subtitle_to_display, result_to_display).__dict__
def judge_now():
date_now = datetime.now()
display_list = list()
# date format: long
display_list.append(
Alfred(
title=date_now.strftime("%Y-%m-%d %H:%M:%S"),
subtitle='Long日期格式',
arg=date_now.strftime("%Y-%m-%d %H:%M:%S")
).__dict__
)
# date format: traditional (slash-separated)
display_list.append(
Alfred(
title=date_now.strftime("%Y/%m/%d %H/%M/%S"),
subtitle='传统日期格式',
arg=date_now.strftime("%Y/%m/%d %H/%M/%S")
).__dict__
)
# date format: short (digits only)
display_list.append(
Alfred(
title=date_now.strftime("%Y%m%d%H%M%S"),
subtitle='Short日期格式',
arg=date_now.strftime("%Y%m%d%H%M%S")
).__dict__
)
# timestamp format (seconds)
display_list.append(
Alfred(
title=int(date_now.timestamp()),
subtitle='秒级时间戳',
arg=int(date_now.timestamp())
).__dict__
)
return display_list
if __name__ == '__main__':
input_args = sys.argv[1:]
if len(input_args) > 2:
exit(1)
input_arg = ' '.join(input_args)
alfred_result = list()
if input_arg == 'now':
alfred_result.extend(judge_now())
else:
alfred_result.append(judge_input(input_arg))
print(json.dumps({"items": alfred_result}))
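Typical invocations, assuming the script is wired into an Alfred script filter (the printed JSON is the item list Alfred expects):

# python3 time_transfer.py now           -> the current time in several formats
# python3 time_transfer.py 1643372599    -> that timestamp rendered as a local datetime, plus the day/second offset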
| 27.454545
| 84
| 0.609874
| 458
| 3,322
| 4.10917
| 0.203057
| 0.086079
| 0.079702
| 0.021254
| 0.49203
| 0.444208
| 0.444208
| 0.444208
| 0.372476
| 0.372476
| 0
| 0.030641
| 0.243528
| 3,322
| 120
| 85
| 27.683333
| 0.718265
| 0.042444
| 0
| 0.282353
| 0
| 0.011765
| 0.103437
| 0.007253
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.058824
| 0
| 0.105882
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef5faa5a487c2ba4ddeb8aafe0c3838370c774b
| 14,598
|
py
|
Python
|
ravager/bot/commands/admin_interface.py
|
CoolFool/Ravager
|
3d647115689dc23a160255221aaa493f879406a5
|
[
"MIT"
] | null | null | null |
ravager/bot/commands/admin_interface.py
|
CoolFool/Ravager
|
3d647115689dc23a160255221aaa493f879406a5
|
[
"MIT"
] | 1
|
2022-03-15T06:55:48.000Z
|
2022-03-15T15:38:20.000Z
|
ravager/bot/commands/admin_interface.py
|
CoolFool/Ravager
|
3d647115689dc23a160255221aaa493f879406a5
|
[
"MIT"
] | 2
|
2022-02-09T21:30:57.000Z
|
2022-03-15T06:19:57.000Z
|
import logging
from functools import wraps
import psutil
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ForceReply, ParseMode
from telegram.ext import CommandHandler, CallbackQueryHandler, ConversationHandler, MessageHandler, Filters
from ravager.bot.helpers.constants import *
from ravager.bot.helpers.timeout import ConversationTimeout
from ravager.config import MAX_TASKS_PER_USER, STORAGE_TIME, STORAGE_SIZE, GROUP_PASSWORD, USER_PASSWORD, ALLOWLIST, \
DOWNLOAD_DIR, LOGS_DIR, HEROKU_APP, HEROKU_API_TOKEN
from ravager.database.helpers.structs import UserStruct
from ravager.database.users import UserData
from ravager.helpers.humanize import humanize
logger = logging.getLogger(__file__)
HANDLE_ADMIN_PANEL, LIMITS_PANEL, FILTERS_PANEL, SYS_INFO_PANEL, LOGS_HANDLER = range(5)
limits_panel_text = "*Limits Configuration:*\
\nDownload storage size: *{}* GB\
\nDownload storage time: *{}* Hrs\n"
filter_panel_text = "*Filters and User Configuration:*\
\nFilters Enabled: *{}*\nGroup chat password: *{}*\
\nPrivate chat password: *{}*"
sys_info_text = "*System Information*\
\n*Cpu Usage Percent:* {}%\
\n*Used Ram:* {} {}\
\n*Available Ram:* {} {}\
\n*Network Ingress:* {} {}\
\n*Network Egress:* {} {}\
\n*Total Disk Space:* {} {}\
\n*Total Disk Space Available: *{} {}"
class AdminInterface:
def __init__(self):
self.end_selection = ConversationTimeout.end_selection
self.selection_timeout = ConversationTimeout.selection_timeout
self.user = UserStruct()
def _restricted(handlers):
    @wraps(handlers)  # wraps must decorate the wrapper; calling it bare had no effect
    def wrapper(self, update, context, *args, **kwargs):
user_id = update.effective_user.id
user = UserStruct()
user.user_id = user_id
user = UserData(user=user).get_user()
if user is not None and bool(user.is_admin):
return handlers(self, update, context, *args, **kwargs)
update.message.reply_text(text="Unauthorized user", quote=True)
logger.error("Unauthorized access denied for {}.".format(user_id))
return -1
return wrapper
@staticmethod
def admin_panel():
admin_panel = [[InlineKeyboardButton(text="Limits", callback_data="admin|admin_limits"),
InlineKeyboardButton(text="Filters", callback_data="admin|admin_filters")],
[InlineKeyboardButton(text="Sys Info", callback_data="admin|admin_sys_info"),
InlineKeyboardButton(text="Close", callback_data="admin|close")]]
return InlineKeyboardMarkup(admin_panel)
@staticmethod
def admin_interface_filters():
filters_panel = [[InlineKeyboardButton(text="Revoke Access", callback_data="filters|revoke_user"),
InlineKeyboardButton(text="Add Admin", callback_data="filters|add_admin")],
[InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
InlineKeyboardButton(text="Close", callback_data="admin|close")]]
return InlineKeyboardMarkup(filters_panel)
@staticmethod
def admin_interface_limits():
limits_panel = [[InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
InlineKeyboardButton(text="Close", callback_data="admin|close")]]
return InlineKeyboardMarkup(limits_panel)
@staticmethod
def admin_interface_sys_info():
sys_info_panel = [[InlineKeyboardButton(text="System Info", callback_data="sys_info|sys_info"),
InlineKeyboardButton(text="Logs", callback_data="sys_info|logs")],
[InlineKeyboardButton(text="Back", callback_data="admin|admin_main"),
InlineKeyboardButton(text="Close", callback_data="admin|close")]]
return InlineKeyboardMarkup(sys_info_panel)
@staticmethod
def toggle_panel(back_menu):
toggle_panel = [[InlineKeyboardButton(text="Enable", callback_data=""),
InlineKeyboardButton(text="Disable", callback_data=""),
InlineKeyboardButton(text="Back", callback_data="{}|".format(back_menu))]]
return InlineKeyboardMarkup(toggle_panel)
@staticmethod
def last_step_btns(prev_menu):
last_step_panel = [[InlineKeyboardButton(text="Back", callback_data="{}".format(prev_menu)),
InlineKeyboardButton(text="Back to main menu", callback_data="admin|admin_main")]]
return InlineKeyboardMarkup(last_step_panel)
def handle_admin_panel(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
if selection_option == "admin_main":
update.callback_query.edit_message_text(text="Admin Panel", reply_markup=self.admin_panel())
return HANDLE_ADMIN_PANEL
if selection_option == "admin_limits":
download_storage_time_threshold = STORAGE_TIME
download_storage_size_threshold = STORAGE_SIZE
stats = limits_panel_text.format(download_storage_size_threshold,download_storage_time_threshold)
update.callback_query.edit_message_text(text=stats, reply_markup=self.admin_interface_limits(),
parse_mode=ParseMode.MARKDOWN)
return LIMITS_PANEL
if selection_option == "admin_filters":
group_passwd = GROUP_PASSWORD
private_passwd = USER_PASSWORD
allowlist_enabled = str(ALLOWLIST)
text = filter_panel_text.format(allowlist_enabled, group_passwd, private_passwd)
update.callback_query.edit_message_text(text=text, reply_markup=self.admin_interface_filters(),
parse_mode=ParseMode.MARKDOWN)
return FILTERS_PANEL
if selection_option == "admin_sys_info":
update.callback_query.edit_message_text(text="Sys Health", reply_markup=self.admin_interface_sys_info())
return SYS_INFO_PANEL
if selection_option == "close":
update.callback_query.edit_message_text(text="Admin Interface closed")
return -1
def filters_options(self, update, context):
chat_id = update.effective_chat.id
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
if selection_option == "revoke_user":
text = "*Revoke user's access from bot*\nSend user's username or user id"
update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=self.last_step_btns(prev_menu="admin|admin_filters"))
context.bot.send_message(chat_id=chat_id, text="Username or User ID", parse_mode=ParseMode.MARKDOWN,
reply_markup=ForceReply())
if selection_option == "add_admin":
text = "*Revoke user's access from bot*\nSend user's username or user id"
update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=self.last_step_btns(prev_menu="admin|admin_filters"))
context.bot.send_message(chat_id=chat_id, text="Username or User ID", parse_mode=ParseMode.MARKDOWN,
reply_markup=ForceReply())
return
@staticmethod
def get_logs(update, context):
# dump logs properly
user_id = update.effective_user.id
try:
context.bot.sendDocument(chat_id=user_id, document=open("{}/ravager.log".format(LOGS_DIR), "rb"))
context.bot.sendDocument(chat_id=user_id, document=open("{}/celery.log".format(LOGS_DIR), "rb"))
context.bot.sendDocument(chat_id=user_id, document=open("{}/aria2.log".format(LOGS_DIR), "rb"))
except Exception as e:
context.bot.send_message(chat_id=user_id, text=str(e))  # reply_text takes no chat_id; send via the bot instead
logger.error(e)
@staticmethod
def logs_panel():
logs_panel = [[InlineKeyboardButton(text="Aria logs", callback_data="sys_info_logs|aria_logs"),
InlineKeyboardButton(text="Celery logs", callback_data="sys_info_logs|celery_logs"),
InlineKeyboardButton(text="Ravager logs", callback_data="sys_info_logs|ravager_logs")],
[InlineKeyboardButton(text="Back", callback_data="admin|admin_sys_info"),
InlineKeyboardButton(text="Back to main menu", callback_data="admin|admin_main")]]
reply_markup = InlineKeyboardMarkup(logs_panel)
return reply_markup
def system_options(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
if selection_option == "sys_info":
psutil.cpu_percent(interval=0.1)  # first call primes the measurement window
cpu_percent = psutil.cpu_percent(interval=0.1)
mem = psutil.virtual_memory()
disk_usage = psutil.disk_usage(str(DOWNLOAD_DIR))
net = psutil.net_io_counters(pernic=False, nowrap=True)
used_mem = humanize(mem.used)
available_mem = humanize(mem.available)
bytes_sent = humanize(net.bytes_sent)
bytes_recvd = humanize(net.bytes_recv)
total_disk_space = humanize(disk_usage.total)
total_free_space = humanize(disk_usage.free)
text = sys_info_text.format(cpu_percent, used_mem.size, used_mem.unit, available_mem.size, available_mem.unit,
bytes_recvd.size, bytes_recvd.unit, bytes_sent.size, bytes_sent.unit,
total_disk_space.size, total_disk_space.unit, total_free_space.size,
total_free_space.unit)
update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=self.last_step_btns(prev_menu="admin|admin_sys_info"))
return SYS_INFO_PANEL
if selection_option == "logs":
update.callback_query.edit_message_text(text="*Get yo logs*", parse_mode=ParseMode.MARKDOWN,
reply_markup=self.logs_panel())
return LOGS_HANDLER
def logs_handler(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
try:
if selection_option == "aria_logs":
context.bot.sendDocument(chat_id=update.callback_query.from_user.id,
document=open("{}/aria2.log".format(LOGS_DIR), "rb"))
if selection_option == "celery_logs":
context.bot.sendDocument(chat_id=update.callback_query.from_user.id, document=open("{}/celery.log".format(LOGS_DIR), "rb"))
if selection_option == "ravager_logs":
context.bot.sendDocument(chat_id=update.callback_query.from_user.id, document=open("{}/ravager.log".format(LOGS_DIR), "rb"))
return LOGS_HANDLER
except Exception as e:
logger.error(e)
@_restricted
def serve_admin_panel(self, update, context):
if str(update.effective_chat.type) == "group" or str(update.effective_chat.type) == "supergroup":
update.message.reply_text(text="This command can only be ran inside private chat")
return -1
self.user.user_id = update.effective_chat.id
self.user = UserData(user=self.user).get_user()
update.message.reply_text(text="Admin Panel", reply_markup=self.admin_panel())
return HANDLE_ADMIN_PANEL
def limits_options(self, update, context):
callback_data = update.callback_query.data
callback_data = callback_data.split("|")
selection_option = callback_data[1]
max_tasks_per_chat = MAX_TASKS_PER_USER
download_storage_size_threshold = STORAGE_SIZE
download_storage_time_threshold = STORAGE_TIME
if selection_option == "max_tasks_per_chat":
text = "*Max tasks per chat*\nCurrent value is: *{}*\nSend new value:".format(max_tasks_per_chat)
if selection_option == "storage_size_treshold":
text = "*Download storage size*\nCurrent value is: *{}* GB\nSend new value:".format(
download_storage_size_threshold)
if selection_option == "storage_duration":
text = "*Download storage duration*\nCurrent value is: *{}* Hrs\nSend new value:".format(
download_storage_time_threshold)
update.callback_query.edit_message_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=self.last_step_btns(prev_menu="admin|admin_limits"))
return LIMITS_PANEL
def admin_interface_handler(self):
admin_interface_handler = ConversationHandler(
entry_points=[CommandHandler("admin_interface", self.serve_admin_panel)],
states={
HANDLE_ADMIN_PANEL: [CallbackQueryHandler(self.handle_admin_panel, pattern="admin")],
LIMITS_PANEL: [CallbackQueryHandler(self.limits_options, pattern="limits")],
FILTERS_PANEL: [CallbackQueryHandler(self.filters_options, pattern="filters")],
SYS_INFO_PANEL: [CallbackQueryHandler(self.system_options, pattern="sys_info")],
LOGS_HANDLER: [CallbackQueryHandler(self.logs_handler, pattern="sys_info_logs")]
},
fallbacks=[CallbackQueryHandler(self.handle_admin_panel, pattern="admin"),
CallbackQueryHandler(self.handle_admin_panel, pattern="limits"),
CallbackQueryHandler(self.handle_admin_panel, pattern="filters"),
CallbackQueryHandler(self.handle_admin_panel, pattern="close"),
CommandHandler('cancel', self.end_selection),
MessageHandler(Filters.regex(r'^/'), self.end_selection)],
conversation_timeout=300
)
return admin_interface_handler
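A minimal registration sketch, assuming a python-telegram-bot v13-style Updater; the token is a placeholder and the real project wires this up elsewhere:

from telegram.ext import Updater

updater = Updater("BOT_TOKEN")  # placeholder token
updater.dispatcher.add_handler(AdminInterface().admin_interface_handler())
updater.start_polling()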
| 50.164948
| 140
| 0.644746
| 1,579
| 14,598
| 5.668778
| 0.137429
| 0.058988
| 0.038208
| 0.025695
| 0.481622
| 0.413697
| 0.355491
| 0.326556
| 0.299408
| 0.278293
| 0
| 0.001658
| 0.256405
| 14,598
| 290
| 141
| 50.337931
| 0.822939
| 0.001233
| 0
| 0.271186
| 0
| 0
| 0.103169
| 0.006517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076271
| false
| 0.025424
| 0.04661
| 0
| 0.224576
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fef8828761203757d50e9784d410fa779ff9303d
| 563
|
py
|
Python
|
daoliagent/utils.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | 1
|
2019-09-11T11:56:19.000Z
|
2019-09-11T11:56:19.000Z
|
daoliagent/utils.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
daoliagent/utils.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
import random
import six.moves.urllib.parse as urlparse
def replace_url(url, host=None, port=None, path=None):
o = urlparse.urlparse(url)
_host = o.hostname
_port = o.port
_path = o.path
if host is not None:
_host = host
if port is not None:
_port = port
netloc = _host
if _port is not None:
netloc = ':'.join([netloc, str(_port)])
if path is not None:
_path = path
return '%s://%s%s' % (o.scheme, netloc, _path)
def generate_seq():
return random.randint(1000000000, 4294967295)  # randint is inclusive; cap at the largest 32-bit value
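A brief usage check with a hypothetical URL:

print(replace_url('http://example.com:8080/v2', host='10.0.0.1'))
# -> 'http://10.0.0.1:8080/v2'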
| 20.107143
| 54
| 0.614565
| 81
| 563
| 4.123457
| 0.382716
| 0.05988
| 0.107784
| 0.071856
| 0.113772
| 0.113772
| 0
| 0
| 0
| 0
| 0
| 0.0489
| 0.273535
| 563
| 27
| 55
| 20.851852
| 0.767726
| 0
| 0
| 0
| 0
| 0
| 0.017762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0.052632
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fefa551e8285feb448d258e854941881fb3ad2e9
| 759
|
py
|
Python
|
doggo_ears_definitions.py
|
jryzkns/doggo-ears
|
004dbb8b07a0a2170ce0d04b6e1458b268cdd543
|
[
"MIT"
] | 1
|
2020-08-28T16:49:32.000Z
|
2020-08-28T16:49:32.000Z
|
doggo_ears_definitions.py
|
jryzkns/doggo-ears
|
004dbb8b07a0a2170ce0d04b6e1458b268cdd543
|
[
"MIT"
] | null | null | null |
doggo_ears_definitions.py
|
jryzkns/doggo-ears
|
004dbb8b07a0a2170ce0d04b6e1458b268cdd543
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
torch.manual_seed(0)
# PRE-PROCESSING
RAVDESS_DSET_PATH = "C:\\Users\\***\\Downloads\\RAVDESS\\"
TESS_DSET_PATH = "C:\\Users\\***\\Downloads\\TESS\\"
N_WORKERS = 15
# DATASET
emote_id = {
"01" : "neutral", "03" : "happy",
"04" : "sad", "05" : "angry"}
emote_idn = {
0 : "neutral", 1 : "happy",
2 : "sad", 3 : "angry"}
N_CATEGORIES = len(emote_id)
label_id = { n : torch.tensor(i)
for i, n in enumerate(emote_id.values())}
# AUDIO
window_duration = 0.5
LISTENER_RATE = 44100
N_FEATURES = 2
NUM_INFERENCE_WINDOW = 10
samples_per_wind = int(LISTENER_RATE * window_duration)
# TRAINING
BATCH_SIZE = 16
loader_params = { "batch_size" : BATCH_SIZE,
"shuffle" : True}
| 22.323529
| 58
| 0.623188
| 104
| 759
| 4.307692
| 0.644231
| 0.046875
| 0.040179
| 0.0625
| 0.102679
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045302
| 0.214756
| 759
| 34
| 59
| 22.323529
| 0.706376
| 0.048748
| 0
| 0
| 0
| 0
| 0.18663
| 0.0961
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fefd02d2de45b18b74656b9de90c0632735f1832
| 848
|
py
|
Python
|
leetcode/palindrome_pairs/palindrome_pairs.py
|
sagasu/python-algorithms
|
d630777a3f17823165e4d72ab780ede7b10df752
|
[
"MIT"
] | null | null | null |
leetcode/palindrome_pairs/palindrome_pairs.py
|
sagasu/python-algorithms
|
d630777a3f17823165e4d72ab780ede7b10df752
|
[
"MIT"
] | null | null | null |
leetcode/palindrome_pairs/palindrome_pairs.py
|
sagasu/python-algorithms
|
d630777a3f17823165e4d72ab780ede7b10df752
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def palindromePairs(self, words: List[str]) -> List[List[int]]:
lookup = {}
for index, word in enumerate(words):
lookup[word] = index
ans = set()
for index, word in enumerate(words):
for k in range(len(word) + 1):
current = word[:k][::-1]
if current in lookup and lookup[current] != index:
newword = word + current
if newword == newword[::-1]:
ans.add((index, lookup[current]))
current = word[len(word) - k:][::-1]
if current in lookup and lookup[current] != index:
newword = current + word
if newword == newword[::-1]:
ans.add((lookup[current], index))
return list(ans)
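A quick check against the classic LeetCode example; the set-based implementation returns the pairs in arbitrary order, so sort for a stable comparison:

words = ["abcd", "dcba", "lls", "s", "sssll"]
print(sorted(Solution().palindromePairs(words)))
# -> [(0, 1), (1, 0), (2, 4), (3, 2)]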
| 38.545455
| 67
| 0.471698
| 89
| 848
| 4.494382
| 0.303371
| 0.13
| 0.135
| 0.07
| 0.51
| 0.51
| 0.255
| 0.255
| 0.255
| 0.255
| 0
| 0.009881
| 0.403302
| 848
| 22
| 68
| 38.545455
| 0.780632
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a00eea590558911d75f7435e45a186ce7c2a0a1
| 30,437
|
py
|
Python
|
startExperiment.py
|
aydindemircioglu/radFS
|
b50b2a78f7c7975751b699b6b74a2761f7fa3501
|
[
"MIT",
"Unlicense"
] | 1
|
2022-02-24T02:16:55.000Z
|
2022-02-24T02:16:55.000Z
|
startExperiment.py
|
aydindemircioglu/radFS
|
b50b2a78f7c7975751b699b6b74a2761f7fa3501
|
[
"MIT",
"Unlicense"
] | null | null | null |
startExperiment.py
|
aydindemircioglu/radFS
|
b50b2a78f7c7975751b699b6b74a2761f7fa3501
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
from functools import partial
from datetime import datetime
import pandas as pd
from joblib import parallel_backend
import random
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
import shutil
import pathlib
import os
import math
from matplotlib import pyplot
import matplotlib.pyplot as plt
import time
import copy
import pickle
from joblib import Parallel, delayed
import tempfile
from xgboost import XGBClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB, BernoulliNB, CategoricalNB, ComplementNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
import itertools
import multiprocessing
import socket
from glob import glob
from collections import OrderedDict
import logging
import mlflow
from typing import Dict, Any
import hashlib
import json
from pymrmre import mrmr
from pprint import pprint
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import RFE, RFECV
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.feature_selection import mutual_info_classif
from mlflow import log_metric, log_param, log_artifact, log_dict, log_image
from loadData import *
from utils import *
from parameters import *
from extraFeatureSelections import *
### parameters
TrackingPath = "/data/results/radFS/mlrun.benchmark"
print ("Have", len(fselParameters["FeatureSelection"]["Methods"]), "Feature Selection Methods.")
print ("Have", len(clfParameters["Classification"]["Methods"]), "Classifiers.")
# like cross-validation: every parameter combination is run once
def getExperiments (experimentList, expParameters, sKey, inject = None):
newList = []
for exp in experimentList:
for cmb in list(itertools.product(*expParameters.values())):
pcmb = dict(zip(expParameters.keys(), cmb))
if inject is not None:
pcmb.update(inject)
_exp = exp.copy()
_exp.append((sKey, pcmb))
newList.append(_exp)
experimentList = newList.copy()
return experimentList
# this is pretty non-generic, maybe there is a better way, for now it works.
def generateAllExperiments (experimentParameters, verbose = False):
experimentList = [ [] ]
for k in experimentParameters.keys():
if verbose == True:
print ("Adding", k)
if k == "BlockingStrategy":
newList = []
blk = experimentParameters[k].copy()
newList.extend(getExperiments (experimentList, blk, k))
experimentList = newList.copy()
elif k == "FeatureSelection":
# this is for each N too
print ("Adding feature selection")
newList = []
for n in experimentParameters[k]["N"]:
for m in experimentParameters[k]["Methods"]:
fmethod = experimentParameters[k]["Methods"][m].copy()
fmethod["nFeatures"] = [n]
newList.extend(getExperiments (experimentList, fmethod, m))
experimentList = newList.copy()
elif k == "Classification":
newList = []
for m in experimentParameters[k]["Methods"]:
newList.extend(getExperiments (experimentList, experimentParameters[k]["Methods"][m], m))
experimentList = newList.copy()
else:
experimentList = getExperiments (experimentList, experimentParameters[k], k)
return experimentList
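A toy sketch of the expansion, assuming a two-method, two-classifier grid; the method names below mirror keys that createFSel/createClf understand, but the values are placeholders, not the real parameter files:

toyParams = OrderedDict([
    ("FeatureSelection", {"N": [5], "Methods": {"MIM": {}, "LASSO": {"C": [1.0]}}}),
    ("Classification", {"Methods": {"SVM": {"C": [1.0]}, "NaiveBayes": {}}}),
])
exps = generateAllExperiments(toyParams)
print(len(exps))  # 2 feature selectors x 2 classifiers = 4 experiments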
# if we do not want scaling to be performed on all data,
# we need to save the scaler; same for the imputer.
def preprocessData (X, y):
from sklearn.impute import SimpleImputer  # explicit import; otherwise SimpleImputer only arrives via the wildcard imports above
simp = SimpleImputer(strategy="mean")
X = pd.DataFrame(simp.fit_transform(X),columns = X.columns)
sscal = StandardScaler()
X = pd.DataFrame(sscal.fit_transform(X),columns = X.columns)
return X, y
def applyFS (X, y, fExp):
print ("Applying", fExp)
return X, y
def applyCLF (X, y, cExp, fExp = None):
print ("Training", cExp, "on FS:", fExp)
return "model"
def testModel (y_pred, y_true, idx, fold = None):
t = np.array(y_true)
p = np.array(y_pred)
# naive bayes can produce NaN -- it happens on ramella2018.
# in that case we replace the NaNs by 0
p = np.nan_to_num(p)
y_pred_int = [int(k>=0.5) for k in p]
acc = accuracy_score(t, y_pred_int)
df = pd.DataFrame ({"y_true": t, "y_pred": p}, index = idx)
return {"y_pred": p, "y_test": t,
"y_pred_int": y_pred_int,
"idx": np.array(idx).tolist()}, df, acc
def getRunID (pDict):
def dict_hash(dictionary: Dict[str, Any]) -> str:
dhash = hashlib.md5()
encoded = json.dumps(dictionary, sort_keys=True).encode()
dhash.update(encoded)
return dhash.hexdigest()
run_id = dict_hash(pDict)
return run_id
def getAUCCurve (modelStats, dpi = 100):
# compute roc and auc
fpr, tpr, thresholds = roc_curve (modelStats["y_test"], modelStats["y_pred"])
area_under_curve = auc (fpr, tpr)
if (math.isnan(area_under_curve) == True):
print ("ERROR: Unable to compute AUC of ROC curve. NaN detected!")
print (modelStats["y_test"])
print (modelStats["y_pred"])
raise Exception ("Unable to compute AUC")
sens, spec = findOptimalCutoff (fpr, tpr, thresholds)
return area_under_curve, sens, spec
def getPRCurve (modelStats, dpi = 100):
# compute roc and auc
precision, recall, thresholds = precision_recall_curve(modelStats["y_test"], modelStats["y_pred"])
try:
f1 = f1_score (modelStats["y_test"], modelStats["y_pred_int"])
except Exception as e:
print (modelStats["y_test"])
print (modelStats["y_pred_int"])
raise (e)
f1_auc = auc (recall, precision)
if (math.isnan(f1_auc) == True):
print ("ERROR: Unable to compute AUC of PR curve. NaN detected!")
print (modelStats["y_test"])
print (modelStats["y_pred"])
raise Exception ("Unable to compute AUC")
return f1, f1_auc
def logMetrics (foldStats):
y_preds = []
y_test = []
y_index = []
aucList = {}
for k in foldStats:
if "fold" in k:
y_preds.extend(foldStats[k]["y_pred"])
y_test.extend(foldStats[k]["y_test"])
y_index.extend(foldStats[k]["idx"])
fpr, tpr, thresholds = roc_curve (foldStats[k]["y_test"], foldStats[k]["y_pred"])
area_under_curve = auc (fpr, tpr)
aucList["AUC" + "_" + str(len(aucList))] = area_under_curve
auc_mean = np.mean(list(aucList.values()))
auc_std = np.std(list(aucList.values()))
aucList["AUC_mean"] = auc_mean
aucList["AUC_std"] = auc_std
modelStats, df, acc = testModel (y_preds, y_test, idx = y_index, fold = "ALL")
roc_auc, sens, spec = getAUCCurve (modelStats, dpi = 72)
f1, f1_auc = getPRCurve (modelStats, dpi = 72)
#pprint(aucList)
log_dict(aucList, "aucStats.json")
log_dict(modelStats, "params.yml")
log_metric ("Accuracy", acc)
log_metric ("Sens", sens)
log_metric ("Spec", spec)
log_metric ("AUC", roc_auc)
log_metric ("F1", f1)
log_metric ("F1_AUC", f1_auc)
#print (foldStats["features"])
log_dict(foldStats["features"], "features.json")
for k in foldStats["params"]:
log_param (k, foldStats["params"][k])
with tempfile.TemporaryDirectory() as temp_dir:
predFile = os.path.join(temp_dir, "preds.csv")
df.to_csv(predFile)
mlflow.log_artifact(predFile)
print(".", end = '', flush=True)
return {}
def createFSel (fExp, cache = True):
method = fExp[0][0]
nFeatures = fExp[0][1]["nFeatures"]
if method == "LASSO":
C = fExp[0][1]["C"]
clf = LogisticRegression(penalty='l1', max_iter=500, solver='liblinear', C = C)
pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
if method == "ET":
clf = ExtraTreesClassifier()
pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
if method == "ReliefF":
from ITMO_FS.filters.univariate import reliefF_measure
pipe = SelectKBest(reliefF_measure, k = nFeatures)
if method == "MIM":
pipe = SelectKBest(mutual_info_classif, k = nFeatures)
if method == "Chi2":
from ITMO_FS.filters.univariate import chi2_measure
pipe = SelectKBest(chi2_measure, k = nFeatures)
if method == "Anova":
from ITMO_FS.filters.univariate import anova
pipe = SelectKBest(anova, k = nFeatures)
if method == "InformationGain":
from ITMO_FS.filters.univariate import information_gain
pipe = SelectKBest(information_gain, k = nFeatures)
if method == "GiniIndex":
from ITMO_FS.filters.univariate import gini_index
pipe = SelectKBest(gini_index, k = nFeatures)
if method == "SUMeasure":
from ITMO_FS.filters.univariate import su_measure
pipe = SelectKBest(su_measure, k = nFeatures)
if method == "FCBF":
from ITMO_FS.filters.multivariate.FCBF import FCBFDiscreteFilter
def fcbf_fct (X, y):
fcbf = FCBFDiscreteFilter()
fcbf.fit(X,y)
idxList = fcbf.selected_features
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(fcbf_fct, k = nFeatures)
if method == "MCFS":
from ITMO_FS.filters import MCFS
def mcfs_fct (X, y):
mcfs = MCFS(nFeatures, scheme='0-1') # dot is broken
idxList = mcfs.feature_ranking(X)
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(mcfs_fct, k = nFeatures)
if method == "UDFS":
from ITMO_FS.filters import UDFS
def udfs_fct (X, y):
udfs = UDFS(nFeatures)
idxList = udfs.feature_ranking(X)
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(udfs_fct, k = nFeatures)
if method == "Pearson":
from ITMO_FS.filters.univariate import pearson_corr
pipe = SelectKBest(pearson_corr, k = nFeatures)
if method == "Kendall":
from scipy.stats import kendalltau
def kendall_corr_fct (X, y):
scores = [0]*X.shape[1]
for k in range(X.shape[1]):
scores[k] = 1-kendalltau(X[:,k], y)[1]
return np.array(scores)
pipe = SelectKBest(kendall_corr_fct, k = nFeatures)
if method == "Fechner":
from ITMO_FS.filters.univariate import fechner_corr
pipe = SelectKBest(fechner_corr, k = nFeatures)
if method == "Spearman":
from ITMO_FS.filters.univariate import spearman_corr
pipe = SelectKBest(spearman_corr, k = nFeatures)
if method == "Laplacian":
from ITMO_FS.filters.univariate import laplacian_score
def laplacian_score_fct (X, y):
scores = laplacian_score(X,y)
return -scores
pipe = SelectKBest(laplacian_score_fct, k = nFeatures)
if method == "FisherScore":
from ITMO_FS.filters.univariate import f_ratio_measure
pipe = SelectKBest(f_ratio_measure, k = nFeatures)
if method == "Relief":
from extraFeatureSelections import relief_measure
pipe = SelectKBest(relief_measure, k = nFeatures)
if method == "JMI":
from skfeature.function.information_theoretical_based import JMI
def jmi_score (X, y, nFeatures):
sol, _, _ = JMI.jmi (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
jmi_score_fct = partial(jmi_score, nFeatures = nFeatures)
pipe = SelectKBest(jmi_score_fct, k = nFeatures)
if method == "ICAP":
from skfeature.function.information_theoretical_based import ICAP
def icap_score (X, y, nFeatures):
sol, _, _ =ICAP.icap (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
icap_score_fct = partial(icap_score, nFeatures = nFeatures)
pipe = SelectKBest(icap_score_fct, k = nFeatures)
# not exported
if method == "DCSF":
from ITMO_FS.filters.multivariate import DCSF
def dcsf_score_fct (X, y):
selected_features = []
other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
scores = DCSF(np.array(selected_features), np.array(other_features), X, y)
return scores
pipe = SelectKBest(dcsf_score_fct, k = nFeatures)
if method == "CIFE":
from skfeature.function.information_theoretical_based import CIFE
def cife_score (X, y, nFeatures):
sol, _, _ = CIFE.cife (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
cife_score_fct = partial(cife_score, nFeatures = nFeatures)
pipe = SelectKBest(cife_score_fct, k = nFeatures)
# should be the same as MIM
if method == "MIFS":
from ITMO_FS.filters.multivariate import MIFS
def mifs_score_fct (X, y):
selected_features = []
other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
scores = MIFS(np.array(selected_features), np.array(other_features), X, y, beta = 0.5)
return scores
pipe = SelectKBest(mifs_score_fct, k = nFeatures)
if method == "CMIM":
from skfeature.function.information_theoretical_based import CMIM
def cmim_score (X, y, nFeatures):
sol, _, _ =CMIM.cmim (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
cmim_score_fct = partial(cmim_score, nFeatures = nFeatures)
pipe = SelectKBest(cmim_score_fct, k = nFeatures)
if method == "MRI":
from ITMO_FS.filters.multivariate import MRI
def mri_score_fct (X, y):
selected_features = []
other_features = [i for i in range(0, X.shape[1]) if i not in selected_features]
scores = MRI(np.array(selected_features), np.array(other_features), X, y)
return scores
pipe = SelectKBest(mri_score_fct, k = nFeatures)
if method == "MRMR":
def mrmr_score (X, y, nFeatures):
Xp = pd.DataFrame(X, columns = range(X.shape[1]))
yp = pd.DataFrame(y, columns=['Target'])
# we need to pre-specify the max solution length...
solutions = mrmr.mrmr_ensemble(features = Xp, targets = yp, solution_length=nFeatures, solution_count=1)
scores = [0]*Xp.shape[1]
for j,z in enumerate(solutions.iloc[0][0]):
scores[z] = (len(solutions.iloc[0][0]) - j)/len(solutions.iloc[0][0])
scores = np.asarray(scores, dtype = np.float32)
return scores
mrmr_score_fct = partial(mrmr_score, nFeatures = nFeatures)
pipe = SelectKBest(mrmr_score_fct, k = nFeatures)
if method == "MRMRe":
def mrmre_score (X, y, nFeatures):
Xp = pd.DataFrame(X, columns = range(X.shape[1]))
yp = pd.DataFrame(y, columns=['Target'])
# we need to pre-specify the max solution length...
solutions = mrmr.mrmr_ensemble(features = Xp, targets = yp, solution_length=nFeatures, solution_count=5)
scores = [0]*Xp.shape[1]
for k in solutions.iloc[0]:
for j, z in enumerate(k):
scores[z] = scores[z] + Xp.shape[1] - j
scores = np.asarray(scores, dtype = np.float32)
scores = scores/np.sum(scores)
return scores
mrmre_score_fct = partial(mrmre_score, nFeatures = nFeatures)
pipe = SelectKBest(mrmre_score_fct, k = nFeatures)
if method == "SVMRFE":
def svmrfe_score_fct (X, y):
svc = LinearSVC (C=1)
rfe = RFECV(estimator=svc, step=0.10, scoring='roc_auc', n_jobs=1)
rfe.fit(X, y)
scores = rfe.ranking_
return scores
pipe = SelectKBest(svmrfe_score_fct, k = nFeatures)
if method == "Boruta":
import boruta
def boruta_fct (X, y):
rfc = RandomForestClassifier(n_jobs=-1, class_weight='balanced_subsample')
b = boruta.BorutaPy (rfc, n_estimators = nFeatures)
b.fit(X, y)
scores = np.max(b.ranking_) - b.ranking_
return scores
pipe = SelectKBest(boruta_fct, k = nFeatures)
if method == "RandomizedLR":
from sklearn.utils import resample
def randlr_fct (X, y):
# only 100 instead of 1000
scores = None
for k in range(25):
boot = resample(range(0,X.shape[0]), replace=True, n_samples=X.shape[0], random_state=k)
model = LogisticRegression(solver = 'lbfgs', random_state = k)
model.fit(X[boot,:], y[boot])
if scores is None:
scores = model.coef_[0]*0
scores = scores + np.abs(model.coef_[0])
return scores
pipe = SelectKBest(randlr_fct, k = nFeatures)
if method == "tScore":
from skfeature.function.statistical_based import t_score
pipe = SelectKBest(t_score.t_score, k = nFeatures)
if method == "Wilcoxon":
from extraFeatureSelections import wilcoxon_score
pipe = SelectKBest(wilcoxon_score, k = nFeatures)
if method == "Variance":
def variance (X, y):
scores = np.var(X, axis = 0)
return scores
pipe = SelectKBest(variance, k = nFeatures)
if method == "TraceRatio":
from skfeature.function.similarity_based import trace_ratio
def trace_ratio_score (X, y, nFeatures):
fidx, fscore, _ = trace_ratio.trace_ratio (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j in range(len(fidx)):
scores[fidx[j]] = fscore[j]
scores = np.asarray(scores, dtype = np.float32)
return scores
trace_ratio_score_fct = partial(trace_ratio_score, nFeatures = nFeatures)
pipe = SelectKBest(trace_ratio_score_fct, k = nFeatures)
if method == "Bhattacharyya":
def bhattacharyya_score_fct (X, y):
import cv2
yn = y/np.sum(y)
yn = np.asarray(yn, dtype = np.float32)
scores = [0]*X.shape[1]
for j in range(X.shape[1]):
xn = (X[:,j] - np.min(X[:,j]))/(np.max(X[:,j] - np.min(X[:,j])))
xn = xn/np.sum(xn)
xn = np.asarray(xn, dtype = np.float32)
scores[j] = cv2.compareHist(xn, yn, cv2.HISTCMP_BHATTACHARYYA)
scores = np.asarray(scores, dtype = np.float32)
return -scores
pipe = SelectKBest(bhattacharyya_score_fct, k = nFeatures)
if method == "None":
def dummy_score (X, y):
scores = np.ones(X.shape[1])
return scores
pipe = SelectKBest(dummy_score, k = 'all')
return pipe
def createClf (cExp):
#print (cExp)
method = cExp[0][0]
if method == "Constant":
model = DummyClassifier()
if method == "SVM":
C = cExp[0][1]["C"]
svc = LinearSVC(C = C)
model = CalibratedClassifierCV(svc)
if method == "RBFSVM":
C = cExp[0][1]["C"]
g = cExp[0][1]["gamma"]
model = SVC(kernel = "rbf", C = C, gamma = g, probability = True)
if method == "LDA":
model = LinearDiscriminantAnalysis(solver = 'lsqr', shrinkage = 'auto')
if method == "QDA":
model = QuadraticDiscriminantAnalysis()
if method == "LogisticRegression":
C = cExp[0][1]["C"]
model = LogisticRegression(solver = 'lbfgs', C = C, random_state = 42)
if method == "RandomForest":
n_estimators = cExp[0][1]["n_estimators"]
model = RandomForestClassifier(n_estimators = n_estimators)
if method == "kNN":
neighbors = cExp[0][1]["N"]
model = KNeighborsClassifier(neighbors)
if method == "XGBoost":
learning_rate = cExp[0][1]["learning_rate"]
n_estimators = cExp[0][1]["n_estimators"]
model = XGBClassifier(learning_rate = learning_rate, n_estimators = n_estimators, n_jobs = 1, use_label_encoder=False, eval_metric = "logloss", random_state = 42)
if method == "XGBoost_GPU":
learning_rate = cExp[0][1]["learning_rate"]
n_estimators = cExp[0][1]["n_estimators"]
model = XGBClassifier(learning_rate = learning_rate, n_estimators = n_estimators, use_label_encoder=False, eval_metric = "logloss", tree_method='gpu_hist', random_state = 42)
if method == "NaiveBayes":
model = GaussianNB()
if method == "NeuralNetwork":
N1 = cExp[0][1]["layer_1"]
N2 = cExp[0][1]["layer_2"]
N3 = cExp[0][1]["layer_3"]
model = MLPClassifier (hidden_layer_sizes=(N1,N2,N3,), random_state=42, max_iter = 1000)
return model
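The experiment tuples handed to these factories have the shape [(method_name, param_dict)]; a minimal sketch of driving both by hand (a toy illustration, not part of the original pipeline):

fsel = createFSel([("MIM", {"nFeatures": 10})])         # SelectKBest over mutual information
clf = createClf([("LogisticRegression", {"C": 1.0})])   # plain logistic-regression baseline
# both objects then plug into fit/transform exactly as in executeExperiment below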
@ignore_warnings(category=ConvergenceWarning)
@ignore_warnings(category=UserWarning)
def executeExperiment (fselExperiments, clfExperiments, data, dataID):
    mlflow.set_tracking_uri(TrackingPath)
    y = data["Target"]
    X = data.drop(["Target"], axis = 1)
    X, y = preprocessData (X, y)
    # need a fixed set of folds to be comparable
    kfolds = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 1, random_state = 42)
    # make sure the experiment gets selected; retry in case of a race with other workers
    raceOK = False
    while raceOK == False:
        try:
            mlflow.set_experiment(dataID)
            raceOK = True
        except:
            time.sleep(0.5)
    stats = {}
    for i, fExp in enumerate(fselExperiments):
        np.random.seed(i)
        random.seed(i)
        for j, cExp in enumerate(clfExperiments):
            timings = {}
            foldStats = {}
            foldStats["features"] = []
            foldStats["params"] = {}
            foldStats["params"].update(fExp)
            foldStats["params"].update(cExp)
            run_name = getRunID (foldStats["params"])
            current_experiment = dict(mlflow.get_experiment_by_name(dataID))
            experiment_id = current_experiment['experiment_id']
            # check if we have that run already. recomputing via mlflow did not work,
            # so we track completed runs through our own marker files.
            if len(glob (os.path.join(TrackingPath, str(experiment_id), "*/artifacts/" + run_name + ".ID"))) > 0:
                print ("X", end = '', flush = True)
                continue
            # log what we do next
            with open(os.path.join(TrackingPath, "curExperiments.txt"), "a") as f:
                f.write("(RUN) " + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + str(fExp) + "+" + str(cExp) + "\n")
            expVersion = '_'.join([k for k in foldStats["params"] if "Experiment" not in k])
            pID = str(foldStats["params"])
            # register the run in mlflow now
            run_id = getRunID (foldStats["params"])
            mlflow.start_run(run_name = run_id, tags = {"Version": expVersion, "pID": pID})
            # crude, but effective: log a marker file whose name is the run id
            log_dict(foldStats["params"], run_id + ".ID")
            for k, (train_index, test_index) in enumerate(kfolds.split(X, y)):
                X_train, X_test = X.iloc[train_index].copy(), X.iloc[test_index].copy()
                y_train, y_test = y[train_index].copy(), y[test_index].copy()
                # log the fold indices too
                log_dict({"Test": test_index.tolist(), "Train": train_index.tolist()}, "CVIndex_" + str(k) + ".json")
                fselector = createFSel (fExp)
                with np.errstate(divide = 'ignore', invalid = 'ignore'):
                    timeFSStart = time.time()
                    fselector.fit (X_train.copy(), y_train.copy())
                    timeFSEnd = time.time()
                timings["Fsel_Time_Fold_" + str(k)] = timeFSEnd - timeFSStart
                feature_idx = fselector.get_support()
                selected_feature_names = X_train.columns[feature_idx].copy()
                all_feature_names = X_train.columns.copy()
                # also log the selection as a 0-1 pattern
                # (note: the loop variable must not shadow the classifier index j)
                fpat = np.zeros(X_train.shape[1])
                for m, f in enumerate(feature_idx):
                    fpat[m] = int(f)
                # just once
                if k == 0:
                    log_dict({f: fpat[m] for m, f in enumerate(all_feature_names)}, "FNames_" + str(k) + ".json")
                    log_dict({m: fpat[m] for m, f in enumerate(all_feature_names)}, "FPattern_" + str(k) + ".json")
                foldStats["features"].append(list([selected_feature_names][0].values))
                # apply the selector -- from here on the data is numpy, not pandas, and has lost its column names
                X_fs_train = fselector.transform (X_train)
                y_fs_train = y_train
                X_fs_test = fselector.transform (X_test)
                y_fs_test = y_test
                # check if we have any features at all
                if X_fs_train.shape[1] > 0:
                    classifier = createClf (cExp)
                    timeClfStart = time.time()
                    classifier.fit (X_fs_train, y_fs_train)
                    timeClfEnd = time.time()
                    timings["Clf_Time_Fold_" + str(k)] = timeClfEnd - timeClfStart
                    y_pred = classifier.predict_proba (X_fs_test)
                    y_pred = y_pred[:, 1]
                    foldStats["fold_" + str(k)], df, acc = testModel (y_pred, y_fs_test, idx = test_index, fold = k)
                else:
                    # corner case: if, e.g., LASSO selects no feature and the classifier is the
                    # constant predictor, we cannot simply emit zeros -- that would yield a model
                    # different from the constant predictor. we fix this by special-casing it.
                    if cExp[0][0] == "Constant":
                        print ("F:", fExp, end = '')
                        classifier = createClf (cExp)
                        classifier.fit (X_train.iloc[:, 0:2], y_train)
                        # predict on the matching two columns of the test fold
                        # (the dummy classifier ignores the feature values anyway)
                        y_pred = classifier.predict_proba (X_test.iloc[:, 0:2])[:, 1]
                    else:
                        # otherwise a constant prediction of 1 will do
                        y_pred = y_test*0 + 1
                    foldStats["fold_" + str(k)], df, acc = testModel (y_pred, y_fs_test, idx = test_index, fold = k)
            stats[str(i) + "_" + str(j)] = logMetrics (foldStats)
            log_dict(timings, "timings.json")
            mlflow.end_run()
            with open(os.path.join(TrackingPath, "curExperiments.txt"), "a") as f:
                f.write("(DONE)" + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + str(fExp) + "+" + str(cExp) + "\n")
def executeExperiments (z):
    fselExperiments, clfExperiments, data, d = z
    executeExperiment ([fselExperiments], [clfExperiments], data, d)


if __name__ == "__main__":
    print ("Hi.")
    logging.basicConfig(format = '%(levelname)s:%(message)s', level = logging.WARNING)
    # load data first
    datasets = {}
    dList = ["Li2020", "Carvalho2018", "Hosny2018A", "Hosny2018B", "Hosny2018C", "Ramella2018", "Keek2020", "Park2020", "Song2020", "Toivonen2019"]
    for d in dList:
        eval (d + "().info()")
        datasets[d] = eval (d + "().getData('./data/')")
        print ("\tLoaded data with shape", datasets[d].shape)
        # avoid race conditions later
        try:
            mlflow.set_tracking_uri(TrackingPath)
            mlflow.create_experiment(d)
            mlflow.set_experiment(d)
            time.sleep(3)
        except:
            pass
    for d in dList:
        print ("\nExecuting", d)
        data = datasets[d]
        # generate all experiments
        fselExperiments = generateAllExperiments (fselParameters)
        print ("Created", len(fselExperiments), "feature selection parameter settings")
        clfExperiments = generateAllExperiments (clfParameters)
        print ("Created", len(clfExperiments), "classifier parameter settings")
        print ("Total", len(clfExperiments)*len(fselExperiments), "experiments")
        # generate list of experiment combinations
        clList = []
        for fe in fselExperiments:
            for clf in clfExperiments:
                clList.append((fe, clf, data, d))
        # execute
        ncpus = 16
        with parallel_backend("loky", inner_max_num_threads = 1):
            fv = Parallel (n_jobs = ncpus)(delayed(executeExperiments)(c) for c in clList)
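# For reference: the loky backend with inner_max_num_threads=1 gives each worker a
# single BLAS/OpenMP thread, so the 16 processes do not oversubscribe the cores when
# every worker already runs CPU-bound sklearn fits. A stand-alone sketch of the same
# pattern (toy task, not the experiment runner):
#
#   with parallel_backend("loky", inner_max_num_threads = 1):
#       squares = Parallel(n_jobs = 4)(delayed(lambda v: v * v)(v) for v in range(8))
#   print(squares)   # [0, 1, 4, 9, 16, 25, 36, 49]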
--- record: hexsha 3a01b5b20e16dc59b45be5e462160adb8ae019e0 | size 692 | ext py | lang Python | path dm/algorithms/HungarianAlg.py | repo forons/distance-measurement | head 39741aefed0aa2f86e8959338c867398ce6494c7 | licenses ["MIT"] ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy import optimize, sparse

from .AbstractDistanceAlg import AbstractDistanceAlg


class HungarianAlg(AbstractDistanceAlg):
    def __init__(self, df, size):
        super().__init__(df, size)

    def compute_matching(self):
        distances = self.df.collect()
        # lil_matrix supports item assignment (coo_matrix does not)
        cost_matrix = sparse.lil_matrix((self.size, self.size), dtype=np.float32)
        for row, col, dist in distances:
            cost_matrix[row, col] = dist
        # linear_sum_assignment expects a dense cost matrix and returns two
        # aligned index arrays (row indices, matching column indices)
        row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix.toarray())
        for row, col in zip(row_ind, col_ind):
            self.matches.append((row, col, cost_matrix[row, col]))
        return self.matches
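# A minimal, self-contained sketch (not part of the original module) of what
# linear_sum_assignment computes: the cost-minimal one-to-one matching.
if __name__ == "__main__":
    demo_cost = np.array([[4.0, 1.0, 3.0],
                          [2.0, 0.0, 5.0],
                          [3.0, 2.0, 2.0]])
    r, c = optimize.linear_sum_assignment(demo_cost)
    print([(int(a), int(b)) for a, b in zip(r, c)])   # [(0, 1), (1, 0), (2, 2)]
    print(demo_cost[r, c].sum())                      # minimal total cost: 5.0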
--- record: hexsha 3a079d600f0144ca6ea7cb473635485bda6d1725 | size 2,039 | ext py | lang Python | path python/oneflow/test/modules/test_linspace.py | repo lizhimeng159/oneflow | head b5f504d7a2185c6d6ac2c97bc5f9a2a3dd78883d | licenses ["Apache-2.0"] ---
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestLinspace(flow.unittest.TestCase):
    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_int_with_random_data(test_case):
        start = random().to(int)
        end = start + random().to(int)
        steps = random(0, end - start).to(int)
        x = torch.linspace(start=start, end=end, steps=steps)
        device = random_device()
        x.to(device)
        return x

    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_float_with_random_data(test_case):
        start = random()
        end = start + random()
        steps = random(0, end - start).to(int)
        x = torch.linspace(start=start, end=end, steps=steps)
        device = random_device()
        x.to(device)
        return x

    def test_consistent_naive(test_case):
        placement = flow.placement("cpu", {0: [0]})
        sbp = (flow.sbp.broadcast,)
        x = flow.linspace(start=0, end=10, steps=2, placement=placement, sbp=sbp)
        test_case.assertEqual(x.sbp, sbp)
        test_case.assertEqual(x.placement, placement)


if __name__ == "__main__":
    unittest.main()
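# For reference (not part of the test suite): linspace(start, end, steps) returns
# `steps` values spaced evenly over the closed interval [start, end], endpoints
# included. The NumPy analogue behaves the same way:
#
#   >>> np.linspace(0, 10, 5)
#   array([ 0. ,  2.5,  5. ,  7.5, 10. ])
#   >>> np.linspace(0, 10, 2)
#   array([ 0., 10.])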
--- record: hexsha 3a081670c8619a8dbe9b2b1bb3b4d9935ec6801d | size 1,577 | ext py | lang Python | path alexia/apps/general/templatetags/menuitem.py | repo LaudateCorpus1/alexia-1 | head 9c0d3c90c0ffe2237299a561b755b9c17905e354 | licenses ["BSD-3-Clause"] | stars 8 | issues 67 | forks 6 ---
import re
from django.template import Library, Node, TemplateSyntaxError
from django.template.base import token_kwargs
from django.urls import Resolver404, resolve
from django.utils.html import format_html
register = Library()
class MenuItemNode(Node):
    def __init__(self, nodelist, pattern, kwargs):
        self.nodelist = nodelist
        self.pattern = pattern
        self.kwargs = kwargs

    def render(self, context):
        pattern = self.pattern.resolve(context)
        classes = []
        if 'class' in self.kwargs:
            classes = self.kwargs['class'].resolve(context).split()
        try:
            func = resolve(context['request'].path).func
        except Resolver404:
            return ''
        match = func.__module__ + '.' + func.__name__
        if re.search(pattern, match):
            classes.append('active')
        if classes:
            open_tag = format_html('<li class="{}">', ' '.join(classes))
        else:
            open_tag = format_html('<li>')
        content = self.nodelist.render(context)
        close_tag = format_html('</li>')
        return open_tag + content + close_tag


@register.tag
def menuitem(parser, token):
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument, a pattern matching a view name." % bits[0])
    pattern = parser.compile_filter(bits[1])
    kwargs = token_kwargs(bits[2:], parser)
    nodelist = parser.parse(('endmenuitem',))
    parser.delete_first_token()
    return MenuItemNode(nodelist, pattern, kwargs)
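# Hypothetical usage sketch: the template below and the view-module regex are invented
# for illustration. If the view resolved for request.path lives in a module whose
# dotted path matches the pattern, the surrounding <li> gains the class "active".
EXAMPLE_TEMPLATE = r"""
{% load menuitem %}
<ul class="nav">
  {% menuitem "apps\.general\.views" class="nav-item" %}<a href="/">General</a>{% endmenuitem %}
</ul>
"""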
--- record: hexsha 3a0830f683c3bcea14ab59eb19f8a4474d9635b6 | size 3,984 | ext py | lang Python | path superai/log/logger.py | repo mysuperai/superai-sdk | head 796c411c6ab69209600bf727e8fd08c20f4d67b1 | licenses ["Apache-2.0"] | stars 1 | issues 13 | forks 1 ---
""" Log initializer """
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from rich.logging import RichHandler
from typing import List
DEBUG = logging.DEBUG
INFO = logging.INFO
ERROR = logging.ERROR
WARNING = logging.WARNING
DEFAULT_LOG_FILENAME = "superai.log"
_log_format = (
"%(asctime)s - %(levelname)s - %(filename)s - %(threadName)s - [%(name)s:%(funcName)s:%(lineno)s] - %(message)s"
)
_rich_log_format = "%(message)s - %(threadName)s"
_date_format = "%Y-%m-%d %H:%M:%S"
_style = "{"
loggers: List[logging.Logger] = []
def create_file_handler(
log_format=_log_format,
log_filename=DEFAULT_LOG_FILENAME,
max_bytes=5000000,
backup_count=25,
):
"""Create rotating file handler"""
formatter = CustomFormatter(fmt=log_format, datefmt=_date_format, style=_style)
handler = RotatingFileHandler(log_filename, maxBytes=max_bytes, backupCount=backup_count)
handler.setFormatter(formatter)
return handler
def create_non_cli_handler(log_format=_log_format, stream=sys.stdout):
"""Create logging to non-CLI console (like ECS)"""
formatter = CustomFormatter(fmt=log_format, datefmt=_date_format)
console_handler = logging.StreamHandler(stream)
console_handler.setFormatter(formatter)
return console_handler
def create_cli_handler():
"""Create logging handler for CLI with rich structured output"""
rich_handler = RichHandler(rich_tracebacks=True)
return rich_handler
def get_logger(name=None, propagate=True):
"""Get logger object"""
logger = logging.getLogger(name)
logger.propagate = propagate
loggers.append(logger)
return logger
def exception(line):
"""Log exception"""
return logging.exception(line)
def debug(line):
"""Log debug"""
return logging.debug(line)
def warn(line):
"""Log warning"""
return logging.warn(line)
def error(line):
"""Log error"""
return logging.error(line)
def info(line):
"""Log info"""
return logging.info(line)
def init(filename=None, console=True, log_level=INFO, log_format=_log_format):
"""Initialize logging setup"""
if not log_format:
log_format = _log_format
log_handlers: List[logging.Handler] = []
if console:
if os.getenv("ECS", False) or os.getenv("JENKINS_URL", False):
log_handlers.append(create_non_cli_handler(log_format=log_format))
else:
# Use Rich for CLI
log_handlers.append(create_cli_handler())
# Set Format to short type for Rich
log_format = _rich_log_format
if filename is not None:
# Alwoys log to file with verbose format
log_handlers.append(create_file_handler(log_format=_log_format, log_filename=filename))
for pair in itertools.product(loggers, log_handlers):
pair[0].addHandler(pair[1])
pair[0].setLevel(log_level)
# Set Logging config based on CLI/Non/CLI Format
logging.basicConfig(format=log_format, level=log_level, handlers=log_handlers)
log = get_logger(__name__)
if log_level > logging.INFO:
log.log(level=log_level, msg=f"super.Ai logger initialized with log_level={log_level}")
return log
class CustomFormatter(logging.Formatter):
"""Custom Formatter does these 2 things:
1. Overrides 'funcName' with the value of 'func_name_override', if it exists.
2. Overrides 'filename' with the value of 'file_name_override', if it exists.
"""
def format(self, record):
if hasattr(record, "func_name_override"):
record.funcName = record.func_name_override
if hasattr(record, "file_name_override"):
record.filename = record.file_name_override
if hasattr(record, "lineno_override"):
record.lineno = record.lineno_override
return super(CustomFormatter, self).format(record)
init()
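# Hypothetical usage from application code (the import path is assumed from the
# package layout superai/log/logger.py):
#
#   from superai.log import logger
#
#   log = logger.get_logger(__name__)   # register first, so init() attaches handlers
#   logger.init(filename="superai.log", log_level=logger.DEBUG)
#   log.debug("rotating file + console handlers are now active")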