blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4c5d5afbe6cd8443c105488dc6448c1dd69437fc | 758d2bc11c5512332fb18ce5ad46f715e1caf542 | /Source/settings.py | 09c1aa962cc8932092f29ecf5fc08828a182f878 | [
"MIT"
] | permissive | j-t-burns/MI-ASS | b0cd610e79b6f84f84ea0a8ea48304f464847d9a | 9f70a545521a55dc7af2e504617363bf47241646 | refs/heads/master | 2021-01-18T05:29:14.158255 | 2015-10-17T23:58:29 | 2015-10-17T23:58:29 | 35,896,106 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # This is a file with all settings and options that we have for the AnnotateCilitate program
import multiprocessing
Options = {
    # --- Feature toggles for the annotation stages ---
    'Telomeres' : True,
    'MDS' : True,
    'Pointers' : True,
    # --- Telomere detection parameters (lengths in bases) ---
    'TelomereEndLimit' : 100,
    'TelomereLength' : 10,
    'BlastMaskLowercase' : True,
    # --- Coarse (rough) BLAST pass settings ---
    'RoughBlastTask' : 'megablast',
    'RoughBlastWordSize' : 28,
    'RoughBlastDust' : False,
    'RoughBlastUngapped' : True,
    'RoughCoverageLimit' : 5,
    # --- Fine BLAST pass settings ---
    'FineBlastTask' : 'blastn-short',
    'FineBlastWordSize' : 12,
    'FineBlastDust' : False,
    'FineBlastUngapped' : True,
    # Oversubscribe CPU cores 2x, assuming BLAST jobs are partly I/O bound
    'ThreadCount' : 2*multiprocessing.cpu_count(),
    'DatabaseUpdate' : True,
    'MIC_Coverage_Threshold' : 10,
    'TelomericErrorTolerance' : 5,
    # Regular expressions below are for Tetrahymena thermophila telomeric sequences
    #'Tel_Reg_Exp_5' : "((AA){0,1}(CCCCAA)+(CCCC){0,1})",
    #'Tel_Reg_Exp_3' : "((TT){0,1}(GGGGTT)+(GGGG){0,1})"
    # Regular expressions below are for Oxytricha trifallax telomeric sequences
    'Tel_Reg_Exp_5' : "(A{0,4}(C{4}A{4})+C{0,4})|(C{0,4}(A{4}C{4})+A{0,4})|(A{1,4}C{4}A{1,4})|(C{1,4}A{4}C{1,4})",
    'Tel_Reg_Exp_3' : "(T{0,4}(G{4}T{4})+G{0,4})|(G{0,4}(T{4}G{4})+T{0,4})|(T{1,4}G{4}T{1,4})|(G{1,4}T{4}G{1,4})"
}
| [
"denys.kukushkin@gmail.com"
] | denys.kukushkin@gmail.com |
e3cfe05320b107334cd99f3c84823a5075d35ef2 | 469e966aae6a8074c5ecb852afb685ca0b49cbe2 | /jerky_utils.py | ee6964f55b3100780bc8a827d0e22d0339c1282c | [] | no_license | bexcite/behavioral-cloning | 975fa11c4f22cb56f1e2fa823b36673a2f868211 | 495bde8b64ea011d4c4515636749adb1440f62c9 | refs/heads/master | 2021-06-11T23:13:48.773509 | 2016-12-30T21:30:10 | 2016-12-30T21:30:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,887 | py | '''
Helper functions to remove sections from the datasets.
Used for cleaning data from bad driving and narrowing experiments.
'''
import os
import numpy as np
# Mapped Jerky sections that we need to remove - depends on dataset
jerky_sections = {}
jerky_sections['train1-complete'] = [
[0, 80],
[295, 350],
[429, 445],
[540, 550],
[680, 686],
[718, 760],
[773, 778],
[1007, 1013],
[1352, 1370],
[1459, 1474],
[1990, 2001],
[2166, 2172],
[2318, 2322]
]
jerky_sections['train2-complete'] = [
[0, 30],
[300, 320],
[900, 965],
[1546, 1575],
[2020, 2040],
[2170, 2200],
[3470, 3538]
]
jerky_sections['train3-complete'] = [
[157, 191],
[396, 413],
[495, 509],
[702, 706],
[848, 855],
[1199, 1206],
[1262, 1277],
[1304, 1317],
[1417, 1428],
[1465, 1511],
[1578, 1582],
[1661, 1664],
[1893, 1900],
[1939, 1946],
[2026, 2044],
[2084, 2095],
[2327, 2338],
[2509, 2521],
[2610, 2617],
[2693, 2705],
[2754, 2768],
[2807, 2820],
[2886, 2896],
[2932, 2939],
[3130, 3147],
[3252, 3263],
[3311, 3340],
[3393, 3401],
[3432, 3444],
[3473, 3495],
[3536, 3544],
[3770, 3785],
[3790, 3796],
[3854, 3862],
[3957, 3970],
[4033, 4050],
[4212, 4235],
[4272, 4284],
[4365, 4378]
]
jerky_sections['train4-complete'] = [
[0, 79],
[121, 134],
[202, 207],
[234, 246],
[280, 291],
[383, 392],
[412, 416],
[449, 457],
[625, 639],
[721, 727],
[827, 840],
[898, 912],
[952, 966],
[1014, 1029],
[1100, 1113],
[1153, 1161],
[1218, 1235],
[1346, 1363],
[1453, 1468],
[1560, 1585],
[1702, 1721],
[1731, 1757],
[1794, 1817],
[1857, 1872],
[1924, 1936],
[2175, 2183],
[2265, 2277],
[2307, 2330],
[2403, 2411],
[2459, 2477],
[2505, 2525],
[2576, 2600],
[2640, 2656],
[2870, 2885]
]
jerky_sections['train5-complete'] = [
[0, 117],
[123, 128],
[142, 151],
[162, 178],
[223, 247],
[285, 297],
[327, 381]
]
jerky_sections['train6-complete'] = [
[0, 37],
[95, 104],
[141, 152],
[160, 178],
[201, 212],
[251, 261],
[311, 321],
[366, 372],
[396, 400], # 431 - 448
[487, 498],
[546, 555],
[565, 587],
[603, 618],
[678, 691],
[706, 716],
[778, 784],
[820, 829],
[873, 880],
[932, 964],
[1024, 1040],
[1107, 1122],
[1140, 1160],
[1260, 1278],
[1323, 1333],
[1384, 1387],
[1490, 1499],
[1532, 1547],
[1619, 1629],
[1670, 1689],
[1709, 1721],
[1781, 1796],
[1981, 1986],
[2053, 2076],
[2114, 2116],
[2141, 2157],
[2240, 2249],
[2279, 2290],
[2308, 2323],
[2345, 2362],
[2392, 2413],
[2430, 2440],
[2456, 2471],
[2485, 2500],
[2523, 2537],
[2555, 2560],
[2575, 2618],
[2712, 2725],
[2769, 2775],
[2791, 2809],
[2855, 2858],
[2894, 2904],
[2952, 2961],
[2975, 2986],
[3054, 3072],
[3084, 3094],
[3106, 3113],
[3151, 3160],
[3179, 3189],
[3244, 3256],
[3282, 3288],
[3318, 3324],
[3349, 3360]
]
jerky_sections['train7-complete'] = [
[0, 304],
[351, 356],
[392, 398],
[446, 452],
[542, 549],
[565, 580],
[600, 612],
[639, 645],
[669, 676],
[693, 701],
[709, 721],
[733, 738],
[760, 772],
[788, 799],
[808, 816],
[831, 842],
[849, 861],
[866, 874],
[886, 908],
[950, 957],
[994, 999],
[1004, 1049]
]
jerky_sections['train8-complete'] = [
[0, 76],
[94, 116],
[138, 149],
[198, 203],
[299, 310],
[329, 347],
[359, 364],
[388, 395],
[429, 452],
[476, 488],
[512, 540],
[554, 568],
[603, 613],
[633, 646],
[719, 726],
[751, 755],
[781, 800],
[836, 851],
[891, 897],
[923, 927],
[1055, 1068],
[1097, 1104],
[1130, 1136],
[1145, 1154],
[1203, 1211],
[1235, 1252],
[1268, 1280],
[1289, 1302],
[1322, 1337],
[1361, 1375],
[1422, 1432],
[1458, 1465],
[1495, 1504],
[1546, 1554],
[1575, 1584],
[1636, 1640],
[1695, 1711],
[1762, 1787],
[1836, 1845],
[1890, 1900],
[1931, 1963],
[2004, 2033],
[2057, 2080],
[2106, 2125],
[2146, 2157],
[2230, 2244],
[2279, 2289],
[2311, 2331],
[2350, 2362],
[2436, 2458],
[2502, 2519],
[2531, 2596],
[2652, 2665],
[2697, 2709],
[2719, 2728],
[2749, 2760],
[2767, 2772],
[2785, 2795],
[2802, 2811],
[2817, 2823],
[2839, 2850],
[2860, 2865],
[2875, 2888],
[2897, 2906],
[2913, 2921],
[2928, 2935],
[2976, 2981],
[2989, 2996],
[3010, 3013],
[3021, 3023],
[3030, 3033],
[3066, 3077],
[3086, 3097],
[3119, 3126],
[3152, 3161],
[3167, 3170],
[3212, 3224],
[3245, 3253],
[3262, 3270],
[3282, 3295],
[3379, 3390],
[3398, 3407],
[3416, 3430],
[3439, 3452],
[3465, 3480],
[3503, 3516]
]
jerky_sections['train9-complete'] = [
[0, 52],
[64, 72],
[100, 109],
[123, 133],
[160, 173],
[199, 213],
[231, 246],
[264, 289],
[350, 357],
[378, 391],
[421, 432],
[455, 475],
[537, 552],
[607, 620],
[642, 654],
[672, 677],
[689, 705],
[713, 724],
[753, 760],
[773, 780],
[792, 802],
[817, 831],
[868, 881],
[911, 926],
[953, 971],
[994, 1011],
[1024, 1033],
[1050, 1063],
[1076, 1089],
[1096, 1104],
[1111, 1125],
[1132, 1138],
[1172, 1192],
[1214, 1227],
[1282, 1302],
[1346, 1352],
[1376, 1386],
[1420, 1427],
[1451, 1483],
[1491, 1502],
[1544, 1555],
[1566, 1577],
[1636, 1650],
[1668, 1693],
[1708, 1724],
[1736, 1752],
[1780, 1805],
[1846, 1861],
[1928, 1938],
[1955, 1968],
[2039, 2060],
[2068, 2076],
[2142, 2158],
[2196, 2205],
[2232, 2251],
[2283, 2295],
[2326, 2345],
[2395, 2409],
[2430, 2445],
[2462, 2484],
[2498, 2524],
[2541, 2554],
[2560, 2573],
[2599, 2614],
[2625, 2638],
[2651, 2661],
[2689, 2705],
[2721, 2748],
[2766, 2777],
[2819, 2824],
[2885, 2908],
[2946, 2966],
[2999, 3007],
[3043, 3055],
[3072, 3086],
[3117, 3130],
[3140, 3157],
[3169, 3182],
[3206, 3223],
[3241, 3255],
[3288, 3312],
[3327, 3349],
[3361, 3376],
[3416, 3435],
[3455, 3461],
[3488, 3507],
[3576, 3610],
[3653, 3694]
]
jerky_sections['train10-complete'] = [
[0, 61],
[85, 95],
[117, 132],
[151, 175],
[190, 206],
[222, 232],
[270, 297],
[312, 325],
[360, 372],
[380, 383],
[394, 401],
[409, 416],
[442, 458],
[540, 555],
[594, 599],
[609, 617],
[627, 639],
[658, 705]
]
jerky_sections['data'] = [
[2, 29],
[5331, 5337]
]
jerky_sections['corner2'] = [
[0, 511],
[626, 1022]
]
jerky_sections['corner3'] = [
[0, 511],
[742, 773]
]
def remove_jerky_sections(center_data, left_data, right_data, labels_data, dataset_path):
    """Drop the frame ranges mapped in `jerky_sections` for this dataset.

    center/left/right data are parallel lists of frame file names,
    labels_data is a numpy array of the same length; dataset_path's last
    path component selects the range table. Returns the four filtered
    collections (lists, lists, lists, numpy array).
    """
    dataset_name = os.path.basename(os.path.normpath(dataset_path))
    print('dataset_name =', dataset_name)
    bad_ranges = jerky_sections.get(dataset_name, [])
    prev_size = len(center_data)

    def _kept_indices(total, ranges):
        # All indices 0..total-1 minus every half-open [start, end) range.
        if not ranges:
            return np.arange(total)
        removed = np.concatenate([np.arange(start, end) for start, end in ranges], axis=0)
        return np.delete(np.arange(total), removed)

    keep = _kept_indices(len(center_data), bad_ranges)
    center_data_files = np.asarray(center_data)[keep].tolist()
    left_data_files = np.asarray(left_data)[keep].tolist()
    right_data_files = np.asarray(right_data)[keep].tolist()
    labels = labels_data[keep]
    new_size = len(center_data_files)
    print('Removed %d frames from dataset %s' % (prev_size - new_size, dataset_name))
    return center_data_files, left_data_files, right_data_files, labels
| [
"pavel.bashmakov@gmail.com"
] | pavel.bashmakov@gmail.com |
f529b705500ce54a7c0dd741d3ceb9b2a35c1351 | 332cfcd2b68294fbc7e31b59d21810de3fb283c8 | /week0/ProblemF3_Generate file with random integers/solution.py | d03bf3ea9c6b4a09b1b00138385ec06a8d6c6af1 | [] | no_license | fyllmax/Programming101 | 6884b03422ac00710b6e9469ace43738934a5dff | e0d7bc23709bab11fa733a1be09bf2bf7eb9c623 | refs/heads/master | 2020-03-27T04:22:53.852489 | 2014-04-22T12:25:10 | 2014-04-22T12:25:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | from sys import argv, exit
from random import randint
def main(args=None):
    """Write `numbers` random integers in [1, numbers] to a file, space-separated.

    args: optional argv-style list ``[prog, file_name, count]``; defaults to the
    real process arguments, so existing ``main()`` callers are unaffected.
    Exits with an error message when fewer than two arguments are given.
    """
    if args is None:
        args = argv
    if len(args) < 3:
        exit("Error: Not enough arguments given!")
    file_name = args[1]
    numbers = int(args[2])
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on any exception).
    with open(file_name, 'w+') as text_file:
        for i in range(numbers):
            generated_numbers = (randint(1, numbers))
            text_file.write(str(generated_numbers))
            text_file.write(' ')
if __name__ == '__main__':
main()
| [
"andi_77@abv.bg"
] | andi_77@abv.bg |
d0270031a171711e6ed22a2fbe278cad17493cb2 | bcbd4a074b79c747b08c941a1a83c774c1168778 | /src/Models/Auxiliaries/Transitions.py | 42ccd02fb65f33ec3a7feae7a20bde89e573fff2 | [
"MIT"
] | permissive | goncaloasimoes/nps | f8c0eab55e4e089972371b2d42357b0bd9b6e636 | 77f766661229327df16bef1e6813152e02350459 | refs/heads/main | 2023-07-02T09:55:00.296050 | 2021-07-26T23:20:03 | 2021-07-26T23:20:03 | 389,460,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | class Transitions:
def __init__(self, transitions):
self.pair_transitions = {}
self.self_transitions = {}
for key in transitions.keys():
# Pair Transition
if len(key) == 2:
initiator = key[0]
receiver = key[1]
if initiator in self.pair_transitions:
if receiver in self.pair_transitions[initiator]:
raise Exception("Error when creating transitions: Receiver " + receiver + " for initiator " + initiator + " already defined.")
else:
self.pair_transitions[initiator] = {}
self.pair_transitions[initiator][receiver] = transitions[key]
elif len(key) == 1:
if key in self.self_transitions:
raise Exception("Error when creating transition: There is already a single transition for " + key + " defined.")
self.self_transitions[key] = transitions[key]
def get_all_pair_initiators(self):
return list(self.pair_transitions.keys())
def get_all_self_transitions(self):
return list(self.self_transitions.keys())
def get_receivers_of(self, initiator_state):
return list(self.pair_transitions[initiator_state].keys())
    def check_if_state_has_pair_transitions(self, initiator_state):
        # True when this state initiates at least one pair transition.
        return initiator_state in self.pair_transitions
    def check_if_receiver_available(self, initiator_state, receiver_state):
        # True when a pair transition initiator->receiver is registered.
        # NOTE(review): raises KeyError if the initiator itself is unknown —
        # callers are expected to check check_if_state_has_pair_transitions first.
        return receiver_state in self.pair_transitions[initiator_state]
def call_pair_transition_function(self, initiator, initiator_state, receiver, receiver_state, network):
return self.pair_transitions[initiator_state][receiver_state](
initiator,
initiator_state,
receiver,
receiver_state,
network
)
    def check_if_self_transition_exists(self, state):
        # True when a self transition is registered for this state.
        return state in self.self_transitions
def call_self_transition_function(self, node, state, network):
return self.self_transitions[state](node, state, network) | [
"goncalo.a.simoes@tecnico.ulisboa.pt"
] | goncalo.a.simoes@tecnico.ulisboa.pt |
1fbf6e83617d19f5e3ae903b7b3c847b24814920 | aeb3427be6a555e3335f43e54ec5b68497d8f779 | /WindowsForm/visualization/visualization/bin/Debug/DataProcessing.py | 1a0ca700a6092571ffe8d2936fc2d0bcd255df8c | [] | no_license | s410385015/VisualizationTest | 4c78502644a0de994c128e2accf0dacf73fc29fb | aed2b7a023184d2d2bf3d503a8b8d2b672749878 | refs/heads/master | 2020-03-27T10:31:30.589033 | 2018-10-30T13:21:46 | 2018-10-30T13:21:46 | 146,426,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import csv
path='D:\Plastics_and_Chemicals_Macro.csv'
def main():
pass
def ReadFromCSV():
f = open(path,'r')
reader=csv.reader(f)
l=next(reader)
d=list()
for row in reader:
d.append(row)
f.close()
return l,d
def HandleData():
label,data=ReadFromCSV()
#delete the first null
del label[0]
#re-concat the label list into string
label_str=""
for l in label:
label_str+=l+","
label_str=label_str[:len(label_str)-1]
#re-concat the first column of the data
#In this case ,the first column is date information
date_info=""
date=[column[0] for column in data]
for d in date:
date_info+=d+","
date_info=date_info[:len(date_info)-1]
#re-concat the data and delete the first column
data_str=""
for d in data:
del d[0]
for d1 in d:
data_str+=d1+","
data_str=data_str[:len(data_str)-1]+'\n'
data_str=data_str[:len(data_str)-1]
print(label_str)
print(date_info)
print(data_str)
#return label_str,date_info,data_str
if __name__=="__main__":
main()
HandleData()
| [
"s410385015@gmail.com"
] | s410385015@gmail.com |
4081fa21664237ee4ce3433c17288f030e44006d | e9cfe55ce06842a27b251c1fb827ed71a7be5c0e | /filterCaptions.py | 96eae61b4ba015423741cad3824bc814175a4469 | [
"MIT"
] | permissive | chcaru/gcnet | eb0c0126e88d0f6b9799449dd34cc9a9dc2cff11 | ecbf0089eb4e53e34483e876bf70076675ba1a3d | refs/heads/master | 2020-06-18T05:03:41.626813 | 2016-11-29T05:18:39 | 2016-11-29T05:18:39 | 74,945,866 | 18 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,372 | py |
import gc, math, sys, re, os
import numpy as np
from sets import Set
from collections import Counter
# The max resulting size of the vocabulary.
# These are the top N most frequent words
maxVocab = 6000
# % of words in a caption that must be int
# the vocab before it is dropped (below quality threshold)
captionQualityThreshold = 0.9
# The fewest number of words in a caption before it is dropped
minWords = 3
# Determines if the captions will be trimmed
trimMax = True
# if trimMax:
# Captions over this length will be trimmed to it
# else:
# The greatest number of words in a caption before it is dropped
maxTrimWords = 16
# The greatest number of words in a caption before it is dropped
# The idea behind this is that if it's too long then the trimmed
# caption won't have enough information to learn from
maxWords = 22
# Filters out words from captions that are not in the vocabulary
# (this can result in bad grammar / english)
# Generally this is OK if captionQualityThreshold is high enough
removeWordsOutsideVocabFromCaption = True
gifDir = './gifs/'
minNumFrame = 16
# This should be the cleaned text...
# Cleaned text should have a higher quality and reduced complexity
text = open('clean.captions.txt', 'r').read();
# A path to trained word embeddings
preTrainedWordEmbeddingsPath = './data/glove/glove.840B.300d.txt'
# The size of the word embedding vectors
preTrainedWordEmbeddingSize = 300
showDroppedItems = False
lines = text.split('\n')
lines.pop()
captionIds = [line.split('\t')[0] for line in lines]
captions = [line.split('\t')[1] for line in lines]
tokens = ' '.join(captions).split(' ')
uniqueTokens = Counter(tokens)
tokenLookup = { token: index for token, index in zip(sorted(uniqueTokens, key=uniqueTokens.get, reverse=True), xrange(len(uniqueTokens))) }
reverseTokenLookup = { value: key for key, value in tokenLookup.items() }
# reverseTokenLookup[0] = '*'
maxVocab = min(maxVocab, len(reverseTokenLookup))
# This can take up a lot of memory (~40GB) when using glove.840B.300d
# If you have less than 64GB of RAM, then use glove.6B.300d (or smaller)
preTrainedEmbeddingLookup = { word: vector for word, vector in [(l[0], np.asarray(l[1:], dtype='float32')) for l in [ l.split(' ') for l in open(preTrainedWordEmbeddingsPath, 'r').readlines() ]] }
embeddingMatrix = np.zeros((maxVocab + 1, preTrainedWordEmbeddingSize), dtype='float32')
# Dump the kept vocabulary (word + frequency), and build the embedding
# matrix: row 0 stays all-zero as the out-of-vocab slot, row i+1 holds the
# pre-trained vector of the i-th most frequent word.
vocabFile = open('vocab.' + str(maxVocab) + '.txt', 'w')
for i in range(maxVocab):
    vocabFile.write(reverseTokenLookup[i] + ' ' + str(uniqueTokens[reverseTokenLookup[i]]) + '\n')
    # 'N' appears to be the numeric placeholder token; it is looked up as
    # 'number' in the pre-trained embeddings — TODO confirm.
    word = reverseTokenLookup[i] if reverseTokenLookup[i] != 'N' else 'number'
    wordVector = preTrainedEmbeddingLookup.get(word)
    if wordVector is not None:
        embeddingMatrix[i+1] = wordVector
    else:
        print reverseTokenLookup[i] + ' was not found in pre trained word embeddings'
vocabFile.close()
np.save('./embeddingMatrix.' + str(maxVocab) + '.npy', embeddingMatrix)
def wordToIndex(w):
    # Map a word to its 1-based embedding row; words whose shifted rank
    # falls outside the vocabulary cap collapse to 0, the out-of-vocab slot.
    rank = tokenLookup[w] + 1
    if rank < maxVocab:
        return rank
    return 0
def indexToWord(i):
    # Inverse of wordToIndex: index 0 is the out-of-vocabulary placeholder.
    if i <= 0:
        return '*'
    return reverseTokenLookup[i - 1]
def quality(indices):
    # Fraction of caption tokens that are inside the vocabulary (index > 0).
    flags = [1.0 if idx > 0 else 0.0 for idx in indices]
    return reduce(lambda a, b: a + b, flags) / len(indices)
filtedCaptionsFile = open('filtered.captions.' + str(maxVocab) + '.txt', 'w')
encodedCaptions = np.zeros((len(captions), 1 + maxTrimWords), dtype='int32')
numKept = 0
tQuality = 0.0
ttQuality = 0.0
tLength = 0.0
ttLength = 0.0
for i in range(len(captions)):
line = captions[i]
words = line.split(' ')
indices = map(wordToIndex, words)
q = quality(indices)
ttQuality += q
ttLength += len(words)
if not os.path.isdir(gifDir + str(i)):
if showDroppedItems: print 'Dropping (no gif): ' + str(i) + ' ' + line
continue;
if len(os.listdir(gifDir + str(i))) < minNumFrame:
if showDroppedItems: print 'Dropping (too few frames): ' + str(i) + ' ' + line
continue;
if len(words) < minWords:
if showDroppedItems: print 'Dropping (too small): ' + str(i) + ' ' + line
continue
if len(words) > maxTrimWords:
if not trimMax or len(words) > maxWords:
if showDroppedItems: print 'Dropping (too big): ' + str(i) + ' ' + line
continue
else:
words = words[:maxTrimWords]
indices = map(wordToIndex, words)
q = quality(indices)
if q < captionQualityThreshold:
if showDroppedItems: print 'Dropping (low quality): ' + str(i) + ' ' + line
continue
if removeWordsOutsideVocabFromCaption:
indices = filter(lambda x: x > 0, indices)
tQuality += q
tLength += len(indices)
encodedCaptions[numKept][0] = int(captionIds[i])
encodedCaptions[numKept][1:len(indices)+1] = indices
filtedCaptionsFile.write(str(captionIds[i]) + ' ' + (' '.join(map(indexToWord, indices))) + '\n')
numKept += 1
filtedCaptionsFile.close()
encodedCaptions = encodedCaptions[:numKept]
np.save('dataY.captions.' + str(maxTrimWords) + '.npy', encodedCaptions)
tQuality /= numKept
tLength /= numKept
ttQuality /= len(captions)
ttLength /= len(captions)
print 'Captions kept: ' + str(numKept) + ' / ' + str(len(captions))
print 'Average quality: ' + str(tQuality) + ' / ' + str(ttQuality)
print 'Average length: ' + str(tLength) + ' / ' + str(ttLength) | [
"Chris Caruso"
] | Chris Caruso |
9aaf935c5e687889d66a3560dfbfc3d661402d47 | 61223ebc4266db257480602b707195949607264b | /function.py | 6bdbec4c26c4cf849b8df909bb7d62b5b0ca92fe | [
"MIT"
] | permissive | tqtrunghnvn/FixMatch-pytorch | 311b930091d6197943cf17690dc99e0355114a11 | b2a40c4dc1547ae5cf4b7fe28220f8b43d7d8363 | refs/heads/master | 2022-10-07T22:01:16.316743 | 2020-06-08T05:48:41 | 2020-06-08T05:48:41 | 264,972,120 | 1 | 0 | MIT | 2020-05-18T14:54:50 | 2020-05-18T14:54:49 | null | UTF-8 | Python | false | false | 3,008 | py | import os
import random
import numpy as np
import torch
import logging
def cdist(x, y):
    '''
    Euclidean distance between two tensors of the same shape,
    returned as a 0-dim tensor.
    '''
    squared_diff = (x - y) ** 2
    return torch.sqrt(squared_diff.sum())
def cdists_slow(batch):
    '''
    Pairwise Euclidean distances, one entry at a time (slow reference
    implementation of cdists_old).
    batch: (N, M) tensor — N samples, M features. Returns an (N, N) tensor.
    '''
    n, m = batch.size()[0], batch.size()[1]
    dists = torch.zeros(n, n)
    for row in range(n):
        for col in range(n):
            dists[row, col] = cdist(batch[row], batch[col])
    return dists
def cdists_old(batch):
    '''
    Pairwise Euclidean distances via broadcasting (fast).
    batch: (N, M) tensor — N samples, M features. Returns an (N, N) tensor.
    NOTE: the gradient of sqrt is NaN at exactly 0 (the diagonal);
    see cdists for the squared-distance variant that avoids this.
    '''
    pairwise = batch.unsqueeze(0) - batch.unsqueeze(1)
    return torch.sqrt((pairwise * pairwise).sum(axis=-1))
def cdists(batch):
    '''
    Pairwise SQUARED Euclidean distances for an (N, M) batch; returns (N, N).

    The square root is deliberately omitted: sqrt has a NaN gradient at 0
    and the diagonal is exactly 0, so downstream losses (batchhard*) work
    in squared-distance space instead.
    '''
    pairwise = batch.unsqueeze(0) - batch.unsqueeze(1)
    return (pairwise * pairwise).sum(axis=-1)
def batchhard(batch, idens, margin=0.1):
    """Batch-hard triplet loss with a soft (softplus) margin.

    batch: (N, D) embedding tensor; idens: (N,) identity labels;
    margin: additive margin applied to the hardest-positive distance.
    For each anchor the hardest positive is its farthest same-identity
    sample and the hardest negative its closest other-identity sample.
    NOTE(review): requires CUDA (`.cuda()` on the masks); cdists returns
    SQUARED distances, so `margin` lives in squared-distance space — confirm
    that is intended.
    """
    # soft-margin
    dists = cdists(batch)
    # same_iden_[i, j] is True when samples i and j share an identity
    same_iden_ = (torch.unsqueeze(idens,0) == torch.unsqueeze(idens,1))
    other_iden = ~same_iden_
    # mask the diagonal so an anchor is never its own positive
    itself = ~torch.eye(same_iden_.size(0), dtype=torch.bool).cuda()
    same_iden = same_iden_ & itself
    infs = torch.ones_like(dists)*torch.Tensor([float('inf')]).cuda()
    # hardest positive: max distance over same-identity pairs (-inf elsewhere)
    dists_pos = torch.where(same_iden, dists, -infs)
    pos = torch.max(dists_pos, axis=1).values
    # hardest negative: min distance over other-identity pairs (+inf elsewhere)
    dists_neg = torch.where(other_iden, dists, infs)
    neg = torch.min(dists_neg, axis=1).values
    diff = (pos + margin) - neg
    # softplus log(exp(x) + 1): smooth replacement for max(0, x)
    diff = torch.log(torch.exp(diff)+1)
    return torch.mean(diff)
def batchhard2(batch, idens, margin=0.1):
    """Batch-hard triplet loss with a hard hinge (ReLU) margin.

    Identical mining to batchhard, but the final hinge is
    max(0, pos + margin - neg) instead of the softplus.
    NOTE(review): requires CUDA; distances from cdists are SQUARED.
    """
    # use relu
    dists = cdists(batch)
    # True where the two samples share an identity
    same_iden_ = (torch.unsqueeze(idens,0) == torch.unsqueeze(idens,1))
    other_iden = ~same_iden_
    # exclude the diagonal (anchor vs itself)
    itself = ~torch.eye(same_iden_.size(0), dtype=torch.bool).cuda()
    same_iden = same_iden_ & itself
    infs = torch.ones_like(dists)*torch.Tensor([float('inf')]).cuda()
    # hardest positive / hardest negative per anchor
    dists_pos = torch.where(same_iden, dists, -infs)
    pos = torch.max(dists_pos, axis=1).values
    dists_neg = torch.where(other_iden, dists, infs)
    neg = torch.min(dists_neg, axis=1).values
    diff = (pos + margin) - neg
    diff = torch.nn.functional.relu(diff)
    return torch.mean(diff)
def create_logger(out_dir, name, time_str):
    """Configure root logging to '<out_dir>/<name>_<time_str>.log' at INFO
    level, mirror records to the console, and return the root logger."""
    log_path = os.path.join(out_dir, '{}_{}.log'.format(name, time_str))
    logging.basicConfig(filename=str(log_path), format='%(asctime)-15s %(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.addHandler(logging.StreamHandler())
    return root_logger
| [
"tranquangtrunghnvn@gmail.com"
] | tranquangtrunghnvn@gmail.com |
c94bf0cccffed7e38591c260e94b27e2d3dfa261 | 9a6f1044fe904311c3aa601013324123904a152d | /ProjetoLojaVirtual/inventario/models.py | f937cdf7ff7dc9a091745a708b715ada965df86b | [] | no_license | ldmaia/ProjetoLojaVirtual | 4a6b3f10e5e36dc431579a9aa75ee77b6fe7d573 | 07c5f80dc1d79e6a33b349773dcd013b6468b898 | refs/heads/master | 2022-12-10T17:20:15.805953 | 2019-04-04T06:52:01 | 2019-04-04T06:52:01 | 179,391,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class Inventory(models.Model):
    """An inventory (collection of items), addressable by slug."""
    # Human-readable name shown in the admin and listings.
    name = models.CharField('Nome', max_length=100)
    # URL-friendly identifier used by get_absolute_url.
    slug = models.SlugField('Identificador', max_length=100)
    # auto_now_add: set once at creation; auto_now: refreshed on every save.
    created = models.DateTimeField('Criado em', auto_now_add=True)
    modified = models.DateTimeField('Modificado em', auto_now=True)
    class Meta:
        verbose_name = 'Inventory'
        verbose_name_plural = 'Inventory'
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Detail view resolved by slug under the 'inventario' namespace.
        return reverse('inventario:inventory', kwargs={'slug': self.slug})
class Item(models.Model):
    """A priced item belonging to an Inventory."""
    name = models.CharField('Nome', max_length=100)
    # URL-friendly identifier used by get_absolute_url.
    slug = models.SlugField('Identificador', max_length=100)
    # DO_NOTHING: deleting an Inventory leaves items pointing at it
    # (referential integrity is left to the database).
    inventory = models.ForeignKey('inventario.Inventory',on_delete=models.DO_NOTHING,verbose_name='Inventory')
    description = models.TextField('Descrição', blank=True)
    # Up to 999,999.99 with two decimal places.
    price = models.DecimalField('Preço', decimal_places=2, max_digits=8)
    created = models.DateTimeField('Criado em', auto_now_add=True)
    modified = models.DateTimeField('Modificado em', auto_now=True)
    class Meta:
        verbose_name = 'Item'
        verbose_name_plural = 'Items'
        ordering = ['name']
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('inventario:item', kwargs={'slug': self.slug})
"ldmaia92@hotmail.com"
] | ldmaia92@hotmail.com |
96393a8b5f0fc74faa8c1fb00aaba8a7db75777c | e301a52127959e86cada66a1808cb7eb3aea72bd | /solutions/FrogJmp.py | ec9a8555ca90b7e61b9441565020b0d873d7b12f | [
"Apache-2.0"
def solution(X, Y, D):
    """Minimum number of fixed-length jumps to reach at least Y from X.

    :param X: start position (int)
    :param Y: target position (int), Y >= X
    :param D: jump length (int), D > 0
    :returns: smallest count such that X + count * D >= Y
    :rtype: int
    """
    # Ceiling division: a partial remainder costs one extra jump.
    return (Y - X + D - 1) // D
| [
"pierre.springer@tum.de"
] | pierre.springer@tum.de |
6e3877952188cded94c414eb37f6d19ebeb95534 | 5462142b5e72cb39bea5b802dd46f55357c4ea84 | /test_pic/vmgirls/dl_vmgirls_pic.py | 499ac3f8cef5a386fe97d91b59fd55f04f358de3 | [] | no_license | qqmadeinchina/myhomeocde | a0996ba195020da9af32613d6d2822b049e515a0 | 291a30fac236feb75b47610c4d554392d7b30139 | refs/heads/master | 2023-03-23T05:28:53.076041 | 2020-08-24T08:39:00 | 2020-08-24T08:39:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | # -*- coding: utf-8 -*-
# @time :2020/8/17 9:56
# @Author:老萝卜
# @file:dl_vmgirls_pic
# @Software:%{PRODUICT_NAME}
'''
爬取https://www.vmgirls.com/所有图片
'''
import time
import requests
from lxml import etree
import os
import json
basepath_picsave="e:\\temp\\pythontest\\vmgirls\\"
headers={
"user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36"
}
# Timestamp snapshots taken once at import time; used in output file names.
sysdatetime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
sysdate=time.strftime('%Y-%m-%d',time.localtime(time.time()))
systime=time.strftime('%H:%M:%S',time.localtime(time.time()))
# BUG FIX: was `sysdatetime_compact==...` — a discarded comparison that left
# the name undefined and made the later savejson(...) call raise NameError.
sysdatetime_compact=time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
# 保存文本内容
def save_html(content, path, oprtye="a", encode="utf-8"):
    # Persist text content to `path`; the mode defaults to append ("a").
    with open(path, oprtye, encoding=encode) as out:
        out.write(content)
# Step 1: request the network — fetch the data the site returns
def get_page(url, encode="utf-8"):
    # Fetch the page and decode with the site's declared encoding
    # so non-ASCII content does not come back garbled.
    return requests.get(url, headers=headers).content.decode(encode)
# Parse the landing page
def xpath_toppage(response):
    """Extract the per-gallery page URLs from the landing page HTML,
    absolutized, de-duplicated, in first-seen order."""
    tree = etree.HTML(response)
    pages = []
    for href in tree.xpath("//a[@class='media-content']/@href"):
        absolute = "https://www.vmgirls.com/" + href
        if absolute not in pages:
            pages.append(absolute)
    return pages
# Create a directory
def createdir(dir_name):
    """Create dir_name if it does not exist, including missing parents.

    makedirs(exist_ok=True) is race-free and also creates intermediate
    directories, unlike the previous exists()+mkdir pair.
    """
    os.makedirs(dir_name, exist_ok=True)
# Parse each gallery's page
def xpath_pages(response):
    """Parse one gallery page.

    Returns (savepath, urllist): the download directory — created if needed,
    named after the page title under `basepath_picsave` — and the relative
    image URLs found on the page. (Removed the unused `pagelist` local and
    the unused `author` lookup from the original.)
    """
    html = etree.HTML(response)
    title = html.xpath("//h1[@class='post-title h3']/text()")[0]
    # The image anchors carry the gallery title in their @title attribute.
    urllist = html.xpath(f"//a[@title='{title}']/@href")
    savepath = basepath_picsave + title + "\\"
    createdir(savepath)
    return (savepath, urllist)
def savepic(filepath, url):
    # Download the image bytes and write them verbatim to filepath.
    response = requests.get(url, headers=headers)
    with open(filepath, "wb") as sink:
        sink.write(response.content)
def savejson(data, filepath, oprtype="a", encode="utf-8"):
    # Serialize `data` as JSON; the default mode appends to an existing file.
    with open(filepath, oprtype, encoding=encode) as sink:
        json.dump(data, sink)
def main():
    """Crawl vmgirls.com: collect every gallery's image URLs, persist the
    (filepath, url) manifest as timestamped JSON, then download each image."""
    url="https://www.vmgirls.com/"
    response=get_page(url)
    # Keep a dated snapshot of the landing page for debugging.
    save_html(response,f".\\www.vmgirls.com.{sysdate}.html","w")
    if response=="":
        print("网页打开失败")  # "failed to open the page"
        return
    pageslist=xpath_toppage(response)
    # print("pageslist=",pageslist)
    picurllist=[]
    # Visit each gallery page and collect (local filename, absolute url) pairs.
    for picsurl in pageslist:
        resp = get_page(picsurl)
        # Overwrites on each iteration — only the last page's snapshot survives.
        save_html(resp,"1.html","w")
        picpath,urllist=xpath_pages(resp)
        # print("urllist=",urllist)
        for picurl in urllist:
            filename=picpath+picurl.split("/")[-1]
            picurl1="https://www.vmgirls.com/"+picurl
            picurllist.append((filename,picurl1))
            # print("picurllist=", picurllist)
            # print("(filename,picurl1)=",filename,picurl1)
    # print("picurllist=",picurllist)
    # temp_str="\n".join(picurllist)
    # save_html(temp_str,"urllist","w")
    # Persist the manifest so a failed run can be resumed/inspected.
    savejson(picurllist,f"picurllist_{sysdatetime_compact}.json","w")
    # with open("picurllist.json","r") as fjson:
    #     data=json.load(fjson)
    # print("data=",data)
    for filepath,pic_url in picurllist:
        savepic(filepath,pic_url)
if __name__=="__main__":
main() | [
"newwxm@126.com"
] | newwxm@126.com |
def resolve():
    """Read n, k then n heights from stdin; print the smallest possible
    (max - min) over any k of the heights."""
    n, k = map(int, input().split())
    heights = sorted((int(input()) for _ in range(n)), reverse=True)
    # Descending order: any best choice of k heights is a contiguous window,
    # whose spread is first - last. Default mirrors the original sentinel.
    best = min((heights[i] - heights[i + k - 1] for i in range(n - k + 1)),
               default=10**9)
    print(best)
resolve() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
64a803604b6d762457fbc84462c8185a3f0425aa | 7a42d40a351824464a3c78dc0c3e78bbd8e0a92f | /bigdog_blog/manage.py | f00bf03a35f34d705c8b341ce90fdc096c01ada7 | [] | no_license | AhMay/DerekBlogLearn | 6595063eafbc237b932e187b5cb3ad8ff32637fc | fdd5ea2fc5732cdc82ad006f7be0a2a1f30d0ba9 | refs/heads/master | 2020-07-09T05:20:33.283672 | 2019-09-29T10:10:23 | 2019-09-29T10:10:23 | 203,891,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command line for this project."""
    # Point Django at this project's settings unless the caller overrides it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bigdog_blog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` preserves the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"meizi111082@hotmail.com"
] | meizi111082@hotmail.com |
e1082ff198d8db3adc6e0116f59b00b93f5df628 | c757d89396a933f057b2bf3a3066797be0e1ff9e | /programa/programa/wsgi.py | 9d92066ebfee985a1fe4b50c0ec58f157ec8453b | [] | no_license | brian0496/DJANGO | 968136d776de4897f8854794fc4ecb993a45b048 | d796963d41f4e1e2c84cb7273a47d55fb65cd9ec | refs/heads/main | 2023-07-15T19:22:41.111750 | 2021-08-02T19:59:06 | 2021-08-02T19:59:06 | 392,070,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for programa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'programa.settings')
application = get_wsgi_application()
| [
"83350243+brian0496@users.noreply.github.com"
] | 83350243+brian0496@users.noreply.github.com |
ed8e36a1482266337850c9cd2a65b88a5bdb33e3 | 793cb34cbaf74f48fb495125abc5af1d588bbe51 | /examples/mujoco/tools.py | 19943882e21c5aee53c28ff57b84119825a5a60d | [
"MIT"
] | permissive | thu-ml/tianshou | 33cc2a2590309beef2815e812df80bd8fc40bbb1 | 600f4bbd555a1995ad5e2dc9ce74304de0941fcb | refs/heads/master | 2023-09-04T11:19:10.017664 | 2023-08-25T21:40:56 | 2023-08-25T21:40:56 | 129,815,042 | 6,964 | 1,154 | MIT | 2023-09-05T21:34:24 | 2018-04-16T22:47:38 | Python | UTF-8 | Python | false | false | 4,560 | py | #!/usr/bin/env python3
import argparse
import csv
import os
import re
from collections import defaultdict
import numpy as np
import tqdm
from tensorboard.backend.event_processing import event_accumulator
def find_all_files(root_dir, pattern):
"""Find all files under root_dir according to relative pattern."""
file_list = []
for dirname, _, files in os.walk(root_dir):
for f in files:
absolute_path = os.path.join(dirname, f)
if re.match(pattern, absolute_path):
file_list.append(absolute_path)
return file_list
def group_files(file_list, pattern):
res = defaultdict(list)
for f in file_list:
match = re.search(pattern, f)
key = match.group() if match else ""
res[key].append(f)
return res
def csv2numpy(csv_file):
csv_dict = defaultdict(list)
with open(csv_file) as f:
for row in csv.DictReader(f):
for k, v in row.items():
csv_dict[k].append(eval(v))
return {k: np.array(v) for k, v in csv_dict.items()}
def convert_tfevents_to_csv(root_dir, refresh=False):
"""Recursively convert test/reward from all tfevent file under root_dir to csv.
This function assumes that there is at most one tfevents file in each directory
and will add suffix to that directory.
:param bool refresh: re-create csv file under any condition.
"""
tfevent_files = find_all_files(root_dir, re.compile(r"^.*tfevents.*$"))
print(f"Converting {len(tfevent_files)} tfevents files under {root_dir} ...")
result = {}
with tqdm.tqdm(tfevent_files) as t:
for tfevent_file in t:
t.set_postfix(file=tfevent_file)
output_file = os.path.join(os.path.split(tfevent_file)[0], "test_reward.csv")
if os.path.exists(output_file) and not refresh:
with open(output_file) as f:
content = list(csv.reader(f))
if content[0] == ["env_step", "reward", "time"]:
for i in range(1, len(content)):
content[i] = list(map(eval, content[i]))
result[output_file] = content
continue
ea = event_accumulator.EventAccumulator(tfevent_file)
ea.Reload()
initial_time = ea._first_event_timestamp
content = [["env_step", "reward", "time"]]
for test_reward in ea.scalars.Items("test/reward"):
content.append(
[
round(test_reward.step, 4),
round(test_reward.value, 4),
round(test_reward.wall_time - initial_time, 4),
],
)
with open(output_file, "w") as f:
csv.writer(f).writerows(content)
result[output_file] = content
return result
def merge_csv(csv_files, root_dir, remove_zero=False):
"""Merge result in csv_files into a single csv file."""
assert len(csv_files) > 0
if remove_zero:
for v in csv_files.values():
if v[1][0] == 0:
v.pop(1)
sorted_keys = sorted(csv_files.keys())
sorted_values = [csv_files[k][1:] for k in sorted_keys]
content = [
[
"env_step",
"reward",
"reward:shaded",
*["reward:" + os.path.relpath(f, root_dir) for f in sorted_keys],
],
]
for rows in zip(*sorted_values):
array = np.array(rows)
assert len(set(array[:, 0])) == 1, (set(array[:, 0]), array[:, 0])
line = [rows[0][0], round(array[:, 1].mean(), 4), round(array[:, 1].std(), 4)]
line += array[:, 1].tolist()
content.append(line)
output_path = os.path.join(root_dir, f"test_reward_{len(csv_files)}seeds.csv")
print(f"Output merged csv file to {output_path} with {len(content[1:])} lines.")
with open(output_path, "w") as f:
csv.writer(f).writerows(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--refresh",
action="store_true",
help="Re-generate all csv files instead of using existing one.",
)
parser.add_argument(
"--remove-zero",
action="store_true",
help="Remove the data point of env_step == 0.",
)
parser.add_argument("--root-dir", type=str)
args = parser.parse_args()
csv_files = convert_tfevents_to_csv(args.root_dir, args.refresh)
merge_csv(csv_files, args.root_dir, args.remove_zero)
| [
"noreply@github.com"
] | thu-ml.noreply@github.com |
c400a4fb080e92bec33828e9cc29884d4747c74c | f7e6e874c979c18d049d73683a185d52194cd937 | /sources/implementations/units/animals/AnimalsLair.py | 153678b5b354daf288044f67170be7b122ccceee | [] | no_license | YouRockGarnov/AnotherMirror | fb4f9d9cb965e2770a619f6261b6a4c63a6279b8 | bf67c965cfee438547b9a188a425ea7965b2379c | refs/heads/master | 2020-03-08T08:49:28.899608 | 2018-04-04T08:25:21 | 2018-04-04T08:25:21 | 128,031,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from implementations.units.animals.AnimalsBreed import AnimalsBreed
from implementations.units.animals.Animal import Animal
from enum import Enum
class AnimalsLair: # factory
class STRENGTH(Enum):
BABY_STRENGTH = 0.8
ADULT_STRENGTH = 1
LEADER_STRENGTH = 1.2
def __init__(self, breed: AnimalsBreed):
self._breed = breed
def create_animal(self, strength: STRENGTH):
self._breed._set_strength(strength.value)
return Animal(self._breed)
| [
"hamta@yandex.ru"
] | hamta@yandex.ru |
cd2e8bb05fa338f68a4a77971b6ff1cbe8948808 | fb0e552591c2fb82a0bc589c887dde601eea336c | /djangoproject/coreapp/decorators.py | b55dfda987966a4d0fec447c445914fe0ab5a8a0 | [] | no_license | bsadd/meme-maker | be1cc2a5e8a084713b36b1056e18d7030f91bcc0 | 06279196a47242015d85d136c2ba4131a785e821 | refs/heads/master | 2023-01-20T13:24:38.019393 | 2020-09-03T09:17:54 | 2020-09-03T09:17:54 | 263,107,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | from functools import wraps
from django.core.exceptions import PermissionDenied
def ajax_login_required(view):
@wraps(view)
def wrapper(request, *args, **kwargs):
if not request.user.is_authenticated:
raise PermissionDenied
return view(request, *args, **kwargs)
return wrapper
def moderator_login_required(view):
@wraps(view)
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated and (request.user.is_moderator or request.user.is_superuser):
return view(request, *args, **kwargs)
raise PermissionDenied
return wrapper
| [
"subangkar.karmaker@gmail.com"
] | subangkar.karmaker@gmail.com |
6f596f42acf015533f00de76f644cd10748b6d87 | 19ed452b9b734b0988cd5bcb949e965b332451d4 | /klaytn-etl/klaytnetl/json_rpc_requests.py | 8b855f35a94df1c958dd992b5f5a191d535311dc | [
"Apache-2.0"
] | permissive | jisunglim/docker-airflow | e1c6c82b293127097057e0f113beac8103f525c6 | 5ddb85c2129eb5533036f0dc6fcf665498a005ee | refs/heads/master | 2021-03-14T08:02:37.285199 | 2020-04-10T04:22:21 | 2020-04-10T04:22:21 | 246,752,319 | 0 | 0 | null | 2020-03-12T05:36:26 | 2020-03-12T05:36:25 | null | UTF-8 | Python | false | false | 2,904 | py | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def generate_get_block_by_number_json_rpc(block_numbers, include_transactions):
for idx, block_number in enumerate(block_numbers):
yield generate_json_rpc(
method='klay_getBlockByNumber',
params=[hex(block_number), include_transactions],
request_id=idx)
def generate_get_block_with_receipt_by_number_json_rpc(block_numbers):
for idx, block_number in enumerate(block_numbers):
yield generate_json_rpc(
method='klay_getBlockWithConsensusInfoByNumber',
params=[hex(block_number)],
request_id=idx)
def generate_trace_block_by_number_json_rpc(block_numbers):
for block_number in block_numbers:
yield generate_json_rpc(
method='debug_traceBlockByNumber',
params=[hex(block_number), {'tracer': 'callTracer'}],
# save block_number in request ID, so later we can identify block number in response
request_id=block_number,
)
def generate_get_receipt_json_rpc(transaction_hashes):
for idx, transaction_hash in enumerate(transaction_hashes):
yield generate_json_rpc(
method='klay_getTransactionReceipt',
params=[transaction_hash],
request_id=idx
)
def generate_get_code_json_rpc(contract_addresses, block='latest'):
for idx, contract_address in enumerate(contract_addresses):
yield generate_json_rpc(
method='klay_getCode',
params=[contract_address, hex(block) if isinstance(block, int) else block],
request_id=idx
)
def generate_json_rpc(method, params, request_id=1):
return {
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': request_id,
}
| [
"jensen.yap@groundx.xyz"
] | jensen.yap@groundx.xyz |
b414054bd417e686ac8b5754f00297d08c5689c5 | 2a82b992b4f4399c6d41a8dd80b6a090edb5ec74 | /restAPI.py | 1a09b60e6887830e13020f2bc00ed83c9a75302e | [] | no_license | JasonRectorTech/Real_Estate_Python | 392bb45fae0cb4631b585a3752c05ec1b1715d0f | 35ecd6ece8278b3e517521deff7070066b3ac363 | refs/heads/master | 2020-04-10T11:28:54.468156 | 2019-01-02T19:01:33 | 2019-01-02T19:01:33 | 160,994,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,093 | py | from flask import Flask, jsonify, request
import credentials
import mysql.connector
import traceback
import sqlQueries
#init Flask application
application = Flask(__name__)
#handling cors
@application.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', 'http://real-estate-maps.s3-website.us-east-2.amazonaws.com')
response.headers.add('Access-Control-Allow-Headers', '*')
response.headers.add('Access-Control-Allow-Methods', 'GET')
return response
#returns all currently rented properties
@application.route('/getAllRentedProperties', methods=["GET"])
def getAllRentedProperties():
try:
cnx, cursor = connectDB()
#closes connection
cursor.close()
cnx.close()
#gets all property details from the properties table by county
rentedProperties = sqlQueries.getAllRentedProperties(cursor)
except:
errorMessage = str(traceback.format_exc())
print(errorMessage)
response = jsonify(results=rentedProperties)
response.status_code = 200
return response
@application.route('/getAllForSaleProperties', methods=["GET"])
def getAllForSaleProperties():
try:
cnx, cursor = connectDB()
#gets all property details from the properties table by county
forSaleProperties = sqlQueries.getAllForSaleProperties(cursor)
#closes connection
cursor.close()
cnx.close()
except:
errorMessage = str(traceback.format_exc())
print(errorMessage)
response = jsonify(results=forSaleProperties)
response.status_code = 200
return response
#gets properties from filter
@application.route('/getProperties', methods=["GET"])
def getPropertiesByFilter():
rentedProperties = []
#get parameters
#TODO add logic to pass in an array of cities
#city is required
cities = request.args.getlist("cities")
(
minForsalePrice,
maxForsalePrice,
minSqft,
maxSqft,
minPriceSqft,
maxPriceSqft,
isForSale,
isForeclosure,
isPending,
isSold,
isRecentlySold,
isForRent,
isRented,
isNoRentals,
beds,
baths
) = initParams(request)
try:
cnx, cursor = connectDB()
#gets all property details from the properties table by county
rentedProperties = sqlQueries.getPropertiesByFilter(cursor, cities, minForsalePrice, maxForsalePrice, minSqft, maxSqft, minPriceSqft, maxPriceSqft,
isForSale, isForeclosure, isPending, isSold, isRecentlySold,
isForRent, isRented, isNoRentals, beds, baths)
#closes connection
cursor.close()
cnx.close()
except:
errorMessage = str(traceback.format_exc())
print(errorMessage)
response = jsonify(results=rentedProperties)
response.status_code = 200
return response
def initParams(request) :
#if not passed in, defaulting to 0 which is assuming unlimited
minForsalePrice = request.args.get("minForsalePrice", 0.0, type=float)
maxForsalePrice = request.args.get("maxForsalePrice", 0.0, type=float)
minSqft = request.args.get("minSqft", 0, type=float)
maxSqft = request.args.get("maxSqft", 0, type=float)
minPriceSqft = request.args.get("minPriceSqft", 0.0, type=float)
maxPriceSqft = request.args.get("maxPriceSqft", 0.0, type=float)
#for sale is the most common use case, so assuming true if not passed in
isForSale = request.args.get("isForSale", True, type=bool)
isForeclosure = request.args.get("isForeclosure", False, type=bool)
isPending = request.args.get("isPending", False, type=bool)
isSold = request.args.get("isSold", False, type=bool)
isRecentlySold = request.args.get("isRecentlySold", False, type=bool)
isForRent = request.args.get("isForRent", False, type=bool)
isRented = request.args.get("isRented", False, type=bool)
isNoRentals = request.args.get("isNoRentals", True, type=bool)
#strings because of '3plus'
beds = request.args.get("beds", "1+", type=str)
baths = request.args.get("baths", "1+", type=str)
return (
minForsalePrice,
maxForsalePrice,
minSqft,
maxSqft,
minPriceSqft,
maxPriceSqft,
isForSale,
isForeclosure,
isPending,
isSold,
isRecentlySold,
isForRent,
isRented,
isNoRentals,
beds,
baths
)
# sets up the db connection
def connectDB():
env = "dev"
#gets username and password based on current environment
username, password = credentials.getDBCredentials(env)
#gets host based on current environment
host = credentials.getHost(env)
cnx = mysql.connector.connect(user=username, password=password, host=host, database='house_db')
cursor = cnx.cursor()
return cnx, cursor
if __name__ == '__main__':
application.debug = True
application.run() | [
"special43543@gmail.com"
] | special43543@gmail.com |
ec9438306b6d904c30e1b551c9e6b8500cf64de2 | 32dd7d178526cb822a462f0682cce74315430aeb | /eventex/subscriptions/migrations/0004_auto_20170619_1924.py | fa01f454a10a04cf200c2737e2d828dbffb56c9b | [] | no_license | leonardocintra/course_django_eventex | fce8e9b0d5694ca1e23a5c32f4af35ba57773c5e | 36f919d323bccf2d7271dd655268963151153b62 | refs/heads/master | 2023-02-12T19:00:40.725169 | 2021-01-12T04:44:52 | 2021-01-12T04:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-19 19:24
from __future__ import unicode_literals
from django.db import migrations, models
import eventex.subscriptions.validators
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0003_auto_20170605_2203'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='cpf',
field=models.CharField(max_length=11, validators=[eventex.subscriptions.validators.validate_cpf], verbose_name='CPF'),
),
migrations.AlterField(
model_name='subscription',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='e-mail'),
),
migrations.AlterField(
model_name='subscription',
name='phone',
field=models.CharField(blank=True, max_length=20, verbose_name='telefone'),
),
]
| [
"toguko@gmail.com"
] | toguko@gmail.com |
4bda416be56a2633614710951ef6f707578f0668 | 8801eec4650286ab80e7fd3d555093352bafb5d5 | /flaskblog/forms.py | 8edef1a2a47982a22bd767c812ad707a67c0c61a | [] | no_license | ibrahimaltay/document-sharing | 9c3b1803fb2de693bd3dffd91f9617736b2ccf02 | 5283f617f80c88f48ac96c1870acae569331b3ee | refs/heads/master | 2022-12-16T20:25:37.871154 | 2020-09-19T22:31:35 | 2020-09-19T22:31:35 | 296,961,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Kullanıcı Adı', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Şifre', validators=[DataRequired()])
confirm_password = PasswordField('Şifreyi Onayla', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Kayıt Ol')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Kullanıcı adı daha önce alınmış, başka bir tane deneyin.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email kullanımda, başka bir tane deneyin.')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Şifre', validators=[DataRequired()])
remember = BooleanField('Beni Hatırla')
submit = SubmitField('Giriş')
class UpdateAccountForm(FlaskForm):
username = StringField('Kullanıcı Adı', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired(), Email()])
picture = FileField('Profil Fotoğrafı', validators=[FileAllowed(['jpg','png','jpeg'])])
submit = SubmitField('Kaydet')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Kullanıcı adı daha önce alınmış, başka bir tane deneyin.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email kullanımda, başka bir tane deneyin.')
class PostForm(FlaskForm):
dosya = FileField('Döküman Yolla', validators = [FileAllowed(["jpg","png","jpeg","docx","pptx","ppt","txt","pdf"])])
title = StringField('Başlık', validators=[DataRequired()])
content = TextAreaField('İçerik', validators=[DataRequired()])
submit = SubmitField('Gönder')
class RequestResetForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Şifre Sıfırlama İsteği')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('Böyle bir hesap yok.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Şifre', validators=[DataRequired()])
confirm_password = PasswordField('Şifreyi Onayla', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Şifreyi Sıfırla') | [
"ibrahimaltay152@hotmail.com"
] | ibrahimaltay152@hotmail.com |
4c37cfa9d7f322fb217de9bcf8b4bc5c7a1da3ac | 5e1385521729efb8a5e90af19638dc43c2fadb88 | /day02/p1.py | e471865cd0c47985422758a7e9772cdf8e28b778 | [] | no_license | pwicks86/adventofcode2017 | d8557f1496af0393b58e669f7f3c78a95565e871 | 11b5bd06ed900b857e726649c8ad2b8d619c2172 | refs/heads/master | 2021-08-30T16:17:15.609771 | 2017-12-18T15:59:21 | 2017-12-18T15:59:21 | 112,669,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | f = open('input.txt')
lines = []
for l in f.readlines():
lines.append(map(int, l.split()))
checksum = []
for l in lines:
lsorted = sorted(l)
checksum.append(abs(lsorted[0] - lsorted[-1]))
print(sum(checksum))
| [
"pwicks86@gmail.com"
] | pwicks86@gmail.com |
a77e73006b4448942af52bf9235df27f8e98a948 | f22d59ae1534838e2706da26214813b7d66ee482 | /poll/polls/migrations/0001_initial.py | 81a23fca941088807f15761c536a0b9b62c6e266 | [] | no_license | s2krish/pollapp | c097e7bc8a53375fe59fc06631c51ada39f3ea0d | 1d3199cadca5278c1535ee9252c79fc915ebc2b0 | refs/heads/master | 2020-03-27T13:01:14.120494 | 2018-08-29T11:19:23 | 2018-08-29T11:19:23 | 146,585,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-29 10:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='poll',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Poll'),
),
]
| [
"krishna.sunuwar@gmail.com"
] | krishna.sunuwar@gmail.com |
18dbce8db5ab987948f6a5c8624c7fca006bf3b9 | 61e3151fe0501f6aa93ef7f3ac9707395da14cd0 | /voc_io.py | d3a210db197372ada4ac857d5a8b2dffa187ebe7 | [] | no_license | mengruxing/voc-tools | 70cf807a9a09464c570c5262e1719acb8b1970fe | 1d5e0334b0f6ce95fd563d30bc04a46aef8c87e8 | refs/heads/master | 2020-06-26T08:27:57.927120 | 2019-07-30T06:14:58 | 2019-07-30T06:14:58 | 199,583,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,247 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : mrx
@Contact : mengruxing@gmail.com
@Date : 2019/7/21
@Project : work_shop
@File : voc_io.py
@Desc : 生成和读取 voc 格式的 xml 文件
"""
import os
import re
import codecs
import logging
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
def prettify(elem):
"""
Return a pretty-printed XML string for the Element.
"""
return etree.tostring(
etree.fromstring(ElementTree.tostring(elem, encoding='utf8')),
pretty_print=True,
encoding='utf-8'
).replace(" ".encode(), "\t".encode())
class PascalVoc(object):
def __init__(self):
self.verified = False
self.folder_name = None
self.image_name = None
self.image_path = None
self.image_shape = [0, 0, 0]
self.database = 'Unknown'
self.segmented = False
self.bboxes = {}
self.shapes = []
def get_image_path(self):
return self.image_path if os.path.exists(self.image_path) else os.path.join(self.folder_name, self.image_name)
class PascalVocWriter(object):
def __init__(self, folder_name, filename, img_size, database_src='Unknown', local_img_path=None):
"""
构建PascalVoc文件写入工具
:param folder_name: folder, 图片所在文件夹名称
:param filename: filename, 图片名称
:param img_size: size, 图片大小 (height, width, depth)
:param database_src: source.database, 数据集
:param local_img_path: path, 图片绝对路径
"""
self.folder_name = folder_name
self.filename = filename
self.local_img_path = local_img_path
self.database_src = database_src
self.img_size = img_size
self.box_list = []
self.verified = False
def add_bbox(self, xmin, ymin, xmax, ymax, name, difficult=False):
"""
添加一个框
:param xmin: xmin
:param ymin: ymin
:param xmax: xmax
:param ymax: ymax
:param name: label
:param difficult: 0
"""
self.box_list.append({'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, 'name': name, 'difficult': difficult})
def save_xml(self, target_path=None):
"""
保存 xml 文件
:param target_path:
:return:
"""
if self.filename is None or self.folder_name is None or self.img_size is None:
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'folder')
folder.text = self.folder_name
filename = SubElement(top, 'filename')
filename.text = self.filename
if self.local_img_path is not None:
local_img_path = SubElement(top, 'path')
local_img_path.text = self.local_img_path
source = SubElement(top, 'source')
database = SubElement(source, 'database')
database.text = self.database_src
size_part = SubElement(top, 'size')
width = SubElement(size_part, 'width')
width.text = str(self.img_size[1])
height = SubElement(size_part, 'height')
height.text = str(self.img_size[0])
depth = SubElement(size_part, 'depth')
depth.text = str(self.img_size[2]) if len(self.img_size) == 3 else '1'
segmented = SubElement(top, 'segmented')
segmented.text = '0'
for each_object in self.box_list:
object_item = SubElement(top, 'object')
name = SubElement(object_item, 'name')
name.text = each_object['name']
pose = SubElement(object_item, 'pose')
pose.text = "Unspecified"
truncated = SubElement(object_item, 'truncated')
if int(float(each_object['ymax'])) == int(float(self.img_size[0])) or (int(float(each_object['ymin'])) == 1):
truncated.text = "1" # max == height or min
elif (int(float(each_object['xmax'])) == int(float(self.img_size[1]))) or (int(float(each_object['xmin'])) == 1):
truncated.text = "1" # max == width or min
else:
truncated.text = "0"
difficult = SubElement(object_item, 'difficult')
difficult.text = str(bool(each_object['difficult']) & 1)
bbox = SubElement(object_item, 'bndbox')
xmin = SubElement(bbox, 'xmin')
xmin.text = str(each_object['xmin'])
ymin = SubElement(bbox, 'ymin')
ymin.text = str(each_object['ymin'])
xmax = SubElement(bbox, 'xmax')
xmax.text = str(each_object['xmax'])
ymax = SubElement(bbox, 'ymax')
ymax.text = str(each_object['ymax'])
if target_path is None:
xml_file_name = re.sub(re.compile(r"\.(jpg|png)$", re.S), ".xml", self.filename)
target_path = os.path.join(self.folder_name, xml_file_name)
if not target_path.endswith('.xml'):
target_path += '.xml'
out_file = codecs.open(target_path, 'w', encoding='utf-8')
out_file.write(prettify(top).decode('utf8'))
out_file.close()
class PascalVocReader(object):
def __init__(self):
"""
构建PascalVoc文件写入工具
"""
self.shapes = []
self.bboxes = {}
self.verified = False
self.folder_name = None
self.image_name = None
self.image_path = None
self.image_shape = (0, 0, 0)
def get_image_path(self):
return self.image_path if os.path.exists(self.image_path) else os.path.join(self.folder_name, self.image_name)
def _add_bbox(self, label, bbox, difficult=False):
"""
添加一个框 (内部调用)
:param label:
:param bbox:
:param difficult:
:return:
"""
xmin = int(float(bbox.find('xmin').text))
ymin = int(float(bbox.find('ymin').text))
xmax = int(float(bbox.find('xmax').text))
ymax = int(float(bbox.find('ymax').text))
try:
self.bboxes[label].append([xmin, ymin, xmax, ymax])
except KeyError:
self.bboxes[label] = [[xmin, ymin, xmax, ymax]]
self.shapes.append((label, [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)], None, None, difficult))
def parse_xml(self, xml_path):
"""
解析 voc.xml 文件
:param xml_path: voc.xml 文件路径
:return:
"""
assert xml_path.endswith('.xml'), "Unsupported file format"
xml_tree = ElementTree.parse(xml_path, parser=etree.XMLParser(encoding='utf-8')).getroot()
try:
self.verified = xml_tree.attrib['verified'] == 'yes'
except KeyError:
pass
try:
self.folder_name = xml_tree.find('folder').text
except AttributeError:
logging.warning('AttributeError: catch exception while parsing folder.')
try:
self.image_name = xml_tree.find('filename').text
except AttributeError:
logging.warning('AttributeError: catch exception while parsing filename.')
try:
self.image_path = xml_tree.find('path').text
except AttributeError:
logging.warning('AttributeError: catch exception while parsing path.')
try:
size = xml_tree.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
depth = int(size.find('depth').text)
except AttributeError:
logging.warning('AttributeError: catch exception while parsing size.')
else:
self.image_shape = (height, width, depth)
for object_iter in xml_tree.findall('object'):
label = object_iter.find('name').text
bbox = object_iter.find('bndbox')
difficult = object_iter.find('difficult')
self._add_bbox(label, bbox, False if difficult is None else bool(int(difficult.text)))
return self
| [
"mengruxing@gmail.com"
] | mengruxing@gmail.com |
a143fc16f6331dc078310d40e66a6ceb4909e318 | acc9d729e0182b17023e9660457eed0e19f4f828 | /test/test_token_resource.py | 6855ae9531f7996b2e7633ed1cc0a9aede6033b2 | [] | no_license | secuwave/nexpose_client | 2f00907ef3ffea33c8e9f5cc2543e708f349de6c | 5ceff219ae03cadb5407dc48d8858ffa56bb3463 | refs/heads/master | 2020-05-22T13:54:22.675479 | 2019-05-13T09:12:09 | 2019-05-13T09:12:09 | 186,369,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,871 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"api.json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. 
All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. 
| `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. 
In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. 
The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. 
| | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... 
``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. 
Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` 
| `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` 
` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. 
Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. 
For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nexpose_client
from nexpose_client.models.token_resource import TokenResource # noqa: E501
from nexpose_client.rest import ApiException
class TestTokenResource(unittest.TestCase):
    """Unit test scaffolding for the ``TokenResource`` model.

    Generated stub: every hook is a deliberate no-op until example
    attribute values are available to construct the model with.
    """

    def setUp(self):
        # No fixtures are needed for this stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testTokenResource(self):
        """Test TokenResource"""
        # FIXME: construct object with mandatory attributes with example values
        # model = nexpose_client.models.token_resource.TokenResource()  # noqa: E501
        pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"root@data-proc.openbase.co.kr"
] | root@data-proc.openbase.co.kr |
a7bd0e5000e6bb063933f8431162d23505e600a2 | 11980553c9883a9711df02e2b7f0dadad67359c5 | /tests/django2/mysite/wsgi.py | 77f71eb3330749fd7f95158e640a9c45bc35a208 | [] | permissive | praekeltfoundation/docker-django-bootstrap | 44a1047e12edebb372768884f226fe73668c241a | d3fd6ef9788ab7ac0b9a70e91419cbc68aaf2f5a | refs/heads/develop | 2023-07-23T20:05:55.581299 | 2023-07-18T12:10:59 | 2023-07-18T12:10:59 | 52,771,805 | 106 | 30 | BSD-3-Clause | 2023-07-20T07:07:32 | 2016-02-29T07:16:21 | Python | UTF-8 | Python | false | false | 389 | py | """
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
application = get_wsgi_application()
| [
"jhewland@gmail.com"
] | jhewland@gmail.com |
cb872b1a2e8169f5130c4fe9920303b6c07c540a | a84c2a94635ed719d355c5eb20fa4b143888ccc3 | /controllers/user_controller.py | 6fbbb062c7bb75b3dc31c7b6fc5a8b96fa6560df | [] | no_license | caothanhha9/graph_ui | de426449162a33908de5ee953474c00d088aac33 | e146a66a4c4073b0e14cc913d9e4aa7ea430e138 | refs/heads/master | 2021-01-17T16:23:00.323084 | 2016-06-10T04:59:53 | 2016-06-10T04:59:53 | 60,820,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | from knowledge_network.utility.service import gen_key
class Login(object):
def __init__(self):
"""
Create default values
:return:
"""
self.status = False
self.token = None
self.default_id = 'xdm'
self.default_pass = 'iloveyou'
self.default_token = '123456789101112131415'
self.user = None
self.password = None
def validate(self, _acc_id, _acc_pass):
"""
Check if account match with db
:param _acc_id: user account
:param _acc_pass: user password
:return: reassign status and token
"""
if (_acc_id == self.default_id) and (_acc_pass == self.default_pass):
user_status = True
if user_status:
self.status = True
self.token = gen_key.id_generator()
self.user = _acc_id
self.password = _acc_pass
def security_check(self, _acc_id, _acc_tok):
"""
check to authenticate the user
:param _acc_id: user account
:param _acc_tok: token sent from client
:return: True or False
"""
check = False
if (_acc_id == self.default_id) and (_acc_tok == self.default_token):
check = True
return check
| [
"hacaothanh@admicro.vn"
] | hacaothanh@admicro.vn |
90fc54f792f717fefdb9834a7e4301d4aa830df0 | 619651ff76c40873c7a8163ca865ff8957e9bf99 | /LeetCode/16. 最接近的三数之和.py | bc7c83e0767a440eb244642f660945a5ce726574 | [] | no_license | zjlyyq/algorithm009-class01 | 4a27f3dd55968b084deaebede36a4435e2013a52 | 43f6d47f947425d6c4904c826ea01be9a9b0566a | refs/heads/master | 2022-11-21T19:04:47.727695 | 2020-07-26T14:11:41 | 2020-07-26T14:11:41 | 264,580,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | '''
思路一:
暴力枚举:
1. 先对数组升序排序
2. 枚举三数之和,大于target直接break(由于是升序,越往后越大不用判断了)
3. 在上述过程中记录下和target最接近的三数和
'''
import math
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
n = len(nums)
nearvalue = 1 << 31 - 1
ans = 0
if nums[0] + nums[1] + nums[2] > target:
return nums[0] + nums[1] + nums[2]
if nums[n-1] + nums[n-2] + nums[n-3] < target:
return nums[n-1] + nums[n-2] + nums[n-3]
for i in range(n-2):
for j in range(i+1, n-1):
for k in range(j+1,n):
s = nums[i] + nums[j] + nums[k]
if abs(s - target) < nearvalue:
nearvalue = abs(s - target)
ans = s
if s > target:
break
return ans | [
"jialuzhang0805@gmail.com"
] | jialuzhang0805@gmail.com |
e1806f01b4c805b9c3859b27e923dd9dd0553b8d | 45c9876f294a8f160fc8059bac31bcee2d2f4a9f | /visa/urls.py | 9eb34d2b07eb11b7ec86ccc97f92ad4f6c95fa21 | [] | no_license | xolmomin/visa | 32f8c7e507e238fc7a621d9792337535d8da5c00 | b088d1b999baf612a00953d351746bcc61cb50e9 | refs/heads/master | 2023-01-02T03:42:21.688320 | 2020-10-26T19:15:41 | 2020-10-26T19:15:41 | 307,476,444 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | """visa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app.urls')),
]
| [
"xolmomin@gmail.com"
] | xolmomin@gmail.com |
78167c95383a13af58d15af1fb240076bb829358 | 8077979cde078adbe4a38816f287d2552c45bdc2 | /mergeexcel/merge/views.py | 5f16d4e0325d96ca028f22d86fc3ccb418581ca0 | [] | no_license | bogeUser/excel_tools | bf13230a120a31f8a6cb933ad83eceda7ecad6a1 | 6506c251a9bc9db86c36696a12d23285e92cfe45 | refs/heads/master | 2020-03-30T08:00:19.775823 | 2018-09-30T15:52:13 | 2018-09-30T15:52:13 | 150,980,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py | import os
from django.conf import settings
from .utils import *
from django.http import HttpResponse, StreamingHttpResponse, JsonResponse
from django.shortcuts import render
# Create your views here.
#上传文件函数
def upload(req):
#如果是post则处理上传的文件
if req.method == "POST":
# 获得文件
f = req.FILES.getlist("xls")
if len(f) == 1:
# 获取文件名字
file_name = f[0].name
# 拼接文件路径
file_path = os.path.join(settings.UPLOADFILES, file_name)
# 打开文件
with open(file_path, 'wb') as fp:
# 遍历写入我们的本地文件
for j in f[0].chunks():
fp.write(j)
#合并文件
ist = mergerexcel(file_path)
a = ' \ '
#拼接
h = a.join(ist)
print("表头是::::", h)
if type(ist) == type([]):
data = {
'code': 1,
'msg': "ok",
'header': h,
}
return JsonResponse(data)
else:
return HttpResponse("合并时出错,请联系技术员,错误信息是:" + str(ist))
else:
#多个文件的时候
#获取文件,生成列表
file_names = [i.name for i in f]
#拼接路径
file_paths = [os.path.join(settings.UPLOADFILES,i) for i in file_names]
# 将文件写入本地
for i in range(len(file_paths)):
with open(file_paths[i], 'wb') as fp:
# 遍历写入我们的本地文件
for j in f[i].chunks():
fp.write(j)
#合并文件
ist = mergerexcels(file_paths)
a = ' \ '
h = a.join(ist)
if type(ist) == type([]):
data = {
'code': 1,
'msg': "ok",
'header': h,
}
return JsonResponse(data)
else:
return HttpResponse("合并时出错,请联系技术员,错误信息是:" + str(ist))
else:
return render(req, "upload.html")
#按照要求排序
def sortfile(req):
#获取排序名称
name = req.GET.get("name")
#如果是old则表示前端没有输入排序的表头
if name == "old":
#路径下的文件
merge = os.listdir(settings.DOWNLOADFILES)
#拼接路径
path = os.path.join(settings.DOWNLOADFILES, merge[0])
#去重函数
quchong(merge=path)
data = {
'code': 1,
'msg': "ok",
'url': merge,
}
return JsonResponse(data)
else:
#切割用户输入的表头
namelist =name.split("\\")
#排序
if sortexcel(namelist):
merge = os.listdir(settings.DOWNLOADFILES)
path = os.path.join(settings.DOWNLOADFILES,merge[0])
#去重
quchong(merge=path)
data = {
'code': 1,
'msg': "ok",
'url': merge,
}
return JsonResponse(data)
else:
data = {
'code': 0,
'msg': "排序出错",
'url': "",
}
return JsonResponse(data)
#下载文件
def download(req):
try:
#获取前端传递给的下载文件名称
download_name = req.GET.get("filename")
#拼接路径
filename = os.path.join(settings.DOWNLOADFILES, download_name)
#获取文件流并生成响应
response = StreamingHttpResponse(readFile(filename))
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="{0}"'.format(download_name)
return response
except Exception as e:
print(e)
return response
| [
"1632651707@qq.com"
] | 1632651707@qq.com |
eb053bc723f385233dc47b22364f966e03d3712c | 6a75fbf5aa540842c07e48bb2c1b4d6aa819f02f | /venv/lib/python3.9/site-packages/google/cloud/vision_v1p4beta1/services/image_annotator/transports/grpc_asyncio.py | e42a03579624f84fb0bb9df8ef544940bd5d2087 | [] | no_license | echigawa0921/vision-api | 46e8f10afc95772592619093fc8699e9f4b61a89 | 1ae115e452e8e34a4264ab3cae281d1e3e2cc4cd | refs/heads/main | 2023-06-18T05:05:51.174620 | 2021-07-11T14:14:22 | 2021-07-11T14:14:22 | 384,146,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,494 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.vision_v1p4beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
from .grpc import ImageAnnotatorGrpcTransport
class ImageAnnotatorGrpcAsyncIOTransport(ImageAnnotatorTransport):
"""gRPC AsyncIO backend transport for ImageAnnotator.
Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "vision.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
def __init__(
self,
*,
host: str = "vision.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def batch_annotate_images(
self,
) -> Callable[
[image_annotator.BatchAnnotateImagesRequest],
Awaitable[image_annotator.BatchAnnotateImagesResponse],
]:
r"""Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
Awaitable[~.BatchAnnotateImagesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_annotate_images" not in self._stubs:
self._stubs["batch_annotate_images"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateImages",
request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
)
return self._stubs["batch_annotate_images"]
@property
def batch_annotate_files(
self,
) -> Callable[
[image_annotator.BatchAnnotateFilesRequest],
Awaitable[image_annotator.BatchAnnotateFilesResponse],
]:
r"""Return a callable for the batch annotate files method over gRPC.
Service that performs image detection and annotation
for a batch of files. Now only "application/pdf",
"image/tiff" and "image/gif" are supported.
This service will extract at most 5 (customers can
specify which 5 in AnnotateFileRequest.pages) frames
(gif) or pages (pdf or tiff) from each file provided and
perform detection and annotation for each image
extracted.
Returns:
Callable[[~.BatchAnnotateFilesRequest],
Awaitable[~.BatchAnnotateFilesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_annotate_files" not in self._stubs:
self._stubs["batch_annotate_files"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/BatchAnnotateFiles",
request_serializer=image_annotator.BatchAnnotateFilesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateFilesResponse.deserialize,
)
return self._stubs["batch_annotate_files"]
@property
def async_batch_annotate_images(
self,
) -> Callable[
[image_annotator.AsyncBatchAnnotateImagesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the async batch annotate images method over gRPC.
Run asynchronous image detection and annotation for a list of
images.
Progress and results can be retrieved through the
``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateImagesResponse`` (results).
This service will write image annotation outputs to json files
in customer GCS bucket, each json file containing
BatchAnnotateImagesResponse proto.
Returns:
Callable[[~.AsyncBatchAnnotateImagesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "async_batch_annotate_images" not in self._stubs:
self._stubs["async_batch_annotate_images"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateImages",
request_serializer=image_annotator.AsyncBatchAnnotateImagesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["async_batch_annotate_images"]
@property
def async_batch_annotate_files(
self,
) -> Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the async batch annotate files method over gRPC.
Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "async_batch_annotate_files" not in self._stubs:
self._stubs["async_batch_annotate_files"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p4beta1.ImageAnnotator/AsyncBatchAnnotateFiles",
request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["async_batch_annotate_files"]
__all__ = ("ImageAnnotatorGrpcAsyncIOTransport",)
| [
"69971834+echigawa0921@users.noreply.github.com"
] | 69971834+echigawa0921@users.noreply.github.com |
f03ead6b04d45c647bbcd679325f762ba8667d82 | 5231237e46f661c825a2bebffe1cd03da05d3a13 | /DoDoc_folder_printer.py | 5ed661bf7996f422310e5e88a188efb3aebd2384 | [] | no_license | peterdemin/DoDoc | 40a0e073ebece65f620415896a5e91e172378b07 | 7d74e909222ca984628f31cc369ad11835d7d93e | refs/heads/master | 2021-01-23T06:44:32.466473 | 2012-02-24T16:50:05 | 2012-02-24T16:50:05 | 3,476,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,132 | py | #!usr/bin/python
# -*- coding: utf8 -*-
encoding='utf8'
'''утф-8'''
import os
from OpenOffice_document import *
class Odt_printer(OpenOffice):
def __init__(self):
super(Odt_printer, self).__init__()
def startPrinting(self, filename):
frame = self.doc.getCurrentController().getFrame()
self.dispatcher.executeDispatch(frame, ".uno:UpdateAll", "", 0, ())
props = [
PropertyValue('Hidden', 0, True, 0),
]
self.dispatcher.executeDispatch(frame, ".uno:Print", "", 0, tuple(props))
return True
def printODT(inputs):
o = Odt_printer()
#print 'connect'
if o.connect():
#print 'open'
for input in inputs:
if o.open(input):
#print 'print'
o.startPrinting(input)
import time
#print 'sleeping...'
time.sleep(10)
#print 'close'
o.close()
#print 'disconnect'
o.disconnect()
def odts_in_folder(folder_name):
odts = []
if os.path.exists(folder_name):
if os.path.isdir(folder_name):
for item in os.listdir(folder_name):
item_path = os.path.join(folder_name, item)
if os.path.isdir(item_path):
odts.extend(odts_in_folder(item_path))
else:
noext, ext = os.path.splitext(item_path)
if ext.lower() == '.odt':
odts.append(item_path)
elif os.path.isfile(folder_name):
noext, ext = os.path.splitext(folder_name)
if ext.lower() == '.odt':
odts.append(folder_name)
return odts
def main():
import sys
if len(sys.argv) == 2:
folder_name = sys.argv[1]
else:
print 'DoDoc_folder_printer prints all *.odt files in given folder.'
print 'Usage:'
print ' python DoDoc_folder_printer.py folder_name'
#return
folder_name = 'printme'
inputs = odts_in_folder(folder_name)
printODT(inputs)
if __name__ == '__main__':
main()
| [
"deminpe@otd263"
] | deminpe@otd263 |
904b1dcf5a624c469500a1a77ca9757a80f1726b | e0832d35207eca2519ee50fdd83fb4737b47e9c5 | /AtCoder/ABC145/A.py | f2b38d692d00e1e5d15c1986bcc638b6cc680dce | [] | no_license | soqutto/practice | f202f7f5ba17d2c98b8e8323ac22e541daac8dbf | fa9f4749d5862037416e7c229ed4fda544a16229 | refs/heads/master | 2023-05-02T07:46:10.885731 | 2021-05-21T05:44:00 | 2021-05-21T05:44:00 | 310,563,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # -*- coding: utf-8 -*-
import math
# 半径1の円の面積
# 1 * pi
# 半径rの円の面積
# r ^ 2 * pi
# 半径rの円の面積は
# 半径1の円の面積のr^2倍である.
r = int(input())
print(r**2)
| [
"me@sokutto.com"
] | me@sokutto.com |
4787c40faa7bc23195b29472a10384a0f756d117 | 2be6e84bf811a18d97221c8a203e6df1b7bb791a | /PythonExercicios/ex13.py | eaab6795f33591778e437c7c1fc362ad07dbb6d4 | [] | no_license | Lucasalsferreira/video_em_aula | 6cf28091546f32125d234ab91f2a3203186338bf | e526066a15417ff2f5e29b4936758dd236d434e8 | refs/heads/master | 2022-12-23T03:37:26.209529 | 2020-09-23T11:04:06 | 2020-09-23T11:04:06 | 297,459,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | s = float(input('Qual o seu sálario: '))
a = s + (s * 15 / 100)
print('O valor do seu salário com os 15% de aumento {:.2f}'.format(a))
| [
"lucas_araujo.20@live.com"
] | lucas_araujo.20@live.com |
4772ff7cf80ba0a1b49144b524c4918ac50fa36c | 2a62d34c84b0c2957be73d6c9d18a3d741d31754 | /src/loans/helper.py | a9d6a8fe2d6d1afcd140351b69cf782dd17d80eb | [] | no_license | OhioDataSolutions/web-backend | 385df7c759d0b4cd95592075a8a729005a0146ae | aa179bc5f926024add3deed4451b43660c66c5cb | refs/heads/master | 2023-02-12T15:06:29.439399 | 2021-01-09T17:06:44 | 2021-01-09T17:06:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,252 | py | """Helper file for loans"""
from pypika import PostgreSQLQuery as Query, Table, Parameter
import pypika.functions as ppfns
from lbshared.pypika_funcs import Greatest
import hashlib
from . import models
DELETED_LOANS_PERM = 'view_deleted_loans'
"""The name of the permission that gives a user permission to view deleted
loans"""
VIEW_ADMIN_EVENT_AUTHORS_PERM = 'view_admin_event_authors'
"""The name of the permission that gives a user permission to view who made
admin edits"""
EDIT_LOANS_PERMISSION = 'edit_loans'
"""The name of the permission that gives a user the ability to modify loans."""
def calculate_etag(itgs, loan_id) -> str:
"""Calculates a valid etag for the loan with the given id. If no such loan
exists this returns None.
"""
loans = Table('loans')
event_tables = [Table(t) for t in [
'loan_admin_events', 'loan_repayment_events', 'loan_unpaid_events'
]]
q = (
Query.from_(loans)
.select(Greatest(
loans.created_at,
loans.unpaid_at,
loans.deleted_at,
*[
tbl.created_at for tbl in event_tables
]
))
)
for tbl in event_tables:
q = q.left_join(tbl).on(loans.id == tbl.loan_id)
q = q.where(loans.id == Parameter('%s'))
itgs.read_cursor.execute(
q.get_sql(),
(loan_id,)
)
row = itgs.read_cursor.fetchone()
if row is None:
return None
(updated_at,) = row
raw_str = f'{loan_id}-{updated_at.timestamp()}'
return hashlib.sha256(raw_str.encode('ASCII')).hexdigest()
def get_basic_loan_info(itgs, loan_id, perms):
"""Get the models.BasicLoanInfo for the given loan if the loan exists and
the user has permission to view the loan. Otherwise, returns None
"""
loans = Table('loans')
query = get_basic_loan_info_query().where(loans.id == Parameter('%s'))
if DELETED_LOANS_PERM not in perms:
query = query.where(loans.deleted_at.isnull())
args = (loan_id,)
itgs.read_cursor.execute(
query.get_sql(),
args
)
row = itgs.read_cursor.fetchone()
if row is None:
return None
return parse_basic_loan_info(row)
def get_basic_loan_info_query():
"""Get the basic query that we use for fetching a loans information"""
loans = Table('loans')
usrs = Table('users')
moneys = Table('moneys')
lenders = usrs.as_('lenders')
borrowers = usrs.as_('borrowers')
principals = moneys.as_('principals')
principal_currencies = Table('currencies').as_('principal_currencies')
principal_repayments = moneys.as_('principal_repayments')
repayment_events = Table('loan_repayment_events')
latest_repayments = Table('latest_repayments')
query = (
Query
.with_(
Query
.from_(repayment_events)
.select(
repayment_events.loan_id,
ppfns.Max(repayment_events.created_at).as_('latest_created_at')
)
.groupby(repayment_events.loan_id),
'latest_repayments'
)
.from_(loans)
.select(
lenders.username,
borrowers.username,
principal_currencies.code,
principal_currencies.symbol,
principal_currencies.symbol_on_left,
principal_currencies.exponent,
principals.amount,
principal_repayments.amount,
loans.created_at,
latest_repayments.latest_created_at,
loans.repaid_at,
loans.unpaid_at,
loans.deleted_at
)
.join(lenders).on(lenders.id == loans.lender_id)
.join(borrowers).on(borrowers.id == loans.borrower_id)
.join(principals).on(principals.id == loans.principal_id)
.join(principal_currencies).on(principal_currencies.id == principals.currency_id)
.join(principal_repayments).on(principal_repayments.id == loans.principal_repayment_id)
.left_join(latest_repayments).on(latest_repayments.loan_id == loans.id)
)
return query
def parse_basic_loan_info(row):
"""Parses a row returned from a basic loan info query into the basic loan
response."""
return models.BasicLoanResponse(
lender=row[0],
borrower=row[1],
currency_code=row[2],
currency_symbol=row[3],
currency_symbol_on_left=row[4],
currency_exponent=row[5],
principal_minor=row[6],
principal_repayment_minor=row[7],
created_at=row[8].timestamp(),
last_repaid_at=row[9].timestamp() if row[9] is not None else None,
repaid_at=row[10].timestamp() if row[10] is not None else None,
unpaid_at=row[11].timestamp() if row[11] is not None else None,
deleted_at=row[12].timestamp() if row[12] is not None else None
)
def get_loan_events(itgs, loan_id, perms):
"""Get the loan events for the given loan if the user has access to view
the loan. The details of each event may also depend on what the user has
access to. Returns the events in ascending (oldest to newest) order.
"""
loans = Table('loans')
usrs = Table('users')
moneys = Table('moneys')
q = (
Query.from_(loans)
.select(loans.created_at)
.where(loans.id == Parameter('%s'))
)
if DELETED_LOANS_PERM not in perms:
q = q.where(loans.deleted_at.isnull())
itgs.read_cursor.execute(
q.get_sql(),
(loan_id,)
)
row = itgs.read_cursor.fetchone()
if row is None:
return []
(created_at,) = row
result = []
creation_infos = Table('loan_creation_infos')
itgs.read_cursor.execute(
Query.from_(creation_infos)
.select(
creation_infos.type,
creation_infos.parent_fullname,
creation_infos.comment_fullname
)
.where(creation_infos.loan_id == Parameter('%s'))
.get_sql(),
(loan_id,)
)
row = itgs.read_cursor.fetchone()
if row is not None:
(creation_type, parent_fullname, comment_fullname) = row
result.append(
models.CreationLoanEvent(
event_type='creation',
occurred_at=created_at.timestamp(),
creation_type=creation_type,
creation_permalink=(
None
if creation_type != 0
else
'https://www.reddit.com/comments/{}/redditloans/{}'.format(
parent_fullname[3:], comment_fullname[3:]
)
)
)
)
admin_events = Table('loan_admin_events')
admins = usrs.as_('admins')
old_principals = moneys.as_('old_principals')
new_principals = moneys.as_('new_principals')
old_principal_repayments = moneys.as_('old_principal_repayments')
new_principal_repayments = moneys.as_('new_principal_repayments')
itgs.read_cursor.execute(
Query.from_(admin_events)
.select(
admins.username,
admin_events.reason,
old_principals.amount,
new_principals.amount,
old_principal_repayments.amount,
new_principal_repayments.amount,
admin_events.old_created_at,
admin_events.new_created_at,
admin_events.old_repaid_at,
admin_events.new_repaid_at,
admin_events.old_unpaid_at,
admin_events.new_unpaid_at,
admin_events.old_deleted_at,
admin_events.new_deleted_at,
admin_events.created_at
)
.join(admins).on(admins.id == admin_events.admin_id)
.join(old_principals).on(old_principals.id == admin_events.old_principal_id)
.join(new_principals).on(new_principals.id == admin_events.new_principal_id)
.join(old_principal_repayments)
.on(old_principal_repayments.id == admin_events.old_principal_repayment_id)
.join(new_principal_repayments)
.on(new_principal_repayments.id == admin_events.new_principal_repayment_id)
.where(admin_events.loan_id == Parameter('%s'))
.get_sql(),
(loan_id,)
)
can_view_admins = VIEW_ADMIN_EVENT_AUTHORS_PERM in perms
row = itgs.read_cursor.fetchone()
while row is not None:
result.append(
models.AdminLoanEvent(
event_type='admin',
occurred_at=row[-1].timestamp(),
admin=(row[0] if can_view_admins else None),
reason=(row[1] if can_view_admins else None),
old_principal_minor=row[2],
new_principal_minor=row[3],
old_principal_repayment_minor=row[4],
new_principal_repayment_minor=row[5],
old_created_at=row[6].timestamp(),
new_created_at=row[7].timestamp(),
old_repaid_at=row[8].timestamp() if row[8] is not None else None,
new_repaid_at=row[9].timestamp() if row[9] is not None else None,
old_unpaid_at=row[10].timestamp() if row[10] is not None else None,
new_unpaid_at=row[11].timestamp() if row[11] is not None else None,
old_deleted_at=row[12].timestamp() if row[12] is not None else None,
new_deleted_at=row[13].timestamp() if row[13] is not None else None
)
)
row = itgs.read_cursor.fetchone()
repayment_events = Table('loan_repayment_events')
repayments = moneys.as_('repayments')
itgs.read_cursor.execute(
Query.from_(repayment_events)
.select(
repayments.amount,
repayment_events.created_at
)
.join(repayments).on(repayments.id == repayment_events.repayment_id)
.where(repayment_events.loan_id == Parameter('%s'))
.get_sql(),
(loan_id,)
)
row = itgs.read_cursor.fetchone()
while row is not None:
result.append(
models.RepaymentLoanEvent(
event_type='repayment',
occurred_at=row[1].timestamp(),
repayment_minor=row[0]
)
)
row = itgs.read_cursor.fetchone()
unpaid_events = Table('loan_unpaid_events')
itgs.read_cursor.execute(
Query.from_(unpaid_events)
.select(
unpaid_events.unpaid,
unpaid_events.created_at
)
.where(unpaid_events.loan_id == Parameter('%s'))
.get_sql(),
(loan_id,)
)
row = itgs.read_cursor.fetchone()
while row is not None:
result.append(
models.UnpaidLoanEvent(
event_type='unpaid',
occurred_at=row[1].timestamp(),
unpaid=row[0]
)
)
row = itgs.read_cursor.fetchone()
result.sort(key=lambda x: x.occurred_at)
return result
| [
"noreply@github.com"
] | OhioDataSolutions.noreply@github.com |
e0b9725b85f060765c87d3f90c763feba50b9bf3 | 6f1e5d5e8130608a2b8c112c5600c331347f953c | /src/bridge_sim/model/__init__.py | fa1f792967a10d2125531894d1611d717a032dbe | [
"MIT"
] | permissive | r-snijders/bridge-sim | ed95999385d04db2e6b115bf04f20f4115bd8f6c | 20cbb956dc83b9b5e1779048a65b2ebd515f7fe6 | refs/heads/master | 2022-09-29T08:02:03.959097 | 2020-05-28T12:04:20 | 2020-05-28T12:04:20 | 267,316,873 | 0 | 0 | MIT | 2020-05-27T12:43:37 | 2020-05-27T12:43:37 | null | UTF-8 | Python | false | false | 47,000 | py | """The core classes: Bridge, Config, PointLoad etc."""
import os
from enum import Enum
from itertools import chain
from timeit import default_timer as timer
from typing import List, Union, Tuple, Optional, Callable
import numpy as np
from matplotlib import cm as cm, colors as colors, pyplot as plt
from scipy.interpolate import interp1d
from bridge_sim.util import (
safe_str,
round_m,
flatten,
print_i,
print_w,
print_s,
_get_dir,
)
# Number of decimal places used when rounding coordinates and distances,
# i.e. positions are precise to 1e-6 meters (see Point and PointLoad).
DIST_DECIMALS = 6
class PierSettlement:
    """A vertical displacement applied to one bridge pier in simulation."""

    def __init__(self, pier: int, settlement: float):
        """Record which pier settles and by how much.

        :param pier: index of a pier on a bridge.
        :param settlement: amount of pier settlement to apply.
        """
        self.pier = pier
        self.settlement = settlement

    def id_str(self):
        """Filesystem-safe identifier for this pier settlement."""
        return safe_str(f"{np.around(self.settlement, 3)}-{self.pier}")
class Point:
    """A point in 3D space, coordinates rounded to DIST_DECIMALS places."""

    def __init__(self, x: float = 0, y: float = 0, z: float = 0):
        """Construct a point from its (X, Y, Z) coordinates."""
        self.x: float = np.around(x, DIST_DECIMALS)
        self.y: float = np.around(y, DIST_DECIMALS)
        self.z: float = np.around(z, DIST_DECIMALS)

    def distance(self, point):
        """Euclidean distance to another point, rounded like coordinates."""
        dx = self.x - point.x
        dy = self.y - point.y
        dz = self.z - point.z
        return np.around(np.sqrt(dx * dx + dy * dy + dz * dz), DIST_DECIMALS)

    def __str__(self):
        return f"({self.x}, {self.y}, {self.z})"
class PointLoad:
def __init__(self, x: float, z: float, load: float):
"""A point load applied in simulation.
:param x: X position on a bridge.
:param z: Z position on a bridge.
:param load: intensity of the point load.
:return: A point load object.
"""
self.x = x
self.z = z
self.load = load
def __repr__(self):
"""Human readable representation."""
return f"x = {self.x}, z = {self.z}, load = {self.load}"
def id_str(self):
"""String uniquely representing this point load."""
return safe_str(
f"({np.around(self.x, DIST_DECIMALS)}, {np.around(self.z, DIST_DECIMALS)}, {np.around(self.load, DIST_DECIMALS)})"
)
def point(self) -> Point:
"""The 'Point' part of this point load."""
return Point(x=self.x, y=0, z=self.z)
class ResponseType(Enum):
    """A simulation response type."""

    XTrans = "xtrans"
    YTrans = "ytrans"
    ZTrans = "ztrans"
    StressXXB = "stressxxb"
    StressXXT = "stressxxt"
    StressZZB = "stresszzb"
    StrainXXB = "strainxxb"
    StrainXXT = "strainxxt"
    StrainZZB = "strainzzb"

    @staticmethod
    def all() -> List["ResponseType"]:
        """Every response type, in definition order."""
        return list(ResponseType)

    def is_stress(self):
        """Whether this response type is a stress type."""
        return self in (
            ResponseType.StressXXB,
            ResponseType.StressXXT,
            ResponseType.StressZZB,
        )

    def is_strain(self):
        """Whether this response type is a strain type."""
        return self in (
            ResponseType.StrainXXB,
            ResponseType.StrainXXT,
            ResponseType.StrainZZB,
        )

    def ss_direction(self) -> str:
        """Stress/strain direction suffix, e.g. "XXB".

        Raises ValueError for non stress/strain response types.
        """
        if self.is_stress() or self.is_strain():
            return self.name()[-3:]
        raise ValueError("Not stress or strain")

    def name(self) -> str:
        """Human readable name for a response type.

        NOTE: shadows enum.Enum's 'name' attribute; call it as a method.
        """
        names = {
            ResponseType.XTrans: "X translation",
            ResponseType.YTrans: "Y translation",
            ResponseType.ZTrans: "Z translation",
            ResponseType.StressXXB: "Stress XXB",
            ResponseType.StressXXT: "Stress XXT",
            ResponseType.StressZZB: "Stress ZZB",
            ResponseType.StrainXXB: "Strain XXB",
            ResponseType.StrainXXT: "Strain XXT",
            ResponseType.StrainZZB: "Strain ZZB",
        }
        return names[self]

    def units(self, short: bool = True) -> str:
        """Human readable units (long or short) for a response type."""
        long_and_short = {
            ResponseType.XTrans: ("meters", "m"),
            ResponseType.YTrans: ("meters", "m"),
            ResponseType.ZTrans: ("meters", "m"),
            ResponseType.StressXXB: ("kilo Newton", "N/mm²"),
            ResponseType.StressXXT: ("kilo Newton", "N/mm²"),
            ResponseType.StressZZB: ("kilo Newton", "N/mm²"),
            ResponseType.StrainXXB: ("kilo Newton", ""),
            ResponseType.StrainXXT: ("kilo Newton", ""),
            ResponseType.StrainZZB: ("kilo Newton", ""),
        }[self]
        return long_and_short[int(short)]


# Shorthand for ResponseType.
RT = ResponseType
class Config:
    def __init__(
        self,
        bridge: Callable[[], "Bridge"],
        sim_runner: Callable[[], "FEMRunner"],
        vehicle_data_path: str,
        vehicle_pdf: List[Tuple[float, float]],
        vehicle_pdf_col: str,
        generated_data: str = "generated-data",
        shorten_paths: bool = False,
    ):
        """Simulation configuration object.

        Combines a Bridge and FEMRunner among other configuration.

        :param bridge: function that returns a bridge.
        :param sim_runner: simulation runner.
        :param vehicle_data_path: path of the vehicles CSV file.
        :param vehicle_pdf:
            percentage of vehicles below a maximum value for that column.

            Example: [(2.4, 0.5), (5.6, 94.5), (16, 5)]

            Here 5% of vehicles are 2.4m or less in length, 94.5% greater than
            2.4m and less than 5.6m, and the remaining 5% are less than 16m.
            This applies if 'vehicle_pdf_col' is "length".
        :param vehicle_pdf_col: column of vehicle_data to group by.
        :param generated_data: directory where to save generated files.
        :param shorten_paths: shorten simulation paths.
        :raises ValueError: if 'generated_data' ends in a path separator.
        """
        # Core: the bridge and the FEM runner are built from the given
        # factory functions (the runner receives this Config).
        self._bridge = bridge
        self.bridge = self._bridge()
        self._sim_runner = sim_runner
        self.sim_runner = self._sim_runner(self)
        # OpenSees model template files.
        self.os_model_template_path: str = "model-template.tcl"
        self.os_3d_model_template_path: str = "model-template-3d.tcl"
        # Simulation performance.
        self.parallel = 1  # Number of simulations to run in parallel.
        self.parallel_ulm = True
        self.shorten_paths = shorten_paths
        self.resp_matrices = dict()
        # Unit loads used to build influence lines.
        self.il_num_loads: int = 600
        self.il_unit_load_kn: float = 1000
        self.pd_unit_disp: float = 1.0
        self.pd_unit_load_kn: int = 10
        self.unit_axial_delta_temp_c: int = 1
        self.unit_moment_delta_temp_c: int = 1
        self.cte = 12e-6  # Coefficient of thermal expansion.
        # Responses & events.
        self.sensor_hz: float = 1 / 100  # Sensor sample period, in seconds.
        self.event_time_s: float = 2  # Seconds.
        # Vehicles.
        self.perturb_stddev: float = 0.1
        self.axle_width: float = 2.5  # Width of a vehicle axle, in meters.
        self.vehicle_pdf = vehicle_pdf
        self.vehicle_pdf_col = vehicle_pdf_col
        start = timer()
        self.vehicle_data_path = vehicle_data_path
        # Necessary to prevent a circular import.
        from bridge_sim.vehicles.sample import load_vehicle_data

        self.vehicle_data = load_vehicle_data(vehicle_data_path)
        print_i(
            f"Loaded vehicles data from {vehicle_data_path} in"
            + f" {timer() - start:.2f}s"
        )
        # Ensure vehicles probability density sums to 1.
        # NOTE(review): normalization only runs when the rounded sum is NOT
        # 100, so a PDF summing to exactly 100 is left as percentages while
        # any other sum is scaled to 1 — confirm this is intended.
        pdf_sum = sum(map(lambda f: f[1], self.vehicle_pdf))
        if int(pdf_sum) != 100:
            pre_pdf_sum = pdf_sum
            for i in range(len(self.vehicle_pdf)):
                self.vehicle_pdf[i] = (
                    self.vehicle_pdf[i][0],
                    self.vehicle_pdf[i][1] / pdf_sum,
                )
            pdf_sum = sum(map(lambda f: f[1], self.vehicle_pdf))
            print_w(f"Vehicle PDF sums to {pre_pdf_sum}, adjusted to sum to 1")
        # Root directories for generated data. These are lambdas so that the
        # directories are (re)created on demand by '_get_dir'.
        self._root_generated_data_dir = generated_data
        self.root_generated_data_dir = lambda: _get_dir(self._root_generated_data_dir)
        if self._root_generated_data_dir[-1] in "/\\":
            raise ValueError("generated_data must not end in path separator")
        self.root_generated_images_dir = lambda: _get_dir(
            os.path.join(self.root_generated_data_dir() + "-images")
        )

    # Bridge-specific directories for generated data.

    def generated_data_dir(self):
        """Directory of generated data for the current bridge (created)."""
        return _get_dir(
            os.path.join(self.root_generated_data_dir(), self.bridge.id_str(),)
        )

    def generated_images_dir(self):
        """Directory of generated images for the current bridge (created)."""
        return _get_dir(
            os.path.join(self.root_generated_images_dir(), self.bridge.id_str(),)
        )

    # Bridge-specific but accuracy-independent directories.

    def generated_data_dir_no_acc(self):
        """Like 'generated_data_dir' but independent of model accuracy."""
        return _get_dir(
            os.path.join(
                self.root_generated_data_dir(),
                self.bridge.id_str(msl=False, data_id=False),
            )
        )

    def generated_images_dir_no_acc(self):
        """Like 'generated_images_dir' but independent of model accuracy."""
        return _get_dir(
            os.path.join(
                self.root_generated_images_dir(),
                self.bridge.id_str(msl=False, data_id=False),
            )
        )

    def get_path_in(self, in_: str, dirname: str, filename: str):
        """Filepath in a directory in a directory (created if necessary).

        TODO: Use safe_str here.
        """
        dirpath = os.path.join(in_, dirname)
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        return os.path.join(dirpath, filename)

    def get_data_path(
        self, dirname: str, filename: str, bridge: bool = True, acc: bool = True
    ):
        """Get a bridge-specific data path in a named directory.

        :param dirname: directory name under the chosen data directory.
        :param filename: name of the file within 'dirname'.
        :param bridge: whether the path is specific to the current bridge.
        :param acc: whether the path is specific to the model accuracy.
        """
        # BUG FIX: the non bridge-specific branch previously returned the
        # root *images* directory (copy-paste from 'get_image_path'); it now
        # returns the root data directory. Branches are also evaluated
        # lazily so unused directories are not created as a side effect.
        if not bridge:
            dir_path = self.root_generated_data_dir()
        elif not acc:
            dir_path = self.generated_data_dir_no_acc()
        else:
            dir_path = self.generated_data_dir()
        return self.get_path_in(dir_path, dirname, filename)

    def get_image_path(
        self, dirname: str, filename: str, bridge: bool = True, acc: bool = True
    ):
        """Get a bridge-specific image path in a named directory.

        :param dirname: directory name under the chosen images directory.
        :param filename: name of the file within 'dirname'.
        :param bridge: whether the path is specific to the current bridge.
        :param acc: whether the path is specific to the model accuracy.
        """
        if not bridge:
            dir_path = self.root_generated_images_dir()
        elif not acc:
            dir_path = self.generated_images_dir_no_acc()
        else:
            dir_path = self.generated_images_dir()
        return self.get_path_in(dir_path, dirname, filename)
class Dimensions(Enum):
    """Dimensionality of a bridge model; only 3D is supported."""

    D3 = "D3"

    def name(self) -> str:
        """Human readable name for dimensions.

        NOTE: shadows enum.Enum's 'name' attribute; call it as a method.
        """
        if self is Dimensions.D3:
            return "3D"
        raise KeyError(self)
class Support:
    """A support (pier) of the bridge deck, when 3D modeling.

    Geometry: the pier's top is a rectangle of size ``length`` (along X) by
    ``width_top`` (along Z), centered at (``x``, ``z``) directly under the
    deck. The pier extends ``height`` meters downwards, narrowing to
    ``width_bottom`` at its base, so ``width_top`` must be greater than or
    equal to ``width_bottom``.

    :param x: X position of the center of the support, in meters.
    :param z: Z position of the center of the support, in meters.
    :param length: length of the support, in meters.
    :param height: height of the support, in meters.
    :param width_top: width of the top of the support, in meters.
    :param width_bottom: width of the bottom of the support, in meters.
    :param materials: either a list of MaterialSupport, or a function from
        fraction of pier length to MaterialSupport.
    :param fix_x_translation: whether X translation is fixed.
    :param fix_z_translation: whether Z translation is fixed.
    :param fix_y_translation: whether Y translation is fixed.
    :param fix_x_rotation: whether rotation about X is fixed.
    :param fix_z_rotation: whether rotation about Z is fixed.
    :param fix_y_rotation: whether rotation about Y is fixed.
    """

    def __init__(
        self,
        x: float,
        z: float,
        length: float,
        height: float,
        width_top: float,
        width_bottom: float,
        materials: Union[List["MaterialSupport"], Callable[[float], "MaterialSupport"]],
        fix_x_translation: bool,
        fix_z_translation: bool,
        fix_y_translation: bool = True,
        fix_x_rotation: bool = False,
        fix_z_rotation: bool = False,
        fix_y_rotation: bool = False,
    ):
        self.x, self.z = x, z
        self.length, self.height = length, height
        self.width_top, self.width_bottom = width_top, width_bottom
        self.fix_x_translation = fix_x_translation
        self.fix_y_translation = fix_y_translation
        self.fix_z_translation = fix_z_translation
        self.fix_x_rotation = fix_x_rotation
        self.fix_y_rotation = fix_y_rotation
        self.fix_z_rotation = fix_z_rotation
        self._sections = materials
        # Sections are either a function of fraction of pier length, or a
        # list of MaterialSupport.
        if not callable(self._sections):
            assert isinstance(self._sections, list)
            assert all(isinstance(s, MaterialSupport) for s in self._sections)
        if self.width_top < self.width_bottom:
            raise ValueError("Support: top width must be >= bottom width")

    def x_min_max_top(self) -> Tuple[float, float]:
        """The min and max x positions for the top of this pier."""
        half = self.length / 2
        return round_m(self.x - half), round_m(self.x + half)

    def y_min_max(self) -> Tuple[float, float]:
        """The min and max y positions for this pier."""
        return round_m(-self.height), 0

    def z_min_max_top(self) -> Tuple[float, float]:
        """The min and max z positions for the top of this pier."""
        half = self.width_top / 2
        return round_m(self.z - half), round_m(self.z + half)

    def z_min_max_bottom(self) -> Tuple[float, float]:
        """The min and max z positions for the bottom of this pier."""
        half = self.width_bottom / 2
        return round_m(self.z - half), round_m(self.z + half)
class Lane:
    """A traffic lane spanning the length of a bridge.

    :param z0: Z ordinate of one edge of the lane, in meters.
    :param z1: Z ordinate of the other edge of the lane, in meters.
    :param ltr: whether traffic moves left to right, or opposite.

    Attributes:
        z_min: lower Z edge of the lane, in meters.
        z_max: upper Z edge of the lane, in meters.
        width: width of the lane, in meters.
        z_center: Z position of the lane's center line, in meters.
    """

    def __init__(self, z0: float, z1: float, ltr: bool):
        lo, hi = sorted((z0, z1))
        self.z_min: float = round_m(lo)
        self.z_max: float = round_m(hi)
        self.ltr: bool = ltr
        self.width = round_m(self.z_max - self.z_min)
        self.z_center = round_m(self.z_min + (self.width / 2))
class Material:
    """Material properties for a rectangular region of the bridge deck.

    :param thickness: section thickness in m.
    :param youngs: Young's modulus of the section in MPa.
    :param poissons: Poisson's ratio.
    :param start_x_frac: start of the section as a fraction of x position.
    :param start_z_frac: start of the section as a fraction of z position.
    :param end_x_frac: end of the section as a fraction of x position.
    :param end_z_frac: end of the section as a fraction of z position.
    :param density: section density in kg/m.
    :param youngs_x: optional Young's modulus in x direction, in MPa;
        defaults to 'youngs' when not given.
    """

    def __init__(
        self,
        thickness: float,
        youngs: float,
        poissons: float,
        start_x_frac: float = 0,
        start_z_frac: float = 0,
        end_x_frac: float = 1,
        end_z_frac: float = 1,
        density: float = 0,
        youngs_x: Optional[float] = None,
    ):
        self.density = density
        self.thickness = thickness
        self.youngs = youngs
        # Fall back to 'youngs' when no X-direction modulus was given.
        self.youngs_x = lambda: youngs if youngs_x is None else youngs_x
        self.poissons = poissons
        self.start_x_frac = start_x_frac
        self.start_z_frac = start_z_frac
        self.end_x_frac = end_x_frac
        self.end_z_frac = end_z_frac

    def contains(self, bridge: "Bridge", x: float, z: float) -> bool:
        """Whether the point at (x, z) lies in this section (edges included)."""
        x_frac, z_frac = bridge.x_frac(x), bridge.z_frac(z)

        def within(lo: float, hi: float, frac: float) -> bool:
            # Inclusive bounds, tolerant of floating point error.
            return (lo < frac or np.isclose(lo, frac)) and (
                hi > frac or np.isclose(hi, frac)
            )

        return within(self.start_x_frac, self.end_x_frac, x_frac) and within(
            self.start_z_frac, self.end_z_frac, z_frac
        )

    def mat_id_str(self):
        """Representation of this section by material properties."""
        return f"{self.density}-{self.thickness}-{self.youngs}-{self.poissons}"

    def y_min_max(self) -> Tuple[float, float]:
        """The min and max values in y for this section."""
        return -self.thickness, 0

    def prop_str(self):
        """Textual representation of material properties."""
        return "\n".join(
            [
                "Material",
                f" starts at (x_frac, z_frac) ="
                f" ({round_m(self.start_x_frac)}, {round_m(self.start_z_frac)})",
                f" ends at (x_frac, z_frac) ="
                f" ({round_m(self.end_x_frac)}, {round_m(self.end_z_frac)})",
                f" density = {self.density} kg/m",
                f" thickness = {self.thickness} m",
                f" youngs = {self.youngs} MPa",
                f" poissons = {self.poissons}",
            ]
        )


MaterialDeck = Material
class MaterialSupport(Material):
    """Material properties for a section of a pier.

    Unlike deck sections, a pier section is located by a single fraction of
    the pier's length. The inherited X/Z fraction fields are set to None,
    so 'Material.contains' must not be used on a pier section.

    :param density: section density in kg/m.
    :param thickness: section thickness in m.
    :param youngs: Young's modulus of the section in MPa.
    :param poissons: Poisson's ratio.
    :param start_frac_len: start of the section as a fraction of pier length.
    """

    def __init__(
        self,
        density: float,
        thickness: float,
        youngs: float,
        poissons: float,
        start_frac_len: float,
    ):
        # X/Z fractions are meaningless for a pier section.
        super().__init__(
            density=density,
            thickness=thickness,
            youngs=youngs,
            poissons=poissons,
            start_x_frac=None,
            start_z_frac=None,
            end_x_frac=None,
            end_z_frac=None,
        )
        self.start_frac_len = start_frac_len

    def prop_str(self):
        """Textual representation of material properties."""
        return "\n".join(
            [
                "Material",
                f" starts at {round_m(self.start_frac_len)}",
                f" density = {self.density} kg/m",
                f" thickness = {self.thickness} m",
                f" youngs = {self.youngs} MPa",
                f" poissons = {self.poissons}",
            ]
        )
class Bridge:
    def __init__(
        self,
        name: str,
        length: float,
        width: float,
        supports: List[Support],
        materials: List["MaterialDeck"],
        lanes: List[Lane],
        msl: float,
        data_id: str = "healthy",
        single_sections: Optional[Tuple[Material, Material]] = None,
    ):
        """A bridge's geometry, material properties and boundary conditions.

        Args:
            name: name of this bridge.
            length: length of this bridge, in meters.
            width: width of this bridge, in meters.
            supports: a list of Support.
            materials: a list of bridge deck Material.
            lanes: a list of Lane for traffic to drive on.
            msl: maximum shell length.
            data_id: additional identifier for saving/loading data.
            single_sections: tuple of one deck and one material for supports.
        """
        # Given arguments.
        self.name = name
        self.msl = msl
        self.data_id = data_id
        self.length = length
        self.width = width
        self.supports = supports
        self.sections = materials
        self.lanes = lanes
        self.dimensions = Dimensions.D3
        self.ref_temp_c = 17  # Reference temperature, in degrees Celsius.
        self._next_section_id = 1
        # Mesh: maximum element sizes, all derived from 'msl'.
        self.base_mesh_deck_max_x = msl
        self.base_mesh_deck_max_z = msl
        self.base_mesh_pier_max_long = msl
        # Attach single section option for asserts and printing info.
        self.single_sections = single_sections
        if self.single_sections is not None:
            self.name += "-single-sections"
            self.sections = [self.single_sections[0]]  # Set deck section.
            for pier in self.supports:  # Set pier sections.
                pier.sections = [self.single_sections[1]]
        self.additional_xs = []
        # Derived attributes.
        #
        # NOTE: The function y_min_max calculates the min and max values of
        # the bridge in y direction based on the supports and sections. For a
        # 3D bridge neither supports nor sections contain information on the
        # min or max values in z direction, so z bounds come from 'width'.
        self.x_min, self.x_max = 0, length
        self.y_min, self.y_max = self.y_min_max()
        self.z_min, self.z_max = -width / 2, width / 2
        self.x_center = (self.x_min + self.x_max) / 2
        self.y_center = (self.y_min + self.y_max) / 2
        self.z_center = (self.z_min + self.z_max) / 2
        self.height = self.y_max - self.y_min
        # All sections belonging to this bridge, keyed by 'prop_str'.
        self._sections_dict = dict()
        # Assert the bridge is fine and print info.
        self._assert_bridge()

    def _get_section(self, section: Material) -> Material:
        """An equivalent section if one exists, else the given one.

        Either way the returned section is assigned a fresh id.
        """

        def with_id(s: Material) -> Material:
            s.id = self._next_section_id
            self._next_section_id += 1
            return s

        section_prop_str = section.prop_str()
        if section_prop_str in self._sections_dict:
            return with_id(self._sections_dict[section_prop_str])
        self._sections_dict[section_prop_str] = section
        return with_id(self._sections_dict[section_prop_str])

    def deck_section_at(self, x: float, z: float) -> Material:
        """Return the deck section at given position.

        Raises ValueError if no section contains the point.
        """
        if callable(self.sections):
            raise NotImplementedError()
        if len(self.sections) == 1:
            return self._get_section(self.sections[0])
        for section in self.sections:
            if section.contains(bridge=self, x=x, z=z):
                return self._get_section(section)
        # BUG FIX: this message was a plain string missing the 'f' prefix, so
        # the literal text "{x}, {z}" was reported instead of the position.
        raise ValueError(f"No section for x, z = {x}, {z}")

    def pier_section_at_len(self, p_i: int, section_frac_len: float) -> Material:
        """Return the section at a fraction of a pier's length.

        Args:
            p_i: index of the pier.
            section_frac_len: fraction of the pier's length, in [0 1].
        """
        assert 0 <= section_frac_len <= 1
        pier = self.supports[p_i]
        if callable(pier._sections):
            return self._get_section(pier._sections(section_frac_len))
        if len(pier._sections) == 1:
            return self._get_section(pier._sections[0])
        raise ValueError(f"Pier {p_i} sections are not a function")

    def print_info(self, c: "Config", pier_fix_info: bool = False):
        """Print summary information about this bridge.

        Args:
            c: simulation configuration (for wheel track positions).
            pier_fix_info: print information on each pier's fixed nodes.
        """
        print_s(f"Bridge dimensions:")
        print_s(f"  x = ({self.x_min}, {self.x_max})")
        print_s(f"  y = ({self.y_min}, {self.y_max})")
        print_s(f"  z = ({self.z_min}, {self.z_max})")
        print_s(f"Bridge lanes:")
        # BUG FIX: was 'self.wheel_tracks(c)', a method that does not exist;
        # the wheel track Z positions come from 'wheel_track_zs'.
        wheel_tracks = self.wheel_track_zs(c)
        for l, lane in enumerate(self.lanes):
            print_s(f"  lane {l}: {lane.z_min} <= z <= {lane.z_max}")
            print_s(f"  lane {l}: center at z = {lane.z_center}")
            track_0 = wheel_tracks[l * 2]
            track_1 = wheel_tracks[l * 2 + 1]
            print_s(f"  lane {l}: wheel tracks at z = {track_0}, {track_1}")
        if self.single_sections:
            print_s("One section for the deck, one for piers:")
            print_s(f"Deck:")
            list(map(print_s, str(self.sections[0]).split("\n")))
            print_s(f"Piers:")
            list(map(print_s, str(self.supports[0].sections[0]).split("\n")))
        if pier_fix_info:
            for p, pier in enumerate(self.supports):
                print_s(f"Pier {p} fixed:")
                print_s(f"  x-trans {pier.fix_x_translation}")
                print_s(f"  y-trans {pier.fix_y_translation}")
                print_s(f"  z-trans {pier.fix_z_translation}")
                print_s(f"  x-rot   {pier.fix_x_rotation}")
                print_s(f"  y-rot   {pier.fix_y_rotation}")
                print_s(f"  z-rot   {pier.fix_z_rotation}")

    def id_str(self, msl: bool = True, data_id: bool = True):
        """Name with accuracy information.

        Args:
            msl: bool, include msl in identifier.
            data_id: bool, include data_id in identifier.
        """
        acc_str = f"-{self.msl}" if msl else ""
        data_id_str = f"-{self.data_id}" if data_id else ""
        return safe_str(f"{self.name}{acc_str}{data_id_str}")

    def closest_lane(self, z: float):
        """Index of the lane whose center line is closest to the Z position."""
        result = None
        lane_dist = np.inf
        for lane_ind, lane in enumerate(self.lanes):
            this_dist = abs(lane.z_center - z)
            if this_dist < lane_dist:
                result = lane_ind
                lane_dist = this_dist
        return result

    def wheel_track_zs(self, c: "Config"):
        """Z positions of wheel tracks on the bridge, sorted ascending.

        Two tracks per lane, one half axle-width either side of its center.
        """
        half_axle = c.axle_width / 2
        return sorted(
            chain.from_iterable(
                [lane.z_center - half_axle, lane.z_center + half_axle]
                for lane in self.lanes
            )
        )

    def wheel_track_xs(self, c: "Config"):
        """Unit load x positions for wheel tracks on this bridge."""
        return round_m(np.linspace(c.bridge.x_min, c.bridge.x_max, c.il_num_loads))

    def y_min_max(self):
        """The min and max values in y direction from supports and sections."""
        return self._min_max(lambda s: s.y_min_max())

    def z_min_max(self):
        """The min and max values in z direction from supports and sections.

        NOTE(review): neither Material nor Support defines a 'z_min_max'
        method, so calling this raises AttributeError — appears to be unused
        legacy; confirm before relying on it.
        """
        return self._min_max(lambda s: s.z_min_max())

    def x_axis(self) -> List[float]:
        """Position of supports in meters along the bridge's x-axis."""
        # BUG FIX: previously read a nonexistent 'x_frac' attribute of each
        # Support; a Support stores its position in meters as 'x'.
        return [support.x for support in self.supports]

    def x_axis_equi(self, n) -> List[float]:
        """n equidistant values along the bridge's x-axis, in meters."""
        return np.interp(np.linspace(0, 1, n), [0, 1], [0, self.length])

    def x_frac(self, x: float):
        """Fraction of bridge length for an X position (extrapolates)."""
        return float(
            interp1d([self.x_min, self.x_max], [0, 1], fill_value="extrapolate")(x)
        )

    def x(self, x_frac: float):
        """X position in meters for a fraction of bridge length."""
        return float(
            interp1d([0, 1], [self.x_min, self.x_max], fill_value="extrapolate")(x_frac)
        )

    def y_frac(self, y: float):
        """Fraction of bridge height for a Y position (must be in range)."""
        assert self.y_min <= y <= self.y_max
        return np.interp(y, [self.y_min, self.y_max], [0, 1])

    def y(self, y_frac: float):
        """Y position in meters for a fraction of bridge height."""
        assert 0 <= y_frac <= 1
        return np.interp(y_frac, [0, 1], [self.y_min, self.y_max])

    def z_frac(self, z: float):
        """Fraction of bridge width for a Z position (must be in range)."""
        assert self.z_min <= z <= self.z_max
        return np.interp(z, [self.z_min, self.z_max], [0, 1])

    def z(self, z_frac: float):
        """Z position in meters for a fraction of bridge width."""
        assert 0 <= z_frac <= 1
        return np.interp(z_frac, [0, 1], [self.z_min, self.z_max])

    def _min_max(
        self,
        f: Callable[
            [Union[Support, Material]], Tuple[Optional[float], Optional[float]]
        ],
    ) -> Tuple[float, float]:
        """The min and max values in a direction from supports and sections.

        'f' maps a section or support to its (min, max) pair; None values
        are ignored.
        """
        z_min, z_max = None, None

        def set_z_min(z: float):
            nonlocal z_min
            if z is None:
                return
            z_min = z if z_min is None or z < z_min else z_min

        def set_z_max(z: float):
            nonlocal z_max
            if z is None:
                return
            z_max = z if z_max is None or z > z_max else z_max

        for section in self.sections:
            s_z_min, s_z_max = f(section)
            set_z_min(s_z_min)
            set_z_max(s_z_max)
        for support in self.supports:
            s_z_min, s_z_max = f(support)
            set_z_min(s_z_min)
            set_z_max(s_z_max)
        return z_min, z_max

    def _assert_bridge(self):
        """Assert this bridge makes sense."""
        # Single section only in 3D.
        if self.single_sections:
            if self.dimensions != Dimensions.D3:
                raise ValueError("Bridge.single_section only supported in 3D")
            assert self.single_sections[0].start_x_frac == 0
            assert self.single_sections[0].start_z_frac == 0
            assert self.single_sections[1].start_x_frac == 0
            assert self.single_sections[1].start_z_frac == 0
            assert self.single_sections[1].start_frac_len == 0
            assert len(self.sections) == 1
            for pier in self.supports:
                assert len(pier.sections) == 1
        # Bridge boundaries should be correct in orientation.
        assert self.x_min < self.x_max
        assert self.y_min < self.y_max
        assert self.z_min < self.z_max
        # Derived dimensions should make sense.
        assert self.length == self.x_max - self.x_min
        assert self.width == self.z_max - self.z_min
        # Base mesh must be of a minimum size.
        assert self.base_mesh_deck_max_x <= self.length
        if self.dimensions == Dimensions.D3:
            assert self.base_mesh_deck_max_z <= self.width
            # for pier in self.supports:
            # TODO: Improve this assert, piers are not vertical.
            # assert self.base_mesh_pier_max_long <= pier.height
            self._assert_3d()

    def _assert_3d(self):
        """Assertions specific to a 3D bridge model."""
        # All sections are Material.
        for section in self.sections:
            if not isinstance(section, Material):
                raise ValueError("3D bridge must use Material sections")
        # First section must start at 0.
        if self.sections[0].start_x_frac != 0:
            raise ValueError("First section of 3D bridge must start at 0")
        # Section must be in order.
        last_start_x_frac = self.sections[0].start_x_frac
        for section in self.sections[1:]:
            if section.start_x_frac < last_start_x_frac:
                raise ValueError("Sections not in order of start_x_frac")
            last_start_x_frac = section.start_x_frac
        # Lanes must be in range.
        for i, lane in enumerate(self.lanes):
            if lane.z_min < self.z_min:
                raise ValueError(
                    f"Lane {i} lower position {lane.z_min} less than bridge"
                    + f" {self.z_min}"
                )
            if lane.z_min > self.z_max:
                raise ValueError(
                    f"Lane {i} lower position {lane.z_min} greater than bridge"
                    + f" {self.z_max}"
                )
            if lane.z_max < self.z_min:
                raise ValueError(
                    f"Lane {i} upper position {lane.z_max} less than bridge"
                    + f" {self.z_min}"
                )
            if lane.z_max > self.z_max:
                raise ValueError(
                    f"Lane {i} upper position {lane.z_max} greater than bridge"
                    + f" {self.z_max}"
                )
        # Supports must be in range.
        for i, support in enumerate(self.supports):
            support_z_min, support_z_max = support.z_min_max_top()
            if support_z_min < self.z_min:
                raise ValueError(
                    f"Support {i} lower position {support_z_min} less than"
                    + f" bridge {self.z_min}"
                )
            if support_z_min > self.z_max:
                raise ValueError(
                    f"Support {i} lower position {support_z_min} greater than"
                    + f" bridge {self.z_max}"
                )
            if support_z_max < self.z_min:
                raise ValueError(
                    f"Support {i} upper position {support_z_max} less than"
                    + f" bridge {self.z_min}"
                )
            if support_z_max > self.z_max:
                raise ValueError(
                    f"Support {i} upper position {support_z_max} greater than"
                    + f" bridge {self.z_max}"
                )
class Vehicle:
def __init__(
self,
kn: Union[float, List[float], List[Tuple[float, float]]],
axle_distances: List[float],
axle_width: float,
kmph: float,
lane: int = 0,
init_x_frac: float = 0,
):
"""A vehicles, load intensities, position and speed.
:param kn:
intensity, either for the entire vehicles or per axle, or as a list
of tuple (per wheel, each tuple is left then right wheel), in kilo
Newton.
:param axle_distances: distance between axles in meters.
:param axle_width: width of the vehicles's axles in meters.
:param kmph: speed of this vehicles.
:param lane: index of a lane on a bridge.
:param init_x_frac: position at time 0 in a simulation.
"""
self.kn = kn
self.axle_distances = axle_distances
self.axle_width = axle_width
self.length = sum(self.axle_distances)
self.num_axles = len(self.axle_distances) + 1
self.num_wheels = self.num_axles * 2
self.kmph = kmph
self.mps = self.kmph / 3.6 # Meters per second.
self.lane = lane
self.init_x_frac = init_x_frac
assert self.init_x_frac <= 1
def total_kn():
if isinstance(self.kn, list):
if isinstance(self.kn[0], tuple):
return sum(chain.from_iterable(self.kn))
return sum(self.kn)
return self.kn
def kn_per_axle():
if isinstance(self.kn, list):
if isinstance(self.kn[0], tuple):
return list(map(sum, self.kn))
return self.kn
return [(self.kn / self.num_axles) for _ in range(self.num_axles)]
def kn_per_wheel():
if isinstance(self.kn, list):
if isinstance(self.kn[0], tuple):
return self.kn
return list(map(lambda kn: (kn / 2, kn / 2), self.kn))
wheel_kn = self.kn / self.num_wheels
return [(wheel_kn, wheel_kn) for _ in range(self.num_axles)]
self.total_kn = total_kn
self.kn_per_axle = kn_per_axle
self.kn_per_wheel = kn_per_wheel
def cmap_norm(self, all_vehicles: List["Vehicle"], cmin=0, cmax=1):
"""The colormap and norm for coloring vehicles."""
from plot import truncate_colormap
cmap = truncate_colormap(cm.get_cmap("YlGn"), cmin, cmax)
total_kns = [v.total_kn() for v in all_vehicles] + [self.total_kn()]
norm = colors.Normalize(vmin=min(total_kns), vmax=max(total_kns))
return cmap, norm
def color(self, all_vehicles: List["Vehicle"]):
"""Color of this vehicles scaled based on given vehicles."""
cmap, norm = self.cmap_norm(all_vehicles)
if len(all_vehicles) == 0:
return cmap(0.5)
return cmap(norm(self.total_kn()))
def wheel_tracks_zs(self, bridge: Bridge, meters: bool) -> Tuple[float, float]:
"""Positions of the vehicles's wheels in transverse direction.
Args:
bridge: Bridge, the bridge on which the vehicles is moving.
meters: bool, whether to return positions in meters (True) or
fractions (False) of the bridge width in [0 1].
"""
if not meters:
raise ValueError("Should not be doing this")
lane = bridge.lanes[self.lane]
tracks = [
lane.z_center - (self.axle_width / 2),
lane.z_center + (self.axle_width / 2),
]
if meters:
return tracks
return list(map(lambda z: bridge.z_frac(z), tracks))
def x_frac_at(self, time: float, bridge: Bridge) -> List[float]:
"""Fraction of x position of bridge in meters at given time.
Args:
time: float, time passed from initial position, in seconds.
bridge: Bridge, bridge the vehicles is moving on.
"""
delta_x_frac = (self.mps * time) / bridge.length
init_x_frac = self.init_x_frac
if bridge.lanes[self.lane].ltr:
return init_x_frac + delta_x_frac
else:
init_x_frac *= -1 # Make positive, move to right of bridge start.
init_x_frac += 1 # Move one bridge length to the right.
return init_x_frac - delta_x_frac
def x_at(self, time: float, bridge: Bridge):
"""X position of front axle on bridge at given time, in meters.
Args:
time: float, time passed from initial position, in seconds.
bridge: Bridge, bridge the vehicles is moving on.
"""
return bridge.x(self.x_frac_at(time=time, bridge=bridge))
def xs_at(self, time: float, bridge: Bridge):
"""X position on bridge for each axle in meters at given time."""
if not hasattr(self, "_xs_at_time"):
xs = [self.x_at(time=time, bridge=bridge)]
# Determine the distance between each pair of axles.
delta_xs = np.array(self.axle_distances)
if bridge.lanes[self.lane].ltr:
delta_xs *= -1
# Add the distance for each axle, after the front axle.
for delta_x in delta_xs:
xs.append(xs[-1] + delta_x)
self._xs_at_time = np.array(xs)
delta_x_time = self.x_at(time=time, bridge=bridge) - self._xs_at_time[0]
return sorted(self._xs_at_time + delta_x_time)
def x_fracs_at(self, time: float, bridge: Bridge):
"""Fraction of x position of bridge for each axle at given time."""
return list(map(bridge.x_frac, self.xs_at(time=time, bridge=bridge)))
def on_bridge(self, time: float, bridge: Bridge) -> bool:
"""Whether a moving load is on a bridge at a given time."""
x_fracs = list(map(bridge.x_frac, self.xs_at(time=time, bridge=bridge)))
# Left-most and right-most vehicles positions as fractions.
xl_frac, xr_frac = min(x_fracs), max(x_fracs)
return 0 <= xl_frac <= 1 or 0 <= xr_frac <= 1
def full_lanes(self, time: float, bridge: Bridge) -> float:
"""The amount of bridge lanes travelled by this vehicles."""
x_fracs = list(map(bridge.x_frac, self.xs_at(time=time, bridge=bridge)))
# Left-most and right-most vehicles positions as fractions.
xl_frac, xr_frac = min(x_fracs), max(x_fracs)
if bridge.lanes[self.lane].ltr:
return xl_frac
else:
return abs(xr_frac - 1)
def passed_bridge(self, time: float, bridge: Bridge) -> bool:
"""Whether the current vehicles has travelled over the bridge."""
return self.full_lanes(time=time, bridge=bridge) > 1
def time_at(self, x, bridge: Bridge):
"""Time the front axle is at the given x position."""
if not bridge.lanes[self.lane].ltr:
raise NotImplementedError()
init_x = bridge.x(self.init_x_frac)
assert init_x < x
return float(abs(init_x - x)) / self.mps
def time_entering_bridge(self, bridge: Bridge):
"""Time the vehicles begins to enter the bridge."""
init_x = bridge.x(self.init_x_frac)
assert init_x <= 0
return float(abs(init_x)) / self.mps
def time_entered_bridge(self, bridge: Bridge):
"""Time the vehicles has entered the bridge."""
init_x = bridge.x(self.init_x_frac)
assert init_x <= 0
return float(abs(init_x) + self.length) / self.mps
def time_leaving_bridge(self, bridge: Bridge):
"""Time the vehicles begins to leave the bridge."""
init_x = bridge.x(self.init_x_frac)
assert init_x <= 0
return float(abs(init_x) + bridge.length) / self.mps
def time_left_bridge(self, bridge: Bridge):
"""Time the vehicles has left the bridge."""
init_x = bridge.x(self.init_x_frac)
assert init_x <= 0
return float(abs(init_x) + bridge.length + self.length) / self.mps
    def to_wheel_track_xs(
        self, c: "Config", wheel_x: float, wheel_track_xs: Optional[List[float]] = None
    ) -> Tuple[Tuple[float, float], Tuple[float, float]]:
        """X positions (and weighting) of unit loads for a x position.

        This implements wheel track bucketing! The wheel load is distributed
        over the two neighbouring unit-load positions of the wheel track,
        weighted by inverse distance; the two returned weights sum to 1.

        Returns:
            ((x_lo, weight_lo), (x_hi, weight_hi)); on an exact match the
            second tuple is the placeholder (0, 0).
        """
        wheel_x = round_m(wheel_x)
        if wheel_track_xs is None:
            wheel_track_xs = c.bridge.wheel_track_xs(c)
        # Index of the first unit-load position >= wheel_x ...
        unit_load_x_ind = np.searchsorted(wheel_track_xs, wheel_x)
        unit_load_x = lambda: wheel_track_xs[unit_load_x_ind]
        # ... stepped back so the chosen unit load is at or below the wheel.
        if unit_load_x() > wheel_x:
            unit_load_x_ind -= 1
        assert unit_load_x() <= wheel_x
        # If the unit load is an exact match just return it.
        if np.isclose(wheel_x, unit_load_x()):
            return ((wheel_x, 1), (0, 0))
        # Otherwise, return a combination of two unit loads. In this case the
        # unit load's position is less than the wheel.
        unit_load_x_lo = unit_load_x()
        unit_load_x_hi = wheel_track_xs[unit_load_x_ind + 1]
        assert unit_load_x_hi > wheel_x
        # Inverse-distance weighting: the closer unit load gets the larger
        # share (dist_hi / dist for the low position, and vice versa).
        dist_lo = abs(unit_load_x_lo - wheel_x)
        dist_hi = abs(unit_load_x_hi - wheel_x)
        dist = dist_lo + dist_hi
        return ((unit_load_x_lo, dist_hi / dist), (unit_load_x_hi, dist_lo / dist))
    def to_wheel_track_loads_(
        self,
        c: "Config",
        time: float,
        flat: bool = False,
        wheel_track_xs: Optional[List[float]] = None,
    ):
        """Load intensities and positions per axle, per wheel.

        "Bucketed" to fit onto wheel tracks.

        NOTE: In each tuple of two point loads, one tuple per wheel, each point
        load is for a unit load position in the wheel track. Each point load is
        weighted by the distance to the unit load.
        """
        if wheel_track_xs is None:
            wheel_track_xs = c.bridge.wheel_track_xs(c)
        xs = self.xs_at(time=time, bridge=c.bridge)
        kns = self.kn_per_axle()
        result = []
        assert len(xs) == len(kns)
        # For each axle.
        for x, kn in zip(xs, kns):
            # Skip axle if not on the bridge.
            if (x < c.bridge.x_min and not np.isclose(x, c.bridge.x_min)) or (
                x > c.bridge.x_max and not np.isclose(x, c.bridge.x_max)
            ):
                continue
            left, right = [], []
            # Split the wheel position over the two bracketing unit-load
            # positions returned by to_wheel_track_xs.
            for (load_x, load_frac) in self.to_wheel_track_xs(
                c=c, wheel_x=x, wheel_track_xs=wheel_track_xs,
            ):
                if load_frac > 0:
                    # Half the axle load per wheel, scaled by the bucket weight.
                    bucket_kn = kn / 2 * load_frac
                    left.append((load_x, bucket_kn))
                    right.append((load_x, bucket_kn))
            result.append((left, right))
        if flat:
            # NOTE(review): entries here are (x, kn) tuples, not PointLoad
            # instances -- confirm flatten(result, PointLoad) handles that.
            return flatten(result, PointLoad)
        return result
def to_wheel_track_loads(
self, c: "Config", time: float, flat: bool = False
) -> List[Tuple[List[PointLoad], List[PointLoad]]]:
z0, z1 = self.wheel_tracks_zs(bridge=c.bridge, meters=True)
assert z0 < z1
result = []
for axle_loads in self.to_wheel_track_loads_(c=c, time=time):
left, right = [], []
left_loads, right_loads = axle_loads
for load_x, load_kn in left_loads:
left.append(PointLoad(x=load_x, z=z0, load=load_kn))
for load_x, load_kn in right_loads:
right.append(PointLoad(x=load_x, z=z1, load=load_kn))
result.append((left, right))
if flat:
return flatten(result, PointLoad)
return result
def to_point_load_pw(
self, time: float, bridge: Bridge, list: bool = False
) -> Union[List[Tuple[PointLoad, PointLoad]], List[PointLoad]]:
"""A tuple of point load per axle, one point load per wheel."""
z0, z1 = self.wheel_tracks_zs(bridge=bridge, meters=True)
assert z0 < z1
kn_per_axle = self.kn_per_axle()
result = []
# For each axle.
for x_i, x in enumerate(self.xs_at(time=time, bridge=bridge)):
# Skip axle if not on the bridge.
if (x < bridge.x_min and not np.isclose(x, bridge.x_min)) or (
x > bridge.x_max and not np.isclose(x, bridge.x_max)
):
continue
# Two wheel load intensities.
kn_wheel = kn_per_axle[x_i] / 2
result.append(
(
PointLoad(x=x, z=z0, load=kn_wheel),
PointLoad(x=x, z=z1, load=kn_wheel),
)
)
if list:
return flatten(result, PointLoad)
return result
def plot_wheels(self, c: "Config", time: float, label=None, **kwargs):
wheel_loads = self.to_point_load_pw(time=time, bridge=c.bridge, flat=True)
for i, load in enumerate(wheel_loads):
x, z = c.bridge.x(load.x_frac), c.bridge.z(load.z_frac)
plt.scatter(
[x],
[z],
facecolors="none",
edgecolors="black",
label=None if i > 0 else label,
**kwargs,
)
| [
"barischrooneyj@protonmail.com"
] | barischrooneyj@protonmail.com |
e2bd9a59636cfd0c2f76a1a4087cc2c5202b1935 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/216/usersdata/274/113966/submittedfiles/av2_p3_civil.py | d9f4bcefea50acc8b1dd920d630cdd854f8a3254 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
# Reads an L x L matrix interactively, then accumulates sums of selected
# entries relative to position (x, y).
import numpy as np
L=int(input("Quantidade de Linhas: "))
C=L
# Square L x L matrix, filled element-by-element from user input below.
a=np.zeros((L,C))
x=int(input("Linhas: "))
y=int(input("Colunas: "))
for i in range(0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=float(input("Valor da Linha: "))
# Sum along row x, to the right of the chosen position.
soma1L=0
for i in range(x,C-y,1):
    soma1L=soma1L+a[x,i+1]
soma2L=0
for i in range(x,y,1):
    # NOTE(review): this adds the whole matrix `a` (soma2L becomes an
    # array), not a single element -- a missing index is likely; confirm.
    soma2L=soma2L+a
# NOTE(review): identical to the soma1L loop (row x again); a column sum
# was probably intended here -- confirm.
soma1C=0
for i in range(x,C-y,1):
    soma1C=soma1C+a[x,i+1]
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
fe03327e97fff1983eaee4dd0427062b9d600377 | 05cda3ab89d001aef2ec19f2975fad9397c8dd0b | /experiments/sawyer/towel_classifier/conf.py | bec399b3897f8ecb885707dcf2e8c6335cc1ab37 | [
"MIT"
] | permissive | dhl8282/visual_foresight | ddcc940ad542222d433ca269e37e4d5f747732ea | 5e6205a85119c1dec4a39ba7e67d7c89e726a47c | refs/heads/master | 2022-09-05T18:16:07.590072 | 2020-05-26T08:41:52 | 2020-05-26T08:41:52 | 266,988,765 | 0 | 0 | MIT | 2020-05-26T08:36:22 | 2020-05-26T08:36:22 | null | UTF-8 | Python | false | false | 988 | py | from visual_mpc.video_prediction.setup_predictor import setup_predictor
from visual_mpc.video_prediction.vpred_model_interface import VPred_Model_Interface
from video_prediction.models.savp_model import SAVPVideoPredictionModel
import robonet
# Absolute path to the pretrained SAVP checkpoint directory.
# NOTE(review): machine-specific path -- update for each deployment.
modeldir = '/home/sudeep/Documents/video_prediction/pretrained_models/mixed_datasets/towel_hard_objects/view0/'
# Video-prediction setup consumed by the visual-MPC predictor factory.
configuration = {
'pred_model': VPred_Model_Interface,
'pred_model_class': SAVPVideoPredictionModel,
'setup_predictor':setup_predictor,
# Model/checkpoint locations derived from modeldir above.
'json_dir': modeldir + '/model.savp.None',
'pretrained_model':modeldir + '/model.savp.None/model-300000', # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 15, # 'sequence length to load, including context frames.' ,
'context_frames': 2, # of frames before predictions.' ,
'model': 'appflow', #'model architecture to use - CDNA, DNA, or STP' ,
'batch_size': 50,
# State and action dimensionality of the robot setup.
'sdim':8,
'adim':4,
# Input image size (height, width) in pixels.
'orig_size':[48,64],
'no_pix_distrib': '',
'ncam': 1
}
| [
"sdasari@berkeley.edu"
] | sdasari@berkeley.edu |
c0f734b4bdb2f37fdd2a0c33372562496f0016ec | 4cce482c0525d7a517595f2117bfae355b157477 | /desafio096.py | 7633b271eba55f98bd364ec06be712c42ffef31f | [] | no_license | Carlosfrancog/Scripts-Python | c179360fab7db20069bbc7017a6cab0a07f82b0c | f6f5829297710a76eafe86f8ff4ef4e521e4a793 | refs/heads/main | 2023-06-16T21:12:20.069556 | 2021-07-09T01:49:59 | 2021-07-09T01:49:59 | 384,291,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | print('====== DESAFIO 096 ======')
def area(x, y):
    """Print a divider line and a report of the plot's width, length and area."""
    print('-' * 40)
    report = (
        f'A largura é {x}m e o comprimento é {y}m\n'
        f'portanto a área desse terreno é de {x*y} m²'
    )
    print(report)
# Prompt for width and length (in metres) and print the area report.
area(float(input('Digite a largura em metros: ')), float(input('Digite o comprimento em metros: ')))
| [
"carlinhosebba123@gmail.com"
] | carlinhosebba123@gmail.com |
7e0a4c94cc03b414ea57b6dfec9317df3a16b0a0 | ea7f2abf791a12ad68862664b39c5b569efb4e27 | /mysite/settings.py | d725f8d5ba29b4570250a5b24ad19ac06a0edf0c | [] | no_license | sparshchaudhary/DO-finHost | 628759e377e618b24a566d970b531f4902ceeaad | 7df5280121e298c461c759998549d064e43b956f | refs/heads/main | 2022-12-28T23:56:03.622114 | 2020-10-09T15:53:02 | 2020-10-09T15:53:02 | 302,612,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# NOTE(review): `messages` is imported but never referenced in this module.
from django.contrib.messages import constants as messages
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed in source control -- rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '1sqe(3w@jyj6%8t%aie89d2uwrqzl++d*wo1dy4v8#i_r5jq&x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts allowed to serve this site (local dev plus the deployed server IP).
ALLOWED_HOSTS = ['localhost', '167.99.233.80']
# Application definition
INSTALLED_APPS = [
    #Django Pre Installed Apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #MY Apps
    'Index.apps.IndexConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Extra locations searched for static assets during development.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# Target directory for `collectstatic` in deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'static_cdn')
| [
"paritoshsparsh@gmail.com"
] | paritoshsparsh@gmail.com |
4cb16d359b699a10e4dea3a8609ef6aa1812ce3a | dfd95c1f541b1b8d8de3e9a99e9ce8308e9e2dcf | /rabbitbreedapi/rabbitbreedapi/wsgi.py | 69fbee08e4ff7baec5ad47d7acfd4691583481d0 | [] | no_license | seniorita-developer/django-rabbitbreeds-rest-api | b6f68fcd22f96914326375f2082528107b716388 | 5d337325c76b121b1bfc81ae26e2db0671b87194 | refs/heads/master | 2023-08-09T12:34:06.589911 | 2021-03-13T22:39:30 | 2021-03-13T22:39:30 | 262,061,173 | 0 | 0 | null | 2021-09-22T19:09:39 | 2020-05-07T13:41:37 | JavaScript | UTF-8 | Python | false | false | 405 | py | """
WSGI config for rabbitbreedapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rabbitbreedapi.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"valeriia.zabkowska@gmail.com"
] | valeriia.zabkowska@gmail.com |
76bdac5de824a62ecbd79d5265f6e4a71cfb0fb9 | a890d21f8b06b18be4253e53ae1ca99d53fad8d1 | /dissect.py | 1d281a885b406674cf1b5b1c13310173bbc8f1b8 | [] | no_license | mastupristi/memoryLayout | fe2e58e2fc3b11d38bac15d2a2f84e20c6f9d61c | 6bf7c0a8e0069ceb9194456b5610662f0a11dfcf | refs/heads/master | 2023-08-05T00:47:17.759264 | 2023-07-19T15:43:17 | 2023-07-19T15:43:17 | 257,404,761 | 5 | 1 | null | 2023-08-03T10:13:20 | 2020-04-20T21:11:15 | Python | UTF-8 | Python | false | false | 3,771 | py | #!/usr/bin/env python3
# is required that the GNU ARM toolchais is in PATH
import argparse
import sys
from RegionRetriever import RegionRetriever
from MetadataRetriever import MetadataRetriever
class LineEmitter:
    """Formats symbol rows either as a fixed-width table or as CSV."""
    def __init__(self, regionStringExtent=16, symbolStringExtent=40, csv=False):
        """Build the table template; column widths never shrink below the
        header widths (16 for the region column, 40 for the symbol column).

        csv: when truthy, emitLine produces comma-separated output instead.
        """
        charactersForRegion = max(regionStringExtent, 16)
        charactersForSymbol = max(symbolStringExtent, 40)
        # %%%ds escapes to a literal %<width>s in the resulting template.
        self.formatStr = "%%%ds %%10s %%12s %%9s %%5s %%%ds %%s" % (charactersForRegion, charactersForSymbol)
        self.csv = csv
    def emitLine(self, elementlist, file2out):
        """Write one row (a list of strings) to file2out, newline-terminated."""
        if self.csv:  # idiomatic truth test instead of `True == self.csv`
            stroutline = ','.join(elementlist)
        else:
            stroutline = self.formatStr % tuple(elementlist)
        file2out.write(stroutline + "\n")
def main():
    """Parse CLI arguments, extract symbol metadata from the ELF/map pair
    and emit one row per symbol (fixed-width table or CSV) to --out."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--type", help="output type (default: normal)", choices=['normal', 'csv'], default='normal')
    parser.add_argument("-o", "--out", help="out file (default: stdout)", type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument("-r", "--region", help="memory region to dissect (default: all)", default='all', metavar='REG')
    parser.add_argument("-u", "--uniq", help="filter symbols @address already populated", action='store_true')
    parser.add_argument("-f", "--fill", help="try to guess the *fill* fields", action='store_true')
    parser.add_argument("-l", "--noline", help="remove any line number from files", action='store_true')
    parser.add_argument("-p", "--prefix", help="prefix for nm tool (e.g. arm-none-eabi-, default: \"\")", default='', metavar='PREFIX')
    parser.add_argument("elffile", help="input elf file")
    parser.add_argument("mapfile", help="input map file")
    args = parser.parse_args()
    csv = args.type == 'csv'
    try:
        memMapRetriever = RegionRetriever(mapFile=args.mapfile)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        print("Error occurred! Does %s file exist?" % args.mapfile)
        sys.exit()
    Regions = memMapRetriever.GetRegions()
    metadataRetriever = MetadataRetriever(args.elffile, args.mapfile, Regions, args.prefix)
    symbolList = metadataRetriever.retreiveSymbols()
    # Column widths follow the longest region / symbol name present.
    regionNameMaxLen = len(max(Regions.keys(), key=len))
    symbolNameMaxLen = len(max([sym["name"] for sym in symbolList], key=len))
    if args.region != "all":
        if args.region in Regions.keys():
            symbolList = [d for d in symbolList if args.region == d["region"]]
        else:
            print("Region %s does not exist in %s" % (args.region, args.elffile))
            sys.exit()
    emitter = LineEmitter(regionNameMaxLen, symbolNameMaxLen, csv)
    # Header row.
    fields = [ "Region",
               "addr(hex)",
               "addr(dec)",
               "size(dec)",
               "type",
               "symbol",
               "path"]
    emitter.emitLine(fields, args.out)
    lastaddr = -1
    for symbol in symbolList:
        # --uniq: emit only the first symbol seen at each address
        # (symbolList is expected to be address-ordered).
        if args.uniq and lastaddr == symbol["addr"]:
            continue
        # *fill* records are skipped unless --fill was requested.
        if (not args.fill) and symbol["fill"]:
            continue
        if symbol["file"] != "":
            fileField = symbol["file"]
            if not args.noline and symbol["line"] > 0:
                fileField += ":%d" % symbol["line"]
        else:
            fileField = ""
        fields = [ symbol["region"],
                   "0x%08x" % symbol["addr"],
                   "%d" % symbol["addr"],
                   "%d" % symbol["dim"],
                   "%c" % symbol["attr"],
                   symbol["name"],
                   fileField
                 ]
        emitter.emitLine(fields, args.out)
        lastaddr = symbol["addr"]
if __name__ == '__main__':
main()
| [
"cialdi@gmail.com"
] | cialdi@gmail.com |
273294c58b5523086e8ff42f3418e24ed11ae517 | dd2f58d6d885289cf8c455b9d33d82d0bfcbd493 | /journal_file.py | 57f00207894dbd996533da64afea8d6dd33d0ce5 | [] | no_license | emresn/sine_curve_generator_for_nx | 3b6a6f8f5e8e0de42bd2d53123ac41c64074e742 | 1832b00295c3d2a250a47f4056caa49b42c1a4b8 | refs/heads/master | 2023-01-22T21:54:03.117984 | 2020-11-22T22:24:19 | 2020-11-22T22:24:19 | 315,127,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,470 | py |
# NX 11.0.0.33
# Journal created by SinCurve Journal Generator for NX
#
import math
import NXOpen
import NXOpen.Features
import NXOpen.GeometricUtilities
import NXOpen.Preferences
def main() :
    """Replay of an NX journal recorded by the SinCurve generator.

    Creates the driving expressions t, xt, yt, zt, builds a law curve from
    them and orients/zooms the view. The SetUndoMark/DeleteUndoMark calls
    mirror the interactive dialog lifecycle exactly as recorded; their
    order is significant and must not be changed.
    """
    theSession = NXOpen.Session.GetSession()
    workPart = theSession.Parts.Work
    displayPart = theSession.Parts.Display
    # ----------------------------------------------
    # Menu: Tools->Expressions...
    # ----------------------------------------------
    theSession.Preferences.Modeling.UpdatePending = False
    markId1 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
    theSession.SetUndoMarkName(markId1, "Expressions Dialog")
    markId2 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Expressions")
    markId3 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Make Up to Date")
    markId4 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Create Expression")
    # All expressions are created in millimetres.
    unit1 = workPart.UnitCollection.FindObject("MilliMeter")
    expression1 = workPart.Expressions.CreateWithUnits("t=1", unit1)
    markId5 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Check Circular")
    objects1 = [NXOpen.NXObject.Null] * 1
    objects1[0] = expression1
    theSession.UpdateManager.MakeUpToDate(objects1, markId5)
    expression1.EditComment("")
    markId6 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Create Expression")
    # xt: linear sweep along x as the law parameter t runs 0..1.
    expression2 = workPart.Expressions.CreateWithUnits("xt=25*(30*t)", unit1)
    markId7 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Check Circular")
    objects2 = [NXOpen.NXObject.Null] * 1
    objects2[0] = expression2
    theSession.UpdateManager.MakeUpToDate(objects2, markId7)
    expression2.EditComment("")
    objects3 = [NXOpen.NXObject.Null] * 2
    objects3[0] = expression1
    objects3[1] = expression2
    theSession.UpdateManager.MakeUpToDate(objects3, markId3)
    markId8 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "NX update")
    nErrs1 = theSession.UpdateManager.DoUpdate(markId8)
    theSession.DeleteUndoMark(markId8, "NX update")
    theSession.DeleteUndoMark(markId3, None)
    theSession.DeleteUndoMark(markId2, None)
    theSession.SetUndoMarkName(markId1, "Expressions")
    markId9 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
    theSession.SetUndoMarkName(markId9, "Expressions Dialog")
    # ----------------------------------------------
    #   Dialog Begin Expressions
    # ----------------------------------------------
    markId10 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Expressions")
    markId11 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Make Up to Date")
    markId12 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Create Expression")
    # yt: the sine law itself (amplitude 10, 25 periods over t in 0..1).
    expression3 = workPart.Expressions.CreateWithUnits("yt=0 +(10*sin(25*360*t))", unit1)
    markId13 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Check Circular")
    objects4 = [NXOpen.NXObject.Null] * 1
    objects4[0] = expression3
    theSession.UpdateManager.MakeUpToDate(objects4, markId13)
    expression3.EditComment("")
    objects5 = [NXOpen.NXObject.Null] * 1
    objects5[0] = expression3
    theSession.UpdateManager.MakeUpToDate(objects5, markId11)
    markId14 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "NX update")
    nErrs2 = theSession.UpdateManager.DoUpdate(markId14)
    theSession.DeleteUndoMark(markId14, "NX update")
    theSession.DeleteUndoMark(markId11, None)
    theSession.DeleteUndoMark(markId10, None)
    theSession.SetUndoMarkName(markId9, "Expressions")
    markId15 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
    theSession.SetUndoMarkName(markId15, "Expressions Dialog")
    # ----------------------------------------------
    #   Dialog Begin Expressions
    # ----------------------------------------------
    markId16 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Expressions")
    markId17 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Make Up to Date")
    markId18 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Create Expression")
    # zt: planar curve, z is constant zero.
    expression4 = workPart.Expressions.CreateWithUnits("zt=0", unit1)
    markId19 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Check Circular")
    objects6 = [NXOpen.NXObject.Null] * 1
    objects6[0] = expression4
    theSession.UpdateManager.MakeUpToDate(objects6, markId19)
    expression4.EditComment("")
    objects7 = [NXOpen.NXObject.Null] * 1
    objects7[0] = expression4
    theSession.UpdateManager.MakeUpToDate(objects7, markId17)
    markId20 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "NX update")
    nErrs3 = theSession.UpdateManager.DoUpdate(markId20)
    theSession.DeleteUndoMark(markId20, "NX update")
    theSession.DeleteUndoMark(markId17, None)
    theSession.DeleteUndoMark(markId16, None)
    theSession.SetUndoMarkName(markId15, "Expressions")
    markId21 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
    theSession.SetUndoMarkName(markId21, "Expressions Dialog")
    # ----------------------------------------------
    #   Dialog Begin Expressions
    # ----------------------------------------------
    markId22 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Expressions")
    theSession.DeleteUndoMark(markId22, None)
    markId23 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Expressions")
    markId24 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Make Up to Date")
    markId25 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "NX update")
    nErrs4 = theSession.UpdateManager.DoUpdate(markId25)
    theSession.DeleteUndoMark(markId25, "NX update")
    theSession.DeleteUndoMark(markId24, None)
    theSession.DeleteUndoMark(markId23, None)
    theSession.SetUndoMarkName(markId21, "Expressions")
    # ----------------------------------------------
    # Menu: Insert->Curve->Law Curve...
    # ----------------------------------------------
    markId26 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Visible, "Start")
    lawCurveBuilder1 = workPart.Features.CreateLawCurveBuilder(NXOpen.Features.LawCurve.Null)
    expression5 = workPart.Expressions.CreateSystemExpressionWithUnits("0", unit1)
    expression6 = workPart.Expressions.CreateSystemExpressionWithUnits("0", unit1)
    expression7 = workPart.Expressions.CreateSystemExpressionWithUnits("0", unit1)
    # ByEquation laws pick up the xt/yt/zt expressions created above.
    lawCurveBuilder1.XLaw.LawType = NXOpen.GeometricUtilities.LawBuilder.Type.ByEquation
    lawCurveBuilder1.XLaw.Value.RightHandSide = "0"
    lawCurveBuilder1.XLaw.StartValue.RightHandSide = "0"
    lawCurveBuilder1.XLaw.EndValue.RightHandSide = "0"
    lawCurveBuilder1.YLaw.LawType = NXOpen.GeometricUtilities.LawBuilder.Type.ByEquation
    lawCurveBuilder1.YLaw.Value.RightHandSide = "0"
    lawCurveBuilder1.YLaw.StartValue.RightHandSide = "0"
    lawCurveBuilder1.YLaw.EndValue.RightHandSide = "0"
    lawCurveBuilder1.ZLaw.LawType = NXOpen.GeometricUtilities.LawBuilder.Type.ByEquation
    lawCurveBuilder1.ZLaw.Value.RightHandSide = "0"
    lawCurveBuilder1.ZLaw.StartValue.RightHandSide = "0"
    lawCurveBuilder1.ZLaw.EndValue.RightHandSide = "0"
    theSession.SetUndoMarkName(markId26, "Law Curve Dialog")
    # Recorded modelling tolerances (distance / chaining) per law component.
    lawCurveBuilder1.XLaw.AlongSpineData.Spine.DistanceTolerance = 0.01
    lawCurveBuilder1.XLaw.AlongSpineData.Spine.ChainingTolerance = 0.0094999999999999998
    lawCurveBuilder1.XLaw.LawCurve.DistanceTolerance = 0.01
    lawCurveBuilder1.XLaw.LawCurve.ChainingTolerance = 0.0094999999999999998
    lawCurveBuilder1.YLaw.AlongSpineData.Spine.DistanceTolerance = 0.01
    lawCurveBuilder1.YLaw.AlongSpineData.Spine.ChainingTolerance = 0.0094999999999999998
    lawCurveBuilder1.YLaw.LawCurve.DistanceTolerance = 0.01
    lawCurveBuilder1.YLaw.LawCurve.ChainingTolerance = 0.0094999999999999998
    lawCurveBuilder1.ZLaw.AlongSpineData.Spine.DistanceTolerance = 0.01
    lawCurveBuilder1.ZLaw.AlongSpineData.Spine.ChainingTolerance = 0.0094999999999999998
    lawCurveBuilder1.ZLaw.LawCurve.DistanceTolerance = 0.01
    lawCurveBuilder1.ZLaw.LawCurve.ChainingTolerance = 0.0094999999999999998
    markId27 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Law Curve")
    theSession.DeleteUndoMark(markId27, None)
    markId28 = theSession.SetUndoMark(NXOpen.Session.MarkVisibility.Invisible, "Law Curve")
    # Commit builds the law-curve feature in the work part.
    nXObject1 = lawCurveBuilder1.Commit()
    theSession.DeleteUndoMark(markId28, None)
    theSession.SetUndoMarkName(markId26, "Law Curve")
    lawCurveBuilder1.Destroy()
    # Drop the temporary system expressions created for the builder.
    workPart.Expressions.Delete(expression5)
    workPart.Expressions.Delete(expression6)
    workPart.Expressions.Delete(expression7)
    # ----------------------------------------------
    # Menu: Orient View->Top
    # ----------------------------------------------
    workPart.ModelingViews.WorkView.Orient(NXOpen.View.Canned.Top, NXOpen.View.ScaleAdjustment.Fit)
    scaleAboutPoint1 = NXOpen.Point3d(-2.1283952823445289, 5.9748927805575391, 0.0)
    viewCenter1 = NXOpen.Point3d(2.1283952823445289, -5.9748927805575391, 0.0)
    workPart.ModelingViews.WorkView.ZoomAboutPoint(0.80000000000000004, scaleAboutPoint1, viewCenter1)
    scaleAboutPoint2 = NXOpen.Point3d(-2.6604941029306612, 7.4686159756969248, 0.0)
    viewCenter2 = NXOpen.Point3d(2.6604941029306612, -7.4686159756969248, 0.0)
    workPart.ModelingViews.WorkView.ZoomAboutPoint(0.80000000000000004, scaleAboutPoint2, viewCenter2)
    # ----------------------------------------------
    # Menu: Tools->Journal->Stop Recording
    # ----------------------------------------------
if __name__ == '__main__':
main()
| [
"esonmez357@gmail.com"
] | esonmez357@gmail.com |
978f7af95d4e531ed067249293500f71b88e4310 | f21e0479bb9f811be2383154c3643b0f29354be7 | /lib/data/dataset.py | c8d6dd038cbec357d79472552368f05bd1db899d | [] | no_license | cokeSchlumpf/thesis--text-sumarization | ceb8861ea8a7fbecd553d756b6c3ed7bb6d10432 | 09f5dd4c02169b1120238d84dc64433737e7b8b7 | refs/heads/master | 2023-03-26T23:38:48.542816 | 2021-03-27T22:03:11 | 2021-03-27T22:03:11 | 323,384,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from pydantic import BaseModel
class Dataset(BaseModel):
    """Metadata describing one dataset (pydantic model with validation)."""
    id: str  # unique dataset identifier
    name: str  # human-readable dataset name
    language: str  # language of the corpus
    description: str  # short free-text description
| [
"michael.wellner@gmail.com"
] | michael.wellner@gmail.com |
9d8c079179f285f75f1695a88d4e3807acf800c1 | ced968634cb9c6ee4677cd747b02b0a656ba3221 | /env/bin/easy_install | 98cbe45bf86c8f2fafe9ddf8b0ac9682e7acd4e4 | [] | no_license | veganna/hori | ad5c171fd0ea936f047cc375991e9f7a438df7ab | 92e195d9844e08bd9c9cbbbb4d1e1e6eef738461 | refs/heads/main | 2023-09-02T05:53:50.756717 | 2021-11-07T18:12:09 | 2021-11-07T18:12:09 | 425,580,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/mainsite/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" wrapper suffix that console-script
    # launchers append on Windows, so setuptools sees the canonical name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    # Delegate to setuptools' easy_install entry point; propagate exit code.
    sys.exit(main())
| [
"root@vultr.guest"
] | root@vultr.guest | |
f25ea43254cfc51bed417f8fa0f96ef3c0c306be | ef561ee66e8550945a449d4acce616f9224434eb | /dataset.py | d5f0e3010442affed117392f6fb4e48fba065945 | [] | no_license | cmax1018/genre-rator | 995ebc547c13d95a198d43c9470d8d6a2bfa32ef | de4d7a04b696fa31ffc35696d45180d3842e207c | refs/heads/master | 2022-12-30T19:41:55.803059 | 2020-10-01T23:11:05 | 2020-10-01T23:11:05 | 300,445,749 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | import os
import librosa
import math
import json
DATASET_PATH = "genres"  # root folder: one sub-folder per genre
JSON_PATH = "data.json"  # output file for the extracted features
SAMPLE_RATE = 22050  # sample rate used when loading audio (Hz)
DURATION = 30  # expected clip length in seconds
SAMPLES_PER_TRACK = SAMPLE_RATE * DURATION  # total samples per clip
def save_mfcc(dataset_path, json_path, n_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):
    """Extract MFCC features from every audio file under dataset_path.

    Walks one sub-folder per genre, splits each track into num_segments
    equal segments, computes n_mfcc coefficients per frame, and writes
    {"mapping", "mfcc", "labels"} to json_path.
    """
    # dictionary collecting genre names, MFCC matrices and label indices
    data = {
        "mapping": [],
        "mfcc": [],
        "labels": []
    }
    num_samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
    expected_num_mfcc_vectors_per_segment = math.ceil(num_samples_per_segment / hop_length)
    # loop through all genre sub-folders
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
        # Skip the root level itself. Bug fix: compare by value, not with
        # `is not` -- identity comparison only worked by CPython accident.
        if dirpath != dataset_path:
            # The folder name is the genre label; os.path.basename is
            # portable across path separators (unlike split("/")).
            semantic_label = os.path.basename(dirpath)
            data["mapping"].append(semantic_label)
            print('\nProcessing {}'.format(semantic_label))
            # process every audio file of this genre
            for f in filenames:
                file_path = os.path.join(dirpath, f)
                signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
                # split the track into segments and extract MFCCs per segment
                for s in range(num_segments):
                    start_sample = num_samples_per_segment * s
                    finish_sample = start_sample + num_samples_per_segment
                    mfcc = librosa.feature.mfcc(signal[start_sample:finish_sample], sr=sr, n_fft=n_fft, n_mfcc=n_mfcc, hop_length=hop_length)
                    mfcc = mfcc.T
                    # keep the segment only if it has the expected length
                    if len(mfcc) == expected_num_mfcc_vectors_per_segment:
                        data["mfcc"].append(mfcc.tolist())
                        data["labels"].append(i - 1)  # -1: enumerate counted the root directory first
                        print("{}, segment:{}".format(file_path, s))
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)
if __name__ == "__main__":
save_mfcc(DATASET_PATH, JSON_PATH, num_segments=10)
| [
"cmax1018@gmail.com"
] | cmax1018@gmail.com |
4191a07d0866f458056fcfa0ba8eb8a28f71667e | 42862caf177ef92b4c289f219556ba4da34692f2 | /43-oop-in.py | 78333f93f0658076047911a87f111eaf2c636179 | [] | no_license | w3cp/hukush-pakush | d3691c1e78db8f9b9f6f15d9bc1922d339a69cb5 | 2c5b96c2f180e8b1943da05b50197b16738a8d84 | refs/heads/master | 2021-01-10T01:36:11.873953 | 2016-01-28T02:48:47 | 2016-01-28T02:48:47 | 50,080,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | class Calculator:
def set( self, first, second ):
self.a = first
self.b = second
def add( self ):
return self.a + self.b
class NewCalculator( Calculator ):
    """Calculator extended with multiplication of the stored operands."""
    def multiply( self ):
        """Return the product of the operands stored via set()."""
        product = self.a * self.b
        return product
# Demo: set() is inherited from Calculator, multiply() from the subclass.
calc = NewCalculator()
calc.set( 2, 3 )
# Bug fix: the original `print calc.multiply()` statement is Python-2-only
# syntax; the call form below behaves identically on Python 2 and 3.
print(calc.multiply())
| [
"jannat.books@gmail.com"
] | jannat.books@gmail.com |
3814c9fa8ee783a4e7673e4446b3dd43435c865e | b2f808055ad24641b7b866d70d520f1232f33f6f | /Week_3/1_read.py | 18f6fca6bcbf396c7d3d7d410ee432a2baba2515 | [] | no_license | JayVer2/Python_openCV_Workshop | 361d9a467c85baf88626fb3e03c30f1071a66b35 | 5bafd8123c266461ea8c524ff43b1dc63b571cd5 | refs/heads/main | 2023-08-10T16:12:57.360355 | 2021-09-12T05:29:34 | 2021-09-12T05:29:34 | 401,165,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import cv2 as cv
import numpy
# Show a still image, then play a video frame-by-frame until it ends or
# the user presses 'd'.
img = cv.imread('Images/stock-photo.jpg')
cv.imshow('Stock Photo', img)
capture = cv.VideoCapture('Images/fire.mp4')
while True:
    # read() returns (success_flag, frame).
    isTrue, frame = capture.read()
    # Bug fix: stop when the video is exhausted -- the flag was ignored,
    # so imshow() crashed on the None frame after the last one.
    if not isTrue:
        break
    # show frame
    cv.imshow('', frame)
    # if the d key is pressed, kill screen
    if cv.waitKey(20) & 0xFF == ord('d'):
        break
capture.release()
cv.destroyAllWindows()
cv.waitKey(0) | [
"jver5037@uni.sydney.edu.au"
] | jver5037@uni.sydney.edu.au |
1b52c2c3ffe41c8ab7b25027361501a0cca289be | d7fda884eda22406b364f6658ecdbaaf940e5c66 | /func/Wordcloud.py | 11dc2828e7561070b869e1a4d4574667be43ce4b | [] | no_license | seraph05230/Wordcloud | 96715150e02fdf671a5d4c020d324dc3f9c874d9 | b2eda65c846f2dd192fdb09c0dc5191f18ca7a74 | refs/heads/master | 2022-12-06T09:48:20.978646 | 2020-09-02T16:30:16 | 2020-09-02T16:30:16 | 292,337,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from wordcloud import WordCloud
def Wordcloud(arg):
    """Render the "Top <arg>" keyword file as a word-cloud image.

    Reads ``Top {arg} keyword.txt`` (UTF-8), joins its lines into one
    space-separated string, and writes the rendered cloud to
    ``Top {arg} Wordcloud.png`` in the working directory.
    """
    with open('Top {} keyword.txt'.format(arg), 'r', encoding = 'utf-8') as file:
        text = file.read().replace('\n', ' ')
    cloud = WordCloud(
        background_color = 'black',    # canvas colour
        max_words = 200,               # cap on the number of rendered terms
        mask = None,                   # no shape mask
        max_font_size = None,          # let the library pick the largest size
        font_path = './src/kaiu.ttf',  # CJK-capable font so Chinese terms render
        random_state = None,           # random colour assignment per term
        prefer_horizontal = 0.9)       # horizontal vs. vertical layout ratio
    cloud.generate(text)
    cloud.to_file('Top {} Wordcloud.png'.format(arg))
"endlesslove05230@gmail.com"
] | endlesslove05230@gmail.com |
88ef0b98fb4afcc20cacebf64dc4d096901e441b | bd027b28f96e3191c7098ed5cc73c3d12b5cd88d | /depth-estimation/python/disparity2depth_calib.py | ab19e2e3fe456c9fe1b29190db47d0f72181a8a7 | [] | no_license | joaovictorcfs/learnopencv | 893d5be505ec0374ee51912748ef793b6ad5b874 | 94ad009bb189659bb76a9b0e526ea93b4ae5071a | refs/heads/master | 2023-03-14T23:32:03.493002 | 2021-03-27T22:34:13 | 2021-03-27T22:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,290 | py | import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Check for left and right camera IDs
# These values can change depending on the system
CamL_id = 2 # Camera ID for left camera
CamR_id = 0 # Camera ID for right camera
CamL= cv2.VideoCapture(CamL_id)
CamR= cv2.VideoCapture(CamR_id)
# Reading the mapping values for stereo image rectification
cv_file = cv2.FileStorage("data/stereo_rectify_maps.xml", cv2.FILE_STORAGE_READ)
Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
cv_file.release()
# These parameters can vary according to the setup
# Keeping the target object at max_dist we store disparity values
# after every sample_delta distance.
max_dist = 230 # max distance to keep the target object (in cm)
min_dist = 50 # Minimum distance the stereo setup can measure (in cm)
sample_delta = 40 # Distance between two sampling points (in cm)
# Z tracks the current target distance; it is decremented by the mouse
# callback each time a sample is recorded.
Z = max_dist
Value_pairs = []  # collected [depth_cm, disparity] samples
# NOTE(review): disp_map appears unused in the rest of this script.
disp_map = np.zeros((600,600,3))
# Reading the stored the StereoBM parameters
cv_file = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_READ)
numDisparities = int(cv_file.getNode("numDisparities").real())
blockSize = int(cv_file.getNode("blockSize").real())
preFilterType = int(cv_file.getNode("preFilterType").real())
preFilterSize = int(cv_file.getNode("preFilterSize").real())
preFilterCap = int(cv_file.getNode("preFilterCap").real())
textureThreshold = int(cv_file.getNode("textureThreshold").real())
uniquenessRatio = int(cv_file.getNode("uniquenessRatio").real())
speckleRange = int(cv_file.getNode("speckleRange").real())
speckleWindowSize = int(cv_file.getNode("speckleWindowSize").real())
disp12MaxDiff = int(cv_file.getNode("disp12MaxDiff").real())
minDisparity = int(cv_file.getNode("minDisparity").real())
M = cv_file.getNode("M").real()
cv_file.release()
# Defining callback functions for mouse events
def mouse_click(event,x,y,flags,param):
    """On a left double-click, record one (depth, disparity) calibration sample.

    Reads module globals `disparity` (current disparity map) and
    `sample_delta`; appends [Z, disparity[y, x]] to `Value_pairs` and
    decrements `Z` so the next sample is expected `sample_delta` cm closer.
    Clicks on non-positive disparity pixels are ignored.
    """
    global Z
    if event == cv2.EVENT_LBUTTONDBLCLK:
        if disparity[y,x] > 0:
            Value_pairs.append([Z,disparity[y,x]])
            print("Distance: %r cm | Disparity: %r"%(Z,disparity[y,x]))
            Z-=sample_delta
cv2.namedWindow('disp',cv2.WINDOW_NORMAL)
cv2.resizeWindow('disp',600,600)
cv2.namedWindow('left image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('left image',600,600)
# Samples are collected by double-clicking inside the 'disp' window.
cv2.setMouseCallback('disp',mouse_click)
# Creating an object of StereoBM algorithm
stereo = cv2.StereoBM_create()
while True:
  # Capturing and storing left and right camera images
  retR, imgR= CamR.read()
  retL, imgL= CamL.read()
  # Proceed only if the frames have been captured
  if retL and retR:
    imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
    imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
    # Applying stereo image rectification on the left image
    Left_nice= cv2.remap(imgL_gray,
              Left_Stereo_Map_x,
              Left_Stereo_Map_y,
              cv2.INTER_LANCZOS4,
              cv2.BORDER_CONSTANT,
              0)
    # Applying stereo image rectification on the right image
    Right_nice= cv2.remap(imgR_gray,
              Right_Stereo_Map_x,
              Right_Stereo_Map_y,
              cv2.INTER_LANCZOS4,
              cv2.BORDER_CONSTANT,
              0)
    # Setting the updated parameters before computing disparity map
    stereo.setNumDisparities(numDisparities)
    stereo.setBlockSize(blockSize)
    stereo.setPreFilterType(preFilterType)
    stereo.setPreFilterSize(preFilterSize)
    stereo.setPreFilterCap(preFilterCap)
    stereo.setTextureThreshold(textureThreshold)
    stereo.setUniquenessRatio(uniquenessRatio)
    stereo.setSpeckleRange(speckleRange)
    stereo.setSpeckleWindowSize(speckleWindowSize)
    stereo.setDisp12MaxDiff(disp12MaxDiff)
    stereo.setMinDisparity(minDisparity)
    # Calculating disparity using the StereoBM algorithm
    disparity = stereo.compute(Left_nice,Right_nice)
    # NOTE: compute returns a 16bit signed single channel image,
    # CV_16S containing a disparity map scaled by 16. Hence it
    # is essential to convert it to CV_16S and scale it down 16 times.
    # Converting to float32
    disparity = disparity.astype(np.float32)
    # Scaling down the disparity values and normalizing them
    disparity = (disparity/16.0 - minDisparity)/numDisparities
    # Displaying the disparity map
    cv2.imshow("disp",disparity)
    cv2.imshow("left image",imgL)
    # Esc quits early.
    if cv2.waitKey(1) == 27:
      break
    # All calibration samples collected (mouse_click decrements Z).
    if Z < min_dist:
      break
  # Frame grab failed: re-open the cameras and retry.
  else:
    CamL= cv2.VideoCapture(CamL_id)
    CamR= cv2.VideoCapture(CamR_id)
# solving for M in the following equation
# || depth = M * (1/disparity) ||
# for N data points coeff is Nx2 matrix with values
# 1/disparity, 1
# and depth is Nx1 matrix with depth values
value_pairs = np.array(Value_pairs)
z = value_pairs[:,0]          # sampled depths (cm)
disp = value_pairs[:,1]       # corresponding normalized disparities
disp_inv = 1/disp
# Plotting the relation depth and corresponding disparity
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,6))
ax1.plot(disp, z, 'o-')
ax1.set(xlabel='Normalized disparity value', ylabel='Depth from camera (cm)',
        title='Relation between depth \n and corresponding disparity')
ax1.grid()
ax2.plot(disp_inv, z, 'o-')
ax2.set(xlabel='Inverse disparity value (1/disp) ', ylabel='Depth from camera (cm)',
        title='Relation between depth \n and corresponding inverse disparity')
ax2.grid()
plt.show()
# Solving for M using least square fitting with QR decomposition method
coeff = np.vstack([disp_inv, np.ones(len(disp_inv))]).T
ret, sol = cv2.solve(coeff,z,flags=cv2.DECOMP_QR)
M = sol[0,0]
C = sol[1,0]
# NOTE(review): the fit is depth = M*(1/disp) + C, but only M is persisted
# below — the intercept C is discarded. Confirm that is intended.
print("Value of M = ",M)
# Storing the updated value of M along with the stereo parameters
cv_file = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("numDisparities",numDisparities)
cv_file.write("blockSize",blockSize)
cv_file.write("preFilterType",preFilterType)
cv_file.write("preFilterSize",preFilterSize)
cv_file.write("preFilterCap",preFilterCap)
cv_file.write("textureThreshold",textureThreshold)
cv_file.write("uniquenessRatio",uniquenessRatio)
cv_file.write("speckleRange",speckleRange)
cv_file.write("speckleWindowSize",speckleWindowSize)
cv_file.write("disp12MaxDiff",disp12MaxDiff)
cv_file.write("minDisparity",minDisparity)
cv_file.write("M",M)
cv_file.release()
| [
"noreply@github.com"
] | joaovictorcfs.noreply@github.com |
00fd8f43ef3c1be530fe652d3726866583d2d0da | 0c24fcbf802ed1881abdfbf14b867d4545237602 | /students/y2336/laboratory_works/Sorokina Mariya/mysite/core/migrations/0003_attraction_image.py | 279eae79239c358f95ab6d3c8a4029611ed1d1f6 | [] | no_license | TonikX/ITMO_FSPO_Web_Django_2020 | d435d2984778d668f6d04c86d78c991b3f390c1a | 267141d6e4ee231ca689c8929f3db25fef15a5fd | refs/heads/master | 2023-01-18T17:38:05.964240 | 2020-11-20T18:08:57 | 2020-11-20T18:08:57 | 245,081,715 | 2 | 139 | null | 2023-04-25T18:54:50 | 2020-03-05T05:59:54 | Python | UTF-8 | Python | false | false | 395 | py | # Generated by Django 3.0.5 on 2020-09-10 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional `image` field to core.Attraction."""
    dependencies = [
        ('core', '0002_auto_20200910_1929'),
    ]
    operations = [
        migrations.AddField(
            model_name='attraction',
            name='image',
            # null=True lets existing rows migrate without providing an image;
            # upload_to='' stores uploads at the top of MEDIA_ROOT.
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
| [
"63239279+MariSorok@users.noreply.github.com"
] | 63239279+MariSorok@users.noreply.github.com |
f26896d45c1284b627b806fe97f62f1b741e4edb | 70a6c77d3a5cd1fee91f5332ac6ef1a030693fe5 | /Baekjoon/5585-거스름돈.py | 25c4bc5830d12bca67a7e8d790b7d6f63f462bfa | [] | no_license | devplutus/Python3 | 2ab32c33e9f326d2bedb18a77bcff05585e7973f | 0026c7a5c86c1dcc4685fdd922b94d6387e1752c | refs/heads/master | 2020-07-11T02:55:00.629864 | 2019-10-24T06:13:21 | 2019-10-24T06:13:21 | 204,431,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | n = 1000 - int(input())
result = 0
_num = [500, 100, 50, 10, 5, 1]
for i in range(len(_num)):
result += n // _num[i]
if n % _num[i] == 0:
break
else:
n = n % _num[i]
print(result)
| [
"JungByeongMan@baggeunmin-ui-MacBookPro.local"
] | JungByeongMan@baggeunmin-ui-MacBookPro.local |
98638a754497de4b925c7afe2cfe4613cca99f49 | 9c409d4a6ddef7fb924e93f382149e1b5507ba81 | /v3/upg26.py | a990b532b49ab566e65c94f535c69c729558e5d1 | [] | no_license | amaroka/KYH-Practice | ded21136418f6845931a611a3a048617faaa3ba2 | 8014afcbec08618fccd2b5acc338fff6a9f62623 | refs/heads/master | 2023-01-12T02:15:30.864185 | 2020-11-16T09:38:45 | 2020-11-16T09:38:45 | 291,672,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #from pprint import pprint
import requests
# Ask (in Swedish) for a film title and print its OMDB details.
film = input("Sök på en film: ")
# NOTE(review): the OMDB API key is hard-coded; move it to an env var
# before sharing this script.
r = requests.get("http://www.omdbapi.com/", params={"t": film, "apikey": "9f6d550c"})
data = r.json()
# NOTE(review): no handling for unknown titles — if the response lacks
# these keys the prints below will raise KeyError (confirm API error shape).
#Title Year Director
#Actors
#imdbRating
#Awards
#Runtime
print("*** Resultat från OMDB! ***")
print(f"{data['Title']} ({data['Year']}) regisserades av {data['Director']} ")
print(f"Skådisar: {data['Actors']}")
print(f"IMDB betyg: {data['imdbRating']}")
print(f"Awards: {data['Awards']}")
print(f"Längd: {data['Runtime']}")
"daniel.yngve@student.kyh.se"
] | daniel.yngve@student.kyh.se |
bdf93bfcb0c310da6292671da0b63c8f011fef5b | 331409f6fc5639df4231b942e82c214e552b4c23 | /organizaEvento/wsgi.py | 0ea91946a1dd54ae116d81a2b68a20967a50d9e0 | [] | no_license | LuanHB/Atividade01 | 6fe67acf4dfa0c48051ccb477f9c9fef43848dc7 | 68478033925f371a315512ef4eccf6c97b4c11cf | refs/heads/master | 2020-03-07T15:48:10.760238 | 2018-03-31T19:25:20 | 2018-03-31T19:25:20 | 127,560,251 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for organizaEvento project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings unless DJANGO_SETTINGS_MODULE is already set.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "organizaEvento.settings")
application = get_wsgi_application()  # module-level WSGI callable used by servers
| [
"luan.hackbart@gmail.com"
] | luan.hackbart@gmail.com |
78278e990a57092f2ec56732405baf87e7f9f84d | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/layers/normalization.py | 2ff79b4f2e9ffe0a6b49bfc40e106c0aa66daffd | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6ba7d44e910c3440fadfac158cda4aa0136a4447c84f005b7144e6cd9b29444d
size 17101
| [
"business030301@gmail.com"
] | business030301@gmail.com |
85ae43871dcc3db57959c181396ab5c178961f2e | 330285bea42e66b1975d62e2f4dd742d4c3ab360 | /franka_moveit/scripts/create_demo_planning_scene.py | 9916a164cc43bf4866b219c232f5d029ec8d94c9 | [
"Apache-2.0"
] | permissive | justagist/franka_ros_interface | 946182e0430d21a9c119470729d7ec5e96caa404 | f1f3649a4b030a9191e0577d980680ec95afa6ab | refs/heads/master | 2021-12-24T22:22:14.599033 | 2021-12-22T13:42:30 | 2021-12-22T13:42:30 | 199,485,892 | 130 | 51 | Apache-2.0 | 2021-05-03T17:11:32 | 2019-07-29T16:07:08 | Python | UTF-8 | Python | false | false | 3,226 | py | #!/usr/bin/env python
# /***************************************************************************
#
# @package: franka_moveit
# @metapackage: franka_ros_interface
# @author: Saif Sidhik <sxs1412@bham.ac.uk>
#
# **************************************************************************/
# /***************************************************************************
# Copyright (c) 2019-2021, Saif Sidhik
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **************************************************************************/
import sys
import rospy
import moveit_commander
from franka_moveit import ExtendedPlanningSceneInterface
from franka_moveit.utils import create_pose_stamped_msg
"""
A script for creating a simple environment as a PlanningScene. This script runs
by default when interface.launch is started, but can be disabled using argument.
"""
# Box obstacles making up the demo IRLab scene. Each dict is unpacked into
# scene.add_box(**config) in main(): a name, a stamped pose in the
# panda_link0 frame, and a [x, y, z] box size.
IRLab_workspace = [
    {
        'name': 'back_wall',
        'pose': create_pose_stamped_msg(position = [-0.57,0.0,0.5], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.1,1.8,1]
    },
    {
        'name': 'side_wall',
        'pose': create_pose_stamped_msg(position = [-0.3,-0.85,0.5], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.6,0.1,1]
    },
    {
        'name': 'table',
        'pose': create_pose_stamped_msg(position = [0.45,-0.0,0], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [2,1.8,0.02]
    },
    {
        'name': 'controller_box',
        'pose': create_pose_stamped_msg(position = [-0.37,0.55,0.08], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.4,0.6,0.16]
    },
    {
        'name': 'equipment_box',
        'pose': create_pose_stamped_msg(position = [-0.35,-0.68,0.17], orientation = [1,0,0,0], frame = 'panda_link0'),
        'size': [0.46,0.4,0.34]
    }
]
def main():
    """Populate the MoveIt planning scene with the demo IRLab obstacles."""
    try:
        rospy.loginfo("Creating Demo Planning Scene")
        scene = ExtendedPlanningSceneInterface()
        # Without a short pause the scene interface sometimes dropped the
        # first add_box requests, so give it time to come up.
        rospy.sleep(1)
        for obstacle in IRLab_workspace:
            rospy.loginfo("-- Creating object: {}..".format(obstacle['name']))
            added = scene.add_box(**obstacle)
            status = "success" if added else "FAILED!"
            rospy.loginfo("------ {}".format(status))
        rospy.loginfo("Created Demo Planning Scene.")
    except (rospy.ROSInterruptException, KeyboardInterrupt):
        # Shutdown or Ctrl-C: just stop quietly.
        return
if __name__ == '__main__':
    # Initialise the ROS node and MoveIt's C++ backend before building the scene.
    rospy.init_node('simple_scene_creator',
                    anonymous=True)
    moveit_commander.roscpp_initialize(sys.argv)
    main()
| [
"saifksidhik@gmail.com"
] | saifksidhik@gmail.com |
c9905c4f0826bb701e09958514299e45c73b5843 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/173/usersdata/265/86697/submittedfiles/moedas.py | a742b7f67e4b1843dcb579ac41fef535ec50768c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | # -*- coding: utf-8 -*-
a = int(input('digite o valor de a: '))
b = int(input('digite o valor de b: '))
c = int(input('digite o valor de c: '))
for qa in range (0,c,1):
if (((c-(qa*a))%)b==0):
print(qa)
qb=(c-(qa*a))//b
print(qb)
break
else:
print('N') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9b00852b273076dfd5b240a68081bbe544c1bafa | f06cac9dad8ae3da0640cd23a6f5e84ac9420dbd | /carcosa.py | 594160b0919fbf2210c7ab0a3babe74e6b0c2409 | [] | no_license | funkaoshi/carcosa | f060c66423ce2747b46fa911c1e00e873fb9d701 | f2fbd7082caf032239cbfdc3cd362ca3af0daf49 | refs/heads/master | 2021-09-08T18:30:50.499485 | 2021-09-02T15:21:01 | 2021-09-02T15:21:01 | 7,251,850 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | import random
from flask import render_template, Flask
import colour
import dice
import dinosaur
import monster
import settings
import settlement
import spawn
import weapon
import weird
app = Flask(__name__)
app.config.from_object(settings)
def random_hex():
    """Return a four-character map-hex id from two zero-padded dice.d(40) rolls."""
    first_roll = dice.d(40)
    second_roll = dice.d(40)
    return "%02d%02d" % (first_roll, second_roll)
@app.route('/')
def index():
    """Home page: six random hex ids plus one result from each generator."""
    return render_template("index.html",
                           hex_a=random_hex(), hex_b=random_hex(),
                           hex_c=random_hex(), hex_d=random_hex(),
                           hex_e=random_hex(), hex_f=random_hex(),
                           spawn=spawn.Spawn(), settlement=settlement.Settlement(),
                           dinosaur=dinosaur.Dinosaur(), monster=monster.Monster(),
                           title=settlement.Leader.get_name(colour.colour()),
                           weird=weird.WierdGenerator().weird())
# Single-item pages: each route renders one freshly generated result,
# most paired with a random hex id.
@app.route('/about/')
def about():
    return render_template("about.html")
@app.route('/roll/')
def roll():
    return render_template("roll.html", dcarcosa=dice.carcosa())
@app.route('/settlement/')
def make_settlement():
    return render_template("settlement.html", hex=random_hex(), settlement=settlement.Settlement())
@app.route('/title/')
def make_title():
    return render_template("title.html", hex=random_hex(), title=settlement.Leader.get_name(colour.colour()))
@app.route('/spawn/')
def make_spawn():
    return render_template("spawn.html", hex=random_hex(), spawn=spawn.Spawn())
@app.route('/monster/')
def make_monster():
    return render_template("monster.html", hex=random_hex(), monster=monster.Monster())
@app.route('/dinosaur/')
def make_dinosaur():
    return render_template("dinosaur.html", hex=random_hex(), dinosaur=dinosaur.Dinosaur())
@app.route('/weird/')
def make_weird():
    return render_template("weird.html", hex=random_hex(), weird=weird.WierdGenerator().weird())
@app.route('/weapon/')
def make_weapon():
    return render_template("weapon.html", hex=random_hex(), weapon=weapon.Weapon())
@app.route('/random/', defaults={'count': 32})
@app.route('/random/<int:count>/')
def make_random(count):
    """Render `count` random entries drawn with a fixed d100 rarity table.

    1-40 weird, 41-70 settlement, 71-83 spawn, 84-88 dinosaur, 89-100 monster.
    """
    weird_gen = weird.WierdGenerator()
    entries = []
    for _ in range(count):
        roll = dice.d(100)
        if roll <= 40:
            entry = weird_gen.weird()
        elif roll <= 70:
            entry = settlement.Settlement()
        elif roll <= 83:
            entry = spawn.Spawn()
        elif roll <= 88:
            entry = dinosaur.Dinosaur()
        else:
            entry = monster.Monster()
        entries.append(entry)
    return render_template("random.html", hexes=entries,
                           count=int(len(entries)/2))
if __name__ == '__main__':
    app.run("0.0.0.0")  # Flask dev server, listening on all interfaces
| [
"ramanan@funkaoshi.com"
] | ramanan@funkaoshi.com |
af9b32fe82f8d824bff595d6996da2de74b51b69 | c4c7140f84673b8268bf25225120916cbd9515e1 | /accounts/serializers.py | 0bd5469f63f2901c69d6f1ec63eb9d41589a9687 | [] | no_license | semyonich/edu | 692f11da22102de7984f59b966a7a9cb4c53ec54 | e91961ae45d781ed606481c5a0961de387c8824b | refs/heads/master | 2020-12-02T21:01:23.944816 | 2017-08-06T17:17:43 | 2017-08-06T17:17:43 | 96,244,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from rest_framework import serializers
from accounts.models import User
from blog.models import Article
from blog.serializers import ArticleSerializer
class UserSerializer(serializers.ModelSerializer):
    """User profile plus the articles the user has liked and their count."""
    likes_number = serializers.SerializerMethodField()
    articles = serializers.SerializerMethodField()
    def get_articles(self, obj):
        # Articles whose liked_by M2M contains this user, fully serialized.
        return ArticleSerializer(Article.objects.filter(liked_by__in=[obj]), many=True).data
    def get_likes_number(self, obj):
        # NOTE(review): this runs a second, separate query per serialized user
        # (get_articles above already fetched the same rows).
        return Article.objects.filter(liked_by__in=[obj]).count()
    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name', 'photo', 'birthday', 'likes_number', 'articles')
class UserCreationSerializer(serializers.ModelSerializer):
    """Signup payload: username, email, password.

    NOTE(review): create() is not overridden here, so the password would be
    stored as-is by the default ModelSerializer behaviour — confirm hashing
    happens elsewhere.
    """
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
    def validate(self, attrs):
        # Reject usernames containing the letter 'a' (demo validation rule).
        if 'a' in attrs.get('username'):
            raise serializers.ValidationError(' a - is not allowed symbol')
        return attrs
class UserChangePasswordSerializer(serializers.Serializer):
    """Password-change payload; the acting user is supplied via context['user'].

    validate() checks that `old_password` matches the user's current password
    and that both new-password fields agree, raising
    serializers.ValidationError otherwise.
    """
    old_password = serializers.CharField(max_length=64)
    new_password = serializers.CharField(max_length=64)
    new_password2 = serializers.CharField(max_length=64)
    def validate(self, attrs):
        user = self.context.get('user')
        if not user.check_password(attrs.get('old_password')):
            raise serializers.ValidationError('Incorrect old password')
        if attrs.get('new_password') != attrs.get('new_password2'):
            raise serializers.ValidationError('Not equal passwords')
        # (Removed a stray debug `print(user)` that leaked user reprs to stdout.)
        return attrs
"s.semenihin@gmail.com"
] | s.semenihin@gmail.com |
fca8609d88669b97081aaea80299bdcffd7d7a2d | 4555d586dc9cc8dd5cab58e4ab49b92c5405b9c4 | /talkingdata/tuning_xgb_test_fe2.py | d78c9fe568e1b44eeee54847a7bea2952dbf07a0 | [] | no_license | statchaitya/Kaggle | 7704fcde90f2c953c9deaa7578653df8170d784d | 012987027876f47b94e10cc9840283c2596ab7db | refs/heads/master | 2021-01-25T14:03:22.568640 | 2019-10-18T23:32:23 | 2019-10-18T23:32:23 | 123,641,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,192 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 23:43:51 2018
@author: cgokh
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 13:26:41 2018
@author: cgokh
"""
#from sklearn.decomposition import PCA
import gc
import pandas as pd
import numpy as np
import lightgbm as lgb
import xgboost as xgb
from sklearn.cross_validation import train_test_split
# Narrow integer dtypes keep the multi-million-row frames compact in memory.
dtypes = {
        'ip'            : 'uint32',
        'app'           : 'uint16',
        'device'        : 'uint16',
        'os'            : 'uint16',
        'channel'       : 'uint16',
        'is_attributed' : 'uint8',
        'click_id'      : 'uint32'
        }
path = 'C:/Kaggle/talkingdata/'
train_rows = 184903890
model_train_rows = 30000000
# Skip all but the last `model_train_rows` rows of the training file.
skip = train_rows - model_train_rows
train_columns = ['ip', 'app', 'device', 'os', 'channel', 'click_time','is_attributed']
train = pd.read_csv(path+"train.csv", skiprows = range(1,skip+1), usecols = train_columns, dtype=dtypes)
test_columns = train_columns[:-1]
test = pd.read_csv(path+"test.csv", usecols = test_columns, dtype=dtypes)
#train = pd.read_csv(path+"train.csv", nrows=10000000, usecols = train_columns, dtype=dtypes)
print("train data read. train data shape is", train.shape)
nrow_train = train.shape[0]
# Stack train on top of test so per-IP aggregates see clicks from both.
merge = pd.concat([train,test])
del train, test
gc.collect()
def timeFeatures(df):
    """Replace `click_time` in `df` (in place) with `wday` and `hour` columns.

    Returns the same frame for chaining.
    """
    clicked_at = pd.to_datetime(df['click_time'])
    df['wday'] = clicked_at.dt.dayofweek   # 0 = Monday ... 6 = Sunday
    df['hour'] = clicked_at.dt.hour        # 0..23
    df.drop('click_time', axis=1, inplace=True)
    return df
print("Starting feature engineering")
def feature_engineering_2(df):
    """Add time columns to `df` in place and build per-IP uniqueness tables.

    Mutates `df`: derives `wday` (day of week) and `hour` from `click_time`,
    then drops `click_time`. Returns six two-column frames, each mapping
    `ip` to the number of distinct values of one column seen for that ip:
    app, device, channel, os, wday, hour (in that order), with column names
    `nu_<col>_ip`. Prints a progress line after each aggregate.
    """
    # HOUR AND DAY OF WEEK
    df['datetime'] = pd.to_datetime(df['click_time'])
    df['wday'] = df['datetime'].dt.dayofweek
    df['hour'] = df['datetime'].dt.hour
    df.drop(['click_time', 'datetime'], axis=1, inplace=True)

    def _ip_nunique(col, out_name):
        # Count distinct `col` values observed per ip.
        agg = df.groupby(['ip'])[col].nunique().reset_index()
        agg.columns = ['ip', out_name]
        return agg

    nu_apps_ip = _ip_nunique('app', 'nu_apps_ip')
    print("Feature 1 done")
    nu_devices_ip = _ip_nunique('device', 'nu_devices_ip')
    print("Feature 2 done")
    nu_channels_ip = _ip_nunique('channel', 'nu_channels_ip')
    print("Feature 3 done")
    nu_os_ip = _ip_nunique('os', 'nu_os_ip')
    print("Feature 4 done")
    nu_wday_ip = _ip_nunique('wday', 'nu_wday_ip')
    print("Feature 5 done")
    nu_hour_ip = _ip_nunique('hour', 'nu_hour_ip')
    print("Feature 6 done")
    gc.collect()
    return nu_apps_ip, nu_devices_ip, nu_channels_ip, nu_os_ip, nu_wday_ip, nu_hour_ip
# Build the per-IP lookup tables on train+test combined, then recover train.
nu_apps_ip, nu_devices_ip, nu_channels_ip, nu_os_ip, nu_wday_ip, nu_hour_ip = feature_engineering_2(merge)
train = merge[:nrow_train]
del merge
gc.collect()
def merge_features(df):
    """Left-join the six per-IP nunique lookup tables onto `df` by `ip`.

    Reads the module-level frames produced by feature_engineering_2,
    downcasts each merged column to uint16, and returns the merged frame.
    Progress messages match the original feature numbering.
    """
    steps = (
        (nu_apps_ip, 'nu_apps_ip', None),
        (nu_devices_ip, 'nu_devices_ip', "Feature 2 done"),
        (nu_channels_ip, 'nu_channels_ip', "Feature 3 done"),
        (nu_os_ip, 'nu_os_ip', "Feature 4 done"),
        (nu_wday_ip, 'nu_wday_ip', "Feature 5 done"),
        (nu_hour_ip, 'nu_hour_ip', "Feature 6 done"),
    )
    for lookup, col, progress_msg in steps:
        df = pd.merge(df, lookup, on='ip', how='left', sort=False)
        df[col] = df[col].astype('uint16')
        if progress_msg is not None:
            print(progress_msg)
    gc.collect()
    return df
train = merge_features(train)
del nu_apps_ip, nu_devices_ip, nu_channels_ip, nu_os_ip, nu_wday_ip, nu_hour_ip
y = train['is_attributed']
train.drop(['is_attributed','ip'], axis=1, inplace=True)
gc.collect()
# Fixed 90/10 split; random_state pinned so tuning runs stay comparable.
x1, x2, y1, y2 = train_test_split(train, y, test_size=0.1, random_state=99)
dtrain = xgb.DMatrix(x1, y1)
dvalid = xgb.DMatrix(x2, y2)
del x1, y1, x2, y2
gc.collect()
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
params = {'eta': 0.1,
          'tree_method': "hist",
          'grow_policy': "lossguide",
          'max_leaves': 1400,
          'max_depth': 4,
          'subsample': 0.75,
          'colsample_bytree': 0.7,
          'colsample_bylevel':0.7,
          'min_child_weight':0.2,
          'alpha':4,
          'objective': 'binary:logistic',
          'scale_pos_weight':9,
          'eval_metric': 'auc',
          'nthread':8,
          'random_state': 99,
          'silent': True}
# Final model: up to 804 rounds, early-stopped (15) on validation AUC.
model = xgb.train(params, dtrain, 804, watchlist, maximize=True, early_stopping_rounds = 15, verbose_eval=1)
gc.collect()
###############################################################################
eta_grid = [0.2,0.1,0.05,0.02]
def tune_eta(eta_grid):
    """Sweep learning rates; return a frame of (eta, best_iteration, best_score).

    Reads module-level `train`/`y`. Each eta trains up to 2500 rounds with
    early stopping (15) against a fixed 10% AUC-validation split; all other
    params are held constant.
    """
    x1, x2, y1, y2 = train_test_split(train, y, test_size=0.1, random_state=99)
    dtrain = xgb.DMatrix(x1, y1)
    dvalid = xgb.DMatrix(x2, y2)
    del x1, y1, x2, y2
    gc.collect()
    watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
    eta_tuned = pd.DataFrame()
    eta_tuned['eta_values'] = eta_grid
    best_iteration = []
    best_score = []
    for i in range(0,len(eta_grid)):
        params = {'eta': eta_grid[i],
          'tree_method': "hist",
          'grow_policy': "lossguide",
          'max_leaves': 1400,
          'max_depth': 4,
          'subsample': 0.8,
          'colsample_bytree': 0.7,
          'colsample_bylevel':0.7,
          'min_child_weight':0,
          'alpha':4,
          'objective': 'binary:logistic',
          'scale_pos_weight':9,
          'eval_metric': 'auc',
          'nthread':8,
          'random_state': 99,
          'silent': True}
        model = xgb.train(params, dtrain, 2500, watchlist, maximize=True, early_stopping_rounds = 15, verbose_eval=1)
        best_iteration.append(model.best_iteration)
        best_score.append(model.best_score)
        # Free the booster before the next grid point.
        del model
        gc.collect()
    eta_tuned['best_iteration'] = best_iteration
    eta_tuned['best_score']= best_score
    return eta_tuned
eta_tuned = tune_eta(eta_grid)
eta_tuned  # NOTE(review): bare expression is a no-op when run as a script (REPL leftover)
############################################################################################
sub_sample_grid = [0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1]
def tune_subsample(sub_sample_grid):
    """Sweep row subsample ratios; return (sub_sample, best_iteration, best_score).

    Same protocol as tune_eta but with eta fixed at 0.2, max_depth 0
    (lossguide growth), and up to 2500 rounds per grid point.
    """
    x1, x2, y1, y2 = train_test_split(train, y, test_size=0.1, random_state=99)
    dtrain = xgb.DMatrix(x1, y1)
    dvalid = xgb.DMatrix(x2, y2)
    del x1, y1, x2, y2
    gc.collect()
    watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
    subsample_tuned = pd.DataFrame()
    subsample_tuned['sub_sample_values'] = sub_sample_grid
    best_iteration = []
    best_score = []
    for i in range(0,len(sub_sample_grid)):
        params = {'eta': 0.2,
          'tree_method': "hist",
          'grow_policy': "lossguide",
          'max_leaves': 1400,
          'max_depth': 0,
          'subsample': sub_sample_grid[i],
          'colsample_bytree': 0.7, #can be 1
          'colsample_bylevel':0.7, #can be 1
          'min_child_weight':0,
          'alpha':4,
          'objective': 'binary:logistic',
          'scale_pos_weight':9,
          'eval_metric': 'auc',
          'nthread':8,
          'random_state': 99,
          'silent': True}
        model = xgb.train(params, dtrain, 2500, watchlist, maximize=True, early_stopping_rounds = 15, verbose_eval=1)
        best_iteration.append(model.best_iteration)
        best_score.append(model.best_score)
        del model
        gc.collect()
    subsample_tuned['best_iteration'] = best_iteration
    subsample_tuned['best_score']= best_score
    return subsample_tuned
subsample_tuned = tune_subsample(sub_sample_grid)
subsample_tuned.to_csv('subsample_tuned.csv',index=False)
############################################################################################
colsample_bt_grid = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
def tune_colsamplebt(colsample_bt_grid):
    """Sweep colsample_bytree; return (colsample_bt, best_iteration, best_score).

    Same protocol as the other sweeps, with subsample fixed at 0.75 and up to
    1500 rounds per grid point.
    """
    x1, x2, y1, y2 = train_test_split(train, y, test_size=0.1, random_state=99)
    dtrain = xgb.DMatrix(x1, y1)
    dvalid = xgb.DMatrix(x2, y2)
    del x1, y1, x2, y2
    gc.collect()
    watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
    colsamplebt_tuned = pd.DataFrame()
    colsamplebt_tuned['colsample_bt_values'] = colsample_bt_grid
    best_iteration = []
    best_score = []
    for i in range(0,len(colsample_bt_grid)):
        params = {'eta': 0.2,
          'tree_method': "hist",
          'grow_policy': "lossguide",
          'max_leaves': 1400,
          'max_depth': 0,
          'subsample': 0.75,
          'colsample_bytree': colsample_bt_grid[i], #can be 1
          'colsample_bylevel':0.7, #can be 1
          'min_child_weight':0,
          'alpha':4,
          'objective': 'binary:logistic',
          'scale_pos_weight':9,
          'eval_metric': 'auc',
          'nthread':8,
          'random_state': 99,
          'silent': True}
        model = xgb.train(params, dtrain, 1500, watchlist, maximize=True, early_stopping_rounds = 15, verbose_eval=1)
        best_iteration.append(model.best_iteration)
        best_score.append(model.best_score)
        del model
        gc.collect()
    colsamplebt_tuned['best_iteration'] = best_iteration
    colsamplebt_tuned['best_score']= best_score
    return colsamplebt_tuned
colsamplebt_tuned = tune_colsamplebt(colsample_bt_grid)
colsamplebt_tuned.to_csv('colsamplebt_tuned.csv',index=False)
colsamplebt_tuned  # NOTE(review): bare expression — no effect outside a REPL
subsample_tuned  # NOTE(review): bare expression — no effect outside a REPL
############################################################################################
min_child_wt_grid = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
def tune_minchildwt(min_child_wt_grid):
    """Sweep min_child_weight; return (min_child_wt, best_iteration, best_score).

    Same protocol as the other sweeps, with subsample 0.95 and
    colsample_bytree 0.65 fixed, up to 1500 rounds per grid point.
    """
    x1, x2, y1, y2 = train_test_split(train, y, test_size=0.1, random_state=99)
    dtrain = xgb.DMatrix(x1, y1)
    dvalid = xgb.DMatrix(x2, y2)
    del x1, y1, x2, y2
    gc.collect()
    watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
    minchildwt_tuned = pd.DataFrame()
    minchildwt_tuned['min_child_wt_values'] = min_child_wt_grid
    best_iteration = []
    best_score = []
    for i in range(0,len(min_child_wt_grid)):
        params = {'eta': 0.2,
          'tree_method': "hist",
          'grow_policy': "lossguide",
          'max_leaves': 1400,
          'max_depth': 0,
          'subsample': 0.95,
          'colsample_bytree': 0.65, #can be 1
          'colsample_bylevel':0.7, #can be 1
          'min_child_weight':min_child_wt_grid[i],
          'alpha':4,
          'objective': 'binary:logistic',
          'scale_pos_weight':9,
          'eval_metric': 'auc',
          'nthread':8,
          'random_state': 99,
          'silent': True}
        model = xgb.train(params, dtrain, 1500, watchlist, maximize=True, early_stopping_rounds = 15, verbose_eval=1)
        best_iteration.append(model.best_iteration)
        best_score.append(model.best_score)
        del model
        gc.collect()
    minchildwt_tuned['best_iteration'] = best_iteration
    minchildwt_tuned['best_score']= best_score
    return minchildwt_tuned
minchildwt_tuned = tune_minchildwt(min_child_wt_grid)
minchildwt_tuned.to_csv('minchildwt_tuned.csv',index=False)
###############################################################################
gc.collect()
test_columns = train_columns[:-1]
test_columns.append('click_id')
test = pd.read_csv(path+"test.csv", usecols = test_columns, dtype=dtypes)
gc.collect()
sub = pd.DataFrame()
sub['click_id'] = test['click_id'].astype('int')
test.drop(['click_id'], axis=1, inplace=True)
gc.collect()
# NOTE(review): feature_engineering_2 returns the six lookup frames, so this
# rebinds `test` to a tuple and the .drop below would fail. It also never
# merges the nu_* features the model was trained with — confirm intent.
test = feature_engineering_2(test)
# NOTE(review): `test` was read without an 'is_attributed' column.
test.drop('is_attributed', axis=1, inplace=True)
dtest = xgb.DMatrix(test)
y_pred = model.predict(dtest, ntree_limit=model.best_ntree_limit)
sub['is_attributed'] = np.round(y_pred,4)
sub.to_csv('xgb_30m_fe_2.csv', index=False)
"statchaitya@gmail.com"
] | statchaitya@gmail.com |
f404474ad275a09ec62b37505c04bd7782010f35 | 8e8b6eb254e4d24cd94a2dc17db5507929d059c2 | /python/craftassist/voxel_models/geoscorer/models.py | 4e7e9a9fc046975e1dd449f620a97ca0869b9149 | [
"MIT"
] | permissive | afree2004/craftassist | 087a3d4dbb162c91b88ce30723ca320d15b7a0be | dff78e8773e3d907f6ea1a4623a2c0ade7185906 | refs/heads/master | 2020-12-21T13:45:37.276045 | 2020-01-17T14:18:09 | 2020-01-17T14:18:09 | 236,448,535 | 1 | 0 | MIT | 2020-01-27T08:38:53 | 2020-01-27T08:38:52 | null | UTF-8 | Python | false | false | 17,761 | py | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
import os
import random
import string
from shutil import copyfile
def conv3x3x3(in_planes, out_planes, stride=1, bias=True):
    """Build a padded 3x3x3 convolution (spatial size preserved at stride 1)."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=bias,
    )
def conv3x3x3up(in_planes, out_planes, bias=True):
    """Build an upsampling (stride-2) 3x3x3 transposed convolution.

    With padding=1 and output_padding=1 each spatial dimension is doubled.

    Bug fix: the ``bias`` argument was previously accepted but silently
    ignored (the layer always used its default bias=True); it is now
    forwarded to ``nn.ConvTranspose3d``. The default behavior is unchanged.
    """
    return nn.ConvTranspose3d(
        in_planes,
        out_planes,
        stride=2,
        kernel_size=3,
        padding=1,
        output_padding=1,
        bias=bias,
    )
def convbn(in_planes, out_planes, stride=1, bias=True):
    """A conv -> batch-norm -> ReLU stage built around a padded 3x3x3 conv."""
    conv = nn.Conv3d(
        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=bias
    )
    return nn.Sequential(conv, nn.BatchNorm3d(out_planes), nn.ReLU(inplace=True))
def convbnup(in_planes, out_planes, bias=True):
    """An upsampling stage: stride-2 transposed conv -> batch-norm -> ReLU."""
    stages = [
        conv3x3x3up(in_planes, out_planes, bias=bias),
        nn.BatchNorm3d(out_planes),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*stages)
class ValueNet(nn.Module):
    """Score a voxel grid of block ids with one scalar value per sample.

    Pipeline: block-id embedding lookup -> strided 3d conv stack ->
    global average pool -> linear head.
    """

    def __init__(self, opts):
        # opts: plain dict of hyper-parameters; every key is optional.
        super(ValueNet, self).__init__()
        self.embedding_dim = opts.get("blockid_embedding_dim", 8)
        self.num_layers = opts.get("num_layers", 4)  # 32x32x32 input
        num_words = opts.get("num_words", 3)
        hidden_dim = opts.get("hidden_dim", 64)
        self.embedding = nn.Embedding(num_words, self.embedding_dim)
        self.layers = nn.ModuleList()
        indim = self.embedding_dim
        outdim = hidden_dim
        # First stage: kernel-5 stride-2 conv halves the spatial size.
        self.layers.append(
            nn.Sequential(
                nn.Conv3d(indim, outdim, kernel_size=5, stride=2, padding=1),
                nn.BatchNorm3d(outdim),
                nn.ReLU(inplace=True),
            )
        )
        indim = outdim
        # Each remaining stage: a stride-1 convbn followed by a stride-2 convbn.
        for i in range(self.num_layers - 1):
            layer = nn.Sequential(convbn(indim, outdim), convbn(outdim, outdim, stride=2))
            indim = outdim
            self.layers.append(layer)
        # Linear head maps pooled features to a single scalar.
        self.out = nn.Linear(outdim, 1)

    # todo normalize things? margin doesn't mean much here
    def forward(self, x):
        # FIXME when pytorch is ready for this, embedding
        # backwards is soooooo slow
        # z = self.embedding(x)
        # x: LongTensor of block ids, shape (N, H, W, L).
        szs = list(x.size())
        x = x.view(-1)
        # Manual embedding lookup via index_select (see FIXME above).
        z = self.embedding.weight.index_select(0, x)
        szs.append(self.embedding_dim)
        z = z.view(torch.Size(szs))
        # (N, H, W, L, C) -> (N, C, H, W, L) for Conv3d.
        z = z.permute(0, 4, 1, 2, 3).contiguous()
        for i in range(self.num_layers):
            z = self.layers[i](z)
        # Global average pool over the remaining spatial dims -> (N, C).
        z = z.mean([2, 3, 4])
        # szs = list(z.size())
        # z = z.view(szs[0], szs[1], -1)
        # z = z.max(2)[0]
        # z = nn.functional.normalize(z, dim=1)
        return self.out(z)
class ContextEmbeddingNet(nn.Module):
    """Embed a 32^3 context of block ids into a per-voxel D-dim embedding.

    The block-id embedding table is shared with the segment net (passed in).
    All conv stages preserve the 32^3 spatial size (kernel 5, padding 2).
    """

    def __init__(self, opts, blockid_embedding):
        super(ContextEmbeddingNet, self).__init__()
        self.blockid_embedding_dim = opts.get("blockid_embedding_dim", 8)
        spatial_embedding_dim = opts.get("output_embedding_dim", 8)
        num_layers = opts.get("num_layers", 4)
        hidden_dim = opts.get("hidden_dim", 64)
        # A shared embedding for the block id types
        self.blockid_embedding = blockid_embedding
        # Create model for converting the context into HxWxL D dim representations
        self.layers = nn.ModuleList()
        # B dim block id -> hidden dim, maintain input size
        self.layers.append(
            nn.Sequential(
                nn.Conv3d(self.blockid_embedding_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm3d(hidden_dim),
                nn.ReLU(inplace=True),
            )
        )
        # hidden dim -> hidden dim, maintain input size
        for i in range(num_layers - 1):
            self.layers.append(
                nn.Sequential(
                    nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                    nn.BatchNorm3d(hidden_dim),
                    nn.ReLU(inplace=True),
                )
            )
        # hidden dim -> spatial embedding dim, maintain input size
        self.out = nn.Linear(hidden_dim, spatial_embedding_dim)

    # Returns N x H x W x L x D (channels-last after the final permute).
    def forward(self, x):
        # x: LongTensor of block ids, shape (N, 32, 32, 32).
        if x.size()[1] != 32:
            raise Exception("Size of input should be Nx32x32x32 but it is {}".format(x.size()))
        sizes = list(x.size())
        x = x.view(-1)
        # Get the blockid embedding for each space in the context input
        z = self.blockid_embedding.weight.index_select(0, x)
        # Add the embedding dim B
        sizes.append(self.blockid_embedding_dim)
        z = z.view(torch.Size(sizes))
        # N x H x W x L x B ==> N x B x H x W x L
        z = z.permute(0, 4, 1, 2, 3).contiguous()
        for i in range(len(self.layers)):
            z = self.layers[i](z)
        # Back to channels-last so the linear head maps hidden -> D per voxel.
        z = z.permute(0, 2, 3, 4, 1).contiguous()
        return self.out(z)
class SegmentEmbeddingNet(nn.Module):
    """Embed an 8^3 segment of block ids into one D-dim vector per sample.

    Three conv+maxpool stages collapse the 8^3 grid down to 1^3.
    """

    def __init__(self, opts, blockid_embedding):
        super(SegmentEmbeddingNet, self).__init__()
        self.blockid_embedding_dim = opts.get("blockid_embedding_dim", 8)
        spatial_embedding_dim = opts.get("spatial_embedding_dim", 8)
        hidden_dim = opts.get("hidden_dim", 64)
        # A shared embedding for the block id types
        self.blockid_embedding = blockid_embedding
        # Create model for converting the segment into 1 D dim representation
        # input size: 8x8x8
        self.layers = nn.ModuleList()
        # B dim block id -> hidden dim, maintain input size
        self.layers.append(
            nn.Sequential(
                nn.Conv3d(self.blockid_embedding_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm3d(hidden_dim),
                nn.ReLU(inplace=True),
            )
        )
        # hidden dim -> hidden dim
        # (maintain input size x2, max pool to half) x 3: 8x8x8 ==> 1x1x1
        for i in range(3):
            self.layers.append(
                nn.Sequential(
                    nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                    nn.BatchNorm3d(hidden_dim),
                    nn.ReLU(inplace=True),
                    nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                    nn.BatchNorm3d(hidden_dim),
                    nn.ReLU(inplace=True),
                    nn.MaxPool3d(2, stride=2),
                )
            )
        # hidden dim -> spatial embedding dim, 1x1x1
        self.out = nn.Linear(hidden_dim, spatial_embedding_dim)

    # Returns N x 1 x 1 x 1 x D (channels-last after the final permute).
    def forward(self, x):
        # x: LongTensor of block ids, shape (N, 8, 8, 8).
        if x.size()[1] != 8:
            raise Exception("Size of input should be Nx8x8x8 but it is {}".format(x.size()))
        sizes = list(x.size())
        x = x.view(-1)
        # Get the blockid embedding for each space in the context input
        z = self.blockid_embedding.weight.index_select(0, x)
        # Add the embedding dim B
        sizes.append(self.blockid_embedding_dim)
        z = z.view(torch.Size(sizes))
        # N x H x W x L x B ==> N x B x H x W x L
        z = z.permute(0, 4, 1, 2, 3).contiguous()
        for i in range(len(self.layers)):
            z = self.layers[i](z)
        z = z.permute(0, 2, 3, 4, 1).contiguous()
        return self.out(z)
class SegmentDirectionEmbeddingNet(nn.Module):
    """Fuse a segment embedding with optional viewer pos/look/vec/direction
    features through a small MLP."""

    def __init__(self, opts):
        super(SegmentDirectionEmbeddingNet, self).__init__()
        output_embedding_dim = opts.get("output_embedding_dim", 8)
        self.use_viewer_pos = opts.get("seg_use_viewer_pos", False)
        self.use_viewer_look = opts.get("seg_use_viewer_look", False)
        self.use_viewer_vec = opts.get("seg_use_viewer_vec", False)
        self.use_direction = opts.get("seg_use_direction", False)
        hidden_dim = opts.get("hidden_dim", 64)
        num_layers = opts.get("num_seg_dir_layers", 3)
        self.seg_input_dim = opts.get("spatial_embedding_dim", 8)
        self.context_side_length = opts.get("context_side_length", 32)
        # Input width grows with each enabled feature: 3 per coordinate triple,
        # 5 for the direction encoding.
        input_dim = self.seg_input_dim
        if self.use_viewer_pos:
            input_dim += 3
        if self.use_viewer_look:
            input_dim += 3
        if self.use_viewer_vec:
            input_dim += 3
        if self.use_direction:
            input_dim += 5
        # Create model for converting the segment, viewer info,
        self.layers = nn.ModuleList()
        self.layers.append(nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.ReLU()))
        for i in range(num_layers - 1):
            self.layers.append(nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU()))
        self.out = nn.Linear(hidden_dim, output_embedding_dim)

    # In: [seg_embedding, viewer_pos, viewer_look, viewer_vec, direction]
    # Out: N x D x 1 x 1 x 1
    def forward(self, x):
        if len(x) != 5:
            raise Exception("There should be 5 elements in the input")
        if x[0].size()[1] != self.seg_input_dim:
            raise Exception("The seg spatial embed is wrong size: {}".format(x[0].size()))
        inp = [x[0]]
        # Scale coordinates by half the context side length.
        normalizing_const = self.context_side_length * 1.0 / 2.0
        # NOTE(review): .float() only copies when the dtype differs, so div_
        # mutates the caller's tensor if float tensors are passed in -- confirm
        # these inputs are integer coordinate tensors.
        if self.use_viewer_pos:
            inp.append(x[1].float().div_(normalizing_const))
        if self.use_viewer_look:
            inp.append(x[2].float().div_(normalizing_const))
        if self.use_viewer_vec:
            inp.append(x[3].float().div_(normalizing_const))
        if self.use_direction:
            inp.append(x[4].float())
        z = torch.cat(inp, 1)
        for i in range(len(self.layers)):
            z = self.layers[i](z)
        return self.out(z).unsqueeze(2).unsqueeze(3).unsqueeze(4)
class ContextSegmentScoringModule(nn.Module):
    """Dot-product scoring of one segment embedding against every context voxel."""

    def __init__(self):
        super(ContextSegmentScoringModule, self).__init__()

    def forward(self, x):
        # x: [context_emb (N, H, W, L, D), seg_emb (N, 1, 1, 1, D)]
        context_emb, seg_emb = x[0], x[1]
        n = context_emb.size(0)
        d = context_emb.size(4)
        voxels_per_sample = context_emb.numel() // (n * d)
        # Flatten the context to one D-column per voxel: (N*V, D, 1).
        ctx_flat = context_emb.view(-1, d, 1)
        # Broadcast the segment embedding across all V voxels: (N*V, 1, D).
        seg_flat = seg_emb.view(n, 1, -1)
        seg_flat = seg_flat.expand(-1, voxels_per_sample, -1).contiguous()
        seg_flat = seg_flat.view(-1, 1, d)
        # Batched dot product: (K, 1, D) x (K, D, 1) -> (K, 1, 1).
        scores = torch.bmm(seg_flat, ctx_flat)
        return scores.view(n, -1)
class spatial_emb_loss(nn.Module):
    """Cross-entropy over flattened voxel scores (log-softmax + NLL)."""

    def __init__(self):
        super(spatial_emb_loss, self).__init__()
        self.lsm = nn.LogSoftmax()
        self.crit = nn.NLLLoss()

    # format [scores (Nx32^3), targets (N)]
    def forward(self, inp):
        assert len(inp) == 2
        raw_scores, target_idx = inp[0], inp[1]
        log_probs = self.lsm(raw_scores)
        return self.crit(log_probs, target_idx)
class rank_loss(nn.Module):
    """Margin ranking loss: each positive must beat its negatives by ``margin``.

    Bug fix: the ``nneg`` constructor argument was previously ignored
    (``self.nneg`` was hard-coded to 5); it is now respected. The default
    (nneg=5) behavior is unchanged.
    """

    def __init__(self, margin=0.1, nneg=5):
        super(rank_loss, self).__init__()
        self.nneg = nneg
        self.margin = margin
        self.relu = nn.ReLU()

    def forward(self, inp):
        # it is expected that the batch is arranged as pos neg neg ... neg pos neg ...
        # with self.nneg negs per pos
        # NOTE(review): the reshape below is view(nneg + 1, -1), which matches
        # that interleaved layout only when there is a single positive per
        # batch; compare reshape_nll, which uses view(-1, nneg + 1). Confirm
        # the caller's batch layout before changing either.
        assert inp.shape[0] % (self.nneg + 1) == 0
        inp = inp.view(self.nneg + 1, -1)
        pos = inp[0]
        neg = inp[1:].contiguous()
        # Hinge: penalize any negative scoring within `margin` of the positive.
        errors = self.relu(neg - pos.repeat(self.nneg, 1) + self.margin)
        return errors.mean()
class reshape_nll(nn.Module):
def __init__(self, nneg=5):
super(reshape_nll, self).__init__()
self.nneg = nneg
self.lsm = nn.LogSoftmax()
self.crit = nn.NLLLoss()
def forward(self, inp):
# it is expected that the batch is arranged as pos neg neg ... neg pos neg ...
# with self.nneg negs per pos
assert inp.shape[0] % (self.nneg + 1) == 0
inp = inp.view(-1, self.nneg + 1).contiguous()
logsuminp = self.lsm(inp)
o = torch.zeros(inp.size(0), device=inp.device).long()
return self.crit(logsuminp, o)
def prepare_variables(b, opts):
    """Cast a batch tensor to long and move it to GPU when opts["cuda"] is set."""
    batch = b.long()
    if opts["cuda"]:
        batch = batch.cuda()
    return batch
def save_checkpoint(tms, metadata, opts, path):
    """Serialize the trainer modules (on CPU), optimizer state, metadata and
    options to ``path``.

    tms: dict of trainer modules/optimizer as built by
    create_context_segment_modules. Model weights are stored under the
    "model_state_dicts" key, one entry per module name.
    """
    model_dict = {"context_net": tms["context_net"], "seg_net": tms["seg_net"]}
    if opts.get("seg_direction_net", False):
        model_dict["seg_direction_net"] = tms["seg_direction_net"]
    # Add all models to dicts and move state to cpu
    state_dicts = {}
    for model_name, model in model_dict.items():
        state_dicts[model_name] = model.state_dict()
        # Move every tensor to CPU so the checkpoint loads on any device.
        for n, s in state_dicts[model_name].items():
            state_dicts[model_name][n] = s.cpu()
    # Save to path
    torch.save(
        {
            "metadata": metadata,
            "model_state_dicts": state_dicts,
            "optimizer_state_dict": tms["optimizer"].state_dict(),
            "options": opts,
        },
        path,
    )
def create_context_segment_modules(opts):
    """Build the context/segment nets, scoring module, loss and optimizer.

    Returns a dict with keys: context_net, seg_net, score_module, lfn,
    optimizer, and (when opts["seg_direction_net"] is set) seg_direction_net.
    """
    possible_params = ["context_net", "seg_net", "seg_direction_net"]
    # Add all of the modules
    # The block-id embedding table is shared by the context and segment nets.
    emb_dict = torch.nn.Embedding(opts["num_words"], opts["blockid_embedding_dim"])
    tms = {
        "context_net": ContextEmbeddingNet(opts, emb_dict),
        "seg_net": SegmentEmbeddingNet(opts, emb_dict),
        "score_module": ContextSegmentScoringModule(),
        "lfn": spatial_emb_loss(),
    }
    if opts.get("seg_direction_net", False):
        tms["seg_direction_net"] = SegmentDirectionEmbeddingNet(opts)
    # Move everything to the right device
    if "cuda" in opts and opts["cuda"]:
        emb_dict.cuda()
        for n in possible_params:
            if n in tms:
                tms[n].cuda()
    # Setup the optimizer
    # One optimizer over the union of all trainable module parameters.
    all_params = []
    for n in possible_params:
        if n in tms:
            all_params.extend(list(tms[n].parameters()))
    tms["optimizer"] = get_optim(all_params, opts)
    return tms
def load_context_segment_checkpoint(checkpoint_path, opts, backup=True, verbose=False):
    """Restore trainer modules from a checkpoint written by ``save_checkpoint``.

    Returns {} when ``checkpoint_path`` does not exist. The checkpoint's
    saved options override the entries in ``opts`` (mutated in place).
    When ``backup`` is set, the checkpoint file is copied aside first.

    Bug fix: seg_direction_net weights are stored under
    checkpoint["model_state_dicts"] (see save_checkpoint) but were previously
    read from checkpoint["seg_direction_net"], raising KeyError whenever the
    direction net was enabled.
    """
    if not os.path.isfile(checkpoint_path):
        check_and_print_opts(opts, None)
        return {}
    if backup:
        # Keep a uniquely named copy so a later save cannot clobber this state.
        random_uid = "".join(
            [random.choice(string.ascii_letters + string.digits) for n in range(4)]
        )
        backup_path = checkpoint_path + ".backup_" + random_uid
        copyfile(checkpoint_path, backup_path)
        print(">> Backing up checkpoint before loading and overwriting:")
        print(" {}\n".format(backup_path))
    checkpoint = torch.load(checkpoint_path)
    if verbose:
        print(">> Loading model from checkpoint {}".format(checkpoint_path))
        for opt, val in checkpoint["metadata"].items():
            print(" - {:>20}: {:<30}".format(opt, val))
        print("")
        check_and_print_opts(opts, checkpoint["options"])
    # Merge the checkpoint's options into the live opts dict (checkpoint wins).
    checkpoint_opts_dict = checkpoint["options"]
    if type(checkpoint_opts_dict) is not dict:
        checkpoint_opts_dict = vars(checkpoint_opts_dict)
    for opt, val in checkpoint_opts_dict.items():
        opts[opt] = val
    print(opts)
    trainer_modules = create_context_segment_modules(opts)
    state_dicts = checkpoint["model_state_dicts"]
    trainer_modules["context_net"].load_state_dict(state_dicts["context_net"])
    trainer_modules["seg_net"].load_state_dict(state_dicts["seg_net"])
    trainer_modules["optimizer"].load_state_dict(checkpoint["optimizer_state_dict"])
    if opts.get("seg_direction_net", False):
        # Fixed: read from the model_state_dicts map written by save_checkpoint.
        trainer_modules["seg_direction_net"].load_state_dict(
            state_dicts["seg_direction_net"]
        )
    return trainer_modules
def get_context_segment_trainer_modules(opts, checkpoint_path=None, backup=False, verbose=False):
    """Load trainer modules from ``checkpoint_path`` or build fresh ones.

    Robustness fix: the default ``checkpoint_path=None`` was previously
    passed straight to ``os.path.isfile`` inside the loader, raising
    TypeError; ``None`` now simply means "no checkpoint", so fresh modules
    are created.
    """
    trainer_modules = {}
    if checkpoint_path is not None:
        trainer_modules = load_context_segment_checkpoint(checkpoint_path, opts, backup, verbose)
    if len(trainer_modules) == 0:
        trainer_modules = create_context_segment_modules(opts)
    return trainer_modules
def check_and_print_opts(curr_opts, old_opts):
    """Print ``curr_opts`` and any mismatches against ``old_opts``.

    Returns True when at least one key shared by both dicts has a
    differing value, else False. ``old_opts`` may be None/empty.
    """
    mismatches = []
    print(">> Options:")
    for opt, val in curr_opts.items():
        print(" - {:>20}: {:<30}".format(opt, val))
        if old_opts and opt in old_opts and old_opts[opt] != val:
            mismatches.append((opt, val, old_opts[opt]))
    print("")
    if len(mismatches) > 0:
        print(">> Mismatching options:")
        for m in mismatches:
            print(" - {:>20}: new '{:<10}' != old '{:<10}'".format(m[0], m[1], m[2]))
        print("")
    # Idiom fix: replaces `return True if len(mismatches) > 0 else False`.
    return bool(mismatches)
def get_optim(model_params, opts):
    """Construct the optimizer named by opts["optim"] over ``model_params``.

    Supported: "adagrad" (default), "sgd", "adam". Raises on anything else.
    """
    kind = opts.get("optim", "adagrad")
    lr = opts.get("lr", 0.1)
    if kind == "adagrad":
        return optim.Adagrad(model_params, lr=lr)
    if kind == "sgd":
        return optim.SGD(model_params, lr=lr, momentum=opts.get("momentum", 0.0))
    if kind == "adam":
        return optim.Adam(model_params, lr=lr, betas=(0.9, 0.999))
    raise Exception("Undefined optim type {}".format(kind))
if __name__ == "__main__":
    # Smoke test: build a ValueNet from CLI options and run one forward pass.
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_words", type=int, default=3, help="number of words in embedding")
    parser.add_argument("--imsize", type=int, default=32, help="imsize, use 32 or 64")
    parser.add_argument("--num_layers", type=int, default=4, help="number of layers")
    parser.add_argument("--hsize", type=int, default=64, help="hidden dim")
    opts = vars(parser.parse_args())
    net = ValueNet(opts)
    # Batch of 7 all-zero 32^3 grids exercises the forward pass end to end.
    x = torch.LongTensor(7, 32, 32, 32).zero_()
    y = net(x)
| [
"drotherm@fb.com"
] | drotherm@fb.com |
e0e94e4d38d76f3390d1bccd9905611afc425cc0 | 692f9d0f891fa670c56d88b96312c8295fcf06b3 | /olivemain/tests/core/full_node/full_sync/test_full_sync.py | a5dba0bbfdcc896e275950cd9f6b04545b81cccc | [
"Apache-2.0"
] | permissive | aisuyi065/Olive-blockchain | 97302c1002eb140957fa57eb1932f683847b4d64 | a4f5d48597af90343279597a81fd6441f4de9223 | refs/heads/main | 2023-06-30T04:41:09.882393 | 2021-08-07T03:53:27 | 2021-08-07T03:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,659 | py | # flake8: noqa: F811, F401
import asyncio
import logging
import time
from typing import List
import pytest
from olive.full_node.weight_proof import _validate_sub_epoch_summaries
from olive.protocols import full_node_protocol
from olive.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from olive.types.full_block import FullBlock
from olive.types.peer_info import PeerInfo
from olive.util.hash import std_hash
from olive.util.ints import uint16
from tests.core.fixtures import default_400_blocks, default_1000_blocks, default_10000_blocks, empty_blockchain
from tests.core.node_height import node_height_exactly, node_height_between
from tests.setup_nodes import bt, self_hostname, setup_n_nodes, setup_two_nodes, test_constants
from tests.time_out_assert import time_out_assert
@pytest.fixture(scope="session")
def event_loop():
    # Share one asyncio event loop across the whole pytest session so all
    # async tests run on the same loop.
    loop = asyncio.get_event_loop()
    yield loop


log = logging.getLogger(__name__)
class TestFullSync:
    """End-to-end full-node sync tests: batch sync, long sync from zero,
    backtrack sync, deep reorgs, weight proofs, and bad-peer peaks."""

    # Per-test network fixtures; yielding from the setup generators ensures
    # their teardown code runs after each test.
    @pytest.fixture(scope="function")
    async def two_nodes(self):
        async for _ in setup_two_nodes(test_constants):
            yield _

    @pytest.fixture(scope="function")
    async def three_nodes(self):
        async for _ in setup_n_nodes(test_constants, 3):
            yield _

    @pytest.fixture(scope="function")
    async def four_nodes(self):
        async for _ in setup_n_nodes(test_constants, 4):
            yield _

    @pytest.fixture(scope="function")
    async def five_nodes(self):
        async for _ in setup_n_nodes(test_constants, 5):
            yield _

    @pytest.mark.asyncio
    async def test_long_sync_from_zero(self, five_nodes, default_400_blocks):
        """Fresh nodes long-sync a 400-block chain, then survive a deep reorg."""
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks = len(default_400_blocks)
        blocks: List[FullBlock] = default_400_blocks
        full_node_1, full_node_2, full_node_3, full_node_4, full_node_5 = five_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server
        server_4 = full_node_4.full_node.server
        server_5 = full_node_5.full_node.server

        # If this constant is changed, update the tests to use more blocks
        assert test_constants.WEIGHT_PROOF_RECENT_BLOCKS < 400

        # Syncs up less than recent blocks
        for block in blocks[: test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_2.full_node.on_connect
        )

        # The second node should eventually catch up to the first one
        await time_out_assert(
            150, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5 - 1
        )

        for block in blocks[
            test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5 : test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5
        ]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_3.full_node.on_connect
        )
        timeout_seconds = 150

        # Node 3 and Node 2 sync up to node 1
        await time_out_assert(
            timeout_seconds, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
        )
        await time_out_assert(
            timeout_seconds, node_height_exactly, True, full_node_3, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
        )

        # Drop all of node 1's connections before extending its chain.
        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()

        for block in blocks[test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 :]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # Reconnect the nodes in a partial mesh so they all sync up.
        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_2.full_node.on_connect
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_3.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_4.full_node.on_connect
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_3.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_3._port)), on_connect=full_node_4.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_4.full_node.on_connect
        )

        # All four nodes are synced
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_2, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_3, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_4, num_blocks - 1)

        # Deep reorg, fall back from batch sync to long sync
        blocks_node_5 = bt.get_consecutive_blocks(60, block_list_input=blocks[:350], seed=b"node5")
        for block in blocks_node_5:
            await full_node_5.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await server_5.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_5.full_node.on_connect
        )
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_5, 409)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, 409)

    @pytest.mark.asyncio
    async def test_sync_from_fork_point_and_weight_proof(self, three_nodes, default_1000_blocks, default_400_blocks):
        """Nodes on shorter/alternate chains sync to the heaviest peer; also
        exercises RequestProofOfWeight for known and unknown header hashes."""
        start = time.time()
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks_initial = len(default_1000_blocks) - 50
        blocks_950 = default_1000_blocks[:num_blocks_initial]
        blocks_rest = default_1000_blocks[num_blocks_initial:]
        blocks_400 = default_400_blocks
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server

        for block in blocks_950:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # Node 2 syncs from halfway
        for i in range(int(len(default_1000_blocks) / 2)):
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(default_1000_blocks[i]))

        # Node 3 syncs from a different blockchain
        for block in blocks_400:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)

        # Also test request proof of weight
        # Have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, blocks_950[-1].header_hash)
        )
        assert res is not None
        validated, _, _ = await full_node_1.full_node.weight_proof_handler.validate_weight_proof(
            full_node_protocol.RespondProofOfWeight.from_bytes(res.data).wp
        )
        assert validated

        # Don't have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, std_hash(b"12"))
        )
        assert res is None

        # The second node should eventually catch up to the first one, and have the
        # same tip at height num_blocks - 1
        await time_out_assert(180, node_height_exactly, True, full_node_2, num_blocks_initial - 1)
        await time_out_assert(180, node_height_exactly, True, full_node_3, num_blocks_initial - 1)

        def fn3_is_not_syncing():
            return not full_node_3.full_node.sync_store.get_sync_mode()

        await time_out_assert(180, fn3_is_not_syncing)
        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()
        # Feed node 3 the remaining blocks directly; its peak must follow.
        for block in blocks_rest:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))
            assert full_node_3.full_node.blockchain.get_peak().height >= block.height
        log.warning(f"FN3 height {full_node_3.full_node.blockchain.get_peak().height}")

        # TODO: fix this flaky test
        await time_out_assert(120, node_height_exactly, True, full_node_3, 999)
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_2._port)), full_node_3.full_node.on_connect)
        await time_out_assert(180, node_height_exactly, True, full_node_1, 999)
        await time_out_assert(180, node_height_exactly, True, full_node_2, 999)

    @pytest.mark.asyncio
    async def test_batch_sync(self, two_nodes):
        """Short-range (batch) sync between two nodes on different small chains."""
        # Must be below "sync_block_behind_threshold" in the config
        num_blocks = 20
        num_blocks_2 = 9
        blocks = bt.get_consecutive_blocks(num_blocks)
        blocks_2 = bt.get_consecutive_blocks(num_blocks_2, seed=b"123")
        full_node_1, full_node_2, server_1, server_2 = two_nodes

        # 20 blocks to node_1
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # 9 different blocks to node_2
        for block in blocks_2:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, num_blocks - 1)

    @pytest.mark.asyncio
    async def test_backtrack_sync_1(self, two_nodes):
        """Sync a tiny 3-block chain whose blocks span different sub slots."""
        blocks = bt.get_consecutive_blocks(1, skip_slots=1)
        blocks = bt.get_consecutive_blocks(1, blocks, skip_slots=0)
        blocks = bt.get_consecutive_blocks(1, blocks, skip_slots=0)
        full_node_1, full_node_2, server_1, server_2 = two_nodes

        # 3 blocks to node_1 in different sub slots
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, 2)

    @pytest.mark.asyncio
    async def test_backtrack_sync_2(self, two_nodes):
        """Sync a 9-block chain whose genesis skipped several sub slots."""
        blocks = bt.get_consecutive_blocks(1, skip_slots=3)
        blocks = bt.get_consecutive_blocks(8, blocks, skip_slots=0)
        full_node_1, full_node_2, server_1, server_2 = two_nodes

        # 9 blocks to node_1 (genesis in a later sub slot)
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, 8)

    @pytest.mark.asyncio
    async def test_close_height_but_big_reorg(self, three_nodes):
        """A much heavier chain (C) wins over two similar-height chains (A, B)."""
        blocks_a = bt.get_consecutive_blocks(50)
        blocks_b = bt.get_consecutive_blocks(51, seed=b"B")
        blocks_c = bt.get_consecutive_blocks(90, seed=b"C")
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server

        for block in blocks_a:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for block in blocks_b:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for block in blocks_c:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # Node 1 reorgs to node 2's slightly longer chain first.
        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_1, 50)
        await time_out_assert(60, node_height_exactly, True, full_node_2, 50)
        await time_out_assert(60, node_height_exactly, True, full_node_3, 89)
        # Then everyone reorgs to node 3's much heavier chain.
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_3.full_node.on_connect,
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)),
            on_connect=full_node_3.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_1, 89)
        await time_out_assert(60, node_height_exactly, True, full_node_2, 89)
        await time_out_assert(60, node_height_exactly, True, full_node_3, 89)

    @pytest.mark.asyncio
    async def test_sync_bad_peak_while_synced(self, three_nodes, default_1000_blocks, default_10000_blocks):
        """A peer advertising a heavier peak while unable to serve weight
        proofs must not knock an already-synced node into sync mode."""
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks_initial = len(default_1000_blocks) - 250
        blocks_750 = default_1000_blocks[:num_blocks_initial]
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server
        # Node 3 cannot serve weight proofs at all.
        full_node_3.full_node.weight_proof_handler = None
        for block in blocks_750:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Node 3 syncs from a different blockchain

        for block in default_10000_blocks[:1100]:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        # The second node should eventually catch up to the first one, and have the
        # same tip at height num_blocks - 1
        await time_out_assert(180, node_height_exactly, True, full_node_2, num_blocks_initial - 1)

        # set new heavy peak, fn3 cannot serve wp's
        # node 2 should keep being synced and receive blocks
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_3._port)), full_node_3.full_node.on_connect)
        # trigger long sync in full node 2
        peak_block = default_10000_blocks[1050]
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_3._port)), full_node_2.full_node.on_connect)
        con = server_2.all_connections[full_node_3.full_node.server.node_id]
        peak = full_node_protocol.NewPeak(
            peak_block.header_hash,
            peak_block.height,
            peak_block.weight,
            peak_block.height,
            peak_block.reward_chain_block.get_unfinished().get_hash(),
        )
        await full_node_2.full_node.new_peak(peak, con)
        await asyncio.sleep(2)
        # Node 2 must not be stuck in sync mode after the bad peak.
        assert not full_node_2.full_node.sync_store.get_sync_mode()
        for block in default_1000_blocks[1000 - num_blocks_initial :]:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))

        assert node_height_exactly(full_node_2, 999)

    @pytest.mark.asyncio
    async def test_block_ses_mismatch(self, two_nodes, default_1000_blocks):
        """Long sync against tampered sub-epoch summaries stops mid-chain."""
        full_node_1, full_node_2, server_1, server_2 = two_nodes
        blocks = default_1000_blocks

        for block in blocks[:501]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        peak1 = full_node_1.full_node.blockchain.get_peak()
        full_node_2.full_node.sync_store.set_long_sync(True)
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        # Extract the real sub-epoch summaries, then corrupt the second one.
        wp = await full_node_1.full_node.weight_proof_handler.get_proof_of_weight(peak1.header_hash)
        summaries1, _ = _validate_sub_epoch_summaries(full_node_1.full_node.weight_proof_handler.constants, wp)
        summaries2 = summaries1
        s = summaries1[1]
        # change summary so check would fail on 2 sub epoch
        summaries2[1] = SubEpochSummary(
            s.prev_subepoch_summary_hash,
            s.reward_chain_hash,
            s.num_blocks_overflow,
            s.new_difficulty * 2,
            s.new_sub_slot_iters * 2,
        )
        await full_node_2.full_node.sync_from_fork_point(0, 500, peak1.header_hash, summaries2)
        log.info(f"full node height {full_node_2.full_node.blockchain.get_peak().height}")
        # Sync should have aborted somewhere inside the corrupted sub-epoch.
        assert node_height_between(full_node_2, 320, 400)
| [
"87711356+Olive-blockchain@users.noreply.github.com"
] | 87711356+Olive-blockchain@users.noreply.github.com |
ffbb923905cedb23748806a6e5a210f52377acc7 | c42672aeac984ab3f57d840710e145f4e918ba01 | /nasws/cnn/search_space/monodepth/analysis.py | 872b130085e3b8170a5f7d4627a9b3fd1c6b5248 | [
"MIT"
] | permissive | kcyu2014/nas-landmarkreg | 00212b6015d1fef3e7198bfa596fa69a898167c2 | a00c3619bf4042e446e1919087f0b09fe9fa3a65 | refs/heads/main | 2023-07-21T19:52:19.392719 | 2021-08-24T09:37:24 | 2021-08-24T09:37:24 | 350,368,390 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | import os
import glob
from collections import defaultdict
from monodepth.utils.reporter import tensorboard_check_tags, tensorboard_load_summary, count_parameters_in_MB_search
from monodepth.utils.checkpointer import load_args
from monodepth.models import MidasNet, MidasNetSearch
from nni.nas.pytorch.fixed import FixedArchitecture
import torch
from monodepth.utils.checkpointer import load_json
from thop import profile
import numpy as np
def sort_tb_pairs(l, ignore_index=True):
    """Sort (step, value) pairs by step.

    Returns the values only (as a tuple) when ``ignore_index`` is True,
    otherwise the sorted pairs themselves.

    Robustness fix: an empty input previously raised IndexError in the
    ignore_index branch; it now returns an empty tuple.
    """
    slist = list(sorted(l, key=lambda x: x[0]))
    if not ignore_index:
        return slist
    if not slist:
        return ()
    # zip(*pairs) transposes to (steps, values); keep just the values.
    return list(zip(*slist))[1]
def average_last_K(l, top_K=5):
    """Average the last ``top_K`` entries of ``l`` (0.0 for an empty list).

    Bug fix: when fewer than ``top_K`` entries exist, the sum was previously
    still divided by ``top_K``, skewing the mean toward zero; the divisor is
    now the number of entries actually averaged. An empty list still yields
    0.0, matching the old behavior.
    """
    tail = l[-top_K:]
    if not tail:
        return 0.0
    return sum(tail) / len(tail)
def collect_experiment_kdt_from_tensorboard(path):
    """Average the trailing KdT/Spearman validation summaries of one run.

    Reads every tensorboard event file under <path>/log and returns a dict
    mapping the two sparse-correlation tags to their last-K averages.
    """
    args = load_args(path + '/args.json')
    # print(args)
    # store all the results as follow
    tb_paths = glob.glob(path + '/log/*')
    # defaultdict with no factory behaves like a plain dict here.
    res = defaultdict()
    for p in tb_paths:
        # print(p)
        tags = tensorboard_check_tags(p)
        for t in tags:
            steps, r = tensorboard_load_summary(p, t)
            # Merge (step, value) pairs across event files for the same tag.
            if t in res:
                res[t] += list(zip(steps, r))
            else:
                res[t] = list(zip(steps, r))
    tag_specified = [
        'validation/sparse_kdt_0.0001',
        'validation/sparse_spr_0.0001']
    final_res = {}
    for tag in tag_specified:
        d = sort_tb_pairs(res[tag])
        final_res[tag] = average_last_K(d)
    return final_res
def collect_experiment_result(path):
    """Load one experiment folder: tensorboard losses plus checkpointed model stats.

    Returns a tuple:
    (num_epoch, train_loss, valid_loss, num_param, arch,
     all_train_loss, all_valid_loss)
    """
    # the final evaluation model should be recomputed based on the results over server
    # load args
    args = load_args(path + '/args.json')
    # print(args)
    # store all the results as follow
    tb_paths = glob.glob(path + '/log/*')
    # NOTE(review): defaultdict() without a factory behaves like a plain dict.
    res = defaultdict()
    for p in tb_paths:
        # print(p)
        tags = tensorboard_check_tags(p)
        for t in tags:
            # Merge (step, value) pairs from every event file, per tag.
            steps, r = tensorboard_load_summary(p, t)
            if t in res:
                res[t] += list(zip(steps, r))
            else:
                res[t] = list(zip(steps, r))
    # print(res.keys())
    # collect the associated statistics
    num_epoch = len(res['train/sum'])
    # NOTE(review): num_channels / num_cells / seed are assigned but unused here.
    num_channels = 256 # fixed across the entire dataset
    num_cells = 4
    seed = 0
    # store all the intermediate results of 1 run.
    all_train_loss = sort_tb_pairs(res['train/sum'])
    all_valid_loss = sort_tb_pairs(res['validation/ReDWeb'])
    # Final losses: average over the last few logged values.
    train_loss = average_last_K(sort_tb_pairs(res['train/sum']))
    valid_loss = average_last_K(sort_tb_pairs(res['validation/ReDWeb']))
    # from the current log, this is at is. we do not have more to analyze
    # From this point, we need to get the result from checkpoint and store all the statistics accordingly
    # use this to directly apply
    arch = load_json(path + '/arch.json')
    print('processing architecture ',arch)
    # Rebuild the search-space model and pin it to the stored architecture.
    model = MidasNetSearch(backbone='resnext101_wsl', args=args)
    mutator = FixedArchitecture(model, arch)
    mutator.reset()
    ckpt_path = path + '/checkpoint.pt'
    if os.path.exists(ckpt_path):
        print('loading checkpoint...')
        checkpoint = torch.load(ckpt_path)
        model.load_state_dict(checkpoint['model'])
        print('finish loading the model ...')
    # count parameters
    num_param = count_parameters_in_MB_search(model, arch)
    return num_epoch, train_loss, valid_loss, num_param, arch, all_train_loss, all_valid_loss
| [
"16794548+kcyu2014@users.noreply.github.com"
] | 16794548+kcyu2014@users.noreply.github.com |
1d7242ee228c3499a7daee0cb5dc3b945a4c01c4 | 66eefa859d8e28a1872acf0876bdfd1452fff05b | /RadioButtons.py | 37c6ee279aff9b8d0b9ad0d61fad31379de1fd69 | [] | no_license | Aravinda93/Python_KinterApp_application | e42fb4dd954953401e7bb9e03f95cf4f1ddaed5f | d45b26be589bcdc0151fb06186561cb6ac68a258 | refs/heads/master | 2022-06-04T23:52:16.104014 | 2020-05-03T08:50:00 | 2020-05-03T08:54:42 | 260,862,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | from tkinter import *
from PIL import ImageTk, Image
# Build the main window for the radio-button demo.
root = Tk()
root.title('Radio Buttons')
root.iconbitmap('./Icon_File.ico')
#r = IntVar()
#r.set('2')
#Radiobutton(root,text='Option 1', variable=r, value=1, command= lambda: clicked(r.get())).pack()
#Radiobutton(root,text='Option 2', variable=r, value=2, command= lambda: clicked(r.get())).pack()
# (label, value) pairs for the pizza-topping radio buttons built below.
Toppings = [
    ("Margarita", "Margarita"),
    ("Cheese", "Cheese"),
    ("Onion", "Onion"),
    ("Mushroom", "Mushroom")
]
# Shared tkinter variable holding the currently selected topping.
pizza = StringVar()
pizza.set("Margarita")
for text, topping in Toppings:
    Radiobutton(root, text=text, variable=pizza, value=topping).pack(anchor=W)
def clicked(value):
    # Append a new label showing the current selection on every click.
    myLabel = Label(root, text=value)
    myLabel.pack()
#myLabel = Label(root, text=pizza.get())
#myLabel.pack()
myButton = Button(root, text='Click Me', command= lambda: clicked(pizza.get()))
myButton.pack()
root.mainloop()
"ABaliga@its.jnj.com"
] | ABaliga@its.jnj.com |
c2173d4f6bb9c57954235c69d64e9d11d6ea7e15 | c083798cc6d9a7d0df5ab5534a07f5e7fd915095 | /Curso estadistica descriptiva/datos cuantitativos/boxplots_con_python.py | a017310c7c626376f78b0e16852e76421966e807 | [
"MIT"
] | permissive | DarkShadow4/Python | 2e26be4d4e8660a5bf7e6d0748379e70d41189c5 | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | refs/heads/master | 2021-07-07T19:01:35.596813 | 2020-08-19T17:51:06 | 2020-08-19T17:51:06 | 174,096,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | import numpy as np
import matplotlib.pyplot as plt
# Fixed seed so the samples (and plots) are reproducible.
np.random.seed(19990519)
# Four sub-samples with different ranges, concatenated into one dataset.
dist1 = 100*np.random.rand(50)
dist2 = 50*np.random.rand(25)
dist3 = 100+100*np.random.rand(10)
dist4 = -100*np.random.rand(10)
data = np.concatenate((dist1, dist2, dist3, dist4))
# NOTE(review): each call draws into the same axes; plt.show() is never called.
plt.boxplot(data)
plt.boxplot(data, notch=True)
# Boxplot with customized outlier markers
greendiamonds = dict(markerfacecolor = "g", marker="D")
plt.boxplot(data, notch = True, flierprops = greendiamonds)
# Boxplot without outliers
plt.boxplot(data, showfliers = False)
# Horizontal boxplot
plt.boxplot(data, vert = False)
# Boxplots with whisker lengths different from the default
plt.boxplot(data, whis=0.75)
plt.boxplot(data, whis=3) # If the whiskers are asked to be longer than needed to eliminate the outliers, the rule applied is that the whiskers extend to the minimum and maximum values
plt.boxplot(data, whis=0)
"exxericxx@gmail.com"
] | exxericxx@gmail.com |
00868bf5c2508b4f24084132b710bd214998c056 | 524acbbc16eac0ef28da58ff9f79d02d7cadcf1b | /backend/shop_time/categories/views.py | be8c7d554b3cbf84338a20c5cf0a4fe64763644b | [] | no_license | ZandTree/art-shop | 47eb6ed6f328157c852cef6e324e4be5ab3592f8 | faa506fb62f845168b9874a720c0b62808245058 | refs/heads/master | 2023-06-01T06:40:28.755068 | 2021-06-18T23:53:57 | 2021-06-18T23:53:57 | 376,624,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | from rest_framework.views import APIView
from .models import Category
from .serializer import CategorySerializer
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
class CategoryList(APIView):
    """Return every category as a one-level parent/child tree."""
    permission_classes = (AllowAny,)

    def get(self, request, format=None):
        """Build the tree: root categories first, then attach direct children.

        Only one level of nesting is produced; deeper trees would need a
        recursive walk.
        """
        if not Category.objects.all().exists():
            # instead of 404 ( server error)
            return Response({'errors': 'No categories found'},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        categories = Category.objects.all()
        result = []
        for root in categories:
            if root.parent:
                continue
            node = {
                'id': root.id,
                'name': root.name,
                'slug': root.slug,
                'sub_categories': [
                    {'id': child.id, 'name': child.name, 'sub_categories': []}
                    for child in categories
                    if child.parent and child.parent.id == root.id
                ],
            }
            result.append(node)
        return Response({'categories': result}, status=status.HTTP_200_OK)
# def get_queryset(self, queryset=None):
#qs = Category.objects.all()
# TODO
# return queryset
# return queryset.get_cached_trees | [
"diafox2015@gmail.com"
] | diafox2015@gmail.com |
e8b5600959c1d3b58f0f27611d842e52ea095e4e | 4b8cf3702215220b875556e3c3600990f17e44e5 | /ctci/hackerrank_ctci_problems/ice_cream_two_sum.py | d739b117f48778223666961dd23fee8f71ad2e20 | [] | no_license | celeritas17/practice-makes-perfect | 8842795ab7cd397a8c916ed5ebec791970467656 | 600e65fbb30935f4fbd5e1951617ad4bf6e84755 | refs/heads/master | 2020-07-29T13:33:21.276609 | 2016-11-21T07:10:22 | 2016-11-21T07:10:22 | 73,665,026 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | t = int(input().strip())
# For each test case: read target amount m, flavor count n, and the n prices;
# print the 1-based indices of the two flavors whose prices sum to m.
for a0 in range(t):
    m = int(input().strip())
    n = int(input().strip())
    a = [int(x) for x in input().strip().split(' ')]
    # price -> earliest index seen; single pass gives an O(n) two-sum.
    cost_map = {}
    for index, cost in enumerate(a):
        if (m - cost) in cost_map:
            # Complement already seen: report both indices (1-based) and stop.
            print('{0:d} {1:d}'.format(cost_map[m - cost] + 1, index + 1))
            break
        else:
            cost_map[cost] = index
"ryan.koven@clicktripz.com"
] | ryan.koven@clicktripz.com |
fba04c2e430f603715e62015d0f0dff369156857 | 4eff087e9d93b70f8c137ff28000aff30030fbd0 | /OpenCV/Detect & Shot.py | 66851939eb1aff4e267e11cbeae0e35c1bd1436f | [] | no_license | hatamiarash7/Python-Projects | 3955fbc2653b6fac28498b7f3c3353e3ea533ef5 | 8de7a184d73666bcc9539ac710f54d79692de444 | refs/heads/master | 2022-07-27T04:22:34.039112 | 2022-05-25T06:43:53 | 2022-05-25T06:43:53 | 66,276,150 | 4 | 4 | null | 2022-05-05T13:23:15 | 2016-08-22T13:42:14 | Python | UTF-8 | Python | false | false | 1,115 | py | import cv2, math
camera_port = 0
image_number = 0  # running index used in the snapshot file names
ramp_frames = 30  # frames discarded so the camera can auto-adjust before a shot
frame = 0  # NOTE(review): starts as a counter but is overwritten with an image in the loop below
# Haar cascade for frontal-face detection; file must sit next to the script.
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
camera = cv2.VideoCapture(camera_port)
def take_shot():
    # Grab a single frame from the camera (the success flag is ignored).
    retval, im = camera.read()
    return im
def snapshot():
    # Discard ramp_frames frames, then write the next capture to disk.
    global image_number, frame
    for i in xrange(ramp_frames):
        temp = take_shot()
    print 'Taking image + ' , frame
    camera_capture = take_shot()
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    file = "/home/hatamiarash7/OpenCV - Python/snapshot_temp/test_image" + str(image_number) + ".png"
    image_number += 1
    frame += 15
    cv2.imwrite(file, camera_capture)
    return temp
while(True):
    # NOTE(review): 'global' at module level is a no-op here.
    global frame
    # NOTE(review): this replaces the integer counter 'frame' with the image array.
    _,frame=camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        # Draw two nested rectangles around each detected face.
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        cv2.rectangle(frame,(x+5,y-5),(x+w-5,y+h+5),(255,0,0),2)
    # NOTE(review): math.modf takes a single argument -- this line raises
    # TypeError at runtime; the intent was likely a periodic trigger such as
    # frame % 60 == 0 (with 'frame' kept as a counter).
    check = math.modf(frame , 60)
    if check == 0 : temp2 = snapshot()
    cv2.imshow('img',frame)
    if cv2.waitKey(25) == 27 : break  # Esc exits the loop
cv2.destroyAllWindows()
camera.release()
"hatamiarash7@gmail.com"
] | hatamiarash7@gmail.com |
d116cf499ae6b5ea0e40f3a62ee8e3bcd94e6a5e | 824f19d20cdfa26c607db1ff3cdc91f69509e590 | /random/strings/345. Reverse Vowels of a String.py | 88e11095e0c062114eea3783f47a5500cedfc1f9 | [] | no_license | almamuncsit/LeetCode | 01d7e32300eebf92ab54c983de6e183242b3c985 | 17aa340649574c37067ec170ceea8d9326be2d6a | refs/heads/master | 2021-07-07T09:48:18.069020 | 2021-03-28T11:26:47 | 2021-03-28T11:26:47 | 230,956,634 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | class Solution:
def reverseVowels(self, s: str) -> str:
s_list = list(s)
vowels = set({'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'})
left = 0
right = len(s_list)-1
while right > left:
if s_list[left] in vowels and s_list[right] in vowels:
s_list[left], s_list[right] = s_list[right], s_list[left]
left += 1
right -= 1
else:
if s_list[left] not in vowels:
left += 1
if s_list[right] not in vowels:
right -= 1
return ''.join(s_list)
sol = Solution()
print(sol.reverseVowels("leetcode"))
| [
"msarkar.cse@gmail.com"
] | msarkar.cse@gmail.com |
429956eddb56a7394a8ae202690bba6666176a3c | 451bc23c13bf5e4ea6894c5c388122396cb2266f | /06-Lessons/2/Activities/08-Evr_Itinerary/Solved/config.py | a45f08592bdc3a307e4e2d8d733fbfefb696f9e4 | [] | no_license | junepwk/UCF-VIRT-DATA-PT-04-2021-U-B | 92f54f5a13a9b48eec6e8ceca978bfa563ca6c39 | 30494d208a4af0ad6259056ffbf1c5f0009eb404 | refs/heads/main | 2023-07-17T23:21:33.939288 | 2021-09-02T00:55:08 | 2021-09-02T00:55:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | # Google API key
g_key = ""
| [
"43150545+wjosil@users.noreply.github.com"
] | 43150545+wjosil@users.noreply.github.com |
b96e448d5371d3490fb2925a38ecd17ddd7f10ef | 55ea867594ea4a7de0148da49707179f22f62e38 | /tags/annot-01-20-05/bin/gene_table.py | aaa0b48152afe673f5c34d1ee11cab1d59599173 | [] | no_license | polyactis/annot | 2120a1b717e0e8c211d8e7d4e83b6b91aa30b5f3 | 14ae017a68afa1afca2b96f848288fc3138361b1 | refs/heads/master | 2021-01-02T08:21:47.919049 | 2011-01-26T00:05:32 | 2011-01-26T00:05:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,579 | py | #!/usr/bin/env python
"""
Usage: gene_table.py -k SCHEMA -g ORGANISM [OPTION] DATADIR
Option:
-z ..., --hostname=... the hostname, zhoudb(default)
DATADIR is the directory containing all the datasets
-d ..., --dbname=... the database name, graphdb(default)
-k ..., --schema=... which schema in the database
-g ..., --organism=... two letter organism abbreviation
-u, --union takes the union of all genes in the datasets, default is intersection
-c, --commit commits the database transaction
-h, --help show this help
Examples:
gene_table.py -k hs_yh60 -g hs datasets/hs_wanted
Description:
This program sets up schema.gene from the datasets, which
are probably a subset from that organism's total datasets.
It depends on table graph.gene_id_to_no.
"""
import pickle, sys, os, psycopg, csv, getopt
from sets import Set
class gene_table:
	'''
	Initialize the local gene_id:gene_no mapping in table schema.gene

	Reads every dataset file under `dir`, counts how often each gene id
	occurs, keeps either the union or the intersection of the ids, and
	inserts the id->number mapping into schema.gene. The numbers come from
	graph.gene_id_to_no (loaded by dstruc_loadin).
	'''
	def __init__(self, dir, hostname, dbname, schema, orgn, union=0, needcommit=0):
		self.dir = dir
		#one shared connection/cursor pair is used for the whole run
		self.conn = psycopg.connect('host=%s dbname=%s'%(hostname, dbname))
		self.curs = self.conn.cursor()
		self.curs.execute("set search_path to %s"%schema)
		self.union = int(union)
		self.needcommit = int(needcommit)
		#maps both two-letter abbreviations and full names to the canonical organism name
		self.org_short2long = {'at':'Arabidopsis thaliana',
			'ce':'Caenorhabditis elegans',
			'dm':'Drosophila melanogaster',
			'hs':'Homo sapiens',
			'mm':'Mus musculus',
			'sc':'Saccharomyces cerevisiae',
			'Arabidopsis thaliana':'Arabidopsis thaliana',
			'Caenorhabditis elegans':'Caenorhabditis elegans',
			'Drosophila melanogaster':'Drosophila melanogaster',
			'Homo sapiens':'Homo sapiens',
			'Mus musculus':'Mus musculus',
			'Gorilla gorilla Pan paniscus Homo sapiens':'Homo sapiens',
			'Saccharomyces cerevisiae':'Saccharomyces cerevisiae'}
		self.organism = self.org_short2long[orgn]
		#mapping between gene_id and gene_no
		self.gene_id2gene_no = {}
		#mapping between gene_id and its occurence
		self.gene_id2freq = {}
		#unique gene collection, for database submission
		self.gene_set = Set()
	def dstruc_loadin(self):
		'''Fill self.gene_id2gene_no from graph.gene_id_to_no for this organism.'''
		#setup self.gene_id2gene_no
		#NOTE(review): SQL is built by %-formatting; fine for trusted input only.
		self.curs.execute("select gene_id, gene_no from graph.gene_id_to_no where organism='%s'"%self.organism)
		rows = self.curs.fetchall()
		for row in rows:
			self.gene_id2gene_no[row[0]] = row[1]
	def run(self):
		'''Count gene occurrences over all dataset files, pick union or intersection, then submit.'''
		#load in the data structures first.
		self.dstruc_loadin()
		#iterate over all the datasets, find all the genes
		files = os.listdir(self.dir)
		sys.stderr.write("\tTotally, %d files to be processed.\n"%len(files))
		#NOTE(review): unused variable kept as-is.
		new_yeast_gene_list = []
		for f in files:
			#NOTE(review): files.index(f) inside the loop makes progress reporting O(n^2).
			sys.stderr.write("%d/%d:\t%s\n"%(files.index(f)+1,len(files),f))
			f_path = os.path.join(self.dir, f)
			#Python 2 idiom: file() opens the dataset; rows are tab-separated, gene id in column 0.
			reader = csv.reader(file(f_path), delimiter='\t')
			for row in reader:
				if row[0] in self.gene_id2freq:
					#not first encountering
					self.gene_id2freq[row[0]] += 1
				else:
					#first encountering
					self.gene_id2freq[row[0]] = 1
			del reader
		if self.union:
			#take the union set
			self.gene_set = Set(self.gene_id2freq.keys())
		else:
			#take the intersection set
			for (gene_id, freq) in self.gene_id2freq.iteritems():
				if freq == len(files):
					#occur in all datasets
					self.gene_set.add(gene_id)
		sys.stderr.write("%d genes to be submitted\n"%len(self.gene_set))
		#database submission
		self.submit()
	def submit(self):
		'''Insert every collected gene into schema.gene; commit only if requested.'''
		sys.stderr.write("Database transacting...")
		for gene_id in self.gene_set:
			self.curs.execute("insert into gene(gene_id, gene_no) values ('%s', %d)"%\
				(gene_id, self.gene_id2gene_no[gene_id] ))
		if self.needcommit:
			self.conn.commit()
		sys.stderr.write("done.\n")
if __name__ == '__main__':
if len(sys.argv) == 1:
print __doc__
sys.exit(2)
try:
opts, args = getopt.getopt(sys.argv[1:], "hz:d:k:g:uc", ["help", "hostname=", "dbname=", "schema=", "organism=", "union", "commit"])
except:
print __doc__
sys.exit(2)
hostname = 'zhoudb'
dbname = 'graphdb'
schema = ''
organism = ''
union = 0
commit = 0
for opt, arg in opts:
if opt in ("-h", "--help"):
print __doc__
sys.exit(2)
elif opt in ("-z", "--hostname"):
hostname = arg
elif opt in ("-d", "--dbname"):
dbname = arg
elif opt in ("-k", "--schema"):
schema = arg
elif opt in ("-g", "--organism"):
organism = arg
elif opt in ("-u", "--union"):
union = 1
elif opt in ("-c", "--commit"):
commit = 1
if schema and organism and len(args) == 1:
instance = gene_table(args[0], hostname, dbname, schema, organism, union, commit)
instance.run()
else:
print __doc__
sys.exit(2)
| [
"(no author)@4ebff559-900f-0410-a220-daa186eb8a63"
] | (no author)@4ebff559-900f-0410-a220-daa186eb8a63 |
fca18013fb07368f89be1b33993994ad6bf196ec | c9ff31d952ad987ac82252cd105bb469bf9ece8b | /email_bot2.py | b90d73bb290914c742cecd4b51679393e733c32a | [] | no_license | jasdevdevelopment912/email_bot | d8b05a13cff70733e5e79dbba09f27e67c3abda7 | 18f4777051a4f8e3116a26302f2e3094942f97fc | refs/heads/main | 2023-07-24T03:42:49.715345 | 2021-09-07T06:13:17 | 2021-09-07T06:13:17 | 403,854,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import smtplib
import speech_recognition as sr
import pyttsx3
from email.message import EmailMessage
listener = sr.Recognizer()
engine = pyttsx3.init()
def talk(text):
    """Speak ``text`` aloud through the shared pyttsx3 engine; blocks until done."""
    engine.say(text)
    engine.runAndWait()
def get_info():
    """Listen on the microphone and return the recognized phrase in lowercase.

    Returns None when nothing could be captured or recognized (recognition
    failure, API error, or missing/unusable microphone).
    """
    try:
        with sr.Microphone() as source:
            print('listening...')
            voice = listener.listen(source)
            info = listener.recognize_google(voice)
            print(info)
            return info.lower()
    except (sr.UnknownValueError, sr.RequestError, OSError):
        # Fixed: narrowed from a bare ``except: pass`` that silently hid every
        # error, including programming mistakes and KeyboardInterrupt.
        return None
def send_email(receiver, subject, message):
    """Send a plain-text email via Gmail SMTP (STARTTLS on port 587).

    Fixed: the original never closed the SMTP connection; ``quit()`` now
    runs even when login or sending fails.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        # Make sure to give app access in your Google account
        server.login('sender_email', 'sender_password')
        email = EmailMessage()
        email['From'] = 'sender_email'
        email['To'] = receiver
        email['Subject'] = subject
        email.set_content(message)
        server.send_message(email)
    finally:
        server.quit()
# Address book: spoken recipient name -> email address.
email_list = {
    'hello': 'sender_email_address',
}
def get_email_info():
    """Voice-driven flow: ask for recipient, subject and body, then send.

    NOTE(review): get_info() may return None, and an unrecognized name
    raises KeyError on the email_list lookup -- there is no error handling.
    """
    talk('To Whom you want to send email')
    name = get_info()
    receiver = email_list[name]
    print(receiver)
    talk('What is the subject of your email?')
    subject = get_info()
    talk('Tell me the text in your email')
    message = get_info()
    send_email(receiver, subject, message)
get_email_info() | [
"noreply@github.com"
] | jasdevdevelopment912.noreply@github.com |
37c2f232d97c06d15de91eb5e0f0b6de7178a0a7 | 2b864a4979078e66cb8da2fbd7a8583bcaa00815 | /day_8/caser-cipher-3.py | 85a240e72c3f44b33f8dcd5beb2d4fc382ced904 | [] | no_license | yuvipatil007/python_new | e5bf2ab25b6f427a83ea6af1a40f4e4f7f924e09 | 25a4fdec6225134b1d756ff48356b48ee2b600b3 | refs/heads/master | 2023-08-22T21:43:45.207906 | 2021-10-06T19:50:46 | 2021-10-06T19:50:46 | 413,180,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
#TODO-1: Combine the encrypt() and decrypt() functions into a single function called caesar().
def caesar(plain_text, shift_amount, direction):
    """Caesar-shift ``plain_text`` and print the result.

    ``direction`` is "encode" (shift forward) or "decode" (shift back).

    Fixed/generalized vs. the original:
    - non-letter characters (spaces, punctuation, digits) are passed through
      instead of crashing on ``alphabet.index``;
    - shifts of any size work via modular arithmetic (shifts > 26 used to
      raise IndexError);
    - ``new_position`` can no longer be referenced before assignment when an
      unknown direction is given (anything but "encode" decodes).
    """
    offset = shift_amount if direction == "encode" else -shift_amount
    cipher_text = ""
    for letter in plain_text:
        if letter.isalpha():
            base = ord('a')
            cipher_text += chr((ord(letter) - base + offset) % 26 + base)
        else:
            cipher_text += letter
    print(f"The {direction}d text is {cipher_text}")
# def encrypt(plain_text, shift_amount):
# cipher_text = ""
# for letter in plain_text:
# position = alphabet.index(letter)
# new_position = position + shift_amount
# cipher_text += alphabet[new_position]
# print(f"The encoded text is {cipher_text}")
# def decrypt(cipher_text, shift_amount):
# plain_text = ""
# for letter in cipher_text:
# position = alphabet.index(letter)
# new_position = position - shift_amount
# plain_text += alphabet[new_position]
# print(f"The decoded text is {plain_text}")
# if direction == "encode":
# encrypt(plain_text=text, shift_amount=shift)
# elif direction == "decode":
# decrypt(cipher_text=text, shift_amount=shift)
#TODO-2: Call the caesar() function, passing over the 'text', 'shift' and 'direction' values.
caesar(text,shift,direction) | [
"yuvipatil007@gmail.com"
] | yuvipatil007@gmail.com |
1b88da6362b35e870c5a429dd1a976a8098479b0 | 69e5d9167378621a17514ef1f802365aef6f7baf | /kits19/src/config.py | 116fca0562e8c07270d14c9559b29e4242763b21 | [] | no_license | ylochman/ML-algorithms | beb0656e39a8aaa8ec68840236199bc35d7b02b5 | 3be362b35cfadbadd9d14e52dc28c6fce5ea3cc3 | refs/heads/master | 2023-05-22T18:29:46.788626 | 2020-03-25T05:40:58 | 2020-03-25T05:40:58 | 175,162,185 | 0 | 1 | null | 2021-06-15T11:30:50 | 2019-03-12T07:59:32 | Jupyter Notebook | UTF-8 | Python | false | false | 221 | py | import torch
config = {
'CHECKPOINT': "unet.pth",
'LR': 0.001,
'L2': 0,
'DEBUG': False,
'CUDA': torch.cuda.is_available(),
'DEVICE': torch.device("cuda" if torch.cuda.is_available() else "cpu")
}
| [
"sasha.chepurnoii@gmail.com"
] | sasha.chepurnoii@gmail.com |
68dccaff016d11cce153e1b9db7affab3c07bd9b | 01ea95d7301b9ad3b84f11c8cbcfe02d00017250 | /bin/until/echarts/Line.py | 74f27f3640b6945c26b0de1fc9a04cfdff387304 | [] | no_license | windyStreet/MQSTATIC | 82962ae7a43d015dac61cb6ffce8d8853e6774df | b5a3d3862bd824b4a08b1c29436e417a9590dcab | refs/heads/master | 2020-12-02T21:13:37.952192 | 2017-07-20T10:20:14 | 2017-07-20T10:20:14 | 96,275,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,518 | py | #!/usr/bin/env python
# !-*- coding:utf-8 -*-
import datetime
from bin.until import Logger
from bin.until import Time
from bin.until import Mongo
from bin.until import DBCODE
from bin.until import Filter
from bin.logic import BO
from bin.until import Data
L = Logger.getInstance()
class Line(object):
    """Builds an ECharts line-chart option dict from statistic queries.

    Each entry of ``_search_filter_infos`` describes one series: its Mongo
    collection, project/type/name of the statistic, and filter conditions.
    (Original header comment, translated: search_filter_infos=None, _step=60,
    _step_count=7, _title_text="data statistics", _type="line".)
    """
    def __init__(self, _search_filter_infos, _title_text, _type, _step=60, _step_count=7):
        self._search_filter_infos = _search_filter_infos
        self._step_count = _step_count
        self._step = _step
        self._title_text = _title_text
        self._type = _type
        # Query window boundaries; may be overridden by 'time' filter entries.
        self.start_time = None
        self.end_time = None
    def getFileter(self):
        # NOTE(review): unimplemented placeholder; name looks like a typo of getFilter.
        pass
    def getLineChartData(self):
        """Return {title, legend, xAxis, series} ready for ECharts.

        NOTE(review): raises NameError when _search_filter_infos is empty
        (xAxis_data is only assigned inside the loop), and the outer loop
        variable ``key`` is shadowed by the inner filter loop.
        """
        series = []
        _legend_datas = []
        for key in self._search_filter_infos:
            _legend_data = key
            _legend_datas.append(_legend_data)
            _search_filter_info = self._search_filter_infos[key]
            _project = _search_filter_info['project_name']
            self_collection = _search_filter_info['self_collection']
            _filter_infos = _search_filter_info['filter_infos']
            _statistic_type = _search_filter_info['statistic_type']
            _statistic_name = _search_filter_info['statistic_name']
            self.start_time = Time.getStartTime(step=self._step, step_count=self._step_count) # default window start time
            is_search_db = False
            for _filter_info in _filter_infos:
                key = _filter_info['key']
                relation = _filter_info['relation']
                value = _filter_info['value']
                if key == 'time' and (relation == DBCODE.GT or relation == DBCODE.GTE):
                    self.start_time = value # start time taken from the filter conditions
                elif key == 'time' and (relation == DBCODE.LTE or relation == DBCODE.LT):
                    self.end_time = value # end time taken from the filter conditions
                else:
                    # Any non-time condition forces a live per-bucket query.
                    is_search_db = True
            times = Time.getComputeTimes(start_time=self.start_time, end_time=self.end_time, step=self._step)
            series_data = [] # values for the y-axis
            if is_search_db is True: # multi-condition live query: one count per time bucket
                _self_filter = Filter.getInstance()
                _self_filter.filter("project", _project, DBCODE.EQ)
                _self_filter.filter("type", _statistic_type, DBCODE.EQ)
                for _filter_info in _filter_infos:
                    if _filter_info['key'] != 'time':
                        _self_filter.filter(_filter_info['key'], _filter_info['value'], _filter_info['relation'])
                for i in range(len(times) - 1):
                    _self_filter.filter("createtime", times[i], DBCODE.GT)
                    _self_filter.filter("createtime", times[i + 1], DBCODE.LTE)
                    _filter = _self_filter.filter_json()
                    count = self_collection.find(_filter).count()
                    series_data.append(count)
            else:
                # Use precomputed (batched) statistic results instead of live counts.
                res_collection = Mongo.getInstance(table=BO.BASE_statistic_res).getCollection()
                res_filter = Filter.getInstance()
                res_filter.filter("statistical_time", times[0], DBCODE.GT)
                res_filter.filter("statistical_time", times[-1], DBCODE.LTE)
                res_filter.filter("statistical_step", self._step, DBCODE.EQ)
                res_filter.filter("statistical_type", _statistic_type, DBCODE.EQ)
                res_filter.filter("statistical_project", _project, DBCODE.EQ)
                if Data.isNone(_statistic_name):
                    _statistic_name = None
                res_filter.filter("statistical_name", _statistic_name, DBCODE.EQ)
                print(res_filter.filter_json())
                ress = res_collection.find(res_filter.filter_json()).sort("statistical_time", -1) # newest first
                self._step_count = len(times) - 1
                series_data = Data.getD4tArr(len=self._step_count, default_value=0) # preallocated slots for the axis values
                # First try to assemble the stored data; when the newest bucket is
                # missing, backfill it with a live count.
                i = 0
                for res in ress:
                    if i == 0 and ress.count() != (len(times) - 1) and res['statistical_time'] != times[-1]:
                        # Backfill the newest missing bucket with a live query.
                        _self_filter = Filter.getInstance()
                        if not Data.isNone(_statistic_name):
                            _self_filter.filter("name", _statistic_name, DBCODE.EQ)
                        _self_filter.filter("project", _project, DBCODE.EQ)
                        _self_filter.filter("type", _statistic_type, DBCODE.EQ)
                        _self_filter.filter("createtime", times[-2], DBCODE.GT)
                        _self_filter.filter("createtime", times[-1], DBCODE.LTE)
                        _filter = _self_filter.filter_json()
                        count = self_collection.find(_filter).count()
                        series_data[i] = count
                        series_data[i + 1] = res['statistical_count']
                        i = i + 2
                    else:
                        series_data[i] = res['statistical_count']
                        i = i + 1
                # Stored results were fetched newest-first; flip to chronological order.
                series_data.reverse()
            xAxis_data = times[1:] # x-axis labels: the time points, dropping the leading boundary
            serie = {
                "name": _legend_data,
                "type": self._type,
                "showSymbol":False,
                "smooth":True,
                # "stack": '总量',  # (disabled: would stack series by "total")
                "data": series_data.copy() # values plotted on this series
            }
            series.append(serie)
        _result = {
            "title": {
                "text": self._title_text
            },
            "legend": {
                "data": _legend_datas.copy()
            },
            "xAxis": {
                "data": xAxis_data.copy()
            },
            "series": series
        }
        return _result
def getInsatnce(search_filter_infos=None, _title_text="数据统计", _type="line", _step=60, _step_count=7):
    """Factory for Line; returns None (with a warning) when no filters are given.

    NOTE(review): the name is a typo of ``getInstance`` but is kept because
    callers import it under this spelling. The default title string is
    Chinese for "data statistics".
    """
    if search_filter_infos is None:
        L.warn("init Line , not search_filter_infos par")
        return None
    return Line(search_filter_infos, _title_text, _type, _step, _step_count)
| [
"yq904276384@foxmail.com"
] | yq904276384@foxmail.com |
daa5151ecdd0e6255246f351a7bd9fe8a6322de9 | f323c22aa12212d2b32730fa435485dc66fafb7e | /bookseller/wsgi.py | aae45180e8fbb3ca91959823022aa61f5bda28b2 | [] | no_license | xinleima/azure-app-service-demo | 617b7519bd96e1607cbc75dc13fc2978b33d6942 | c83c834db651831b8ea1c058e85aeaebb4d5ea47 | refs/heads/master | 2023-04-26T23:14:01.094517 | 2019-11-26T11:16:58 | 2019-11-26T11:16:58 | 224,163,169 | 0 | 0 | null | 2023-04-21T20:42:37 | 2019-11-26T10:22:20 | HTML | UTF-8 | Python | false | false | 413 | py | """
WSGI config for bookseller project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookseller.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"1376260753@qq.com"
] | 1376260753@qq.com |
4442f06af05e88ccdffcc17cb294b3645525b836 | 29d1b8d1e01cda9c963b68074a4de18a67ef8c00 | /home_work_12 (2).py | 99d9ac6838eaff5c464b28f3ff3759fcacf019b8 | [
"MIT"
] | permissive | acc-cosc-1336/cosc-1336-fall-2017-stevepaul135 | fd515d3c7fdb75f408a045f4329efd6dfb783323 | 691cafe85cabd8f5829323fec77676d96c9225d4 | refs/heads/master | 2021-08-28T03:09:16.436604 | 2017-12-11T04:58:15 | 2017-12-11T04:58:15 | 103,597,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,082 | py | #Stephen Paul's 12th homework assignment
class Person:
def __init__(self, first_name, last_name):
self.name = first_name + ' ' + last_name
def displayPerson(self):
print("This persons name is", self.name)
class Student(Person):
    """A Person who is enrolled; adds an id and a private enrollment date."""

    def __init__(self, student_id, first_name, last_name, enroll_date):
        Person.__init__(self, first_name, last_name)
        self.student_id = student_id
        self.__enrollDate = enroll_date  # name-mangled, kept private

    def displayStudentInfo(self):
        """Print id, full name and enrollment date on one line."""
        print(self.student_id, self.name, self.__enrollDate)
class Professor(Person):
    """A Person who teaches; adds an id and the hire date."""

    def __init__(self, professor_id, first_name, last_name, hire_date):
        Person.__init__(self, first_name, last_name)
        self.professor_id = professor_id
        self.hire = hire_date

    def displayProfessoInfo(self):
        """Print id, full name and hire date (method-name typo kept for callers)."""
        print(self.professor_id, self.name, self.hire)
class Course:
    """A single course offering: id, title, credit hours, and its professor."""

    def __init__(self, course_id, title, credit_hours, professor):
        # professor is expected to be a Professor object (only .name is used).
        (self.course_id, self.title, self.hours, self.professor) = (
            course_id, title, credit_hours, professor)

    def displayCourseInfo(self):
        """Print the course fields followed by the professor's name."""
        print(self.course_id, self.title, self.hours, self.professor.name)
class Enrollment:
    """Ties a student to a course under an enrollment id, with a letter grade."""

    def __init__(self, enrollment_id, student, course, grade):
        self.enrollment_id = enrollment_id
        self.course = course      # expected to be a Course object
        self.student = student    # expected to be a Student object
        self.grade = grade

    def displayEnrollment(self):
        """Print one fixed-width transcript-style line for this enrollment."""
        columns = (
            format(self.enrollment_id, '3'), " ",
            format(self.course.title, '17'),
            format(self.course.hours, '12'), ' ',
            format(self.student.name, '24'),
            format(self.grade, '10'),
        )
        print(*columns)

    def changeGrade(self, new_grade):
        """Overwrite the recorded grade."""
        self.grade = new_grade
class Transcript:
    """Collects a student's enrollments and prints a GPA transcript."""

    # Letter grade -> grade points; W, I and not-yet-updated grades carry none.
    _GRADE_POINTS = {'A': 4, 'B': 3, 'C': 2, 'D': 1, 'F': 0}

    def __init__(self, student):
        self.student_enrollments = {}  # enrollment_id -> Enrollment
        self.student = student         # should be a Student object

    def addEnrollments(self, enrollment):
        """Register an Enrollment object under its enrollment_id."""
        self.student_enrollments[enrollment.enrollment_id] = enrollment

    def displayTranscript(self):
        """Print one line per enrollment plus credit-hour totals and GPA.

        Fixed: the original crashed with ZeroDivisionError when the student
        had no graded credit hours (all W/I or ungraded); GPA now prints
        as 'N/A' in that case.
        """
        print("Name ", self.student.name)
        print("Class ", "Credit Hours", "Credit Points", "Grade Points", "Grade")
        total_hours = 0
        total_grade_points = 0
        for enrollment in self.student_enrollments.values():
            credit_points = self._GRADE_POINTS.get(enrollment.grade, " ")
            if credit_points == " ":
                # W, I, or a grade never updated: shown blank, excluded from GPA.
                grade_points = " "
            else:
                grade_points = credit_points * enrollment.course.hours
                total_hours += enrollment.course.hours
                total_grade_points += grade_points
            print(format(enrollment.course.title, '15'),
                  format(enrollment.course.hours, "11"),
                  format(credit_points, '12'),
                  format(grade_points, '13'), " ",
                  format(enrollment.grade, '5'))
        print('-' * 60)
        print(format(total_hours, "26"), format(total_grade_points, "25"))
        if total_hours:
            print("GPA :", total_grade_points / total_hours)
        else:
            print("GPA : N/A")
class Gradebook:
    """In-memory gradebook preloaded with sample students, professors,
    courses and enrollments (keyed by their respective ids)."""

    def __init__(self):
        # --- students: (student_id, first_name, last_name, enrollment_date) ---
        student_rows = [
            (1, "Carson", "Alexander", "09012005"),
            (2, "Meredith", "Alonso", "09022002"),
            (3, "Arturo", "Anand", "09032003"),
            (4, "Gytis", "Barzdukas", "09012001"),
            (5, "Peggy", "Justice", "09012001"),
            (6, "Laura", "Norman", "09012003"),
            (7, "Nino", "Olivetto", "09012005"),
        ]
        self.students = {}
        for sid, first, last, enrolled in student_rows:
            self.students[sid] = Student(sid, first, last, enrolled)

        # --- professors: (professor_id, first_name, last_name, hire_date) ---
        professor_rows = [
            (1, "Kim", "Abercrombie", "1995-03-11"),
            (2, "Fadi", "Fakhouri", "2002-07-06"),
            (3, "Roger", "Harui", "1998-07-01"),
            (4, "Candace", "Kapoor", "2001-01-15"),
            (5, "Roger", "Zheng", "2004-02-12"),
        ]
        self.professors = {}
        for pid, first, last, hired in professor_rows:
            self.professors[pid] = Professor(pid, first, last, hired)

        # --- courses: (course_id, title, hours, professor_id) ---
        course_rows = [
            (1050, "Chemistry", 3, 1),
            (4022, "Microeconomics", 3, 2),
            (4041, "Macroeconomics", 3, 3),
            (1045, "Calculus", 4, 4),
            (3141, "Trigonometry", 4, 4),
            (2021, "Composition", 3, 5),
            (2042, "Literature", 4, 5),
        ]
        self.courses = {}
        for cid, title, hours, pid in course_rows:
            self.courses[cid] = Course(cid, title, hours, self.professors[pid])

        # --- enrollments: (enrollment_id, student_id, course_id) ---
        # The enrollment id is the student id prepended to a course id.
        # NOTE(review): ids 22021 and 54041 point at courses 4041 and 2021,
        # which do not match the course encoded in the id; this mapping is
        # preserved exactly from the original data — confirm whether intended.
        enrollment_rows = [
            (11050, 1, 1050),
            (14022, 1, 4022),
            (14041, 1, 4041),
            (21045, 2, 1045),
            (23141, 2, 3141),
            (22021, 2, 4041),
            (31050, 3, 1050),
            (41050, 4, 1050),
            (44022, 4, 4022),
            (54041, 5, 2021),
            (61045, 6, 1045),
            (73141, 7, 3141),
        ]
        self.enrollments = {}
        for eid, sid, cid in enrollment_rows:
            # Grade starts as the placeholder string " " (ungraded).
            self.enrollments[eid] = Enrollment(eid, self.students[sid], self.courses[cid], " ")
| [
"noreply@github.com"
] | acc-cosc-1336.noreply@github.com |
d12baed8bb90fd2eb5fc8f373ff621f866f1a512 | 1f3bbec6cce4c8d5388fdc978c61767112e286ef | /tests/unit/plugins/test_stackoverflow.py | 8604d6cd69dc601600aed40a767d3be5c54c2ffd | [
"MIT"
] | permissive | igorsobreira/eizzek | 615caae8c3a998084273e5b5d5743708d96bb95d | 2144e859f56130cbfa05a2a6604d3255ac81dbbf | refs/heads/master | 2021-01-02T23:14:03.834318 | 2010-09-21T04:25:07 | 2010-09-21T04:25:07 | 754,462 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,219 | py | import os.path
import unittest
from eizzek.plugins.stackoverflow import build_response, QuestionsParser, TaggedQuestionsParser
from eizzek import registry, PluginResolver
class RegexTestCase(unittest.TestCase):
    """Check that the 'stackoverflow' plugin regex parses limit/tag arguments.

    The registered plugin callback is swapped for a recording stub so these
    tests exercise only the regex dispatch, never the network-facing function.
    """

    def setUp(self):
        # Save and unregister the original plugin so tearDown can restore it.
        self.stackoverflow_regex, self.stackoverflow_function = registry.plugins['stackoverflow']
        registry.unregister('stackoverflow')
        # Register a stub that records how the regex groups were parsed.
        self.called = False
        self.tag = None
        self.limit = 50
        def stackoverflow_mock(conn, limit=None, tag=None, **kw):
            self.called = True
            self.tag = tag
            # Mirror the real plugin's default: missing/empty limit means 50.
            self.limit = int(limit) if limit else 50
        registry.register('stackoverflow', self.stackoverflow_regex, stackoverflow_mock)
        self.resolver = PluginResolver()

    def tearDown(self):
        # Undo the mock: put the original plugin back into the registry.
        registry.unregister('stackoverflow')
        registry.register('stackoverflow', self.stackoverflow_regex, self.stackoverflow_function)

    def test_simple(self):
        # Bare command: no tag, default limit of 50.
        self.resolver.resolve('stackoverflow', {})
        assert self.called
        assert self.tag is None
        assert 50 == self.limit

    def test_tagged(self):
        # A word argument is parsed as the tag; limit keeps its default.
        self.resolver.resolve('stackoverflow python', {})
        assert self.called
        assert 'python' == self.tag
        assert 50 == self.limit

    def test_limit(self):
        # A purely numeric argument is the limit, not a tag.
        self.resolver.resolve('stackoverflow 10', {})
        assert self.called
        assert self.tag is None
        assert 10 == self.limit

    def test_tagged_limit(self):
        # Both arguments: limit first, then tag.
        self.resolver.resolve('stackoverflow 15 python', {})
        assert self.called
        assert 'python' == self.tag
        assert 15 == self.limit

    def test_different_tags(self):
        # Tags may contain punctuation (+, #, ., -); the regex must accept all.
        tags = ('c++', 'c#', 'regular-language', 'asp.net', '.net', 'actionscript-3')
        for tag in tags:
            self.resolver.resolve('stackoverflow ' + tag, {})
            assert self.called
            assert tag == self.tag
            self.called, self.tag = False, None
class ParseTestCase(unittest.TestCase):
    """Parse the saved Stack Overflow HTML fixtures and verify extraction."""

    here = os.path.realpath(os.path.dirname(__file__))
    python_tag_page = os.path.join(here, 'stackoverflow_python_tag.html')
    index_page = os.path.join(here, 'stackoverflow_top_questions.html')

    def _read(self, path):
        # Return the whole fixture file as one string.
        with open(path) as fh:
            return fh.read()

    def setUp(self):
        # Parse both fixture pages up front; the tests only inspect results.
        self.index_data = QuestionsParser().parse(self._read(self.index_page))
        self.tagged_data = TaggedQuestionsParser().parse(self._read(self.python_tag_page))

    def test_read_all_elements(self):
        assert len(self.tagged_data) == 50
        assert len(self.index_data) == 96

    def test_read_limited_elements(self):
        limited = QuestionsParser().parse(self._read(self.index_page), limit=10)
        assert len(limited) == 10

    def test_tagged_question_attributes(self):
        first = self.tagged_data[0]
        assert first['summary'] == u'Python Rpy R data processing optimization'
        assert first['link'] == u'http://stackoverflow.com/questions/3242670/python-rpy-r-data-processing-optimization'
        assert first['tags'] == [u'python', u'r', u'rpy2']
        assert first['votes'] == '0'
        assert first['answers'] == '0'
        assert first['views'] == '0'

    def test_index_question_attributes(self):
        first = self.index_data[0]
        assert first['summary'] == u'How to multiply two big big numbers'
        assert first['link'] == u'http://stackoverflow.com/questions/3275986/how-to-multiply-two-big-big-numbers'
        assert first['tags'] == [u'java', u'arrays', u'homework', u'problem', u'multiplication']
        assert first['votes'] == '6'
        assert first['answers'] == '7'
        assert first['views'] == '251'
class BuildResponseTestCase(unittest.TestCase):
    """Check the plain-text formatting produced by build_response()."""

    def setUp(self):
        # Two canned questions; build_response only formats, never fetches.
        self.questions = [
            {
                'summary': 'Is it possible to mix generator and a recursive function ?',
                'link': 'http://stackoverflow.com/questions/3276956/pyhon-is-it-possible-to-mix-generator-and-a-recursive-function',
                'tags': ['python', 'recursive'],
                'votes': '1',
                'answers': '3',
                'views': '20',
            },
            {
                'summary': 'Set Django ModelForm visible fields at runtime?',
                'link': 'http://stackoverflow.com/questions/3276896/set-django-modelform-visible-fields-at-runtime',
                'tags': ['python', 'django'],
                'votes': '4',
                'answers': '2',
                'views': '10',
            },
        ]

    def test_no_tag(self):
        # Blank-line-separated blocks: header first, then one block per question.
        blocks = build_response(self.questions).split('\n\n')
        assert len(blocks) == 3
        assert blocks[0] == u'Stack Overflow: Top Questions'
        expected = [
            ['Is it possible to mix generator and a recursive function ?',
             'http://stackoverflow.com/questions/3276956/pyhon-is-it-possible-to-mix-generator-and-a-recursive-function',
             'Tags: python, recursive. (votes: 1, answers: 3, views: 20)'],
            ['Set Django ModelForm visible fields at runtime?',
             'http://stackoverflow.com/questions/3276896/set-django-modelform-visible-fields-at-runtime',
             'Tags: python, django. (votes: 4, answers: 2, views: 10)'],
        ]
        for block, lines in zip(blocks[1:], expected):
            assert block.split('\n') == lines

    def test_tagged(self):
        # A tag changes only the header line.
        blocks = build_response(self.questions, tag='recursive').split('\n\n')
        assert len(blocks) == 2
        assert blocks[0] == u'Stack Overflow: recursive tag'
| [
"igor@igorsobreira.com"
] | igor@igorsobreira.com |
de34a902c1cf0f225a3a4a48862d4adbda05a125 | b628c03aeaa7f77c2044ed4f65b92ca68dd48f6c | /absoluteCLF/src/util.py | be1604da231b2f1af13036ed5161bcd25f19dd84 | [
"MIT"
] | permissive | horoiwa/VisionML | 75ff8907cd1fe7b4fcaafd8d9ad6687a7284fef7 | 95fa454a4c309f5e985dff1d067dd8341d260a3c | refs/heads/master | 2020-08-31T22:20:22.618305 | 2019-11-02T15:20:20 | 2019-11-02T15:20:20 | 218,785,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | import os
import shutil
def cleanup(mode='all'):
    """Delete the generated working directories, if they exist.

    NOTE(review): ``mode`` is accepted but never consulted — every folder is
    always removed. TODO confirm the intended semantics of ``mode``.
    """
    for target in ('__dataset__', '__checkpoints__', 'image_test',
                   'config_test'):
        if os.path.exists(target):
            shutil.rmtree(target)
def folder_check():
    """Validate the expected ``images/`` directory layout.

    Requires ``images/train``, ``images/valid`` and ``images/test`` to exist,
    and the class sub-folders of train and valid to be the same set.

    Raises
    ------
    Exception
        If a required folder is missing or train/valid class folders differ.
    """
    contents = os.listdir('images')  # list once instead of three times
    if 'train' not in contents:
        raise Exception("Error: train folder not exists")
    elif 'valid' not in contents:
        raise Exception("Error: valid folder not exists")
    elif 'test' not in contents:
        raise Exception("Error: test folder not exists")
    dirs_train = os.listdir('images/train')
    dirs_valid = os.listdir('images/valid')
    # Bug fix: os.listdir returns entries in arbitrary order, so comparing the
    # raw lists could spuriously fail on ordering alone; compare as sets.
    if set(dirs_train) != set(dirs_valid):
        raise Exception("Error: train and valid not consisted")
def get_uniquename(name, n):
    """Return ``name + str(i)`` (without extension) for the first index
    ``i >= n`` whose ``.hdf5`` file does not already exist.

    Bug fix: the original recursed when ``name + str(n) + '.hdf5'`` existed
    but discarded the recursive result, so it implicitly returned ``None``
    whenever the first candidate was taken. An iterative scan avoids both the
    dropped return and unbounded recursion depth.
    """
    while os.path.exists(name + str(n) + '.hdf5'):
        n += 1
    return name + str(n)
def get_latestname(name, n):
    """Return the path ``name + str(i) + '.hdf5'`` for the highest
    consecutive index ``i >= n`` that exists on disk.

    Returns ``None`` when ``name + str(n) + '.hdf5'`` itself is missing.

    Bug fix: the original recursed to the next index but dropped the
    recursive call's return value, so any chain longer than one file
    produced ``None``.
    """
    currentname = name + str(n) + '.hdf5'
    nextname = name + str(n + 1) + '.hdf5'
    if os.path.exists(nextname):
        return get_latestname(name, n + 1)
    elif os.path.exists(currentname):
        return currentname
    else:
        return None
def make_defaultfolder():
    """Create the default images/{test,train,valid} directory tree.

    Raises FileExistsError if any of the folders already exists
    (os.makedirs default behaviour).
    """
    for path in ('images/test', 'images/train', 'images/valid'):
        os.makedirs(path)
if __name__ == '__main__':
    # Manual smoke test: verify the expected images/ layout when run directly.
    folder_check()
| [
"horoiwa195@gmail.com"
] | horoiwa195@gmail.com |
90d6f660fc707b3274db7f0914067230d68bc7f1 | cc3553618de66276b7bd72175c7f975cf073ce89 | /Examples/scripts/binning_2.py | 9446c4b4205c8d723bf4ca669713ebf34ba53ff7 | [
"MIT"
] | permissive | brianpm/hacknostics | 4c221cb1c2beafc759ee39dcafa143071dd4e7fe | a792eae6f50ef3efd4bea646bac82963d07c23a1 | refs/heads/master | 2022-08-07T06:42:33.019906 | 2022-06-22T23:11:22 | 2022-06-22T23:11:22 | 190,219,024 | 3 | 2 | MIT | 2020-02-21T04:34:35 | 2019-06-04T14:33:50 | Jupyter Notebook | UTF-8 | Python | false | false | 5,013 | py | ;***************************************************************
; binning_2.ncl
; NOTE(review): this is NCL source (run with ncl), despite the .py file path.
;
; Concepts illustrated:
; - Create an array that spans the desired area
; - Read data [ here, create bogus data]
; - Loop over data and count instances of occurence
; - Plot the data
;
;***************************************************************
;
; These files are loaded by default in NCL V6.2.0 and newer
; load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
; load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
;**************************************************************************
;--- Create desired grid. Here, 2x2 but can be (say) 1x3 if dlat=1, dlon= 3
;**************************************************************************
latS = 0
latN = 70
lonW = -120
lonE = 0
dlat = 2.0
dlon = 2.0
nlat = toint((latN-latS)/dlat) + 1
mlon = toint((lonE-lonW)/dlon) + 1
lat = fspan(latS, latN, nlat)
lon = fspan(lonW, lonE, mlon)
lat@units = "degrees_north"
lon@units = "degrees_east"
count = new( (/nlat,mlon/), "float", 1e20)
count!0 = "lat"
count!1 = "lon"
count&lat = lat
count&lon = lon
valavg = count
;********************************************************************
;--- Read data ===> Here, create bogus data
;********************************************************************
clat = random_normal( 23, 10, 10000)
clon = random_normal(-90, 10, 10000)
cval = random_normal( 75, 20, 10000)
clon = where(clon.lt.lonW, lonW, clon) ; deal with bogus outliers
clon = where(clon.gt.lonE, lonE, clon)
clat = where(clat.lt.latS, latS, clat)
clat = where(clat.gt.latN, latN, clat)
;********************************************************************
;--- Bin count and sum; This assumes a simple rectilinear grid
;********************************************************************
count = 0
valavg = 0
npts = dimsizes(clat)
; jl/il are zero-based bin indexes; toint truncates, so each point is
; assigned to the dlat x dlon cell containing it.
do n=0,npts-1
if (clat(n).ge.latS .and. clat(n).le.latN .and. \
clon(n).ge.lonW .and. clon(n).le.lonE .and. \
.not.ismissing(cval(n)) ) then
jl = toint((clat(n)-latS)/dlat)
il = toint((clon(n)-lonW)/dlon)
count(jl,il) = count(jl,il) + 1
valavg(jl,il) = valavg(jl,il) + cval(n)
end if
end do
;count@long_name = "Occurrence Count"
;count@units = ""
printVarSummary(count)
print("count: min="+min(count)+" max="+max(count))
count = where(count.eq.0, count@_FillValue,count) ; don't divide by 0
;********************************************************************
;--- Average
;********************************************************************
valavg = valavg/count
;valavg@long_name = "..."
;valavg@units = "..."
printVarSummary(valavg)
print("valavg: min="+min(valavg)+" max="+max(valavg))
;********************************************************************
;--- Bin frequency (%)
;********************************************************************
freq = count
freq = (count/npts)*100
;freq@long_name = "frequency"
;freq@units = "%"
printVarSummary(freq)
print("freq: min="+min(freq)+" max="+max(freq))
;************************************************
; create plot
;************************************************
; Empty bins are set to _FillValue so they are left blank in the raster plot.
freq = where(freq .eq.0, freq@_FillValue, freq) ;
plot = new (3, "graphic")
wks = gsn_open_wks("png","binning") ; send graphics to PNG file
res = True ; plot mods desired
res@gsnAddCyclic = False
res@gsnDraw = False
res@gsnFrame = False
res@cnFillOn = True ; turn on color fill
res@cnFillPalette = "BlAqGrYeOrRe" ; set color map
res@cnFillMode = "RasterFill" ; Raster Mode
res@cnLinesOn = False ; turn of contour lines
res@lbOrientation = "vertical"
res@mpMinLatF = latS
res@mpMaxLatF = latN
res@mpMinLonF = lonW
res@mpMaxLonF = lonE
res@mpCenterLonF = (lonE+lonW)*0.5
res@mpGridAndLimbOn = True
res@mpGridLineDashPattern = 2 ; Dashed lines
res@mpGridLatSpacingF = 5.0
res@mpGridLonSpacingF = 10.0
res@cnLevelSpacingF = 1.0 ; contour spacing
res@gsnCenterString = "Occurrence Count"
plot(0) = gsn_csm_contour_map(wks,count, res)
res@cnLevelSpacingF = 0.05 ; contour spacing
res@gsnCenterString = "Frequency (%)"
plot(1) = gsn_csm_contour_map(wks,freq , res)
res@cnLevelSpacingF = 5.0 ; contour spacing
res@gsnCenterString = "Average"
plot(2) = gsn_csm_contour_map(wks,valavg,res)
resP = True ; modify the panel plot
resP@gsnMaximize = True
;;resP@gsnPanelMainString = "A common title"
gsn_panel(wks,plot,(/3,1/),resP) ; now draw as one plot
| [
"brianpm@ucar.edu"
] | brianpm@ucar.edu |
34f47be9ef55d3d72a7abc700bc1d17d771fd10e | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/scaleform/daapi/view/meta/serverstatsmeta.py | 4a28fbbfafb863429e1181c69015f9b805026cc8 | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ServerStatsMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class ServerStatsMeta(BaseDAAPIComponent):
    """DAAPI meta-class mirroring the ServerStats Flash component interface.

    The plain methods are stubs meant to be overridden by a concrete view;
    the ``as_...S`` methods forward calls to the Flash object once the
    DAAPI bridge has been initialised.
    """

    def getServers(self):
        self._printOverrideError('getServers')

    def relogin(self, id):
        self._printOverrideError('relogin')

    def isCSISUpdateOnRequest(self):
        self._printOverrideError('isCSISUpdateOnRequest')

    def startListenCsisUpdate(self, startListenCsis):
        self._printOverrideError('startListenCsisUpdate')

    def as_setPeripheryChangingS(self, isChanged):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setPeripheryChanging(isChanged)

    def as_setServersListS(self, servers):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setServersList(servers)

    def as_disableRoamingDDS(self, disable):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_disableRoamingDD(disable)

    def as_setServerStatsS(self, stats, tooltipType):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setServerStats(stats, tooltipType)

    def as_setServerStatsInfoS(self, tooltipFullData):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setServerStatsInfo(tooltipFullData)
| [
"info@webium.sk"
] | info@webium.sk |
def classifica_idade(x):
    """Classify an age in years: 'crianca' (<12), 'adolescente' (12-17),
    'adulto' (18+).

    Bug fix: the original tested ``x <= 11`` then ``12 <= x <= 17``, so a
    non-integer age in the gap (e.g. 11.5) fell through to 'adulto'.
    Strict upper bounds close the gap; integer inputs are unaffected.
    """
    if x < 12:
        return 'crianca'
    elif x < 18:
        return 'adolescente'
    else:
        return 'adulto'
| [
"you@example.com"
] | you@example.com |
10d9141cdba16b8dd41e1990f2c5db77784d943c | b5c64bde359a14e21b785b85fc26ae740044cba8 | /uploads/settings.py | d77dafd24d517f42d1f2bcf538796b6a96aa77c2 | [
"MIT"
] | permissive | werwlo/Horoskop | 8ef6c739fed94282b1c39681db301e2f6871f468 | 361f448828d6d01b26be817fb82f7f793782d64d | refs/heads/master | 2022-04-29T18:21:21.465464 | 2020-01-20T12:17:22 | 2020-01-20T12:17:22 | 222,987,759 | 0 | 1 | MIT | 2022-04-22T22:45:12 | 2019-11-20T17:07:08 | HTML | UTF-8 | Python | false | false | 3,497 | py | """
Django settings for uploads project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'e#-^aknk(5k)ej6rh#h$i(%h(m9)-j*lwrc_1dxnk=a@-mixlt'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True with an empty ALLOWED_HOSTS is development-only.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'uploads.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'uploads/templates'),],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]
WSGI_APPLICATION = 'uploads.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
# User-uploaded files are served from MEDIA_URL and stored under MEDIA_ROOT.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"noreply@github.com"
] | werwlo.noreply@github.com |
e7317c8785753725ec1d2975e007271c55247b94 | 179806a654a1fae15ee9af53d79d14874cd86ccc | /class/polymorphism.py | e7d3f29d45cbe956eeab5fec150bd05bb3bf14e6 | [] | no_license | Suhaiz/first | 6400cd26eb0e703482847133cf6bcf6fa0cb1df3 | 253f86cc5438b4d2963588f7e01d45ac068e1f0e | refs/heads/master | 2020-11-29T20:02:51.519735 | 2019-12-26T06:21:04 | 2019-12-26T06:21:04 | 230,205,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | # many forms...........same name different functionalities
#
# Polymorphism: "many forms" — the same name with different behaviour.
#
# 1. Method overloading
#    Two methods with the same name but different parameters inside one
#    class. Python does not support this directly: a later def with the
#    same name simply replaces the earlier one.
# 2. Method overriding
#    Inheritance is mandatory: the child class redefines a method that the
#    parent already provides.
#
# Example of method overriding:
class parent:
    def phone(self):
        print("i have nokia 1166")


# Bug fix: the original declared `class child:` without inheriting from
# parent, so nothing was actually overridden (contradicting the note above
# that inheritance is mandatory). The printed output is unchanged.
class child(parent):
    def phone(self):
        print("i have samsung")


o = child()
o.phone()
# 3.operator overloading | [
"suhaizalikb@gmail.com"
] | suhaizalikb@gmail.com |
01dd6b0b9583bab5ad28d39492df06c720c411a8 | a85cbde1520cc5299eae7220a9739d948ce5dea1 | /ERAN/tf_verify/deeppoly_nodes.py | 1ae03b1fe9de2d44b3f74b958c22c2f40cf2eda5 | [] | no_license | nikos-kekatos/SapoForNN | 8c819f84604972cbebad771794205f80dc8b94c2 | 44a38cd36d442e7e555c2babc59dd91b8337a0ea | refs/heads/main | 2023-01-20T05:20:08.185279 | 2020-11-30T09:11:19 | 2020-11-30T09:11:19 | 304,479,761 | 0 | 0 | null | 2020-10-16T00:34:25 | 2020-10-16T00:34:24 | null | UTF-8 | Python | false | false | 24,862 | py | '''
@author: Adrian Hoffmann
'''
import numpy as np
from config import config, Device
if config.device == Device.CPU:
from fppoly import *
else:
from fppoly_gpu import *
from elina_interval import *
from elina_abstract0 import *
from elina_manager import *
from ai_milp import *
from functools import reduce
from refine_activation import *
def calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=False, destroy=True, use_krelu=False):
    """Extract the concrete per-neuron interval bounds of the current layer.

    Queries the ELINA box domain of ``element`` for the layer indexed by
    ``nn.calc_layerno()``.

    Parameters
    ----------
    man, element : ELINA manager / abstract element the bounds are read from.
    nn : network/layers object; ``nn.calc_layerno()`` selects the layer.
    nlb, nub : running per-layer lists of lower/upper bounds; appended to
        only when ``is_refine_layer`` is True.
    relu_groups, use_krelu : currently unused here; kept so existing call
        sites that pass them keep working.
    is_refine_layer : bool, record this layer's bounds into nlb/nub.
    destroy : bool, free the ELINA interval array before returning.

    Returns
    -------
    (lbi, ubi) when ``destroy`` is True, otherwise
    (layerno, bounds, num_neurons, lbi, ubi) — the caller then owns
    ``bounds`` and must free it with elina_interval_array_free.
    """
    layerno = nn.calc_layerno()
    bounds = box_for_layer(man, element, layerno)
    num_neurons = get_num_neurons_in_layer(man, element, layerno)
    # Unpack the C interval structs into plain Python floats.
    itv = [bounds[i] for i in range(num_neurons)]
    lbi = [x.contents.inf.contents.val.dbl for x in itv]
    ubi = [x.contents.sup.contents.val.dbl for x in itv]
    # (A large block of commented-out numpy sanity-check code that manually
    # recomputed the interval bounds for layers 0-2 was removed here.)
    if is_refine_layer:
        nlb.append(lbi)
        nub.append(ubi)
    if destroy:
        elina_interval_array_free(bounds, num_neurons)
        return lbi, ubi
    return layerno, bounds, num_neurons, lbi, ubi
def add_input_output_information_deeppoly(self, input_names, output_name, output_shape):
    """Attach graph-wiring metadata to *self* (any node-like object).

    Sets three attributes, used mainly by the Optimizer but also by the
    nodes themselves:
      - ``self.output_length``: number of scalar outputs — the product of
        ``output_shape`` (for 4-D shapes the leading dimension is skipped)
      - ``self.input_names``: names of the predecessor nodes
      - ``self.output_name``: name of this node
    """
    dims = output_shape[1:] if len(output_shape) == 4 else output_shape
    self.output_length = reduce(lambda a, b: a * b, dims)
    self.input_names = input_names
    self.output_name = output_name
class DeeppolyInput:
    """Input node of a DeepPoly analysis: holds the box spec (specLB/specUB)
    and, optionally, polyhedral lower/upper expressions over input pixels."""

    def __init__(self, specLB, specUB, input_names, output_name, output_shape,
                 lexpr_weights=None, lexpr_cst=None, lexpr_dim=None,
                 uexpr_weights=None, uexpr_cst=None, uexpr_dim=None,
                 expr_size=0):
        """
        Arguments
        ---------
        specLB : numpy.ndarray
            1D array with the lower bound of the input spec
        specUB : numpy.ndarray
            1D array with the upper bound of the input spec
        lexpr_weights / uexpr_weights : numpy.ndarray or None
            coefficients of the lower / upper polyhedral expressions
        lexpr_cst / uexpr_cst : numpy.ndarray or None
            constants of the lower / upper polyhedral expressions
        lexpr_dim / uexpr_dim : numpy.ndarray or None
            indexes of the original-image pixels each expression refers to
        expr_size : int
            size of the polyhedral expressions (0 disables them)
        """
        # The ELINA/fppoly C bindings require C-contiguous buffers of the
        # exact dtype, hence the coercions below.
        self.specLB = np.ascontiguousarray(specLB, dtype=np.double)
        self.specUB = np.ascontiguousarray(specUB, dtype=np.double)
        self.lexpr_weights = self._as_c_array(lexpr_weights, np.double)
        self.lexpr_cst = self._as_c_array(lexpr_cst, np.double)
        self.lexpr_dim = self._as_c_array(lexpr_dim, np.uintp)
        self.uexpr_weights = self._as_c_array(uexpr_weights, np.double)
        self.uexpr_cst = self._as_c_array(uexpr_cst, np.double)
        # Bug fix: the original converted `lexpr_dim` here (copy-paste),
        # leaving the `uexpr_dim` parameter unused and making the upper
        # expressions silently reuse the lower expressions' pixel indexes.
        self.uexpr_dim = self._as_c_array(uexpr_dim, np.uintp)
        self.expr_size = expr_size
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)

    @staticmethod
    def _as_c_array(arr, dtype):
        """Return a C-contiguous copy of *arr* with *dtype*; None passes through."""
        return None if arr is None else np.ascontiguousarray(arr, dtype=dtype)

    def transformer(self, man):
        """Create an abstract element from the input spec.

        Arguments
        ---------
        man : ElinaManagerPtr
            inside this manager the abstract element will be created

        Return
        ------
        output : ElinaAbstract0Ptr
            new abstract element representing the box (and, when
            expr_size > 0, the polyhedral input constraints)
        """
        if self.expr_size == 0:
            return fppoly_from_network_input(man, 0, len(self.specLB), self.specLB, self.specUB)
        else:
            return fppoly_from_network_input_poly(man, 0, len(self.specLB), self.specLB, self.specUB,
                                                  self.lexpr_weights, self.lexpr_cst, self.lexpr_dim,
                                                  self.uexpr_weights, self.uexpr_cst, self.uexpr_dim, self.expr_size)
class DeeppolyNode:
    """
    Parent class for all the classes that implement fully connected layers
    """
    def __init__(self, weights, bias, input_names, output_name, output_shape):
        """
        Arguments
        ---------
        weights : numpy.ndarray
            matrix of the fully connected layer (must be 2D)
        bias : numpy.ndarray
            bias of the fully connected layer
        """
        # The ELINA bindings require C-contiguous double buffers.
        self.weights = np.ascontiguousarray(weights, dtype=np.double)
        self.bias = np.ascontiguousarray(bias, dtype=np.double)
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)

    def get_arguments(self):
        """
        facilitates putting together all the arguments for the transformers in the child classes

        Return
        ------
        output : tuple
            the four entries are pointers to the rows of the matrix, the bias, the length of the output, and the length of the input
        """
        xpp = self.get_xpp()
        # weights.shape == (output_length, input_length) for a dense layer.
        return xpp, self.bias, self.weights.shape[0], self.weights.shape[1], self.predecessors, len(self.predecessors)

    def get_xpp(self):
        """
        helper function to get pointers to the rows of self.weights.

        Return
        ------
        output : numpy.ndarray
            pointers to the rows of the matrix
        """
        # Row pointer array for the C API: base address of the weight buffer
        # plus one byte-stride offset per row, reinterpreted as uintp.
        return (self.weights.__array_interface__['data'][0]+ np.arange(self.weights.shape[0])*self.weights.strides[0]).astype(np.uintp)
class DeeppolyFCNode(DeeppolyNode):
    """Fully connected (affine) layer transformer for the DeepPoly domain."""

    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """
        transformer for the first layer of a neural network, if that first layer is fully connected with relu

        Arguments
        ---------
        man : ElinaManagerPtr
            man to which element belongs
        element : ElinaAbstract0Ptr
            abstract element onto which the transformer gets applied

        Return
        ------
        output : ElinaAbstract0Ptr
            abstract element after the transformer
            (when `testing` is True: (element, lower bounds, upper bounds))
        """
        handle_fully_connected_layer(man, element, *self.get_arguments())
        # Elina calculates the new box domain taking into account all the constraints
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
        # Bookkeeping: one more feed-forward layer processed.
        nn.ffn_counter+=1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyNonlinearity:
    """Common base for element-wise activation nodes (ReLU, Sigmoid, Tanh)."""

    def __init__(self, input_names, output_name, output_shape):
        """
        Arguments
        ---------
        input_names : iterable
            iterable with the name of the vector you want to apply the non-linearity to
        output_name : str
            name of this node's output
        output_shape : iterable
            iterable of ints with the shape of the output of this node
        """
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)

    def get_arguments(self, man, element):
        """
        used by the children of this class to easily get the inputs for their transformers

        Arguments
        ---------
        man : ElinaManagerPtr
            man to which element belongs
        element : ElinaAbstract0Ptr
            abstract element onto which the transformer gets applied

        Return
        ------
        output : tuple
            arguments for the non-linearity transformers like Relu or Sigmoid
        """
        # Element-wise activations preserve length, so the layer size is
        # simply the node's output_length.
        length = self.output_length
        return man, element, length, self.predecessors, len(self.predecessors)
class DeeppolyReluNode(DeeppolyNonlinearity):
    """ReLU activation layer transformer for the DeepPoly domain."""

    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """Apply the ReLU abstract transformer to ``element`` in place.

        Uses solver-backed refinement when ``refine`` is True, otherwise the
        plain DeepPoly ReLU transformer. Afterwards records the layer's
        concrete bounds into nlb/nub (from nlb/nub this is just the ReLU
        max(x, 0) of the previous bounds) and advances
        ``nn.activation_counter``.

        Returns ``element``, or ``(element, lbi, ubi)`` when ``testing``.
        """
        # Fix: dropped the original's unused local `length`;
        # get_arguments() already derives the length from self.output_length.
        if refine:
            refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly')
        else:
            handle_relu_layer(*self.get_arguments(man, element), use_default_heuristic)
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=False)
        nn.activation_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolySigmoidNode(DeeppolyNonlinearity):
    """Sigmoid activation layer transformer for the DeepPoly domain."""

    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """Apply the sigmoid abstract transformer to ``element`` in place.

        Uses solver-backed refinement when ``refine`` is True, otherwise the
        plain DeepPoly sigmoid transformer. Records the layer's concrete
        bounds into nlb/nub and advances ``nn.activation_counter``.

        Returns ``element``, or ``(element, lbi, ubi)`` when ``testing``.
        """
        # Fix: dropped the original's unused local `length`;
        # get_arguments() already derives the length from self.output_length.
        if refine:
            refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly')
        else:
            handle_sigmoid_layer(*self.get_arguments(man, element))
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
        nn.activation_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyTanhNode(DeeppolyNonlinearity):
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """
        transforms element with handle_tanh_layer
        Arguments
        ---------
        man : ElinaManagerPtr
            man to which element belongs
        element : ElinaAbstract0Ptr
            abstract element onto which the transformer gets applied
        Return
        ------
        output : ElinaAbstract0Ptr
            abstract element after the transformer
        """
        # (removed an unused local `length = self.output_length`)
        if refine:
            refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly')
        else:
            handle_tanh_layer(*self.get_arguments(man, element))
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
        nn.activation_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyConv2dNode:
    def __init__(self, filters, strides, pad_top, pad_left, bias, image_shape, input_names, output_name, output_shape):
        """
        collects the information needed for the handle_convolutional_layer transformer and brings it into the required shape
        Arguments
        ---------
        filters : numpy.ndarray
            the actual 4D filter of the convolutional layer
        strides : numpy.ndarray
            1D with two elements, stride in height and width direction
        pad_top : int
            padding added at the top of the input
        pad_left : int
            padding added at the left of the input
        bias : numpy.ndarray
            the bias of the layer
        image_shape : numpy.ndarray
            1D array of ints with 3 entries [height, width, channels] representing the shape of the image that is passed to the conv-layer
        """
        self.image_shape = np.ascontiguousarray(image_shape, dtype=np.uintp)
        self.filters = np.ascontiguousarray(filters, dtype=np.double)
        self.strides = np.ascontiguousarray(strides, dtype=np.uintp)
        self.bias = np.ascontiguousarray(bias, dtype=np.double)
        # Output size as (height, width, channels); output_shape[0] is presumably the batch dim — TODO confirm
        self.out_size = (c_size_t * 3)(output_shape[1], output_shape[2], output_shape[3])
        self.pad_top = pad_top
        self.pad_left = pad_left
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def get_arguments(self):
        """
        puts together all the arguments for the convolution transformer
        Return
        ------
        output : tuple
            the 12 entries are:
            1. the filter (numpy.ndarray)
            2. the bias (numpy.ndarray)
            3. the image_shape (numpy.ndarray)
            4. (height, width) of the kernel as a c_size_t array
            5. number of filters (int)
            6. (height, width) strides as a c_size_t array
            7. the output size as a c_size_t array
            8. the top padding (int)
            9. the left padding (int)
            10. True -- NOTE(review): presumably a 'has bias' flag; confirm against the C API
            11. the predecessors of this node
            12. the number of predecessors
        """
        filter_size = (c_size_t * 2) (self.filters.shape[0], self.filters.shape[1])
        numfilters = self.filters.shape[3]
        strides = (c_size_t * 2)(self.strides[0], self.strides[1])
        return self.filters, self.bias, self.image_shape, filter_size, numfilters, strides, self.out_size, self.pad_top, self.pad_left, True, self.predecessors, len(self.predecessors)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """
        transformer for a convolutional layer, if that layer is an intermediate of the network
        Arguments
        ---------
        man : ElinaManagerPtr
            man to which element belongs
        element : ElinaAbstract0Ptr
            abstract element onto which the transformer gets applied
        Return
        ------
        output : ElinaAbstract0Ptr
            abstract element after the transformer
        """
        handle_convolutional_layer(man, element, *self.get_arguments())
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
        nn.conv_counter+=1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyPoolNode:
    def __init__(self, input_shape, window_size, strides, pad_top, pad_left, input_names, output_name, output_shape,is_maxpool):
        """
        collects the information needed for the handle_pool_layer transformer and brings it into the required shape
        Arguments
        ---------
        input_shape : numpy.ndarray
            1D array of ints with 3 entries [height, width, channels] representing the shape of the input to the pooling layer
        window_size : numpy.ndarray
            1D array of ints with 2 entries [height, width] representing the window's size in these directions
        strides : numpy.ndarray
            1D array of ints with 2 entries [height, width] representing the stride in these directions
        is_maxpool : bool
            True for max-pooling, False for average-pooling
        """
        self.input_shape = np.ascontiguousarray(input_shape, dtype=np.uintp)
        self.window_size = np.ascontiguousarray(window_size, dtype=np.uintp)
        self.strides = np.ascontiguousarray(strides, dtype=np.uintp)
        self.pad_top = pad_top
        self.pad_left = pad_left
        # Output size as (height, width, channels); output_shape[0] is presumably the batch dim — TODO confirm
        self.output_shape = (c_size_t * 3)(output_shape[1],output_shape[2],output_shape[3])
        self.is_maxpool = is_maxpool
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        """
        transformer for a maxpool/averagepool layer, this can't be the first layer of a network
        Arguments
        ---------
        man : ElinaManagerPtr
            man to which element belongs
        element : ElinaAbstract0Ptr
            abstract element onto which the transformer gets applied
        Return
        ------
        output : ElinaAbstract0Ptr
            abstract element after the transformer
        """
        h, w = self.window_size
        H, W, C = self.input_shape
        # Pooling window depth is fixed to 1 (pooling never mixes channels here).
        handle_pool_layer(man, element, (c_size_t *3)(h,w,1), (c_size_t *3)(H, W, C), (c_size_t *2)(self.strides[0], self.strides[1]), self.pad_top, self.pad_left, self.output_shape, self.predecessors, len(self.predecessors), self.is_maxpool)
        # destroy=False: keeps the pre-pool element alive — TODO confirm intent vs other layers
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, destroy=False)
        nn.pool_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyResidualNode:
    def __init__(self, input_names, output_name, output_shape):
        """
        Arguments
        ---------
        input_names : iterable
            iterable with the names of the two nodes you want to add
        output_name : str
            name of this node's output
        output_shape : iterable
            iterable of ints with the shape of the output of this node
        """
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        # Element-wise addition of the two predecessor layers' outputs.
        handle_residual_layer(man,element,self.output_length,self.predecessors, len(self.predecessors))
        calc_bounds(man, element, nn, nlb, nub, relu_groups, use_krelu=refine, is_refine_layer=True)
        # print("Residual ", nn.layertypes[layerno],layerno)
        nn.residual_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyGather:
    def __init__(self, indexes, input_names, output_name, output_shape):
        """
        collects the information needed for the handle_gather_layer transformer and brings it into the required shape
        Arguments
        ---------
        indexes : numpy.ndarray
            indices to gather from the input, stored as an unsigned-int array
        """
        self.indexes = np.ascontiguousarray(indexes, dtype=np.uintp)
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        # NOTE: unlike the other nodes, gather neither recomputes bounds nor
        # bumps a layer counter — it only reorders/selects neurons.
        handle_gather_layer(man, element, self.indexes)
        return element
class DeeppolySubNode:
    def __init__(self, bias, is_minuend, input_names, output_name, output_shape):
        """
        collects the information needed for the handle_sub_layer transformer and brings it into the required shape
        Arguments
        ---------
        bias : numpy.ndarray
            the constant operand of the subtraction (flattened to 1D float64)
        is_minuend : bool
            NOTE(review): presumably True when the constant is the minuend
            (bias - x rather than x - bias) — confirm against handle_sub_layer
        """
        self.bias = np.ascontiguousarray(bias.reshape(-1), dtype=np.float64)
        self.is_minuend = is_minuend
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        # Number of neurons is queried from the abstract element itself.
        layerno = nn.calc_layerno()
        num_neurons = get_num_neurons_in_layer(man, element, layerno)
        handle_sub_layer(man, element, self.bias, self.is_minuend, num_neurons, self.predecessors, len(self.predecessors))
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
        nn.ffn_counter+=1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
class DeeppolyMulNode:
    def __init__(self, bias, input_names, output_name, output_shape):
        """
        collects the information needed for the handle_mul_layer transformer and brings it into the required shape
        Arguments
        ---------
        bias : numpy.ndarray
            the constant factor of the element-wise multiplication (flattened
            to a 1D float64 array)
        """
        self.bias = np.ascontiguousarray(bias.reshape(-1), dtype=np.float64)
        add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
    def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
        # self.bias is already flattened in __init__, so len(self.bias) gives
        # the element count directly (the original re-flattened it needlessly).
        handle_mul_layer(man, element, self.bias, len(self.bias), self.predecessors, len(self.predecessors))
        calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
        nn.ffn_counter += 1
        if testing:
            return element, nlb[-1], nub[-1]
        return element
| [
"pippia.eleonora@gmail.com"
] | pippia.eleonora@gmail.com |
00b9b8a260c605b4020f1ce3da5e814c4811049e | 45e00ebe0707c04711caba76d0199899eda4ab7b | /finallabexam.py | bc88cb8d0b344c589280c55c591ffc679263b318 | [] | no_license | inwk6312winter2019/openbookfinal-kalpana-27 | 383039a1dd71087431406086e07f5ed6c4b8de50 | fbfef714683b99ce9d2ad4d47f3a7f561fc6a039 | refs/heads/master | 2020-04-23T04:17:11.488759 | 2019-02-15T19:26:55 | 2019-02-15T19:26:55 | 170,903,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | fin = open("Book1.txt","r")
# Input files; each is expected to contain whitespace-separated words.
fin1 = open("Book2.txt","r")
fin2 = open("Book3.txt","r")
def unique_words(task1):
    """Return the unique words of *task1* in first-seen order.

    Arguments
    ---------
    task1 : iterable of str
        lines of text (e.g. an open file object)

    Returns
    -------
    list of str
        each distinct whitespace-separated word, once, in order of first
        appearance.

    The original version appended every line's whole word-list to the result
    unconditionally, so the `not in` check could never fire and the function
    never actually deduplicated anything.
    """
    seen = []
    for line in task1:
        for word in line.strip().split():
            if word not in seen:
                seen.append(word)
    return seen
# Print the word list extracted from each book.
print(unique_words(fin))
print(unique_words(fin1))
print(unique_words(fin2))
| [
"kalpana@dal.ca"
] | kalpana@dal.ca |
8d14c06e58e1672981c98743bcf34772582e938e | f947eedcf7e9ab25d163608c87b3640f627eab63 | /change_bit.py | 876a472e0823ba5c1d6612611bef173e6990b0a2 | [] | no_license | FRsparrow/python | 1b6b6fcd8f3e46f94c68e9b96d3cf746c922e606 | 24ab8945bb8dd2d5c7bcb10532be440a0f14a348 | refs/heads/master | 2020-08-27T21:34:53.112323 | 2019-10-25T09:03:20 | 2019-10-25T09:03:20 | 217,494,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | s = input()
# For every row count between 2 and 12 that evenly divides the input length,
# read the string as a grid of `row` rows by `col` columns (row-major) and
# print it column by column (i.e. the transposed reading).
length = len(s)
print(length)
for row in range(2,13):
    if length % row == 0:
        temp = ""
        col = int(length / row)
        # Walk column i: characters i, i+col, i+2*col, ...
        for i in range(col):
            for j in range(i, length, col):
                temp += s[j]
        print(temp)
"noreply@github.com"
] | FRsparrow.noreply@github.com |
d437ea75e960d5ce81adcf104535caa79226f268 | 50eb3e1604ee4ad27a592ab827dd5f4cfe85e608 | /labelImg.py | 638dd713e50c62d89626f41404bbe739e4b15d35 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | Xuner1213/labelImg-1.8.2 | a150825e0ecca76a34782f710ccdcd067ebdd20c | ce936dcfe17a5e8ae845f3a9b8718f239cf6e023 | refs/heads/master | 2022-07-21T01:34:32.685441 | 2019-08-31T14:24:39 | 2019-08-31T14:24:39 | 205,554,190 | 0 | 0 | MIT | 2022-07-06T20:14:55 | 2019-08-31T14:23:23 | Python | UTF-8 | Python | false | false | 59,028 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import distutils.spawn
import os.path
import platform
import re
import sys
import subprocess
from functools import partial
from collections import defaultdict
# hello
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import resources
# Add internal libs
from libs.constants import *
from libs.lib import struct, newAction, newIcon, addActions, fmtShortcut, generateColorByText
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.ustr import ustr
from libs.version import __version__
from libs.hashableQListWidgetItem import HashableQListWidgetItem
__appname__ = 'labelImg'
# Utility functions and classes.
def have_qstring():
    '''p3/qt5 get rid of QString wrapper as py3 has native unicode str type'''
    # Only Python 2 combined with a pre-5 Qt still exposes QString.
    return sys.version_info.major < 3 and not QT_VERSION_STR.startswith('5.')
def util_qt_strlistclass():
    # QStringList only exists on builds where the QString wrapper survives
    # (py2 + Qt4); everywhere else a plain Python list is used.
    if have_qstring():
        return QStringList
    return list
class WindowMixin(object):
    """Mixin adding menu/toolbar factory helpers to a QMainWindow subclass."""
    def menu(self, title, actions=None):
        """Create a menu-bar menu named *title*, optionally filled with *actions*."""
        menu = self.menuBar().addMenu(title)
        if actions:
            addActions(menu, actions)
        return menu
    def toolbar(self, title, actions=None):
        """Create a left-docked toolbar named *title*, optionally filled with *actions*."""
        toolbar = ToolBar(title)
        toolbar.setObjectName(u'%sToolBar' % title)
        # toolbar.setOrientation(Qt.Vertical)
        toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        if actions:
            addActions(toolbar, actions)
        self.addToolBar(Qt.LeftToolBarArea, toolbar)
        return toolbar
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
    def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
        """Build the whole main window: widgets, docks, actions, menus,
        toolbar, and restore persisted settings.

        defaultFilename          -- file or directory to open on startup
        defaultPrefdefClassFile  -- file with one predefined label per line
        defaultSaveDir           -- directory annotations are written to
        """
        super(MainWindow, self).__init__()
        self.setWindowTitle(__appname__)
        # Load setting in the main thread
        self.settings = Settings()
        self.settings.load()
        settings = self.settings
        # Load string bundle for i18n
        self.stringBundle = StringBundle.getBundle()
        getStr = lambda strId: self.stringBundle.getString(strId)
        # Save as Pascal voc xml
        self.defaultSaveDir = defaultSaveDir
        self.usingPascalVocFormat = True
        self.usingYoloFormat = False
        # For loading all image under a directory
        self.mImgList = []
        self.dirname = None
        self.labelHist = []
        self.lastOpenDir = None
        # Whether we need to save or not.
        self.dirty = False
        self._noSelectionSlot = False
        self._beginner = True
        self.screencastViewer = self.getAvailableScreencastViewer()
        self.screencast = "https://youtu.be/p0nR2YsCY_U"
        # Load predefined classes to the list
        self.loadPredefinedClasses(defaultPrefdefClassFile)
        # Main widgets and related state.
        self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
        self.itemsToShapes = {}
        self.shapesToItems = {}
        self.prevLabelText = ''
        listLayout = QVBoxLayout()
        listLayout.setContentsMargins(0, 0, 0, 0)
        # Create a widget for using default label
        self.useDefaultLabelCheckbox = QCheckBox(getStr('useDefaultLabel'))
        self.useDefaultLabelCheckbox.setChecked(False)
        self.defaultLabelTextLine = QLineEdit()
        useDefaultLabelQHBoxLayout = QHBoxLayout()
        useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
        useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
        useDefaultLabelContainer = QWidget()
        useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
        # Create a widget for edit and diffc button
        self.diffcButton = QCheckBox(getStr('useDifficult'))
        self.diffcButton.setChecked(False)
        self.diffcButton.stateChanged.connect(self.btnstate)
        self.editButton = QToolButton()
        self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        # Add some of widgets to listLayout
        listLayout.addWidget(self.editButton)
        listLayout.addWidget(self.diffcButton)
        listLayout.addWidget(useDefaultLabelContainer)
        # Create and add a widget for showing current label items
        self.labelList = QListWidget()
        labelListContainer = QWidget()
        labelListContainer.setLayout(listLayout)
        self.labelList.itemActivated.connect(self.labelSelectionChanged)
        self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
        self.labelList.itemDoubleClicked.connect(self.editLabel)
        # Connect to itemChanged to detect checkbox changes.
        self.labelList.itemChanged.connect(self.labelItemChanged)
        listLayout.addWidget(self.labelList)
        self.dock = QDockWidget(getStr('boxLabelText'), self)
        self.dock.setObjectName(getStr('labels'))
        self.dock.setWidget(labelListContainer)
        self.fileListWidget = QListWidget()
        self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
        filelistLayout = QVBoxLayout()
        filelistLayout.setContentsMargins(0, 0, 0, 0)
        filelistLayout.addWidget(self.fileListWidget)
        fileListContainer = QWidget()
        fileListContainer.setLayout(filelistLayout)
        self.filedock = QDockWidget(getStr('fileList'), self)
        self.filedock.setObjectName(getStr('files'))
        self.filedock.setWidget(fileListContainer)
        self.zoomWidget = ZoomWidget()
        self.colorDialog = ColorDialog(parent=self)
        self.canvas = Canvas(parent=self)
        self.canvas.zoomRequest.connect(self.zoomRequest)
        self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
        scroll = QScrollArea()
        scroll.setWidget(self.canvas)
        scroll.setWidgetResizable(True)
        self.scrollBars = {
            Qt.Vertical: scroll.verticalScrollBar(),
            Qt.Horizontal: scroll.horizontalScrollBar()
        }
        self.scrollArea = scroll
        self.canvas.scrollRequest.connect(self.scrollRequest)
        self.canvas.newShape.connect(self.newShape)
        self.canvas.shapeMoved.connect(self.setDirty)
        self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
        self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
        self.setCentralWidget(scroll)
        self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
        self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
        self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
        self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
        self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
        # Actions
        action = partial(newAction, self)
        quit = action(getStr('quit'), self.close,
                      'Ctrl+Q', 'quit', getStr('quitApp'))
        open = action(getStr('openFile'), self.openFile,
                      'Ctrl+O', 'open', getStr('openFileDetail'))
        opendir = action(getStr('openDir'), self.openDirDialog,
                         'Ctrl+u', 'open', getStr('openDir'))
        changeSavedir = action(getStr('changeSaveDir'), self.changeSavedirDialog,
                               'Ctrl+r', 'open', getStr('changeSavedAnnotationDir'))
        openAnnotation = action(getStr('openAnnotation'), self.openAnnotationDialog,
                                'Ctrl+Shift+O', 'open', getStr('openAnnotationDetail'))
        openNextImg = action(getStr('nextImg'), self.openNextImg,
                             'd', 'next', getStr('nextImgDetail'))
        openPrevImg = action(getStr('prevImg'), self.openPrevImg,
                             'a', 'prev', getStr('prevImgDetail'))
        verify = action(getStr('verifyImg'), self.verifyImg,
                        'space', 'verify', getStr('verifyImgDetail'))
        save = action(getStr('save'), self.saveFile,
                      'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)
        # NOTE(review): 'Ctrl+' looks like a truncated shortcut — confirm intended key.
        save_format = action('&PascalVOC', self.change_format,
                             'Ctrl+', 'format_voc', getStr('changeSaveFormat'), enabled=True)
        saveAs = action(getStr('saveAs'), self.saveFileAs,
                        'Ctrl+Shift+S', 'save-as', getStr('saveAsDetail'), enabled=False)
        close = action(getStr('closeCur'), self.closeFile, 'Ctrl+W', 'close', getStr('closeCurDetail'))
        resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
        color1 = action(getStr('boxLineColor'), self.chooseColor1,
                        'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
        createMode = action(getStr('crtBox'), self.setCreateMode,
                            'w', 'new', getStr('crtBoxDetail'), enabled=False)
        editMode = action('&Edit\nRectBox', self.setEditMode,
                          'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
        create = action(getStr('crtBox'), self.createShape,
                        'w', 'new', getStr('crtBoxDetail'), enabled=False)
        delete = action(getStr('delBox'), self.deleteSelectedShape,
                        'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
        copy = action(getStr('dupBox'), self.copySelectedShape,
                      'Ctrl+D', 'copy', getStr('dupBoxDetail'),
                      enabled=False)
        advancedMode = action(getStr('advancedMode'), self.toggleAdvancedMode,
                              'Ctrl+Shift+A', 'expert', getStr('advancedModeDetail'),
                              checkable=True)
        hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
                         'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
                         enabled=False)
        showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
                         'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
                         enabled=False)
        help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
        showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
        zoom = QWidgetAction(self)
        zoom.setDefaultWidget(self.zoomWidget)
        self.zoomWidget.setWhatsThis(
            u"Zoom in or out of the image. Also accessible with"
            " %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
                                             fmtShortcut("Ctrl+Wheel")))
        self.zoomWidget.setEnabled(False)
        zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
                        'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
        zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
                         'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
        zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
                         'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
        fitWindow = action(getStr('fitWin'), self.setFitWindow,
                           'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
                           checkable=True, enabled=False)
        fitWidth = action(getStr('fitWidth'), self.setFitWidth,
                          'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
                          checkable=True, enabled=False)
        # Group zoom controls into a list for easier toggling.
        zoomActions = (self.zoomWidget, zoomIn, zoomOut,
                       zoomOrg, fitWindow, fitWidth)
        self.zoomMode = self.MANUAL_ZOOM
        self.scalers = {
            self.FIT_WINDOW: self.scaleFitWindow,
            self.FIT_WIDTH: self.scaleFitWidth,
            # Set to one to scale to 100% when loading files.
            self.MANUAL_ZOOM: lambda: 1,
        }
        edit = action(getStr('editLabel'), self.editLabel,
                      'Ctrl+E', 'edit', getStr('editLabelDetail'),
                      enabled=False)
        self.editButton.setDefaultAction(edit)
        shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
                                icon='color_line', tip=getStr('shapeLineColorDetail'),
                                enabled=False)
        shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
                                icon='color', tip=getStr('shapeFillColorDetail'),
                                enabled=False)
        labels = self.dock.toggleViewAction()
        labels.setText(getStr('showHide'))
        labels.setShortcut('Ctrl+Shift+L')
        # Label list context menu.
        labelMenu = QMenu()
        addActions(labelMenu, (edit, delete))
        self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.labelList.customContextMenuRequested.connect(
            self.popLabelListMenu)
        # Draw squares/rectangles
        self.drawSquaresOption = QAction('Draw Squares', self)
        self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
        self.drawSquaresOption.setCheckable(True)
        self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
        self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
        # Store actions for further handling.
        self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close, resetAll = resetAll,
                              lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
                              createMode=createMode, editMode=editMode, advancedMode=advancedMode,
                              shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
                              zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
                              fitWindow=fitWindow, fitWidth=fitWidth,
                              zoomActions=zoomActions,
                              fileMenuActions=(
                                  open, opendir, save, saveAs, close, resetAll, quit),
                              beginner=(), advanced=(),
                              editMenu=(edit, copy, delete,
                                        None, color1, self.drawSquaresOption),
                              beginnerContext=(create, edit, copy, delete),
                              advancedContext=(createMode, editMode, edit, copy,
                                               delete, shapeLineColor, shapeFillColor),
                              onLoadActive=(
                                  close, create, createMode, editMode),
                              onShapesPresent=(saveAs, hideAll, showAll))
        self.menus = struct(
            file=self.menu('&File'),
            edit=self.menu('&Edit'),
            view=self.menu('&View'),
            help=self.menu('&Help'),
            recentFiles=QMenu('Open &Recent'),
            labelList=labelMenu)
        # Auto saving : Enable auto saving if pressing next
        self.autoSaving = QAction(getStr('autoSaveMode'), self)
        self.autoSaving.setCheckable(True)
        self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
        # Sync single class mode from PR#106
        self.singleClassMode = QAction(getStr('singleClsMode'), self)
        self.singleClassMode.setShortcut("Ctrl+Shift+S")
        self.singleClassMode.setCheckable(True)
        self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
        self.lastLabel = None
        # Add option to enable/disable labels being displayed at the top of bounding boxes
        self.displayLabelOption = QAction(getStr('displayLabel'), self)
        self.displayLabelOption.setShortcut("Ctrl+Shift+P")
        self.displayLabelOption.setCheckable(True)
        self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
        self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
        addActions(self.menus.file,
                   (open, opendir, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format, saveAs, close, resetAll, quit))
        addActions(self.menus.help, (help, showInfo))
        addActions(self.menus.view, (
            self.autoSaving,
            self.singleClassMode,
            self.displayLabelOption,
            labels, advancedMode, None,
            hideAll, showAll, None,
            zoomIn, zoomOut, zoomOrg, None,
            fitWindow, fitWidth))
        self.menus.file.aboutToShow.connect(self.updateFileMenu)
        # Custom context menu for the canvas widget:
        addActions(self.canvas.menus[0], self.actions.beginnerContext)
        addActions(self.canvas.menus[1], (
            action('&Copy here', self.copyShape),
            action('&Move here', self.moveShape)))
        self.tools = self.toolbar('Tools')
        self.actions.beginner = (
            open, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, save_format, None, create, copy, delete, None,
            zoomIn, zoom, zoomOut, fitWindow, fitWidth)
        self.actions.advanced = (
            open, opendir, changeSavedir, openNextImg, openPrevImg, save, save_format, None,
            createMode, editMode, None,
            hideAll, showAll)
        self.statusBar().showMessage('%s started.' % __appname__)
        self.statusBar().show()
        # Application state.
        self.image = QImage()
        self.filePath = ustr(defaultFilename)
        self.recentFiles = []
        self.maxRecent = 7
        self.lineColor = None
        self.fillColor = None
        self.zoom_level = 100
        self.fit_window = False
        # Add Chris
        self.difficult = False
        ## Fix the compatible issue for qt4 and qt5. Convert the QStringList to python list
        if settings.get(SETTING_RECENT_FILES):
            if have_qstring():
                recentFileQStringList = settings.get(SETTING_RECENT_FILES)
                self.recentFiles = [ustr(i) for i in recentFileQStringList]
            else:
                self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
        size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
        position = QPoint(0, 0)
        saved_position = settings.get(SETTING_WIN_POSE, position)
        # Fix the multiple monitors issue
        for i in range(QApplication.desktop().screenCount()):
            if QApplication.desktop().availableGeometry(i).contains(saved_position):
                position = saved_position
                break
        self.resize(size)
        self.move(position)
        saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
        self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
        if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
            self.defaultSaveDir = saveDir
            self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
                                         (__appname__, self.defaultSaveDir))
            self.statusBar().show()
        self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
        Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
        Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
        self.canvas.setDrawingColor(self.lineColor)
        # Add chris
        Shape.difficult = self.difficult
        # Settings may hold either a QVariant (qt4) or a plain value (qt5).
        def xbool(x):
            if isinstance(x, QVariant):
                return x.toBool()
            return bool(x)
        if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
            self.actions.advancedMode.setChecked(True)
            self.toggleAdvancedMode()
        # Populate the File menu dynamically.
        self.updateFileMenu()
        # Since loading the file may take some time, make sure it runs in the background.
        if self.filePath and os.path.isdir(self.filePath):
            self.queueEvent(partial(self.importDirImages, self.filePath or ""))
        elif self.filePath:
            self.queueEvent(partial(self.loadFile, self.filePath or ""))
        # Callbacks:
        self.zoomWidget.valueChanged.connect(self.paintCanvas)
        self.populateModeActions()
        # Display cursor coordinates at the right of status bar
        self.labelCoordinates = QLabel('')
        self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Open Dir if default file
        if self.filePath and os.path.isdir(self.filePath):
            self.openDirDialog(dirpath=self.filePath)
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Control:
self.canvas.setDrawingShapeToSquare(False)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Control:
# Draw rectangle if Ctrl is pressed
self.canvas.setDrawingShapeToSquare(True)
## Support Functions ##
def set_format(self, save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.usingPascalVocFormat = True
self.usingYoloFormat = False
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.usingPascalVocFormat = False
self.usingYoloFormat = True
LabelFile.suffix = TXT_EXT
def change_format(self):
if self.usingPascalVocFormat: self.set_format(FORMAT_YOLO)
elif self.usingYoloFormat: self.set_format(FORMAT_PASCALVOC)
def noShapes(self):
return not self.itemsToShapes
    def toggleAdvancedMode(self, value=True):
        """Switch between beginner (value=False) and advanced (value=True) UI mode."""
        self._beginner = not value
        self.canvas.setEditing(True)
        self.populateModeActions()
        self.editButton.setVisible(not value)
        if value:
            self.actions.createMode.setEnabled(True)
            self.actions.editMode.setEnabled(False)
            # Advanced mode: let the label dock be closed/floated.
            self.dock.setFeatures(self.dock.features() | self.dockFeatures)
        else:
            # Beginner mode: XOR strips those same feature bits again.
            self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
    def populateModeActions(self):
        """Rebuild toolbar, canvas context menu and Edit menu for the current mode."""
        if self.beginner():
            tool, menu = self.actions.beginner, self.actions.beginnerContext
        else:
            tool, menu = self.actions.advanced, self.actions.advancedContext
        self.tools.clear()
        addActions(self.tools, tool)
        self.canvas.menus[0].clear()
        addActions(self.canvas.menus[0], menu)
        self.menus.edit.clear()
        actions = (self.actions.create,) if self.beginner()\
            else (self.actions.createMode, self.actions.editMode)
        addActions(self.menus.edit, actions + self.actions.editMenu)
    def setBeginner(self):
        # Rebuild the toolbar with the beginner action set.
        self.tools.clear()
        addActions(self.tools, self.actions.beginner)
    def setAdvanced(self):
        # Rebuild the toolbar with the advanced action set.
        self.tools.clear()
        addActions(self.tools, self.actions.advanced)
    def setDirty(self):
        # Mark the current image as having unsaved changes and enable Save.
        self.dirty = True
        self.actions.save.setEnabled(True)
    def setClean(self):
        # Mark the current image as saved; Save off, Create back on.
        self.dirty = False
        self.actions.save.setEnabled(False)
        self.actions.create.setEnabled(True)
    def toggleActions(self, value=True):
        """Enable/Disable widgets which depend on an opened image."""
        # Zoom widgets and load-dependent actions are toggled together.
        for z in self.actions.zoomActions:
            z.setEnabled(value)
        for action in self.actions.onLoadActive:
            action.setEnabled(value)
    def queueEvent(self, function):
        # Run *function* on the next event-loop iteration (keeps the UI responsive).
        QTimer.singleShot(0, function)
    def status(self, message, delay=5000):
        # Show *message* in the status bar for *delay* milliseconds.
        self.statusBar().showMessage(message, delay)
    def resetState(self):
        """Clear all per-image state (label mappings, canvas, file path)."""
        self.itemsToShapes.clear()
        self.shapesToItems.clear()
        self.labelList.clear()
        self.filePath = None
        self.imageData = None
        self.labelFile = None
        self.canvas.resetState()
        self.labelCoordinates.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
    """Move *filePath* to the front of the recent-files list, bounded by maxRecent."""
    recent = self.recentFiles
    if filePath in recent:
        recent.remove(filePath)
    elif len(recent) >= self.maxRecent:
        # List is full: drop the oldest entry to make room.
        recent.pop()
    recent.insert(0, filePath)
def beginner(self):
    """True while the UI is in beginner (single create action) mode."""
    return self._beginner
def advanced(self):
    """True while the UI is in advanced (explicit create/edit modes) mode."""
    return not self.beginner()
def getAvailableScreencastViewer(self):
    """Return the command list used to open the screencast URL on this OS.

    Fix: previously an unrecognized platform fell off the end and
    (implicitly) returned None, which crashed showTutorialDialog when it
    concatenated the result with the screencast URL. Unknown platforms
    now fall back to ``xdg-open``.
    """
    osName = platform.system()
    if osName == 'Windows':
        return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
    elif osName == 'Darwin':
        return ['open', '-a', 'Safari']
    # Linux and any other POSIX-like platform.
    return ['xdg-open']
## Callbacks ##
def showTutorialDialog(self):
    """Open the tutorial screencast URL in the platform's viewer (non-blocking)."""
    subprocess.Popen(self.screencastViewer + [self.screencast])
def showInfoDialog(self):
    """Show an About box with the app name, app version and Python version."""
    msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
    QMessageBox.information(self, u'Information', msg)
def createShape(self):
    """Beginner mode only: put the canvas into drawing mode for one new shape."""
    assert self.beginner()
    self.canvas.setEditing(False)
    self.actions.create.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
    """In the middle of drawing, toggling between modes should be disabled.

    :param drawing: True while a shape is actively being drawn.
    """
    self.actions.editMode.setEnabled(not drawing)
    if not drawing and self.beginner():
        # Cancel creation.
        print('Cancel creation.')
        self.canvas.setEditing(True)
        self.canvas.restoreCursor()
        self.actions.create.setEnabled(True)
def toggleDrawMode(self, edit=True):
    """Advanced mode: switch the canvas between editing (True) and drawing (False)."""
    self.canvas.setEditing(edit)
    self.actions.createMode.setEnabled(edit)
    self.actions.editMode.setEnabled(not edit)
def setCreateMode(self):
    """Advanced mode only: switch the canvas into shape-creation mode."""
    assert self.advanced()
    self.toggleDrawMode(False)
def setEditMode(self):
    """Advanced mode only: switch the canvas into shape-editing mode."""
    assert self.advanced()
    self.toggleDrawMode(True)
    self.labelSelectionChanged()
def updateFileMenu(self):
    """Rebuild the 'recent files' menu, skipping the current file and missing paths."""
    currFilePath = self.filePath

    def exists(filename):
        return os.path.exists(filename)
    menu = self.menus.recentFiles
    menu.clear()
    files = [f for f in self.recentFiles if f !=
             currFilePath and exists(f)]
    for i, f in enumerate(files):
        icon = newIcon('labels')
        # '&%d' gives each entry a keyboard accelerator (1-based).
        action = QAction(
            icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
        action.triggered.connect(partial(self.loadRecent, f))
        menu.addAction(action)
def popLabelListMenu(self, point):
    """Show the label-list context menu at *point* (widget-local coordinates)."""
    self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
    """Open the label dialog for the selected item and apply the new text.

    Fix: guard against no selection — previously ``item.text()`` raised
    AttributeError when currentItem() returned None.
    """
    if not self.canvas.editing():
        return
    item = self.currentItem()
    if item is None:
        # Nothing selected; popping the dialog would crash on item.text().
        return
    text = self.labelDialog.popUp(item.text())
    if text is not None:
        item.setText(text)
        item.setBackground(generateColorByText(text))
        self.setDirty()
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
    """Open the image whose entry was double-clicked in the file list dock."""
    # NOTE(review): list.index raises ValueError if the item text is not
    # in mImgList — relies on the widget mirroring mImgList exactly.
    currIndex = self.mImgList.index(ustr(item.text()))
    if currIndex < len(self.mImgList):
        filename = self.mImgList[currIndex]
        if filename:
            self.loadFile(filename)
# Add chris
def btnstate(self, item=None):
    """Handle the 'difficult' checkbox / item-visibility toggle.

    Updates the shape bound to the current label-list item (or the last
    item when nothing is selected).

    Fix: the two bare ``except: pass`` blocks are gone. Previously a
    failed ``itemsToShapes[item]`` lookup left ``shape`` undefined, and
    the resulting NameError in the second try-block was silently
    swallowed along with every other error. The net effect (do nothing
    when the item has no shape) is preserved by an explicit early return.
    """
    if not self.canvas.editing():
        return
    item = self.currentItem()
    if not item:  # If no item is selected, take the last one.
        item = self.labelList.item(self.labelList.count() - 1)
    difficult = self.diffcButton.isChecked()
    try:
        shape = self.itemsToShapes[item]
    except KeyError:
        # Item not (yet) bound to a shape; nothing to update.
        return
    # Checked and Update
    if difficult != shape.difficult:
        shape.difficult = difficult
        self.setDirty()
    else:  # User probably changed item visibility
        self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
    """Mirror a canvas selection change into the label list and per-shape actions."""
    if self._noSelectionSlot:
        # Selection originated programmatically; swallow the echo once.
        self._noSelectionSlot = False
    else:
        shape = self.canvas.selectedShape
        if shape:
            self.shapesToItems[shape].setSelected(True)
        else:
            self.labelList.clearSelection()
    self.actions.delete.setEnabled(selected)
    self.actions.copy.setEnabled(selected)
    self.actions.edit.setEnabled(selected)
    self.actions.shapeLineColor.setEnabled(selected)
    self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
    """Register *shape* in the label list widget and the two lookup maps."""
    shape.paintLabel = self.displayLabelOption.isChecked()
    item = HashableQListWidgetItem(shape.label)
    item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
    item.setCheckState(Qt.Checked)
    item.setBackground(generateColorByText(shape.label))
    self.itemsToShapes[item] = shape
    self.shapesToItems[shape] = item
    self.labelList.addItem(item)
    # At least one shape exists now: enable shape-dependent actions.
    for action in self.actions.onShapesPresent:
        action.setEnabled(True)
def remLabel(self, shape):
    """Remove *shape*'s entry from the label list widget and both lookup maps."""
    if shape is None:
        # print('rm empty label')
        return
    item = self.shapesToItems[shape]
    self.labelList.takeItem(self.labelList.row(item))
    del self.shapesToItems[shape]
    del self.itemsToShapes[item]
def loadLabels(self, shapes):
    """Create canvas shapes from parsed annotation tuples.

    Each tuple is (label, points, line_color, fill_color, difficult);
    a missing color falls back to one derived from the label text.
    """
    s = []
    for label, points, line_color, fill_color, difficult in shapes:
        shape = Shape(label=label)
        for x, y in points:
            shape.addPoint(QPointF(x, y))
        shape.difficult = difficult
        shape.close()
        s.append(shape)
        if line_color:
            shape.line_color = QColor(*line_color)
        else:
            shape.line_color = generateColorByText(label)
        if fill_color:
            shape.fill_color = QColor(*fill_color)
        else:
            shape.fill_color = generateColorByText(label)
        self.addLabel(shape)
    self.canvas.loadShapes(s)
def saveLabels(self, annotationFilePath):
    """Persist current canvas shapes to *annotationFilePath*.

    The format follows the active mode: Pascal VOC XML, YOLO txt, or the
    native LabelFile format. Returns True on success, False on error.
    """
    annotationFilePath = ustr(annotationFilePath)
    if self.labelFile is None:
        self.labelFile = LabelFile()
        self.labelFile.verified = self.canvas.verified

    def format_shape(s):
        # Serialize one Shape to the dict layout LabelFile expects.
        return dict(label=s.label,
                    line_color=s.line_color.getRgb(),
                    fill_color=s.fill_color.getRgb(),
                    points=[(p.x(), p.y()) for p in s.points],
                    # add chris
                    difficult = s.difficult)
    shapes = [format_shape(shape) for shape in self.canvas.shapes]
    # Can add differrent annotation formats here
    try:
        if self.usingPascalVocFormat is True:
            if annotationFilePath[-4:].lower() != ".xml":
                annotationFilePath += XML_EXT
            self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
                                               self.lineColor.getRgb(), self.fillColor.getRgb())
        elif self.usingYoloFormat is True:
            if annotationFilePath[-4:].lower() != ".txt":
                annotationFilePath += TXT_EXT
            self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
                                          self.lineColor.getRgb(), self.fillColor.getRgb())
        else:
            self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
                                self.lineColor.getRgb(), self.fillColor.getRgb())
        print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
        return True
    except LabelFileError as e:
        self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
        return False
def copySelectedShape(self):
    """Duplicate the currently selected shape and select the copy."""
    self.addLabel(self.canvas.copySelectedShape())
    # fix copy and delete
    self.shapeSelectionChanged(True)
def labelSelectionChanged(self):
    """Select the canvas shape matching the chosen label-list item."""
    item = self.currentItem()
    if item and self.canvas.editing():
        # Suppress the echo from shapeSelectionChanged once.
        self._noSelectionSlot = True
        self.canvas.selectShape(self.itemsToShapes[item])
        shape = self.itemsToShapes[item]
        # Add Chris
        self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
    """React to an edit of a label-list item: rename the shape or toggle visibility."""
    shape = self.itemsToShapes[item]
    label = item.text()
    if label != shape.label:
        shape.label = item.text()
        shape.line_color = generateColorByText(shape.label)
        self.setDirty()
    else:  # User probably changed item visibility
        self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
    """Pop-up and give focus to the label editor.

    position MUST be in global coordinates.
    """
    if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
        if len(self.labelHist) > 0:
            self.labelDialog = LabelDialog(
                parent=self, listItem=self.labelHist)
        # Sync single class mode from PR#106
        if self.singleClassMode.isChecked() and self.lastLabel:
            text = self.lastLabel
        else:
            text = self.labelDialog.popUp(text=self.prevLabelText)
            self.lastLabel = text
    else:
        # Default-label checkbox is active: skip the dialog entirely.
        text = self.defaultLabelTextLine.text()
    # Add Chris
    self.diffcButton.setChecked(False)
    if text is not None:
        self.prevLabelText = text
        generate_color = generateColorByText(text)
        shape = self.canvas.setLastLabel(text, generate_color, generate_color)
        self.addLabel(shape)
        if self.beginner():  # Switch to edit mode.
            self.canvas.setEditing(True)
            self.actions.create.setEnabled(True)
        else:
            self.actions.editMode.setEnabled(True)
        self.setDirty()
        if text not in self.labelHist:
            self.labelHist.append(text)
    else:
        # Dialog was cancelled: discard the just-drawn shape.
        # self.canvas.undoLastLine()
        self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
    """Scroll the given axis by a mouse-wheel delta (120 units per notch)."""
    units = - delta / (8 * 15)
    bar = self.scrollBars[orientation]
    bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
    """Set an absolute zoom percentage and switch to manual zoom mode."""
    self.actions.fitWidth.setChecked(False)
    self.actions.fitWindow.setChecked(False)
    self.zoomMode = self.MANUAL_ZOOM
    self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
    """Change the zoom percentage by *increment* (relative step)."""
    self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
    """Zoom in/out around the current cursor position.

    After scaling, the scrollbars are adjusted so the point under the
    cursor stays (approximately) under the cursor.
    """
    # get the current scrollbar positions
    # calculate the percentages ~ coordinates
    h_bar = self.scrollBars[Qt.Horizontal]
    v_bar = self.scrollBars[Qt.Vertical]
    # get the current maximum, to know the difference after zooming
    h_bar_max = h_bar.maximum()
    v_bar_max = v_bar.maximum()
    # get the cursor position and canvas size
    # calculate the desired movement from 0 to 1
    # where 0 = move left
    #       1 = move right
    # up and down analogous
    cursor = QCursor()
    pos = cursor.pos()
    relative_pos = QWidget.mapFromGlobal(self, pos)
    cursor_x = relative_pos.x()
    cursor_y = relative_pos.y()
    w = self.scrollArea.width()
    h = self.scrollArea.height()
    # the scaling from 0 to 1 has some padding
    # you don't have to hit the very leftmost pixel for a maximum-left movement
    margin = 0.1
    move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
    move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
    # clamp the values from 0 to 1
    move_x = min(max(move_x, 0), 1)
    move_y = min(max(move_y, 0), 1)
    # zoom in
    units = delta / (8 * 15)
    scale = 10
    self.addZoom(scale * units)
    # get the difference in scrollbar values
    # this is how far we can move
    d_h_bar_max = h_bar.maximum() - h_bar_max
    d_v_bar_max = v_bar.maximum() - v_bar_max
    # get the new scrollbar values
    new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
    new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
    h_bar.setValue(new_h_bar_value)
    v_bar.setValue(new_v_bar_value)
def setFitWindow(self, value=True):
    """Toggle fit-whole-image zoom mode (mutually exclusive with fit-width)."""
    if value:
        self.actions.fitWidth.setChecked(False)
    self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
    self.adjustScale()
def setFitWidth(self, value=True):
    """Toggle fit-to-width zoom mode (mutually exclusive with fit-window)."""
    if value:
        self.actions.fitWindow.setChecked(False)
    self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
    self.adjustScale()
def togglePolygons(self, value):
    """Show or hide every shape by toggling its label-list check state."""
    for item, shape in self.itemsToShapes.items():
        item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def loadFile(self, filePath=None):
    """Load the specified file, or the last opened file if None."""
    self.resetState()
    self.canvas.setEnabled(False)
    if filePath is None:
        filePath = self.settings.get(SETTING_FILENAME)
    # Make sure that filePath is a regular python string, rather than QString
    filePath = ustr(filePath)
    unicodeFilePath = ustr(filePath)
    # Tzutalin 20160906 : Add file list and dock to move faster
    # Highlight the file item
    if unicodeFilePath and self.fileListWidget.count() > 0:
        # NOTE(review): list.index raises ValueError if the path is not
        # in mImgList — relies on the widget mirroring mImgList.
        index = self.mImgList.index(unicodeFilePath)
        fileWidgetItem = self.fileListWidget.item(index)
        fileWidgetItem.setSelected(True)
    if unicodeFilePath and os.path.exists(unicodeFilePath):
        if LabelFile.isLabelFile(unicodeFilePath):
            try:
                self.labelFile = LabelFile(unicodeFilePath)
            except LabelFileError as e:
                self.errorMessage(u'Error opening file',
                                  (u"<p><b>%s</b></p>"
                                   u"<p>Make sure <i>%s</i> is a valid label file.")
                                  % (e, unicodeFilePath))
                self.status("Error reading %s" % unicodeFilePath)
                return False
            self.imageData = self.labelFile.imageData
            self.lineColor = QColor(*self.labelFile.lineColor)
            self.fillColor = QColor(*self.labelFile.fillColor)
            self.canvas.verified = self.labelFile.verified
        else:
            # Load image:
            # read data first and store for saving into label file.
            self.imageData = read(unicodeFilePath, None)
            self.labelFile = None
            self.canvas.verified = False
        image = QImage.fromData(self.imageData)
        if image.isNull():
            self.errorMessage(u'Error opening file',
                              u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
            self.status("Error reading %s" % unicodeFilePath)
            return False
        self.status("Loaded %s" % os.path.basename(unicodeFilePath))
        self.image = image
        self.filePath = unicodeFilePath
        self.canvas.loadPixmap(QPixmap.fromImage(image))
        if self.labelFile:
            self.loadLabels(self.labelFile.shapes)
        self.setClean()
        self.canvas.setEnabled(True)
        self.adjustScale(initial=True)
        self.paintCanvas()
        self.addRecentFile(self.filePath)
        self.toggleActions(True)
        # Label xml file and show bound box according to its filename
        # if self.usingPascalVocFormat is True:
        if self.defaultSaveDir is not None:
            basename = os.path.basename(
                os.path.splitext(self.filePath)[0])
            xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
            txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
            """Annotation file priority:
            PascalXML > YOLO
            """
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
        else:
            # No save dir configured: look next to the image itself.
            xmlPath = os.path.splitext(filePath)[0] + XML_EXT
            txtPath = os.path.splitext(filePath)[0] + TXT_EXT
            if os.path.isfile(xmlPath):
                self.loadPascalXMLByFilename(xmlPath)
            elif os.path.isfile(txtPath):
                self.loadYOLOTXTByFilename(txtPath)
        self.setWindowTitle(__appname__ + ' ' + filePath)
        # Default : select last item if there is at least one item
        if self.labelList.count():
            self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
            self.labelList.item(self.labelList.count()-1).setSelected(True)
        self.canvas.setFocus(True)
        return True
    return False
def resizeEvent(self, event):
    """Keep the image scaled to the window while in a fit zoom mode."""
    if self.canvas and not self.image.isNull()\
            and self.zoomMode != self.MANUAL_ZOOM:
        self.adjustScale()
    super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
    """Apply the current zoom value (percent) to the canvas and repaint."""
    assert not self.image.isNull(), "cannot paint null image"
    self.canvas.scale = 0.01 * self.zoomWidget.value()
    self.canvas.adjustSize()
    self.canvas.update()
def adjustScale(self, initial=False):
    """Recompute the zoom percentage for the current (or initial) zoom mode."""
    value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
    self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
    """Compute the zoom factor that fits the whole pixmap into the main widget."""
    eps = 2.0  # Small margin so no scrollbars are generated.
    area = self.centralWidget()
    avail_w = area.width() - eps
    avail_h = area.height() - eps
    avail_aspect = avail_w / avail_h
    pix_w = self.canvas.pixmap.width() - 0.0
    pix_h = self.canvas.pixmap.height() - 0.0
    pix_aspect = pix_w / pix_h
    # The relatively wider dimension limits the scale.
    if pix_aspect >= avail_aspect:
        return avail_w / pix_w
    return avail_h / pix_h
def scaleFitWidth(self):
    """Compute the zoom factor that fits the pixmap width to the main widget."""
    # The epsilon does not seem to work too well here.
    usable_width = self.centralWidget().width() - 2.0
    return usable_width / self.canvas.pixmap.width()
def closeEvent(self, event):
    """Persist window geometry and user settings when the app closes."""
    if not self.mayContinue():
        event.ignore()
    # NOTE(review): there is no `return` after event.ignore(), so the
    # settings below are written even when the close is being cancelled —
    # confirm this is intentional before changing it.
    settings = self.settings
    # If it loads images from dir, don't load it at the begining
    if self.dirname is None:
        settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
    else:
        settings[SETTING_FILENAME] = ''
    settings[SETTING_WIN_SIZE] = self.size()
    settings[SETTING_WIN_POSE] = self.pos()
    settings[SETTING_WIN_STATE] = self.saveState()
    settings[SETTING_LINE_COLOR] = self.lineColor
    settings[SETTING_FILL_COLOR] = self.fillColor
    settings[SETTING_RECENT_FILES] = self.recentFiles
    settings[SETTING_ADVANCE_MODE] = not self._beginner
    if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
        settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
    else:
        settings[SETTING_SAVE_DIR] = ''
    if self.lastOpenDir and os.path.exists(self.lastOpenDir):
        settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
    else:
        settings[SETTING_LAST_OPEN_DIR] = ''
    settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
    settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
    settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
    settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
    settings.save()
def loadRecent(self, filename):
    """Open a file chosen from the recent-files menu (after an unsaved-changes check)."""
    if self.mayContinue():
        self.loadFile(filename)
def scanAllImages(self, folderPath):
    """Recursively collect absolute paths of all readable images under *folderPath*.

    Accepted extensions come from Qt's supported image formats; the
    result is sorted case-insensitively.
    """
    extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
    images = []
    for root, dirs, files in os.walk(folderPath):
        for file in files:
            if file.lower().endswith(tuple(extensions)):
                relativePath = os.path.join(root, file)
                path = ustr(os.path.abspath(relativePath))
                images.append(path)
    images.sort(key=lambda x: x.lower())
    return images
def changeSavedirDialog(self, _value=False):
    """Let the user pick the directory where annotations will be saved."""
    if self.defaultSaveDir is not None:
        path = ustr(self.defaultSaveDir)
    else:
        path = '.'
    dirpath = ustr(QFileDialog.getExistingDirectory(self,
                                                    '%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
                                                    | QFileDialog.DontResolveSymlinks))
    if dirpath is not None and len(dirpath) > 1:
        self.defaultSaveDir = dirpath
    self.statusBar().showMessage('%s . Annotation will be saved to %s' %
                                 ('Change saved folder', self.defaultSaveDir))
    self.statusBar().show()
def openAnnotationDialog(self, _value=False):
    """Pick a Pascal VOC XML file and load its annotations for the current image."""
    if self.filePath is None:
        self.statusBar().showMessage('Please select image first')
        self.statusBar().show()
        return
    path = os.path.dirname(ustr(self.filePath))\
        if self.filePath else '.'
    if self.usingPascalVocFormat:
        filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
        filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose a xml file' % __appname__, path, filters))
        if filename:
            if isinstance(filename, (tuple, list)):
                # PyQt5 returns (filename, selected_filter).
                filename = filename[0]
            self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None):
    """Pick a directory of images and import its contents."""
    if not self.mayContinue():
        return
    defaultOpenDirPath = dirpath if dirpath else '.'
    if self.lastOpenDir and os.path.exists(self.lastOpenDir):
        defaultOpenDirPath = self.lastOpenDir
    else:
        defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
    targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
                                                          '%s - Open Directory' % __appname__, defaultOpenDirPath,
                                                          QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
    self.importDirImages(targetDirPath)
def importDirImages(self, dirpath):
    """Load all images under *dirpath* into the file list and open the first one."""
    if not self.mayContinue() or not dirpath:
        return
    self.lastOpenDir = dirpath
    self.dirname = dirpath
    self.filePath = None
    self.fileListWidget.clear()
    self.mImgList = self.scanAllImages(dirpath)
    self.openNextImg()
    for imgPath in self.mImgList:
        item = QListWidgetItem(imgPath)
        self.fileListWidget.addItem(item)
def verifyImg(self, _value=False):
    """Toggle the 'verified' flag on the current image's label file and persist it.

    Idiom fix: ``self.labelFile != None`` replaced with ``is not None``
    (identity comparison, per PEP 8). Behavior is otherwise unchanged.
    """
    # Proceed without a dialog whenever an image is open.
    if self.filePath is not None:
        try:
            self.labelFile.toggleVerify()
        except AttributeError:
            # The label file does not exist yet: create it by saving,
            # then re-toggle so the verified attribute is stored.
            self.saveFile()
            if self.labelFile is not None:
                self.labelFile.toggleVerify()
            else:
                return
        self.canvas.verified = self.labelFile.verified
        self.paintCanvas()
        self.saveFile()
def openPrevImg(self, _value=False):
    """Step to the previous image in the folder list.

    With auto-save enabled, the current annotation is saved first (or
    the user is asked to pick a save directory).
    """
    # Proceding prev image without dialog if having any label
    if self.autoSaving.isChecked():
        if self.defaultSaveDir is not None:
            if self.dirty is True:
                self.saveFile()
        else:
            self.changeSavedirDialog()
            return
    if not self.mayContinue():
        return
    if len(self.mImgList) <= 0:
        return
    if self.filePath is None:
        return
    currIndex = self.mImgList.index(self.filePath)
    if currIndex - 1 >= 0:
        filename = self.mImgList[currIndex - 1]
        if filename:
            self.loadFile(filename)
def openNextImg(self, _value=False):
    """Step to the next image (or open the first when none is open yet).

    With auto-save enabled, the current annotation is saved first (or
    the user is asked to pick a save directory).
    """
    # Proceding prev image without dialog if having any label
    if self.autoSaving.isChecked():
        if self.defaultSaveDir is not None:
            if self.dirty is True:
                self.saveFile()
        else:
            self.changeSavedirDialog()
            return
    if not self.mayContinue():
        return
    if len(self.mImgList) <= 0:
        return
    filename = None
    if self.filePath is None:
        filename = self.mImgList[0]
    else:
        currIndex = self.mImgList.index(self.filePath)
        if currIndex + 1 < len(self.mImgList):
            filename = self.mImgList[currIndex + 1]
    if filename:
        self.loadFile(filename)
def openFile(self, _value=False):
    """Show a file dialog and load the chosen image or label file."""
    if not self.mayContinue():
        return
    path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
    formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
    filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
    filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
    if filename:
        if isinstance(filename, (tuple, list)):
            # PyQt5 returns (filename, selected_filter).
            filename = filename[0]
        self.loadFile(filename)
def saveFile(self, _value=False):
    """Save annotations into defaultSaveDir, or next to the image otherwise."""
    if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
        if self.filePath:
            imgFileName = os.path.basename(self.filePath)
            savedFileName = os.path.splitext(imgFileName)[0]
            savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
            self._saveFile(savedPath)
    else:
        imgFileDir = os.path.dirname(self.filePath)
        imgFileName = os.path.basename(self.filePath)
        savedFileName = os.path.splitext(imgFileName)[0]
        savedPath = os.path.join(imgFileDir, savedFileName)
        # First save with no label file yet goes through Save As.
        self._saveFile(savedPath if self.labelFile
                       else self.saveFileDialog(removeExt=False))
def saveFileAs(self, _value=False):
    """Save annotations to a user-chosen path."""
    assert not self.image.isNull(), "cannot save empty image"
    self._saveFile(self.saveFileDialog())
def saveFileDialog(self, removeExt=True):
    """Ask the user for a save path; optionally strip the chosen extension.

    Returns '' when the dialog is cancelled.
    """
    caption = '%s - Choose File' % __appname__
    filters = 'File (*%s)' % LabelFile.suffix
    openDialogPath = self.currentPath()
    dlg = QFileDialog(self, caption, openDialogPath, filters)
    dlg.setDefaultSuffix(LabelFile.suffix[1:])
    dlg.setAcceptMode(QFileDialog.AcceptSave)
    filenameWithoutExtension = os.path.splitext(self.filePath)[0]
    dlg.selectFile(filenameWithoutExtension)
    dlg.setOption(QFileDialog.DontUseNativeDialog, False)
    if dlg.exec_():
        fullFilePath = ustr(dlg.selectedFiles()[0])
        if removeExt:
            return os.path.splitext(fullFilePath)[0]  # Return file path without the extension.
        else:
            return fullFilePath
    return ''
def _saveFile(self, annotationFilePath):
    """Write labels to *annotationFilePath* and report success in the status bar."""
    if annotationFilePath and self.saveLabels(annotationFilePath):
        self.setClean()
        self.statusBar().showMessage('Saved to  %s' % annotationFilePath)
        self.statusBar().show()
def closeFile(self, _value=False):
    """Close the current image and disable image-dependent UI actions."""
    if not self.mayContinue():
        return
    self.resetState()
    self.setClean()
    self.toggleActions(False)
    self.canvas.setEnabled(False)
    self.actions.saveAs.setEnabled(False)
def resetAll(self):
    """Wipe the saved settings and restart the application process."""
    self.settings.reset()
    self.close()
    proc = QProcess()
    proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
    """Return True when it is safe to discard the current state.

    Safe means either there are no unsaved changes, or the user has
    explicitly agreed to discard them.
    """
    if not self.dirty:
        return True
    return self.discardChangesDialog()
def discardChangesDialog(self):
    """Ask the user to confirm discarding unsaved changes; True when confirmed."""
    yes, no = QMessageBox.Yes, QMessageBox.No
    msg = u'You have unsaved changes, proceed anyway?'
    return yes == QMessageBox.warning(self, u'Attention', msg, yes | no)
def errorMessage(self, title, message):
    """Show a critical message box; returns the pressed button code."""
    return QMessageBox.critical(self, title,
                                '<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
    """Return the directory of the currently opened file, or '.' when none is open."""
    if self.filePath:
        return os.path.dirname(self.filePath)
    return '.'
def chooseColor1(self):
    """Pick a new global line color and apply it to the canvas and new shapes."""
    color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                      default=DEFAULT_LINE_COLOR)
    if color:
        self.lineColor = color
        # Change the color for all shapes drawn from now on.
        Shape.line_color = color
        self.canvas.setDrawingColor(color)
        self.canvas.update()
        self.setDirty()
def deleteSelectedShape(self):
    """Delete the selected shape; disable shape actions when none remain."""
    self.remLabel(self.canvas.deleteSelected())
    self.setDirty()
    if self.noShapes():
        for action in self.actions.onShapesPresent:
            action.setEnabled(False)
def chshapeLineColor(self):
    """Change the line color of the currently selected shape only."""
    color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
                                      default=DEFAULT_LINE_COLOR)
    if color:
        self.canvas.selectedShape.line_color = color
        self.canvas.update()
        self.setDirty()
def chshapeFillColor(self):
    """Change the fill color of the currently selected shape only."""
    color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
                                      default=DEFAULT_FILL_COLOR)
    if color:
        self.canvas.selectedShape.fill_color = color
        self.canvas.update()
        self.setDirty()
def copyShape(self):
    """Finish a canvas drag as a copy and register the new shape."""
    self.canvas.endMove(copy=True)
    self.addLabel(self.canvas.selectedShape)
    self.setDirty()
def moveShape(self):
    """Finish a canvas drag as a move and mark the annotation dirty."""
    self.canvas.endMove(copy=False)
    self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
    """Append one class name per line of *predefClassesFile* to labelHist.

    A missing file is silently ignored; labelHist is created on first use.
    """
    if not os.path.exists(predefClassesFile):
        return
    with codecs.open(predefClassesFile, 'r', 'utf8') as handle:
        for raw_line in handle:
            class_name = raw_line.strip()
            if self.labelHist is None:
                self.labelHist = [class_name]
            else:
                self.labelHist.append(class_name)
def loadPascalXMLByFilename(self, xmlPath):
    """Load Pascal VOC annotations from *xmlPath* for the current image."""
    if self.filePath is None:
        return
    if os.path.isfile(xmlPath) is False:
        return
    self.set_format(FORMAT_PASCALVOC)
    tVocParseReader = PascalVocReader(xmlPath)
    shapes = tVocParseReader.getShapes()
    self.loadLabels(shapes)
    self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
    """Load YOLO-format annotations from *txtPath* for the current image.

    Fix: removed the leftover debug ``print(shapes)`` that spammed stdout
    on every annotation load (its Pascal VOC counterpart never printed).
    """
    if self.filePath is None:
        return
    if not os.path.isfile(txtPath):
        return
    self.set_format(FORMAT_YOLO)
    tYoloParseReader = YoloReader(txtPath, self.image)
    shapes = tYoloParseReader.getShapes()
    self.loadLabels(shapes)
    self.canvas.verified = tYoloParseReader.verified
def togglePaintLabelsOption(self):
    """Propagate the 'display labels' checkbox state to every canvas shape."""
    show_labels = self.displayLabelOption.isChecked()
    for shape in self.canvas.shapes:
        shape.paintLabel = show_labels
def toogleDrawSquare(self):
    """Constrain drawing to squares according to the options checkbox.

    NOTE(review): the name keeps the historical 'toogle' typo —
    presumably it is connected to a signal elsewhere; renaming would
    break those callers, so it is left as-is.
    """
    self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def inverted(color):
    """Return the component-wise inverse of a QColor.

    NOTE(review): QColor.getRgb() returns (r, g, b, a), so the alpha
    channel is inverted as well — confirm this is intended.
    """
    return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
    """Return the raw bytes of *filename*, or *default* if it cannot be read.

    Fixes: the file handle is now closed deterministically via ``with``,
    and only I/O errors fall back to *default* — programming errors
    (e.g. a non-path argument) propagate instead of being silently
    swallowed by the previous bare ``except``.
    """
    try:
        with open(filename, 'rb') as f:
            return f.read()
    except (IOError, OSError):
        return default
def get_main_app(argv=[]):
    """
    Standard boilerplate Qt application code.
    Do everything but app.exec_() -- so that we can test the application in one thread
    """
    # NOTE(review): mutable default argument; harmless here because argv
    # is never mutated, but callers should pass sys.argv explicitly.
    app = QApplication(argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("app"))
    # Tzutalin 201705+: Accept extra agruments to change predefined class file
    # Usage : labelImg.py image predefClassFile saveDir
    win = MainWindow(argv[1] if len(argv) >= 2 else None,
                     argv[2] if len(argv) >= 3 else os.path.join(
                         os.path.dirname(sys.argv[0]),
                         'data', 'predefined_classes.txt'),
                     argv[3] if len(argv) >= 4 else None)
    win.show()
    return app, win
def main():
    '''construct main app and run it'''
    app, _win = get_main_app(sys.argv)
    # Blocks until the Qt event loop exits; its return code is our exit code.
    return app.exec_()
# Script entry point: exit with the Qt event loop's return code.
if __name__ == '__main__':
    sys.exit(main())
| [
"1531002208@qq.com"
] | 1531002208@qq.com |
b1bf135e5fda6f5f89bcad4b6a5e4891984e99e2 | e2d56728bf7cd4506367bedf58e6f9f33ffd74b8 | /youTubePlaylistTime.py | 1f7a259a3b8b0dcaa2ba1a44f785e327a8ef3fc2 | [] | no_license | trilok01/youtubePlaylistTime | 69b11e9fab0056b44599f6dfd7c0e2f19befb7ec | 0b563bb4599fff846ba1bdd6978742292eaf03fb | refs/heads/main | 2023-06-04T06:27:02.174236 | 2021-06-19T15:03:02 | 2021-06-19T15:03:02 | 378,438,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
# Path to the local ChromeDriver binary (Windows default install location).
PATH = 'C:\\Program Files (x86)\\chromedriver.exe'
driver = webdriver.Chrome(PATH)
time.sleep(5)

playlistURL = input('\n\n\nEnter Youtube playlist URL: ')

# Open playlist
driver.get(playlistURL)
time.sleep(5)

# Read the video count from the playlist sidebar.
# Fix: default to '0' so a failed lookup no longer crashes with a
# NameError at int(videoNum) below.
videoNum = '0'
try:
    videoNum = (driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-playlist-sidebar-renderer/div/ytd-playlist-sidebar-primary-info-renderer/div[1]/yt-formatted-string[1]/span[1]')).get_attribute('innerHTML')
except NoSuchElementException:
    print(' Element not found')

# Calculate the total playlist time in seconds.
# Fix: the try/except now wraps a single lookup, so one missing video no
# longer aborts the rest of the scan.
totalTime = 0
for i in range(1, int(videoNum) + 1):
    try:
        timePath = driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-two-column-browse-results-renderer/div[1]/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-playlist-video-list-renderer/div[3]/ytd-playlist-video-renderer[' + str(i) + ']/div[2]/div[1]/ytd-thumbnail/a/div[1]/ytd-thumbnail-overlay-time-status-renderer/span')
    except NoSuchElementException:
        print(' Video number ' + str(i) + ' not found')
        continue
    # The timestamp is 'SS', 'MM:SS' or 'HH:MM:SS'; fold it left-to-right.
    videoSeconds = 0
    for part in timePath.get_attribute('innerHTML').split(':'):
        videoSeconds = videoSeconds * 60 + int(part)
    totalTime += videoSeconds

# Print playlist Name
try:
    playlistName = driver.find_element_by_css_selector('a.yt-simple-endpoint.style-scope.yt-formatted-string').get_attribute('innerHTML')
    print('\n\n ' + playlistName)
except NoSuchElementException:
    print(' Playlist Name not found')

# Print total time.
# Fix: the H/M/S breakdown is now always printed; previously playlists
# shorter than one hour only ever got the raw seconds line (minutes were
# never computed for durations under 3600 s).
print('\n\n ' + str(totalTime) + ' Seconds')
hours, remainder = divmod(totalTime, 3600)
minutes, seconds = divmod(remainder, 60)
print('\n\n ' + str(hours) + ' Hours ' + str(minutes) + ' Minutes ' + str(seconds) + ' Seconds')

time.sleep(2)
driver.close() | [
"noreply@github.com"
] | trilok01.noreply@github.com |
3726d7e50f4e7e01a69b4c4868584ae6e4ceac20 | 8fbcf2903d74c346bbf9e51b569db810a8f1a8fd | /detect-video-iou.py | 155275bc80de153b02a1323ad030940070b1bb4f | [] | no_license | chanbunlee/UI-Demo-for-object-detection | a45ead288203b2b74cf66ecc68db102302bde369 | 22e70a2d4212f905ae2650af7c969b9cb020b0c2 | refs/heads/main | 2023-06-06T03:12:54.228920 | 2021-06-23T09:37:39 | 2021-06-23T09:37:39 | 379,546,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,605 | py | import argparse
import os
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from compute_IOU import del_f_useIOU
def detect(save_img=False):
    """Run YOLOv5 inference on ``opt.source`` and draw IOU-filtered boxes.

    Detections are post-filtered by ``del_f_useIOU`` to merge/drop
    overlapping boxes before drawing; results are optionally displayed
    and written (image or video) into ``opt.save_dir``. Reads the
    module-level ``opt`` namespace populated by argparse.
    """
    out, source, weights, view_img, save_txt, imgsz = \
        opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16
    # Second-stage classifier (disabled by default).
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        view_img = True
        dataset = LoadImages(source, img_size=imgsz)
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()
        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s
            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                label_list = []
                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_img or view_img:  # Add bbox to image
                        label_list.append(
                            [names[int(cls)], conf, int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])
                # IOU post-filtering of overlapping boxes.
                label_list1, nameList = del_f_useIOU(label_list)
                # NOTE(review): this loop reuses `i`, shadowing the
                # detection index of the enclosing enumerate loop.
                for i in range(len(label_list1)):
                    print(nameList[i])
                    label = '{} {:.2f}'.format(nameList[i], label_list1[i][1])
                    top = label_list1[i][2]
                    left = label_list1[i][3]
                    bottom = label_list1[i][4]
                    right = label_list1[i][5]
                    cv2.rectangle(im0, (top, left), (bottom, right), [0, 255, 0], 3)  # filled
                    cv2.putText(im0, label, (top, left - 2), 0, 0.8, [0, 0, 255], thickness=2,
                                lineType=cv2.LINE_AA)
            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))
            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                    vid_writer.write(im0)
    if save_img:
        print('Results saved to %s' % Path(out))
    print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
    # Command-line interface; `opt` is read as a module-level global by detect().
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='weights/last.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='video/2.mp4', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-dir', type=str, default='results', help='directory to save results')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    print(opt)
    # Inference only: gradients are never needed.
    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
| [
"chanbunlee@163.com"
] | chanbunlee@163.com |
bfc18eaa66e8178ea1f6ceae0421145d57bb023a | 8821970a489ea190ab7dd6a2da8f672681138543 | /piston/web/__init__.py | acfc64d31c318a7da92d7c776160f900b4897930 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | grey580/piston | ab293d449728c9fbcc442adc0463135628548deb | 5a3472517e2de14e75eb688cf9335b2c98c3e6f4 | refs/heads/master | 2021-01-09T05:35:52.587268 | 2017-02-02T09:37:50 | 2017-02-02T09:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | import re
from ..utils import strfdelta, strfage
from ..storage import configStorage as configStore
from .app import app, socketio
from ..steem import SteemConnector
from . import views, assets
import logging
log = logging.getLogger(__name__)
steem = SteemConnector().getSteem()
__ALL__ = [
"app",
"assets",
"forms",
"socketio",
"views",
]
@app.template_filter('age')
def _jinja2_filter_age(date, fmt=None):
""" Format a datatime as age
"""
return strfage(date, fmt)
@app.template_filter('excert')
def _jinja2_filter_datetime(data):
""" Extract an excert of a post
"""
words = data.split(" ")
return " ".join(words[:100])
@app.template_filter('parseBody')
def _jinja2_filter_parseBody(body):
""" Pre-process the body of a post before
showing in the UI
"""
body = re.sub(
r"^(https?:.*/(.*\.(jpg|png|gif))\??.*)",
r"\n\n",
body, flags=re.MULTILINE)
return body
@app.template_filter('currency')
def _jinja2_filter_currency(value):
""" Format the crypto tokens properly
:param float value: The amount to format as string
"""
return "{:,.3f}".format(value)
def run(port, host):
""" Run the Webserver/SocketIO and app
"""
socketio.run(app,
debug=configStore.get("web:debug"),
host=host,
port=port)
# FIXME: Don't use .run()
# from gevent.wsgi import WSGIServer
# from yourapplication import app
# http_server = WSGIServer(('', 5000), app)
# http_server.serve_forever()
| [
"mail@xeroc.org"
] | mail@xeroc.org |
be8b3285716a1e4bdb01c378d9222a6232ea5d36 | b803e175e8080ebf8b713b09f81925a6336a8443 | /Word.py | fe21f15d0a189d1372eba36d42ef25ba307a4ca0 | [] | no_license | NDudeck/MachineLearningTest | dd346fbe35ac080c791142c0e9ec00e67626268d | 4d5f352a981a65b9de8b4e969ed4993a88b4369b | refs/heads/master | 2022-04-02T21:45:41.101882 | 2020-02-06T16:16:32 | 2020-02-06T16:16:32 | 197,981,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 12 11:26:16 2019
@author: ndudeck
This script defines a function object. To construct a function object, it
needs a lambda function to create.
"""
import numpy as np; #This does some math stuff easily
class Word:
#Any variable defined here will be shared by all Function objects
rate = 1501; # resolution of samples
sets = 399;
def __init__(self,data):
#This function takes the input data and places it into the training matrix
self.trainY = data;
self.fish_m = np.zeros((self.rate,1));
self.fish_m[:,0] = np.mean(self.trainY[:,0:self.sets-1], axis=1);
def fish_class(self):
#builds the things we need to classify for fisher 2 class
self.fish_m = np.zeros((self.rate,1));
self.fish_m[:,0] = np.mean(self.trainY[:,0:self.sets-1], axis=1);
self.fish_Sw = np.zeros((self.rate,self.rate));
for i in range(0,self.sets):
self.fish_Sw = self.fish_Sw + \
np.matmul(self.trainY[:,i].reshape(self.rate,1) - self.fish_m[:,0], \
(self.trainY[:,i].reshape(self.rate,1) - self.fish_m[:,0]).T)
def fish_sb(self, m):
return self.sets*np.matmul((self.fish_m - m),(self.fish_m - m).T)
def fish_5class(self):
self.fish_K = 5 #number of classes
self.fish_D = self.rate #number of dimensions
self.fish_m = np.zeros((self.rate,1))
self.fish_m[:,0] = np.mean(self.trainY[:,0:self.sets-1], axis = 1)
self.fish_Sw = np.zeros((self.rate,self.rate))
for i in range(0,self.sets):
self.fish_Sw = self.fish_Sw + \
np.matmul(self.trainY[:,i].reshape(self.rate,1) - self.fish_m[:,0], \
(self.trainY[:,i].reshape(self.rate,1) - self.fish_m[:,0]).T)
| [
"noreply@github.com"
] | NDudeck.noreply@github.com |
34d6d85b02c3b8d0e8734802762acb51523c3fa1 | b56c584ba04de13c7a05f6633893b318eb3fb19d | /课后作业/第五天作业/guoqijun/Chapter 12/scapy_ping_one_new.py | a64b040c34aa4916e5a8141ec9405dfbff24807a | [] | no_license | I318928/Python-Homework | 42133f1291cc3da90293f994ae1a09dce618bdad | 139b450f4bf2e4831688df80c12f43edcc00e468 | refs/heads/master | 2020-06-01T06:53:50.733061 | 2019-04-17T12:12:16 | 2019-04-17T12:12:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
from scapy.all import *
def qytang_ping(ip):
ping_pkt = IP(dst=ip) / ICMP()
ping_result = sr1(ping_pkt, timeout=1, verbose=False)
if ping_result:
return ip, 1
else:
return ip, 0
if __name__ == '__main__':
result = qytang_ping('192.168.220.129')
if result[1]:
print(result[0], '通!')
else:
print(result[0], '不通!')
| [
"xiaoyang429670@gmail.com"
] | xiaoyang429670@gmail.com |
14b05dbce16975b542d6409149a24a4079334f70 | 3b4f985759e44dc169134ae7dcee8e92747c4b01 | /tests/tests_app/components/multi_node/test_trainer.py | 249d7868652bb9800f69f27af8365d32b7063452 | [
"Apache-2.0"
] | permissive | SkafteNicki/pytorch-lightning | 4b09863bf222241ca7128d13df94ff60b71e50aa | 7df627b43746a85aa87671bec3e6dada0d98b556 | refs/heads/master | 2023-07-15T21:20:02.468216 | 2023-05-04T08:12:33 | 2023-05-04T08:12:33 | 248,216,299 | 3 | 1 | Apache-2.0 | 2023-07-10T02:40:24 | 2020-03-18T11:44:20 | Python | UTF-8 | Python | false | false | 3,538 | py | import os
from copy import deepcopy
from functools import partial
from unittest import mock
import pytest
from lightning_utilities.core.imports import module_available
from lightning_utilities.test.warning import no_warning_call
import pytorch_lightning as pl
from lightning.app.components.multi_node.trainer import _LightningTrainerRunExecutor
def dummy_callable(**kwargs):
t = pl.Trainer(**kwargs)
return t._all_passed_kwargs
def dummy_init(self, **kwargs):
self._all_passed_kwargs = kwargs
def _get_args_after_tracer_injection(**kwargs):
with mock.patch.object(pl.Trainer, "__init__", dummy_init):
ret_val = _LightningTrainerRunExecutor.run(
local_rank=0,
work_run=partial(dummy_callable, **kwargs),
main_address="1.2.3.4",
main_port=5,
node_rank=6,
num_nodes=7,
nprocs=8,
)
env_vars = deepcopy(os.environ)
return ret_val, env_vars
def check_lightning_pytorch_and_mps():
if module_available("pytorch_lightning"):
return pl.accelerators.MPSAccelerator.is_available()
return False
@pytest.mark.skipif(not check_lightning_pytorch_and_mps(), reason="pytorch_lightning and mps are required")
@pytest.mark.parametrize("accelerator_given,accelerator_expected", [("cpu", "cpu"), ("auto", "cpu"), ("gpu", "cpu")])
def test_trainer_run_executor_mps_forced_cpu(accelerator_given, accelerator_expected):
warning_str = (
r"Forcing accelerator=cpu as other accelerators \(specifically MPS\) are not supported "
+ "by PyTorch for distributed training on mps capable devices"
)
if accelerator_expected != accelerator_given:
warning_context = pytest.warns(UserWarning, match=warning_str)
else:
warning_context = no_warning_call(match=warning_str + "*")
with warning_context:
ret_val, env_vars = _get_args_after_tracer_injection(accelerator=accelerator_given)
assert ret_val["accelerator"] == accelerator_expected
@pytest.mark.parametrize(
"args_given,args_expected",
[
({"devices": 1, "num_nodes": 1, "accelerator": "gpu"}, {"devices": 8, "num_nodes": 7, "accelerator": "auto"}),
({"strategy": "ddp_spawn"}, {"strategy": "ddp"}),
({"strategy": "ddp_sharded_spawn"}, {"strategy": "ddp_sharded"}),
],
)
@pytest.mark.skipif(not module_available("torch"), reason="PyTorch is not available")
def test_trainer_run_executor_arguments_choices(
args_given: dict,
args_expected: dict,
):
if pl.accelerators.MPSAccelerator.is_available():
args_expected.pop("accelerator", None) # Cross platform tests -> MPS is tested separately
ret_val, env_vars = _get_args_after_tracer_injection(**args_given)
for k, v in args_expected.items():
assert ret_val[k] == v
assert env_vars["MASTER_ADDR"] == "1.2.3.4"
assert env_vars["MASTER_PORT"] == "5"
assert env_vars["GROUP_RANK"] == "6"
assert env_vars["RANK"] == str(0 + 6 * 8)
assert env_vars["LOCAL_RANK"] == "0"
assert env_vars["WORLD_SIZE"] == str(7 * 8)
assert env_vars["LOCAL_WORLD_SIZE"] == "8"
assert env_vars["TORCHELASTIC_RUN_ID"] == "1"
@pytest.mark.skipif(not module_available("lightning"), reason="lightning not available")
def test_trainer_run_executor_invalid_strategy_instances():
with pytest.raises(ValueError, match="DDP Spawned strategies aren't supported yet."):
_, _ = _get_args_after_tracer_injection(strategy=pl.strategies.DDPStrategy(start_method="spawn"))
| [
"noreply@github.com"
] | SkafteNicki.noreply@github.com |
7c0f8ef2e5e76dd512f4593f86eb29756a26e302 | be6e1acc03149aee1ffbdaa315cf8b7d175fffe9 | /event_log.py | 6d347539f3034a82bf2d2298b62c74976e512faf | [
"MIT"
] | permissive | rebcabin/cartpoleplusplus | 763c22d41fc6f13b01a1519da3b51de91cfd03f7 | f986f495755369f571dcbb9a79d21680b916c0f4 | refs/heads/master | 2020-04-05T08:27:28.420983 | 2018-11-24T01:17:47 | 2018-11-24T01:17:47 | 156,716,591 | 0 | 0 | MIT | 2018-11-08T14:10:29 | 2018-11-08T14:10:28 | null | UTF-8 | Python | false | false | 6,521 | py | #!/usr/bin/env python
import event_pb2
import gzip
import matplotlib.pyplot as plt
import numpy as np
import StringIO
import struct
def rgb_to_png(rgb):
"""convert RGB data from render to png"""
sio = StringIO.StringIO()
plt.imsave(sio, rgb)
return sio.getvalue()
def png_to_rgb(png_bytes):
"""convert png (from rgb_to_png) to RGB"""
# note PNG is always RGBA so we need to slice off A
rgba = plt.imread(StringIO.StringIO(png_bytes))
return rgba[:,:,:3]
def read_state_from_event(event):
"""unpack state from event (i.e. inverse of add_state_to_event)"""
if len(event.state[0].render) > 0:
num_repeats = len(event.state)
num_cameras = len(event.state[0].render)
eg_render = event.state[0].render[0]
state = np.empty((eg_render.height, eg_render.width, 3,
num_cameras, num_repeats))
for r_idx in range(num_repeats):
repeat = event.state[r_idx]
for c_idx in range(num_cameras):
png_bytes = repeat.render[c_idx].png_bytes
state[:,:,:,c_idx,r_idx] = png_to_rgb(png_bytes)
else:
state = np.empty((len(event.state), 2, 7))
for i, s in enumerate(event.state):
state[i][0] = s.cart_pose
state[i][1] = s.pole_pose
return state
class EventLog(object):
def __init__(self, path, use_raw_pixels):
self.log_file = open(path, "ab")
self.episode_entry = None
self.use_raw_pixels = use_raw_pixels
def reset(self):
if self.episode_entry is not None:
# *sigh* have to frame these ourselves :/
# (a long as a header-len will do...)
buff = self.episode_entry.SerializeToString()
if len(buff) > 0:
buff_len = struct.pack('=l', len(buff))
self.log_file.write(buff_len)
self.log_file.write(buff)
self.log_file.flush()
self.episode_entry = event_pb2.Episode()
def add_state_to_event(self, state, event):
"""pack state into event"""
if self.use_raw_pixels:
# TODO: be nice to have pose info here too in the pixel case...
num_repeats = state.shape[4]
for r_idx in range(num_repeats):
s = event.state.add()
num_cameras = state.shape[3]
for c_idx in range(num_cameras):
render = s.render.add()
render.width = state.shape[1]
render.height = state.shape[0]
render.png_bytes = rgb_to_png(state[:,:,:,c_idx,r_idx])
else:
num_repeats = state.shape[0]
for r in range(num_repeats):
s = event.state.add()
s.cart_pose.extend(map(float, state[r][0]))
s.pole_pose.extend(map(float, state[r][1]))
def add(self, state, action, reward):
event = self.episode_entry.event.add()
self.add_state_to_event(state, event)
if isinstance(action, int):
event.action.append(action) # single action
else:
assert action.shape[0] == 1 # never log batch operations
event.action.extend(map(float, action[0]))
event.reward = reward
def add_just_state(self, state):
event = self.episode_entry.event.add()
self.add_state_to_event(state, event)
class EventLogReader(object):
def __init__(self, path):
if path.endswith(".gz"):
self.log_file = gzip.open(path, "rb")
else:
self.log_file = open(path, "rb")
def entries(self):
episode = event_pb2.Episode()
while True:
buff_len_bytes = self.log_file.read(4)
if len(buff_len_bytes) == 0: return
buff_len = struct.unpack('=l', buff_len_bytes)[0]
buff = self.log_file.read(buff_len)
episode.ParseFromString(buff)
yield episode
def make_dir(d):
if not os.path.exists(d):
os.makedirs(d)
if __name__ == "__main__":
import argparse, os, sys, Image, ImageDraw
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log-file', type=str, default=None)
parser.add_argument('--echo', action='store_true', help="write event to stdout")
parser.add_argument('--episodes', type=str, default=None,
help="if set only process these specific episodes (comma separated list)")
parser.add_argument('--img-output-dir', type=str, default=None,
help="if set output all renders to this DIR/e_NUM/s_NUM.png")
parser.add_argument('--img-debug-overlay', action='store_true',
help="if set overlay image with debug info")
# TODO args for episode range
opts = parser.parse_args()
episode_whitelist = None
if opts.episodes is not None:
episode_whitelist = set(map(int, opts.episodes.split(",")))
if opts.img_output_dir is not None:
make_dir(opts.img_output_dir)
total_num_read_episodes = 0
total_num_read_events = 0
elr = EventLogReader(opts.log_file)
for episode_id, episode in enumerate(elr.entries()):
if episode_whitelist is not None and episode_id not in episode_whitelist:
continue
if opts.echo:
print "-----", episode_id
print episode
total_num_read_episodes += 1
total_num_read_events += len(episode.event)
if opts.img_output_dir is not None:
dir = "%s/ep_%05d" % (opts.img_output_dir, episode_id)
make_dir(dir)
make_dir(dir + "/c0") # HACK: assume only max two cameras
make_dir(dir + "/c1")
for event_id, event in enumerate(episode.event):
for state_id, state in enumerate(event.state):
for camera_id, render in enumerate(state.render):
assert camera_id in [0, 1], "fix hack above"
# open RGB png in an image canvas
img = Image.open(StringIO.StringIO(render.png_bytes))
if opts.img_debug_overlay:
canvas = ImageDraw.Draw(img)
# draw episode and event number in top left
canvas.text((0, 0), "%d %d" % (episode_id, event_id), fill="black")
# draw simple fx/fy representation in bottom right...
# a bounding box
bx, by, bw = 40, 40, 10
canvas.line((bx-bw,by-bw, bx+bw,by-bw, bx+bw,by+bw, bx-bw,by+bw, bx-bw,by-bw), fill="black")
# then a simple fx/fy line
fx, fy = event.action[0], event.action[1]
canvas.line((bx,by, bx+(fx*bw), by+(fy*bw)), fill="black")
# write it out
img = img.resize((200, 200))
filename = "%s/c%d/e%05d_r%d.png" % (dir, camera_id, event_id, state_id)
img.save(filename)
print >>sys.stderr, "read", total_num_read_episodes, "episodes for a total of", total_num_read_events, "events"
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
2335ab5ef2d8e308ecae55994013a93003c46571 | 44d3091c8a93f49aebbdf2f9ffd2d81b44fbbaf9 | /23_list_check/list_check.py | 646ea04db8e24244e34bf88fb8ecb866cc37ac8e | [] | no_license | Tim-Birk/python-ds-practice | 9d54d4f5fb601c55de5371cbc83b7da696273758 | 9990672cf52b8e4d7be7425dabbbc954909f2886 | refs/heads/master | 2023-02-05T07:58:09.969208 | 2020-12-28T15:53:57 | 2020-12-28T15:53:57 | 324,801,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
return len([item for item in lst if type(item) == list]) == len(lst)
| [
"timbirkmire@yahoo.com"
] | timbirkmire@yahoo.com |
9c0f49814adb26d4b6bde24af40fb90845ccac80 | d34da4a69ebef62d4b17b8b56f0eca72f0c021e4 | /traffic_sign/subset_coco.py | 4e117ea2e1cf5dff2b36cba086a24552a7c93498 | [] | no_license | gy20073/aws | 91c193e18a15ab4d20acf9d58078bda791b39c38 | 1d73ce215026b1baa91a359628c26edeb59a22ce | refs/heads/master | 2020-03-19T03:55:24.406320 | 2019-10-12T05:35:30 | 2019-10-12T05:35:30 | 135,775,172 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | import os
# for each label file, check whether stop sign in it.
# if do, then create new label file with only stop sign, in label dir, and add an entry of this image in the index file
subset="train2014"
label_path = "/scratch/yang/aws_data/coco/labels_bak/" + subset
out_path = "/scratch/yang/aws_data/coco/labels/" + subset
image_prefix = "/scratch/yang/aws_data/coco/images/" + subset
index_file = "/scratch/yang/aws_data/coco/filtered_" + subset + ".txt"
if not os.path.exists(out_path):
os.mkdir(out_path)
# 11 is stop sign
def filter_stop_sign(fname):
with open(fname, "r") as f:
lines = f.readlines()
out = []
for line in lines:
if line.startswith("11 "):
out.append("0 " + line[3:])
return out
def write_label(oname, filtered):
with open(oname, "w") as f:
for l in filtered:
f.write(l)
index = open(index_file, "w")
for file in os.listdir(label_path):
if file.endswith(".txt"):
filtered = filter_stop_sign(os.path.join(label_path, file))
if len(filtered) > 0:
# save the label
write_label(os.path.join(out_path, file), filtered)
# save the image name
index.write(os.path.join(image_prefix, file.replace(".txt", ".jpg")) + "\n")
index.close() | [
"gy20073@gmail.com"
] | gy20073@gmail.com |
2f032123bf187d3ba42c9136a45b41b4a8508e62 | bdee85d47d4fcdca9fa70a75de6f4317a015c1f8 | /dailyfresh/df_goods/apps.py | 64c11b12d446edf6485ebc43170ad5da11180c46 | [] | no_license | renjw234/django | ac8d6cdb7fe54bb9625ae5cdd07cd93e13a62135 | 5b25b9e754001697f6bc5043bbcf80c1c3201f66 | refs/heads/master | 2020-05-18T00:38:57.267327 | 2019-04-30T01:24:56 | 2019-04-30T01:24:56 | 184,068,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class DfGoodsConfig(AppConfig):
name = 'df_goods'
| [
"renjw234@126.com"
] | renjw234@126.com |
44b7e6b025a9917ce35e63a322c922264b4455b4 | 5922398212b6e113f416a54d37c2765d7d119bb0 | /python/O(1) Check Power of 2.py | d664954d5d2b872362cab07d682b5469322e34d5 | [] | no_license | CrazyCoder4Carrot/lintcode | e777f73e1fdfe3b8abc9dbfc07d26602bf614151 | 33dcd7f0e2d9bee58840a3370837cb2db82de1eb | refs/heads/master | 2021-01-09T20:38:59.813198 | 2017-01-16T22:34:26 | 2017-01-16T22:34:26 | 60,287,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | class Solution:
"""
@param n: An integer
@return: True or false
"""
def checkPowerOf2(self, n):
# write your code here
if n == 0:
return False
return n&(n-1) == 0 | [
"liuzhenbang1988@gmail.com"
] | liuzhenbang1988@gmail.com |
56da055426260d4e0c162b598de7480b3212e963 | 23a7baccb26c45c7c00da896d870ff0518c5fe75 | /ex5.py | 61f60c42b90dc5e2ac0cd85281071869a3427aea | [] | no_license | JMadamba/LPTHW | 01f2fd5877972eb261c89e5b3d0289a9227ec1f3 | 63871ba1318d1ffffe65fdf13b4f2744140238bc | refs/heads/master | 2021-05-09T08:45:22.005050 | 2018-01-29T15:36:05 | 2018-01-29T15:36:05 | 119,401,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | name = 'Zed A.Shaw'
age = 35
height = 74
weight = 180
eyes= 'Blue'
teeth = 'White'
hair = 'Brown'
convert = height * 2.54
convertWeight = weight *2.20462
print "Let's talk about %s." % name
print "He's %d centimeters tall." % convert
print "He's %d kilograms heavy." % convertWeight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee" % teeth
print "If I add %d, %d, and %d, I get %d." %(age, convert, convertWeight, age + convert + convertWeight) | [
"Jmadamba@tonicdesign.com"
] | Jmadamba@tonicdesign.com |
74c3ac8794131b67abc018da9e9b245ba196e45a | bd241c395589b147b12632821f74171aba313801 | /tanimoto.py | 816352b55a23164b390326b3894d200fda2ce12f | [] | no_license | timakin/dustbox | 133e1babec854905d801650dd29273c26c1dcc1b | de87022c9b64139bf8dd37b111438a9ab27a49de | refs/heads/master | 2021-01-17T04:21:58.961809 | 2015-04-28T12:23:58 | 2015-04-28T12:23:58 | 9,006,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tanimoto係数。 A,Bという2つの集合のアイテムの類似度を示す。
# Cは共通集合を示している。クラスタリングされた集合の類似度を示している。
def tanimoto(a,b):
c=[v for v in a if v in b]
return float(len(c))/(len(a)+len(b)+len(c)) | [
"timaki.st@gmail.com"
] | timaki.st@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.