blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
373a588df61c9ab9c7435a2a7a4e9cffb1a9a2e5 | fead9e8a0aac4fba5463dc4b44c546dc5c1deca4 | /1.B-7.py | 03ac02afc8c23237a861d8e7fb5b14d41cf28c90 | [] | no_license | Lana1308/Praktika | 65e4720c834e39b457d24c4c106f0eb565f1cc96 | 6a2cfcf487d38c1b1295ed1a7b84d1af5331aa90 | refs/heads/master | 2020-03-21T03:30:35.761571 | 2018-06-20T19:13:59 | 2018-06-20T19:13:59 | 138,057,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | import datetime
def printTimeStamp(name):
print('Автор програми: ' + name)
print('Час компіляції: ' + str(datetime.datetime.now()))
name = printTimeStamp("Lana")
house = input("Человечек, где ты живешь? (будинок/квартира/гуртожиток) ")
time = int(input("Человечек, сколько ты проводишь времени дома? "))
if house == "будинок" and time >= 18:
print ("В’єтнамське порося")
elif house == "будинок" and 10 <= time <= 18:
print ("Собака")
elif house == "будинок" and time < 10:
print ("Змія")
elif house == "квартира" and time >= 10:
print ("Кішка")
elif house == "квартира" and time < 10:
print ("Хом’як")
elif house == "гуртожиток" and time >= 6:
print ("Рибки")
elif house == "гуртожиток" and time < 6:
print ("Мурашник")
elif house == "будинок" and time < 0:
print ("ОШИБКА")
elif house == "квартира" and time < 0:
print ("ОШИБКА")
else:
print ("ОШИБКА")
| [
"noreply@github.com"
] | noreply@github.com |
662a6ec1ebac3ecb09c0568da4afd30b9dd69504 | 1770ba18aa32f7bd05caf2a1a572593cc778ae20 | /Spotkanie1/osobowe.py | 59c17f38ddf358dab77b5862065636f814a97d91 | [] | no_license | kukulap/Warsztaty_Python | 1fd325869f6651150abdf9bbed46698bfcbbce56 | ea82a200036065ad8af867f376aabb61b374eb78 | refs/heads/master | 2020-04-10T08:45:40.043958 | 2018-12-09T11:18:57 | 2018-12-09T11:18:57 | 160,913,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | import bmi
class Osoba:
def __init__(self, imie, nazwisko, masa, wzrost):
self.imie = imie
self.nazwisko = nazwisko
self.masa = masa
self.wzrost = wzrost
def __str__(self):
return "{0}: {1} {2}, BMI: {3}".format(str(self.__class__.__name__), self.imie, self.nazwisko, bmi.bmi(self.masa, self.wzrost))
class Pracownik(Osoba):
def __init__(self, imie, nazwisko, masa, wzrost, pensja):
Osoba.__init__(self, imie, nazwisko, masa, wzrost)
self.pensja = pensja
def wyplata(self):
return self.pensja * 1.2
"""
def __str__(self):
return "Pracownik" + self.imie + " " + self.nazwisko
"""
class Kierownik(Pracownik):
"""
def __init__(self, imie, nazwisko, pensja):
Pracownik.__init__(self, imie, nazwisko, pensja)
"""
def wyplata(self):
return super().wyplata() + 1200.0
o = Osoba("Jan", "Kowalski", 90, 1.77)
p = Pracownik("Jan", "Nowak", 80, 1.75, 2250)
#print("{0} wypłata {1}".format(p, p.wyplata()))
k = Kierownik("Anna", "", 60, 1.65, 5000)
#print(k.wyplata())
print(k)
print(o)
print(p)
| [
"kapron.patrycja@gmail.com"
] | kapron.patrycja@gmail.com |
dbf85cc91b6700aa01a987a8b6c086f8ad2d04a3 | cdbf700691b2397408d690890be078f988052c3f | /Modified_deblurgan/train.py | a2624434ff4fc0d1eef503db348c46c9138b8d13 | [] | no_license | dongguk-dm/MDG_CNN | 8dd1fe00b051624f96ba85e185d41e0eaa5daead | f3c7e48ad7421719dbcfe57a1b9f8029c6fddcf6 | refs/heads/master | 2023-06-11T07:19:43.711770 | 2021-07-01T04:38:22 | 2021-07-01T04:38:22 | 381,562,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | import time
import easydict
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util.metrics import PSNR, SSIM
from multiprocessing import freeze_support
import os
def print_current_errors(epoch, i, errors, t, opt):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in errors.items():
message += '%s: %.3f ' % (k, v)
print(message)
log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(log_name, "a") as log_file:
log_file.write('%s\n' % message)
def train(opt, data_loader, model, visualizer):
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
total_steps = 0
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
model.set_input(data)
model.optimize_parameters()
if total_steps % opt.display_freq == 0:
results = model.get_current_visuals()
psnrMetric = PSNR(results['Restored_Train'], results['Sharp_Train'])
print('PSNR on Train = %f, Learning_rate = %.4f' % (psnrMetric, model.old_lr))
# visualizer.display_current_results(results, epoch)
if total_steps % opt.print_freq == 0:
errors = model.get_current_errors()
t = (time.time() - iter_start_time) / opt.batchSize
print_current_errors(epoch, epoch_iter, errors, t, opt)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.save('latest')
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.save('latest')
model.save(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
if epoch == 50:
model.update_learning_rate()
if __name__ == '__main__':
freeze_support()
opt = easydict.EasyDict({
#train option
"lr" : 0.0005,
"save_latest_freq" : 1000,
"save_epoch_freq" : 1,
"continue_train" : False,
"epoch_count" : 1,
"phase" : "train",
"which_epoch" : "latest",
"niter" : 70,
"niter_decay" : 30,
"beta1" : 0.5,
"lambda_A" : 100.0,
"lambda_B" : 10.0,
"identity" : 0.0,
"pool_size" : 50,
"no_html" : None,
#base option
"dataroot" : "train_no_flipping/",
"learn_residual" : True,
"dataset_mode" : "unaligned",
"checkpoints_dir" : "checkpoints/",
"batchSize": 4,
"loadSizeX": 256,
"loadSizeY": 256,
"fineSize": 256,
"input_nc": 3,
"output_nc": 3,
"ngf" : 64,
"ndf" : 64,
"which_model_netD" : "basic",
"which_model_netG" : "resnet_6blocks",
"gan_type" : "wgan-gp",
"n_layers_D" : 3,
"gpu_ids" : [0],
"name" : "",
"model" : "content_gan",
"which_direction" : "AtoB",
"nThreads" : 4,
"norm" : "batch",
"serial_batches" : True,
"isTrain" : True,
"no_dropout" : True,
"max_dataset_size" : float("inf"),
"display_freq": 100,
"print_freq": 100,
"display_id" : 1,
"display_port" : 8097,
"display_winsize" : 256,
"display_single_pane_ncols" : 0,
"resize_or_crop" : "resize"
})
opt.learn_residual = True
opt.fineSize = 256
opt.gan_type ="wgan-gp"
data_loader = CreateDataLoader(opt)
model = create_model(opt)
visualizer = Visualizer(opt)
train(opt, data_loader, model, Visualizer)
| [
"choijh1027@gmail.com"
] | choijh1027@gmail.com |
a126c611b817d111e72b64053c7ae3059f49d4c7 | ff7e78a7fa02ed3a5e56fd9ef37ac0442d4d2413 | /confs/test_stopsignal.py | 71b4c086cadf3d621b0e5ff6612b7a85e40f9a46 | [] | no_license | gbersac/taskmaster_42 | ed796963e875ef2bbb688b9100466bdcbe78832e | 050443ade7583aac84159a24ac8c5a021a30f074 | refs/heads/master | 2020-03-30T13:33:42.062239 | 2015-08-19T16:08:08 | 2015-08-19T16:08:08 | 40,602,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | #!/nfs/zfs-student-3/users/2013/gbersac/.brew/bin/python3
import signal
def signal_handler(sig, frame):
if sig == signal.SIGINT:
print("SIGINT")
if sig == signal.SIGUSR1:
print("SIGUSR1")
if sig == signal.SIGQUIT:
print("SIGQUIT")
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
while True:
pass
| [
"bersac_1@hotmail.fr"
] | bersac_1@hotmail.fr |
15e513d4b634a7e0f9eeee344370860eeb75acfc | c6c90528f11298835ea3d5af78fa65c64cd9f2b3 | /project/models.py | 47bfeaa9257c38938f652e6f3d242372dc12539a | [
"MIT"
] | permissive | Eiriksak/Folder_structure | a1565f61091984f6c23efadf6957fe720cf0a280 | df9aba3232ab4265a5bb766326f64ca694b7b644 | refs/heads/master | 2022-12-11T18:55:51.836957 | 2018-10-02T16:12:39 | 2018-10-02T16:12:39 | 150,384,697 | 0 | 0 | null | 2022-12-08T02:54:34 | 2018-09-26T07:17:01 | Python | UTF-8 | Python | false | false | 855 | py | from project import db
# Lager en klasse for table = banan
class Bananer(db.Model):
__tablename__ = 'bananer'
# Setter hvilke kolonner vi skal ha og eventuelle constraints
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
type = db.Column(db.String)
# Lager en konstruktør for når vi skal lage og sette inn ting i tabellen
# I tilfelle med autoincrement id trenger vi ikke ha den her siden sql automatisk oppdaterer denne selv
def __init__(self, type):
self.type = type
def __repr__(self):
return '<title {}'.format(self.name)
class Epler(db.Model):
__tablename__ = 'epler'
id = db.Column(db.Integer, primary_key=True)
amount = db.Column(db.Integer)
def __init__(self, amount):
self.amount = amount
def __repr__(self):
return '<title {}'.format(self.name) | [
"eiriksak@hotmail.com"
] | eiriksak@hotmail.com |
5d55e8bc244b5488c6992ca537ab21c8079faf1d | b4e9544d5e98438804ba4f1c69b7d56b24da1072 | /QuicksortInPlace.py | 93cf338c05a8e63da29eda8a6457142357b75f61 | [] | no_license | taylortom/AlgorithmicFuntimes | 946f57d22bdc726e2f2888711d7092cb9499dae2 | 76f337701134ded06e3fdff8375e5259060f7632 | refs/heads/master | 2020-05-07T16:48:05.007295 | 2012-07-21T19:13:01 | 2012-07-21T19:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | #
# In-place Quicksort
# Time: O(n log n) O(n2) worst-case
# Space: O(log n)
#
# Partition algorithm moves all elements less than a pivot point to one part of the array,
# and all elements larger to the other part, returning the final position of the pivot in the array.
#
# Each call to the algorithm reduces the array by at least one element, since the element at pivotNewIndex
# is placed in its final position.
#
import Random
# the list to sort
listToSort = Random.createRandomList(20)
# swaps the two elements in the passed list
def swap(list, index1, index2):
index1Val = list[index1]
index2Val = list[index2]
list[index1] = index2Val
list[index2] = index1Val
# sorts the
def partition(list, left, right, pivotIndex):
pivotVal = list[pivotIndex]
swap(list, pivotIndex, right)
storeIndex = left
for index in range(left, right):
if list[index] < pivotVal:
swap(list, index, storeIndex)
storeIndex = storeIndex + 1
swap(list, storeIndex, right)
return storeIndex
# sort the list
def quicksort(list, left, right):
if left < right:
# get a pivot index
pivotIndex = Random.getRandomInt(len(listToSort))
while pivotIndex < left or pivotIndex > right:
pivotIndex = Random.getRandomInt(len(listToSort))
pivotNewIndex = partition(list, left, right, pivotIndex)
# Recursively sort elements on each side
quicksort(list, left, pivotNewIndex - 1)
quicksort(list, pivotNewIndex + 1, right)
quicksort(listToSort, 0, len(listToSort) - 1)
print "SORTED GUV:", listToSort
| [
"tom@taylortom.co.uk"
] | tom@taylortom.co.uk |
1031decef22a5f8e9fa6d0446887620f1a17bbd6 | cb95b3a2714f003e76c5e1db1d3e4726f87f14d8 | /pstests/launch_schevers.py | 50881808378a6bad2b948300e21df85af51ae09c | [
"Apache-2.0"
] | permissive | DMALab/Het | 5aaa9fda1b8c77c0db24a477fe1eccd9665a9fe0 | 81b7e9f0f593108db969fc46a1af3df74b825230 | refs/heads/main | 2023-03-30T13:22:03.085283 | 2021-04-04T05:31:43 | 2021-04-04T05:31:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | from athena import gpu_ops as ad
import os
import sys
import yaml
import multiprocessing
import signal
def main():
def start_scheduler(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "scheduler"
print('Scheduler starts...')
ad.scheduler_init()
ad.scheduler_finish()
def start_server(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "server"
print('Server starts...')
ad.server_init()
ad.server_finish()
def signal_handler(sig, frame):
print("SIGINT signal caught, stop Training")
for proc in server_procs:
proc.kill()
sched_proc.kill()
exit(0)
if len(sys.argv) == 1:
settings = yaml.load(open('./settings/dist_s1.yml').read(), Loader=yaml.FullLoader)
else:
file_path = sys.argv[1]
suffix = file_path.split('.')[-1]
if suffix == 'yml':
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
else:
assert False, 'File type not supported.'
print('Scheduler and servers settings:')
print(settings)
server_procs = []
for key, value in settings.items():
if key == 'shared':
continue
elif key == 'sched':
sched_proc = multiprocessing.Process(target=start_scheduler, args=(value,))
sched_proc.start()
else:
server_procs.append(multiprocessing.Process(target=start_server, args=(value,)))
server_procs[-1].start()
signal.signal(signal.SIGINT, signal_handler)
for proc in server_procs:
proc.join()
sched_proc.join()
if __name__ == '__main__':
main()
| [
"swordonline@foxmail.com"
] | swordonline@foxmail.com |
f6b131bbddadded5e915501ce5a719b1e74ce352 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/cloudformation/checks/resource/aws/APIGatewayXray.py | 79b7ec85c6b5ac40b0aaa6c2c422267e4a656db6 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 710 | py | from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class APIGatewayXray(BaseResourceValueCheck):
def __init__(self):
name = "Ensure API Gateway has X-Ray Tracing enabled"
id = "CKV_AWS_73"
supported_resources = ['AWS::ApiGateway::Stage', "AWS::Serverless::Api"]
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'Properties/TracingEnabled'
def get_expected_value(self):
return True
check = APIGatewayXray()
| [
"noreply@github.com"
] | noreply@github.com |
8cf5a4624636da19f870de78212d63a1d1b37fb1 | b767d70e2f169997ed92842f0488b24f615d8467 | /src/decision/scripts/gazebo_polt.py | f9526c443d71cfca82513ba7ec568fa07851a30b | [] | no_license | Taospirit/Dynamic_Navigation | cd111e12b1c61c413ec06c39f789995fbbe30a46 | ae57a4b676d44b95b865f66ea72fbaa83fa7f99b | refs/heads/master | 2021-08-22T23:38:44.785864 | 2021-01-09T07:50:34 | 2021-01-09T07:50:34 | 239,330,707 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpathes
import numpy as np
num = 7
file = os.path.abspath(os.path.dirname(__file__)) + '/' + str(num) + '.txt'
p_list = []
obs_goal_3 = [3.0, -2.0]
obs_goal_4 = [[-1, -2], [2, 2]]
obs_goal_5 = [[5, -2], [7, 2]]
obs_goal_6 = [[5, 2], [3, -2]]
obs_goal_7 = [[2, -3], [5, 5]]
def show_plot(p_list):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
def plot_robot(ax, p_list, color='r', marker='*', size=0.3, step=1):
# p_list = [[x1, y1], [x2, y2], ... , [xn, yn]]
count = 0
for i, item in enumerate(p_list):
ax.add_patch(mpathes.Circle(item, 0, color=color, fill=False))
if i % step == 0 or i == len(p_list)-1:
ax.add_patch(mpathes.Circle(item, size, color=color, fill=False))
ax.text(item[0], item[1], str(round(i*0.1,2)), fontsize=14)
count += 1
x, y = [p[0] for p in p_list], [p[1] for p in p_list]
ax.plot(x, y, marker=marker, color=color, markersize=2)
ax.add_patch(mpathes.Circle([7.0, 0.0], 0.3, color='r', fill=True))
ax.add_patch(mpathes.Circle(obs_goal_7[0], 0.3, color='g', fill=True))
ax.add_patch(mpathes.Circle(obs_goal_7[-1], 0.3, color='g', fill=True))
color = ['b', 'black', 'brown', 'red', 'sienna', 'darkorange', 'gold', 'y', '']
marker = ['*', 'o', 'x', '1']
plot_robot(ax, p_list[0], color='b', marker='*', size=0.3, step=5)
for i in range(1, len(p_list)):
plot_robot(ax, p_list[i], color=color[i], marker=marker[i], size=0.3, step=10)
plt.xlim((-1, 9))
plt.ylim((-5, 5))
# 设置坐标轴刻度
plt.xticks(np.arange(-1, 9, 1))
plt.yticks(np.arange(-5, 5, 1))
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.grid()
plt.show()
# plt.savefig(path)
if __name__ == "__main__":
a_list = []
o1_list = []
o2_list = []
with open(file, 'r') as f:
for line in f.readlines():
if line[0] == '#':
continue
# lines = f.readlines()
# for i in range(len(lines)):
# if i % 5 == 0:
# line = lines[i]
# lines = f.readlines()
datas = line.split(';')
data = datas[0].split(':')[-1]
# print(datas)
x, y, t, v, w = data.split(',')
a_list.append([float(x), float(y)])
data = datas[1].split(':')[-1]
x, y, t, v, w = data.split(',')
o1_list.append([float(x), float(y)])
if num >= 4:
data = datas[2].split(':')[-1]
x, y, t, v, w = data.split(',')
o2_list.append([float(x), float(y)])
# a_list.append([7.0, 0.0])
# print(a_list)
# print(o_list)
p_list.append(a_list)
p_list.append(o1_list)
p_list.append(o2_list)
# print(p_list)
print(a_list)
print(np.array(p_list).shape)
# print(o_list)
show_plot(p_list) | [
"lintao209@outlook.com"
] | lintao209@outlook.com |
8a478a87698b48f534a445a89fb732282d8b8ac1 | fc6d79382f75f5cebeaae856b255b5d4fba1665e | /News_Website/settings.py | 0053cb6cdcd8e5d26e235b06c0e6dc30dd83b621 | [] | no_license | amalpmathews2003/NewsApp | db3260e09a41b401e04d51beb823f97817fd707e | 84c8b19f3168049c38d34c3f9327a08043b607a1 | refs/heads/master | 2023-09-02T12:41:17.345822 | 2021-10-25T08:30:14 | 2021-10-25T08:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,199 | py | """
Django settings for News_Website project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-fph$!!2ujpn@t-n4(p-ep5l4kco_ahf9y=77dba4v@lptf0+r)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'News_App',
'User_App',
'rest_framework' , 'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
# Additional configuration settings
SOCIALACCOUNT_QUERY_EMAIL = True
ACCOUNT_LOGOUT_ON_GET= True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'News_Website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['Templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'News_Website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME':os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media/')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK={'DEFAULT_PERMISSION_CLASSES': ['rest_framework.permissions.AllowAny' ]}
CORS_ORIGIN_ALLOW_ALL = True
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
]
import dj_database_url
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'dei0abvg4p7ou4',
# 'USER': 'cyuambjqapmeid',
# 'PASSWORD': '71cd494c18e9a6ae011ae58635c61584760d10dff67d7a0e034ccabc44bafe9c',
# 'HOST': 'ec2-50-19-210-145.compute-1.amazonaws.com',
# 'PORT': '5432',
# }
# }
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
WHITENOISE_USE_FINDERS = True
import django_heroku
django_heroku.settings(locals())
| [
"amalpmathews2003@gmail.com"
] | amalpmathews2003@gmail.com |
d421917e4f4ed68d314b39a5b997b54d20bb18ed | c20e238e5e9c4793f69ce9b20a12771b1ac334df | /models/__init__.py | 2d678b4a2b0e2cb9b5ab53f9056f5a69f255cace | [] | no_license | SweatMeansWin/Sea-Battle-Game-Engine | 466b4e09f7c16709ca205bba451559cb7ccf77ea | b1929203bea4ebdf350343b876ebecd2639d0c16 | refs/heads/master | 2020-03-21T19:07:52.338779 | 2018-06-20T19:19:20 | 2018-06-25T18:01:51 | 138,931,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | """
Export all classes
"""
from .battle_field import BattleField
from .player import Player
from .ship import Ship
| [
"evgeniyasheis@gmail.com"
] | evgeniyasheis@gmail.com |
342bd4082ce36f826efd3b77b90a432568375e8b | 6b79e61dfb966c3c934c851de2f22129f2cfa3ce | /entertainment/entertainment/wsgi.py | a22500090a813752239a4694b5bdde1af49995a4 | [] | no_license | jrwallace1983/django_entertainment | 5a64cde5b7239e89d960c3d0cdc01936d318873c | 1f0b0cf88d8e67fe660812448997f5173f744f43 | refs/heads/master | 2021-01-01T20:03:02.290344 | 2017-07-30T05:44:35 | 2017-07-30T05:44:35 | 98,752,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for entertainment project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "entertainment.settings")
application = get_wsgi_application()
| [
"jrwallace1983@gmail.com"
] | jrwallace1983@gmail.com |
afe09cf1138edb28b3e71b6c42799e2c565c0415 | 6917bb4d0fbc37a4f3ed131f9ab93213f65a7edd | /Leetcode/LeetCode - 1221. Split a String in Balanced Strings.py | 052b0cb668b6255b0240d4cf17b5ecd215b8e8f6 | [] | no_license | DemondLove/Python-Programming | 0e0bffe8abdfbe5a914c572e2b7182846b830955 | 0dd2332d8a863d5c0ed5573bce68de37bb947688 | refs/heads/master | 2021-06-01T15:22:58.665226 | 2020-08-09T18:35:01 | 2020-08-09T18:35:01 | 130,158,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | '''
Balanced strings are those who have equal quantity of 'L' and 'R' characters.
Given a balanced string s split it in the maximum amount of balanced strings.
Return the maximum amount of splitted balanced strings.
Example 1:
Input: s = "RLRRLLRLRL"
Output: 4
Explanation: s can be split into "RL", "RRLL", "RL", "RL", each substring contains same number of 'L' and 'R'.
Example 2:
Input: s = "RLLLLRRRLR"
Output: 3
Explanation: s can be split into "RL", "LLLRRR", "LR", each substring contains same number of 'L' and 'R'.
Example 3:
Input: s = "LLLLRRRR"
Output: 1
Explanation: s can be split into "LLLLRRRR".
Example 4:
Input: s = "RLRRRLLRLL"
Output: 2
Explanation: s can be split into "RL", "RRRLLRLL", since each substring contains an equal number of 'L' and 'R'
Constraints:
1 <= s.length <= 1000
s[i] = 'L' or 'R'
'''
class Solution:
def balancedStringSplit(self, s: str) -> int:
temp = []
nums = 0
for x in s:
temp.append(x)
if temp.count('R') == temp.count('L'):
nums+=1
temp = []
return nums
| [
"noreply@github.com"
] | noreply@github.com |
7be1f25265abad04394ea8ef6d319e4a2fa54b68 | 1b9eff84f2ba4bcbe86d502a8cdb883f01b17294 | /universalspider/configs/compile_config_rulesV3.py | 298fc57827f85611c8e345b775599ff2c68eb1ce | [] | no_license | rym-max/firstspider | f3c76b98838bf65e05cb03d8383eacd5bf16b6db | 9c03aba856195f8895ed261d9ff1ce45b81e98ab | refs/heads/master | 2020-12-06T16:37:49.544821 | 2019-09-02T00:55:12 | 2019-09-02T00:55:12 | 232,508,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,483 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : compile_config_rulesV3.py
@Time : 2019/08/30 10:41:24
@Author : Hzx
@Version : 1.0
@Contact : hzxstarcloud@hotmail.com
@Desc : None
'''
# here put the import lib
import pymssql
import json
import os
import datetime
SQL_SERVER="localhost"
SQL_PORT=1433,
SQL_DB = "Vip_Tongji"
SQL_USER = "spider_account"
SQL_PSWD = "gespider"
SQL_CONFIG_TABLE = "SPIDER_Config"
SQL_ITEM_TABLE = "SPIDER_Item"
current_path = os.getcwd()#当前文件夹
cnx = pymssql.connect(host=SQL_SERVER, user=SQL_USER,
password=SQL_PSWD,database=SQL_DB)
cur = cnx.cursor()
def insert(name="all", creator="BWM",path = current_path):
result = get_result(name,path)
#数据准备
value_item = {
"create":datetime.datetime.now(),
"creator":creator
}
#sql语句
#config
sql_item = "INSERT INTO " + SQL_ITEM_TABLE + \
" (Name,ConfigId,IsOpen,Status,Creator,Type,Interval,CreateTime,ModifyTime) "+\
" VALUES (%(name)s,1,1,0,%(creator)s,0,%(interval)s,%(create)s,%(create)s)"
sql_config = "INSERT INTO " + SQL_CONFIG_TABLE +\
" (Name,Configs,Rules,Project,Spider,Creator,CreateTime,ModifyTime)"+\
" VALUES (%(name)s,%(configs)s,%(rules)s,'universalspider',%(name)s,%(creator)s,%(create)s,%(create)s)"
#item
#存储进数据库
for k,v in result.items():
value_item.update(v)
try:
cur.execute(sql_config,value_item)
cur.execute(sql_item,value_item)
cnx.commit()
except Exception as e:
print(e)
else:
print("成功插入",k)
def delete(name='all',creator='BWM',path=current_path):
pass
def update(name='all',creator='BWM',path=current_path,**kwargs):
result = get_result(name,path)
#数据准备
value_item = {
"create":str(datetime.datetime.now()),
"creator":creator
}
value_item.update(kwargs)
#其他参数
special_item=""
for k,v in kwargs.get('item',{}).keys():
special_item += " ,"+k+"=%("+k+")s "
special_config = ""
for k,v in kwargs.get('config',{}).keys():
special_config += " ,"+k+"=%("+k+")s "
#sql语句
#item
sql_item = "UPDATE " + SQL_ITEM_TABLE + " SET " +\
"Interval=%(interval)s,ModifyTime=%(create)s" + \
special_item +\
" WHERE Name=%(name)s"
#config
sql_config = "UPDATE " + SQL_CONFIG_TABLE +" SET " +\
"Configs=%(configs)s,Rules=%(rules)s,ModifyTime=%(create)s" + \
special_config + \
" WHERE Name=%(name)s"
#存储进数据库
for k,v in result.items():
value_item.update(v)
try:
cur.execute(sql_config,value_item)
cur.execute(sql_item,value_item)
cnx.commit()
except Exception as e:
print(e)
else:
print("成功更新",k)
def select():
pass
def get_result(name,path):
#result存储需要更新的结果
result={}
#获取path下所有文件名称
files_li = os.listdir(path)
#
if name!='all':
if (name+'.json') in files_li and (name+'_rules.json') in files_li:
item ={}
item['name']=name
with open(path+"/"+name+".json","r",encoding="utf-8") as ff:
item['configs'] = ff.read()
with open(path+'/'+name+"_rules.json","r",encoding="utf-8") as ff:
item['rules'] =ff.read()
item_config = json.loads(item['configs'])
item.update({
"interval":item_config.get("INTERVAL",1)
})
result.setdefault(name,item)
else:
print(name,"该文件夹不存在")
else:
#遍历所有文件
for ffl in files_li:
#
#判断是否是文件夹
if not os.path.isdir(ffl):
fl = ffl.split("_")
if fl[-1] == "rules.json":
name = fl[0]
with open(path+"/"+ffl,"r",encoding="utf-8") as ff:
item = {
"name":name,
"rules":ff.read()
}
if name not in result.keys():
result.setdefault(name,item)
else:
result[name].update(item)
else:
fm = ffl.split(".")
if fm[-1] == "json":
name = fm[0]
with open(path + "/" +ffl,"r",encoding="utf-8") as ff:
item = {
"name":name,
"configs":ff.read()
}
item_config = json.loads(item['configs'])
#此处获取爬取周期
item.update({
"interval":item_config.get("INTERVAL",1)
})
if name not in result.keys():
result.setdefault(name,item)
else:
result[name].update(item)
else:
print(ffl,"文件格式不符!")
return result
if __name__ == "__main__":
insert()
| [
"hzxstarcloud@hotmail.com"
] | hzxstarcloud@hotmail.com |
e538aa28b1bd9e8f0574539f2c5075b7eea00ec2 | ba962c2441572ba45ff97a97bb713eb8a603a269 | /lunchmap/models.py | 2c9b100ab61729344379c83188a3554f131dd623 | [] | no_license | melonpan777/my-first-blog | 8158104ba2b3c97a8e6350ac57aac77edf85be26 | 9ff5eee69523d8fbbbd004e566090ea715b043d5 | refs/heads/master | 2020-06-04T13:58:16.704685 | 2019-06-15T11:14:00 | 2019-06-15T11:14:00 | 192,051,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from django.db import models
from django.urls import reverse
class Category(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Shop(models.Model):
name = models.CharField(max_length=255)
address = models.CharField(max_length=255)
memo = models.CharField(max_length=255, null=True)
author = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
)
category = models.ForeignKey(
Category,
on_delete=models.PROTECT,
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('lunchmap:detail', kwargs={'pk': self.pk})
| [
"you@example.com"
] | you@example.com |
98e8f1034b90045b712c41cdda10e5add841c6ee | 213d0403c263c5ce6489fab742c3fc60038e2196 | /patrollers/scripts/patrol_monitor.py | e5fcb6dac83b7cd1c939c651b235fea8b65b1509 | [] | no_license | blozano824/CS-69-Patrollers | a8b43444d7a7fffe93eb955d845dc694c1b0b292 | a87d8495e880c347b165ac89e1313462887fe0d1 | refs/heads/master | 2020-05-18T09:07:44.543311 | 2019-05-28T18:30:19 | 2019-05-28T18:30:19 | 184,315,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,167 | py | import rospy
import graph.py
class PatrolMonitor():
"""
Patrol Monitor Class for publishing velocity commands to all robots. This monitor
will track all robots movements and make specific vertex moveent command for
individual robot actions
Attributes:
publisherList (:obj: Publishers): List of publishers for each one of the
robots that we include in our simulation
graph (:obj: Graph): Graphical representation of our map loaded via a local
Graph function and then stored in our Monitor graph variable
"""
def __init__(self, ROBOT_COUNT, filepath):
self.graph = graph.load_from_file(filepath)
self.vertex_dict = graph.vertices()
self.publisherList = []
self.ROBOT_COUNT = ROBOT_COUNT
# For each Robot in our simulation, we want to create a publisher to access their
# controls and then store those publishers into a list of publishers, allowing us
# easy access to each individual Robot's actions
for i in self.ROBOT_COUNT:
newPub = rospy.Publisher("/robot_" + i + "/cmd_vel", Twist, queue_size=10)
self.publisherList.append(newPub)
def publish_new_target(self, robot_num, next_vertex):
"""
Publish New Target: This function takes in a specific robot and informs it
about the location of it's next vertex to visit through a publisher
:param robot_num: Number of Robots within the system
:param next_vertex: Next Vertex for the robot to visit
"""
robot_pub = self.publisherList[robot_num]
new_target_msg = "New Target|" + next_vertex.x + "|" + next_vertex.y
robot_pub.publish(new_target_msg)
rospy.loginfo("Monitor Message: " + new_target_msg)
def graph(self):
"""
Function to return the Monitor's graph
"""
return self.graph
def publisher_list(self):
"""
Function to return the list of publishers for all robots
"""
return self.publisherList
def __str__(self):
graph_string = self.graph.__str__()
return graph_string
| [
"noreply@github.com"
] | noreply@github.com |
af021860c198c6b94bd42066916fd67b07afa2cc | 308f847179153268f22f64ed60d4b68a22be8d16 | /venv/bin/python-config | 7a5b0828414a679bf06913d9acd8471fb6658f53 | [
"MIT"
] | permissive | lurdray/facedetect | b952262469b532626cc7fc57b9cac199f4f29aba | 112a852a4f31bd07ea724e9faa4fd9fbb444ff23 | refs/heads/master | 2022-04-13T08:15:52.516324 | 2020-04-03T16:13:58 | 2020-04-03T16:13:58 | 251,633,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | #!/home/raytech/gitdemo/facedetect/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"odiagaraymond@gmail.com"
] | odiagaraymond@gmail.com | |
dcff318e512f694dc31b8f2936e54ae11ce40712 | 638fdc6e95bee246cd8784a3a442bda584295d77 | /prj/main/management/commands/import_step.py | 79b97b35a8bf7a0f1d0f2e01c90b812ded017f8c | [] | no_license | zdimon/loyer | ac00faf94c4277eb77d6cdc51e8bf99ef2f7ecb2 | 6df6bc76599bc0fab9ef2bdb600cc3b92daf38c1 | refs/heads/master | 2020-03-27T16:03:36.358793 | 2019-04-18T10:05:18 | 2019-04-18T10:05:18 | 146,757,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,631 | py | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
import json
from prj.settings import BASE_DIR
import os
from main.models import *
import requests
from bs4 import BeautifulSoup
import os.path
import time
import json
from main.tools import headers
from main.tools import getSessionId, readSessionId, getSessionId, readPHPSessionId
import sys
import os
import random
from datetime import datetime, timedelta
from optparse import make_option
from header import copy_cookie, copy_header
def randomDelay():
sl = random.randint(3,6)
print 'sleeping %s' % sl
time.sleep(sl)
def getCountDocs(txt):
import re
if txt.find('Найдено дел: 0')>0:
return 0
#print txt
result = re.search('Найдено дел:(.*)\(отображаются', txt)
cnt = result.group(1)
cnt = cnt.replace(' ','')
#txt = txt.replace(' ','')
return int(cnt)
def getFiles(d):
soup = BeautifulSoup(d.doc_html, 'html.parser')
divs = soup.findAll('td')
#import pdb; pdb.set_trace()
for div in divs:
try:
a = div.find('a')
url = 'http://online.zakon.kz/sud/'+a['href']
txt = makeRequest(url)
f = Files()
f.document = d
f.html = txt
f.save()
f.clearHtml()
print 'File saved!!!'
except Exception, e:
pass
#print str(e)
def makeDateFormat(dt):
tmp = dt.split('-')
return '%s-%s-%s' % (tmp[2],tmp[1],tmp[0])
def makeRequest(url):
try:
txt = requests.get(url,headers=copy_header,cookies=copy_cookie).text
return txt
except requests.exceptions.ReadTimeout as errh:
print ("Http Error timeout!")
except:
randomDelay()
print 'Pepeat request'
makeRequest(url)
def clear(txt):
return txt.replace('\n','').replace('\t','')
def getDate():
f = open('date','r')
date = f.read()
f.close()
return date
def addDay(date):
dt = datetime.strptime(date, '%d-%m-%Y')
return dt + datetime.timedelta(days=1)
def parseRow(row):
tds = row.findAll("td")
if len(tds) == 4:
out = {}
try:
out['date'] = clear(tds[2].text)
except:
pass
try:
name = tds[3].find('a')
out['name'] = clear(name.text)
out['number'] = name.find('b').text
except Exception, e:
print str(e)
try:
out['href'] = name['href']
except Exception, e:
print str(e)
return out
else:
return False
def savePage(html):
soup = BeautifulSoup(html, 'html.parser')
for tr in soup.findAll("tr",{"class": "row"}):
data = parseRow(tr)
if data:
#print 'Saving %s' % page
c = Documents()
c.href = data['href']
c.uid = data['number']
c.title = data['name']
c.date = makeDateFormat(data['date'])
try:
c.save()
print 'Done!!! %s' % c.uid
except Exception, e:
print 'Error saving. Duplicate!'
print str(e)
#else:
# import pdb; pdb.set_trace()
cnt = Documents.objects.all().count()
print 'Total: %s' % cnt
def getListTmp(date,page=1):
url = 'http://online.zakon.kz/sud//search?check=1&sort=date_desc®ion=-&court=-&date_start=%s&date_end=%s&type=-&files=1&number=&sides=&sides_phrase=1&judge=&context=&context_phrase=1&page=%s' % (date,date,page)
txt = makeRequest(url,cookies=copy_cookie, headers=copy_header).txt
#def test():
# def int():
def gidrateList(dict):
out = []
for i in dict:
if len(i['content']) == 0:
url = i['url']
i['content'] = makeRequest(url)
print 'gidrating %s' % i['page']
print 'url: %s' % url;
randomDelay()
out.append(i)
return out
def getList(date):
out = []
'''
f = open('test/1.html', 'r')
txt = f.read()
f.close()
'''
params = {
'sort': 'date_desc',
'date_start': date,
'date_end': date,
'type': '-',
'files': '1',
'sides_phrase': '1',
'context_phrase': '1',
'page': '1'
}
url = 'http://online.zakon.kz/sud//search'
url = 'http://online.zakon.kz/sud/search?date_start=%s&date_end=%s&sides_phrase=1&context_phrase=1&files=1&sort=date_desc' % (date,date)
txt = makeRequest(url)
#print url
f = open('log.html', 'w')
f.write(txt.encode('utf-8'))
f.close()
cnt = getCountDocs(txt.encode('utf-8'))
l = Log()
l.date = makeDateFormat(date)
l.cnt = cnt
try:
l.save()
l.update()
except:
print 'Log for %s is exist' % date
if cnt == 0:
return False
if cnt<=30:
cp = 1
elif cnt%30>0:
cp = (cnt/30)+1
else:
cp = cnt/30
for p in range(1,cp+1):
url = 'http://online.zakon.kz/sud//search?&date_start=%s&date_end=%s&sides_phrase=1&context_phrase=1&files=1&page=%s&sort=date_desc' % (date,date, p)
if p == 1:
out.append({
"url": url,
"page": p,
"content": txt
})
else:
out.append({
"url": url,
"page": p,
"content": ''
})
'''
soup = BeautifulSoup(txt, 'html.parser')
ul = soup.find('ul',{"class": "pagination"})
cnt = 1
try:
lis = ul.findAll('li')
except:
print 'No data for date %s' % date
return False
for li in lis:
try:
if cnt == 1:
out.append({
"url": li.find('a')['href'],
"content": txt
})
else:
out.append({
"url": li.find('a')['href'],
"content": ''
})
cnt = cnt+1
except:
pass
'''
return out
def loadDocs(date):
print 'Loading documents'
for d in Documents.objects.filter(date=date):
if d.is_document_downloaded == False:
print 'case %s' % d.uid
url = 'http://online.zakon.kz/sud//'+d.href
print 'Loading %s' % url
txt = makeRequest(url)
d.doc_html = txt
d.is_document_downloaded = True
d.save()
d.clearDocHtml()
print 'Saving done %s!!!' % d.uid
randomDelay()
else:
print 'Already downloaded!!!'
#getFiles(d)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-s', dest='start')
parser.add_argument('-e', dest='end')
def handle(self, *args, **options):
start_date = options["start"]
end_date = options["end"]
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
date_generated = [end_date - timedelta(days=x) for x in range(0, (end_date-start_date).days)]
print 'Start step importing from %s to %s' % (start_date, end_date)
#print date_generated
#sys.exit()
#dt = getDate()
#dt = '29-08-2018'
#print 'Process %s' % dt
add = []
for l in Log.objects.all():
if l.cnt!=l.fact:
add.append(l.date)
#print add
#sys.exit()
print date_generated
#sys.exit()
#for date in add:
for date in date_generated:
try:
l = Log.objects.get(date=date)
if l.cnt==l.fact:
print 'Date %s is full of data!!!!' % l.date
continue
except:
pass
## Selete all for date
Documents.objects.filter(date=date).delete()
dt = date.strftime("%d-%m-%Y")
lst = getList(dt)
if lst:
for p in gidrateList(lst):
try:
savePage(p['content'])
except:
pass
l = Log.objects.get(date=date)
l.update()
#loadDocs(makeDateFormat(dt))
#for url in getListMock(dt):
# print 'load %s' % url
| [
"zdimon@example.com"
] | zdimon@example.com |
fdfdb3734d3c38c13ecc576dac219bd5323bcec1 | 60d0a307e05453530043b20b89e1767d37174dc7 | /ex020.py | e0ce86158c3a116877280ca35eebc35104339fea | [
"MIT"
] | permissive | carlosvcerqueira/Projetos-Python | b1167c0fd070c96281fd59ffb04e140e7b81dc62 | 2fb91a0be8abf436af6a1d57fb2a1eafd0d30394 | refs/heads/main | 2023-08-16T02:07:14.291569 | 2021-10-11T16:45:07 | 2021-10-11T16:45:07 | 416,007,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import random
a1 = input('Primeiro aluno: ')
a2 = input('Segundo aluno: ')
a3 = input('Terceiro aluno: ')
a4 = input('Quarto aluno: ')
lista = [a1, a2, a3, a4]
ordem = random.shuffle(lista)
print('A ordem de apresentação será:')
print(lista)
| [
"noreply@github.com"
] | noreply@github.com |
25001df53eb53de0424c58cb58a0ac920e47da5f | fc8db69812002b37f9d524e5f3a7dac72e4e6e7a | /sample-env/app.py | eb55f551d9fd5a7876a13277b6ac24219a4efa97 | [] | no_license | nvk1152/python-tasks | 0bd1b0d3a62084964a6e4887c71c3ea5a9d37670 | 6397ca29b2720fcf608af6714e54dbef48c852e7 | refs/heads/main | 2023-01-05T05:17:10.377120 | 2020-10-23T16:46:35 | 2020-10-23T16:46:35 | 301,931,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # Day 6 Tasks
from flask import Flask
from flask import jsonify
app = Flask(__name__)
student = {
"1" : "Vamsi",
"2" : "Aishwarya",
"3" : "Samantha"
}
@app.route('/')
def helloWorld():
return 'Hello! World'
@app.route('/students/all')
def getAllStudents():
return student
@app.route('/jsonify')
def jsonContent():
return jsonify(Id = 1,
Name = student["1"])
@app.route('/students/<id>')
def getStudentById(id):
return jsonify(Name = student[id]) | [
"nvk1152@gmail.com"
] | nvk1152@gmail.com |
19eb520e240acd85dbbd9f308e71f8690326049c | e297517b8e6ec29a9b13be6cc5a01f77b21b08a2 | /new_vad_plot.py | 4a4a8aeff6a50d0e0b46d8fc288504025b54c5b9 | [] | no_license | jefflai108/LSTM | d6b1ec0d7256a920302b1ca05ff1ddb16eb8cc6a | 09764df3f2e91ceb0f1b6854c289f5b08f1e5541 | refs/heads/master | 2021-01-20T09:56:54.257353 | 2017-05-04T21:11:58 | 2017-05-04T21:11:58 | 90,309,783 | 7 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | #!/usr/bin/env python
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.io import wavfile
import os
fig = plt.figure()
###################################
plt.subplot(2, 1, 1)
indir = '/Users/jefflai108/clsp/spoken/'
for root, dirs, filenames in os.walk(indir):
for filename in filenames:
if filename == '103-1240-0000.wav':
samplerate, data = wavfile.read('/Users/jefflai108/clsp/spoken/'+filename)
# if filename == '103-1240-0001.wav':
# samplerate, data0 = wavfile.read('/Users/jefflai108/clsp/spoken/'+filename)
# data = np.concatenate((data, data0))
# if filename == '1040-133433-0004.wav':
# samplerate, data1 = wavfile.read('/Users/jefflai108/clsp/spoken/'+filename)
# data = np.concatenate((data, data1))
# if filename == '1040-133433-0009.wav':
# samplerate, data2 = wavfile.read('/Users/jefflai108/clsp/spoken/'+filename)
# data = np.concatenate((data, data2))
t1 = np.linspace(0, len(data)/samplerate, len(data))
plt.plot(t1, data)
###################################
plt.subplot(2, 1, 2)
with open('new_new_vad_1.txt') as f:
content = f.readlines()
content = [x.strip() for x in content]
y = []
for count, element in enumerate(content):
if element.split(" ")[0] == '103-1240-0000':
y = y + [int(x) for x in element.split(" ")[1][2:-2].split(' ')]
# if element.split(" ")[0] == '103-1240-0001':
# y = y + [int(x) for x in element.split(" ")[1][2:-2].split(' ')]
# if element.split(" ")[0] == '1040-133433-0004':
# y = y + [int(x) for x in element.split(" ")[1][2:-2].split(' ')]
# if element.split(" ")[0] == '1040-133433-0009':
# y = y + [int(x) for x in element.split(" ")[1][2:-2].split(' ')]
t2 = np.linspace(0, len(y)*10/1000, len(y))
plt.plot(t2, y)
####################################
fig.savefig('vad_temp8.png')
| [
"noreply@github.com"
] | noreply@github.com |
c763938e560a4c0f544e659a6ad288e59175bdc0 | 9a26d5122a235cbb1d5b246bbd211a17d7f16209 | /courses/admin.py | 2cc85fd258a3c983296e3f90dc50d57794082857 | [] | no_license | mohamed591195/LMS | 10d1651c34bf17874e8738ae25b05b1131b827ea | 24c3e84eb49008f154adfc7958ebd5baf13de22e | refs/heads/master | 2020-06-25T05:29:49.223595 | 2019-08-05T00:05:22 | 2019-08-05T00:05:42 | 199,215,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | from django.contrib import admin
from courses.models import Subject, Module, Course, Content
@admin.register(Subject)
class SubjectAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {'slug': ('title',)}
class ModuleInline(admin.StackedInline):
model = Module
prepopulated_fields = {'slug': ('title',)}
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ['title', 'subject', ]
list_filter = ['subject']
date_hierarchy = 'created_at'
prepopulated_fields = {'slug': ('title',)}
search_fields = ['title', 'overview',]
inlines = [ModuleInline] | [
"mohammad@pop-os.localdomain"
] | mohammad@pop-os.localdomain |
92332d5530fdac81fdd1b7e9ccb8ae37ad1d0f84 | 71290596a9a2e4a5f24984a69c860ac3873ff07b | /deevovenv/Scripts/pilfont.py | 3c278f548bc5650c3040c8a321a61a44e27c52d5 | [] | no_license | reginaalyssa/Deevo | 87856174070eede215b4c680d113d77023c5b9bc | e45fd8c6d39b581702a2851cd22879b2cf182ba6 | refs/heads/master | 2021-07-03T09:44:56.284927 | 2017-09-22T15:45:23 | 2017-09-22T15:45:23 | 102,055,086 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | #!c:\users\reginaalyssa\documents\github\deevo\deevovenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| [
"reginaalyssa01809@gmail.com"
] | reginaalyssa01809@gmail.com |
95fac5e97fb9e3ad314a5d42c2699ef942e63711 | 7ede26e96474e15ac0910589cac9eff792571503 | /implement-queue-using-stacks.py | 3a2f20a185d8600ed828a8dc3e5d37f4a0d06253 | [] | no_license | Jordanzuo/Algorithm | 0471b766258e4a5aa0cd8334aab3d9edfd078f85 | 6089a678be920f71c0e58de8914db72917aaad10 | refs/heads/master | 2022-03-09T06:27:25.468195 | 2022-03-08T10:23:53 | 2022-03-08T10:23:53 | 152,618,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | class MyQueue(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.stack1 = []
self.stack2 = []
def push(self, x):
"""
Push element x to the back of queue.
:type x: int
:rtype: void
"""
self.stack1.append(x)
def pop(self):
"""
Removes the element from in front of queue and returns that element.
:rtype: int
"""
if self.stack2:
return self.stack2.pop()
else:
if self.stack1:
while self.stack1:
self.stack2.append(self.stack1.pop())
return self.stack2.pop()
else:
return None
def peek(self):
"""
Get the front element.
:rtype: int
"""
if self.stack2:
return self.stack2[-1]
else:
if self.stack1:
while self.stack1:
self.stack2.append(self.stack1.pop())
return self.stack2[-1]
else:
return None
def empty(self):
"""
Returns whether the queue is empty.
:rtype: bool
"""
return not self.stack1 and not self.stack2
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
| [
"zuoxianqing@163.com"
] | zuoxianqing@163.com |
47c3d8019181b00a4cc6f1e528455517694034d1 | 1662507ec7104531e4e54209fc32bfdf397b60cd | /backend/wallet/models.py | c0d4a9fbfaf096d0cda2c061ebe3a3c6041ebd63 | [] | no_license | crowdbotics-apps/home-trend-24478 | 4b2397fbefc9469e2d8f00240dff0b3fc3eaa368 | 850309d0bb282cf824f8b8d42ef8c6ab3c43bc1c | refs/heads/master | 2023-03-07T18:34:15.590576 | 2021-02-20T00:34:25 | 2021-02-20T00:34:25 | 338,431,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | from django.conf import settings
from django.db import models
class PaymentMethod(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.CustomerWallet",
on_delete=models.CASCADE,
related_name="paymentmethod_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
class PaymentTransaction(models.Model):
"Generated Model"
price = models.FloatField()
tip = models.FloatField()
tracking_id = models.CharField(
max_length=50,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
tasker = models.ForeignKey(
"task_profile.TaskerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_tasker",
)
customer = models.ForeignKey(
"task_profile.CustomerProfile",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_customer",
)
transaction = models.ForeignKey(
"task.TaskTransaction",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_transaction",
)
payment_method = models.ForeignKey(
"wallet.PaymentMethod",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="paymenttransaction_payment_method",
)
class TaskerWallet(models.Model):
"Generated Model"
tasker = models.OneToOneField(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="taskerwallet_tasker",
)
balance = models.FloatField(
max_length=254,
)
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class CustomerWallet(models.Model):
"Generated Model"
customer = models.OneToOneField(
"task_profile.CustomerProfile",
on_delete=models.CASCADE,
related_name="customerwallet_customer",
)
balance = models.FloatField()
expiration_date = models.DateTimeField()
last_transaction = models.DateTimeField()
class TaskerPaymentAccount(models.Model):
"Generated Model"
wallet = models.ForeignKey(
"wallet.TaskerWallet",
on_delete=models.CASCADE,
related_name="taskerpaymentaccount_wallet",
)
account_token = models.CharField(
max_length=255,
)
payment_account = models.CharField(
max_length=10,
)
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
fd9e1af03b971a1db1d6893bbd1eb4399fbcb3d6 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /1720. 解码异或后的数组.py | 62dc31fa20f8ded1e4528d692e236b11be60047e | [] | no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | """
未知 整数数组 arr 由 n 个非负整数组成。
经编码后变为长度为 n - 1 的另一个整数数组 encoded ,其中 encoded[i] = arr[i] XOR arr[i + 1] 。例如,arr = [1,0,2,1] 经编码后得到 encoded = [1,2,3] 。
给你编码后的数组 encoded 和原数组 arr 的第一个元素 first(arr[0])。
请解码返回原数组 arr 。可以证明答案存在并且是唯一的。
示例 1:
输入:encoded = [1,2,3], first = 1
输出:[1,0,2,1]
解释:若 arr = [1,0,2,1] ,那么 first = 1 且 encoded = [1 XOR 0, 0 XOR 2, 2 XOR 1] = [1,2,3]
示例 2:
输入:encoded = [6,2,7,3], first = 4
输出:[4,2,0,7,4]
"""
def decode(encoded, first):
res = [first]
for i in encoded:
res.append(first ^ i)
first = res[-1]
return res
print(decode(encoded = [1,2,3], first = 1))
print(decode(encoded = [6,2,7,3], first = 4))
| [
"xlfeng886@163.com"
] | xlfeng886@163.com |
e7f0c5c99e3c97c1f893a277a28384ed826dae22 | 3fc69f8ff0b46be4b51a6aace8292669667dc73f | /mask_detect/stats/models.py | ad366a5951330fab154280b97ce1ea8c45e558c2 | [] | no_license | Maddie02/mask-detector | 19fb616e9d74419b50bc860faacba9a6e4c2a8e6 | 77f5d2940e7d78622278071b2d14d1ad2d48d5d4 | refs/heads/main | 2023-04-21T09:45:24.946492 | 2021-04-09T14:19:01 | 2021-04-09T14:19:01 | 308,363,915 | 0 | 0 | null | 2021-04-09T14:00:02 | 2020-10-29T15:04:30 | Jupyter Notebook | UTF-8 | Python | false | false | 755 | py | from django.db import models
from accounts.models import Employee
class Statistic(models.Model):
employee = models.OneToOneField(Employee, on_delete=models.CASCADE, null=True)
all_violations = models.IntegerField(default=0)
last_seen_without_mask = models.DateTimeField(null=True)
def __str__(self):
return f'Statistic - {self.employee.first_name} {self.employee.last_name}'
class Violation(models.Model):
violation_date = models.DateTimeField()
frame = models.ImageField(upload_to='violations/', blank=True, null=True)
statistic = models.ForeignKey(Statistic, on_delete=models.CASCADE)
def __str__(self):
return f'Violation - {self.statistic.employee.first_name} {self.statistic.employee.last_name}'
| [
"madlensarkisian@gmail.com"
] | madlensarkisian@gmail.com |
4424ea1b3cbe8a4131ced9d2daad60c5f6fce5f5 | 92f8466363e7d930e8516300f9ddc4d674008221 | /Testes/teste_leitura_base.py | 64208c99f9eb189fa9280da38d7671099d10663f | [] | no_license | ViniBiajoni/StateEstimator | 570f3afc7f583936f675498f2177ca25735625ae | d75c143120b618833eb11a89645f8a003ca2e4e6 | refs/heads/master | 2023-07-11T16:57:22.599890 | 2021-08-26T18:45:22 | 2021-08-26T18:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | import numpy as np
month=['jan', 'fev', 'mar', 'abril','maio','jun','jul','agost','set','out','nov','dez']
year=['2019']
caso=['14Bus_Base']
file= 'Medidas\int_5-5min\Medidas_ieee_' + month[0] + '_' + year[0] + '_' + caso[0] + 'SE.txt'
meas = np.loadtxt(file)
print(meas) | [
"vinibiajoni@gmail.com"
] | vinibiajoni@gmail.com |
c17c177f1e0e41c6cc8ac57a0ae622644ba082ab | 245ceae27f0fbb5185d57976317c5bb376c9b617 | /Languages/Python/mcm.py | 1c2d9ef17af99fb74c0778340b827b7ed380f649 | [
"MIT"
] | permissive | ABHISHEK-AMRUTE/Hacktoberfest-1 | e9a6e1a3abe1b8d038f856a1aaa6c9b8da313301 | 38f7381c16fb35e10bbc32c7105414073cc3e8d2 | refs/heads/master | 2022-09-20T07:15:26.707100 | 2019-10-28T09:44:15 | 2019-10-28T09:44:15 | 218,013,374 | 2 | 0 | MIT | 2022-09-10T15:11:27 | 2019-10-28T09:43:41 | C++ | UTF-8 | Python | false | false | 1,155 | py | # Dynamic Programming Python implementation of Matrix
# Chain Multiplication. See the Cormen book for details
# of the following algorithm
import sys
# Matrix Ai has dimension p[i-1] x p[i] for i = 1..n
def MatrixChainOrder(p, n):
# For simplicity of the program, one extra row and one
# extra column are allocated in m[][]. 0th row and 0th
# column of m[][] are not used
m = [[0 for x in range(n)] for x in range(n)]
# m[i, j] = Minimum number of scalar multiplications needed
# to compute the matrix A[i]A[i + 1]...A[j] = A[i..j] where
# dimension of A[i] is p[i-1] x p[i]
# cost is zero when multiplying one matrix.
for i in range(1, n):
m[i][i] = 0
# L is chain length.
for L in range(2, n):
for i in range(1, n-L + 1):
j = i + L-1
m[i][j] = sys.maxint
for k in range(i, j):
# q = cost / scalar multiplications
q = m[i][k] + m[k + 1][j] + p[i-1]*p[k]*p[j]
if q < m[i][j]:
m[i][j] = q
return m[1][n-1]
# Driver program to test above function
arr = [1, 2, 3, 4]
size = len(arr)
print("Minimum number of multiplications is " +
str(MatrixChainOrder(arr, size)))
# This Code is contributed by Bhavya Jain
| [
"sakshamtaneja@Sakshams-MacBook-Air.local"
] | sakshamtaneja@Sakshams-MacBook-Air.local |
5df9e7d1f970854f9b25a62a472d11be15e0cbdf | 408b405d1dc185cb7644fecb842461e969dcf494 | /profiles/profiles/urls.py | 56a846f256e2a2847b0fb1a45b25063c09976ece | [
"MIT"
] | permissive | Manju2012/Manju2012-profiles-rest-api | 7e26167e7935e9bcda404663ddaf951dc8147583 | b486e865010100454d59433be4cdd579de79928d | refs/heads/master | 2023-05-09T15:26:43.765013 | 2021-05-19T10:11:39 | 2021-05-19T10:11:39 | 367,035,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | """profiles URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('api.urls')),
]
| [
"naganurprasad@yahoo.in"
] | naganurprasad@yahoo.in |
0f9b9713daebb95fb5fdb1e6dbd63f31d8427286 | 3b8d68f024646db4de6bb71e2f75acc25a2e6554 | /todo/settings.py | 019fc21e61fc7303739d53270e4fbc727f2c79d2 | [] | no_license | kgmaxwell1990/django-todo | dde6ae21372b1819f690f00554ccf0122332deee | 77392c7f3a8c1a7a5db53b705e553cef212dc3fa | refs/heads/master | 2021-08-11T11:11:37.730092 | 2017-11-13T16:05:56 | 2017-11-13T16:05:56 | 110,008,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['todo-django-kgmaxwell1990.c9users.io', 'katie-dev-djangotodo.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todoitem',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {'default': dj_database_url.parse(os.environ.get("DATABASE_URL")) }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"katie@codeinstitute.net"
] | katie@codeinstitute.net |
1ea6daf003707ce50ab5b63472bc6e8fb402e115 | 07bf6fb129048e93bfb1bde5c9f57f44870d0336 | /ta.py | 93ffc371bae5b137e73ffca18f347f529673fbbc | [] | no_license | ayun2001/asd1234qd_trader | ec79309857573e890882b5fc7d9fd60b11b11c00 | bfcc1abc7ea1cb749f482780489869f7d988cf73 | refs/heads/master | 2020-04-15T11:12:36.661127 | 2019-01-18T07:33:35 | 2019-01-18T07:33:35 | 164,619,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py | # coding=utf-8
import talib
import common
class TA(object):
def __init__(self):
if not common.file_exist(common.CONST_DIR_LOG):
common.create_directory(common.CONST_DIR_LOG)
@staticmethod
def make_macd_data(dataset, s=12, l=26, m=9): # MACD数据一定要长时间的数据 30个数据点不够, 至少需要90, 这样数据才准
try:
dataset['macd_dif'] = dataset['close'].ewm(span=s).mean() - dataset['close'].ewm(span=l).mean()
dataset['macd_dea'] = dataset['macd_dif'].ewm(span=m).mean()
dataset['macd'] = 2 * (dataset['macd_dif'] - dataset['macd_dea'])
except Exception as err:
raise Exception, u"添加 MACD 指标数据错误: %s" % err.message
@staticmethod
def make_macd_cross(dataset):
try:
dataset['macd_cross'] = ""
macd_pos = dataset['macd_dif'] > dataset['macd_dea'] # 以K值小于D值为参考, 注意这里数据是最后一天在最后面
dataset.loc[
macd_pos[(macd_pos == True) & (macd_pos.shift(1) == False)].index, 'macd_cross'] = "up_cross" # 金叉
dataset.loc[
macd_pos[(macd_pos == False) & (macd_pos.shift(1) == True)].index, 'macd_cross'] = "down_cross" # 死叉
except Exception as err:
raise Exception, u"添加 MACD 交叉指标数据错误: %s" % err.message
@staticmethod
def make_kdj_data(dataset, n1=9, n2=3):
try:
lvds = dataset['low'].rolling(window=n1).min()
lvds.fillna(value=dataset['low'].expanding().min(), inplace=True)
hvds = dataset['high'].rolling(window=n1).max()
hvds.fillna(value=dataset['close'].expanding().max(), inplace=True)
rsv = (dataset['close'] - lvds) / (hvds - lvds) * 100
dataset['kdj_k'] = rsv.ewm(com=2).mean()
dataset['kdj_d'] = dataset['kdj_k'].ewm(com=2).mean()
dataset['kdj_j'] = n2 * dataset['kdj_k'] - 2 * dataset['kdj_d']
except Exception as err:
raise Exception, u"添加 KDJ 指标数据错误: %s" % err.message
@staticmethod
def make_kdj_cross(dataset):
try:
dataset['kdj_cross'] = ""
kdj_pos = dataset['kdj_k'] > dataset['kdj_d'] # 以K值小于D值为参考, 注意这里数据是最后一天在最后面
dataset.loc[kdj_pos[(kdj_pos == True) & (kdj_pos.shift(1) == False)].index, 'kdj_cross'] = "up_cross" # 金叉
dataset.loc[
kdj_pos[(kdj_pos == False) & (kdj_pos.shift(1) == True)].index, 'kdj_cross'] = "down_cross" # 死叉
except Exception as err:
raise Exception, u"添加 KDJ 交叉指标数据错误: %s" % err.message
@staticmethod
def make_change_data(dataset):
try:
dataset['change'] = dataset['close'].diff() # 计算价格偏差
dataset['pct_change'] = dataset['close'].pct_change() * 100 # 计算百分比
except Exception as err:
raise Exception, u"添加 CHANGE 数据错误: %s" % err.message
@staticmethod
def make_ma_data(dataset):
try:
dataset['ma5'] = dataset['close'].rolling(window=5).mean()
dataset['ma10'] = dataset['close'].rolling(window=10).mean()
dataset['ma20'] = dataset['close'].rolling(window=20).mean()
dataset['ma30'] = dataset['close'].rolling(window=30).mean()
dataset['ma60'] = dataset['close'].rolling(window=60).mean()
except Exception as err:
raise Exception, u"添加 MA 均线指标数据错误: %s" % err.message
@staticmethod
def make_rsi_data(dataset, n1=6, n2=12, n3=24):
try:
dataset['rsi6'] = talib.RSI(dataset['close'].values, n1)
dataset['rsi12'] = talib.RSI(dataset['close'].values, n2)
dataset['rsi24'] = talib.RSI(dataset['close'].values, n3)
except Exception as err:
raise Exception, u"添加 RSI 指标数据错误: %s" % err.message
@staticmethod
def make_rsi_cross(dataset):
try:
dataset['rsi_cross'] = ''
rsi_pos = dataset['rsi12'] > dataset['rsi24'] # 以K值小于D值为参考, 注意这里数据是最后一天在最后面
dataset.loc[rsi_pos[(rsi_pos == True) & (rsi_pos.shift(1) == False)].index, 'rsi_cross'] = "up_cross" # 金叉
dataset.loc[
rsi_pos[(rsi_pos == False) & (rsi_pos.shift(1) == True)].index, 'rsi_cross'] = "down_cross" # 死叉
except Exception as err:
raise Exception, u"添加 RSI 交叉指标数据错误: %s" % err.message
@staticmethod
def make_cci_data(dataset):
try:
dataset['cci'] = talib.CCI(dataset['high'].values, dataset['low'].values, dataset['close'].values)
except Exception as err:
raise Exception, u"添加 CCI 指标数据错误: %s" % err.message
| [
"lishengyan@lichengyandeiMac.local"
] | lishengyan@lichengyandeiMac.local |
9afe676c878fcd117a985ef2ca7fd25fdf3c3fec | 63ca8081ba3e43e7658a500498c41a476b1fa2d2 | /app.py | 7507e9c3ffea58425ebe4677b8018958ddfca68e | [] | no_license | domoniquecox/eagle-eye | 0aad3b930e68c03ec13329fe453579eca1ae672c | ab1936f0d6887bd1b4f9e0d1e15d77ceee6c011d | refs/heads/master | 2020-12-19T07:19:12.247589 | 2020-04-28T20:47:59 | 2020-04-28T20:47:59 | 235,660,836 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | import cv2
from flask import Flask, render_template, session, redirect, request, url_for, Response
import os
import firebase_admin
import firebase
import pyrebase
import sys
from subprocess import run,PIPE
from firebase_admin import credentials, firestore
from functools import wraps
config = {
"apiKey": "AIzaSyDd7CWwzX-cxTMk1tH-pzDEl21-FK6fUsI",
"authDomain": "eagle-eye-f3a0d.firebaseapp.com",
"databaseURL": "https://eagle-eye-f3a0d.firebaseio.com",
"projectId": "eagle-eye-f3a0d",
"storageBucket": "eagle-eye-f3a0d.appspot.com",
"serviceAccount": "eagle-eye-f3a0d-firebase-adminsdk-f8rcz-5d151faa13.json",
"messagingSenderId": "1020036625253"
}
firebase = pyrebase.initialize_app(config)
db2 = firebase.database()
cred = credentials.Certificate("eagle-eye-f3a0d-firebase-adminsdk-f8rcz-5d151faa13.json")
firebase_admin.initialize_app(cred, {"projectId":"eagle-eye-f3a0d"})
db = firestore.client()
app = Flask(__name__)
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if "logged_in" in session:
return f(*args, **kwargs)
else:
return redirect(url_for("index"))
return wrap
def log_out():
session.clear()
return redirect(url_for("index"))
@app.route("/", methods =["GET", "POST"])
def index():
if request.method == "POST":
username = request.form["email"]
password = request.form["password"]
user = db.collection(u"login-users")
results = user.where(u"email", u"==", username).stream()
for result in results:
# print("{}".format(result.to_dict()))
# print(result.to_dict()["email"])
if username == result.to_dict()["email"] and password == result.to_dict()["pass"]:
session["logged_in"] = True
session["username"] = username
session["uid"] = result.id
return redirect(url_for("courses"))
return render_template("index.html")
@app.route("/courses")
@is_logged_in
def courses():
return render_template("courses.html")
@app.route("/attendance", methods=["GET", "POST"])
@is_logged_in
def faces_recognized():
if request.method == "POST":
allposts = db2.child("Students_Present").get()
faces = allposts.val()
return render_template("attendance.html", n = faces.values())
return render_template("attendance.html")
#@app.route("/submit_attendance", methods=["GET" ,"POST"])
#@is_logged_in
#def submit_attendance():
# if request.method == "POST":
# DomPres = request.form["attendance"]
# print(DomPres)
# return render_template("attendance.html")
#db_events = db2.child("MyTestData").get().val().values()
#return render_template("attendance.html", MyTestData = db_events)
@app.route("/math4212", )
@is_logged_in
def math():
return render_template("attendance.html")
@app.route("/psyc1001" , methods=["GET", "POST"])
@is_logged_in
def psyc():
return render_template("attendance.html")
@app.route("/hlth2223", methods=["GET", "POST"])
@is_logged_in
def hlth2003():
return render_template("attendance.html")
@app.route("/hlth2003", methods=["GET", "POST"] )
@is_logged_in
def hlth2223():
return render_template("attendance.html")
@app.route("/camera_on")
def camera_on():
os.system("python face_recognition.py")
return "hi"
@app.route("/logout")
@is_logged_in
def logOut():
return render_template("logout.html")
if __name__ == "__main__":
app.secret_key = "pvamu"
app.run(debug = True)
| [
"noreply@github.com"
] | noreply@github.com |
345bcde7408a2d774ec98727350693d566242b99 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /Geometry/CMSCommonData/python/cmsExtendedGeometry2015XML_RPC2Gap_cfi.py | 400cbd05035b4ff5689fd24ee8fd31feea7fbe54 | [] | permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 15,587 | py | import FWCore.ParameterSet.Config as cms
## Everything that is currently in the PostLS1 geometry (rpc,csc,beampipe)
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/extend/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/CMSCommonData/data/cmsTracker.xml',
'Geometry/CMSCommonData/data/caloBase.xml',
'Geometry/CMSCommonData/data/cmsCalo.xml',
'Geometry/CMSCommonData/data/muonBase.xml',
'Geometry/CMSCommonData/data/cmsMuon.xml',
'Geometry/CMSCommonData/data/mgnt.xml',
'Geometry/CMSCommonData/data/PhaseI/beampipe.xml',
'Geometry/CMSCommonData/data/cmsBeam.xml',
'Geometry/CMSCommonData/data/muonMB.xml',
'Geometry/CMSCommonData/data/muonMagnet.xml',
'Geometry/CMSCommonData/data/cavern.xml',
'Geometry/TrackerCommonData/data/pixfwdMaterials.xml',
'Geometry/TrackerCommonData/data/pixfwdCommon.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml',
'Geometry/TrackerCommonData/data/pixfwdPanel.xml',
'Geometry/TrackerCommonData/data/pixfwdBlade.xml',
'Geometry/TrackerCommonData/data/pixfwdNipple.xml',
'Geometry/TrackerCommonData/data/pixfwdDisk.xml',
'Geometry/TrackerCommonData/data/pixfwdCylinder.xml',
'Geometry/TrackerCommonData/data/pixfwd.xml',
'Geometry/TrackerCommonData/data/pixbarmaterial.xml',
'Geometry/TrackerCommonData/data/pixbarladder.xml',
'Geometry/TrackerCommonData/data/pixbarladderfull.xml',
'Geometry/TrackerCommonData/data/pixbarladderhalf.xml',
'Geometry/TrackerCommonData/data/pixbarlayer.xml',
'Geometry/TrackerCommonData/data/pixbarlayer0.xml',
'Geometry/TrackerCommonData/data/pixbarlayer1.xml',
'Geometry/TrackerCommonData/data/pixbarlayer2.xml',
'Geometry/TrackerCommonData/data/pixbar.xml',
'Geometry/TrackerCommonData/data/tibtidcommonmaterial.xml',
'Geometry/TrackerCommonData/data/tibmaterial.xml',
'Geometry/TrackerCommonData/data/tibmodpar.xml',
'Geometry/TrackerCommonData/data/tibmodule0.xml',
'Geometry/TrackerCommonData/data/tibmodule0a.xml',
'Geometry/TrackerCommonData/data/tibmodule0b.xml',
'Geometry/TrackerCommonData/data/tibmodule2.xml',
'Geometry/TrackerCommonData/data/tibstringpar.xml',
'Geometry/TrackerCommonData/data/tibstring0ll.xml',
'Geometry/TrackerCommonData/data/tibstring0lr.xml',
'Geometry/TrackerCommonData/data/tibstring0ul.xml',
'Geometry/TrackerCommonData/data/tibstring0ur.xml',
'Geometry/TrackerCommonData/data/tibstring0.xml',
'Geometry/TrackerCommonData/data/tibstring1ll.xml',
'Geometry/TrackerCommonData/data/tibstring1lr.xml',
'Geometry/TrackerCommonData/data/tibstring1ul.xml',
'Geometry/TrackerCommonData/data/tibstring1ur.xml',
'Geometry/TrackerCommonData/data/tibstring1.xml',
'Geometry/TrackerCommonData/data/tibstring2ll.xml',
'Geometry/TrackerCommonData/data/tibstring2lr.xml',
'Geometry/TrackerCommonData/data/tibstring2ul.xml',
'Geometry/TrackerCommonData/data/tibstring2ur.xml',
'Geometry/TrackerCommonData/data/tibstring2.xml',
'Geometry/TrackerCommonData/data/tibstring3ll.xml',
'Geometry/TrackerCommonData/data/tibstring3lr.xml',
'Geometry/TrackerCommonData/data/tibstring3ul.xml',
'Geometry/TrackerCommonData/data/tibstring3ur.xml',
'Geometry/TrackerCommonData/data/tibstring3.xml',
'Geometry/TrackerCommonData/data/tiblayerpar.xml',
'Geometry/TrackerCommonData/data/tiblayer0.xml',
'Geometry/TrackerCommonData/data/tiblayer1.xml',
'Geometry/TrackerCommonData/data/tiblayer2.xml',
'Geometry/TrackerCommonData/data/tiblayer3.xml',
'Geometry/TrackerCommonData/data/tib.xml',
'Geometry/TrackerCommonData/data/tidmaterial.xml',
'Geometry/TrackerCommonData/data/tidmodpar.xml',
'Geometry/TrackerCommonData/data/tidmodule0.xml',
'Geometry/TrackerCommonData/data/tidmodule0r.xml',
'Geometry/TrackerCommonData/data/tidmodule0l.xml',
'Geometry/TrackerCommonData/data/tidmodule1.xml',
'Geometry/TrackerCommonData/data/tidmodule1r.xml',
'Geometry/TrackerCommonData/data/tidmodule1l.xml',
'Geometry/TrackerCommonData/data/tidmodule2.xml',
'Geometry/TrackerCommonData/data/tidringpar.xml',
'Geometry/TrackerCommonData/data/tidring0.xml',
'Geometry/TrackerCommonData/data/tidring0f.xml',
'Geometry/TrackerCommonData/data/tidring0b.xml',
'Geometry/TrackerCommonData/data/tidring1.xml',
'Geometry/TrackerCommonData/data/tidring1f.xml',
'Geometry/TrackerCommonData/data/tidring1b.xml',
'Geometry/TrackerCommonData/data/tidring2.xml',
'Geometry/TrackerCommonData/data/tid.xml',
'Geometry/TrackerCommonData/data/tidf.xml',
'Geometry/TrackerCommonData/data/tidb.xml',
'Geometry/TrackerCommonData/data/tibtidservices.xml',
'Geometry/TrackerCommonData/data/tibtidservicesf.xml',
'Geometry/TrackerCommonData/data/tibtidservicesb.xml',
'Geometry/TrackerCommonData/data/tobmaterial.xml',
'Geometry/TrackerCommonData/data/tobmodpar.xml',
'Geometry/TrackerCommonData/data/tobmodule0.xml',
'Geometry/TrackerCommonData/data/tobmodule2.xml',
'Geometry/TrackerCommonData/data/tobmodule4.xml',
'Geometry/TrackerCommonData/data/tobrodpar.xml',
'Geometry/TrackerCommonData/data/tobrod0c.xml',
'Geometry/TrackerCommonData/data/tobrod0l.xml',
'Geometry/TrackerCommonData/data/tobrod0h.xml',
'Geometry/TrackerCommonData/data/tobrod0.xml',
'Geometry/TrackerCommonData/data/tobrod1l.xml',
'Geometry/TrackerCommonData/data/tobrod1h.xml',
'Geometry/TrackerCommonData/data/tobrod1.xml',
'Geometry/TrackerCommonData/data/tobrod2c.xml',
'Geometry/TrackerCommonData/data/tobrod2l.xml',
'Geometry/TrackerCommonData/data/tobrod2h.xml',
'Geometry/TrackerCommonData/data/tobrod2.xml',
'Geometry/TrackerCommonData/data/tobrod3l.xml',
'Geometry/TrackerCommonData/data/tobrod3h.xml',
'Geometry/TrackerCommonData/data/tobrod3.xml',
'Geometry/TrackerCommonData/data/tobrod4c.xml',
'Geometry/TrackerCommonData/data/tobrod4l.xml',
'Geometry/TrackerCommonData/data/tobrod4h.xml',
'Geometry/TrackerCommonData/data/tobrod4.xml',
'Geometry/TrackerCommonData/data/tobrod5l.xml',
'Geometry/TrackerCommonData/data/tobrod5h.xml',
'Geometry/TrackerCommonData/data/tobrod5.xml',
'Geometry/TrackerCommonData/data/tob.xml',
'Geometry/TrackerCommonData/data/tecmaterial.xml',
'Geometry/TrackerCommonData/data/tecmodpar.xml',
'Geometry/TrackerCommonData/data/tecmodule0.xml',
'Geometry/TrackerCommonData/data/tecmodule0r.xml',
'Geometry/TrackerCommonData/data/tecmodule0s.xml',
'Geometry/TrackerCommonData/data/tecmodule1.xml',
'Geometry/TrackerCommonData/data/tecmodule1r.xml',
'Geometry/TrackerCommonData/data/tecmodule1s.xml',
'Geometry/TrackerCommonData/data/tecmodule2.xml',
'Geometry/TrackerCommonData/data/tecmodule3.xml',
'Geometry/TrackerCommonData/data/tecmodule4.xml',
'Geometry/TrackerCommonData/data/tecmodule4r.xml',
'Geometry/TrackerCommonData/data/tecmodule4s.xml',
'Geometry/TrackerCommonData/data/tecmodule5.xml',
'Geometry/TrackerCommonData/data/tecmodule6.xml',
'Geometry/TrackerCommonData/data/tecpetpar.xml',
'Geometry/TrackerCommonData/data/tecring0.xml',
'Geometry/TrackerCommonData/data/tecring1.xml',
'Geometry/TrackerCommonData/data/tecring2.xml',
'Geometry/TrackerCommonData/data/tecring3.xml',
'Geometry/TrackerCommonData/data/tecring4.xml',
'Geometry/TrackerCommonData/data/tecring5.xml',
'Geometry/TrackerCommonData/data/tecring6.xml',
'Geometry/TrackerCommonData/data/tecring0f.xml',
'Geometry/TrackerCommonData/data/tecring1f.xml',
'Geometry/TrackerCommonData/data/tecring2f.xml',
'Geometry/TrackerCommonData/data/tecring3f.xml',
'Geometry/TrackerCommonData/data/tecring4f.xml',
'Geometry/TrackerCommonData/data/tecring5f.xml',
'Geometry/TrackerCommonData/data/tecring6f.xml',
'Geometry/TrackerCommonData/data/tecring0b.xml',
'Geometry/TrackerCommonData/data/tecring1b.xml',
'Geometry/TrackerCommonData/data/tecring2b.xml',
'Geometry/TrackerCommonData/data/tecring3b.xml',
'Geometry/TrackerCommonData/data/tecring4b.xml',
'Geometry/TrackerCommonData/data/tecring5b.xml',
'Geometry/TrackerCommonData/data/tecring6b.xml',
'Geometry/TrackerCommonData/data/tecpetalf.xml',
'Geometry/TrackerCommonData/data/tecpetalb.xml',
'Geometry/TrackerCommonData/data/tecpetal0.xml',
'Geometry/TrackerCommonData/data/tecpetal0f.xml',
'Geometry/TrackerCommonData/data/tecpetal0b.xml',
'Geometry/TrackerCommonData/data/tecpetal3.xml',
'Geometry/TrackerCommonData/data/tecpetal3f.xml',
'Geometry/TrackerCommonData/data/tecpetal3b.xml',
'Geometry/TrackerCommonData/data/tecpetal6f.xml',
'Geometry/TrackerCommonData/data/tecpetal6b.xml',
'Geometry/TrackerCommonData/data/tecpetal8f.xml',
'Geometry/TrackerCommonData/data/tecpetal8b.xml',
'Geometry/TrackerCommonData/data/tecwheel.xml',
'Geometry/TrackerCommonData/data/tecwheela.xml',
'Geometry/TrackerCommonData/data/tecwheelb.xml',
'Geometry/TrackerCommonData/data/tecwheelc.xml',
'Geometry/TrackerCommonData/data/tecwheeld.xml',
'Geometry/TrackerCommonData/data/tecwheel6.xml',
'Geometry/TrackerCommonData/data/tecservices.xml',
'Geometry/TrackerCommonData/data/tecbackplate.xml',
'Geometry/TrackerCommonData/data/tec.xml',
'Geometry/TrackerCommonData/data/trackermaterial.xml',
'Geometry/TrackerCommonData/data/tracker.xml',
'Geometry/TrackerCommonData/data/trackerpixbar.xml',
'Geometry/TrackerCommonData/data/trackerpixfwd.xml',
'Geometry/TrackerCommonData/data/trackertibtidservices.xml',
'Geometry/TrackerCommonData/data/trackertib.xml',
'Geometry/TrackerCommonData/data/trackertid.xml',
'Geometry/TrackerCommonData/data/trackertob.xml',
'Geometry/TrackerCommonData/data/trackertec.xml',
'Geometry/TrackerCommonData/data/trackerbulkhead.xml',
'Geometry/TrackerCommonData/data/trackerother.xml',
'Geometry/EcalCommonData/data/eregalgo.xml',
'Geometry/EcalCommonData/data/ebalgo.xml',
'Geometry/EcalCommonData/data/ebcon.xml',
'Geometry/EcalCommonData/data/ebrot.xml',
'Geometry/EcalCommonData/data/eecon.xml',
'Geometry/EcalCommonData/data/eefixed.xml',
'Geometry/EcalCommonData/data/eehier.xml',
'Geometry/EcalCommonData/data/eealgo.xml',
'Geometry/EcalCommonData/data/escon.xml',
'Geometry/EcalCommonData/data/esalgo.xml',
'Geometry/EcalCommonData/data/eeF.xml',
'Geometry/EcalCommonData/data/eeB.xml',
'Geometry/HcalCommonData/data/hcalrotations.xml',
'Geometry/HcalCommonData/data/hcalalgo.xml',
'Geometry/HcalCommonData/data/hcalbarrelalgo.xml',
'Geometry/HcalCommonData/data/hcalendcapalgo.xml',
'Geometry/HcalCommonData/data/hcalouteralgo.xml',
'Geometry/HcalCommonData/data/hcalforwardalgo.xml',
'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mbCommon.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb1.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb2.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb3.xml',
'Geometry/MuonCommonData/data/RPC2Gap/mb4.xml',
'Geometry/MuonCommonData/data/design/muonYoke.xml',
'Geometry/MuonCommonData/data/v2/mf.xml',
'Geometry/MuonCommonData/data/RPC2Gap/rpcf.xml',
'Geometry/MuonCommonData/data/v2/csc.xml',
'Geometry/MuonCommonData/data/v2/mfshield.xml',
'Geometry/ForwardCommonData/data/forward.xml',
'Geometry/ForwardCommonData/data/v2/forwardshield.xml',
'Geometry/ForwardCommonData/data/brmrotations.xml',
'Geometry/ForwardCommonData/data/brm.xml',
'Geometry/ForwardCommonData/data/totemMaterials.xml',
'Geometry/ForwardCommonData/data/totemRotations.xml',
'Geometry/ForwardCommonData/data/totemt1.xml',
'Geometry/ForwardCommonData/data/totemt2.xml',
'Geometry/ForwardCommonData/data/ionpump.xml',
'Geometry/ForwardCommonData/data/castor.xml',
'Geometry/ForwardCommonData/data/zdcmaterials.xml',
'Geometry/ForwardCommonData/data/lumimaterials.xml',
'Geometry/ForwardCommonData/data/zdcrotations.xml',
'Geometry/ForwardCommonData/data/lumirotations.xml',
'Geometry/ForwardCommonData/data/zdc.xml',
'Geometry/ForwardCommonData/data/zdclumi.xml',
'Geometry/ForwardCommonData/data/cmszdc.xml')+cms.vstring(
'Geometry/MuonCommonData/data/RPC2Gap/muonNumbering.xml',
'Geometry/TrackerCommonData/data/trackerStructureTopology.xml',
'Geometry/TrackerSimData/data/trackersens.xml',
'Geometry/TrackerRecoData/data/trackerRecoMaterial.xml',
'Geometry/EcalSimData/data/ecalsens.xml',
'Geometry/HcalCommonData/data/hcalsenspmf.xml',
'Geometry/HcalSimData/data/hf.xml',
'Geometry/HcalSimData/data/hfpmt.xml',
'Geometry/HcalSimData/data/hffibrebundle.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/MuonSimData/data/muonSens.xml',
'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',
'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml',
'Geometry/ForwardCommonData/data/brmsens.xml',
'Geometry/ForwardSimData/data/castorsens.xml',
'Geometry/ForwardSimData/data/zdcsens.xml',
'Geometry/HcalSimData/data/HcalProdCuts.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'Geometry/EcalSimData/data/ESProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml',
'Geometry/MuonSimData/data/muonProdCuts.xml',
'Geometry/ForwardSimData/data/CastorProdCuts.xml',
'Geometry/ForwardSimData/data/zdcProdCuts.xml',
'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml'),
rootNodeName = cms.string('cms:OCMS')
)
| [
"giulio.eulisse@cern.ch"
] | giulio.eulisse@cern.ch |
a809cf5f7c25bbfabfc4c575d1a07b237ec8bc9c | 018d3ade7ce3c9797ec53e5b29e93c343cbd41e3 | /test/test_dynamic_shapes.py | 0d421b04008d224758407fdd0b571cdfd72af613 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | aiot-tech/pytorch | 016574055a306f58a46308e4971cf180ffc92e4d | 46730aec35ee047b92b288e0366da0f7e993e5ae | refs/heads/master | 2022-11-18T14:01:22.576441 | 2022-11-04T23:11:17 | 2022-11-05T05:42:07 | 102,860,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,297 | py | # -*- coding: utf-8 -*-
# Owner(s): ["oncall: jit"]
from torch._C import _disabled_torch_function_impl
import torch.fx
import torch.nn.functional as F
from torch.testing._internal.common_utils import run_tests, TestCase, skipIfTorchDynamo, \
IS_WINDOWS, parametrize, instantiate_parametrized_tests
import unittest
import torch
import operator
import itertools
import random
import contextlib
import math
import builtins
import atexit
import io
import os
from torch.utils._pytree import tree_map
from torch.fx.experimental import symbolic_shapes
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.experimental.symbolic_shapes import ShapeEnv, sym_float, guard_int, SymNode
from torch.utils._python_dispatch import TorchDispatchMode
from torch import SymInt
aten = torch.ops.aten
try:
import sympy
# TODO(jansel): these tests fail on windows
HAS_SYMPY = not IS_WINDOWS
except ImportError:
HAS_SYMPY = False
skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy")
meta_funcs = {}


def register_meta(op):
    """Decorator factory: register the decorated function as the meta kernel
    for *op* in the module-level ``meta_funcs`` table.

    ``op`` may be a single overload or any pytree of overloads (e.g. a list);
    every leaf is mapped to the same decorated function.
    """
    def decorator(f):
        # tree_map visits each leaf of `op`, so a list of overloads
        # registers `f` once per overload.
        tree_map(lambda overload: meta_funcs.__setitem__(overload, f), op)
        return f
    return decorator
@register_meta([aten.add.Tensor, aten.sub.Tensor])
def binary_meta(a, b):
    """Meta kernel for elementwise add/sub: the output mirrors the first
    operand's shape (``b``'s shape is ignored — broadcasting is not modeled)."""
    return a.new_empty(a.shape)
@register_meta(aten.cat.default)
def cat_meta(tensors, dim=0):
    """Meta kernel for aten.cat: sums the sizes along `dim`, asserting that
    every other dimension agrees with the first tensor's shape."""
    first = tensors[0]
    base_shape = first.shape
    total = 0
    for t in tensors:
        for axis, (expected, actual) in enumerate(zip(base_shape, t.shape)):
            if axis == dim:
                total = total + actual
            else:
                assert actual == expected
    out_shape = list(base_shape)
    out_shape[dim] = total
    return first.new_empty(out_shape)
@register_meta([aten.narrow_copy.default])
def narrow_copy_symint_meta(a, dim, start, length, **kwargs):
    """Meta kernel for narrow_copy: the size along `dim` becomes `length`;
    all other dimensions are unchanged (`start` affects data, not shape)."""
    new_shape = [length if i == dim else extent
                 for i, extent in enumerate(a.shape)]
    return a.new_empty(tuple(new_shape))
@register_meta([aten.expand.default])
def expand_symint_meta(a, size, implicit=False):
    """Meta kernel for expand: the output simply takes the requested `size`
    (no validation against `a`'s shape is performed here)."""
    return a.new_empty(size)
def create_contiguous(shape):
    """Return the row-major (C-contiguous) strides for `shape`.

    stride[i] is the product of all sizes *after* dimension i, so the last
    dimension always has stride 1 — e.g. (2, 3, 4) -> [12, 4, 1], matching
    ``torch.randn(2, 3, 4).stride()``.  An empty shape yields ``[1]``,
    preserving the previous implementation's behavior for that case.

    Bug fix: the old loop iterated ``reversed(shape[:-1])`` — the *leading*
    dim sizes — producing e.g. [6, 3, 1] for (2, 3, 4).  The running product
    must multiply by the *trailing* sizes, i.e. ``reversed(shape[1:])``.
    """
    strides = [1]
    for dim in reversed(shape[1:]):
        strides.append(dim * strides[-1])
    return list(reversed(strides))
class FakeSymbolicTensor(torch.Tensor):
    """Tensor subclass whose sizes/strides may be SymInts; operator dispatch
    is routed to the meta kernels registered in ``meta_funcs``."""

    @staticmethod
    def __new__(cls, sym_shape, sym_strides, dtype, layout, requires_grad, device, storage_offset=0):
        # TODO: this is wrong in general
        # NOTE(review): `sym_strides` is ignored — contiguous strides are
        # recomputed from `sym_shape` instead.
        sym_stride = create_contiguous(sym_shape)
        r = torch.Tensor._make_wrapper_subclass(
            cls, sym_shape,
            sym_stride, storage_offset,
            dtype=dtype, layout=layout, requires_grad=requires_grad,
            device=device,
        )
        return r

    # Disable __torch_function__ so that only __torch_dispatch__ below runs.
    __torch_function__ = _disabled_torch_function_impl

    def new_empty(self, shape):
        # Preserves dtype/layout/requires_grad/device; strides are recomputed
        # by __new__ (the second argument is ignored there).
        return FakeSymbolicTensor(shape, None, self.dtype, self.layout, self.requires_grad, self.device)

    @classmethod
    def __torch_dispatch__(cls, func_overload, types, args=(), kwargs=None):
        # Prefer an explicitly registered meta kernel for this overload.
        if func_overload in meta_funcs:
            return meta_funcs[func_overload](*args, **kwargs)
        # new_empty is handled inline so shape propagation keeps working.
        if func_overload == torch.ops.aten.new_empty.default:
            self = args[0]
            shape = args[1]
            return FakeSymbolicTensor(shape, self.stride(), self.dtype, self.layout, self.requires_grad, self.device)
        raise RuntimeError(f"operator {func_overload} not supported")
def create_symbolic_tensor(name, arg, shape_env, storage_offset=0):
    """Wrap `arg` in a FakeSymbolicTensor whose sizes/strides are symbolic
    values allocated from `shape_env` (`name` is currently unused)."""
    sizes, strides = shape_env.create_symbolic_sizes_strides(arg)
    return FakeSymbolicTensor(
        sizes, strides,
        arg.dtype, arg.layout, arg.requires_grad, arg.device,
        storage_offset,
    )
def create_symint(shape_env, i):
    """Allocate a fresh symbol with hint `i` in `shape_env` and wrap it as a SymInt."""
    symbol = shape_env.create_symbol(i)
    return shape_env.create_symintnode(symbol)
@skipIfTorchDynamo("Creating ShapeEnv fails for confusing reasons (also we never expect dynamo to see code like this)")
class TestPySymInt(TestCase):
@skipIfNoSympy
def test_arith_ops(self):
shape_env = ShapeEnv()
symints = []
for i in range(2, 5):
symints.append((i, create_symint(shape_env, i)))
ops = [operator.add, operator.sub, operator.floordiv, operator.mul, operator.mod]
for op in ops:
for args in itertools.permutations(symints, 2):
if not isinstance(args[0][1], int) and ((op != operator.mod or op != operator.floordiv) and args[1][0] != 0):
self.assertTrue(op(args[0][1], args[1][1]) == op(args[0][0], args[1][0]))
@skipIfNoSympy
def test_reverse_arith_ops(self):
shape_env = ShapeEnv()
a = create_symint(shape_env, 2)
self.assertTrue(5 // a == 5 // 2)
a = create_symint(shape_env, 2)
self.assertTrue(5 * a == 5 * 2)
@skipIfNoSympy
def test_roundtrip(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
self.assertTrue(not isinstance(x.shape[0], SymNode))
self.assertTrue(isinstance(x.shape[0], SymInt))
self.assertTrue(x.shape[0] == 5)
self.assertTrue(x.shape[1] == 4)
self.assertTrue(x.shape[2], 3)
self.assertTrue(x.size()[0], 5)
self.assertTrue(x.size()[1], 4)
self.assertTrue(isinstance(x.size()[1], SymInt))
self.assertTrue(x.size()[2] == 3)
self.assertTrue(x.size(0) == 5)
self.assertTrue(x.size(1) == 4)
self.assertTrue(x.size(2) == 3)
self.assertTrue(isinstance(x.size(2), SymInt))
offset = create_symint(shape_env, 2)
y = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env, offset)
self.assertTrue(isinstance(y.storage_offset(), SymInt))
self.assertTrue(y.storage_offset() == 2)
offset = 2
z = create_symbolic_tensor("z", torch.randn(5, 4, 3), shape_env, offset)
self.assertTrue(isinstance(z.storage_offset(), int))
self.assertTrue(z.storage_offset() == 2)
@skipIfNoSympy
def test_binary(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(5, 4, 3), shape_env)
z = x + y
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# broadcasting
y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
z = x + y
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
@skipIfNoSympy
def test_symint_args(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(5, 4, 1), shape_env)
LAST_DIM = 2
z = x.narrow_copy(LAST_DIM, 0, y.shape[LAST_DIM])
self.assertTrue(z.shape[2] == y.shape[2])
# arithmetic expr with two symints
z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - y.shape[LAST_DIM])
self.assertTrue(z.shape[2] == 2)
# arithmetic expr with a symint and python int
z = x.narrow_copy(LAST_DIM, 0, x.shape[LAST_DIM] - 1)
self.assertTrue(z.shape[2] == 2)
@skipIfNoSympy
def test_symint_vargs(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
y = create_symbolic_tensor("y", torch.randn(1, 4, 1), shape_env)
# varargs
z = y.expand(x.shape[0], y.shape[1], x.shape[2])
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# shape list
z = y.expand((x.shape[0], y.shape[1], x.shape[2]))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints
z = y.expand(x.shape[0], y.shape[1], 3)
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints in a list
z = y.expand((x.shape[0], y.shape[1], 3))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python symints and ints
z = y.expand(5, y.shape[1], x.shape[2])
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
# mixed python ints and symints in a list
z = y.expand((5, y.shape[1], x.shape[2]))
self.assertTrue(z.shape[0] == 5)
self.assertTrue(z.shape[1] == 4)
self.assertTrue(z.shape[2] == 3)
z = y.expand((y.shape[1],))
z = y.expand(y.shape[1])
@skipIfNoSympy
def test_stride(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 5), shape_env)
self.assertIsInstance(x.stride()[0], SymInt)
@skipIfNoSympy
def test_size_expressions(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
expand_x = x.expand(x.shape[0], x.shape[0])
if expand_x.shape[0] > 3:
result = expand_x + expand_x
else:
result = expand_x + expand_x
gt_op = shape_env.guards[0][0]
self.assertTrue(isinstance(gt_op, sympy.core.relational.StrictGreaterThan))
self.assertTrue(str(x.shape[0]), str(gt_op.args[0]))
self.assertTrue(str(expand_x.shape[1]), str(x.shape[0]))
self.assertTrue(str(expand_x.shape[1]), str(result.shape[0]))
@skipIfNoSympy
def test_int_to_float(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
r = sym_float(x.shape[0])
self.assertIsInstance(r, torch.SymFloat, msg=type(r))
@skipIfNoSympy
def test_aten_ops(self):
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5), shape_env)
torch.ops.aten.narrow_copy.default(x, 0, 0, x.shape[0])
shape_env = ShapeEnv()
x = create_symbolic_tensor("x", torch.randn(5, 4, 3), shape_env)
torch.ops.aten.expand.default(x, [x.shape[0], x.shape[1], x.shape[2]])
def test_fx_trace_intlist(self):
class CustomModule(torch.nn.Module):
def forward(self, x):
bs, c, h, w = x.shape
return F.pad(x, (0, w % 2, 0, h % 2, 0, 0))
m = CustomModule()
x = torch.rand(1, 3, 4, 4)
# should not TypeError: pad(): argument 'pad' (position 2) must be
# tuple of ints, not tuple
torch.fx.symbolic_trace(m)
@skipIfNoSympy
def test_meta_symint(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
r = torch.empty(a0, device='meta')
self.assertIsInstance(r.shape[0], SymInt)
@skipIfNoSympy
def test_guard_int(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
self.assertEqual(guard_int(a0), 2)
self.assertEqual(str(shape_env.guards[0][0]), "Eq(s0, 2)")
@skipIfNoSympy
def test_int_conversion(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
self.assertRaisesRegex(RuntimeError, "Trying to extract", lambda: int(a0))
@skipIfNoSympy
def test_symint_as_scalar(self):
# Verifies that a SymInt passed as a scalar kwarg (alpha=) reaches
# __torch_dispatch__ still carrying the original symbolic node.
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 2)
sym_int_encountered = False
class TestSymInt(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
assert func == torch.ops.aten.add.Tensor
nonlocal sym_int_encountered
# WARNING: do not do identity tests on the outer
# SymInt/SymFloat, they are NOT STABLE
sym_int_encountered = kwargs["alpha"].node is a0.node
# Replace alpha with a real int so the underlying op can execute.
kwargs["alpha"] = 0
return func(*args)
x = torch.rand([4, 4])
with TestSymInt():
y = torch.add(x, x, alpha=a0)
self.assertTrue(sym_int_encountered)
@skipIfNoSympy
@unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
def test_print_readable_with_symints(self, mock_stdout):
# Traces f symbolically and checks that print_readable() renders
# symbolic sizes (s0, s1, ...) in annotations exactly as expected.
def f(a, b):
dim0 = a.shape[0] + b.shape[0]
dim1 = a.shape[1] + b.shape[1]
d = a.new_empty(dim0, dim1)
d = torch.ops.aten.native_dropout(d, 0.5, train=True)
return d
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(5, 3), torch.randn(4, 3))
fx_g.print_readable()
# The expected string below is an exact golden output; do not edit.
self.assertExpectedInline(mock_stdout.getvalue().strip(), """\
class f(torch.nn.Module):
def forward(self, a_1: f32[s0, s1], b_1: f32[s2, s1]):
# No stacktrace found for following nodes
sym_size: Sym(s0) = torch.ops.aten.sym_size(a_1, 0)
sym_size_1: Sym(s2) = torch.ops.aten.sym_size(b_1, 0)
add: Sym(s0 + s2) = sym_size + sym_size_1; sym_size = sym_size_1 = None
sym_size_2: Sym(s1) = torch.ops.aten.sym_size(a_1, 1)
sym_size_3: Sym(s1) = torch.ops.aten.sym_size(b_1, 1); b_1 = None
add_1: Sym(2*s1) = sym_size_2 + sym_size_3; sym_size_2 = sym_size_3 = None
new_empty: f32[s0 + s2, 2*s1] = torch.ops.aten.new_empty.default(a_1, [add, add_1], dtype = torch.float32, layout = torch.strided, device = device(type='cpu'), pin_memory = False); a_1 = add = add_1 = None
native_dropout = torch.ops.aten.native_dropout.default(new_empty, 0.5, True); new_empty = None
getitem: f32[s0 + s2, 2*s1] = native_dropout[0]
getitem_1: b8[s0 + s2, 2*s1] = native_dropout[1]; native_dropout = None
return (getitem, getitem_1)""") # noqa: B950
# This environment variable controls whether or not we print expected failure
# lists at the end of a test suite run. The intended usage looks like this:
#
# 1. Run `PYTORCH_COLLECT_EXPECT=1 python test/test_dynamic_shapes.py -k TestSymNumberMagicMethods`.
# 2. Given the printed xfail list, add them to the set expected_failure_sym_magic_methods.
COLLECT_EXPECT = os.getenv('PYTORCH_COLLECT_EXPECT', '0') == '1'
seen_failed = []
def print_seen():
# Dump the xfail entries collected in `seen_failed` (module global) in a
# copy-pasteable form for expected_failure_sym_magic_methods; used with
# PYTORCH_COLLECT_EXPECT=1 (registered via atexit below).
out = []
for key, reason in seen_failed:
# Make sure the generated line is lint clean
out.append(f" {key}, # {reason}"[:120])
print("expected_failure_sym_magic_methods = {")
print("\n".join(out))
print("}")
if COLLECT_EXPECT:
atexit.register(print_seen)
expected_failure_sym_magic_methods = {
('floordiv', 'SymInt', 'float'), # Cannot convert complex to float
('floordiv', 'int', 'SymFloat'), # unsupported operand type(s) for //: 'int' and 'SymFloat'
('floordiv', 'SymInt', 'SymFloat'), # Cannot convert complex to float
('mod', 'int', 'SymFloat'), # unsupported operand type(s) for %: 'int' and 'SymFloat'
('sym_int', 'int', 'float'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'float'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'SymFloat'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'SymFloat'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'int'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'int'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'int', 'SymInt'), # sym_int() takes 1 positional argument but 2 were given
('sym_int', 'SymInt', 'SymInt'), # sym_int() takes 1 positional argument but 2 were given
}
@skipIfTorchDynamo("Creating ShapeEnv fails for confusing reasons (also we never expect dynamo to see code like this)")
class TestSymNumberMagicMethods(TestCase):
def _do_test(self, fn, inp1, inp2, shape_env, is_unary_fn):
# Core driver: evaluate magic method `fn` on plain numbers to get a
# reference result, then re-evaluate with one/both args wrapped as
# SymInt/SymFloat and check the guarded results agree.
# Helper function
seed_node = (create_symint(shape_env, 1) / 1.).get_pyobj()
def get_sym_inp(inp):
# Wrap a plain int/float into the matching symbolic scalar type.
if isinstance(inp, int):
return torch.SymInt(seed_node.to_node(inp))
else:
return torch.SymFloat(seed_node.to_node(inp))
def maybe_xfail(inp1, inp2):
# Return a context manager: collect TypeErrors when COLLECT_EXPECT
# is set, assertRaises for known xfails, otherwise a no-op.
key = (fn, type(inp1).__name__, type(inp2).__name__)
if COLLECT_EXPECT:
@contextlib.contextmanager
def context():
try:
yield
except TypeError as e:
seen_failed.append((key, str(e)))
return context()
if key in expected_failure_sym_magic_methods:
return self.assertRaises(TypeError)
else:
return contextlib.nullcontext()
# These functions might return plain int/float
has_valid_downcast = fn in ["min", "max"]
# Resolve the callable for `fn` from builtins/math/module/operator.
if fn in symbolic_shapes.magic_methods_on_builtins:
lambda_apply = getattr(builtins, fn)
elif fn in symbolic_shapes.magic_methods_on_math:
lambda_apply = getattr(math, fn)
elif fn in symbolic_shapes.magic_methods_on_submodule:
lambda_apply = getattr(symbolic_shapes, fn)
else:
lambda_apply = getattr(operator, fn)
# Decide whether the symbolic result should be guarded as int or float.
if fn in symbolic_shapes.always_float_magic_methods:
tp = "float"
elif fn in symbolic_shapes.always_int_magic_methods:
tp = "int"
elif is_unary_fn:
tp = "float" if isinstance(inp1, float) else "int"
else:
tp = "float" if any(isinstance(i, float) for i in [inp1, inp2]) else "int"
def guard_fn(v):
# Extract a concrete value from a symbolic result; pass plain
# numbers through when a downcast is legitimate (min/max).
try:
if fn in symbolic_shapes.always_bool_magic_methods:
return bool(v)
else:
return getattr(v.node, f"guard_{tp}")("", 0)
except Exception as e:
if has_valid_downcast:
return v
else:
raise e
# Get reference result
with maybe_xfail(inp1, inp2):
if is_unary_fn:
ref_out = lambda_apply(inp1)
else:
ref_out = lambda_apply(inp1, inp2)
# Symified first arg
sym_inp1 = get_sym_inp(inp1)
with maybe_xfail(sym_inp1, inp2):
if is_unary_fn:
out = lambda_apply(sym_inp1)
else:
out = lambda_apply(sym_inp1, inp2)
self.assertEqual(guard_fn(out), ref_out)
if is_unary_fn:
return
# Symified second arg
sym_inp2 = get_sym_inp(inp2)
with maybe_xfail(inp1, sym_inp2):
out = lambda_apply(inp1, sym_inp2)
self.assertEqual(guard_fn(out), ref_out)
# Symified both args
with maybe_xfail(sym_inp1, sym_inp2):
out = lambda_apply(sym_inp1, sym_inp2)
self.assertEqual(guard_fn(out), ref_out)
@parametrize("fn", list(symbolic_shapes.magic_methods.keys()))
@parametrize("first_type", ["int", "float"])
@parametrize("second_type", ["int", "float"])
def test_method(self, fn, first_type, second_type):
# Parametrized entry point: builds random operands of the requested
# types and delegates the actual checking to _do_test.
if first_type == "float" and fn not in symbolic_shapes.float_magic_methods:
self.skipTest(f"{fn} is not a float magic method")
is_unary_fn = fn in symbolic_shapes.unary_magic_methods
# Second argument is ignored for unary function. So only run for one type
if is_unary_fn and second_type == "float":
self.skipTest(f"{fn} is unary and already tested")
# We could pass int/float directly for types but then the
# mangled test name is bad
inp1 = random.random() * 2.5
if first_type == "int":
inp1 = int(inp1)
inp2 = random.random() * 2.5
if second_type == "int":
inp2 = int(inp2)
shape_env = ShapeEnv()
self._do_test(fn, inp1, inp2, shape_env, is_unary_fn)
instantiate_parametrized_tests(TestSymNumberMagicMethods)
if __name__ == '__main__':
run_tests()
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
f23d73d2815a3e756ad65a90979bd0b36bba33f3 | 6e8719e86a1b6bef6b7a923bec99ed74936f3972 | /budget-app/BudgetApp.py | 576feae95b73fbd5a1da47df37042fb425e7b674 | [] | no_license | agnesho/scientific-computing-projects | f7808e3e069a2302f02df20bffc86b67b4933461 | 07f14a0fb787ef713ec39a391e3fec5b301cdc8e | refs/heads/main | 2023-06-04T13:59:15.143562 | 2021-06-20T02:11:05 | 2021-06-20T02:11:05 | 378,539,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,982 | py | class Category:
def __init__(self, category_name):
self.name = category_name
self.ledger = list()
def deposit(self, amount, desc=None):
if desc == None:
desc = ""
self.ledger.append({"amount": amount, "description": desc})
def withdraw(self, amount, desc=""):
if self.check_funds(amount): # if True
self.ledger.append({"amount": -amount, "description": desc})
return True
else:
return False
def get_balance(self):
self.balance = 0
for entry in self.ledger:
try:
for v in entry.values():
self.balance += v
except:
continue
return self.balance
def transfer(self, amount, category_name2):
if self.check_funds(amount): # if True
self.withdraw(amount, 'Transfer to ' + category_name2.name)
category_name2.deposit(amount, 'Transfer from ' + self.name)
return True
else:
return False
def check_funds(self, amount):
balance = self.get_balance()
if amount > balance:
return False
else:
return True
def __str__(self):
# Render a 30-character-wide table: name centered in '*', then one
# line per ledger entry (description truncated to 23 chars, amount
# right-aligned to 7 chars), then a Total line.
# Category name's length is even
if len(self.name) % 2 == 0:
output = ((30-len(self.name))//2)*'*' + self.name + ((30-len(self.name))//2)*'*' + '\n'
# Odd
else:
output = ((30-len(self.name))//2 + 1)*'*' + self.name + ((30-len(self.name))//2)*'*' + '\n'
for entry in self.ledger:
item = entry["description"][:23]
price = entry["amount"]
output += '{:<23}{:>7.2f}\n'.format(item, price)
output += 'Total: ' + '{:.2f}'.format(self.get_balance())
return output
##### Bar Chart #####
def create_spend_chart(categories):
# Build the freeCodeCamp "percentage spent by category" bar chart:
# a 100..0 percent axis, one 'o'-bar column per category, a dashed
# rule, and vertically written category names underneath.
total_spending = 0
max_length = 0
cat_list = list()
per_list = list()
# Find categories and their respective withdrawals
for i in categories:
for cat in i.ledger:
if cat['amount'] < 0:
# NOTE(review): `spending` appears to be overwritten per withdrawal
# rather than accumulated — confirm against the original
# (indentation was lost in this copy, so nesting is ambiguous).
spending = -cat['amount']
total_spending += (-cat['amount'])
# What is the length of the longest category name?
if len(i.name) > max_length:
max_length = len(i.name)
cat_list.append([i.name, spending])
for i in per_list:
percentage = (i[1]/total_spending)*100
per_list.append({i[0]: percentage})
# Output
title = "Percentage spent by category\n"
row = ''
name = ' '
no_of_categories = len(cat_list)
# One axis row per 10%, from 100 down to 0.
for i in range(100,-10,-10):
row += '{:3}{} '.format(i, '|')
for j in per_list:
for cat, percentage in j.items():
if percentage >= i:
row += 'o '
else:
row += ' '
row += '\n'
count = 0
# Write category names vertically, one character row at a time.
for i in range(max_length):
for j in cat_list:
if len(j[0])-1 < count:
name += ' '
else:
name += j[0][i] + ' '
# No newline after last character
if i == max_length-1:
continue
else:
name += '\n '
count += 1
dash = ' ' + '-' * (no_of_categories*3 + 1) + '\n'
output = title + row + dash + name
return output
return output | [
"hoagnes@yahoo.com.sg"
] | hoagnes@yahoo.com.sg |
d2a632f411a948ce72638cfc9cc2bc817a789e22 | 380ca73a9fa28b27dfcba3b3a30588853cf8fda9 | /miappotro/link.py | c84c6c7f766ffaa14b655289078604b08f81bb12 | [] | no_license | Alejandro-Gutierrez/django | 71394a766d07c054fd2038f3b9adbed1291dec66 | 786cbc2122ba35b99dd1c2c11264576ca4bbef48 | refs/heads/master | 2021-06-22T22:03:53.451226 | 2019-08-15T23:02:50 | 2019-08-15T23:02:50 | 202,214,891 | 0 | 0 | null | 2021-06-10T22:02:20 | 2019-08-13T19:56:58 | Python | UTF-8 | Python | false | false | 258 | py |
from django.urls import path
from miappotro.views import *
urlpatterns = [
path('', saludar),
path('Cocinero/new', cocinero),
path('perro/new', perro),
path('gato/new', gato),
path('loro/new', loro),
path('tiburon/new', tiburon),
] | [
"alejandroluis444@gmail.com"
] | alejandroluis444@gmail.com |
de25130f84581c63b98110512d16ae48fb0b9636 | d2665b95ccbd1eecc5b92a1cd69233f2eab74cb0 | /prototype.py | 88df86304ba38569ee79daa960e8809680ab1dfa | [] | no_license | sychov/patterns | 32a240a2318a7b932fdcc5c1784f13f35769fa3c | f70bddced432b963ac83d865f873088672568e07 | refs/heads/master | 2020-05-18T13:13:44.737147 | 2019-05-01T15:00:18 | 2019-05-01T15:00:18 | 184,432,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | #-----------------------------------------------------------------------------#
# Name: prototype.py
# Author: Ryoga
# Created: 18.04.2019
# Description: "Prototype" pattern.
# Main idea: copying object using it's own method.
#-----------------------------------------------------------------------------#
import abc
from dataclasses import dataclass
RED = '#ff0000'
GREEN = '#00ff00'
class Prototype(abc.ABC):
    """Abstract base for the Prototype pattern.

    Concrete prototypes implement clone() to return a copy of themselves.
    """

    @abc.abstractmethod
    def clone(self):
        """Return a copy of this object."""
@dataclass
class Monster(Prototype):
    """A generic, clonable monster with a name, color and position."""

    name: str
    color: str
    coord_x: int
    coord_y: int

    def clone(self):
        """Return a new Monster carrying the same attribute values."""
        return Monster(
            name=self.name,
            color=self.color,
            coord_x=self.coord_x,
            coord_y=self.coord_y,
        )

    def show(self):
        """Print a one-line description of this monster."""
        print('%s at (%d:%d), color %s' %
              (self.name, self.coord_x, self.coord_y, self.color))
class RedMonster(Monster):
    """Prototype for red monsters placed at a given position."""

    def __init__(self, coord_x, coord_y):
        """Initialize a monster with the fixed red color and name."""
        super().__init__('red_monster', RED, coord_x, coord_y)

    def clone(self):
        """Return a new RedMonster at the same position."""
        return RedMonster(self.coord_x, self.coord_y)
# --------------------------- TEST --------------------------------
if __name__ == '__main__':
# Demo: clone a generic monster, move the copy, and show both.
monster_1 = Monster('TestMonster', GREEN, 0, 0)
monster_2 = monster_1.clone()
monster_2.coord_x = 1
monster_1.show()
monster_2.show()
# Demo: same exercise with the RedMonster prototype subclass.
red_1 = RedMonster(7, 7)
red_2 = red_1.clone()
red_2.coord_x = 8
red_1.show()
red_2.show()
| [
"alexx-lawyer@yandex.ru"
] | alexx-lawyer@yandex.ru |
794e2904caebb85aa81ccb41eaed66721843747f | 09301c71638abf45230192e62503f79a52e0bd80 | /besco_erp/besco_warehouse/general_stock_fifo/__openerp__.py | 7aa0010772448e6c5236add7f97c1eec77d47520 | [] | no_license | westlyou/NEDCOFFEE | 24ef8c46f74a129059622f126401366497ba72a6 | 4079ab7312428c0eb12015e543605eac0bd3976f | refs/heads/master | 2020-05-27T06:01:15.188827 | 2017-11-14T15:35:22 | 2017-11-14T15:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
##############################################################################
#
##############################################################################
# OpenERP/Odoo module manifest: a bare dict literal read by the module loader.
{
"name" : "General Stock FIFO",
"version" : "9.0",
"author" : "Le Truong Thanh <thanh.lt1689@gmail.com>",
'category': 'General 90',
# Modules that must be installed before this one.
"depends" : ["general_stock",
"general_account",
],
"init_xml" : [],
"demo_xml" : [],
"description": """
""",
# XML/CSV data files loaded on install/update, in order.
'data': [
# 'security/ir.model.access.csv',
# 'security/security.xml',
'cron.xml',
'stock_fifo_view.xml',
'menu.xml',
],
'test': [
],
'installable': True,
'auto_install': False,
'certificate': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"son.huynh@nedcoffee.vn"
] | son.huynh@nedcoffee.vn |
351803413cf100483f00adc08ebc22983a6b6c58 | 50a3c6da75543b4cefd1054a7f1cb2fc06189397 | /multiagent/run.py | acb9b3b679b93799d4c24b5804a943a2f641bb76 | [] | no_license | anuragdutt/gasil_robotics | 99865f8c3964c8c0fe6866a10507804501f36273 | 132c7fc834a507faf2e9a1d15d290c3d7963dd10 | refs/heads/master | 2023-03-07T06:20:46.105025 | 2021-02-16T05:34:15 | 2021-02-16T05:34:15 | 264,075,668 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,268 | py | # -*- coding: utf-8 -*-
import os
import pickle
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # 使用 GPU 0
import tensorflow as tf
import numpy as np
import algorithm.common.tf_utils as tf_utils
import time
import random
# read input cmd from standard input device
flags = tf.app.flags
# Game parameter
flags.DEFINE_string('env_name', 'predator_prey', 'env used')
flags.DEFINE_bool('render', True, 'whether to render the scenario')
flags.DEFINE_integer('seed', 1, 'random seed')
flags.DEFINE_integer('num_adversaries', 2, 'num_adversaries')
flags.DEFINE_integer('num_good_agents', 3, 'num_good_agents')
flags.DEFINE_integer('max_step_before_punishment', 8, 'max_step_before_punishment')
flags.DEFINE_bool('reload_prey', True, 'whether to reload the pre-trained prey model')
flags.DEFINE_string('prey_model_path', './model/unit_num_32/model.ckpt-40000',
'path of the pre-trained prey model')
flags.DEFINE_bool('reload_predator', False, 'whether to reload the pre-trained predator model')
flags.DEFINE_string('predator_model_path', './your_trained_model_path',
'path of the pre-trained predator model')
# Training parameters
flags.DEFINE_bool('learning', True, 'train the agents')
flags.DEFINE_string('predator_policy', 'gasil', 'predator_policy')
flags.DEFINE_string('prey_policy', 'ddpg', 'prey_policy: [random, fixed, ddpg]')
flags.DEFINE_integer('episodes', 140000, 'maximum training episode')
flags.DEFINE_integer('max_episode_len', 60, 'maximum step of each episode')
flags.DEFINE_float('ddpg_plr', 0.01, 'policy learning rate')
flags.DEFINE_float('ddpg_qlr', 0.001, 'critic learning rate')
flags.DEFINE_float('gamma', 0.99, 'discount factor')
flags.DEFINE_float('tau', 0.01, 'target network update frequency')
flags.DEFINE_integer('target_update_interval', 1, 'target network update frequency')
flags.DEFINE_float('return_confidence_factor', 0.7, 'return_confidence_factor')
flags.DEFINE_integer('batch_size', 1024, 'batch size')
flags.DEFINE_integer('n_train_repeat', 1, 'repeated sample times at each training time')
flags.DEFINE_integer('save_checkpoint_every_epoch', 5000, 'save_checkpoint_every_epoch')
flags.DEFINE_integer('plot_reward_recent_mean', 1000, 'show the avg reward of recent 200 episode')
flags.DEFINE_bool('save_return', True, 'save trajectory Return by default')
flags.DEFINE_float('lambda1', 0., 'n-step return')
flags.DEFINE_float('lambda1_max', 1., 'n-step return')
flags.DEFINE_float('lambda2', 1e-6, 'coefficient of regularization')
# GASIL
flags.DEFINE_bool('consider_state_action_confidence', True, 'The closer (state, action) to the end state the important')
flags.DEFINE_float('state_action_confidence', 0.8, 'discount factor of (state, action)')
flags.DEFINE_float('state_action_confidence_max', 1., 'discount factor of (state, action)')
flags.DEFINE_integer('gradually_inc_start_episode', 0,
'increase parameters start at ${gradually_inc_start_episode} episode')
flags.DEFINE_integer('gradually_inc_within_episode', 12000,
'increase parameters in ${gradually_inc_within_episode} episode')
flags.DEFINE_integer('inc_or_dec_step', 1000, 'natural_exp_inc parameter: inc_step')
flags.DEFINE_float('d_lr', 0.001, 'discriminator learning rate')
flags.DEFINE_float('imitation_lambda', 0., 'coefficient of imitation learning')
flags.DEFINE_float('imitation_lambda_max', 1., 'maximum coefficient of imitation learning')
flags.DEFINE_integer('train_discriminator_k', 1, 'train discriminator net k times at each update')
flags.DEFINE_integer('gan_batch_size', 8, 'batch_size of training GAN')
# experience replay
flags.DEFINE_integer('buffer_size', 300000, 'buffer size')
flags.DEFINE_integer('min_buffer_size', 30000, 'minimum buffer size before training')
flags.DEFINE_integer('positive_buffer_size', 32, 'buffer size')
flags.DEFINE_integer('min_positive_buffer_size', 32, 'min buffer size before training')
# prioritized
flags.DEFINE_bool('prioritized_er', False, 'whether to use prioritized ER')
flags.DEFINE_float('alpha', 0.6, 'how much prioritization is used (0 - no prioritization, 1 - full prioritization)')
flags.DEFINE_float('beta', 0.4, 'To what degree to use importance weights (0 - no corrections, 1 - full correction)')
# Net structure
flags.DEFINE_integer('num_units', 32, 'layer neuron number')
flags.DEFINE_integer('num_units_ma', 64, 'layer neuron number for multiagent alg')
flags.DEFINE_integer('h_layer_num', 2, 'hidden layer num')
# Model saving dir
flags.DEFINE_string('model_save_dir', './exp_result/{}/{}/saved_models/seed_{}/model',
'Model saving dir')
flags.DEFINE_string('learning_curve_dir', './exp_result/{}/{}/learning_curves/seed_{}',
'learning_curve_dir')
FLAGS = flags.FLAGS # alias
def make_env(scenario_name, max_step_before_punishment):
'''
Creates a MultiAgentEnv object as env. This can be used similar to a gym
environment by calling env.reset() and env.step().
Use env.render() to view the environment on the screen.
Input:
scenario_name : name of the scenario from ./scenarios/ to be Returns
(without the .py extension)
benchmark : whether you want to produce benchmarking data
(usually only done during evaluation)
Some useful env properties (see environment.py):
.observation_space : Returns the observation space for each agent
.action_space : Returns the action space for each agent
.n : Returns the number of Agents
'''
# Imported lazily so the module can be loaded without the env package.
from env.multiagent.environment import MultiAgentEnv
import env.multiagent.scenarios as scenarios
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# Inject the punishment horizon before the world is built.
scenario.max_step_before_punishment = max_step_before_punishment
print('==============================================================')
print('max_step_before_punishment: ', scenario.max_step_before_punishment)
print('==============================================================')
# create world
world = scenario.make_world()
# create multiagent environment
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation,
info_callback=scenario.collision_number,
done_callback=scenario.done,
other_callbacks=[scenario.set_arrested_pressed_watched])
return env
def build_agents(action_dim_n, observation_dim_n, policies_name):
    """Instantiate one trainer agent per policy name.

    Args:
        action_dim_n: per-agent action dimensionalities.
        observation_dim_n: per-agent observation dimensionalities.
        policies_name: per-agent policy identifiers.

    Returns:
        List of agents created by SimpleAgentFactory, in agent-index order.
    """
    from algorithm.trainer import SimpleAgentFactory

    obs_shape_n = [[dim] for dim in observation_dim_n]
    return [
        SimpleAgentFactory.createAgent(idx, policy, obs_shape_n, action_dim_n, FLAGS)
        for idx, policy in enumerate(policies_name)
    ]
def reload_previous_models(session, env):
    """Restore pre-trained prey and/or predator variables into `session`.

    Which models are restored is controlled by FLAGS.reload_prey and
    FLAGS.reload_predator; checkpoint paths come from FLAGS as well.
    Predators are agents [0, num_adversaries); prey are the rest.
    """
    def _restore(agent_indices, model_path, label):
        # Collect all global variables under each agent's variable scope
        # and restore them from the given checkpoint.
        scoped_vars = []
        for idx in agent_indices:
            scoped_vars += tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope='agent_{}'.format(idx))
        saver = tf.train.Saver(var_list=scoped_vars)
        saver.restore(session, model_path)
        print('[{}] successfully reload previously saved ddpg model({})...'.format(
            label, model_path))

    # The original duplicated this logic per role and micro-managed the
    # saver lifetime with `del` + gc.collect(); the helper removes both.
    if FLAGS.reload_prey:
        _restore(range(FLAGS.num_adversaries, env.n), FLAGS.prey_model_path, 'prey')
    if FLAGS.reload_predator:
        _restore(range(FLAGS.num_adversaries), FLAGS.predator_model_path, 'predator')
def train():
"""End-to-end training loop: build env and agents, run episodes, log
TensorBoard summaries, periodically checkpoint, and dump statistics."""
# init env
env = make_env(FLAGS.env_name, FLAGS.max_step_before_punishment)
env = env.unwrapped
# set env seed
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
print("Using seed {} ...".format(FLAGS.seed))
print('There are total {} agents.'.format(env.n))
obs_shape_n = [env.observation_space[i].shape[0] for i in range(env.n)]
# Every agent acts in a 2-D continuous space (x/y force components).
action_shpe_n = [2] * env.n
print('obs_shape_n: ', obs_shape_n) # [16, 16, 16, 14]
print(action_shpe_n) # [5, 5, 5, 5]
adv_policies = [FLAGS.predator_policy] * FLAGS.num_adversaries
good_policies = [FLAGS.prey_policy] * FLAGS.num_good_agents
print(adv_policies + good_policies)
with tf_utils.make_session().as_default() as sess:
# init agents
agents = build_agents(action_shpe_n, obs_shape_n, adv_policies + good_policies)
# init tf summaries
summary_path = FLAGS.learning_curve_dir.format(FLAGS.env_name, FLAGS.predator_policy, FLAGS.seed)
print('summary_path', summary_path)
summary_writer = tf.summary.FileWriter(summary_path)
# --- placeholders + scalar ops for the TensorBoard learning curves ---
adv_mean_options, adv_mean_phs = [], []
adv_episode_options, adv_episode_phs = [], []
prey_mean_options, prey_mean_phs = [], []
prey_episode_options, prey_episode_phs = [], []
agent0_pool_mean_return_ph = tf.placeholder(dtype=tf.float32, shape=[], name='agent0_pool_mean_return')
agent0_pool_mean_return_option = tf.summary.scalar('agent0_pool_mean_return', agent0_pool_mean_return_ph)
agent1_pool_mean_return_ph = tf.placeholder(dtype=tf.float32, shape=[], name='agent1_pool_mean_return')
agent1_pool_mean_return_option = tf.summary.scalar('agent1_pool_mean_return', agent1_pool_mean_return_ph)
agent0_positive_pool_mean_return_ph = tf.placeholder(dtype=tf.float32, shape=[],
name='agent0_positive_pool_mean_return')
agent0_positive_pool_mean_return_option = tf.summary.scalar('agent0_positive_pool_mean_return',
agent0_positive_pool_mean_return_ph)
agent1_positive_pool_mean_return_ph = tf.placeholder(dtype=tf.float32, shape=[],
name='agent1_positive_pool_mean_return')
agent1_positive_pool_mean_return_option = tf.summary.scalar('agent1_positive_pool_mean_return',
agent1_positive_pool_mean_return_ph)
agent0_imitation_lambda = tf.placeholder(dtype=tf.float32, shape=[], name='agent0_imitation_lambda')
agent0_imitation_lambda_option = tf.summary.scalar('agent0_imitation_lambda', agent0_imitation_lambda)
agent0_state_action_confidence = tf.placeholder(dtype=tf.float32, shape=[],
name='agent0_state_action_confidence')
agent0_state_action_confidence_option = tf.summary.scalar('agent0_state_action_confidence',
agent0_state_action_confidence)
# Per-adversary reward curves (recent-mean and per-episode).
for idx in range(FLAGS.num_adversaries):
ad_fp_reward_1000_mean = tf.placeholder(dtype=tf.float32, shape=[],
name='ad_{}_fp_reward_{}_mean'.format(idx,
FLAGS.plot_reward_recent_mean))
ad_fp_reward_1000_mean_op = tf.summary.scalar(
'adversary {} episode reward {} mean'.format(idx, FLAGS.plot_reward_recent_mean),
ad_fp_reward_1000_mean)
ad_fp_reward_episode = tf.placeholder(dtype=tf.float32, shape=[],
name='ad_{}_fp_reward_episode'.format(idx))
ad_fp_reward_episode_op = tf.summary.scalar('adversary {} episode reward'.format(idx), ad_fp_reward_episode)
adv_mean_phs.append(ad_fp_reward_1000_mean)
adv_mean_options.append(ad_fp_reward_1000_mean_op)
adv_episode_phs.append(ad_fp_reward_episode)
adv_episode_options.append(ad_fp_reward_episode_op)
# Per-prey reward curves (recent-mean and per-episode).
for idx in range(FLAGS.num_good_agents):
prey_fp_reward_1000_mean = tf.placeholder(dtype=tf.float32, shape=[],
name='prey_{}_fp_reward_{}_mean'.format(idx,
FLAGS.plot_reward_recent_mean))
prey_fp_reward_1000_mean_op = tf.summary.scalar(
'prey {} episode reward {} mean'.format(idx, FLAGS.plot_reward_recent_mean),
prey_fp_reward_1000_mean)
prey_fp_reward_episode = tf.placeholder(dtype=tf.float32, shape=[],
name='prey_{}_fp_reward_episode'.format(idx))
prey_fp_reward_episode_op = tf.summary.scalar('prey {} episode reward'.format(idx), prey_fp_reward_episode)
prey_mean_phs.append(prey_fp_reward_1000_mean)
prey_mean_options.append(prey_fp_reward_1000_mean_op)
prey_episode_phs.append(prey_fp_reward_episode)
prey_episode_options.append(prey_fp_reward_episode_op)
# build model saver
saver = tf.train.Saver(max_to_keep=int(FLAGS.episodes / FLAGS.save_checkpoint_every_epoch))
# reload previous prey and predator model
reload_previous_models(session=sess, env=env)
# Initialize uninitialized variables.
tf_utils.initialize(sess=sess)
# assert using same session
same_session(sess, agents)
# make the tensor graph unchangeable
sess.graph.finalize()
# collect some statistical data
episode_rewards = [0.0] # sum of rewards for all agents
agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
agent_episode_rewards = [[0.0] for _ in range(env.n)]
losses_transformation = [[] for _ in range(FLAGS.num_adversaries)]
coordination_reach_times = [[0] for _ in range(FLAGS.num_good_agents)]
coordination_times = 0
miss_coordination_times = 0
total_times = 0
obs_n = env.reset()
episode_step = 0 # step for each episode
train_step = 0 # total training step
t_start = time.time()
print('Starting iterations...')
# --- main interaction loop; one iteration = one environment step ---
while len(episode_rewards) <= FLAGS.episodes:
# increment global step counter
train_step += 1
if FLAGS.render:
time.sleep(0.3)
env.render()
# Query each agent's policy for a 2-D action, then embed it into the
# 5-slot action format the environment expects.
action_2_dim_n = [agent.get_actions(observations=[obs], single=True) for agent, obs in zip(agents, obs_n)]
action_n = [[0, a[0], 0, a[1], 0] for a in action_2_dim_n]
# environment step
new_obs_n, rew_n, done_n, info_n = env.step(action_n, restrict_move=True)
info_n = info_n['n']
episode_step += 1
done = all(done_n) # task achieved (all agents done)
terminal = (episode_step >= FLAGS.max_episode_len) # episode step limit reached
ended = done or terminal
# collect experience
if FLAGS.learning:
for i, agent in enumerate(agents):
# prey is fixed
if i < FLAGS.num_adversaries:
agent.experience(obs_n[i], action_2_dim_n[i], rew_n[i], new_obs_n[i], ended)
# step forward observations
obs_n = new_obs_n
# record some analysis information
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
agent_episode_rewards[i].append(rew)
agent_rewards[i][-1] += rew
for discrete_action in range(FLAGS.num_good_agents):
coordination_reach_times[discrete_action][-1] += info_n[0][discrete_action]
# add some log and records...
if ended:
# print log for debugging......
if len(episode_rewards) % 100 == 0:
print('process {}, episode {}: '.format(os.getpid(), len(episode_rewards)))
# reset environment
obs_n = env.reset()
# reset episode tags
episode_step = 0
episode_rewards.append(0) # reset sum rewards
for idx, a in enumerate(agent_rewards): # reset each agent's reward
a.append(0)
agent_episode_rewards = [[0.0] for _ in range(env.n)]
for coord_count in coordination_reach_times: # reset coordination times
coord_count.append(0)
# do training
if FLAGS.learning:
for i in range(FLAGS.n_train_repeat):
loss_and_positive_loss, trained = [], False
for idx, agent in enumerate(agents):
# Only the predators (first num_adversaries agents) learn.
if idx >= FLAGS.num_adversaries:
continue
loss = agent.do_training(agents=agents, iteration=train_step, episode=len(episode_rewards))
loss_and_positive_loss.append(loss)
trained = loss is not None
if trained:
for idx in range(FLAGS.num_adversaries):
losses_transformation[idx].append(loss_and_positive_loss[idx])
# add summary
if ended and len(episode_rewards) % 10 == 0:
# agent buffer avg return
summary_writer.add_summary(sess.run(agent0_pool_mean_return_option,
{agent0_pool_mean_return_ph: agents[
0].pool.current_mean_return}),
len(episode_rewards))
summary_writer.add_summary(sess.run(agent1_pool_mean_return_option,
{agent1_pool_mean_return_ph: agents[
1].pool.current_mean_return}),
len(episode_rewards))
# GASIL-specific curves (imitation weight, confidence, positive pools).
if FLAGS.predator_policy == 'gasil':
# log confidence_factor,
summary_writer.add_summary(
sess.run(agent0_state_action_confidence_option,
{agent0_state_action_confidence: agents[0].state_action_confidence}),
len(episode_rewards))
# imitation_lambda
summary_writer.add_summary(
sess.run(agent0_imitation_lambda_option,
{agent0_imitation_lambda: agents[0].imitation_lambda}), len(episode_rewards))
# positive pool mean return
summary_writer.add_summary(sess.run(agent0_positive_pool_mean_return_option,
{agent0_positive_pool_mean_return_ph: agents[
0].positive_pool.current_mean_return}),
len(episode_rewards))
summary_writer.add_summary(sess.run(agent1_positive_pool_mean_return_option,
{agent1_positive_pool_mean_return_ph: agents[
1].positive_pool.current_mean_return}),
len(episode_rewards))
for idx in range(FLAGS.num_adversaries):
summary_writer.add_summary(sess.run(adv_mean_options[idx], {
adv_mean_phs[idx]: np.mean(
agent_rewards[idx][-FLAGS.plot_reward_recent_mean - 1: -1])}),
len(episode_rewards))
summary_writer.add_summary(
sess.run(adv_episode_options[idx], {adv_episode_phs[idx]: agent_rewards[idx][-2]}),
len(episode_rewards))
# add summary for drawing curves (prey)
for idx in range(FLAGS.num_good_agents):
summary_writer.add_summary(sess.run(prey_mean_options[idx], {
prey_mean_phs[idx]: np.mean(
agent_rewards[idx + FLAGS.num_adversaries][
-FLAGS.plot_reward_recent_mean - 1: -1])
}), len(episode_rewards))
summary_writer.add_summary(
sess.run(prey_episode_options[idx], {
prey_episode_phs[idx]: agent_rewards[idx + FLAGS.num_adversaries][-2]
}), len(episode_rewards))
# save models
if ended and len(episode_rewards) % FLAGS.save_checkpoint_every_epoch == 0:
# save model
save_model(saver, sess, len(episode_rewards))
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}".format(
train_step, len(episode_rewards),
np.mean(episode_rewards[-FLAGS.save_checkpoint_every_epoch:]),
[np.mean(rew[-FLAGS.save_checkpoint_every_epoch:]) for rew in agent_rewards],
round(time.time() - t_start, 3)))
t_start = time.time()
# close sess
sess.close()
# record
if FLAGS.learning:
record_logs(**{
'summary_path': summary_path,
'agent_rewards': agent_rewards,
'coordination_reach_times': coordination_reach_times,
'agents': agents,
'losses_transformation': losses_transformation,
})
def save_model(saver, sess, episode):
model_path = FLAGS.model_save_dir.format(FLAGS.env_name, FLAGS.predator_policy, FLAGS.seed)
if not os.path.exists(model_path):
os.makedirs(os.path.dirname(model_path), exist_ok=True)
saver.save(sess, model_path, global_step=episode)
def record_logs(**kwargs):
log_path = kwargs['summary_path'] + '/logs'
if not os.path.exists(log_path):
os.makedirs(log_path, exist_ok=True)
losses_transformation_file_name = log_path + '/' + FLAGS.env_name + '_losses_transformation.pkl'
with open(losses_transformation_file_name, 'wb') as fp:
pickle.dump(kwargs['losses_transformation'], fp)
rew_file_name = log_path + '/' + FLAGS.env_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(kwargs['agent_rewards'], fp)
coordination_reach_times_file = log_path + '/' + FLAGS.env_name + '_coordination_reach_times.pkl'
with open(coordination_reach_times_file, 'wb') as fp:
pickle.dump(kwargs['coordination_reach_times'], fp)
buffer_mean = log_path + '/' + FLAGS.env_name + '_buffer.pkl'
with open(buffer_mean, 'wb') as fp:
pickle.dump(kwargs['agents'][0].pool.mean_returns, fp)
print("Mean buffer return:")
print(kwargs['agents'][0].pool.mean_returns)
# for debug below ..........................................................
def same_session(sess, agents):
for agent in agents[:FLAGS.num_adversaries]:
if sess != agent.get_session():
print("Session error (diff tf session)")
print("The same session.........................")
if __name__ == '__main__':
train()
| [
"anurag2709@gmail.com"
] | anurag2709@gmail.com |
9ae53a696d6932a42cbec2b3dbdcca559ebf6b4d | af15fc08a85e9f98db34293764adc4e26a0c0441 | /Python3/generators.py | 7ccb5ce8cc4ac51f0284193204159937f986502b | [] | no_license | mmuazam98/python-basics | d187ad14fd9d231ea281c22613138ace90937f7a | 37e616709a05f4fa44b748e9eb367f522ed72a49 | refs/heads/master | 2023-04-07T11:34:11.362978 | 2021-04-22T08:29:02 | 2021-04-22T08:29:02 | 313,984,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | def main():
# for i in range(10): prints 0-9
# for i in inclusive_range(10): prints 0-10
for i in inclusive_range(1,10,2):
print(i, end=' ')
print()
def inclusive_range(*args):
numargs = len(args)
start = 0
step = 1
if numargs < 1:
raise TypeError(f'expected atleast 1 argument {numargs}')
elif numargs == 1:
stop = args[0]
elif numargs == 2:
(start,stop) = args
elif numargs == 3:
(start,stop,step) = args
else:
raise TypeError(f'expected atmost 3 arguments {numargs}')
# generator
i = start
while i<=stop:
yield i
i = i + step
if __name__ == "__main__":
main()
| [
"mm9084@srmist.edu.in"
] | mm9084@srmist.edu.in |
dc2fa899111262ff07ca26dd5d3f00391c89999a | 22a0715a4b71deb014cfc333c496385553ed2261 | /dijkstra.py | c402863ca15199af38af954715a84ac199bb08c9 | [] | no_license | Kharya3/algo | 044d8716e65e038f5b5d94f86970bfc886caba93 | 9df53fc2e8a57fe7cb0d0a4ba198c622953ec801 | refs/heads/master | 2022-12-22T02:52:40.616235 | 2020-09-16T08:56:12 | 2020-09-16T08:56:12 | 280,258,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | from collections import defaultdict
def djk(s):
X = {s}
ln[s] = 0
while X:
v = X.pop()
for w, weight in g[v]:
if ln[w] > ln[v] + weight:
ln[w] = ln[v] + weight
X.add(w)
mx = 10 ** 7
g = defaultdict(list)
with open("dijkstraData.txt") as file:
for line in file:
if line != "\n":
lst = [i for i in line.split()]
for i in lst[1:]:
x, y = map(int, i.split(","))
g[int(lst[0])].append((x, y))
ln = [mx] * 201
djk(1)
print(ln)
| [
"noreply@github.com"
] | noreply@github.com |
7caca9b675e58b3cf92fdadfc2e2d255f90c481f | 63bdeab4f5ed48acfc6fa919ead31244cbd1d7e7 | /agentnet/utils/__init__.py | a719f81e995ab17066de7ec9c435a8c0f4504c72 | [
"MIT"
] | permissive | gitter-badger/AgentNet | 5bad5b4d14cea4a72e785dedcd7206c35f9f0354 | ee3aba8b7c799427eba69e9a91e3b5a0f5936094 | refs/heads/master | 2020-12-11T03:55:21.817033 | 2016-05-03T21:17:20 | 2016-05-03T21:17:20 | 58,045,765 | 0 | 1 | null | 2016-05-04T11:06:29 | 2016-05-04T11:06:28 | null | UTF-8 | Python | false | false | 164 | py |
__doc__ = """helper functions for symbolic theano code"""
from tensor_ops import *
from grad import *
from shared import *
from persistence import *
| [
"jheuristic@cv-gpu01h.ydf.yandex.net"
] | jheuristic@cv-gpu01h.ydf.yandex.net |
bd40e87cf094c91dcb5d4c15d6fec0e2daf3068f | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/spaCy/2016/4/test_flag_features.py | 880704e28905500ee8aa5b21c6e60fc6e73fdc58 | [
"MIT"
] | permissive | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 1,333 | py | from __future__ import unicode_literals
import pytest
from spacy.orth import is_alpha
from spacy.orth import is_digit
from spacy.orth import is_punct
from spacy.orth import is_space
from spacy.orth import is_ascii
from spacy.orth import is_upper
from spacy.orth import is_lower
from spacy.orth import is_title
@pytest.fixture
def words():
return ["1997", "19.97", "hello9", "Hello", "HELLO", "Hello9", "\n", "!",
"!d", "\nd"]
def test_is_alpha(words):
assert not is_alpha(words[0])
assert not is_alpha(words[1])
assert not is_alpha(words[2])
assert is_alpha(words[3])
assert is_alpha(words[4])
assert not is_alpha(words[5])
assert not is_alpha(words[6])
assert not is_alpha(words[7])
assert not is_alpha(words[8])
assert not is_alpha(words[9])
def test_is_digit(words):
assert is_digit(words[0])
assert not is_digit(words[1])
assert not is_digit(words[2])
assert not is_digit(words[3])
assert not is_digit(words[4])
assert not is_digit(words[5])
assert not is_digit(words[6])
assert not is_digit(words[7])
assert not is_digit(words[8])
assert not is_digit(words[9])
def test_is_quote(words):
pass
def test_is_bracket(words):
pass
def test_is_left_bracket(words):
pass
def test_is_right_bracket(words):
pass
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
9d92b6a68e48dc9a49b6e6121c3804f805be4957 | f419df1d9c148d0964f05c74ada4f3f218db49b7 | /征收所得税问题.py | 3f7579b7c32356803d0cb4138f3f871460d336a0 | [] | no_license | 1354107742/PythonForLearning | 5c915be8639cca7074eb902fa7623623a9b67783 | 780e88b56927db532d0efc004279eff263beb439 | refs/heads/master | 2020-05-06T13:24:42.090563 | 2019-07-31T09:23:33 | 2019-07-31T09:23:33 | 180,135,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | money = int(input('请输入你的个人所得:'))
income = 0
if money < 2000:
print("你的收入竟然都不用交税,真是可怜")
elif 4000 > money >= 2000:
income = money - 2000 * 0.03
elif 6000 > money >= 4000:
income = money - 2000 * 0.03 - 2000 * 0.04
elif 8000 > money >= 6000:
income = money - 2000 * 0.03 - 2000 * 0.04 - 2000 * 0.08
else:
income = money - (money - 10000) * 0.2
print(' 你的税后所得为:%d' %income)
| [
"1354107742@qq.com"
] | 1354107742@qq.com |
0a069c40b9b698414eb9696b83360c0c93a6f9a9 | 53dde7748eea471a047942bfcb7f445734bb2c5e | /grafica.py | 25b0fa080f4bee7c69b41b75024c8a032592d382 | [] | no_license | MicheleCESO/ROAmbulatori | e5e539a6855ce05a91e6513b751519b77b783f74 | fd1a25b7f707d10c3e43418f424bdfb3debff9de | refs/heads/master | 2023-03-19T09:42:24.280580 | 2021-03-08T17:10:11 | 2021-03-08T17:10:11 | 194,710,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,363 | py | from PyQt5 import QtWidgets, uic, QtGui, QtCore
import sys
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, conf):
super(MainWindow, self).__init__()
self.config = conf # Configurazione
uic.loadUi("GUI.ui", self)
# Passaggio parametri a widget
self.istanza.inizializzaPar(conf, "Istanza")
self.greedy_1.inizializzaPar(conf, "Greedy slot 1")
self.greedy_2.inizializzaPar(conf, "Greedy slot 2")
self.simulated_annealing_1.inizializzaPar(conf, "Simulated Annealing slot 1")
self.simulated_annealing_2.inizializzaPar(conf, "Simulated Annealing slot 2")
self.path_relinking_1.inizializzaPar(conf, "Path Relinking slot 1")
self.path_relinking_2.inizializzaPar(conf, "Path Relinking slot 2")
class Widget(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tipo = ""
self.rettangoli = []
self.labels = []
self.linee = []
self.valoriUsati = []
self.posIniziale = 0
self.posFinale = 0
self.val = 0
self.offsetWindow = 0
self.colori = [
QtGui.QBrush(QtCore.Qt.green, QtCore.Qt.SolidPattern),
QtGui.QBrush(QtCore.Qt.red, QtCore.Qt.SolidPattern),
QtGui.QBrush(QtCore.Qt.cyan, QtCore.Qt.SolidPattern),
QtGui.QBrush(QtCore.Qt.yellow, QtCore.Qt.SolidPattern),
QtGui.QBrush(QtCore.Qt.magenta, QtCore.Qt.SolidPattern)
]
# Palette background
self.pal = self.palette()
self.pal.setColor(QtGui.QPalette.Window, QtCore.Qt.white)
def inizializzaPar(self, conf, nome):
self.config = conf
self.nome = nome
self.scala = 50 # Scala dell'intero grafico
self.altezza = 30 # Altezza dei rettangoli
self.offsetx = 50 # Distanza dalla coordinata 0
self.offsety = 20 # Distanza dalla coordinata 0
self.offsetLabely = 10 # Distanza tra asse X e labels
self.compAssey = self.altezza * 3 + self.offsety # Altezza dell'origine O
'''
Funzione che cancella tutte le parti grafiche
'''
def cancellaDati(self):
self.rettangoli = []
self.labels = []
self.linee = []
self.valoriUsati = []
self.update()
def popolamentoDati(self, soluzione):
# Reset dati
self.cancellaDati()
# Generazione rettangoli colorati con etichette e valori di riferimento
for paziente in soluzione.pazienti.values():
for tipo, start in paziente.esami.items():
x = start.valore * self.scala + self.offsetx
y = (2 - paziente.ambulatorio) * self.altezza + self.offsety
# Aggiunta rettangolo
self.rettangoli.append([QtCore.QRect(QtCore.QPoint(x, y), QtCore.QSize(getattr(self.config, "durata" + str(tipo)) * self.scala, self.altezza)), self.colori[tipo - 1]])
# Aggiunta labels
if start.valore not in self.valoriUsati:
self.labels.append([QtCore.QRect(QtCore.QPoint(x - 25, self.compAssey + self.offsetLabely), QtCore.QSize(50,20)), str(start.valore)])
self.linee.append(QtCore.QLine(QtCore.QPoint(x, self.compAssey), QtCore.QPoint(x, self.compAssey + self.offsetLabely)))
self.valoriUsati.append(start.valore)
if start.valore + getattr(self.config, "durata" + str(tipo)) not in self.valoriUsati:
xFine = (start.valore + getattr(self.config, "durata" + str(tipo))) * self.scala + self.offsetx
self.labels.append([QtCore.QRect(QtCore.QPoint(xFine - 25, self.compAssey + self.offsetLabely), QtCore.QSize(50,20)), str(start.valore + getattr(self.config, "durata" + str(tipo)))])
self.linee.append(QtCore.QLine(QtCore.QPoint(xFine, self.compAssey), QtCore.QPoint(xFine, self.compAssey + self.offsetLabely)))
self.valoriUsati.append(start.valore + getattr(self.config, "durata" + str(tipo)))
# Label raffigurante id paziente e tipo di job
self.labels.append([QtCore.QRect(QtCore.QPoint(x, y), QtCore.QSize(getattr(self.config, "durata" + str(tipo)) * self.scala, self.altezza)), "P"+str(paziente.id)+" - E"+str(tipo)])
self.linee.append(QtCore.QLine(QtCore.QPoint(self.offsetx, self.compAssey), QtCore.QPoint(soluzione.makeSpan * self.scala + self.offsetx, self.compAssey)))
self.linee.append(QtCore.QLine(QtCore.QPoint(self.offsetx, self.compAssey), QtCore.QPoint(self.offsetx, self.offsety)))
# Aggiornamento forzato widget
self.update()
def resizeEvent(self, e):
self.setAutoFillBackground(True)
self.update()
def mouseMoveEvent(self, e):
self.posFinale = e.x()
self.update()
def mousePressEvent(self, e):
self.posIniziale = e.x()
def mouseReleaseEvent(self, e):
self.offsetWindow = self.offsetWindow + self.posIniziale - self.posFinale
self.posIniziale = 0
self.posFinale = 0
def paintEvent(self, e):
self.setPalette(self.pal)
painter = QtGui.QPainter(self)
painter.drawText(QtCore.QRect(QtCore.QPoint(10, 0), QtCore.QSize(150, 20)), QtCore.Qt.AlignLeft, self.nome + " " + self.tipo)
if self.rettangoli:
spostat = self.offsetWindow + self.posIniziale - self.posFinale
if spostat < 0:
spostat = 0
self.offsetWindow = 0
self.posIniziale = self.posFinale
painter.setWindow(spostat, 0, self.width(), self.height())
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.SolidLine))
for elemento in self.rettangoli:
painter.setBrush(elemento[1])
painter.drawRect(elemento[0])
for elemento in self.labels:
painter.drawText(elemento[0], QtCore.Qt.AlignCenter, elemento[1])
for linea in self.linee:
painter.drawLine(linea)
class WidgetIstanza(Widget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self.valoriUsati # Eredità inutile
def inizializzaPar(self, conf, nome):
self.config = conf
self.nome = nome
self.scala = 100 # Scala dell'intero grafico
self.altezza = 30 # Altezza dei rettangoli
self.offsetx = 50 # Distanza dalla coordinata 0
self.offsety = 50 # Distanza dalla coordinata 0
self.offsetLabelx = 10 # Distanza tra asse X e labels
self.compAssey = self.altezza * 5 + self.offsety # Altezza dell'origine O
# Dimensioni label
self.labelx = 500
self.labely = 20
def cancellaDati(self):
self.rettangoli = []
self.labels = []
self.linee = []
self.update()
def popolamentoDati(self, istanza):
# Reset dei dati
self.cancellaDati()
# Generazione rettangoli colorati per ogni paziente
for indicePaziente, paziente in enumerate(istanza):
for esame in paziente:
x = indicePaziente * self.scala + self.offsetx
y = (5 - esame) * self.altezza + self.offsety
self.rettangoli.append([QtCore.QRect(QtCore.QPoint(x, y), QtCore.QSize(1 * self.scala, self.altezza)), self.colori[esame - 1]])
# Label
self.labels.append([QtCore.QRect(QtCore.QPoint(x - self.labelx / 2 + self.scala / 2, self.compAssey + self.offsetLabelx), QtCore.QSize(self.labelx, self.labely)), str(indicePaziente + 1)])
# Creazione assi cartesiani e labels asse y
self.linee.append(QtCore.QLine(QtCore.QPoint(self.offsetx, self.compAssey), QtCore.QPoint(len(istanza) * self.scala + self.offsetx, self.compAssey)))
self.linee.append(QtCore.QLine(QtCore.QPoint(self.offsetx, self.compAssey), QtCore.QPoint(self.offsetx, self.offsety)))
for tipoEsame in range(1,6):
y = (5 - tipoEsame) * self.altezza + self.offsety
self.labels.append([QtCore.QRect(QtCore.QPoint(0 - (self.labelx - 50) / 2, y + ((self.altezza - self.labely) / 2)), QtCore.QSize(self.labelx, self.labely)), "Esame "+str(tipoEsame)])
# Aggiornamento forzato widget
self.update() | [
"michele.cesari@student.unife.it"
] | michele.cesari@student.unife.it |
466a8429bb24cc0cb7bcbc85315544ca6ba9f6fc | 32721405fb7e1c1acc130784c17e40bb67034d4b | /src/layout.py | 0cf2693c10f6ba4c28ffc7d19c218a66d1f506a7 | [] | no_license | ryanQc1216/AutoLayout | b874955805b61c587ee804f9a90e0283afa3d68a | 61ec93c55f864f34c96987d57b8c19db4a1d65c1 | refs/heads/main | 2023-03-07T20:26:04.102543 | 2021-02-14T03:51:57 | 2021-02-14T03:51:57 | 338,213,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,568 | py | import os
import cv2
import json
import copy
import numpy as np
import itertools
import functools
import random
from src.node import Coordinate, Node, Group, get_default_text_size
from src.render import Render, MAX_VALUE, MIN_VALUE, SMALL_VALUE
def parsing_children_description(description):
if 'children' not in description:
return [], []
return [x for x in description['children']], [x['id'] for x in description['children']]
class Layout:
def __init__(self, description, cfg):
# static param
self.ratio_standard = cfg['ratio_standard']
self.max_cols_contains = cfg['max_cols_contains']
self.ratio_score_weight = 0.7
self.move_score_weight = 0.3
self.max_loop_support = 999
self.description = description
self.start_parent_id = -1
self.maps = self.generate_maps()
self.groups = self.generate_groups()
self.calc_relative_coord()
self.init_group_coord()
self.layer_info = self.update_layer_info()
self.placement_group()
def generate_maps(self):
maps = dict()
children_description, children_node_id = parsing_children_description(self.description)
start_node = Node(node_id=self.description['id'],
children=children_node_id,
parent=self.start_parent_id,
depth=0)
maps[start_node.node_id] = start_node
candidates = children_description.copy()
_iter = 0
while len(candidates) > 0 and _iter < self.max_loop_support:
candidate = candidates.pop()
candidate_children_description, candidate_children_node_id = parsing_children_description(candidate)
node_id = candidate['id']
parent_node_id = candidate['parentId']
this_node = Node(node_id=node_id,
children=candidate_children_node_id,
parent=parent_node_id,
depth=maps[parent_node_id].depth + 1)
maps[node_id] = this_node
candidates += candidate_children_description
_iter += 1
assert _iter < self.max_loop_support, 'Error happened in While loop'
return maps
def generate_groups(self):
parents_info = dict()
for node_id in self.maps:
if self.maps[node_id].parent not in parents_info:
parents_info[self.maps[node_id].parent] = [node_id]
else:
parents_info[self.maps[node_id].parent].append(node_id)
groups = dict()
group_id = 0
for parent_node_id in parents_info:
if parent_node_id == self.start_parent_id:
depth = 0
else:
depth = self.maps[parent_node_id].depth + 1
groups[group_id] = Group(contains=parents_info[parent_node_id].copy(),
group_id=group_id,
depth=depth,
parent_node_id=parent_node_id)
group_id += 1
return groups
def calc_relative_coord(self):
# step 1, assign group id to nodes
for group_id in self.groups:
for node_id in self.groups[group_id].contains:
self.maps[node_id].group_id = group_id
# step 2, check the groups need multiple layer
for group_id in self.groups:
self.groups[group_id].assign_node_relative_coord(self.maps, self.max_cols_contains)
assert self.check_all_node_relative_assigned()
pass
def check_all_node_relative_assigned(self):
cnt_assigned = 0
for node_id in self.maps:
if self.maps[node_id].valid_relative_coord():
cnt_assigned += 1
if cnt_assigned == len(self.maps):
return True
else:
return False
def check_all_group_assigned(self):
cnt_assigned = 0
for group_id in self.groups:
if self.groups[group_id].valid_bbox_lt():
cnt_assigned += 1
if cnt_assigned == len(self.groups):
return True
else:
return False
def node_id_to_group_id(self, node_id):
group_id = self.maps[node_id].group_id
return group_id
def inference_children_group_coord(self, parent_node_id, children_group_id):
parent_group_id = self.node_id_to_group_id(parent_node_id)
text_width, text_height = get_default_text_size()
children_group_cx = self.maps[parent_node_id].absolute_coord.x + int(text_width / 2)
children_group_sy = self.groups[parent_group_id].rb.y + text_width
children_group_sx = children_group_cx - int(self.groups[children_group_id].bbox_size[0] / 2)
return children_group_sx, children_group_sy
def init_group_coord(self):
self.maps = self.maps
self.groups = self.groups
self.groups[0].assign_group_offset(sx=0, sy=0, maps=self.maps)
_iter = 0
while self.check_all_group_assigned() is False and _iter < self.max_loop_support:
for group_id in self.groups:
parent_node_id = self.groups[group_id].parent_node_id
if parent_node_id == self.start_parent_id or self.groups[group_id].valid_bbox_lt() is True:
continue
parent_group_id = self.node_id_to_group_id(parent_node_id)
if self.groups[parent_group_id].valid_bbox_lt() is False:
continue
sx, sy = self.inference_children_group_coord(parent_node_id, group_id)
self.groups[group_id].assign_group_offset(sx=sx, sy=sy, maps=self.maps)
_iter += 1
assert _iter < self.max_loop_support, 'Error happened in While loop'
pass
def update_related_groups(self, group_id):
contains = self.groups[group_id].contains.copy()
stack_info = []
for node_id in contains:
stack_info.append({'node_id': node_id, 'group_id': group_id})
_iter = 0
while len(stack_info) > 0 and _iter < self.max_loop_support:
parent_info = stack_info.pop()
parent_node_id = parent_info['node_id']
# update this parent_node_id's next group
if len(self.maps[parent_node_id].children) > 0:
first_children_id = self.maps[parent_node_id].children[0]
children_group_id = self.maps[first_children_id].group_id
sx, sy = self.inference_children_group_coord(parent_node_id, children_group_id)
self.groups[children_group_id].assign_group_offset(sx=sx, sy=sy, maps=self.maps)
for node_id in self.maps[parent_node_id].children:
stack_info.append({'node_id': node_id, 'group_id': children_group_id})
_iter += 1
assert _iter < self.max_loop_support, 'Error happened in While loop'
pass
def sort_by_parent_cx(self, group_ids):
group_ids_sort = group_ids.copy()
for i in range(len(group_ids_sort)):
for j in range(i + 1, len(group_ids_sort)):
group_id_i = group_ids_sort[i]
group_id_j = group_ids_sort[j]
if self.maps[self.groups[group_id_i].parent_node_id].absolute_coord.x > \
self.maps[self.groups[group_id_j].parent_node_id].absolute_coord.x:
temp = group_ids_sort[i]
group_ids_sort[i] = group_ids_sort[j]
group_ids_sort[j] = temp
return group_ids_sort
def update_layer_info(self):
layer_info = {}
for group_id in self.groups:
layer_index = self.groups[group_id].depth
if layer_index not in layer_info:
layer_info[layer_index] = [group_id]
else:
layer_info[layer_index].append(group_id)
# rank group-id list by parent node cx
for layer_index in layer_info:
if layer_index == 0:
continue
group_ids = layer_info[layer_index]
layer_info[layer_index] = self.sort_by_parent_cx(group_ids)
return layer_info
def calc_two_bbox_overlap(self, bbox_0, bbox_1):
sx0, ex0, sy0, ey0 = bbox_0
sx1, ex1, sy1, ey1 = bbox_1
sx = max(sx0, sx1)
sy = max(sy0, sy1)
ex = min(ex0, ex1)
ey = min(ey0, ey1)
width = max(0, ex - sx + 1)
height = max(0, ey - sy + 1)
overlap = width * height
return overlap
def calc_layer_group_overlap(self, layer_id):
group_ids = self.layer_info[layer_id]
overlap = 0
for select in itertools.combinations(group_ids, 2):
group_id_0 = select[0]
group_id_1 = select[1]
overlap += self.calc_two_bbox_overlap(self.groups[group_id_0].get_bbox_as_list(),
self.groups[group_id_1].get_bbox_as_list())
return overlap
def find_valid_placement_area(self, group_id, group_ids, already_placement):
start_x = MIN_VALUE
end_x = MAX_VALUE
left_group_ids = group_ids[:group_ids.index(group_id)]
right_group_ids = group_ids[group_ids.index(group_id) + 1:]
border = self.groups[group_id].boarder
for l_idx in left_group_ids:
if l_idx in already_placement:
start_x = max(start_x, already_placement[l_idx][2] + border) # use ex
for r_idx in right_group_ids:
if r_idx in already_placement:
end_x = min(end_x, already_placement[r_idx][0] - border) # use sx
return start_x, end_x
def constrain_placement_group(self, group_id, original_bbox, start_x, end_x):
# not valid
if self.groups[group_id].bbox_size[0] > (end_x - start_x):
return None
# use original
if original_bbox[0] > start_x and original_bbox[2] < end_x:
return original_bbox.copy()
# left overlap
if original_bbox[0] <= start_x:
distance = start_x - original_bbox[0] + 1
return original_bbox[0] + distance, original_bbox[1], original_bbox[2] + distance, original_bbox[3]
# right overlap
if original_bbox[2] >= end_x:
distance = original_bbox[2] - end_x + 1
return original_bbox[0] - distance, original_bbox[1], original_bbox[2] - distance, original_bbox[3]
raise Exception('Error Happened in constrain_placement_group')
def calc_placement_movement_score(self, original_bbox_info, update_bbox_info):
total_width = 0
move_dis = 0
delta = 0.1
for group_id in original_bbox_info:
contain_num = len(self.groups[group_id].contains)
dis = abs(original_bbox_info[group_id][0] - update_bbox_info[group_id][0])
move_dis += dis*contain_num
total_width += original_bbox_info[group_id][2] - original_bbox_info[group_id][0]
score = 1 - min(1.0, (move_dis*delta/total_width))
return score
def calc_previous_placement_range(self, curr_layer_id):
min_x, min_y = MAX_VALUE, MAX_VALUE
max_x, max_y = MIN_VALUE, MIN_VALUE
for layer_id in self.layer_info:
group_ids = self.layer_info[layer_id]
for group_id in group_ids:
for node_id in self.groups[group_id].contains:
min_y = min(self.maps[node_id].absolute_coord.y, min_y)
max_y = max(self.maps[node_id].absolute_coord.y, max_y)
if layer_id <= curr_layer_id:
min_x = min(self.maps[node_id].absolute_coord.x, min_x)
max_x = max(self.maps[node_id].absolute_coord.x, max_x)
pass
return min_x, min_y, max_x, max_y
def calc_placement_ratio_score(self, update_bbox_info, previous_range):
update_sx = previous_range[0]
update_ex = previous_range[2]
for group_id in update_bbox_info:
update_sx = min(update_bbox_info[group_id][0], update_sx)
update_ex = max(update_bbox_info[group_id][2], update_ex)
width = update_ex - update_sx
height = previous_range[3] - previous_range[1]
ratio = width/height
delta = 2.0
score = 1 - abs(self.ratio_standard-ratio)/delta
score = min(1.0, max(score, 0))
return score
def calc_all_placement_order(self, group_ids):
group_num_thresh = 8
if len(group_ids) <= group_num_thresh:
all_placement_order = list(itertools.permutations(group_ids))
return all_placement_order
else:
rev = group_ids.copy()
rev.reverse()
all_placement_order = [group_ids.copy(), rev]
indexes = [x for x in range(len(group_ids))]
while len(all_placement_order) < self.max_loop_support:
random.shuffle(indexes)
placement = []
for idx in indexes:
placement.append(group_ids[idx])
all_placement_order.append(placement)
return all_placement_order
def search_movement_policy(self, layer_id):
if layer_id==7:
kk = 1
# group_ids is ordered from left to right
group_ids = self.layer_info[layer_id].copy()
original_bbox = {}
for group_id in group_ids:
original_bbox[group_id] = self.groups[group_id].get_bbox_as_list().copy()
best_placement_info = {'placement': None, 'score': MIN_VALUE,
'movement_score': MIN_VALUE, 'ratio_score': MIN_VALUE}
all_placement_order = self.calc_all_placement_order(group_ids)
print('layer %d generate all placement order %d' % (layer_id, len(all_placement_order)))
previous_range = self.calc_previous_placement_range(layer_id)
for placement_order in all_placement_order:
already_placement = dict() # {saved_new_bbox}
for idx in range(len(placement_order)):
group_id = placement_order[idx]
start_x, end_x = self.find_valid_placement_area(group_id, group_ids, already_placement)
update_bbox = self.constrain_placement_group(group_id, original_bbox[group_id], start_x, end_x)
if update_bbox is None:
break
already_placement[group_id] = update_bbox
pass
if len(already_placement) == len(group_ids):
movement_score = self.calc_placement_movement_score(original_bbox, already_placement)
ratio_score = self.calc_placement_ratio_score(already_placement, previous_range)
score = movement_score*self.move_score_weight + ratio_score*self.ratio_score_weight
#score = movement_score
if score > best_placement_info['score']:
best_placement_info = {'placement': already_placement,
'score': score,
'movement_score': movement_score,
'ratio_score': ratio_score}
assert best_placement_info['placement'] is not None, 'Error in search_minimal_movement_policy!!'
return best_placement_info
# 1) 定义
# Node , 每一个待绘制的节点
# Group , 具有公共Parent的Node集合
# Layer , 到起始节点有相同距离的Group集合
# 2) 搜索目标 (用于计算候选Placement方案的得分值)
# a) 绘制时不能有交叉线
# b) 尽量保持4:3的全图比例
# c) 尽量让Children所形成的Group的x中心到Parent的x位置距离最小
# 3) 搜索步骤 (NP问题,目前解耦成启发式搜索)
# a) 解析Json拓扑,形成初始的Node、Group、Layer
# b) 逐个Layer进行Group的重排,重排过程中计算当前方案的得分(按照搜索目标的设定)
# c) 目前Group重排只改变其x坐标,且不改变内部Node的排列顺序
# d) 单次遍历Layer后完成拓扑计算,返回各个Node坐标(或Grid的划分)
def placement_group(self):
for layer_id in self.layer_info:
overlap = self.calc_layer_group_overlap(layer_id)
if overlap > 0:
placement_dict = self.search_movement_policy(layer_id)
print('update layer %d' % layer_id, placement_dict)
for group_id in placement_dict['placement']:
update_sx = placement_dict['placement'][group_id][0]
update_sy = placement_dict['placement'][group_id][1]
self.groups[group_id].assign_group_offset(sx=update_sx,
sy=update_sy,
maps=self.maps)
self.update_related_groups(group_id)
self.layer_info = self.update_layer_info()
def render(self):
ins_render = Render(self.maps, self.groups)
image = ins_render.render()
# log layer_info
for layer_id in self.layer_info:
for group_id in self.layer_info[layer_id]:
print(' [layer %d] - [group %d], contain: %d' %(layer_id, group_id, len(self.groups[group_id].contains)))
return image
| [
"qiancheng1216@163.com"
] | qiancheng1216@163.com |
f9774489922882cc2d1ea68f918e5fb9a808c4e8 | 6f98f1a573b0de18982bf20aa307778dc0eecc20 | /NameAndColor.py | 709eee38c3d14fa692bf027af05cb5000b831176 | [] | no_license | ArnavPalkhiwala/PythonBasics | 0eee6dcb4d18792feb3245ae324337d4a3b3d9ff | dafea8c9f4dda4baaed62c8c1978b8f6106f7360 | refs/heads/master | 2022-11-22T14:50:09.548758 | 2020-07-24T06:26:10 | 2020-07-24T06:26:10 | 256,051,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | name = input("What is your name? ")
print("Hi " + name)
color = input("What is your favorite color? ")
print(name + " likes " + color) | [
"noreply@github.com"
] | noreply@github.com |
1e9432aa6ff5d3d51c600b42a5c2491c7b26584b | 547909f2d5c06848910f23c9e251c404212d6fb4 | /dl2cvt.py | a48553306599c95b74b7a40423fe1208eae87f79 | [
"MIT"
] | permissive | aldebaran1/gpstec | 015ba9a72f4116f45d83bdf583deb6320afe622a | e8d0e38d89a1399741a63c0aa1f8e5ccf7ada9ba | refs/heads/master | 2022-03-11T00:29:42.696872 | 2022-02-11T23:37:33 | 2022-02-11T23:37:33 | 151,584,490 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 11 12:15:36 2022
@author: smrak@bu.edu
"""
from datetime import timedelta, datetime
from dateutil import parser
from argparse import ArgumentParser
import subprocess
def main(start: str = None, stop : str = None, los : bool = False):#
timeout = 900 if los else 180
startdt, stopdt = parser.parse(start), parser.parse(stop)
t = startdt
dtlist = []
while t <= stopdt:
dtlist.append(t.strftime('%Y-%m-%d'))
t += timedelta(days=1)
for d in dtlist:
line = f'./auto.sh {d} {d} --los' if los else f'./auto.sh {d} {d}'
t0 = datetime.now()
subprocess.call(line, shell = True, timeout=timeout)
print (f'It took {datetime.now()-t0} to download.')
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('startdate', type = str)
p.add_argument('enddate', type = str)
p.add_argument('--los', type = str)
P = p.parse_args()
main(start = P.startdate, stop = P.enddate, los = P.los) | [
"sebastijan.mrak@gmail.com"
] | sebastijan.mrak@gmail.com |
c0a8d77794fc0b272d8c8f71e38f1ad636e7b10b | 27144798510348ac511fafb2c451a18735933aa3 | /ml_service/cleaning.py | b0fe778d53400f593bb3a6ac76c9ea0feffec5b7 | [] | no_license | zhangzexi/capstone | c02019e05d235cc9485f60355f79dd40e3395774 | dd37613e76fb633253b13403ad4182308aaee4dd | refs/heads/master | 2021-01-18T16:42:44.858129 | 2018-01-16T07:29:07 | 2018-01-16T07:29:07 | 86,756,313 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | import pandas as pd
from pymongo import MongoClient
pd.options.mode.chained_assignment = None
def _connect_mongo(host, port, username, password, db):
""" A util for making a connection to mongo """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
# Make a query to the specific DB and Collection
cursor = db[collection].find(query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
return df
def clean(df):
mask = (
(df.property_type != "Condo") & (df.property_type != "Single Family") & (df.property_type != "Multi Family") & (
df.property_type != "Townhouse"))
df = df[~mask]
df = df[df.bedroom < 10]
df = df[df.bathroom < 10]
df = df[df.list_price > 0]
df = df[df["size"] > 0]
df = df[(df.bathroom != 0) & (df.bedroom != 0)]
df.index = range(len(df))
df["lotsize"] = getLot(df)
df.drop(
["_id", "is_for_sale", "state", "street_address", "zipcode", "zpid", "description", "image_url", "last_update",
"latitude", "longitude", "facts"], axis=1, inplace=True)
return df
def getLot(df):
lotsize = []
for i in range(len(df["facts"])):
count = 0
for j in range(len(df["facts"][i])):
if "Lot:" in df["facts"][i][j]:
temp = df["facts"][i][j + 1].split()
if temp[1] == "sqft":
res = temp[0].replace(",", "")
lotsize.append(int(res))
if "acre" in temp[1]:
res = float(temp[0].replace(",", ""))
if res > 1000:
lotsize.append(res)
else:
lotsize.append(res * 43560)
break
count += 1
if count + 1 == len(df["facts"][i]):
lotsize.append(0)
return lotsize
df = read_mongo(db="real_estate_smart_view_testing",collection="property_recently_sold")
df = clean(df)
df.to_csv(path_or_buf = "cleaned2.csv")
| [
"zhangzexi.dz@gmail.com"
] | zhangzexi.dz@gmail.com |
321218c294aead0ab1e1b51e4d6843485439ce23 | 89d220f0e0884963279c25dc8bd77bf1f6697ee8 | /imagemix.py | 839d4fbe01b5510cac161c30e0364fd813788ffb | [
"MIT"
] | permissive | AdityaPai2398/neural_style_transfer_app | 2103961aa307d447050a0d8df4028331a530dea3 | d87a60fb38e025eaf5ec34c145fefe7446f1e236 | refs/heads/master | 2020-03-22T03:57:01.236504 | 2018-07-02T05:35:35 | 2018-07-02T05:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,319 | py |
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager
from outputscreen import OutputScreen
from configscreen import ConfigScreen
from contentscreen import ContentScreen
from stylescreen import StyleScreen
from fastgraphconfigscreen import FastGraphConfigScreen
from fastergraphconfigscreen import FasterGraphConfigScreen
from contentfast import FastContent
from stylefast import FastStyle
from contentfaster import FasterContent
from stylefaster import FasterStyle
from kivyqueue import KivyQueue
from neuralworker import NeuralWorker
from functools import partial
import threading
import os, glob, sys
class imagemixController(ScreenManager):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.content_path_list = []
self.content_path_list_counter = -1
self.style_path_list = []
self.style_path_list_counter = -1
self.output_path_list = []
self.output_path_list_counter = -1
self.latest_output = True
self.fast_content_list = []
self.fast_content_list_counter = -1
self.fast_style_list = []
self.fast_style_list_counter = -1
self.faster_content_list = []
self.faster_content_list_counter = -1
self.faster_style_list = []
self.faster_style_list_counter = -1
self.state = None
self.worker = None
self.result_queue = KivyQueue(self.result_queue_callback)
self.command_queue = KivyQueue(self.command_queue_callback)
self.response_queue = KivyQueue(self.reponse_queue_callback)
self.output_screen.train_button.bind(on_press = self.set_config_screen_on_train_button)
self.cleanup()
# prfloat("FINISHED:", sys._getframe().f_code.co_name)
def clear(self):
if self.worker is not None:
self.command_queue.put('stop')
self.output_screen.clear_button.text = 'Wait...'
self.output_screen.logs.text = 'Please wait...'
else:
self.content_path_list = []
self.content_path_list_counter = -1
self.output_screen.content.source = ''
self.style_path_list = []
self.style_path_list_counter = -1
self.output_screen.style.source = ''
self.output_path_list = []
self.output_path_list_counter = -1
self.output_screen.output.source = ''
self.latest_output = True
self.fast_content_list = []
self.fast_content_list_counter = -1
self.fast_style_list = []
self.fast_style_list_counter = -1
self.faster_content_list = []
self.faster_content_list_counter = -1
self.faster_style_list = []
self.faster_style_list_counter = -1
self.state = None
self.worker = None
self.empty_all_queue()
self.cleanup()
self.output_screen.train_button.text = 'Train'
self.output_screen.clear_button.text = 'Clear'
self.output_screen.logs.text = 'Cleared...'
def cleanup(self):
if not os.path.exists('./meta'):
os.makedirs('./meta')
if not os.path.exists('./out'):
os.makedirs('./out')
if os.path.exists('./out'):
filelist = glob.glob(os.path.join('./out', "*.png"))
for f in filelist:
os.remove(f)
def empty_all_queue(self):
while not self.result_queue.empty():
self.result_queue.get()
while not self.command_queue.empty():
self.command_queue.get()
while not self.response_queue.empty():
self.response_queue.get()
def set_image(self, filename, target):
try:
if target is 'content' and self.worker is None:
self.output_screen.content.source = filename[0]
if filename[0] not in self.content_path_list:
self.content_path_list.append(filename[0])
self.content_path_list_counter = len(self.content_path_list) - 1
self.fast_content_list.append(FastContent(filename[0]))
self.fast_content_list_counter = len(self.fast_content_list) - 1
self.fast_content_screen_update()
self.faster_content_list.append(FasterContent(filename[0]))
self.faster_content_list_counter = len(self.faster_content_list) - 1
self.faster_content_screen_update()
elif target is 'style' and self.worker is None:
self.output_screen.style.source = filename[0]
if filename[0] not in self.style_path_list:
self.style_path_list.append(filename[0])
self.style_path_list_counter = len(self.style_path_list) - 1
self.fast_style_list.append(FastStyle(filename[0]))
self.fast_style_list_counter = len(self.fast_style_list) - 1
self.fast_style_screen_update()
self.faster_style_list.append(FasterStyle(filename[0]))
self.faster_style_list_counter = len(self.faster_style_list) - 1
self.faster_style_screen_update()
elif target is 'output':
self.output_screen.output.source = filename[0]
except:
pass
def cycle_image(self, target):
try:
if target is 'content':
if self.content_path_list_counter != -1:
self.content_path_list_counter += 1
if self.content_path_list_counter == len(self.content_path_list):
self.content_path_list_counter = 0
self.output_screen.content.source = self.content_path_list[self.content_path_list_counter]
elif target is 'style':
if self.style_path_list_counter != -1:
self.style_path_list_counter += 1
if self.style_path_list_counter == len(self.style_path_list):
self.style_path_list_counter = 0
self.output_screen.style.source = self.style_path_list[self.style_path_list_counter]
elif target is 'output_forward':
if self.output_path_list_counter + 1 != len(self.output_path_list):
self.output_screen.output.source = self.output_path_list[self.output_path_list_counter + 1]
self.output_path_list_counter += 1
self.latest_output = False
if self.output_path_list_counter == len(self.output_path_list) - 1:
self.latest_output = True
elif target is 'output_backward':
if self.output_path_list_counter > 0:
self.output_screen.output.source = self.output_path_list[self.output_path_list_counter - 1]
self.output_path_list_counter -= 1
self.latest_output = False
if self.output_path_list_counter == len(self.output_path_list) - 1:
self.latest_output = True
elif target is 'output_latest':
if self.output_screen.output.source is not self.output_path_list[-1]:
self.output_screen.output.source = self.output_path_list[-1]
self.output_path_list_counter = len(self.output_path_list) - 1
else:
self.output_screen.output.source = self.output_screen.content.source
self.output_path_list_counter = -1
self.latest_output = True
except:
pass
def result_queue_callback_logic(self, dt):
if not self.result_queue.empty():
path = str(self.result_queue.get())
self.output_path_list.append(path)
if self.latest_output:
self.cycle_image('output_latest')
def result_queue_callback(self):
# Trigger created can be called wherever, not necessary immediately.
# Maybe a good way to schedule things as even main thread may be frozen.
event = Clock.create_trigger(self.result_queue_callback_logic)
event()
def command_queue_callback_logic(self, dt):
# self.command_queue.put(command)
pass
def command_queue_callback(self):
# event = Clock.create_trigger(self.command_queue_callback_logic)
# event()
pass
def reponse_queue_callback_logic(self, dt):
if not self.response_queue.empty():
response = str(self.response_queue.get())
if response is 'paused':
self.state = 'paused'
self.output_screen.train_button.text = 'Resume'
self.output_screen.logs.text = 'Training has been paused...'
elif response is 'resumed':
self.state = 'resumed'
self.output_screen.train_button.text = 'Pause'
self.output_screen.logs.text = 'Resuming to train...'
elif response is 'stopped':
self.content_path_list = []
self.content_path_list_counter = -1
self.output_screen.content.source = ''
self.style_path_list = []
self.style_path_list_counter = -1
self.output_screen.style.source = ''
self.output_path_list = []
self.output_path_list_counter = -1
self.output_screen.output.source = ''
self.latest_output = True
self.fast_content_list = []
self.fast_content_list_counter = -1
self.fast_style_list = []
self.fast_style_list_counter = -1
self.faster_content_list = []
self.faster_content_list_counter = -1
self.faster_style_list = []
self.faster_style_list_counter = -1
self.state = None
self.worker = None
self.empty_all_queue()
self.cleanup()
self.output_screen.train_button.text = 'Train'
self.output_screen.clear_button.text = 'Clear'
self.output_screen.logs.text = 'Cleared...'
#########
self.output_screen.train_button.unbind(on_press = self.pause_or_resume_button)
self.output_screen.train_button.bind(on_press = self.set_config_screen_on_train_button)
else:
self.output_screen.logs.text = response
def reponse_queue_callback(self):
event = Clock.create_trigger(self.reponse_queue_callback_logic)
event()
def train_worker(self, use_faster_graph, use_lbfgs, max_iterations, width, height, alpha, beta, gamma, noise_ratio, use_meta, save_meta):
# Threads share data.
worker = NeuralWorker(self.result_queue, self.command_queue, self.response_queue, self.faster_content_list, self.faster_style_list, self.fast_content_list, self.fast_style_list, use_faster_graph, use_lbfgs, max_iterations, width, height, alpha, beta, gamma, noise_ratio, use_meta, save_meta)
worker.train()
def pause_or_resume_button(self, *args):
if self.worker is not None:
if self.state is 'resumed':
self.command_queue.put('pause')
self.state = 'waiting'
self.output_screen.train_button.text = 'Wait...'
elif self.state is 'paused':
self.command_queue.put('resume')
self.state = 'waiting'
self.output_screen.train_button.text = 'Wait...'
elif self.state is 'waiting':
self.output_screen.train_button.text = 'Please wait...?'
self.output_screen.logs.text = 'Please wait while the background thread is busy...'
def set_config_screen_on_train_button(self, *args):
if len(self.content_path_list) == 0 or len(self.style_path_list) == 0:
return
self.current = 'config_screen'
def start_button(self, width, height, use_meta, save_meta, use_lbfgs, max_iterations, noise_ratio, alpha, beta, gamma, use_faster_graph):
try:
use_faster_graph = bool(use_faster_graph)
use_lbfgs = bool(use_lbfgs)
max_iterations = int(max_iterations)
width = int(width)
height = int(height)
alpha = float(alpha)
beta = float(beta)
gamma = float(gamma)
noise_ratio = float(noise_ratio)
use_meta = bool(use_meta)
save_meta = bool(save_meta)
if self.worker is None:
if len(self.content_path_list) == 0 or len(self.style_path_list) == 0:
return
self.worker = threading.Thread(target = self.train_worker, args=(use_faster_graph, use_lbfgs, max_iterations, width, height, alpha, beta, gamma, noise_ratio, use_meta, save_meta))
self.worker.daemon = True
self.worker.start()
self.state = 'resumed'
self.output_screen.logs.text = 'Starting to train...'
self.output_screen.train_button.text = 'Pause'
self.output_screen.train_button.unbind(on_press = self.set_config_screen_on_train_button)
self.output_screen.train_button.bind(on_press = self.pause_or_resume_button)
except:
pass
def set_config_screen(self):
if self.config_screen.use_faster_graph.active:
self.current = 'faster_graph_config_screen'
else:
self.current = 'fast_graph_config_screen'
def cycle_config(self, target, *args):
try:
if target is 'faster_content_next':
if self.faster_content_list_counter != -1:
self.faster_content_save(*args)
self.faster_content_list_counter += 1
if self.faster_content_list_counter == len(self.faster_content_list):
self.faster_content_list_counter = 0
self.faster_content_screen_update()
elif target is 'faster_content_prev':
if self.faster_content_list_counter != -1:
self.faster_content_save(*args)
self.faster_content_list_counter -= 1
if self.faster_content_list_counter == -1:
self.faster_content_list_counter = len(self.faster_content_list) - 1
self.faster_content_screen_update()
elif target is 'faster_style_next':
if self.faster_style_list_counter != -1:
self.faster_style_save(*args)
self.faster_style_list_counter += 1
if self.faster_style_list_counter == len(self.faster_style_list):
self.faster_style_list_counter = 0
self.faster_style_screen_update()
elif target is 'faster_style_prev':
if self.faster_style_list_counter != -1:
self.faster_style_save(*args)
self.faster_style_list_counter -= 1
if self.faster_style_list_counter == -1:
self.faster_style_list_counter = len(self.faster_style_list) - 1
self.faster_style_screen_update()
elif target is 'fast_content_next':
if self.fast_content_list_counter != -1:
self.fast_content_save(*args)
self.fast_content_list_counter += 1
if self.fast_content_list_counter == len(self.fast_content_list):
self.fast_content_list_counter = 0
self.fast_content_screen_update()
elif target is 'fast_content_prev':
if self.fast_content_list_counter != -1:
self.fast_content_save(*args)
self.fast_content_list_counter -= 1
if self.fast_content_list_counter == -1:
self.fast_content_list_counter = len(self.fast_content_list) - 1
self.fast_content_screen_update()
elif target is 'fast_style_next':
if self.fast_style_list_counter != -1:
self.fast_style_save(*args)
self.fast_style_list_counter += 1
if self.fast_style_list_counter == len(self.fast_style_list):
self.fast_style_list_counter = 0
self.fast_style_screen_update()
elif target is 'fast_style_prev':
if self.fast_style_list_counter != -1:
self.fast_style_save(*args)
self.fast_style_list_counter -= 1
if self.fast_style_list_counter == -1:
self.fast_style_list_counter = len(self.fast_style_list) - 1
self.fast_style_screen_update()
except:
pass
def accept_faster_config_button(self, alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_pool5_check, content_pool5_weight, beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_pool5_check, style_pool5_weight):
self.faster_content_save(alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_pool5_check, content_pool5_weight)
self.faster_style_save(beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_pool5_check, style_pool5_weight)
def accept_fast_config_button(self, alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_conv3_4_check, content_conv3_4_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_conv4_4_check, content_conv4_4_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_conv5_4_check, content_conv5_4_weight, content_pool5_check, content_pool5_weight, beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_conv3_4_check, style_conv3_4_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_conv4_4_check, style_conv4_4_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_conv5_4_check, style_conv5_4_weight, style_pool5_check, style_pool5_weight):
self.fast_content_save(alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_conv3_4_check, content_conv3_4_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_conv4_4_check, content_conv4_4_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_conv5_4_check, content_conv5_4_weight, content_pool5_check, content_pool5_weight)
self.fast_style_save(beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_conv3_4_check, style_conv3_4_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_conv4_4_check, style_conv4_4_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_conv5_4_check, style_conv5_4_weight, style_pool5_check, style_pool5_weight)
def faster_content_screen_update(self):
current_faster_content = self.faster_content_list[self.faster_content_list_counter]
self.faster_graph_config_screen.content_file.text = current_faster_content.path.split('\\')[-1]
self.faster_graph_config_screen.alpha.text = str(current_faster_content.alpha)
self.faster_graph_config_screen.content_conv1_1_check.active = current_faster_content.content_conv1_1_check
self.faster_graph_config_screen.content_conv1_1_weight.text = str(current_faster_content.content_conv1_1_weight)
self.faster_graph_config_screen.content_conv1_2_check.active = current_faster_content.content_conv1_2_check
self.faster_graph_config_screen.content_conv1_2_weight.text = str(current_faster_content.content_conv1_2_weight)
self.faster_graph_config_screen.content_pool1_check.active = current_faster_content.content_pool1_check
self.faster_graph_config_screen.content_pool1_weight.text = str(current_faster_content.content_pool1_weight)
self.faster_graph_config_screen.content_conv2_1_check.active = current_faster_content.content_conv2_1_check
self.faster_graph_config_screen.content_conv2_1_weight.text = str(current_faster_content.content_conv2_1_weight)
self.faster_graph_config_screen.content_conv2_2_check.active = current_faster_content.content_conv2_2_check
self.faster_graph_config_screen.content_conv2_2_weight.text = str(current_faster_content.content_conv2_2_weight)
self.faster_graph_config_screen.content_pool2_check.active = current_faster_content.content_pool2_check
self.faster_graph_config_screen.content_pool2_weight.text = str(current_faster_content.content_pool2_weight)
self.faster_graph_config_screen.content_conv3_1_check.active = current_faster_content.content_conv3_1_check
self.faster_graph_config_screen.content_conv3_1_weight.text = str(current_faster_content.content_conv3_1_weight)
self.faster_graph_config_screen.content_conv3_2_check.active = current_faster_content.content_conv3_2_check
self.faster_graph_config_screen.content_conv3_2_weight.text = str(current_faster_content.content_conv3_2_weight)
self.faster_graph_config_screen.content_conv3_3_check.active = current_faster_content.content_conv3_3_check
self.faster_graph_config_screen.content_conv3_3_weight.text = str(current_faster_content.content_conv3_3_weight)
self.faster_graph_config_screen.content_pool3_check.active = current_faster_content.content_pool3_check
self.faster_graph_config_screen.content_pool3_weight.text = str(current_faster_content.content_pool3_weight)
self.faster_graph_config_screen.content_conv4_1_check.active = current_faster_content.content_conv4_1_check
self.faster_graph_config_screen.content_conv4_1_weight.text = str(current_faster_content.content_conv4_1_weight)
self.faster_graph_config_screen.content_conv4_2_check.active = current_faster_content.content_conv4_2_check
self.faster_graph_config_screen.content_conv4_2_weight.text = str(current_faster_content.content_conv4_2_weight)
self.faster_graph_config_screen.content_conv4_3_check.active = current_faster_content.content_conv4_3_check
self.faster_graph_config_screen.content_conv4_3_weight.text = str(current_faster_content.content_conv4_3_weight)
self.faster_graph_config_screen.content_pool4_check.active = current_faster_content.content_pool4_check
self.faster_graph_config_screen.content_pool4_weight.text = str(current_faster_content.content_pool4_weight)
self.faster_graph_config_screen.content_conv5_1_check.active = current_faster_content.content_conv5_1_check
self.faster_graph_config_screen.content_conv5_1_weight.text = str(current_faster_content.content_conv5_1_weight)
self.faster_graph_config_screen.content_conv5_2_check.active = current_faster_content.content_conv5_2_check
self.faster_graph_config_screen.content_conv5_2_weight.text = str(current_faster_content.content_conv5_2_weight)
self.faster_graph_config_screen.content_conv5_3_check.active = current_faster_content.content_conv5_3_check
self.faster_graph_config_screen.content_conv5_3_weight.text = str(current_faster_content.content_conv5_3_weight)
self.faster_graph_config_screen.content_pool5_check.active = current_faster_content.content_pool5_check
self.faster_graph_config_screen.content_pool5_weight.text = str(current_faster_content.content_pool5_weight)
def faster_style_screen_update(self):
current_faster_style = self.faster_style_list[self.faster_style_list_counter]
self.faster_graph_config_screen.style_file.text = current_faster_style.path.split('\\')[-1]
self.faster_graph_config_screen.beta.text = str(current_faster_style.beta)
self.faster_graph_config_screen.style_conv1_1_check.active = current_faster_style.style_conv1_1_check
self.faster_graph_config_screen.style_conv1_1_weight.text = str(current_faster_style.style_conv1_1_weight)
self.faster_graph_config_screen.style_conv1_2_check.active = current_faster_style.style_conv1_2_check
self.faster_graph_config_screen.style_conv1_2_weight.text = str(current_faster_style.style_conv1_2_weight)
self.faster_graph_config_screen.style_pool1_check.active = current_faster_style.style_pool1_check
self.faster_graph_config_screen.style_pool1_weight.text = str(current_faster_style.style_pool1_weight)
self.faster_graph_config_screen.style_conv2_1_check.active = current_faster_style.style_conv2_1_check
self.faster_graph_config_screen.style_conv2_1_weight.text = str(current_faster_style.style_conv2_1_weight)
self.faster_graph_config_screen.style_conv2_2_check.active = current_faster_style.style_conv2_2_check
self.faster_graph_config_screen.style_conv2_2_weight.text = str(current_faster_style.style_conv2_2_weight)
self.faster_graph_config_screen.style_pool2_check.active = current_faster_style.style_pool2_check
self.faster_graph_config_screen.style_pool2_weight.text = str(current_faster_style.style_pool2_weight)
self.faster_graph_config_screen.style_conv3_1_check.active = current_faster_style.style_conv3_1_check
self.faster_graph_config_screen.style_conv3_1_weight.text = str(current_faster_style.style_conv3_1_weight)
self.faster_graph_config_screen.style_conv3_2_check.active = current_faster_style.style_conv3_2_check
self.faster_graph_config_screen.style_conv3_2_weight.text = str(current_faster_style.style_conv3_2_weight)
self.faster_graph_config_screen.style_conv3_3_check.active = current_faster_style.style_conv3_3_check
self.faster_graph_config_screen.style_conv3_3_weight.text = str(current_faster_style.style_conv3_3_weight)
self.faster_graph_config_screen.style_pool3_check.active = current_faster_style.style_pool3_check
self.faster_graph_config_screen.style_pool3_weight.text = str(current_faster_style.style_pool3_weight)
self.faster_graph_config_screen.style_conv4_1_check.active = current_faster_style.style_conv4_1_check
self.faster_graph_config_screen.style_conv4_1_weight.text = str(current_faster_style.style_conv4_1_weight)
self.faster_graph_config_screen.style_conv4_2_check.active = current_faster_style.style_conv4_2_check
self.faster_graph_config_screen.style_conv4_2_weight.text = str(current_faster_style.style_conv4_2_weight)
self.faster_graph_config_screen.style_conv4_3_check.active = current_faster_style.style_conv4_3_check
self.faster_graph_config_screen.style_conv4_3_weight.text = str(current_faster_style.style_conv4_3_weight)
self.faster_graph_config_screen.style_pool4_check.active = current_faster_style.style_pool4_check
self.faster_graph_config_screen.style_pool4_weight.text = str(current_faster_style.style_pool4_weight)
self.faster_graph_config_screen.style_conv5_1_check.active = current_faster_style.style_conv5_1_check
self.faster_graph_config_screen.style_conv5_1_weight.text = str(current_faster_style.style_conv5_1_weight)
self.faster_graph_config_screen.style_conv5_2_check.active = current_faster_style.style_conv5_2_check
self.faster_graph_config_screen.style_conv5_2_weight.text = str(current_faster_style.style_conv5_2_weight)
self.faster_graph_config_screen.style_conv5_3_check.active = current_faster_style.style_conv5_3_check
self.faster_graph_config_screen.style_conv5_3_weight.text = str(current_faster_style.style_conv5_3_weight)
self.faster_graph_config_screen.style_pool5_check.active = current_faster_style.style_pool5_check
self.faster_graph_config_screen.style_pool5_weight.text = str(current_faster_style.style_pool5_weight)
def fast_content_screen_update(self):
current_fast_content = self.fast_content_list[self.fast_content_list_counter]
self.fast_graph_config_screen.content_file.text = current_fast_content.path.split('\\')[-1]
self.fast_graph_config_screen.alpha.text = str(current_fast_content.alpha)
self.fast_graph_config_screen.content_conv1_1_check.active = current_fast_content.content_conv1_1_check
self.fast_graph_config_screen.content_conv1_1_weight.text = str(current_fast_content.content_conv1_1_weight)
self.fast_graph_config_screen.content_conv1_2_check.active = current_fast_content.content_conv1_2_check
self.fast_graph_config_screen.content_conv1_2_weight.text = str(current_fast_content.content_conv1_2_weight)
self.fast_graph_config_screen.content_pool1_check.active = current_fast_content.content_pool1_check
self.fast_graph_config_screen.content_pool1_weight.text = str(current_fast_content.content_pool1_weight)
self.fast_graph_config_screen.content_conv2_1_check.active = current_fast_content.content_conv2_1_check
self.fast_graph_config_screen.content_conv2_1_weight.text = str(current_fast_content.content_conv2_1_weight)
self.fast_graph_config_screen.content_conv2_2_check.active = current_fast_content.content_conv2_2_check
self.fast_graph_config_screen.content_conv2_2_weight.text = str(current_fast_content.content_conv2_2_weight)
self.fast_graph_config_screen.content_pool2_check.active = current_fast_content.content_pool2_check
self.fast_graph_config_screen.content_pool2_weight.text = str(current_fast_content.content_pool2_weight)
self.fast_graph_config_screen.content_conv3_1_check.active = current_fast_content.content_conv3_1_check
self.fast_graph_config_screen.content_conv3_1_weight.text = str(current_fast_content.content_conv3_1_weight)
self.fast_graph_config_screen.content_conv3_2_check.active = current_fast_content.content_conv3_2_check
self.fast_graph_config_screen.content_conv3_2_weight.text = str(current_fast_content.content_conv3_2_weight)
self.fast_graph_config_screen.content_conv3_3_check.active = current_fast_content.content_conv3_3_check
self.fast_graph_config_screen.content_conv3_3_weight.text = str(current_fast_content.content_conv3_3_weight)
self.fast_graph_config_screen.content_conv3_4_check.active = current_fast_content.content_conv3_4_check
self.fast_graph_config_screen.content_conv3_4_weight.text = str(current_fast_content.content_conv3_4_weight)
self.fast_graph_config_screen.content_pool3_check.active = current_fast_content.content_pool3_check
self.fast_graph_config_screen.content_pool3_weight.text = str(current_fast_content.content_pool3_weight)
self.fast_graph_config_screen.content_conv4_1_check.active = current_fast_content.content_conv4_1_check
self.fast_graph_config_screen.content_conv4_1_weight.text = str(current_fast_content.content_conv4_1_weight)
self.fast_graph_config_screen.content_conv4_2_check.active = current_fast_content.content_conv4_2_check
self.fast_graph_config_screen.content_conv4_2_weight.text = str(current_fast_content.content_conv4_2_weight)
self.fast_graph_config_screen.content_conv4_3_check.active = current_fast_content.content_conv4_3_check
self.fast_graph_config_screen.content_conv4_3_weight.text = str(current_fast_content.content_conv4_3_weight)
self.fast_graph_config_screen.content_conv4_4_check.active = current_fast_content.content_conv4_4_check
self.fast_graph_config_screen.content_conv4_4_weight.text = str(current_fast_content.content_conv4_4_weight)
self.fast_graph_config_screen.content_pool4_check.active = current_fast_content.content_pool4_check
self.fast_graph_config_screen.content_pool4_weight.text = str(current_fast_content.content_pool4_weight)
self.fast_graph_config_screen.content_conv5_1_check.active = current_fast_content.content_conv5_1_check
self.fast_graph_config_screen.content_conv5_1_weight.text = str(current_fast_content.content_conv5_1_weight)
self.fast_graph_config_screen.content_conv5_2_check.active = current_fast_content.content_conv5_2_check
self.fast_graph_config_screen.content_conv5_2_weight.text = str(current_fast_content.content_conv5_2_weight)
self.fast_graph_config_screen.content_conv5_3_check.active = current_fast_content.content_conv5_3_check
self.fast_graph_config_screen.content_conv5_3_weight.text = str(current_fast_content.content_conv5_3_weight)
self.fast_graph_config_screen.content_conv5_4_check.active = current_fast_content.content_conv5_4_check
self.fast_graph_config_screen.content_conv5_4_weight.text = str(current_fast_content.content_conv5_4_weight)
self.fast_graph_config_screen.content_pool5_check.active = current_fast_content.content_pool5_check
self.fast_graph_config_screen.content_pool5_weight.text = str(current_fast_content.content_pool5_weight)
def fast_style_screen_update(self):
current_fast_style = self.fast_style_list[self.fast_style_list_counter]
self.fast_graph_config_screen.style_file.text = current_fast_style.path.split('\\')[-1]
self.fast_graph_config_screen.beta.text = str(current_fast_style.beta)
self.fast_graph_config_screen.style_conv1_1_check.active = current_fast_style.style_conv1_1_check
self.fast_graph_config_screen.style_conv1_1_weight.text = str(current_fast_style.style_conv1_1_weight)
self.fast_graph_config_screen.style_conv1_2_check.active = current_fast_style.style_conv1_2_check
self.fast_graph_config_screen.style_conv1_2_weight.text = str(current_fast_style.style_conv1_2_weight)
self.fast_graph_config_screen.style_pool1_check.active = current_fast_style.style_pool1_check
self.fast_graph_config_screen.style_pool1_weight.text = str(current_fast_style.style_pool1_weight)
self.fast_graph_config_screen.style_conv2_1_check.active = current_fast_style.style_conv2_1_check
self.fast_graph_config_screen.style_conv2_1_weight.text = str(current_fast_style.style_conv2_1_weight)
self.fast_graph_config_screen.style_conv2_2_check.active = current_fast_style.style_conv2_2_check
self.fast_graph_config_screen.style_conv2_2_weight.text = str(current_fast_style.style_conv2_2_weight)
self.fast_graph_config_screen.style_pool2_check.active = current_fast_style.style_pool2_check
self.fast_graph_config_screen.style_pool2_weight.text = str(current_fast_style.style_pool2_weight)
self.fast_graph_config_screen.style_conv3_1_check.active = current_fast_style.style_conv3_1_check
self.fast_graph_config_screen.style_conv3_1_weight.text = str(current_fast_style.style_conv3_1_weight)
self.fast_graph_config_screen.style_conv3_2_check.active = current_fast_style.style_conv3_2_check
self.fast_graph_config_screen.style_conv3_2_weight.text = str(current_fast_style.style_conv3_2_weight)
self.fast_graph_config_screen.style_conv3_3_check.active = current_fast_style.style_conv3_3_check
self.fast_graph_config_screen.style_conv3_3_weight.text = str(current_fast_style.style_conv3_3_weight)
self.fast_graph_config_screen.style_conv3_4_check.active = current_fast_style.style_conv3_4_check
self.fast_graph_config_screen.style_conv3_4_weight.text = str(current_fast_style.style_conv3_4_weight)
self.fast_graph_config_screen.style_pool3_check.active = current_fast_style.style_pool3_check
self.fast_graph_config_screen.style_pool3_weight.text = str(current_fast_style.style_pool3_weight)
self.fast_graph_config_screen.style_conv4_1_check.active = current_fast_style.style_conv4_1_check
self.fast_graph_config_screen.style_conv4_1_weight.text = str(current_fast_style.style_conv4_1_weight)
self.fast_graph_config_screen.style_conv4_2_check.active = current_fast_style.style_conv4_2_check
self.fast_graph_config_screen.style_conv4_2_weight.text = str(current_fast_style.style_conv4_2_weight)
self.fast_graph_config_screen.style_conv4_3_check.active = current_fast_style.style_conv4_3_check
self.fast_graph_config_screen.style_conv4_3_weight.text = str(current_fast_style.style_conv4_3_weight)
self.fast_graph_config_screen.style_conv4_4_check.active = current_fast_style.style_conv4_4_check
self.fast_graph_config_screen.style_conv4_4_weight.text = str(current_fast_style.style_conv4_4_weight)
self.fast_graph_config_screen.style_pool4_check.active = current_fast_style.style_pool4_check
self.fast_graph_config_screen.style_pool4_weight.text = str(current_fast_style.style_pool4_weight)
self.fast_graph_config_screen.style_conv5_1_check.active = current_fast_style.style_conv5_1_check
self.fast_graph_config_screen.style_conv5_1_weight.text = str(current_fast_style.style_conv5_1_weight)
self.fast_graph_config_screen.style_conv5_2_check.active = current_fast_style.style_conv5_2_check
self.fast_graph_config_screen.style_conv5_2_weight.text = str(current_fast_style.style_conv5_2_weight)
self.fast_graph_config_screen.style_conv5_3_check.active = current_fast_style.style_conv5_3_check
self.fast_graph_config_screen.style_conv5_3_weight.text = str(current_fast_style.style_conv5_3_weight)
self.fast_graph_config_screen.style_conv5_4_check.active = current_fast_style.style_conv5_4_check
self.fast_graph_config_screen.style_conv5_4_weight.text = str(current_fast_style.style_conv5_4_weight)
self.fast_graph_config_screen.style_pool5_check.active = current_fast_style.style_pool5_check
self.fast_graph_config_screen.style_pool5_weight.text = str(current_fast_style.style_pool5_weight)
def faster_content_save(self, alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_pool5_check, content_pool5_weight):
try:
alpha = float(alpha)
content_conv1_1_check = bool(content_conv1_1_check)
content_conv1_1_weight = float(content_conv1_1_weight)
content_conv1_2_check = bool(content_conv1_2_check)
content_conv1_2_weight = float(content_conv1_2_weight)
content_pool1_check = bool(content_pool1_check)
content_pool1_weight = float(content_pool1_weight)
content_conv2_1_check = bool(content_conv2_1_check)
content_conv2_1_weight = float(content_conv2_1_weight)
content_conv2_2_check = bool(content_conv2_2_check)
content_conv2_2_weight = float(content_conv2_2_weight)
content_pool2_check = bool(content_pool2_check)
content_pool2_weight = float(content_pool2_weight)
content_conv3_1_check = bool(content_conv3_1_check)
content_conv3_1_weight = float(content_conv3_1_weight)
content_conv3_2_check = bool(content_conv3_2_check)
content_conv3_2_weight = float(content_conv3_2_weight)
content_conv3_3_check = bool(content_conv3_3_check)
content_conv3_3_weight = float(content_conv3_3_weight)
content_pool3_check = bool(content_pool3_check)
content_pool3_weight = float(content_pool3_weight)
content_conv4_1_check = bool(content_conv4_1_check)
content_conv4_1_weight = float(content_conv4_1_weight)
content_conv4_2_check = bool(content_conv4_2_check)
content_conv4_2_weight = float(content_conv4_2_weight)
content_conv4_3_check = bool(content_conv4_3_check)
content_conv4_3_weight = float(content_conv4_3_weight)
content_pool4_check = bool(content_pool4_check)
content_pool4_weight = float(content_pool4_weight)
content_conv5_1_check = bool(content_conv5_1_check)
content_conv5_1_weight = float(content_conv5_1_weight)
content_conv5_2_check = bool(content_conv5_2_check)
content_conv5_2_weight = float(content_conv5_2_weight)
content_conv5_3_check = bool(content_conv5_3_check)
content_conv5_3_weight = float(content_conv5_3_weight)
content_pool5_check = bool(content_pool5_check)
content_pool5_weight = float(content_pool5_weight)
########
current_faster_content = self.faster_content_list[self.faster_content_list_counter]
current_faster_content.alpha = alpha
current_faster_content.content_conv1_1_check = content_conv1_1_check
current_faster_content.content_conv1_1_weight = content_conv1_1_weight
current_faster_content.content_conv1_2_check = content_conv1_2_check
current_faster_content.content_conv1_2_weight = content_conv1_2_weight
current_faster_content.content_pool1_check = content_pool1_check
current_faster_content.content_pool1_weight = content_pool1_weight
current_faster_content.content_conv2_1_check = content_conv2_1_check
current_faster_content.content_conv2_1_weight = content_conv2_1_weight
current_faster_content.content_conv2_2_check = content_conv2_2_check
current_faster_content.content_conv2_2_weight = content_conv2_2_weight
current_faster_content.content_pool2_check = content_pool2_check
current_faster_content.content_pool2_weight = content_pool2_weight
current_faster_content.content_conv3_1_check = content_conv3_1_check
current_faster_content.content_conv3_1_weight = content_conv3_1_weight
current_faster_content.content_conv3_2_check = content_conv3_2_check
current_faster_content.content_conv3_2_weight = content_conv3_2_weight
current_faster_content.content_conv3_3_check = content_conv3_3_check
current_faster_content.content_conv3_3_weight = content_conv3_3_weight
current_faster_content.content_pool3_check = content_pool3_check
current_faster_content.content_pool3_weight = content_pool3_weight
current_faster_content.content_conv4_1_check = content_conv4_1_check
current_faster_content.content_conv4_1_weight = content_conv4_1_weight
current_faster_content.content_conv4_2_check = content_conv4_2_check
current_faster_content.content_conv4_2_weight = content_conv4_2_weight
current_faster_content.content_conv4_3_check = content_conv4_3_check
current_faster_content.content_conv4_3_weight = content_conv4_3_weight
current_faster_content.content_pool4_check = content_pool4_check
current_faster_content.content_pool4_weight = content_pool4_weight
current_faster_content.content_conv5_1_check = content_conv5_1_check
current_faster_content.content_conv5_1_weight = content_conv5_1_weight
current_faster_content.content_conv5_2_check = content_conv5_2_check
current_faster_content.content_conv5_2_weight = content_conv5_2_weight
current_faster_content.content_conv5_3_check = content_conv5_3_check
current_faster_content.content_conv5_3_weight = content_conv5_3_weight
current_faster_content.content_pool5_check = content_pool5_check
current_faster_content.content_pool5_weight = content_pool5_weight
except:
pass
def faster_style_save(self, beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_pool5_check, style_pool5_weight):
try:
beta = float(beta)
style_conv1_1_check = bool(style_conv1_1_check)
style_conv1_1_weight = float(style_conv1_1_weight)
style_conv1_2_check = bool(style_conv1_2_check)
style_conv1_2_weight = float(style_conv1_2_weight)
style_pool1_check = bool(style_pool1_check)
style_pool1_weight = float(style_pool1_weight)
style_conv2_1_check = bool(style_conv2_1_check)
style_conv2_1_weight = float(style_conv2_1_weight)
style_conv2_2_check = bool(style_conv2_2_check)
style_conv2_2_weight = float(style_conv2_2_weight)
style_pool2_check = bool(style_pool2_check)
style_pool2_weight = float(style_pool2_weight)
style_conv3_1_check = bool(style_conv3_1_check)
style_conv3_1_weight = float(style_conv3_1_weight)
style_conv3_2_check = bool(style_conv3_2_check)
style_conv3_2_weight = float(style_conv3_2_weight)
style_conv3_3_check = bool(style_conv3_3_check)
style_conv3_3_weight = float(style_conv3_3_weight)
style_pool3_check = bool(style_pool3_check)
style_pool3_weight = float(style_pool3_weight)
style_conv4_1_check = bool(style_conv4_1_check)
style_conv4_1_weight = float(style_conv4_1_weight)
style_conv4_2_check = bool(style_conv4_2_check)
style_conv4_2_weight = float(style_conv4_2_weight)
style_conv4_3_check = bool(style_conv4_3_check)
style_conv4_3_weight = float(style_conv4_3_weight)
style_pool4_check = bool(style_pool4_check)
style_pool4_weight = float(style_pool4_weight)
style_conv5_1_check = bool(style_conv5_1_check)
style_conv5_1_weight = float(style_conv5_1_weight)
style_conv5_2_check = bool(style_conv5_2_check)
style_conv5_2_weight = float(style_conv5_2_weight)
style_conv5_3_check = bool(style_conv5_3_check)
style_conv5_3_weight = float(style_conv5_3_weight)
style_pool5_check = bool(style_pool5_check)
style_pool5_weight = float(style_pool5_weight)
######
current_faster_style = self.faster_style_list[self.faster_style_list_counter]
current_faster_style.beta = beta
current_faster_style.style_conv1_1_check = style_conv1_1_check
current_faster_style.style_conv1_1_weight = style_conv1_1_weight
current_faster_style.style_conv1_2_check = style_conv1_2_check
current_faster_style.style_conv1_2_weight = style_conv1_2_weight
current_faster_style.style_pool1_check = style_pool1_check
current_faster_style.style_pool1_weight = style_pool1_weight
current_faster_style.style_conv2_1_check = style_conv2_1_check
current_faster_style.style_conv2_1_weight = style_conv2_1_weight
current_faster_style.style_conv2_2_check = style_conv2_2_check
current_faster_style.style_conv2_2_weight = style_conv2_2_weight
current_faster_style.style_pool2_check = style_pool2_check
current_faster_style.style_pool2_weight = style_pool2_weight
current_faster_style.style_conv3_1_check = style_conv3_1_check
current_faster_style.style_conv3_1_weight = style_conv3_1_weight
current_faster_style.style_conv3_2_check = style_conv3_2_check
current_faster_style.style_conv3_2_weight = style_conv3_2_weight
current_faster_style.style_conv3_3_check = style_conv3_3_check
current_faster_style.style_conv3_3_weight = style_conv3_3_weight
current_faster_style.style_pool3_check = style_pool3_check
current_faster_style.style_pool3_weight = style_pool3_weight
current_faster_style.style_conv4_1_check = style_conv4_1_check
current_faster_style.style_conv4_1_weight = style_conv4_1_weight
current_faster_style.style_conv4_2_check = style_conv4_2_check
current_faster_style.style_conv4_2_weight = style_conv4_2_weight
current_faster_style.style_conv4_3_check = style_conv4_3_check
current_faster_style.style_conv4_3_weight = style_conv4_3_weight
current_faster_style.style_pool4_check = style_pool4_check
current_faster_style.style_pool4_weight = style_pool4_weight
current_faster_style.style_conv5_1_check = style_conv5_1_check
current_faster_style.style_conv5_1_weight = style_conv5_1_weight
current_faster_style.style_conv5_2_check = style_conv5_2_check
current_faster_style.style_conv5_2_weight = style_conv5_2_weight
current_faster_style.style_conv5_3_check = style_conv5_3_check
current_faster_style.style_conv5_3_weight = style_conv5_3_weight
current_faster_style.style_pool5_check = style_pool5_check
current_faster_style.style_pool5_weight = style_pool5_weight
except:
pass
def fast_content_save(self, alpha, content_conv1_1_check, content_conv1_1_weight, content_conv1_2_check, content_conv1_2_weight, content_pool1_check, content_pool1_weight, content_conv2_1_check, content_conv2_1_weight, content_conv2_2_check, content_conv2_2_weight, content_pool2_check, content_pool2_weight, content_conv3_1_check, content_conv3_1_weight, content_conv3_2_check, content_conv3_2_weight, content_conv3_3_check, content_conv3_3_weight, content_conv3_4_check, content_conv3_4_weight, content_pool3_check, content_pool3_weight, content_conv4_1_check, content_conv4_1_weight, content_conv4_2_check, content_conv4_2_weight, content_conv4_3_check, content_conv4_3_weight, content_conv4_4_check, content_conv4_4_weight, content_pool4_check, content_pool4_weight, content_conv5_1_check, content_conv5_1_weight, content_conv5_2_check, content_conv5_2_weight, content_conv5_3_check, content_conv5_3_weight, content_conv5_4_check, content_conv5_4_weight, content_pool5_check, content_pool5_weight):
try:
alpha = float(alpha)
content_conv1_1_check = bool(content_conv1_1_check)
content_conv1_1_weight = float(content_conv1_1_weight)
content_conv1_2_check = bool(content_conv1_2_check)
content_conv1_2_weight = float(content_conv1_2_weight)
content_pool1_check = bool(content_pool1_check)
content_pool1_weight = float(content_pool1_weight)
content_conv2_1_check = bool(content_conv2_1_check)
content_conv2_1_weight = float(content_conv2_1_weight)
content_conv2_2_check = bool(content_conv2_2_check)
content_conv2_2_weight = float(content_conv2_2_weight)
content_pool2_check = bool(content_pool2_check)
content_pool2_weight = float(content_pool2_weight)
content_conv3_1_check = bool(content_conv3_1_check)
content_conv3_1_weight = float(content_conv3_1_weight)
content_conv3_2_check = bool(content_conv3_2_check)
content_conv3_2_weight = float(content_conv3_2_weight)
content_conv3_3_check = bool(content_conv3_3_check)
content_conv3_3_weight = float(content_conv3_3_weight)
content_conv3_4_check = bool(content_conv3_4_check)
content_conv3_4_weight = float(content_conv3_4_weight)
content_pool3_check = bool(content_pool3_check)
content_pool3_weight = float(content_pool3_weight)
content_conv4_1_check = bool(content_conv4_1_check)
content_conv4_1_weight = float(content_conv4_1_weight)
content_conv4_2_check = bool(content_conv4_2_check)
content_conv4_2_weight = float(content_conv4_2_weight)
content_conv4_3_check = bool(content_conv4_3_check)
content_conv4_3_weight = float(content_conv4_3_weight)
content_conv4_4_check = bool(content_conv4_4_check)
content_conv4_4_weight = float(content_conv4_4_weight)
content_pool4_check = bool(content_pool4_check)
content_pool4_weight = float(content_pool4_weight)
content_conv5_1_check = bool(content_conv5_1_check)
content_conv5_1_weight = float(content_conv5_1_weight)
content_conv5_2_check = bool(content_conv5_2_check)
content_conv5_2_weight = float(content_conv5_2_weight)
content_conv5_3_check = bool(content_conv5_3_check)
content_conv5_3_weight = float(content_conv5_3_weight)
content_conv5_4_check = bool(content_conv5_4_check)
content_conv5_4_weight = float(content_conv5_4_weight)
content_pool5_check = bool(content_pool5_check)
content_pool5_weight = float(content_pool5_weight)
########
current_fast_content = self.fast_content_list[self.fast_content_list_counter]
current_fast_content.alpha = alpha
current_fast_content.content_conv1_1_check = content_conv1_1_check
current_fast_content.content_conv1_1_weight = content_conv1_1_weight
current_fast_content.content_conv1_2_check = content_conv1_2_check
current_fast_content.content_conv1_2_weight = content_conv1_2_weight
current_fast_content.content_pool1_check = content_pool1_check
current_fast_content.content_pool1_weight = content_pool1_weight
current_fast_content.content_conv2_1_check = content_conv2_1_check
current_fast_content.content_conv2_1_weight = content_conv2_1_weight
current_fast_content.content_conv2_2_check = content_conv2_2_check
current_fast_content.content_conv2_2_weight = content_conv2_2_weight
current_fast_content.content_pool2_check = content_pool2_check
current_fast_content.content_pool2_weight = content_pool2_weight
current_fast_content.content_conv3_1_check = content_conv3_1_check
current_fast_content.content_conv3_1_weight = content_conv3_1_weight
current_fast_content.content_conv3_2_check = content_conv3_2_check
current_fast_content.content_conv3_2_weight = content_conv3_2_weight
current_fast_content.content_conv3_3_check = content_conv3_3_check
current_fast_content.content_conv3_3_weight = content_conv3_3_weight
current_fast_content.content_conv3_4_check = content_conv3_4_check
current_fast_content.content_conv3_4_weight = content_conv3_4_weight
current_fast_content.content_pool3_check = content_pool3_check
current_fast_content.content_pool3_weight = content_pool3_weight
current_fast_content.content_conv4_1_check = content_conv4_1_check
current_fast_content.content_conv4_1_weight = content_conv4_1_weight
current_fast_content.content_conv4_2_check = content_conv4_2_check
current_fast_content.content_conv4_2_weight = content_conv4_2_weight
current_fast_content.content_conv4_3_check = content_conv4_3_check
current_fast_content.content_conv4_3_weight = content_conv4_3_weight
current_fast_content.content_conv4_4_check = content_conv4_4_check
current_fast_content.content_conv4_4_weight = content_conv4_4_weight
current_fast_content.content_pool4_check = content_pool4_check
current_fast_content.content_pool4_weight = content_pool4_weight
current_fast_content.content_conv5_1_check = content_conv5_1_check
current_fast_content.content_conv5_1_weight = content_conv5_1_weight
current_fast_content.content_conv5_2_check = content_conv5_2_check
current_fast_content.content_conv5_2_weight = content_conv5_2_weight
current_fast_content.content_conv5_3_check = content_conv5_3_check
current_fast_content.content_conv5_3_weight = content_conv5_3_weight
current_fast_content.content_conv5_4_check = content_conv5_4_check
current_fast_content.content_conv5_4_weight = content_conv5_4_weight
current_fast_content.content_pool5_check = content_pool5_check
current_fast_content.content_pool5_weight = content_pool5_weight
except:
pass
def fast_style_save(self, beta, style_conv1_1_check, style_conv1_1_weight, style_conv1_2_check, style_conv1_2_weight, style_pool1_check, style_pool1_weight, style_conv2_1_check, style_conv2_1_weight, style_conv2_2_check, style_conv2_2_weight, style_pool2_check, style_pool2_weight, style_conv3_1_check, style_conv3_1_weight, style_conv3_2_check, style_conv3_2_weight, style_conv3_3_check, style_conv3_3_weight, style_conv3_4_check, style_conv3_4_weight, style_pool3_check, style_pool3_weight, style_conv4_1_check, style_conv4_1_weight, style_conv4_2_check, style_conv4_2_weight, style_conv4_3_check, style_conv4_3_weight, style_conv4_4_check, style_conv4_4_weight, style_pool4_check, style_pool4_weight, style_conv5_1_check, style_conv5_1_weight, style_conv5_2_check, style_conv5_2_weight, style_conv5_3_check, style_conv5_3_weight, style_conv5_4_check, style_conv5_4_weight, style_pool5_check, style_pool5_weight):
try:
beta = float(beta)
style_conv1_1_check = bool(style_conv1_1_check)
style_conv1_1_weight = float(style_conv1_1_weight)
style_conv1_2_check = bool(style_conv1_2_check)
style_conv1_2_weight = float(style_conv1_2_weight)
style_pool1_check = bool(style_pool1_check)
style_pool1_weight = float(style_pool1_weight)
style_conv2_1_check = bool(style_conv2_1_check)
style_conv2_1_weight = float(style_conv2_1_weight)
style_conv2_2_check = bool(style_conv2_2_check)
style_conv2_2_weight = float(style_conv2_2_weight)
style_pool2_check = bool(style_pool2_check)
style_pool2_weight = float(style_pool2_weight)
style_conv3_1_check = bool(style_conv3_1_check)
style_conv3_1_weight = float(style_conv3_1_weight)
style_conv3_2_check = bool(style_conv3_2_check)
style_conv3_2_weight = float(style_conv3_2_weight)
style_conv3_3_check = bool(style_conv3_3_check)
style_conv3_3_weight = float(style_conv3_3_weight)
style_conv3_4_check = bool(style_conv3_4_check)
style_conv3_4_weight = float(style_conv3_4_weight)
style_pool3_check = bool(style_pool3_check)
style_pool3_weight = float(style_pool3_weight)
style_conv4_1_check = bool(style_conv4_1_check)
style_conv4_1_weight = float(style_conv4_1_weight)
style_conv4_2_check = bool(style_conv4_2_check)
style_conv4_2_weight = float(style_conv4_2_weight)
style_conv4_3_check = bool(style_conv4_3_check)
style_conv4_3_weight = float(style_conv4_3_weight)
style_conv4_4_check = bool(style_conv4_4_check)
style_conv4_4_weight = float(style_conv4_4_weight)
style_pool4_check = bool(style_pool4_check)
style_pool4_weight = float(style_pool4_weight)
style_conv5_1_check = bool(style_conv5_1_check)
style_conv5_1_weight = float(style_conv5_1_weight)
style_conv5_2_check = bool(style_conv5_2_check)
style_conv5_2_weight = float(style_conv5_2_weight)
style_conv5_3_check = bool(style_conv5_3_check)
style_conv5_3_weight = float(style_conv5_3_weight)
style_conv5_4_check = bool(style_conv5_4_check)
style_conv5_4_weight = float(style_conv5_4_weight)
style_pool5_check = bool(style_pool5_check)
style_pool5_weight = float(style_pool5_weight)
######
current_fast_style = self.fast_style_list[self.fast_style_list_counter]
current_fast_style.beta = beta
current_fast_style.style_conv1_1_check = style_conv1_1_check
current_fast_style.style_conv1_1_weight = style_conv1_1_weight
current_fast_style.style_conv1_2_check = style_conv1_2_check
current_fast_style.style_conv1_2_weight = style_conv1_2_weight
current_fast_style.style_pool1_check = style_pool1_check
current_fast_style.style_pool1_weight = style_pool1_weight
current_fast_style.style_conv2_1_check = style_conv2_1_check
current_fast_style.style_conv2_1_weight = style_conv2_1_weight
current_fast_style.style_conv2_2_check = style_conv2_2_check
current_fast_style.style_conv2_2_weight = style_conv2_2_weight
current_fast_style.style_pool2_check = style_pool2_check
current_fast_style.style_pool2_weight = style_pool2_weight
current_fast_style.style_conv3_1_check = style_conv3_1_check
current_fast_style.style_conv3_1_weight = style_conv3_1_weight
current_fast_style.style_conv3_2_check = style_conv3_2_check
current_fast_style.style_conv3_2_weight = style_conv3_2_weight
current_fast_style.style_conv3_3_check = style_conv3_3_check
current_fast_style.style_conv3_3_weight = style_conv3_3_weight
current_fast_style.style_conv3_4_check = style_conv3_4_check
current_fast_style.style_conv3_4_weight = style_conv3_4_weight
current_fast_style.style_pool3_check = style_pool3_check
current_fast_style.style_pool3_weight = style_pool3_weight
current_fast_style.style_conv4_1_check = style_conv4_1_check
current_fast_style.style_conv4_1_weight = style_conv4_1_weight
current_fast_style.style_conv4_2_check = style_conv4_2_check
current_fast_style.style_conv4_2_weight = style_conv4_2_weight
current_fast_style.style_conv4_3_check = style_conv4_3_check
current_fast_style.style_conv4_3_weight = style_conv4_3_weight
current_fast_style.style_conv4_4_check = style_conv4_4_check
current_fast_style.style_conv4_4_weight = style_conv4_4_weight
current_fast_style.style_pool4_check = style_pool4_check
current_fast_style.style_pool4_weight = style_pool4_weight
current_fast_style.style_conv5_1_check = style_conv5_1_check
current_fast_style.style_conv5_1_weight = style_conv5_1_weight
current_fast_style.style_conv5_2_check = style_conv5_2_check
current_fast_style.style_conv5_2_weight = style_conv5_2_weight
current_fast_style.style_conv5_3_check = style_conv5_3_check
current_fast_style.style_conv5_3_weight = style_conv5_3_weight
current_fast_style.style_conv5_4_check = style_conv5_4_check
current_fast_style.style_conv5_4_weight = style_conv5_4_weight
current_fast_style.style_pool5_check = style_pool5_check
current_fast_style.style_pool5_weight = style_pool5_weight
except:
pass
| [
"ansh.iam@gmail.com"
] | ansh.iam@gmail.com |
b67c35de5858f8d010e21e187124471ece32cac7 | c6e6ad3d3d2b0737a3f75690043767358bf0e959 | /Week5/ps5.py | 4ff856cf4e91d86512624c03bb911961ec98c867 | [] | no_license | wesenu/edx.computational.thinking | c4a43090d46644a5da76b681ba17d80c6ca06c43 | f7c80489909b049bb221095130e0639b57813205 | refs/heads/master | 2022-02-24T17:53:22.314907 | 2016-05-06T10:58:26 | 2016-05-06T10:58:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,731 | py | # 6.00.2x Problem Set 5
# Graph optimization
# Finding shortest paths through MIT buildings
#
import string
# This imports everything from `graph.py` as if it was defined in this file!
from graph import *
#
# Problem 2: Building up the Campus Map
#
# Before you write any code, write a couple of sentences here
# describing how you will model this problem as a graph.
# This is a helpful exercise to help you organize your
# thoughts before you tackle a big design problem!
#
def load_map(mapFilename):
    """
    Parses the map file and constructs a directed graph
    Parameters:
        mapFilename : name of the map file
    Assumes:
        Each entry in the map file consists of the following four positive
        integers, separated by a blank space:
            From To TotalDistance DistanceOutdoors
        e.g.
            32 76 54 23
        This entry would become an edge from 32 to 76.
    Returns:
        a directed graph representing the map
    """
    map_graph = WeightedDigraph()
    # ``with`` closes the file even if parsing raises, replacing the manual
    # open / try / finally bookkeeping.
    with open(mapFilename, 'r') as map_file:
        for line in map_file:
            # Each line: source, destination, total distance, outdoor distance.
            src_name, dest_name, total_dist, outdoor_dist = line.split()
            src = Node(src_name)
            dest = Node(dest_name)
            if not map_graph.hasNode(src):
                map_graph.addNode(src)
            if not map_graph.hasNode(dest):
                map_graph.addNode(dest)
            map_graph.addEdge(WeightedEdge(src, dest, int(total_dist), int(outdoor_dist)))
    return map_graph
#
# Problem 3: Finding the Shortest Path using Brute Force Search
#
# State the optimization problem as a function to minimize
# and what the constraints are
#
def bruteForceSearch(digraph, start, end, maxTotalDist, maxDistOutdoors):
    """
    Finds the shortest path from start to end using brute-force approach.
    The total distance travelled on the path must not exceed maxTotalDist, and
    the distance spent outdoor on this path must not exceed maxDistOutdoors.
    Parameters:
        digraph: instance of class Digraph or its subclass
        start, end: start & end building numbers (strings)
        maxTotalDist : maximum total distance on a path (integer)
        maxDistOutdoors: maximum distance spent outdoors on a path (integer)
    Assumes:
        start and end are numbers for existing buildings in graph
    Returns:
        The shortest-path from start to end, represented by
        a list of building numbers (in strings), [n_1, n_2, ..., n_k],
        where there exists an edge from n_i to n_(i+1) in digraph,
        for all 1 <= i < k.
        If there exists no path that satisfies maxTotalDist and
        maxDistOutdoors constraints, then raises a ValueError.
    """
    # Each frontier entry: [list of node names, (total_dist, outdoor_dist)].
    frontier = [[[start], (0.0, 0.0)]]
    shortest = None
    while frontier:
        candidate = frontier.pop(0)
        path, (total, outdoor) = candidate[0], candidate[1]
        if path[-1] == end:
            if total > maxTotalDist or outdoor > maxDistOutdoors:
                # Complete path violates a constraint: discard it.
                continue
            if shortest is None or total < shortest[1][0]:
                shortest = candidate
        # Extend the path to every unvisited neighbour of its last node.
        extensions = []
        for edge in digraph.childrenOf(Node(path[-1])):
            neighbour = str(edge[0])
            if neighbour in path:
                continue  # skip cycles
            extensions.append([path + [neighbour],
                               (total + edge[1][0], outdoor + edge[1][1])])
        # Prepend extensions so exploration stays depth-first.
        frontier = extensions + frontier
    if not shortest:
        raise ValueError('Path not found')
    return shortest[0]
#
# Problem 4: Finding the Shorest Path using Optimized Search Method
#
def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):
    """
    Finds the shortest path from start to end using directed depth-first
    search. The total distance travelled on the path must not
    exceed maxTotalDist, and the distance spent outdoor on this path must
    not exceed maxDistOutdoors.
    Parameters:
        digraph: instance of class Digraph or its subclass
        start, end: start & end building numbers (strings)
        maxTotalDist : maximum total distance on a path (integer)
        maxDistOutdoors: maximum distance spent outdoors on a path (integer)
    Assumes:
        start and end are numbers for existing buildings in graph
    Returns:
        The shortest-path from start to end, represented by
        a list of building numbers (in strings), [n_1, n_2, ..., n_k],
        where there exists an edge from n_i to n_(i+1) in digraph,
        for all 1 <= i < k.
        If there exists no path that satisfies maxTotalDist and
        maxDistOutdoors constraints, then raises a ValueError.
    """
    # Each work item is [path_so_far, (total_dist, outdoor_dist)].
    stack = [[[start], (0.0, 0.0)]]
    best_path = None
    while len(stack) > 0:
        tip = stack[0]
        stack = stack[1:]
        # Branch-and-bound pruning: a partial path already longer than the
        # best complete path can never improve on it.
        if best_path is not None and tip[1][0] > best_path[1][0]:
            continue
        if tip[0][-1] == end:
            # Complete path: keep it only if it satisfies both constraints
            # and improves on the best total distance found so far.
            if tip[1][0] <= maxTotalDist and tip[1][1] <= maxDistOutdoors:
                if best_path is None or tip[1][0] < best_path[1][0]:
                    best_path = tip
            # Fix: do not extend a path that already reached `end`.  Nodes
            # are never revisited, so such an extension can never terminate
            # at `end` again -- expanding it was pure wasted work.
            continue
        edges = digraph.childrenOf(Node(tip[0][-1]))
        tips = []
        for edge in edges:
            # Skip neighbours already on this path (paths stay acyclic).
            if str(edge[0]) in tip[0]:
                continue
            tips.append([tip[0] + [str(edge[0])],
                         (tip[1][0] + edge[1][0], tip[1][1] + edge[1][1])])
        # Depth-first order: new extensions go on the front of the stack.
        stack = tips + stack
    if not best_path:
        raise ValueError('Path not found')
    return best_path[0]
# Uncomment below when ready to test
#### NOTE! These tests may take a few minutes to run!! ####
# if __name__ == '__main__':
# Test cases
# mitMap = load_map("mit_map.txt")
# print isinstance(mitMap, Digraph)
# print isinstance(mitMap, WeightedDigraph)
# print 'nodes', mitMap.nodes
# print 'edges', mitMap.edges
# LARGE_DIST = 1000000
# Test case 1
# print "---------------"
# print "Test case 1:"
# print "Find the shortest-path from Building 32 to 56"
# expectedPath1 = ['32', '56']
# brutePath1 = bruteForceSearch(mitMap, '32', '56', LARGE_DIST, LARGE_DIST)
# dfsPath1 = directedDFS(mitMap, '32', '56', LARGE_DIST, LARGE_DIST)
# print "Expected: ", expectedPath1
# print "Brute-force: ", brutePath1
# print "DFS: ", dfsPath1
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath1 == brutePath1, expectedPath1 == dfsPath1)
# Test case 2
# print "---------------"
# print "Test case 2:"
# print "Find the shortest-path from Building 32 to 56 without going outdoors"
# expectedPath2 = ['32', '36', '26', '16', '56']
# brutePath2 = bruteForceSearch(mitMap, '32', '56', LARGE_DIST, 0)
# dfsPath2 = directedDFS(mitMap, '32', '56', LARGE_DIST, 0)
# print "Expected: ", expectedPath2
# print "Brute-force: ", brutePath2
# print "DFS: ", dfsPath2
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath2 == brutePath2, expectedPath2 == dfsPath2)
# Test case 3
# print "---------------"
# print "Test case 3:"
# print "Find the shortest-path from Building 2 to 9"
# expectedPath3 = ['2', '3', '7', '9']
# brutePath3 = bruteForceSearch(mitMap, '2', '9', LARGE_DIST, LARGE_DIST)
# dfsPath3 = directedDFS(mitMap, '2', '9', LARGE_DIST, LARGE_DIST)
# print "Expected: ", expectedPath3
# print "Brute-force: ", brutePath3
# print "DFS: ", dfsPath3
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath3 == brutePath3, expectedPath3 == dfsPath3)
# Test case 4
# print "---------------"
# print "Test case 4:"
# print "Find the shortest-path from Building 2 to 9 without going outdoors"
# expectedPath4 = ['2', '4', '10', '13', '9']
# brutePath4 = bruteForceSearch(mitMap, '2', '9', LARGE_DIST, 0)
# dfsPath4 = directedDFS(mitMap, '2', '9', LARGE_DIST, 0)
# print "Expected: ", expectedPath4
# print "Brute-force: ", brutePath4
# print "DFS: ", dfsPath4
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath4 == brutePath4, expectedPath4 == dfsPath4)
# Test case 5
# print "---------------"
# print "Test case 5:"
# print "Find the shortest-path from Building 1 to 32"
# expectedPath5 = ['1', '4', '12', '32']
# brutePath5 = bruteForceSearch(mitMap, '1', '32', LARGE_DIST, LARGE_DIST)
# dfsPath5 = directedDFS(mitMap, '1', '32', LARGE_DIST, LARGE_DIST)
# print "Expected: ", expectedPath5
# print "Brute-force: ", brutePath5
# print "DFS: ", dfsPath5
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath5 == brutePath5, expectedPath5 == dfsPath5)
# Test case 6
# print "---------------"
# print "Test case 6:"
# print "Find the shortest-path from Building 1 to 32 without going outdoors"
# expectedPath6 = ['1', '3', '10', '4', '12', '24', '34', '36', '32']
# brutePath6 = bruteForceSearch(mitMap, '1', '32', LARGE_DIST, 0)
# dfsPath6 = directedDFS(mitMap, '1', '32', LARGE_DIST, 0)
# print "Expected: ", expectedPath6
# print "Brute-force: ", brutePath6
# print "DFS: ", dfsPath6
# print "Correct? BFS: {0}; DFS: {1}".format(expectedPath6 == brutePath6, expectedPath6 == dfsPath6)
# Test case 7
# print "---------------"
# print "Test case 7:"
# print "Find the shortest-path from Building 8 to 50 without going outdoors"
# bruteRaisedErr = 'No'
# dfsRaisedErr = 'No'
# try:
# bruteForceSearch(mitMap, '8', '50', LARGE_DIST, 0)
# except ValueError:
# bruteRaisedErr = 'Yes'
# try:
# directedDFS(mitMap, '8', '50', LARGE_DIST, 0)
# except ValueError:
# dfsRaisedErr = 'Yes'
# print "Expected: No such path! Should throw a value error."
# print "Did brute force search raise an error?", bruteRaisedErr
# print "Did DFS search raise an error?", dfsRaisedErr
# Test case 8
# print "---------------"
# print "Test case 8:"
# print "Find the shortest-path from Building 10 to 32 without walking"
# print "more than 100 meters in total"
# bruteRaisedErr = 'No'
# dfsRaisedErr = 'No'
# try:
# bruteForceSearch(mitMap, '10', '32', 100, LARGE_DIST)
# except ValueError:
# bruteRaisedErr = 'Yes'
# try:
# directedDFS(mitMap, '10', '32', 100, LARGE_DIST)
# except ValueError:
# dfsRaisedErr = 'Yes'
# print "Expected: No such path! Should throw a value error."
# print "Did brute force search raise an error?", bruteRaisedErr
# print "Did DFS search raise an error?", dfsRaisedErr
| [
"dbnulles@gmail.com"
] | dbnulles@gmail.com |
29fdc94457c0a0b0a4ee44a2b125ab4a859821e0 | 4fe11812295499df6785139a7b63cf1bd7d6a16b | /other/Leetcode1685.py | 7fc7b632d074c91ef9bc736414f39bff0ee7a84b | [] | no_license | moonlight035/algorithm | 2f25a5ea77806bc2dad086bb90be7597e2bc2968 | e178f91ebffff06977e8c231de12786a72b3b13d | refs/heads/master | 2022-12-21T19:54:29.037097 | 2020-09-18T06:51:53 | 2020-09-18T06:51:53 | 266,728,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from typing import List
class Solution:
    """Locate the smallest "magic" index i (nums[i] == i) in a sorted list."""

    def findMagicIndex(self, nums: List[int]) -> int:
        """Return the smallest index i with nums[i] == i, or -1 if none exists.

        Binary search with an explicit work stack: when the probed value is
        off to one side, the unreachable sub-range is pruned using the sorted
        order, and the remaining candidate range is deferred on the stack.
        """
        best = -1
        pending = [(0, len(nums) - 1)]
        while pending:
            lo, hi = pending.pop()
            while lo <= hi:
                mid = lo + (hi - lo) // 2
                value = nums[mid]
                if value == mid:
                    # Found a magic index: remember it, drop all deferred
                    # ranges, and keep scanning only to its left for a
                    # smaller one.
                    best = mid
                    hi = mid - 1
                    pending.clear()
                elif value < mid:
                    # Left of mid, a magic index i satisfies i <= nums[mid].
                    pending.append((mid + 1, hi))
                    hi = value
                else:
                    # Right of mid, a magic index i satisfies i >= nums[mid].
                    pending.append((value, hi))
                    hi = mid - 1
        return best
# Smoke test: in 64 copies of 32, the first index equal to its value is 32.
s = Solution()
print(s.findMagicIndex([32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32]))
| [
"jing.liu14@ucarinc.com"
] | jing.liu14@ucarinc.com |
790e09716ca634f73859962629960782dccd97b8 | 2580d5f300369d4e3234e58c46a56ba641563afa | /Test_Folder/testdriver.py | 5302ead44ffd6af879fa93bb3ec9f8a778f0fa0f | [
"MIT",
"BSD-3-Clause"
] | permissive | DCZYewen/OpenOLS | 465d5df652bc40b9da02c4edefbcb2dd19c04b4e | c2a579c8a5bcc5860062694326c35dd29c81c235 | refs/heads/master | 2022-11-16T10:13:56.864340 | 2020-07-11T17:29:37 | 2020-07-11T17:29:37 | 252,991,070 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import libo2lsdb as o2lsdb
#o2lsdb.insertFulline('USERS',o2lsdb.makeInsertLine(99999996,'sabee',2099,99,99999997,'pbkdf2:sha256:150000$KtVm7aor$07665da0754c2da66cb1b6945fd057f334cc6562ea8dccfb2aa52a2df4793c7a','sabee','sabee','0','2002020202020200','男','无','无'))
#pass!
#o2lsdb.deleteByID('USERS',99999996,'user_id')
#pass!
#o2lsdb.updateByID("USERS",o2lsdb.makeUpdateLine('user_id','99999996','name','dasabee'),99999997,'user_id')
#pass!
#data = o2lsdb.selectAll("USERS")
#print(data)
#pass!
#data = o2lsdb.selectByID('USERS',o2lsdb.makeSelectLine('name','auth'),99999996,'user_id')
#print(data)
#pass!
#data = o2lsdb.selectByIndex('USERS',o2lsdb.makeSelectIndex('user_id','name','auth'))
#print(data)
#pass!
| [
"lzhallinone@outlook.com"
] | lzhallinone@outlook.com |
5acdd811894d586d73a7e4957bf4f0ed4a5a5ff8 | b3ac5b9905871ce0d3270587b30cac32da4583ae | /app.py | fdcb4abb1371c3ae8054deec6d8269a72fd4e2e0 | [] | no_license | dcesarz/Taylor-series-for-e-x-implementation. | 222bd35f1227896bfd882b8ec600a68e1cf4f9ba | b3036ca801b2f5a504206cd4796739b7766c0cdd | refs/heads/master | 2020-12-20T07:06:26.413135 | 2020-01-24T12:12:40 | 2020-01-24T12:12:40 | 235,996,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,636 | py | import math
import plotly.graph_objects as go
import numpy as np
import sys
# Redirect every subsequent print() into the file 'DANE' (the handle is
# left open for the lifetime of the program).
sys.stdout = open('DANE', 'w')
# Result buffers for the (currently commented-out) precision test below;
# `iterate` is the shared write cursor into them.
y2_fin = np.zeros(1000000)
y3_fin = np.zeros(1000000)
iterate = 0
def rel_err(a, b):
    """Relative error of the approximation b with respect to the true e**a."""
    exact = math.exp(a)
    return abs((exact - b) / exact)
# ZE WZORU TAYLORA
def taylor_e(x, n):
    """Approximate e**x with the first n Maclaurin terms x**i / i!,
    summed in natural order (i = 0 upward).

    Bug fix: the original multiplied the running power by x already in
    iteration 0, so every term with i >= 1 used x**(i+1) / i! instead of
    x**i / i! (e.g. taylor_e(2, 3) returned 9 instead of 5).
    """
    power = 1  # x**i for the current term
    z = 0
    for i in range(0, n):
        if i == 0:
            z += 1
        else:
            z += power / (math.factorial(i))
        power *= x
    return z
def taylor_e_reverse(x, n):
    """Same n-term Maclaurin partial sum of e**x as taylor_e, but accumulated
    from the smallest term (i = n-1) down to i = 0, which reduces float
    round-off when adding many terms of shrinking magnitude.

    Note: like the original, this mutates the local copy of x as the running
    power, raising it to x**(n-1) first and dividing it back down per term.
    """
    base = x
    total = 0
    # Raise the running power up to base**(n-1) for the smallest term.
    for _ in range(n - 2):
        x *= base
    for k in reversed(range(n)):
        if k:
            total += x / math.factorial(k)
        else:
            total += 1
        x /= base
    return total
# ZE WZORU NA POPRZEDNI WYRAZ
def taylor_e_rec(x, n):
    """Return the n-th Maclaurin term of e**x, i.e. x**n / n!, built from the
    recurrence t_n = t_(n-1) * (x / n) with t_0 = 1."""
    return 1 if n == 0 else taylor_e_rec(x, n - 1) * (x / n)
def sum_e_rec(x, n):
    """Sum of the first n Maclaurin terms of e**x (i = 0 .. n-1), using the
    same recurrence term_i = term_(i-1) * x/i as taylor_e_rec.

    Improvement: the term is carried incrementally instead of being rebuilt
    from scratch for every i, turning the original O(n**2) multiplications
    into O(n) while producing bit-identical term values.
    """
    z = 0
    term = 1  # x**i / i!, starting at i = 0
    for i in range(0, n):
        z += term
        term *= x / (i + 1)
    return z
def sum_e_reverse_rec(x, n):
    """Sum of the first n Maclaurin terms of e**x accumulated in reverse,
    from the smallest term (i = n-1) up to i = 0.

    Improvement: terms are generated once in forward order with the
    recurrence term_i = term_(i-1) * x/i (bit-identical to taylor_e_rec)
    and then added in reverse, replacing the original O(n**2) per-term
    recomputation with O(n) work while keeping the same addition order.
    """
    terms = []
    term = 1  # x**i / i!, starting at i = 0
    for i in range(0, n):
        terms.append(term)
        term *= x / (i + 1)
    z = 0
    for t in reversed(terms):
        z += t
    return z
def precision_test(y11, y22, x):
    """For increasing term counts j, store in y22[j] the truncation error of
    the Taylor approximation of e**x and report the first j that reaches
    1e-6 accuracy (the Polish message prints j and the ratio j/x).

    NOTE(review): `test_function` is not defined anywhere in this file, so
    calling precision_test as written raises NameError -- presumably it
    wrapped taylor_e(x, j); confirm before re-enabling the commented-out
    TEST 2 driver below.
    """
    global iterate  # shared write cursor into the module-level y2_fin/y3_fin
    flag = True
    print('e^' + str(x) + '\n')
    for j in range(y11.shape[0]):
        y22[j] = math.exp(x) - test_function(x, j, taylor_e)
        # First j whose error drops below 1e-6: record it and stop.
        if y22[j] < 0.000001 and flag is True:
            print(
                'Przy ' + str(j) + ' wyrazach osiagamy dokladnosc 10^-6, stosunek ilosci wyrazow do argumentu = ' + str(
                    j / x))
            y2_fin[iterate] = j
            y3_fin[iterate] = j / x
            iterate = iterate + 1
            flag = False
            break
    print('\n\n')
def absolute(zabs):
    """Absolute value of a real number.

    Idiom fix: delegates to the built-in abs() instead of the hand-rolled
    compare-and-negate; identical for the real scalars used in this file.
    """
    return abs(zabs)
# Sweep x over (0, 0.2] in one million steps and record, for each of the
# four 10-term implementations, its absolute error against math.exp(x).
x = np.arange(0.0000002, 0.2000002, 0.0000002)
y1 = np.zeros(1000000)
y2 = np.zeros(1000000)
y3 = np.zeros(1000000)
y4 = np.zeros(1000000)
for j in range(x.shape[0]):
    # Absolute truncation error of each 10-term approximation at x[j].
    z1 = absolute(taylor_e(x[j], 10)-math.exp(x[j]))
    z2 = absolute(taylor_e_reverse(x[j], 10)-math.exp(x[j]))
    z3 = absolute(sum_e_rec(x[j], 10)-math.exp(x[j]))
    z4 = absolute(sum_e_reverse_rec(x[j], 10)-math.exp(x[j]))
    print(z1, z2, z3, z4)
    y1[j] = z1
    y2[j] = z2
    y3[j] = z3
    y4[j] = z4
    print(j)
fig1 = go.Figure()
fig2 = go.Figure()
fig3 = go.Figure()
fig4 = go.Figure()
fig5 = go.Figure()
fig1.add_trace(go.Scatter(x=x, y=y1,
mode='lines',
name='Wzor Taylora'))
fig1.add_trace(go.Scatter(x=x, y=y2,
mode='lines',
name='Wzor Taylora sumowany odwrotnie'))
fig2.add_trace(go.Scatter(x=x, y=y3,
mode='lines',
name='Ze wzoru na poprzedni wyraz'))
fig2.add_trace(go.Scatter(x=x, y=y4,
mode='lines',
name='Ze wzoru na poprzedni, \n sumowany odwrotnie'))
fig3.add_trace(go.Scatter(x=x, y=y1,
mode='lines',
name='Wzor Taylora'))
fig3.add_trace(go.Scatter(x=x, y=y3,
mode='lines',
name='Ze wzoru na poprzedni wyraz'))
fig4.add_trace(go.Scatter(x=x, y=y1,
mode='lines',
name='Wzor Taylora'))
fig5.add_trace(go.Scatter(x=x, y=y3,
mode='lines',
name='Ze wzoru na poprzedni wyraz'))
fig1.show()
fig2.show()
fig3.show()
fig4.show()
fig5.show()
fig1.write_image("C:/Users/Domi/Desktop/prgrmng/fig1.pdf")
fig2.write_image("C:/Users/Domi/Desktop/prgrmng/fig2.pdf")
fig3.write_image("C:/Users/Domi/Desktop/prgrmng/fig3.pdf")
#
# # TEST 2 - HIPOTEZA H1 - TEST ILOSCI SKLADNIKOW
#
#
# y1 = np.arange(1000000)
# y2 = np.zeros(1000000)
# y1_fin = np.arange(1, 5.00000, 0.000004)
#
# for i in np.arange(1, 5.00000, 0.000004):
# precision_test(y1, y2, i)
#
#
# fig5 = go.Figure()
# fig5.add_trace(go.Scatter(x=y1_fin, y=y2_fin,
# mode='lines',
# name='Pierwszy z dokladnoscia 10^-6'))
# fig5.add_trace(go.Scatter(x=y1_fin, y=y3_fin,
# mode='lines',
# name='y/x'))
#
# fig5.show()
# Referencje do kodu:
# https://blogs.ubc.ca/infiniteseriesmodule/units/unit-3-power-series/taylor-series/maclaurin-expansion-of-ex/
# https://plot.ly/python/reference/
| [
"noreply@github.com"
] | noreply@github.com |
7e6bb6b7c3d1c869203218a0eaf61d13e5e6e0e3 | ea7ec89ec3823b2f05f5c9a426e42905c4b661d3 | /catkin_ws/build/ardrone_autonomy/cmake/ardrone_autonomy-genmsg-context.py | 9ce3078a49947a1187646853c365ad2bd9e38bf6 | [] | no_license | davidhudsont/Senior_Project_SWARM | 0bd2c1bf851b886621e2d814128df135c946e44e | 6e6b9c984de54163b4131624bba7bfc1180df83b | refs/heads/master | 2021-06-03T00:08:07.804922 | 2020-03-25T19:47:39 | 2020-03-25T19:47:39 | 107,053,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,981 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/viki/catkin_ws/src/ardrone_autonomy/msg/matrix33.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_adc_data_frame.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_altitude.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_demo.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_euler_angles.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_games.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_gyros_offsets.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_hdvideo_stream.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_kalman_pressure.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_magneto.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/Navdata.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_phys_measures.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_pressure_raw.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_pwm.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_raw_measures.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_rc_references.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_references.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_time.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_trackers_send.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_trims.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_video_stream.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_vision_detect.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_vision.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_vision_of.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_vision_perf.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_vision_raw.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_watchdog.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_wifi.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/navdata_wind_speed.msg;/home/viki/cat
kin_ws/src/ardrone_autonomy/msg/navdata_zimmu_3000.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/vector21.msg;/home/viki/catkin_ws/src/ardrone_autonomy/msg/vector31.msg"
services_str = "/home/viki/catkin_ws/src/ardrone_autonomy/srv/CamSelect.srv;/home/viki/catkin_ws/src/ardrone_autonomy/srv/FlightAnim.srv;/home/viki/catkin_ws/src/ardrone_autonomy/srv/LedAnim.srv;/home/viki/catkin_ws/src/ardrone_autonomy/srv/RecordEnable.srv"
pkg_name = "ardrone_autonomy"
dependencies_str = "geometry_msgs;std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "ardrone_autonomy;/home/viki/catkin_ws/src/ardrone_autonomy/msg;geometry_msgs;/opt/ros/indigo/share/geometry_msgs/cmake/../msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"hkdavid74@gmail.com"
] | hkdavid74@gmail.com |
24875a336f66ccd4c114ada3a3e42c2d603c2639 | e81d274d6a1bcabbe7771612edd43b42c0d48197 | /数据库/03_Redis/day48(主从服务器)/demo/02_python操作redis/01.py | 9cef9a735360a75455cde6d390c9cebd36992a94 | [
"MIT"
] | permissive | ChWeiking/PythonTutorial | 1259dc04c843382f2323d69f6678b9431d0b56fd | 1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61 | refs/heads/master | 2020-05-15T00:50:10.583105 | 2016-07-30T16:03:45 | 2016-07-30T16:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | #推荐网站
#http://python.jobbole.com/87305/
import redis
#获取连接对象 当我们用Redis和StrictRedis创建连接时,其实内部实现并没有主动给我创建一个连接,我们获得的连接是连接池提供的连接,这个连接由连接池管理,所以我们无需关注连接是否需要主动释放关闭的问题。另外连接池有自己的关闭连接的接口,一旦调用该接口,所有连接都将被关闭,连接池的操作不需要程序员管理,系统redis模块自动管理好了。
conn = redis.StrictRedis('127.0.0.1',6379,password=123456)
#如果是多个增删改,使用管道对象,默认先存在管道中,当execute时候,保存到数据库文件中
pip = conn.pipeline()
pip.set('a',1)
pip.set('b',2)
pip.set('c',3)
#提交
pip.execute()
#查询的时候,可以使用pip,也可以使用conn对象
print(conn.get('a'))
print('哦了')
| [
"1025212779@qq.com"
] | 1025212779@qq.com |
2690ccbc7a050b0c554ac9b2591656a86135599d | 17078503a7d75f8ef3875c3df5effc8eaf5b67dd | /TurboCopCLI/turbocop.py | 7e020fea528c1d725f579e42811768824da94cd3 | [] | no_license | dpleshkov/turbocop | 4feff74aaa73e1d358226036127dccf4e83a6dc2 | 773f8ce2048422c3aff6a42e87e5ba61a353a38f | refs/heads/master | 2023-08-30T09:54:41.259976 | 2021-05-14T03:55:43 | 2021-05-14T03:55:43 | 223,249,995 | 0 | 0 | null | 2021-08-12T09:33:14 | 2019-11-21T19:33:45 | JavaScript | UTF-8 | Python | false | false | 1,901 | py | #!/usr/local/bin/python3
from time import sleep
from webbrowser import open as web_open
from termcolor import cprint
from json import loads as json_loads
from urllib.request import urlopen
from sys import exit as sys_exit
# TurboCop: poll Supreme's mobile_stock.json until the target item appears,
# then open its shop page in the default browser.
cprint("Welcome to TurboCop™.", "red")
cprint("Program Version: Nov 28 2019 dev release", "yellow")
# Ask the user whether to search the stock feed by item id or by exact name.
cprint("Would you like to cop by item ID or item name?", "cyan")
search_mode = input("[id, name] > ")
while search_mode not in ("id", "name"):
    search_mode = input("Please enter either \"id\" or \"name\" > ")
# NOTE(review): this prompt is dead -- the answer is never read; it looks
# like a leftover from a planned multi-item feature. Confirm before removal.
cprint("How many different items are you buying?", "cyan")
# Defaults used when the corresponding search mode is not selected.
item_id = 1
item_name = "Handbag"
if search_mode == "id":
    cprint("Please enter target item ID.", "cyan")
    item_id = input("> ")
elif search_mode == "name":
    cprint("Please enter exact item name (as seen in mobile_stock.json).", "cyan")
    item_name = input("> ")
if search_mode == "id":
    # Cheap substring test against the raw JSON text, retried ~10x/second.
    while True:
        mobile_stock = urlopen("https://www.supremenewyork.com/mobile_stock.json")
        stock = mobile_stock.read().decode()
        if item_id in stock:
            cprint("Found! Opening shop webpage.")
            web_open("https://www.supremenewyork.com/shop/%s" % item_id)
            sys_exit()
        print("Not found, retrying...")
        sleep(0.1)
elif search_mode == "name":
    # Full JSON parse: scan every category for an exact item-name match.
    while True:
        mobile_stock = urlopen("https://www.supremenewyork.com/mobile_stock.json")
        stock = mobile_stock.read().decode()
        stock_json = json_loads(stock)
        products = stock_json["products_and_categories"]
        for category in products:
            for item in products[category]:
                if item["name"] == item_name:
                    cprint("Found! Opening shop webpage.")
                    web_open("https://www.supremenewyork.com/shop/%s" % item["id"])
                    sys_exit()
        print("Not found, retrying...")
        sleep(0.1)
| [
"dpleshkov.usa@gmail.com"
] | dpleshkov.usa@gmail.com |
b3eb7fb7fa1348c16caf5a59c321cebec111e272 | a2b898d08c228381c03dbeb7ef431cce5a8b5bc4 | /deep-tree/cheat/test_deep_tree.py | d3f0eab4e4bf534ecc00917f99558e5bb5f278f4 | [] | no_license | jtv/eohn-python | e6295b0aebf03c8a07f502da442067de2573d98c | f0b97ec35e983c313a28b528ab79243fe819d634 | refs/heads/master | 2023-08-15T02:40:48.129929 | 2017-11-27T10:54:43 | 2017-11-27T10:54:43 | 111,695,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | """Tests for deep_tree.
We'll try to test every function in detail.
"""
from deep_tree import (
locate_log,
print_log,
read_log,
)
from given import write_text_file
from helpers import (
make_temp_file,
make_string,
)
import json
from mock import patch
from unittest import TestCase
class TestPrintLog(TestCase):
    """Tests for top-level function."""
    def test_end_to_end(self):
        # Create a fake log file.
        fake_log = make_temp_file(self, 'fake.log')
        contents = make_string("Log content", sep=' ')
        write_text_file(fake_log, contents)
        # Inject a fake config pointing to the fake log file.
        # NOTE(review): fake_config's path is never handed to print_log()
        # below -- presumably make_temp_file puts it in a well-known
        # location that print_log reads by convention; confirm.
        fake_config = make_temp_file(self, 'config.json')
        config_contents = json.dumps({'log-location': fake_log})
        write_text_file(fake_config, config_contents)
        # Temporarily patch deep_tree's imported copy of dump_text.
        # Replace it with a Mock, a magic test double.
        with patch('deep_tree.dump_text') as fake_dump:
            print_log()
        # A Mock records its calls in call_args_list. It got one.
        [call] = fake_dump.call_args_list
        self.assertEqual(call.args, (contents,))
class TestLocateLog(TestCase):
    """Tests for locate_log."""
    def test_returns_config_file_entry(self):
        # locate_log should simply hand back the configured log path.
        expected = make_string('path')
        self.assertEqual(locate_log({'log-location': expected}), expected)
class TestReadLog(TestCase):
    """Tests for read_log."""
    def test_returns_log_contents(self):
        # A file's full text should round-trip through read_log unchanged.
        log_path = make_temp_file(self, 'fake.log')
        expected = make_string("Log content", sep=' ')
        write_text_file(log_path, expected)
        self.assertEqual(read_log(log_path), expected)
    def test_raises_IOError_if_file_not_found(self):
        # A missing path must surface as IOError.
        with self.assertRaises(IOError):
            read_log(make_string('nonexistent'))
| [
"jeroen.vermeulen@eonics.nl"
] | jeroen.vermeulen@eonics.nl |
1a4a84046bb067d8317cba7a3dfb51fef729d588 | abf44e8ac8325e1c95b0d0569baee19b8f725b0a | /1_slide_window/7.py | 79fadc05e2ce3816ac627747f460e59868bd8734 | [] | no_license | terrifyzhao/educative2 | 05994b0e7f4e0c8d4319106eddd48ba1dfe5317d | 00e9d630da117fa9550f2efb2191709734c63c8a | refs/heads/master | 2022-12-24T02:51:18.671842 | 2020-09-24T07:43:08 | 2020-09-24T07:43:08 | 276,569,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | def length_of_longest_substring(arr, k):
    # Sliding window over arr: grow the window by one element per step and
    # shrink it from the left whenever it would contain more than k zeros,
    # so the window always holds a run achievable with at most k flips.
    start = 0
    max_len = 0
    count_1 = 0
    for i in range(len(arr)):
        num = arr[i]
        if num == 1:
            count_1 += 1
        # Window [start, i] holds (i - start + 1 - count_1) zeros; when that
        # exceeds k, drop the leftmost element.
        if i - start + 1 - count_1 > k:
            num = arr[start]
            if num == 1:
                count_1 -= 1
            start += 1
        max_len = max(i - start + 1, max_len)
    return max_len
def main():
    """Exercise length_of_longest_substring on the sample inputs."""
    samples = [
        ([0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1], 2),
        ([0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], 3),
    ]
    for arr, k in samples:
        print(length_of_longest_substring(arr, k))
main()
| [
"zjiuzhou@gmail.com"
] | zjiuzhou@gmail.com |
70c639168190033b173f7a292cbe30b9792aca6f | 11dcfa814a937cd862acc9bdb68c99c5e1717be7 | /fine_refining.py | e53fc771e7aee96d1870998b9a1d022df6fb9d55 | [
"Apache-2.0"
] | permissive | tanveer-hussain/DeepRes-Video-Summarization | 54ee11adabf437838c1134b26c89a8ca3cf9da9f | 2a7bea0d9c665896f45e408274ed1c40c6da70f3 | refs/heads/master | 2022-09-21T07:39:33.032017 | 2021-02-04T06:52:15 | 2021-02-04T06:52:15 | 222,349,250 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,072 | py | # -*- coding: utf-8 -*-
"""
@author: Tanveer
"""
from __future__ import print_function
import numpy as np
import glob
import os
import caffe
import cv2
from scipy.spatial import distance
proto = "Models/deploy.prototxt"
model = "Models/squeezenet_v1.1.caffemodel"
caffe.set_mode_cpu()
net = caffe.Net(proto, model, caffe.TEST)
def extract_frame_features(frame):
    """Run one frame (as read by cv2) through the module-level SqueezeNet
    caffe model and return its pool10 activations as a (1, 1000) numpy
    feature vector.

    NOTE(review): a new Transformer is rebuilt on every call; it could be
    hoisted to module scope if this path is hot.
    """
    resized_image = cv2.resize(frame, (227,227))
    transformer = caffe.io.Transformer({'data':net.blobs['data'].data.shape})
    # HWC -> CHW layout expected by caffe.
    transformer.set_transpose('data',(2, 0, 1))
    # Reverse the channel axis (cv2 loads BGR; (2, 1, 0) flips the order).
    transformer.set_channel_swap('data', (2, 1, 0))
    # Scale pixel values back to the 0-255 range before preprocessing.
    transformer.set_raw_scale('data', 255)
    net.blobs['data'].reshape(1, 3, 227, 227)
    net.blobs['data'].data[...] = transformer.preprocess('data', resized_image)
    net.forward()
    features = net.blobs['pool10'].data[0].reshape(1,1000)
    single_featurevector = np.array(features)
    return single_featurevector
def fine_refine(seq1_features, seq2_features):
    """Return the Euclidean distance between two flattened feature blocks."""
    return distance.euclidean(seq1_features, seq2_features)
def directory_processing():
    """Walk v3/SqueezeNet/Coarse-refine/*.jpg in numeric filename order,
    extract SqueezeNet features per frame, and every 5 frames compare the
    stacked feature block (5 x 1000 = 5000 values) with the previous block;
    frames whose block distance reaches the threshold (7000 for SqueezeNet)
    are written to v3/SqueezeNet/Candidate-keyframes as candidate keyframes.
    """
    frames_counter = -1  # counts processed files; only used as a tally
    seq_features = []  # feature vectors of the current 5-frame block
    previous_seq_features = np.zeros(5000)  # flattened previous block
    video_number = 'v3'
    path_for_video = video_number + "\\SqueezeNet\\Coarse-refine\\*.jpg"
    images_path = glob.glob(path_for_video)
    images_path = sorted(f for f in images_path)
    images_names = []
    #images_path.sort(key=lambda f: int(filter(str.isdigit, f)))
    # Collect bare numeric filenames (without extension) for numeric sorting.
    for single_image in images_path:
        frames_counter = frames_counter + 1
        #image = cv2.imread(single_image)
        tinu = os.path.basename(single_image)
        tinu = os.path.splitext(tinu)[0]
        images_names.append(tinu)
    images_names = np.array(images_names,dtype=int)
    images_names = np.sort(images_names,axis=0)
    length = images_names.shape
    length = length[0]
    for index in range(length):
        image_name = images_names[index]
        full_name = video_number + "\\SqueezeNet\\Coarse-refine\\" + str(image_name) + '.jpg'
        image = cv2.imread(full_name)
        print ('Processing:', full_name)
        single_featurevector = extract_frame_features(image)
        seq_features.append(single_featurevector)
        # Every 5th frame: flatten the block and compare with the last one.
        if index%5 == 4:
            temp = np.asarray(seq_features)
            temp = temp.reshape(5000)
            features_distance = fine_refine(previous_seq_features,temp)
            previous_seq_features = temp
            seq_features = []
            print ('Distance ************************ = ', features_distance)
            if features_distance >= 7000: #7000 for squeezenet, 40000 for mobilenet, #49 for googlenet, #30 for alexnet
                name = video_number + '\\SqueezeNet\\Candidate-keyframes\\'+str(index)+'.jpg'
                cv2.imwrite(name,image)
                print ('Frame written ', name, ',distance = ',features_distance)
            #print ('m i = ' , features_distance)
            cv2.imshow('F',image)
            cv2.waitKey(1)
# Run the pipeline on import/execution of this script.
directory_processing()
| [
"40714349+tanveer-hussain@users.noreply.github.com"
] | 40714349+tanveer-hussain@users.noreply.github.com |
ef5ee3d5be4ed2ccc483f52d41f60c57932a9c32 | ea62b28a735066cd48b4c0b04eb4b44ea9efc8c0 | /logicaloperators.py | a8f9add8fb3c02c3cce7063f07df80b39bd71b5d | [] | no_license | dsv48/python | 7598f2de64e6fd899e97c6c7d9adc272afbbfddd | d2b99ef9d94991c1b090c36abee7719d47f70a72 | refs/heads/master | 2020-11-25T12:55:41.572337 | 2019-12-23T18:42:44 | 2019-12-23T18:42:44 | 228,662,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | a = True
b = False
print(a and b)
print(a or b)
print(not a)
print(not b) | [
"dev1@gmail.com"
] | dev1@gmail.com |
711b1bad3bc88ffe27fe6f94932ad3d106dabdd2 | e0ff2350d7f73143bbada6be525615a9c573a06f | /myclang/enumerations.py | c69e010489de4be3c2d8bd467b585e1477f9ba6a | [
"Apache-2.0"
] | permissive | FindDefinition/myclang | 19aeac05a58c269903e5aeda6c61689d5500def2 | 3fa641f3b4fa87e0d3b1d840288e61442293036a | refs/heads/master | 2023-04-09T19:26:03.407365 | 2021-04-20T14:02:08 | 2021-04-20T14:02:08 | 309,330,283 | 0 | 0 | Apache-2.0 | 2020-12-19T10:19:51 | 2020-11-02T10:07:21 | C++ | UTF-8 | Python | false | false | 1,102 | py | #===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
# Each entry is a ('NAME', value) pair mirroring libclang's CXTokenKind
# values; consumers import this table and build richer objects from it.
TokenKinds = [
    ('PUNCTUATION', 0),
    ('KEYWORD', 1),
    ('IDENTIFIER', 2),
    ('LITERAL', 3),
    ('COMMENT', 4),
]
# Explicit public API of this module.
__all__ = ['TokenKinds']
| [
"yanyan.sub@outlook.com"
] | yanyan.sub@outlook.com |
f860c44d119e4913458cb56676421287f0a10d13 | d543fc6acfc0e17c91312ce5a5e7d72a586e472b | /v3/utilityV3/utils.py | 5b4920727accf49bf74183a016bd3b13cd6c228f | [
"MIT"
] | permissive | workofart/ml-trading-playground | 7b6763a46905e5efffd0b8d88d182d56d79cea04 | 1c619a1917574220798a749fbad2ba1782207362 | refs/heads/master | 2021-12-15T03:26:57.466282 | 2021-12-06T01:47:49 | 2021-12-06T01:47:49 | 169,917,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,854 | py | import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize as norm2
def read_data(filename, ticker):
    """Load a price CSV from ../../data/<filename> (relative to this file),
    optionally filter it to one ticker, and return a DataFrame with columns
    high/low/price/volume indexed by ascending timestamp.

    NOTE(review): assumes the CSV provides 'ticker', 'timestamp', 'high',
    'low', 'price' and 'volume' columns -- confirm against the data files.
    A falsy `ticker` (None/'' etc.) skips the ticker filter entirely.
    """
    data = pd.read_csv(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'data', filename)))
    if ticker:
        data = data[data['ticker'] == ticker]
    data['timestamp'] = pd.to_datetime(data.timestamp)
    data = data[['high', 'low', 'price', 'volume', 'timestamp']].sort_values(by='timestamp')
    data = data.set_index('timestamp')
    return data
def normalize(data):
    """Column-wise z-score of a 2-D array.

    axis=0 aggregates over rows, so every column is shifted by its own mean
    and scaled by its own (population) standard deviation, computed in
    float64 for stability.
    """
    mean = np.mean(data, axis=0, keepdims=True)
    std = np.sqrt(np.var(data, axis=0, dtype=np.float64, keepdims=True))
    return (data - mean) / std
def generate_datasets(data):
    """Build normalized train/test splits from the price DataFrame.

    Features are (price, high, low, volume) at time N; the target is price
    at time N+1.  Returns (X_train, X_test, Y_train, Y_test) as 2-D numpy
    arrays with samples in rows, split 67/33 with random_state=42 for
    reproducibility.
    """
    # price, high, low, volume at time N are the x-vars
    # price at time N + 1 is the y-var
    X = data[['price', 'high', 'low', 'volume']][0: -1]
    Y = data[['price']][1:]
    X = (X.values)
    Y = (Y.values)
    assert (X.shape[0] == Y.shape[0]) # number of samples match
    assert (X.shape[1] == 4)
    assert (Y.shape[1] == 1)
    # Z-score each column (see normalize above); the sklearn norm2 variant
    # was tried and deliberately left disabled.
    X = normalize(X)
    Y = normalize(Y)
    # X = norm2(X, axis=0) # Currently disabled
    # Y = norm2(Y, axis=0) # # Currently disabled
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.33, random_state=42)
    # Due to the differences between Keras and the hand-coded forward/backward prop implementations
    # the orientation of the data is different. shape = (row, col), where row = # samples, col = # features
    # Therefore, transposition is not necessary
    # X_train = X_train.T
    # X_test = X_test.T
    # Y_train = Y_train.T
    # Y_test = Y_test.T
    return X_train, X_test, Y_train, Y_test
def evaluate_result(pred, x, y, mode):
    """Plot the first 100 predicted vs. true normalized prices.

    `mode` only labels the plot title (e.g. "Train"/"Test").
    NOTE(review): parameter `x` is never used in this function.
    """
    plt.plot(np.squeeze(pred)[0:100], marker=None,
             color='red', markersize=1, linewidth=1)
    plt.plot(np.squeeze(y)[0:100], marker=None,
             color='blue', markersize=1, linewidth=1)
    plt.ylabel('normalized price')
    plt.xlabel('time step')
    plt.title(mode + " Predicted Prices")
    plt.legend(['predict', 'true'], loc='upper left')
    plt.show()
def plot_trades(prices, actions):
    """Plot the price series and mark each timestep's action on it:
    action 0 is drawn as a green 'x', action 1 as a red 'x'.

    NOTE(review): a buy/sell reading of 0/1 is only inferred from the
    colors -- confirm against the agent's action encoding.  Action 2 is
    deliberately left unmarked (commented out below).
    """
    plt.plot(prices)
    for i, action in enumerate(actions):
        if action == 0:
            plt.annotate('x', xy=(i, prices[i]), color='green')
        elif action == 1:
            plt.annotate('x', xy=(i, prices[i]), color='red')
        # elif action == 2:
        #     plt.annotate('x', xy=(i, np.mean(prices)), color='yellow')
    plt.ylabel('Prices')
    plt.xlabel('Timesteps')
    plt.show()
def plot_reward(rewards):
    """Line-plot the average reward against the timestep index."""
    plt.plot(rewards)
    plt.ylabel('Avg Reward')
    plt.xlabel('Timesteps')
plt.show() | [
"hanxiangp@gmail.com"
] | hanxiangp@gmail.com |
957032a1a9290967c599011dccdf78a23bb502f8 | c45dde290697c10a652e2a5c47d29b59956c29ed | /YALES2/PostProc.py | 5a2ba9ae30605b3fb649f2dae006a42394edb856 | [] | no_license | gmclove/pymoo-OpenFOAM-YALES2 | c73092f12d481c4b8093edbcb99021bcd32153d1 | e75abaf8347da29db23574444f4fbee46ae84ce1 | refs/heads/main | 2023-02-13T10:49:13.871800 | 2021-01-08T20:37:27 | 2021-01-08T20:37:27 | 328,003,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | import os
import subprocess
import numpy as np
class PostProc:
    """Post-process a YALES2 run: read 'ics_temporals.txt' and compute a
    mean-drag objective for each individual in the design matrix self.x.

    NOTE(review): the incoming `x` and `gen` arguments are ignored -- `x`
    is immediately overwritten with a hard-coded single individual and
    `gen` is never read; `dataFile` is assigned but unused.  The lift
    objective is stubbed out (commented) below.
    """
    def __init__(self, x, gen):
        self.x = np.array([[0, 1]]) #[[0, 1]] # x
        # create array for objectives
        self.obj = []
        dataFile = 'ics_temporals.txt'
        # Extract parameters for each individual
        for ind in range(len(self.x)):
            para = self.x[ind, :] #x[ind, :]
            omega = para[0]
            freq = para[1]
            print('Individual: '+str(ind))
            print('Parameters: '+str(para))
            print('Omega: '+str(omega))
            print('Freq: '+str(freq))
            ####### Extract data from case file ########
            data = np.genfromtxt('ics_temporals.txt', skip_header=1)
            # collect data after 8 seconds
            # (noffset keeps only the final ~20% of the recorded samples)
            noffset = 8 * data.shape[0] // 10
            # extract P_OVER_RHO_INTGRL_(1) and TAU_INTGRL_(1)
            p_over_rho_intgrl_1 = data[noffset:, 4]
            tau_intgrl_1 = data[noffset:, 6]
            ######## Compute Lift and Drag ##########
            # Drag objective: mean of (pressure - shear) integrals over the
            # settled tail of the signal.
            drag = np.mean(p_over_rho_intgrl_1 - tau_intgrl_1)
            # lift = np.mean()
            obj_i = [drag] #,lift]
            self.obj.append(obj_i)
# --- Module-level parameter sweep driver ---
# Runs the external solver for every (Omega, freq) pair, extracts the mean
# drag from the dumped temporals file, and archives each run directory.
localpath = os.getcwd()
a_Omega = np.array([50., 250., 500.] )
a_freq = np.array([5., 10., 20.])
drag = np.zeros((len(a_Omega),len(a_freq)))
iOmega = 0
for Omega in a_Omega:
    ifreq = 0
    for freq in a_freq:
        try:
            os.mkdir("dump")
        except OSError:
            print("dump already exists")
        # NOTE(review): `modif_in` and `plt` are not defined or imported in
        # this file -- running this module as-is raises NameError. Confirm
        # the missing imports (matplotlib.pyplot and the module providing
        # modif_in).
        modif_in(Omega, freq)
        sim = subprocess.run(["sh", "sim.sh"])
        data = np.genfromtxt('dump/ics_temporals.txt', skip_header = 1)
        # Skip the first 80% of rows (transient part of the signal).
        noffset = 8*data.shape[0]//10
        plt.plot(data[noffset:,3], data[noffset:,4] - data[noffset:,6],'-',label=r"$\Omega = %.1f, f= %.1f$"%(Omega,freq))
        print("Omega = %.1f, f= %.1f, Drag= %.4e"%(Omega,freq,np.mean(data[noffset:,4] - data[noffset:,6])))
        # Archive this run's dump under a name encoding its parameters.
        os.replace("dump","run_O"+ "{0:.1f}".format(Omega) + "_f" +"{0:.1f}".format(freq))
        drag[iOmega,ifreq] = np.mean(data[noffset:,4] - data[noffset:,6])
        ifreq += 1
    iOmega +=1
plt.legend()
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
e64dd0cbc65a12a5db722c9d9271767c3209999a | 6db9df0fc8629ca61601d8b556d9389feaac657c | /sources/ch06/heritage/rectangle.py | d4b6e8ef06c815d07c00312c51ee91516639bf15 | [] | no_license | mba-tradelab/programmation_python_mathematiques | 2297d618957cb22caaad4ab9f55640e12316eceb | 8b79dffbe0a01ff67aed5d337a9d975f5e289866 | refs/heads/main | 2023-01-12T01:55:48.693084 | 2020-11-06T19:08:43 | 2020-11-06T19:08:43 | 248,700,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
########################################################################
# (C) Alexandre Casamayou-Boucau, Pascal Chauvin, Guillaume Connan #
# #
# Complément de l'ouvrage : #
# Programmation en Python pour les mathématiques #
# Editeur : Dunod - Collection : Sciences Sup #
# ISBN-13: 978-2100738311 - Licence : GPLv2 #
########################################################################
from quadrilatere import *
class rectangle(quadrilatere):
    """A rectangle: a quadrilateral whose opposite sides are equal."""

    def __init__(self, a=0, b=0):
        # Side lengths alternate a, b, a, b around the perimeter.
        super().__init__(a, b, a, b)
        self.type = "rectangle"
if __name__ == "__main__":
    # Smoke test: __str__ and perimetre() are expected to come from the
    # quadrilatere base class (TODO confirm -- they are not defined here).
    r = rectangle(4, 3)
    print(r)
    print(r.perimetre())
| [
"matthieu.brito.antunes@gmail.com"
] | matthieu.brito.antunes@gmail.com |
81f5316150af9c908dd4b3ef8628cca2b90db2b0 | 8fc7635b84b42e61b7efb9eaf7215394b5b5790a | /aliennor-backend copy/aliennorDjangoBackend/aliennorDjangoBackend/wsgi.py | 021b6e96cb9641200f626f50804bb038f497e40a | [] | no_license | phamcong/aliennor-platform | f1e8470aab7ed634859e071f6028931f576ddf3e | e1d71532426ac9414d2158d50ee34c32257618f0 | refs/heads/master | 2021-05-14T17:08:08.629564 | 2018-02-17T23:35:07 | 2018-02-17T23:35:07 | 116,038,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for aliennorDjangoBackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aliennorDjangoBackend.settings")
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"ccuong.ph@gmail.com"
] | ccuong.ph@gmail.com |
38351f81842a4db9105255ea57908f440eb430a9 | 99063cd01e8c148cf62556c54e2c5c85767263bf | /license/migrations/0006_auto_20170620_2306.py | 825d85a5e4c4f3381f03ddc16f27fc10da5f1e20 | [] | no_license | Fynjiby/ZSP | 316f37e47d2ac97d395486d21f6ec9768df46431 | 0bfe3542f78cfdb658d323d81a2288e1241e43e0 | refs/heads/master | 2021-05-02T08:36:18.699291 | 2018-02-08T19:49:22 | 2018-02-08T19:49:22 | 120,805,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 20:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: creates `groupLicenseImg` and
    re-points `licenseimg` from a direct license FK to its new image group.
    The u-escaped verbose names are Cyrillic (Russian) admin labels."""

    dependencies = [
        ('license', '0005_auto_20170619_2214'),
    ]
    operations = [
        migrations.CreateModel(
            name='groupLicenseImg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('license', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='license.license')),
            ],
            options={
                'verbose_name': '\u0413\u0440\u0443\u043f\u043f\u0430 \u043a\u0430\u0440\u0442\u0438\u043d\u043e\u043a \u043b\u0438\u0446\u0435\u043d\u0437\u0438\u0438',
                'verbose_name_plural': '\u0413\u0440\u0443\u043f\u043f\u044b \u043a\u0430\u0440\u0442\u0438\u043d\u043e\u043a \u043b\u0438\u0446\u0435\u043d\u0437\u0438\u0439',
            },
        ),
        migrations.AlterModelOptions(
            name='licenseimg',
            options={'verbose_name': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0430 \u043b\u0438\u0446\u0435\u043d\u0437\u0438\u0438', 'verbose_name_plural': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043b\u0438\u0446\u0435\u043d\u0437\u0438\u0439'},
        ),
        migrations.RemoveField(
            model_name='licenseimg',
            name='license',
        ),
        migrations.AddField(
            model_name='licenseimg',
            name='group',
            # default=1 back-fills existing rows; preserve_default=False
            # keeps the default out of the final model state.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='license.groupLicenseImg'),
            preserve_default=False,
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
e4677ab1274dc97869b710e764fc2d18a1774ce5 | 0df5c7a5385098861b6b1a3486c9ca8a11bd8c23 | /maps/generate_maps.py | c7f787ec462c00b13f8068baea70b3faa89377f6 | [] | no_license | MarkDabe/CS440_1 | 69a79c50f84013c88ce3bc2d8164e410e431eec0 | a5527df215e61de95cee1cc71aa69528fb5d26de | refs/heads/master | 2020-04-24T18:00:14.712342 | 2019-02-26T04:22:34 | 2019-02-26T04:22:34 | 172,166,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | from random import random
def build_grid(num_maps=50, size=101, obstacle_prob=0.30):
    """Generate `num_maps` random square maps and write each to map_<i>.txt.

    Cell values: 0 = free, 1 = obstacle (drawn independently with
    probability `obstacle_prob`), 2 = start (top-left corner),
    -1 = goal (bottom-right corner). The defaults reproduce the original
    behaviour: 50 maps of 101x101 with ~30% obstacles. Each file holds
    the Python repr of the nested list, as before.
    """
    for counter in range(1, num_maps + 1):
        grid = [[1 if random() < obstacle_prob else 0 for _ in range(size)]
                for _ in range(size)]
        # Start and goal markers override any obstacle in the corners.
        grid[0][0] = 2
        grid[size - 1][size - 1] = -1
        filename = "map_{}.txt".format(counter)
        # `with` guarantees the file is closed even if the write fails.
        with open(filename, "w") as f:
            f.write("{}".format(grid))


if __name__ == "__main__":
    build_grid()
"markhabashy91@gmail.com"
] | markhabashy91@gmail.com |
c5b1ad656441fd3672b9400a47c2232d2e21ad2a | 17a0ee47a7488b5bf1964118bc5bf3a78145dcb3 | /plusOneRecursive.py | 50051aa7f28351a9576875c7003aa29e4f3209e9 | [] | no_license | kapil87/LeetCode | fec40c0b461495e3f8a8ea800ee0dc0c1e9e4e0f | 1412ea8147fbda3bf60c7fa342dc4843c6a865d0 | refs/heads/master | 2021-07-19T21:07:08.506702 | 2020-06-28T02:12:13 | 2020-06-28T02:12:13 | 63,844,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
def addOne(digits, index):
#print 'Returning with index: %s, digits: %s'% (index, digits)
if index == 0 and digits[index] < 9:
digits[index] += 1
return digits
elif index == 0 and digits[index] == 9:
digits[index] = 0
#temp = [0]*(len(digits)+1)
#temp[0] = 1
digits.insert(0,1)
#return temp
return digits
elif digits[index] == 9:
digits[index] = 0
return addOne(digits, index-1)
else:
digits[index] +=1
return digits
return addOne(digits, len(digits)-1)
| [
"noreply@github.com"
] | noreply@github.com |
0a4d4d56b30b5adc4cd0dd2722fb1366ef8b83f6 | 3f5b816d48fdb1e6580e457c444c84f753599d32 | /src/Ruuvi_GW/ruuvi_status.py | eb9fb69aab6572c04edae08cc8eea3387bba8a2b | [] | no_license | theBASTI0N/ruuvi.gateway.micropython | 1f3e8be7a0825a3c6c63a4897b71495453e297d7 | 9637eb271696234885c1bbb4487aef0ae95f875e | refs/heads/ruuvi_gw | 2022-11-29T21:48:04.437305 | 2021-02-04T11:36:07 | 2021-02-04T11:36:07 | 250,762,893 | 3 | 1 | null | 2022-11-10T14:22:54 | 2020-03-28T10:08:11 | Python | UTF-8 | Python | false | false | 983 | py | from micropython import const
# Status / error codes for the Ruuvi endpoints protocol. Codes are bit
# flags, so several error conditions can be combined with OR. The `#//!<`
# trailers appear to be Doxygen comments carried over from C headers --
# TODO confirm against the upstream ruuvi.endpoints sources.
RE_SUCCESS = const(0)
RE_ERROR_DATA_SIZE = const(1 << 3) #//!< Data size too large/small.
RE_ERROR_INVALID_PARAM = const(1 << 4) #//!< Invalid Parameter.
RE_ERROR_NULL = const(1 << 11) #//!< Null Pointer.
RE_ERROR_ENCODING = const(1 << 12) #//!< Data encoding failed.
RE_ERROR_DECODING = const(1 << 13) #//!< Data decoding failed.
RE_ERROR_DECODING_LEN = const(1 << 14) #//!< Data decoding len failed.
RE_ERROR_DECODING_DELIMITER = const(1 << 15) #//!< Data decoding delimiter failed.
RE_ERROR_DECODING_STX = const(1 << 16) #//!< Data decoding stx failed.
RE_ERROR_DECODING_ETX = const(1 << 17) #//!< Data decoding etx failed.
RE_ERROR_DECODING_CRC = const(1 << 18) #//!< Data decoding crc failed.
RE_ERROR_DECODING_CMD = const(1 << 19) #//!< Data decoding cmd failed.
RE_ERROR_NOT_IMPLEMENTED = const(1 << 24) #//!< Not implemented yet. | [
"thebasti0ncode@gmail.com"
] | thebasti0ncode@gmail.com |
48491a01b1fd6bc95774d3c6ee18ff3a061288f6 | 0a1e0da3020f1d0f90490424c58c410ffeef7b5d | /node_modules/fsevents/build/config.gypi | 49cc3561e588159166076b260c401697588b7429 | [
"MIT"
] | permissive | lkaudasch/lkaudasch.github.io | d56946ee21477dd0c3c6cb98977e5e6b226cfb7e | 468ed3a4a1d873e2aceef13a9c6de8dfa65859e7 | refs/heads/master | 2021-01-09T15:49:11.559119 | 2020-03-01T15:12:34 | 2020-03-01T15:12:34 | 242,361,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,471 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/z002nh0/.node-gyp/8.15.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/z002nh0/.nvm/versions/node/v8.15.0/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/z002nh0/.nvm/versions/node/v8.15.0/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/z002nh0/.npm-init.js",
"userconfig": "/Users/z002nh0/.npmrc",
"cidr": "",
"node_version": "8.15.0",
"user": "824244172",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/z002nh0/.nvm/versions/node/v8.15.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "1244359605",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/z002nh0/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.4.1 node/v8.15.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/p4/hggl4fdn5bxf_tm46hk38p9hrk1xyc/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/z002nh0/.nvm/versions/node/v8.15.0"
}
}
| [
"z002nh0@38f9d39f732a.target.com"
] | z002nh0@38f9d39f732a.target.com |
40f118a930e06e6edf455277d99dddcc1d85aa9a | 2e6c95871bd255873fb563347c0f070e6fcdde74 | /ngram_2_model_pca.py | 6b077e23c628515f969ffa99bba1c5e5f09cec87 | [] | no_license | MSBradshaw/BioHackathon2020 | 3203c5232bebd70d2c2a88b7f49063a09da023c4 | 31826b698a408541200b6f75bfe9c03217bf2d1a | refs/heads/master | 2022-08-05T11:57:32.221444 | 2020-05-29T17:30:29 | 2020-05-29T17:30:29 | 258,961,184 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,036 | py | import re
import pandas as pd
from bs4 import BeautifulSoup
import datetime
import time
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
import pickle
import os
def date_to_unix_time(date):
    """Convert a '<Month> <day>, <year>' string to a local-time unix
    timestamp, or None for a missing/empty date."""
    if date is None or date == '':
        return None
    parsed = datetime.datetime.strptime(date, '%B %d, %Y')
    return int(time.mktime(parsed.timetuple()))
def get_n_grams(_text, _n, _gram_dict=None):
    """Count word n-grams of length `_n` in `_text`.

    Trailing punctuation is first split off into its own token so grams
    crossing a punctuation mark can be skipped, and parentheses around a
    single character are unwrapped. Returns a (DataFrame, dict) pair with
    the accumulated counts.

    :param _gram_dict: optional dict of existing counts to accumulate
        into. BUGFIX: this used to be a mutable default argument (`{}`),
        so counts silently leaked between calls relying on the default.
    """
    if _gram_dict is None:
        _gram_dict = {}
    # if a special character is being used as punctuation (not in a name) add a space
    _text = re.sub('(: )', ' \\g<1>', _text)
    _text = re.sub('(- )', ' \\g<1>', _text)
    _text = re.sub('(, )', ' \\g<1>', _text)
    _text = re.sub('(\\. )', ' \\g<1>', _text)
    _text = re.sub('(- )', ' \\g<1>', _text)
    _text = re.sub('(\\? )', ' \\g<1>', _text)
    _text = re.sub('(; )', ' \\g<1>', _text)
    _text = re.sub('(! )', ' \\g<1>', _text)
    # remove parentheses around a single character, e.g. " (a) " -> " a "
    _text = re.sub(' \\(([^ ])\\) ', ' \\g<1> ', _text)
    # remove leading and trailing parenthesis
    _text = re.sub(' \\(', ' ', _text)
    _text = re.sub('\\) ', ' ', _text)
    _text_list = _text.split(' ')
    # create the n-grams
    # _done turns on at the first window that runs past the end of the
    # token list; from then on only (partial) trailing windows remain, so
    # nothing further is counted.
    _done = False
    for _i in range(len(_text_list)):
        _gram = ''
        _skip = False
        for _j in range(_n):
            if _i + _j >= len(_text_list):
                _done = True
                break
            # check if the current item is punctuation, if so skip this gram
            if _text_list[_i + _j] in ['.', ',', '?', ';', '!', ':', '-']:
                _skip = True
                break
            _gram += _text_list[_i + _j] + ' '
        if not _done and not _skip:
            # remove trailing space
            _gram = _gram[:-1]
            if _gram in _gram_dict:
                _gram_dict[_gram] += 1
            else:
                _gram_dict[_gram] = 1
    _gram_df = pd.DataFrame({'gram': list(_gram_dict.keys()), 'count': list(_gram_dict.values())})
    return _gram_df, _gram_dict
def get_df_of_n_grams(_texts, _n):
    """Build a document-term matrix of n-gram counts.

    First pass: accumulate the global n-gram vocabulary over all texts
    (HTML is stripped with BeautifulSoup). Second pass: re-count each
    text on its own and emit one row per text, one column per n-gram.
    Column order is not deterministic (the vocabulary goes through a set).
    Raises TypeError if `_texts` is empty (`_final_df` stays None).
    """
    _dic = {}
    _final_df = None
    for _ab in _texts:
        # shared dict: _dic accumulates counts over the whole corpus
        _final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n, _dic)
    _grams = list(set(_final_df['gram']))
    _article_n_grams = {_x: [] for _x in _grams}
    for _ab in _texts:
        # fresh dict here: per-document counts only
        _final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n,{})
        for _key in _grams:
            if _key in _dic:
                _article_n_grams[_key].append(_dic[_key])
            else:
                _article_n_grams[_key].append(0)
    fake_df_n_grams = pd.DataFrame(_article_n_grams)
    return fake_df_n_grams
# --- Train a linear SVM on PCA-reduced 2-gram counts (module level) ---
train = pd.read_csv('train.csv')
# Cache the (expensive) n-gram dataframe between runs.
pickle_cache = 'grams_2_df.pickle'
if os.path.exists(pickle_cache):
    grams_2 = pickle.load(open(pickle_cache,'rb'))
else:
    grams_2 = get_df_of_n_grams(list(train['abstract']),2)
    pickle.dump(grams_2,open(pickle_cache,'wb'),protocol=4)
X = grams_2.to_numpy()
y = train['type'].to_numpy()
# NOTE(review): PCA is fitted on the *transposed* matrix and
# `pca2.components_` (not `transform`) is then used as the per-sample
# embedding below -- confirm this orientation is intended.
pca2 = PCA(n_components=10)
pca2.fit(grams_2.to_numpy().transpose())
# pca = pickle.load(open('real_fake_pca.pickle','rb'))
clf = svm.SVC(kernel='linear', C=1)
scores = cross_val_score(clf,pca2.components_.transpose(), y, cv=5)
#
# with open('svm-cross-val-pca.txt','w') as outfile:
#     outfile.write(str(scores))
X_train, X_test, y_train, y_test = train_test_split(pca2.components_.transpose(), y, test_size=0.33, random_state=42)
clf.fit(X_train, y_train)
# Persist cross-validation scores, held-out accuracy and raw predictions.
with open('svm-results-pca.txt','w') as outfile:
    outfile.write('Cross Val scores: ' + str(scores) + '\n')
    outfile.write('SVM SCore: ' + str(clf.score(X_test,y_test)) + '\n')
    preds = clf.predict(X_test)
    outfile.write('Predictions: ')
    for p in preds:
        outfile.write(',' + str(p))
| [
"michaelscottbradshaw@gmail.com"
] | michaelscottbradshaw@gmail.com |
def display_message(lesson):
    """Report what this chapter is about on standard output."""
    print(lesson)


display_message("im learning ML and python")
"colin.paul@dell.com"
] | colin.paul@dell.com |
d9690f64f2d87fc9c59628ba17f91eaea2b6a063 | 0dda2261ec49e062e00be848caff3cb566b9fe8f | /sentiment_analysis/simon.py | a0ecb039d1a8e81974ea7012e48bbd8c0d7c3198 | [] | no_license | pswedrak/Embedding-Knowledge-Graphs | 462362982cd0ff823d92746cfe088a5d0e794f9d | cf4b90c4e13561a1492d66a64478b11d5870acda | refs/heads/master | 2023-05-11T10:47:00.075388 | 2021-05-30T17:03:02 | 2021-05-30T17:03:02 | 320,930,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,129 | py | import numpy as np
from sematch.semantic.similarity import WordNetSimilarity
from nltk.corpus import sentiwordnet as swn
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow.python.keras.utils.np_utils import to_categorical
from common.helpers import store_vectors, store_vector
from semantic_similarity.graph_creator import build_graph
from text_processing.yelp_utils import load_vectors
def build_simon_model(reviews, positive_lexicon_words, negative_lexicon_words, neutral_lexicon_words, pos_polarity,
                      neg_polarity, neu_polarity,
                      train_filename, test_filename, train_size, dissim=True, verbose=True, three_classes=False):
    """Compute a SIMON vector for every review and append each to disk.

    The first `train_size` vectors go to `train_filename`, the rest to
    `test_filename`, one store_vector call per review.
    """
    # NOTE(review): train_results/test_results are never filled; they are
    # leftovers from the commented-out store_vectors batching below.
    train_results = []
    test_results = []
    i = 0
    for review in reviews:
        # include_polarity and show are forced off here; dissim is forwarded.
        result = list(
            compute_simon_vector(review, positive_lexicon_words, negative_lexicon_words, neutral_lexicon_words,
                                 pos_polarity,
                                 neg_polarity, neu_polarity, dissim, False, False, three_classes=three_classes))
        if i < train_size:
            store_vector(train_filename, result)
        else:
            store_vector(test_filename, result)
        if verbose:
            print("Simon vector has been computed: " + str(i+1) + "/" + str(len(reviews)))
        i += 1
    # store_vectors(train_filename, train_results)
    # store_vectors(test_filename, test_results)
def compute_simon_vector(review, positive_lexicon_words, negative_lexicon_words, neutral_lexicon_words, pos_polarity,
                         neg_polarity, neu_polarity, dissim=True, include_polarity=False, show=False, three_classes=False):
    """Build the SIMON embedding of one review.

    Each entry of the returned 1-D array is the maximum WordNet `wpath`
    similarity between any token of `review.text` and one lexicon word.
    Columns are laid out as [positive | neutral | negative]; the neutral
    columns are only filled when `three_classes` is True.

    :param review: object exposing a `text` attribute (token sequence).
    :param dissim: also add a dissimilarity correction term (scaled 1/10).
    :param include_polarity: weight the vector by the lexicon polarities.
    :param show: display the similarity matrix as a heatmap.
    """
    wns = WordNetSimilarity()
    input_tokens = review.text
    n_pos = len(positive_lexicon_words)
    n_neu = len(neutral_lexicon_words)
    n_neg = len(negative_lexicon_words)
    similarity_matrix = np.zeros((len(input_tokens), n_pos + n_neu + n_neg))
    for i in range(len(input_tokens)):
        for j in range(n_pos):
            similarity_matrix[i, j] = wns.word_similarity(input_tokens[i], positive_lexicon_words[j][0], 'wpath')
            if dissim:
                similarity_matrix[i, j] += (compute_dissim_coef(input_tokens[i], positive_lexicon_words[j][0]) / 10)
        if three_classes:
            for j in range(n_neu):
                similarity_matrix[i, n_pos + j] = wns.word_similarity(input_tokens[i],
                                                                      neutral_lexicon_words[j][0],
                                                                      'wpath')
                if dissim:
                    similarity_matrix[i, n_pos + j] += \
                        (compute_dissim_coef(input_tokens[i], neutral_lexicon_words[j][0]) / 10)
        for j in range(n_neg):
            # BUGFIX: negative words start after the positive AND neutral
            # columns; the old offset (len(neg) + len(pos)) was only right
            # when the negative and neutral lexicons had the same size.
            similarity_matrix[i, n_pos + n_neu + j] = wns.word_similarity(input_tokens[i],
                                                                          negative_lexicon_words[j][0],
                                                                          'wpath')
            if dissim:
                similarity_matrix[i, n_pos + n_neu + j] += \
                    (compute_dissim_coef(input_tokens[i], negative_lexicon_words[j][0]) / 10)
        print(str(i) + "/" + str(len(input_tokens)))
    if show:
        # `and`/`if-else` instead of bitwise `&`: the flags are booleans.
        if three_classes:
            labels = positive_lexicon_words + neutral_lexicon_words + negative_lexicon_words
        else:
            labels = positive_lexicon_words + negative_lexicon_words
        sns.heatmap(similarity_matrix, xticklabels=labels, yticklabels=input_tokens)
        plt.show()
    if include_polarity:
        # BUGFIX: np.concatenate takes ONE sequence of arrays; the old code
        # passed the lists as separate positional arguments (TypeError).
        # The weights follow the matrix column layout [pos | neu | neg].
        polarity = np.concatenate((pos_polarity, neu_polarity, neg_polarity))
        return np.multiply(np.max(similarity_matrix, axis=0), polarity)
    return np.max(similarity_matrix, axis=0)
def extract_lexicon_words(corpus, size, pos_file_name, neg_file_name, neu_file_name):
    """Select sentiment lexicon words from `corpus` via SentiWordNet.

    Keeps the size//3 strongest positive lemmas, the size//3 strongest
    negative lemmas and size//3 distinct neutral lemmas (zero positive
    and zero negative score), writes each list to its file as
    '<word> <score>' lines, and returns
    (pos_words, neg_words, neu_words, pos_polarity, neg_polarity,
    neu_polarity).
    """
    positive_synsets = {}
    negative_synsets = {}
    pos_words = []
    neg_words = []
    all_neu_words = []
    neu_words = []
    pos_polarity = []
    neg_polarity = []
    neu_polarity = []
    for token in corpus:
        synsets = list(swn.senti_synsets(token))
        if len(synsets) > 0:
            # record the token's best positive and best negative senses
            synsets.sort(key=lambda x: x.pos_score(), reverse=True)
            positive_synsets[synsets[0].synset.name()] = synsets[0].pos_score()
            synsets.sort(key=lambda x: x.neg_score(), reverse=True)
            negative_synsets[synsets[0].synset.name()] = synsets[0].neg_score()
            for synset in synsets:
                # zero positive AND zero negative score counts as neutral
                if (synset.pos_score() == 0) & (synset.neg_score() == 0):
                    all_neu_words.append(synset.synset.name())
    positive_synsets_keys = sorted(positive_synsets, key=positive_synsets.get, reverse=True)
    with open(pos_file_name, "w") as pos_file:
        for i in range(size // 3):
            pos_file.write(positive_synsets_keys[i].split(".")[0] + " " + str(positive_synsets[positive_synsets_keys[i]]))
            pos_file.write('\n')
            pos_words.append(positive_synsets_keys[i].split(".")[0])
            pos_polarity.append(positive_synsets[positive_synsets_keys[i]])
    negative_synsets_keys = sorted(negative_synsets, key=negative_synsets.get, reverse=True)
    with open(neg_file_name, "w") as neg_file:
        for i in range(size // 3):
            neg_file.write(negative_synsets_keys[i].split(".")[0] + " " + str(negative_synsets[negative_synsets_keys[i]]))
            neg_file.write('\n')
            neg_words.append(negative_synsets_keys[i].split(".")[0])
            neg_polarity.append(negative_synsets[negative_synsets_keys[i]])
    with open(neu_file_name, "w") as neu_file:
        i = 0
        j = 0
        # take the first size//3 DISTINCT neutral lemmas, all scored 1.0;
        # raises IndexError if the corpus yields too few neutral synsets
        while i < (size // 3):
            name = all_neu_words[j].split(".")[0]
            if name not in neu_words:
                neu_file.write(name + " " + "1.0")
                neu_file.write('\n')
                neu_words.append(name)
                neu_polarity.append(1.0)
                i += 1
            j += 1
    return pos_words, neg_words, neu_words, pos_polarity, neg_polarity, neu_polarity
def compute_simon_vectors(reviews, pos_words, neg_words, size, three_classes=False):
    """Compute a SIMON embedding (as a plain list) for each review.

    NOTE(review): this call does not match compute_simon_vector's
    signature in this module (which takes the review object, three
    lexicons and three polarity lists before the flags, and reads
    `.text` itself) -- as written it raises a TypeError. Confirm which
    version of compute_simon_vector this helper was written against.
    """
    embeddings = []
    for review in reviews:
        simon_vector = compute_simon_vector(review.text, pos_words, neg_words, size, three_classes=three_classes)
        embeddings.append(simon_vector.tolist())
    return embeddings
def _simon_label(stars, three_classes):
    """Map a star rating to a class id (0=negative, 1=positive,
    2=neutral when enabled) or None to drop the review."""
    if stars <= 2:
        return 0
    if stars >= 4:
        return 1
    if three_classes and stars == 3:
        return 2
    return None


def _simon_split(reviews, model, three_classes):
    """Pair each review with its stored SIMON vector, keeping labelled ones."""
    xs = []
    ys = []
    simon_vectors = load_vectors(model)
    # Vectors are stored in review order, including reviews later dropped,
    # so a plain positional pairing is correct.
    for vector, review in zip(simon_vectors, reviews):
        label = _simon_label(review.stars, three_classes)
        if label is not None:
            xs.append(vector)
            ys.append(label)
    return xs, ys


def prepare_dataset_simon(train_reviews, test_reviews, train_model, test_model, three_classes=False):
    """Load stored SIMON vectors and build (x_train, x_test, y_train, y_test).

    Reviews with <=2 stars are labelled 0, >=4 stars 1, and exactly 3
    stars 2 when `three_classes` is True; the rest are dropped. Targets
    are one-hot encoded with keras' to_categorical. The previous
    duplicated train/test loops are factored into _simon_split.
    """
    x_train, y_train = _simon_split(train_reviews, train_model, three_classes)
    x_test, y_test = _simon_split(test_reviews, test_model, three_classes)
    return np.array(x_train), np.array(x_test), np.array(to_categorical(y_train)), np.array(to_categorical(y_test))
def read_lexicon_words(pos_file_name, neg_file_name, neu_file_name):
    """Load the positive, negative and neutral lexicon files, each as a
    (words, polarity) pair."""
    lexicon_files = (pos_file_name, neg_file_name, neu_file_name)
    return tuple(read_lexicon_file(name) for name in lexicon_files)
def read_lexicon_file(file_name):
    """Parse one lexicon file of '<word> <polarity>' lines into parallel
    (words, polarity) lists."""
    words = []
    polarity = []
    with open(file_name, "r") as lexicon:
        for entry in lexicon:
            parts = entry.split(" ")
            words.append(parts[0])
            polarity.append(float(parts[1]))
    return words, polarity
def compute_dissim_coef(word1, word2):
    """Depth difference of the two words' paths normalised by the longest
    LCH path length; 0 when that path length is zero."""
    (g, max_depth, root, dist1, dist2,
     lch_concept, max_lch_path_length) = build_graph(word1, word2)
    if max_lch_path_length == 0:
        return 0
    return (dist1 - dist2) / max_lch_path_length
| [
"piotrekswedrak@gmail.com"
] | piotrekswedrak@gmail.com |
5d701f0a48dd6c81ab978a9683db47f0cf9fb515 | 587ac0749473666c2bcdfe558bdba8517cb1c0a0 | /sp2020/j.py | 9470133d7c7fc37f2ee060305d69a2e6d4c99a9d | [] | no_license | katrinafyi/cpg | fc2f408baf19791fa7260561a55d29464a42b212 | 0631d1983ec6a45cbe1a8df63963ab8caac51440 | refs/heads/main | 2023-02-21T13:07:02.517306 | 2021-01-23T06:09:39 | 2021-01-23T06:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | def ints(): return [int(x.strip()) for x in input().split()]
# t is interval of measurement
# d is time considered
# p is percentile required
# r is response delay required for responsiveness
num_meas, t, d, p, r = ints()
num_attempts = ints() [0]
SUBMIT = 'S'
REPLY = 'R'
MEASURE = 'M'
timeline = [] # list of (time, response time)
# submitted[k] = submission time of attempt k (input attempt ids are 1-based)
submitted = [None] * num_attempts
for i in range(2 * num_attempts):
    a, b, c = input().strip().split()
    a = int(a)
    b = int(b) - 1
    if c == SUBMIT:
        submitted[b] = a
    else:
        # store reply events as (reply time, 'R', response latency)
        timeline.append((a, 'R', a - submitted[b]))
# for i in range(1, num_meas + 1):
#     timeline.append((i * t, MEASURE, None))
# timeline.sort()
from collections import deque
from math import ceil, floor
# replies inside the sliding window of length d, oldest first
considering = deque()
def measure():
    # True when the p-th percentile latency of the current window is
    # within r; an empty window counts as responsive.
    if not considering: return True
    l = [x[1] for x in considering]
    l.sort()
    # print(l)
    # 1-based rank p/100*len converted to a 0-based index; exact
    # multiples step down one slot
    i = (p/100 * len(l))
    if i == int(i): i = int(i) - 1
    else: i = floor(i)
    return l[i] <= r
# print(num_meas, t, d, p, r)
# print(timeline)
num_responsive = 0
prev_measure = -1
prev_measure_time = 0
changed = True
# NOTE(review): `prev_measure` and `changed` are never read, and the loop
# assumes timeline events arrive in time order -- confirm the input
# guarantees that.
for time, event, value in timeline:
    if event == REPLY:
        if time > prev_measure_time + t:
            # catch up to the last measurement tick at or before `time`
            next_measure_time = floor(time / t) * t
            # evict replies that fell out of the d-long window
            while considering and considering[0][0] < next_measure_time - d:
                considering.popleft()
            m = measure()
            # credit every skipped tick with the same boolean outcome
            num_responsive += m * (time - prev_measure_time + t) // t
            prev_measure_time = next_measure_time
        considering.append((time, value))
        changed = True
print(num_responsive)
"kenton_lam@outlook.com"
] | kenton_lam@outlook.com |
30395fb46f4f90b60cac2dacbfb6cc5c1a0cc15e | eb357dfc2ca3e4ec2170a6d7cb1998886c715b12 | /ex31.py | f97866c4f09d5f1bbdc765511c14f6a228bb3a35 | [] | no_license | manix1980/lpthw | 49feec49a0f67fa8796dc585158ccd39a2f37894 | d1b1e4809b18d6a2c37fbab64bf04cffbd35741c | refs/heads/master | 2021-07-19T04:18:25.297068 | 2017-10-25T08:34:00 | 2017-10-25T08:34:00 | 104,328,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | print("""You enter a dark room with two doors.
Do you go through door #1 or door #2?""")
# Tiny choose-your-own-adventure driven by nested if/elif/else on raw
# user input (exercise 31, "Learn Python the Hard Way").
door = input("> ")
if door == "1":
    print("There is a giant bear here eating a cheese cake.")
    print("What do you do?")
    print("1. Take the cake.")
    print("2. Scream at the bear")
    print("3. Munch all of the bears food")
    bear = input("> ")
    if bear == "1":
        print("The bear eats your face off. Good Job!")
    elif bear == "2":
        print("The bear eats your legs off. Good job")
    elif bear == "3":
        print("The bear chases you and pounds you in the ass")
    else:
        # any other answer is echoed back via the f-string
        print(f"Well, doing {bear} is probably better")
        print("Bear runs away")
elif door == "2":
    print("You stare into the endless abyss at Cthulul's retina")
    print("1. Blueberries")
    print("2. Yellow Jacket Clothespins")
    print("3. Understanding Revolvers yelling melodies")
    print("4. I don't understand a word you are talking about muppet")
    insanity = input("> ")
    if insanity == "1" or insanity == "2":
        print("Your body survives powered by a mind of jello")
        print("Good Job")
    elif insanity == "4":
        print("Get me the hell out of here bro")
    else:
        # covers "3" and any other answer
        print("The insanity rots your eyes into a pool of muck.")
        print("Good Job")
else:
    # any door other than "1" or "2"
    print("You stumble around and fall on a knife and die. Good Job!")
| [
"mark.ruggles@gmail.com"
] | mark.ruggles@gmail.com |
f7ef68c1de1a2734d11f7d19ac6a13e89278dd3b | 7196fa6e5bef1c2714dc49bb25eb11cfd531c07d | /examples/astronomy.py | d2763fa0ffc30446e77cb225f497b85d7c28afd8 | [
"MIT"
] | permissive | dinarior/daft | 0da226bc951195c6c1c92b8cdff87bc303793f60 | c982155f5fc27d1da1617ebf8a687af388cd0efa | refs/heads/master | 2020-05-09T13:40:50.913111 | 2019-04-16T12:42:16 | 2019-04-16T12:42:16 | 181,163,124 | 2 | 2 | MIT | 2019-07-02T06:06:11 | 2019-04-13T11:49:00 | Python | UTF-8 | Python | false | false | 7,319 | py | """
Astronomical imaging
====================
This is a model for every pixel of every astronomical image ever
taken. It is incomplete!
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([8, 6.75], origin=[0.5, 0.5], grid_unit=4., node_unit=1.4)
# Start with the plates.
tweak=0.02
rect_params = {"lw": 2}
pgm.add_plate(daft.Plate([1.5+tweak, 0.5+tweak, 6.0-2*tweak, 3.75-2*tweak], label=r"\Large telescope+camera+filter multiplets", rect_params=rect_params))
pgm.add_plate(daft.Plate([2.5+tweak, 1.0+tweak, 4.0-2*tweak, 2.75-2*tweak], label=r"\Large images", rect_params=rect_params))
pgm.add_plate(daft.Plate([3.5+tweak, 1.5+tweak, 2.0-2*tweak, 1.75-2*tweak], label=r"\Large pixel patches", rect_params=rect_params))
pgm.add_plate(daft.Plate([1.0+tweak, 4.25+tweak, 3.5-2*tweak, 1.75-2*tweak], label=r"\Large stars", rect_params=rect_params))
pgm.add_plate(daft.Plate([5.5+tweak, 4.25+tweak, 2.5-2*tweak, 1.75-2*tweak], label=r"\Large galaxies", rect_params=rect_params))
# ONLY pixels are observed
asp = 2.3
pgm.add_node(daft.Node("true pixels", r"~\\noise-free\\pixel patch", 5.0, 2.5, aspect=asp))
pgm.add_node(daft.Node("pixels", r"pixel patch", 4.0, 2.0, observed=True, aspect=asp))
pgm.add_edge("true pixels", "pixels")
# The sky
pgm.add_node(daft.Node("sky", r"sky model", 6.0, 2.5, aspect=asp))
pgm.add_edge("sky", "true pixels")
pgm.add_node(daft.Node("sky prior", r"sky priors", 8.0, 2.5, fixed=True))
pgm.add_edge("sky prior", "sky")
# Stars
pgm.add_node(daft.Node("star patch", r"star patch", 4.0, 3.0, aspect=asp))
pgm.add_edge("star patch", "true pixels")
pgm.add_node(daft.Node("star SED", r"~\\spectral energy\\distribution", 2.5, 4.75, aspect=asp+0.2))
pgm.add_edge("star SED", "star patch")
pgm.add_node(daft.Node("star position", r"position", 4.0, 4.75, aspect=asp))
pgm.add_edge("star position", "star patch")
pgm.add_node(daft.Node("temperature", r"temperature", 1.5, 5.25, aspect=asp))
pgm.add_edge("temperature", "star SED")
pgm.add_node(daft.Node("luminosity", r"luminosity", 2.5, 5.25, aspect=asp))
pgm.add_edge("luminosity", "star SED")
pgm.add_node(daft.Node("metallicity", r"metallicity", 1.5, 5.75, aspect=asp))
pgm.add_edge("metallicity", "star SED")
pgm.add_edge("metallicity", "temperature")
pgm.add_edge("metallicity", "luminosity")
pgm.add_node(daft.Node("mass", r"mass", 2.5, 5.75, aspect=asp))
pgm.add_edge("mass", "temperature")
pgm.add_edge("mass", "luminosity")
pgm.add_node(daft.Node("age", r"age", 3.5, 5.75, aspect=asp))
pgm.add_edge("age", "temperature")
pgm.add_edge("age", "luminosity")
pgm.add_node(daft.Node("star models", r"star models", 1.0, 4.0, fixed=True))
pgm.add_edge("star models", "temperature")
pgm.add_edge("star models", "luminosity")
pgm.add_edge("star models", "star SED")
# Galaxies
pgm.add_node(daft.Node("galaxy patch", r"galaxy patch", 5.0, 3.0, aspect=asp))
pgm.add_edge("galaxy patch", "true pixels")
pgm.add_node(daft.Node("galaxy SED", r"~\\spectral energy\\distribution", 6.5, 4.75, aspect=asp+0.2))
pgm.add_edge("galaxy SED", "galaxy patch")
pgm.add_node(daft.Node("morphology", r"morphology", 7.5, 4.75, aspect=asp))
pgm.add_edge("morphology", "galaxy patch")
pgm.add_node(daft.Node("SFH", r"~\\star-formation\\history", 7.5, 5.25, aspect=asp))
pgm.add_edge("SFH", "galaxy SED")
pgm.add_edge("SFH", "morphology")
pgm.add_node(daft.Node("galaxy position", r"~\\redshift\\ \& position", 6.0, 5.25, aspect=asp))
pgm.add_edge("galaxy position", "galaxy SED")
pgm.add_edge("galaxy position", "morphology")
pgm.add_edge("galaxy position", "galaxy patch")
pgm.add_node(daft.Node("dynamics", r"orbit structure", 6.5, 5.75, aspect=asp))
pgm.add_edge("dynamics", "morphology")
pgm.add_edge("dynamics", "SFH")
pgm.add_node(daft.Node("galaxy mass", r"mass", 7.5, 5.75, aspect=asp))
pgm.add_edge("galaxy mass", "dynamics")
pgm.add_edge("galaxy mass", "galaxy SED")
pgm.add_edge("galaxy mass", "SFH")
# Universals
pgm.add_node(daft.Node("extinction model", r"~\\extinction\\model", 5.0, 4.75, aspect=asp))
pgm.add_edge("extinction model", "star patch")
pgm.add_edge("extinction model", "galaxy patch")
pgm.add_node(daft.Node("MW", r"~\\Milky Way\\formation", 4.0, 6.5, aspect=asp))
pgm.add_edge("MW", "metallicity")
pgm.add_edge("MW", "mass")
pgm.add_edge("MW", "age")
pgm.add_edge("MW", "star position")
pgm.add_edge("MW", "extinction model")
pgm.add_node(daft.Node("galaxy formation", r"~\\galaxy\\formation", 5.0, 6.5, aspect=asp))
pgm.add_edge("galaxy formation", "MW")
pgm.add_edge("galaxy formation", "dynamics")
pgm.add_edge("galaxy formation", "galaxy mass")
pgm.add_edge("galaxy formation", "extinction model")
pgm.add_node(daft.Node("LSS", r"~\\large-scale\\structure", 6.0, 6.5, aspect=asp))
pgm.add_edge("LSS", "galaxy position")
pgm.add_node(daft.Node("cosmology", r"~\\cosmological\\parameters", 6.0, 7.0, aspect=asp))
pgm.add_edge("cosmology", "LSS")
pgm.add_edge("cosmology", "galaxy formation")
pgm.add_node(daft.Node("god", r"God", 7.0, 7.0, fixed=True))
pgm.add_edge("god", "cosmology")
# Sensitivity
pgm.add_node(daft.Node("zeropoint", r"~\\zeropoint\\(photocal)", 3.0, 3.0, aspect=asp))
pgm.add_edge("zeropoint", "true pixels")
pgm.add_node(daft.Node("exposure time", r"exposure time", 3.0, 2.5, observed=True, aspect=asp))
pgm.add_edge("exposure time", "zeropoint")
# The PSF
pgm.add_node(daft.Node("WCS", r"~\\astrometric\\calibration", 3.0, 2.0, aspect=asp))
pgm.add_edge("WCS", "star patch")
pgm.add_edge("WCS", "galaxy patch")
pgm.add_node(daft.Node("psf", r"PSF model", 3.0, 3.5, aspect=asp))
pgm.add_edge("psf", "star patch")
pgm.add_edge("psf", "galaxy patch")
pgm.add_node(daft.Node("optics", r"optics", 2.0, 3.0, aspect=asp-1.2))
pgm.add_edge("optics", "psf")
pgm.add_edge("optics", "WCS")
pgm.add_node(daft.Node("atmosphere", r"~\\atmosphere\\model", 1.0, 3.5, aspect=asp))
pgm.add_edge("atmosphere", "psf")
pgm.add_edge("atmosphere", "WCS")
pgm.add_edge("atmosphere", "zeropoint")
# The device
pgm.add_node(daft.Node("flatfield", r"flat-field", 2.0, 1.5, aspect=asp))
pgm.add_edge("flatfield", "pixels")
pgm.add_node(daft.Node("nonlinearity", r"non-linearity", 2.0, 1.0, aspect=asp))
pgm.add_edge("nonlinearity", "pixels")
pgm.add_node(daft.Node("pointing", r"~\\telescope\\pointing etc.", 2.0, 2.0, aspect=asp))
pgm.add_edge("pointing", "WCS")
pgm.add_node(daft.Node("detector", r"detector priors", 1.0, 1.5, fixed=True))
pgm.add_edge("detector", "flatfield")
pgm.add_edge("detector", "nonlinearity")
pgm.add_node(daft.Node("hardware", r"hardware priors", 1.0, 2.5, fixed=True))
pgm.add_edge("hardware", "pointing")
pgm.add_edge("hardware", "exposure time")
pgm.add_edge("hardware", "optics")
# Noise
pgm.add_node(daft.Node("noise patch", r"noise patch", 5.0, 2.0, aspect=asp))
pgm.add_edge("noise patch", "pixels")
pgm.add_edge("true pixels", "noise patch")
pgm.add_node(daft.Node("noise model", r"noise model", 7.0, 2.0, aspect=asp))
pgm.add_edge("noise model", "noise patch")
pgm.add_node(daft.Node("noise prior", r"noise priors", 8.0, 2.0, fixed=True))
pgm.add_edge("noise prior", "noise model")
pgm.add_node(daft.Node("cosmic rays", r"~\\cosmic-ray\\model", 8.0, 1.5, aspect=asp))
pgm.add_edge("cosmic rays", "noise patch")
# Render and save.
pgm.render()
pgm.figure.savefig("astronomy.pdf")
pgm.figure.savefig("astronomy.png", dpi=150)
| [
"dwhogg@gmail.com"
] | dwhogg@gmail.com |
c6e8b539cfe2e03de48550078df41e6739fe020d | 0936d67c5e72971c465075400fe6b7e8ea4d6d64 | /TF1/tf1.py | 419f1b54ab9a16233981fa9d2b1083cf303182d1 | [
"MIT"
] | permissive | ChengHaoKe/MLtest | 324a990a498d8f1722ee37ab0487855ff40dc1fc | bcd713736e19ebba4aaf7bd39595c96f710a052e | refs/heads/master | 2020-09-05T17:42:40.831474 | 2019-11-21T10:22:24 | 2019-11-21T10:22:24 | 220,171,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,616 | py | import tensorflow as tf
import matplotlib.pyplot as plt
import os
# remove warning for cpu
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# define functions because pycharm doesn't locate them
Dense = tf.keras.layers.Dense
Flatten = tf.keras.layers.Flatten
Conv2D = tf.keras.layers.Conv2D
BatchNormalization = tf.keras.layers.BatchNormalization
EarlyStopping = tf.keras.callbacks.EarlyStopping
# get data from online
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
train_data = x_train.reshape(x_train.shape[0], 28, 28, 1)
test_data = x_test.reshape(x_test.shape[0], 28, 28, 1)
# create model
model = tf.keras.Sequential()
model.add(Conv2D(10, kernel_size=2, activation='relu', input_shape=(28, 28, 1)))
# # Add a dropout layer
# model.add(Dropout(0.2))
# Add batch normalization layer
model.add(BatchNormalization())
model.add(Conv2D(10, kernel_size=2, activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
print(model.summary())
# Compile the model
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Define early_stopping_monitor
early_stopping_monitor = EarlyStopping(patience=2)
# Fit to training data
training = model.fit(train_data, y_train, validation_split=0.2, epochs=5, batch_size=10,
callbacks=[early_stopping_monitor])
# Extract the history from the training object
history = training.history
# Plot the training loss
plt.plot(history['loss'], label='Training Loss')
# Plot the validation loss
plt.plot(history['val_loss'], label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.title('Model Training and Validation Loss')
# Show the figure
plt.show()
# Evaluate on test data
x = model.evaluate(test_data, y_test, batch_size=10)
print(x)
# get weights
mwei0 = model.get_weights()
mwei1 = mwei0[0]
mwei2 = mwei0[1]
mwei3 = mwei0[2]
mwei4 = mwei0[3]
mwei5 = mwei0[4]
mwei6 = mwei0[5]
# predict images
# convert to float 32
image = tf.cast(test_data[0:3], tf.float32)
y = model.predict(image)
# plot images
def imgplt(num):
x1 = test_data[num].reshape(test_data[num].shape[0], test_data[num].shape[1])
plt.imshow(x1)
plt.show()
imgplt(0)
imgplt(1)
imgplt(2)
# # save model
# model.save('/Users/ch.ke/GitHub/MLtest/modelfiles/tf1.h5')
# # model = tf.keras.models.load_model('/Users/ch.ke/GitHub/MLtest/modelfiles/tf1.h5')
# # save weights
# model.save_weights('/Users/ch.ke/GitHub/MLtest/modelfiles/tf1weights.h5')
# # model.load_weights('Users/ch.ke/GitHub/MLtest/modelfiles/tf1weights.h5')
| [
"ch.ke@afusion.com"
] | ch.ke@afusion.com |
9d7cb819a753857b29721f3b71cd97414ec18f0b | 76c5db14cf4cfac8ce587cb65521556993564997 | /projects/III_Binary_Exploits_for_Windows/ed309/debruijn.py | 9982b4c252caf02b153459a8c313254035ac7253 | [] | no_license | 0xballistics/CNIT-127-2019-Scripts | f862ef5ce05f555a633c380f58da4146c0ef3693 | 0314d1976dc4a92fe1d0c89e9b33ba9d6d0b8893 | refs/heads/master | 2023-06-08T23:12:52.706875 | 2020-06-02T20:38:11 | 2020-06-02T20:38:11 | 268,901,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,319 | py | import struct
buf = struct.pack ("3000B", *[
0x41,0x41,0x41,0x42,0x41,0x41,0x43,0x41,0x41,0x44,0x41,
0x41,0x45,0x41,0x41,0x46,0x41,0x41,0x47,0x41,0x41,0x48,
0x41,0x41,0x49,0x41,0x41,0x4a,0x41,0x41,0x4b,0x41,0x41,
0x4c,0x41,0x41,0x4d,0x41,0x41,0x4e,0x41,0x41,0x4f,0x41,
0x41,0x50,0x41,0x41,0x51,0x41,0x41,0x52,0x41,0x41,0x53,
0x41,0x41,0x54,0x41,0x41,0x55,0x41,0x41,0x56,0x41,0x41,
0x57,0x41,0x41,0x58,0x41,0x41,0x59,0x41,0x41,0x5a,0x41,
0x41,0x61,0x41,0x41,0x62,0x41,0x41,0x63,0x41,0x41,0x64,
0x41,0x41,0x65,0x41,0x41,0x66,0x41,0x41,0x67,0x41,0x41,
0x68,0x41,0x41,0x69,0x41,0x41,0x6a,0x41,0x41,0x6b,0x41,
0x41,0x6c,0x41,0x41,0x6d,0x41,0x41,0x6e,0x41,0x41,0x6f,
0x41,0x41,0x70,0x41,0x41,0x71,0x41,0x41,0x72,0x41,0x41,
0x73,0x41,0x41,0x74,0x41,0x41,0x75,0x41,0x41,0x76,0x41,
0x41,0x77,0x41,0x41,0x78,0x41,0x41,0x79,0x41,0x41,0x7a,
0x41,0x41,0x31,0x41,0x41,0x32,0x41,0x41,0x33,0x41,0x41,
0x34,0x41,0x41,0x35,0x41,0x41,0x36,0x41,0x41,0x37,0x41,
0x41,0x38,0x41,0x41,0x39,0x41,0x41,0x30,0x41,0x42,0x42,
0x41,0x42,0x43,0x41,0x42,0x44,0x41,0x42,0x45,0x41,0x42,
0x46,0x41,0x42,0x47,0x41,0x42,0x48,0x41,0x42,0x49,0x41,
0x42,0x4a,0x41,0x42,0x4b,0x41,0x42,0x4c,0x41,0x42,0x4d,
0x41,0x42,0x4e,0x41,0x42,0x4f,0x41,0x42,0x50,0x41,0x42,
0x51,0x41,0x42,0x52,0x41,0x42,0x53,0x41,0x42,0x54,0x41,
0x42,0x55,0x41,0x42,0x56,0x41,0x42,0x57,0x41,0x42,0x58,
0x41,0x42,0x59,0x41,0x42,0x5a,0x41,0x42,0x61,0x41,0x42,
0x62,0x41,0x42,0x63,0x41,0x42,0x64,0x41,0x42,0x65,0x41,
0x42,0x66,0x41,0x42,0x67,0x41,0x42,0x68,0x41,0x42,0x69,
0x41,0x42,0x6a,0x41,0x42,0x6b,0x41,0x42,0x6c,0x41,0x42,
0x6d,0x41,0x42,0x6e,0x41,0x42,0x6f,0x41,0x42,0x70,0x41,
0x42,0x71,0x41,0x42,0x72,0x41,0x42,0x73,0x41,0x42,0x74,
0x41,0x42,0x75,0x41,0x42,0x76,0x41,0x42,0x77,0x41,0x42,
0x78,0x41,0x42,0x79,0x41,0x42,0x7a,0x41,0x42,0x31,0x41,
0x42,0x32,0x41,0x42,0x33,0x41,0x42,0x34,0x41,0x42,0x35,
0x41,0x42,0x36,0x41,0x42,0x37,0x41,0x42,0x38,0x41,0x42,
0x39,0x41,0x42,0x30,0x41,0x43,0x42,0x41,0x43,0x43,0x41,
0x43,0x44,0x41,0x43,0x45,0x41,0x43,0x46,0x41,0x43,0x47,
0x41,0x43,0x48,0x41,0x43,0x49,0x41,0x43,0x4a,0x41,0x43,
0x4b,0x41,0x43,0x4c,0x41,0x43,0x4d,0x41,0x43,0x4e,0x41,
0x43,0x4f,0x41,0x43,0x50,0x41,0x43,0x51,0x41,0x43,0x52,
0x41,0x43,0x53,0x41,0x43,0x54,0x41,0x43,0x55,0x41,0x43,
0x56,0x41,0x43,0x57,0x41,0x43,0x58,0x41,0x43,0x59,0x41,
0x43,0x5a,0x41,0x43,0x61,0x41,0x43,0x62,0x41,0x43,0x63,
0x41,0x43,0x64,0x41,0x43,0x65,0x41,0x43,0x66,0x41,0x43,
0x67,0x41,0x43,0x68,0x41,0x43,0x69,0x41,0x43,0x6a,0x41,
0x43,0x6b,0x41,0x43,0x6c,0x41,0x43,0x6d,0x41,0x43,0x6e,
0x41,0x43,0x6f,0x41,0x43,0x70,0x41,0x43,0x71,0x41,0x43,
0x72,0x41,0x43,0x73,0x41,0x43,0x74,0x41,0x43,0x75,0x41,
0x43,0x76,0x41,0x43,0x77,0x41,0x43,0x78,0x41,0x43,0x79,
0x41,0x43,0x7a,0x41,0x43,0x31,0x41,0x43,0x32,0x41,0x43,
0x33,0x41,0x43,0x34,0x41,0x43,0x35,0x41,0x43,0x36,0x41,
0x43,0x37,0x41,0x43,0x38,0x41,0x43,0x39,0x41,0x43,0x30,
0x41,0x44,0x42,0x41,0x44,0x43,0x41,0x44,0x44,0x41,0x44,
0x45,0x41,0x44,0x46,0x41,0x44,0x47,0x41,0x44,0x48,0x41,
0x44,0x49,0x41,0x44,0x4a,0x41,0x44,0x4b,0x41,0x44,0x4c,
0x41,0x44,0x4d,0x41,0x44,0x4e,0x41,0x44,0x4f,0x41,0x44,
0x50,0x41,0x44,0x51,0x41,0x44,0x52,0x41,0x44,0x53,0x41,
0x44,0x54,0x41,0x44,0x55,0x41,0x44,0x56,0x41,0x44,0x57,
0x41,0x44,0x58,0x41,0x44,0x59,0x41,0x44,0x5a,0x41,0x44,
0x61,0x41,0x44,0x62,0x41,0x44,0x63,0x41,0x44,0x64,0x41,
0x44,0x65,0x41,0x44,0x66,0x41,0x44,0x67,0x41,0x44,0x68,
0x41,0x44,0x69,0x41,0x44,0x6a,0x41,0x44,0x6b,0x41,0x44,
0x6c,0x41,0x44,0x6d,0x41,0x44,0x6e,0x41,0x44,0x6f,0x41,
0x44,0x70,0x41,0x44,0x71,0x41,0x44,0x72,0x41,0x44,0x73,
0x41,0x44,0x74,0x41,0x44,0x75,0x41,0x44,0x76,0x41,0x44,
0x77,0x41,0x44,0x78,0x41,0x44,0x79,0x41,0x44,0x7a,0x41,
0x44,0x31,0x41,0x44,0x32,0x41,0x44,0x33,0x41,0x44,0x34,
0x41,0x44,0x35,0x41,0x44,0x36,0x41,0x44,0x37,0x41,0x44,
0x38,0x41,0x44,0x39,0x41,0x44,0x30,0x41,0x45,0x42,0x41,
0x45,0x43,0x41,0x45,0x44,0x41,0x45,0x45,0x41,0x45,0x46,
0x41,0x45,0x47,0x41,0x45,0x48,0x41,0x45,0x49,0x41,0x45,
0x4a,0x41,0x45,0x4b,0x41,0x45,0x4c,0x41,0x45,0x4d,0x41,
0x45,0x4e,0x41,0x45,0x4f,0x41,0x45,0x50,0x41,0x45,0x51,
0x41,0x45,0x52,0x41,0x45,0x53,0x41,0x45,0x54,0x41,0x45,
0x55,0x41,0x45,0x56,0x41,0x45,0x57,0x41,0x45,0x58,0x41,
0x45,0x59,0x41,0x45,0x5a,0x41,0x45,0x61,0x41,0x45,0x62,
0x41,0x45,0x63,0x41,0x45,0x64,0x41,0x45,0x65,0x41,0x45,
0x66,0x41,0x45,0x67,0x41,0x45,0x68,0x41,0x45,0x69,0x41,
0x45,0x6a,0x41,0x45,0x6b,0x41,0x45,0x6c,0x41,0x45,0x6d,
0x41,0x45,0x6e,0x41,0x45,0x6f,0x41,0x45,0x70,0x41,0x45,
0x71,0x41,0x45,0x72,0x41,0x45,0x73,0x41,0x45,0x74,0x41,
0x45,0x75,0x41,0x45,0x76,0x41,0x45,0x77,0x41,0x45,0x78,
0x41,0x45,0x79,0x41,0x45,0x7a,0x41,0x45,0x31,0x41,0x45,
0x32,0x41,0x45,0x33,0x41,0x45,0x34,0x41,0x45,0x35,0x41,
0x45,0x36,0x41,0x45,0x37,0x41,0x45,0x38,0x41,0x45,0x39,
0x41,0x45,0x30,0x41,0x46,0x42,0x41,0x46,0x43,0x41,0x46,
0x44,0x41,0x46,0x45,0x41,0x46,0x46,0x41,0x46,0x47,0x41,
0x46,0x48,0x41,0x46,0x49,0x41,0x46,0x4a,0x41,0x46,0x4b,
0x41,0x46,0x4c,0x41,0x46,0x4d,0x41,0x46,0x4e,0x41,0x46,
0x4f,0x41,0x46,0x50,0x41,0x46,0x51,0x41,0x46,0x52,0x41,
0x46,0x53,0x41,0x46,0x54,0x41,0x46,0x55,0x41,0x46,0x56,
0x41,0x46,0x57,0x41,0x46,0x58,0x41,0x46,0x59,0x41,0x46,
0x5a,0x41,0x46,0x61,0x41,0x46,0x62,0x41,0x46,0x63,0x41,
0x46,0x64,0x41,0x46,0x65,0x41,0x46,0x66,0x41,0x46,0x67,
0x41,0x46,0x68,0x41,0x46,0x69,0x41,0x46,0x6a,0x41,0x46,
0x6b,0x41,0x46,0x6c,0x41,0x46,0x6d,0x41,0x46,0x6e,0x41,
0x46,0x6f,0x41,0x46,0x70,0x41,0x46,0x71,0x41,0x46,0x72,
0x41,0x46,0x73,0x41,0x46,0x74,0x41,0x46,0x75,0x41,0x46,
0x76,0x41,0x46,0x77,0x41,0x46,0x78,0x41,0x46,0x79,0x41,
0x46,0x7a,0x41,0x46,0x31,0x41,0x46,0x32,0x41,0x46,0x33,
0x41,0x46,0x34,0x41,0x46,0x35,0x41,0x46,0x36,0x41,0x46,
0x37,0x41,0x46,0x38,0x41,0x46,0x39,0x41,0x46,0x30,0x41,
0x47,0x42,0x41,0x47,0x43,0x41,0x47,0x44,0x41,0x47,0x45,
0x41,0x47,0x46,0x41,0x47,0x47,0x41,0x47,0x48,0x41,0x47,
0x49,0x41,0x47,0x4a,0x41,0x47,0x4b,0x41,0x47,0x4c,0x41,
0x47,0x4d,0x41,0x47,0x4e,0x41,0x47,0x4f,0x41,0x47,0x50,
0x41,0x47,0x51,0x41,0x47,0x52,0x41,0x47,0x53,0x41,0x47,
0x54,0x41,0x47,0x55,0x41,0x47,0x56,0x41,0x47,0x57,0x41,
0x47,0x58,0x41,0x47,0x59,0x41,0x47,0x5a,0x41,0x47,0x61,
0x41,0x47,0x62,0x41,0x47,0x63,0x41,0x47,0x64,0x41,0x47,
0x65,0x41,0x47,0x66,0x41,0x47,0x67,0x41,0x47,0x68,0x41,
0x47,0x69,0x41,0x47,0x6a,0x41,0x47,0x6b,0x41,0x47,0x6c,
0x41,0x47,0x6d,0x41,0x47,0x6e,0x41,0x47,0x6f,0x41,0x47,
0x70,0x41,0x47,0x71,0x41,0x47,0x72,0x41,0x47,0x73,0x41,
0x47,0x74,0x41,0x47,0x75,0x41,0x47,0x76,0x41,0x47,0x77,
0x41,0x47,0x78,0x41,0x47,0x79,0x41,0x47,0x7a,0x41,0x47,
0x31,0x41,0x47,0x32,0x41,0x47,0x33,0x41,0x47,0x34,0x41,
0x47,0x35,0x41,0x47,0x36,0x41,0x47,0x37,0x41,0x47,0x38,
0x41,0x47,0x39,0x41,0x47,0x30,0x41,0x48,0x42,0x41,0x48,
0x43,0x41,0x48,0x44,0x41,0x48,0x45,0x41,0x48,0x46,0x41,
0x48,0x47,0x41,0x48,0x48,0x41,0x48,0x49,0x41,0x48,0x4a,
0x41,0x48,0x4b,0x41,0x48,0x4c,0x41,0x48,0x4d,0x41,0x48,
0x4e,0x41,0x48,0x4f,0x41,0x48,0x50,0x41,0x48,0x51,0x41,
0x48,0x52,0x41,0x48,0x53,0x41,0x48,0x54,0x41,0x48,0x55,
0x41,0x48,0x56,0x41,0x48,0x57,0x41,0x48,0x58,0x41,0x48,
0x59,0x41,0x48,0x5a,0x41,0x48,0x61,0x41,0x48,0x62,0x41,
0x48,0x63,0x41,0x48,0x64,0x41,0x48,0x65,0x41,0x48,0x66,
0x41,0x48,0x67,0x41,0x48,0x68,0x41,0x48,0x69,0x41,0x48,
0x6a,0x41,0x48,0x6b,0x41,0x48,0x6c,0x41,0x48,0x6d,0x41,
0x48,0x6e,0x41,0x48,0x6f,0x41,0x48,0x70,0x41,0x48,0x71,
0x41,0x48,0x72,0x41,0x48,0x73,0x41,0x48,0x74,0x41,0x48,
0x75,0x41,0x48,0x76,0x41,0x48,0x77,0x41,0x48,0x78,0x41,
0x48,0x79,0x41,0x48,0x7a,0x41,0x48,0x31,0x41,0x48,0x32,
0x41,0x48,0x33,0x41,0x48,0x34,0x41,0x48,0x35,0x41,0x48,
0x36,0x41,0x48,0x37,0x41,0x48,0x38,0x41,0x48,0x39,0x41,
0x48,0x30,0x41,0x49,0x42,0x41,0x49,0x43,0x41,0x49,0x44,
0x41,0x49,0x45,0x41,0x49,0x46,0x41,0x49,0x47,0x41,0x49,
0x48,0x41,0x49,0x49,0x41,0x49,0x4a,0x41,0x49,0x4b,0x41,
0x49,0x4c,0x41,0x49,0x4d,0x41,0x49,0x4e,0x41,0x49,0x4f,
0x41,0x49,0x50,0x41,0x49,0x51,0x41,0x49,0x52,0x41,0x49,
0x53,0x41,0x49,0x54,0x41,0x49,0x55,0x41,0x49,0x56,0x41,
0x49,0x57,0x41,0x49,0x58,0x41,0x49,0x59,0x41,0x49,0x5a,
0x41,0x49,0x61,0x41,0x49,0x62,0x41,0x49,0x63,0x41,0x49,
0x64,0x41,0x49,0x65,0x41,0x49,0x66,0x41,0x49,0x67,0x41,
0x49,0x68,0x41,0x49,0x69,0x41,0x49,0x6a,0x41,0x49,0x6b,
0x41,0x49,0x6c,0x41,0x49,0x6d,0x41,0x49,0x6e,0x41,0x49,
0x6f,0x41,0x49,0x70,0x41,0x49,0x71,0x41,0x49,0x72,0x41,
0x49,0x73,0x41,0x49,0x74,0x41,0x49,0x75,0x41,0x49,0x76,
0x41,0x49,0x77,0x41,0x49,0x78,0x41,0x49,0x79,0x41,0x49,
0x7a,0x41,0x49,0x31,0x41,0x49,0x32,0x41,0x49,0x33,0x41,
0x49,0x34,0x41,0x49,0x35,0x41,0x49,0x36,0x41,0x49,0x37,
0x41,0x49,0x38,0x41,0x49,0x39,0x41,0x49,0x30,0x41,0x4a,
0x42,0x41,0x4a,0x43,0x41,0x4a,0x44,0x41,0x4a,0x45,0x41,
0x4a,0x46,0x41,0x4a,0x47,0x41,0x4a,0x48,0x41,0x4a,0x49,
0x41,0x4a,0x4a,0x41,0x4a,0x4b,0x41,0x4a,0x4c,0x41,0x4a,
0x4d,0x41,0x4a,0x4e,0x41,0x4a,0x4f,0x41,0x4a,0x50,0x41,
0x4a,0x51,0x41,0x4a,0x52,0x41,0x4a,0x53,0x41,0x4a,0x54,
0x41,0x4a,0x55,0x41,0x4a,0x56,0x41,0x4a,0x57,0x41,0x4a,
0x58,0x41,0x4a,0x59,0x41,0x4a,0x5a,0x41,0x4a,0x61,0x41,
0x4a,0x62,0x41,0x4a,0x63,0x41,0x4a,0x64,0x41,0x4a,0x65,
0x41,0x4a,0x66,0x41,0x4a,0x67,0x41,0x4a,0x68,0x41,0x4a,
0x69,0x41,0x4a,0x6a,0x41,0x4a,0x6b,0x41,0x4a,0x6c,0x41,
0x4a,0x6d,0x41,0x4a,0x6e,0x41,0x4a,0x6f,0x41,0x4a,0x70,
0x41,0x4a,0x71,0x41,0x4a,0x72,0x41,0x4a,0x73,0x41,0x4a,
0x74,0x41,0x4a,0x75,0x41,0x4a,0x76,0x41,0x4a,0x77,0x41,
0x4a,0x78,0x41,0x4a,0x79,0x41,0x4a,0x7a,0x41,0x4a,0x31,
0x41,0x4a,0x32,0x41,0x4a,0x33,0x41,0x4a,0x34,0x41,0x4a,
0x35,0x41,0x4a,0x36,0x41,0x4a,0x37,0x41,0x4a,0x38,0x41,
0x4a,0x39,0x41,0x4a,0x30,0x41,0x4b,0x42,0x41,0x4b,0x43,
0x41,0x4b,0x44,0x41,0x4b,0x45,0x41,0x4b,0x46,0x41,0x4b,
0x47,0x41,0x4b,0x48,0x41,0x4b,0x49,0x41,0x4b,0x4a,0x41,
0x4b,0x4b,0x41,0x4b,0x4c,0x41,0x4b,0x4d,0x41,0x4b,0x4e,
0x41,0x4b,0x4f,0x41,0x4b,0x50,0x41,0x4b,0x51,0x41,0x4b,
0x52,0x41,0x4b,0x53,0x41,0x4b,0x54,0x41,0x4b,0x55,0x41,
0x4b,0x56,0x41,0x4b,0x57,0x41,0x4b,0x58,0x41,0x4b,0x59,
0x41,0x4b,0x5a,0x41,0x4b,0x61,0x41,0x4b,0x62,0x41,0x4b,
0x63,0x41,0x4b,0x64,0x41,0x4b,0x65,0x41,0x4b,0x66,0x41,
0x4b,0x67,0x41,0x4b,0x68,0x41,0x4b,0x69,0x41,0x4b,0x6a,
0x41,0x4b,0x6b,0x41,0x4b,0x6c,0x41,0x4b,0x6d,0x41,0x4b,
0x6e,0x41,0x4b,0x6f,0x41,0x4b,0x70,0x41,0x4b,0x71,0x41,
0x4b,0x72,0x41,0x4b,0x73,0x41,0x4b,0x74,0x41,0x4b,0x75,
0x41,0x4b,0x76,0x41,0x4b,0x77,0x41,0x4b,0x78,0x41,0x4b,
0x79,0x41,0x4b,0x7a,0x41,0x4b,0x31,0x41,0x4b,0x32,0x41,
0x4b,0x33,0x41,0x4b,0x34,0x41,0x4b,0x35,0x41,0x4b,0x36,
0x41,0x4b,0x37,0x41,0x4b,0x38,0x41,0x4b,0x39,0x41,0x4b,
0x30,0x41,0x4c,0x42,0x41,0x4c,0x43,0x41,0x4c,0x44,0x41,
0x4c,0x45,0x41,0x4c,0x46,0x41,0x4c,0x47,0x41,0x4c,0x48,
0x41,0x4c,0x49,0x41,0x4c,0x4a,0x41,0x4c,0x4b,0x41,0x4c,
0x4c,0x41,0x4c,0x4d,0x41,0x4c,0x4e,0x41,0x4c,0x4f,0x41,
0x4c,0x50,0x41,0x4c,0x51,0x41,0x4c,0x52,0x41,0x4c,0x53,
0x41,0x4c,0x54,0x41,0x4c,0x55,0x41,0x4c,0x56,0x41,0x4c,
0x57,0x41,0x4c,0x58,0x41,0x4c,0x59,0x41,0x4c,0x5a,0x41,
0x4c,0x61,0x41,0x4c,0x62,0x41,0x4c,0x63,0x41,0x4c,0x64,
0x41,0x4c,0x65,0x41,0x4c,0x66,0x41,0x4c,0x67,0x41,0x4c,
0x68,0x41,0x4c,0x69,0x41,0x4c,0x6a,0x41,0x4c,0x6b,0x41,
0x4c,0x6c,0x41,0x4c,0x6d,0x41,0x4c,0x6e,0x41,0x4c,0x6f,
0x41,0x4c,0x70,0x41,0x4c,0x71,0x41,0x4c,0x72,0x41,0x4c,
0x73,0x41,0x4c,0x74,0x41,0x4c,0x75,0x41,0x4c,0x76,0x41,
0x4c,0x77,0x41,0x4c,0x78,0x41,0x4c,0x79,0x41,0x4c,0x7a,
0x41,0x4c,0x31,0x41,0x4c,0x32,0x41,0x4c,0x33,0x41,0x4c,
0x34,0x41,0x4c,0x35,0x41,0x4c,0x36,0x41,0x4c,0x37,0x41,
0x4c,0x38,0x41,0x4c,0x39,0x41,0x4c,0x30,0x41,0x4d,0x42,
0x41,0x4d,0x43,0x41,0x4d,0x44,0x41,0x4d,0x45,0x41,0x4d,
0x46,0x41,0x4d,0x47,0x41,0x4d,0x48,0x41,0x4d,0x49,0x41,
0x4d,0x4a,0x41,0x4d,0x4b,0x41,0x4d,0x4c,0x41,0x4d,0x4d,
0x41,0x4d,0x4e,0x41,0x4d,0x4f,0x41,0x4d,0x50,0x41,0x4d,
0x51,0x41,0x4d,0x52,0x41,0x4d,0x53,0x41,0x4d,0x54,0x41,
0x4d,0x55,0x41,0x4d,0x56,0x41,0x4d,0x57,0x41,0x4d,0x58,
0x41,0x4d,0x59,0x41,0x4d,0x5a,0x41,0x4d,0x61,0x41,0x4d,
0x62,0x41,0x4d,0x63,0x41,0x4d,0x64,0x41,0x4d,0x65,0x41,
0x4d,0x66,0x41,0x4d,0x67,0x41,0x4d,0x68,0x41,0x4d,0x69,
0x41,0x4d,0x6a,0x41,0x4d,0x6b,0x41,0x4d,0x6c,0x41,0x4d,
0x6d,0x41,0x4d,0x6e,0x41,0x4d,0x6f,0x41,0x4d,0x70,0x41,
0x4d,0x71,0x41,0x4d,0x72,0x41,0x4d,0x73,0x41,0x4d,0x74,
0x41,0x4d,0x75,0x41,0x4d,0x76,0x41,0x4d,0x77,0x41,0x4d,
0x78,0x41,0x4d,0x79,0x41,0x4d,0x7a,0x41,0x4d,0x31,0x41,
0x4d,0x32,0x41,0x4d,0x33,0x41,0x4d,0x34,0x41,0x4d,0x35,
0x41,0x4d,0x36,0x41,0x4d,0x37,0x41,0x4d,0x38,0x41,0x4d,
0x39,0x41,0x4d,0x30,0x41,0x4e,0x42,0x41,0x4e,0x43,0x41,
0x4e,0x44,0x41,0x4e,0x45,0x41,0x4e,0x46,0x41,0x4e,0x47,
0x41,0x4e,0x48,0x41,0x4e,0x49,0x41,0x4e,0x4a,0x41,0x4e,
0x4b,0x41,0x4e,0x4c,0x41,0x4e,0x4d,0x41,0x4e,0x4e,0x41,
0x4e,0x4f,0x41,0x4e,0x50,0x41,0x4e,0x51,0x41,0x4e,0x52,
0x41,0x4e,0x53,0x41,0x4e,0x54,0x41,0x4e,0x55,0x41,0x4e,
0x56,0x41,0x4e,0x57,0x41,0x4e,0x58,0x41,0x4e,0x59,0x41,
0x4e,0x5a,0x41,0x4e,0x61,0x41,0x4e,0x62,0x41,0x4e,0x63,
0x41,0x4e,0x64,0x41,0x4e,0x65,0x41,0x4e,0x66,0x41,0x4e,
0x67,0x41,0x4e,0x68,0x41,0x4e,0x69,0x41,0x4e,0x6a,0x41,
0x4e,0x6b,0x41,0x4e,0x6c,0x41,0x4e,0x6d,0x41,0x4e,0x6e,
0x41,0x4e,0x6f,0x41,0x4e,0x70,0x41,0x4e,0x71,0x41,0x4e,
0x72,0x41,0x4e,0x73,0x41,0x4e,0x74,0x41,0x4e,0x75,0x41,
0x4e,0x76,0x41,0x4e,0x77,0x41,0x4e,0x78,0x41,0x4e,0x79,
0x41,0x4e,0x7a,0x41,0x4e,0x31,0x41,0x4e,0x32,0x41,0x4e,
0x33,0x41,0x4e,0x34,0x41,0x4e,0x35,0x41,0x4e,0x36,0x41,
0x4e,0x37,0x41,0x4e,0x38,0x41,0x4e,0x39,0x41,0x4e,0x30,
0x41,0x4f,0x42,0x41,0x4f,0x43,0x41,0x4f,0x44,0x41,0x4f,
0x45,0x41,0x4f,0x46,0x41,0x4f,0x47,0x41,0x4f,0x48,0x41,
0x4f,0x49,0x41,0x4f,0x4a,0x41,0x4f,0x4b,0x41,0x4f,0x4c,
0x41,0x4f,0x4d,0x41,0x4f,0x4e,0x41,0x4f,0x4f,0x41,0x4f,
0x50,0x41,0x4f,0x51,0x41,0x4f,0x52,0x41,0x4f,0x53,0x41,
0x4f,0x54,0x41,0x4f,0x55,0x41,0x4f,0x56,0x41,0x4f,0x57,
0x41,0x4f,0x58,0x41,0x4f,0x59,0x41,0x4f,0x5a,0x41,0x4f,
0x61,0x41,0x4f,0x62,0x41,0x4f,0x63,0x41,0x4f,0x64,0x41,
0x4f,0x65,0x41,0x4f,0x66,0x41,0x4f,0x67,0x41,0x4f,0x68,
0x41,0x4f,0x69,0x41,0x4f,0x6a,0x41,0x4f,0x6b,0x41,0x4f,
0x6c,0x41,0x4f,0x6d,0x41,0x4f,0x6e,0x41,0x4f,0x6f,0x41,
0x4f,0x70,0x41,0x4f,0x71,0x41,0x4f,0x72,0x41,0x4f,0x73,
0x41,0x4f,0x74,0x41,0x4f,0x75,0x41,0x4f,0x76,0x41,0x4f,
0x77,0x41,0x4f,0x78,0x41,0x4f,0x79,0x41,0x4f,0x7a,0x41,
0x4f,0x31,0x41,0x4f,0x32,0x41,0x4f,0x33,0x41,0x4f,0x34,
0x41,0x4f,0x35,0x41,0x4f,0x36,0x41,0x4f,0x37,0x41,0x4f,
0x38,0x41,0x4f,0x39,0x41,0x4f,0x30,0x41,0x50,0x42,0x41,
0x50,0x43,0x41,0x50,0x44,0x41,0x50,0x45,0x41,0x50,0x46,
0x41,0x50,0x47,0x41,0x50,0x48,0x41,0x50,0x49,0x41,0x50,
0x4a,0x41,0x50,0x4b,0x41,0x50,0x4c,0x41,0x50,0x4d,0x41,
0x50,0x4e,0x41,0x50,0x4f,0x41,0x50,0x50,0x41,0x50,0x51,
0x41,0x50,0x52,0x41,0x50,0x53,0x41,0x50,0x54,0x41,0x50,
0x55,0x41,0x50,0x56,0x41,0x50,0x57,0x41,0x50,0x58,0x41,
0x50,0x59,0x41,0x50,0x5a,0x41,0x50,0x61,0x41,0x50,0x62,
0x41,0x50,0x63,0x41,0x50,0x64,0x41,0x50,0x65,0x41,0x50,
0x66,0x41,0x50,0x67,0x41,0x50,0x68,0x41,0x50,0x69,0x41,
0x50,0x6a,0x41,0x50,0x6b,0x41,0x50,0x6c,0x41,0x50,0x6d,
0x41,0x50,0x6e,0x41,0x50,0x6f,0x41,0x50,0x70,0x41,0x50,
0x71,0x41,0x50,0x72,0x41,0x50,0x73,0x41,0x50,0x74,0x41,
0x50,0x75,0x41,0x50,0x76,0x41,0x50,0x77,0x41,0x50,0x78,
0x41,0x50,0x79,0x41,0x50,0x7a,0x41,0x50,0x31,0x41,0x50,
0x32,0x41,0x50,0x33,0x41,0x50,0x34,0x41,0x50,0x35,0x41,
0x50,0x36,0x41,0x50,0x37,0x41,0x50,0x38,0x41,0x50,0x39,
0x41,0x50,0x30,0x41,0x51,0x42,0x41,0x51,0x43,0x41,0x51,
0x44,0x41,0x51,0x45,0x41,0x51,0x46,0x41,0x51,0x47,0x41,
0x51,0x48,0x41,0x51,0x49,0x41,0x51,0x4a,0x41,0x51,0x4b,
0x41,0x51,0x4c,0x41,0x51,0x4d,0x41,0x51,0x4e,0x41,0x51,
0x4f,0x41,0x51,0x50,0x41,0x51,0x51,0x41,0x51,0x52,0x41,
0x51,0x53,0x41,0x51,0x54,0x41,0x51,0x55,0x41,0x51,0x56,
0x41,0x51,0x57,0x41,0x51,0x58,0x41,0x51])
| [
"halilburaknoyan@gmail.com"
] | halilburaknoyan@gmail.com |
52cc67b49401a2ab6f5fb96ce878c941ca0fd9c2 | 91d1d3135ff096852e8a89690d95277428614cd1 | /spatial_two_mics/labels_inference/ground_truth.py | 972b8bdbc6b067e1f22ef5b61825b16b2dc8da06 | [
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | etzinis/unsupervised_spatial_dc | 07a35e2c9e19aa237b4be2dbfb11447c21028859 | 7330303d3c994b2abdbceedf2ee08660f94bd9ce | refs/heads/master | 2021-10-22T18:40:51.430636 | 2021-10-10T20:41:36 | 2021-10-10T20:41:36 | 156,471,783 | 23 | 4 | null | 2021-10-10T20:14:20 | 2018-11-07T01:19:36 | Python | UTF-8 | Python | false | false | 2,560 | py | """!
@brief Infering the masking for eah tf bin independently based on the
maximum energy of the sources in each bin
@author Efthymios Tzinis {etzinis2@illinois.edu}
@copyright University of Illinois at Urbana Champaign
"""
import numpy as np
from pprint import pprint
def infer_mask(mixture_info):
"""
:param mixture_info:
mixture_info = {
'm1_raw': numpy array containing the raw m1 signal,
'm2_raw': numpy array containing the raw m2 signal,
'm1_tf': numpy array containing the m1 TF representation,
'm2_tf': numpy array containing the m2 TF representation,
'sources_raw': a list of numpy 1d vectors containing the
sources ,
'sources_tf': a list of numpy 2d vectors containing the
TF represeantations of the sources
'amplitudes': the weights that each source contributes to
the mixture of the second microphone
}
:return: A tf 2d matrix corresponding to the dominating source
for each TF bin [0,1,...,n_sources]
"""
sources_complex_spectra = mixture_info['sources_tf']
amplitudes = mixture_info['amplitudes']
n_sources = len(sources_complex_spectra)
assert len(amplitudes) == n_sources, "Length of weights: {} " \
"should be equal to the " \
"number of sources: {}" \
"".format(len(amplitudes),
n_sources)
same_dimensions = [(sources_complex_spectra[i].shape ==
sources_complex_spectra[0].shape)
for i in np.arange(len(sources_complex_spectra))]
assert all(same_dimensions), "All arrays should have the same " \
"dimensions. However, got sizes of {}"\
"".format([x.shape for x in
sources_complex_spectra])
sources_complex_spectra = [amplitudes[i] * sources_complex_spectra[i]
for i in np.arange(n_sources)]
tf_real_sources = [np.abs(tf_complex)
for tf_complex in sources_complex_spectra]
mixture_tensor = np.dstack(tf_real_sources)
dominating_source = np.argmax(mixture_tensor, axis=2)
zipped_tf_labels = dominating_source.astype(np.uint8)
assert np.array_equal(dominating_source, zipped_tf_labels), \
"Zipping the numpy matrix should not yield different labels"
return zipped_tf_labels
| [
"etzinis2@illinois.edu"
] | etzinis2@illinois.edu |
9972d2ca8050c0b4d9a0231a9b533c4390a5e5cc | 42371933210691aff00dbbf79c970572490764c1 | /files/xain_adapter.py | 0f0645afffabace31dc51bc956622e1632b3a156 | [
"MIT"
] | permissive | BPChain/private-xain | e188c9491332dc81e5a8f0821cac023a25bf6a4b | b0fc77dc533e2a4c8622f8499bb746c58b2cf0bd | refs/heads/master | 2022-12-13T09:41:37.616857 | 2018-06-05T08:18:29 | 2018-06-05T08:18:29 | 118,761,437 | 4 | 0 | MIT | 2022-12-08T02:02:40 | 2018-01-24T12:18:29 | JavaScript | UTF-8 | Python | false | false | 1,461 | py | """I provide an adapter for the Xain blockchain api"""
# pylint: disable=no-member
from time import sleep
from statistics_reader.block import Block
from statistics_reader.blockchain_adapter import BlockchainAdapter
from web3 import Web3, HTTPProvider
class XainAdapter(BlockchainAdapter):
"""I am an adapter for the Xain blockchain api"""
def __init__(self, is_miner):
super().__init__(is_miner)
self.web3_rpc = Web3(HTTPProvider('http://127.0.0.1:8545', request_kwargs={'timeout': 120}))
file = open("/root/files/coinbasepwd")
coinbasepwd = file.read()
while not self.web3_rpc.isConnected():
sleep(1)
self.web3_rpc.personal.unlockAccount(self.web3_rpc.eth.accounts[0], coinbasepwd, 0)
self.web3_rpc.miner.start(1)
def fetch_newest_block_number(self) -> int:
return self.web3_rpc.eth.getBlock('latest').number
def fetch_block_with(self, number: int):
return self.web3_rpc.eth.getBlock(number)
def make_block_from(self, raw_block) -> Block:
return Block(raw_block.difficulty, raw_block.transactions,
raw_block.timestamp, raw_block.size)
def hashrate(self) -> int:
return self.web3_rpc.eth.hashrate
def is_mining(self) -> int:
if self.is_miner == '0':
return 0
return 1 if self.web3_rpc.eth.mining else 0
def host_id(self):
return self.web3_rpc.admin.nodeInfo.id
| [
"anton.weltzien@gmail.com"
] | anton.weltzien@gmail.com |
a1e7d70a6c380ecacff8990c1a643540a79ff197 | b6296822efeb5a03ae5b4b9c0b94677b7d3783c8 | /models/wd.py | 79756204621798c94305e1db528f8d4451f4b22f | [] | no_license | AlenUbuntu/CTRLib | 4571bc4e595309936ad1f1da5bf6d616d4c729dc | 1ce1ce59d4c8e723751439018e7daa94996e62e9 | refs/heads/main | 2023-04-11T00:14:51.547245 | 2021-03-25T20:34:04 | 2021-03-25T20:34:04 | 349,613,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py |
import torch
from common.layers import *
class WideAndDeepModel(torch.nn.Module):
    """
    A pytorch implementation of the Wide & Deep model.

    Combines a sparse linear ("wide") component with an embedding + MLP
    ("deep") component and a learnable scalar bias.

    Reference:
        Heng-Tze Cheng, Wide & Deep Learning for Recommender Systems, 2016
    """

    def __init__(self, cfg, field_info, cross_prod_transform=None):
        super(WideAndDeepModel, self).__init__()
        # Wide part: linear model over the (optionally cross-product
        # transformed) sparse features.
        self.linear = SparseFeatureLinear(field_info)
        # Deep part: embedding/input layer feeding an MLP.
        self.input = InputLayer(field_info, cfg.WD.LATENT_DIM, cfg.WD.AGGREGATE)
        # Each hidden width is the previous width scaled by the configured
        # fraction, starting from the input layer's output width.
        hidden_dims = []
        width = self.input.output_dim
        for fraction in cfg.WD.HIDDEN_DIMS_FRACTOR:
            width = int(width * fraction)
            hidden_dims.append(width)
        self.mlp = MultiLayerPerceptron(self.input.output_dim, hidden_dims, cfg.WD.DROPOUT_PROB)
        self.bias = torch.nn.Parameter(torch.zeros(1))
        self.cross_prod_transform = cross_prod_transform

    def forward(self, data):
        """Return the raw (pre-sigmoid) logit: wide part + deep part + bias."""
        if self.cross_prod_transform:
            wide_part = self.linear(self.cross_prod_transform(data))
        else:
            wide_part = self.linear(data)
        deep_part = self.mlp(self.input(data))
        return wide_part + deep_part + self.bias
"ustcgaoy01@hotmail.com"
] | ustcgaoy01@hotmail.com |
26dfeac08449167a930a80d1d44fae4e247ac8ed | d364123a0655bff7e9d725382934fe2c15b5bfc4 | /python3Test/Test/test009.py | d27fb2cf894790461b15d3b1ec2464ef190ccbb4 | [] | no_license | yuan1093040152/SeleniumTest | 88d75361c8419354f56856c326f843a0a89d7ca6 | d155b98702bc46c174499042b43257696b861b5e | refs/heads/master | 2023-08-31T15:00:25.415642 | 2023-08-30T09:26:42 | 2023-08-30T09:26:42 | 227,269,300 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,398 | py | #coding=utf-8
"""
@Author : Yuan Meng
@Time : 2022/6/28 17:24
@Software: PyCharm
Ctrl+shift+v 历史粘贴版
ctrl+alt+空格 自动补全
ctrl+alt+D 分屏
Ctrl+/ 快速注释
"""
import base64,hashlib
import json
from Crypto.Cipher import AES
import binascii
from Crypto.Util.Padding import pad
aa = "0DIGbxUpV2a8TQTf6l4kiKTtrnNaRI1qcMPLxXma82ix0dUGGqNeCE4eYG1ifS8xWL6DR0NHKmflYgcsmW/XZ2MROaTxKQbXq3yLcWZqc8pKcmJbGVhcSXY0mDT5MsmcMCYgti1NJ0WUOHU2Zv6WIP//Q4AstV7/6yBHdFo6UJTGkRmddGAGxO4jdIciDy8vmGk6ot3cCa6GKYfJIhn4/cPXsTkjLY+OQa7BIUsye8+UgjyuiOJx8bMSyWu/FnCK6awaKVLAYdUceayuEvizT4zzGdhDLV+YjqJu7nITL5IupBXSbF5Q4lQO6glviPFZEcKM9RdlPcMhsROzPVtDZswrC7mLphcX5vrZypjmeXdnaP8VfLN7I2xfSozWp5cGZxheCNTnVgX7ayhkHjiBVQoEeuUeESYYlIH0pyJyGOI7uF79e4+jUS2OqNuOlNEg4HSBkX59HggXwiu+3yKsZ/Mfrfhm46VPbWqXHiGRiyXWKnWZd1L//KD927SFpM5UgfP8etVxDjn8tNKKK4kN0FWcYMUBbLd0mQrL8QWAihay2yvpowTlgWZPga4Cr464QRrrj0CJI9lvO1ej3THKTr3K89gWGlzapQqnW+Ot+9sN7PozUQIePBCyVG6myWXcVoTGO2YTQ/TVxvty7YNwZHyMycV7DZUim8HmcMvp/RiAwaY2IOa80bnxWIfFcTnMaGUio2nyB1SX2QPrfg/47OtYkvI+FP0oDIhDhb87+hEdLJ+11LaTUZPIVO2b2d8N7biVGafJLUU2p3NpGuuGdoPYgHLmcXvRGMdI1wxoaUuhW7e1XgSc87qJOG9xViYyJ5ec+yiECDHq8Z91nEvgC1evfPYETY2XtMp3eNSgg5EqFQBMjEYK+z9v92+fObZBmiVqEk+eMgBh98/XOXJFLRnSWQIOFgiuRuCtgUMRX4myo8X+9h8m1f6UENWHV4r+H1qLuNiLnBiu0E/dSaOinSMzVzpG3yzydTwWWtx5W/lgQOFQz5fJTBfj8QNi9+VsZBEe65m93kt5etClJc3FZzesK0R6h0oG67EhQi+dOCmbDE+f3C9uC9u7p+DLvp+sP1oqqu4klwMf/hrcdMwXnwL7rPPTMonXU7Cv4RpNKRu21YIO33JJOM6gdIXedma7PsuV4KVOYazp08yj1fIOv6kgnM4/nI5qDtV4H3zYLc269uAhGa1kg0lO8lAFCXHm3AGKg6agdGahv0aOCtZh77r0gsr9FeZANNMAGb5HtGfpYKixO9PP/I2u4mHVfcNCk1Vs/wvUiTAwAWeUwFHxrvPm9biGJ+Wrjk1M7Gwxorc69rBxmp/XqqRroGUpECS8hAI5M+BLaYp1hDIXk9nUlHR2USpVRkaiLDOLDGmU7af1l3QnZ57r3kpnyTyJY+cYlcuTeDeJmjNtRSsBymlho7Ls4QKSKOf4it+wq7dLoQCroDbdRX+eEibKJe6rrnFOUb1ws3uZPKzygdEFC86XAD5doYVzRCNdSdIxJscRmNcXsrMTXLLRsCsTEF11DWT4jZId1r3nX2EtWx4dfg/bjqkWvTX88WdjIUnIBUJX4nRGH5OItrYrMil8azy3MoMPNxGvzh1Eb6F70EBt+lBOy5oCYHPoaTFaA213Uzldn8mITiL5k4S6tTPpsGdoOEpezMuB69vtW115mP8UgGzo3Lx9ycBQJm9tc6suYCfU4JQN+HY1+DH6QGji099V0E9r2vyqg8qYYB5yjcGL/qLAqvZH1Q0+RZ8Ye9K6jtywIMWwdU3o1s9PRdhoFV5LbZhEw4xBFeC0R1vfh72PyE1I1w=="
bb = "yjGG1V9JYO4/ezGJw8yY3lm390MgKwDjHV1jxZUz+/8="
password = 'ODcyYTUxNGM1N2M2'
# class EncryptDate:
# def __init__(self, key):
# # 初始化密钥
# self.key = key
# # 初始化数据块大小
# self.length = AES.block_size
# # 初始化AES,ECB模式的实例
# self.aes = AES.new(self.key.encode("utf-8"), AES.MODE_ECB)
# # 截断函数,去除填充的字符
# self.unpad = lambda date: date[0:-ord(date[-1])]
# def fill_method(self, aes_str):
# '''pkcs7补全'''
# pad_pkcs7 = pad(aes_str.encode('utf-8'), AES.block_size, style='pkcs7')
#
# return pad_pkcs7
#
# def encrypt(self, encrData):
# # 加密函数,使用pkcs7补全
# res = self.aes.encrypt(self.fill_method(encrData))
# # 转换为base64
# msg = str(base64.b64encode(res), encoding="utf-8")
#
# return msg
# def decrypt(self, decrData):
# # base64解码
# res = base64.decodebytes(decrData.encode("utf-8"))
# # 解密函数
# msg = self.aes.decrypt(res).decode("utf-8")
#
# return self.unpad(msg)
def xx(aa):
    """Decrypt an AES-ECB, base64-encoded payload and parse it as JSON.

    The key is the module-level ``password``.  The plaintext is expected to
    be PKCS#7-padded JSON; the padding is stripped by inspecting the last
    character, whose ordinal equals the number of padding bytes.

    :param aa: base64-encoded ciphertext string
    :return: the decoded JSON object (dict/list)
    """
    # Set up an AES cipher in ECB mode keyed with the module-level password.
    aes = AES.new(password.encode("utf-8"), AES.MODE_ECB)
    # base64-decode, decrypt, then strip the PKCS#7 padding.
    res = base64.decodebytes(aa.encode("utf-8"))
    msg = aes.decrypt(res).decode("utf-8")
    dd = msg[0:-ord(msg[-1])]
    # Fixed: the original left a debug print of the value's type and two
    # print statements *after* the return (unreachable dead code).
    return json.loads(dd)
# print(xx(aa))
# Scratch check: JSON-encode an empty string and show the result ('""').
a = ''
b = json.dumps(a)
print(b)
# if __name__ == '__main__':
# key的长度需要补长(16倍数),补全方式根据情况而定,未补齐会报错
# key字符长度决定加密结果,长度16:加密结果AES(128),长度32:结果就是AES(256)
# eg = EncryptDate("ODcyYTUxNGM1N2M2")
# # 加密字符串长同样需要16倍数:需注意,不过代码中pad()方法里,帮助实现了补全(补全方式就是pkcs7)
# # en = eg.encrypt(aa)
# de = eg.decrypt(aa)
# # print(f"加密结果:{en}")
# print(f"解密结果:{de}")
| [
"1093040152@qq.com"
] | 1093040152@qq.com |
fd7afc9a446cecb45eebfc6213eba6e8904175a9 | 4565630f20dd7ab7f22b6a829d9c93b9d3a7aefa | /main/migrations/0001_initial.py | 5a33b3e23a310e66362a2b101449973a1ef38874 | [] | no_license | Normal66/nlpsite | fb6d76de57e82adbb243d41f1f0a99e6d8374d3b | 5985e51f60a0eec646226069e9374859ce27e24e | refs/heads/main | 2023-06-29T14:40:05.490268 | 2021-07-30T16:30:15 | 2021-07-30T16:30:15 | 391,020,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | # Generated by Django 3.2.5 on 2021-07-28 06:04
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``m_SprMenu`` table.

    Do not hand-edit the operations: Django compares them against the
    recorded migration state.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Menu entry: caption, optional icon, target URL, sort order and a
        # visibility flag.  verbose_name strings are deliberately kept in the
        # project's original language.
        migrations.CreateModel(
            name='m_SprMenu',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=15, verbose_name='Пункт меню')),
                ('icons', models.ImageField(blank=True, upload_to='menu/img/', verbose_name='Иконка')),
                ('links', models.URLField(verbose_name='Ссылка')),
                ('order', models.PositiveSmallIntegerField(default=0, verbose_name='Порядок')),
                ('cansw', models.BooleanField(default=True, verbose_name='Отображать')),
            ],
            options={
                'verbose_name': 'Пункт меню',
                'verbose_name_plural': 'Меню',
                'ordering': ['order'],
            },
            managers=[
                ('object', django.db.models.manager.Manager()),
            ],
        ),
    ]
| [
"normal66@yandex.ru"
] | normal66@yandex.ru |
419bee1b9fe65c8d11a7d4b70693ec15423d958f | cc578cec7c485e2c1060fd075ccc08eb18124345 | /cs15211/24Game.py | ea18464f94d06f61725723f26fa46ca83987f4e3 | [
"Apache-2.0"
] | permissive | JulyKikuAkita/PythonPrac | 18e36bfad934a6112f727b4906a5e4b784182354 | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | refs/heads/master | 2021-01-21T16:49:01.482561 | 2019-02-07T06:15:29 | 2019-02-07T06:15:29 | 91,907,704 | 1 | 1 | Apache-2.0 | 2019-02-07T06:15:30 | 2017-05-20T18:12:53 | Python | UTF-8 | Python | false | false | 4,885 | py | __source__ = 'https://leetcode.com/problems/24-game/description/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 679. 24 Game
#
# You have 4 cards each containing a number from 1 to 9.
# You need to judge whether they could operated through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator.
#
# For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together.
# For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
#
# Companies
# Google
# Related Topics
# Depth-first Search
#
#868ms 6.09%
import unittest
import itertools
from operator import truediv, mul, add, sub
from fractions import Fraction
class Solution(object):
    def judgePoint24(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Exact-arithmetic search: repeatedly pick an ordered pair of values,
        replace the pair with the result of one of +, -, *, /, and recurse
        until a single value remains.  Fractions avoid any floating-point
        tolerance issues (e.g. 4 / (1 - 2/3) is exactly 12).

        Generalized: the original hard-coded exactly four cards
        (itertools.combinations(range(4), 2)); this version accepts any
        non-empty list of numbers while giving the same answers for four.
        """
        def search(values):
            # Base case: one value left - is it exactly 24?
            if len(values) == 1:
                return values[0] == 24
            # Ordered pairs cover both x - y / y - x and x / y / y / x.
            for i, j in itertools.permutations(range(len(values)), 2):
                x, y = values[i], values[j]
                rest = [values[k] for k in range(len(values)) if k != i and k != j]
                candidates = [x + y, x - y, x * y]
                if y:  # skip division by zero
                    candidates.append(x / y)
                if any(search(rest + [c]) for c in candidates):
                    return True
            return False

        return search([Fraction(v) for v in nums])
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # Placeholder sanity test so the unittest runner has a target.
        self.assertEqual(1, 1)
# Run the placeholder unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/24-game/solution/
Given: (a, b, c, d) - (A tuple of 4)
Generate:
((a+b),c,d) ((a-b),c,d) ((b-a),c,d) ((a*b),c,d) ((a/b),c,d) ((b/a),c,d)
((a+c),b,d) ................................................................. ((c/a),b,d)
((a+d),b,c) ................................................................. ((d/a),b,c)
(a,(b+c),d) ................................................................. (a,(c/b),d)
(a,(b+d),d) ................................................................. (a,(d/b),d)
(a,b,(c+d)) ................................................................. (a,b,(d/c))
There are 36 (6*6) such tuples. Of these, + & - are not order dependent. That is 2+3 = 3+2.
But / & - are order dependent. i.e. 2/3 != 3/2. These look like (e,f,g) i.e. a tuple of 3 now.
Carrying out similar reductions gives 18 (6*3) tuples for each of the above-generated tuples.
These now look like (h, i) i.e. a tuple of 2 now.
Similiar, the final reduction now yields 6 answers (a+b, a-b, a*b, a/b, b-a, b/a)
for each of the above-generated tuple.
Thus in total 36x18x6 final values can be generated using the 4 operators and 4 initial values.
Algo: Generate all such answers using dfs method and stop when it's 24.
Catches:
Use double instead of int
Be careful about the classical divide by zero error
#18ms 56.03%
class Solution {
public boolean judgePoint24(int[] nums) {
ArrayList A = new ArrayList<Double>();
for (int v: nums) A.add((double) v);
return solve(A);
}
private boolean solve(ArrayList<Double> nums) {
if (nums.size() == 0) return false;
if (nums.size() == 1) return Math.abs(nums.get(0) - 24) < 1e-6;
for (int i = 0; i < nums.size(); i++) {
for (int j = 0; j < nums.size(); j++) {
if (i != j) {
ArrayList<Double> nums2 = new ArrayList<Double>();
for (int k = 0; k < nums.size(); k++) if (k != i && k != j) {
nums2.add(nums.get(k));
}
for (int k = 0; k < 4; k++) {
if (k < 2 && j > i) continue;
if (k == 0) nums2.add(nums.get(i) + nums.get(j));
if (k == 1) nums2.add(nums.get(i) * nums.get(j));
if (k == 2) nums2.add(nums.get(i) - nums.get(j));
if (k == 3) {
if (nums.get(j) != 0) {
nums2.add(nums.get(i) / nums.get(j));
} else {
continue;
}
}
if (solve(nums2)) return true;
nums2.remove(nums2.size() - 1);
}
}
}
}
return false;
}
}
''' | [
"b92701105@gmail.com"
] | b92701105@gmail.com |
0fbd94e2a660cbe47f3fd584e7387b55601c811b | be72929423f60d596078cdc671dd2f34ca443cd0 | /Todo/todo_app/models.py | 945eb5e2dc7529b4f0f7b41af2c1c224be3d47a6 | [] | no_license | GeorgeGithiri5/Django_Postgresql_App | 3b845ff749aae436c886a2ff2a059c5da71972d8 | 1eaf827aed54845b6bb4c7f3eda86ad34e38178f | refs/heads/master | 2022-12-17T08:35:45.219542 | 2020-09-23T03:34:46 | 2020-09-23T03:34:46 | 297,686,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.db import models
class Todo(models.Model):
    """A single todo item."""
    # Free-form text body of the todo entry.
    content = models.TextField()
| [
"georgegithiri002@gmail.com"
] | georgegithiri002@gmail.com |
09ef442b86779731be186e1bc9cea0b1acd17697 | 4385068eddc701aa884c32421720c30ca7eafee4 | /Timonator.py | 4b7ba6a76ea279ad79bb30fbfa89cd5c4946ced0 | [] | no_license | benjjo/Timonator | 72faef3cc111f8c9aa82463cb1b26362cd4c17cd | bad2e76c81beac84f1e4d966b1fd902953fd4041 | refs/heads/master | 2023-08-12T12:28:14.324387 | 2021-09-24T06:16:11 | 2021-09-24T06:16:11 | 327,108,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,934 | py | import os
import tkinter as tk
from tkinter import filedialog
import pandas as pd
import matplotlib.pyplot as plt
import time
from datetime import datetime, timedelta
# --- module-level state shared by the interactive workflow ---
global_file_name = str()  # path of the TiMon log selected by the user
# NOTE(review): this binds the datetime *class*, not an instance; it is
# replaced by a real datetime in set_start_date_var().
start_date = datetime
global_df = pd.DataFrame()  # the parsed TiMon log
list_of_bitsets = []  # unique bitset variable ids from the MVB list
mvb_dictionary = {}  # maps bitset var id -> {bit name: bit position}
folder_name = 'Timon_Plots'  # output directory for generated .png plots
def set_timon_log_filename():
    """
    Ask the user to pick a TiMon log file and store its path in the
    module-level ``global_file_name``.

    The file must be a raw TiMon log generated from timon_embedded.xml.
    """
    global global_file_name
    hidden_root = tk.Tk()
    hidden_root.withdraw()  # only the file dialog is wanted, not a main window
    print('Select a TiMon Logfile: ')
    global_file_name = filedialog.askopenfilename(title='Select TiMon log file')
    hidden_root.destroy()
def set_global_df(df):
    """Replace the module-level ``global_df`` with *df*."""
    global global_df
    global_df = df
def read_and_clean_timon_log():
    """
    Read the previously selected TiMon CSV log into a DataFrame.

    Decimal commas are normalised to dots so numeric columns parse, and the
    TIME column (seconds since start of recording) is cast to float.

    Pre-condition: global_file_name contains a file name.
    :return: pd.DataFrame() object
    """
    global global_file_name
    df = pd.read_csv(global_file_name, sep=';')
    df.replace(',', '.', regex=True, inplace=True)
    df['TIME'] = df['TIME'].astype('float64')
    # Fixed: the original called convert_bitsets_to_int() here, but that
    # helper works on the module-level global_df, which has not been set at
    # this point; setup_dataframe() converts the bitsets after the global is
    # actually assigned.
    return df
def set_start_date_var():
    """
    Derive the recording start time from the log file's name and store it in
    the module-level ``start_date``.

    TiMon names its logs with the TCMS HMI time (YYYY_MM_DD_HH_MM_SS), so
    the file name serves as a fallback epoch when no PLC_TIME variable was
    recorded; TIME offsets are then added to this value.
    """
    global start_date
    global global_file_name
    base_name = global_file_name.split('/')[-1]
    stamp = base_name.split('.')[0]
    parts = [int(field) for field in stamp.split('_')]
    start_date = datetime(*parts)
    print(f'Set start_date var to: {start_date}')
def set_mvb_dictionary():
    """
    Load the bitset dictionary from 'mvb_list.ben' in the working directory
    into the module-level ``mvb_dictionary``.  If the file is missing, offer
    to build a fresh one from an MVB spreadsheet; otherwise abort.

    NOTE(review): eval() executes arbitrary code from 'mvb_list.ben' - only
    load files from a trusted source.
    """
    global mvb_dictionary
    global list_of_bitsets
    try:
        mvb_dictionary = eval(open('mvb_list.ben', 'r').read())
    except FileNotFoundError:
        print('MVB List not present in working directory.')
        choice = input('Do you wish to create one? [Y to continue]')
        # NOTE(review): this is a substring test - an empty reply ('' in str)
        # also counts as "yes"; confirm that is intended.
        if choice in 'YyYESYesyes':
            update_mvb_dictionary(list_of_bitsets, make_bitset_df())
        else:
            abort_protocol()
def user_defined_mvb_list():
    """
    Open a file dialog for the user to locate the MVB spreadsheet
    (E39_CALEDONIAN_MVB_Seated.xlsx) and return the chosen path.

    :return: str
    """
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    print('Select the E39_CALEDONIAN_MVB_Seated.xlsx Document: ')
    chosen_path = filedialog.askopenfilename(title='Select E39_CALEDONIAN_MVB_Seated.xlsx Document')
    dialog_root.destroy()
    return chosen_path
def create_datetime_column():
    """
    Build a 'Time Date' index on the module-level ``global_df``.

    When the log contains PLC time columns, the index is the Unix-seconds
    PLC_TIME shifted by the PLC_TIME_CV offset (treated as hours).
    Otherwise the file-name timestamp (set_start_date_var) plus the TIME
    seconds-since-start column is used.
    """
    global start_date
    global global_df
    if 'PLC_TIME(Timedate48)' in global_df.columns and 'PLC_TIME_CV(Enum2)' in global_df.columns:
        # PLC_TIME is Unix seconds; PLC_TIME_CV is added as whole hours.
        global_df['Time Date'] = pd.to_datetime(global_df['PLC_TIME(Timedate48)'], unit='s') + pd.to_timedelta(global_df['PLC_TIME_CV(Enum2)'], unit='h')
        global_df = global_df.set_index(['Time Date'])
    else:
        # Fall back to the timestamp encoded in the log file's name.
        set_start_date_var()
        global_df['Time Date'] = global_df['TIME'].apply(lambda x: start_date + timedelta(seconds=x))
        global_df = global_df.set_index(['Time Date'])
    # Remove the duplicated index data, otherwise it will come back to bite us when we want to reference a cell.
    global_df = global_df[~global_df.index.duplicated(keep='first')]
def create_excel_format():
    """
    Prompts the user to enter a location to save an Excel friendly version of the timon
    csv log. This file will include dates in a human readable format as opposed to the
    standard timon TIME in seconds or UNIX seconds.
    """
    global global_file_name
    global global_df
    root = tk.Tk()
    root.withdraw()
    # Default the suggested file name to EXCEL_<original log name>.
    excel_name = global_file_name.split('/')[-1]
    global_df.to_csv(filedialog.asksaveasfilename(defaultextension='.csv',
                                                  title='EXCEL_Timon', filetypes=[('csv files', '*.csv')],
                                                  initialfile=f'EXCEL_{excel_name}'))
    root.destroy()
def convert_bitsets_to_int():
    """
    Convert every hexadecimal bitset column of the module-level ``global_df``
    to integers, in place.  Only object-dtype columns whose name contains
    'Bitset' are touched, so already-numeric columns are left alone.
    """
    global global_df
    hex_columns = [
        name for name in global_df.columns
        if 'Bitset' in name and global_df[name].dtypes == 'object'
    ]
    for name in hex_columns:
        global_df[name] = global_df[name].apply(int, base=16)
def add_bitset_sub_columns(df, mega_mvb_dic, bitset):
    """
    Adds the individual bitset sub-columns to the df.
    If they are a bitset value, they will be plotted.
    :return: pd.DataFrame() object

    NOTE(review): this helper looks broken and appears unused - prefer
    plot_a_set_of_bitsets(), which does the same job correctly:
    - the print below concatenates a str with a DataFrame column and reads
      sub-columns before they are created (TypeError/KeyError);
    - ``df[mega_mvb_dic[bitset]]`` indexes the frame with a *dict*;
    - the lambda ignores its argument and re-uses the whole selection.
    """
    global folder_name
    if bitset in df.columns:
        # Strip the '(BitsetN)' suffix to get the bare variable id.
        bitset = bitset.split('(')[0]
        if bitset in mega_mvb_dic:
            for bitset_sub_col in mega_mvb_dic[bitset]:
                bit = mega_mvb_dic[bitset][bitset_sub_col]
                print('\n :::: ' + df[bitset_sub_col] + ' ::::\n')
                df[bitset_sub_col] = df[mega_mvb_dic[bitset]].apply(lambda x: get_bitset_value(x, bit))
                df[bitset].plot(figsize=(16, 4), legend=True, linewidth=2)
                plt.savefig(f'{folder_name}/{bitset}.png', dpi=300, facecolor='w', edgecolor='w',
                            orientation='landscape', format=None, transparent=False, pad_inches=0.1)
        else:
            print('Something went terribly wrong. Perhaps the mvb_list.ben is empty.')
            abort_protocol()
    return df
def make_local_plots_directory():
    """
    Create the plots output folder (``folder_name``) in the working
    directory.  An existing folder is tolerated with a warning; a
    permission failure aborts the program.
    """
    global folder_name
    try:
        os.mkdir(folder_name)
    except FileExistsError:
        print(f'Directory {folder_name} exists, you may overwrite data.')
        time.sleep(3)
        os.system('cls')
    except PermissionError:
        print('You are running this script in a folder that wont allow you to write to it.')
        abort_protocol()
def save_individual_plots_to_png(list_of_cols, df, remove_time_columns=True):
    """
    Render one .png per variable in *list_of_cols* into the plots folder.

    Time/date columns (used for the x-axis index) are dropped first by
    default.  Column types get tailored plot settings: the master-coach id
    gets a fixed y-range, booleans and Enum2 values are drawn as thin lines,
    and bitset columns are expanded bit-by-bit via plot_a_set_of_bitsets().

    :param list_of_cols: column names to plot
    :param df: DataFrame holding the log data
    :param remove_time_columns: drop columns containing 'TIME' when True
    """
    global folder_name
    global mvb_dictionary
    set_mvb_dictionary()
    if not os.path.exists(folder_name):
        make_local_plots_directory()
    if remove_time_columns:
        print('Removing columns that are used to make the index (x-axis).')
        # Fixed: the original removed items from list_of_cols while iterating
        # over it, which silently skips the element following each removal.
        for col in [c for c in list_of_cols if 'TIME' in c]:
            print(f'removing {col} from plot set.')
            list_of_cols.remove(col)
    if 'HMI_SCREEN(Unsigned8)' in list_of_cols:
        print('Processing HMI Screen states.')
        df = HMI_Screen_populate(df)
        print('Adding HMI Screen state data to Plot set.')
        list_of_cols = df.columns
    print('Processing data. This can take a while.')
    print('Keep an eye on the Timon_Plots folder if you are worried.')
    for col in list_of_cols:
        if col == 'PLC_MASTER_COACH(Unsigned16)':
            # Coach numbers live in the 15001-15011 band; pin the y-range.
            df[col].plot(figsize=(16, 4), legend=True, ylim=(15001, 15011), linewidth=5)
            plt.savefig(f'{folder_name}/{col}.png', dpi=300, facecolor='w', edgecolor='w', orientation='landscape',
                        format=None, transparent=False, pad_inches=0.1)
            plt.close()
        elif 'Boolean' in col:
            df[col].plot(figsize=(16, 4), legend=True, linewidth=1)
            plt.savefig(f'{folder_name}/{col}.png', dpi=300, facecolor='w', edgecolor='w', orientation='landscape',
                        format=None, transparent=False, pad_inches=0.1)
            plt.close()
        elif 'Bitset' in col and col.split('(')[0] in mvb_dictionary:
            print(f'Processing {str(col)}. Please sit and chill.')
            plot_a_set_of_bitsets(df, mvb_dictionary, col)
            print(f'Completed: {str(col)}')
            plt.close()
        elif 'Enum2' in col:
            df[col].plot(figsize=(16, 4), legend=True, ylim=(0, 3), linewidth=1)
            plt.savefig(f'{folder_name}/{col}.png', dpi=300, facecolor='w', edgecolor='w', orientation='landscape',
                        format=None, transparent=False, pad_inches=0.1)
            plt.close()
        elif 'TIME' in col:
            pass  # index columns are never plotted
        else:
            df[col].plot(figsize=(16, 4), legend=True)
            plt.savefig(f'{folder_name}/{col}.png', dpi=300, facecolor='w', edgecolor='w', orientation='landscape',
                        format=None, transparent=False, pad_inches=0.1)
            plt.close()
def plot_a_set_of_bitsets(df, mvb_dic, key):
    """
    Expand the bitset column *key* into one plot per named bit and save each
    as a .png in the plots folder.  Bits named 'reserve' are skipped.

    :param df: DataFrame holding the log data
    :param mvb_dic: mapping of bitset var id -> {bit name: bit position}
    :param key: full column name, e.g. 'SOMEVAR(Bitset16)'
    """
    global folder_name
    sub_key = key.split('(')[0]
    if sub_key in mvb_dic and key in str(df.columns):
        for col in mvb_dic[sub_key]:
            if 'reserve' not in col:
                new_col = f'{sub_key}_{col}'
                df[new_col] = df[key].copy().apply(lambda x: get_bitset_value(x, mvb_dic[sub_key][col]))
                df[new_col].plot(figsize=(16, 4), legend=True, linewidth=2)
                # Fixed: the original collision check tested a path without
                # the .png extension and never refreshed the tested name, so
                # an actual collision looped forever and the numbered suffix
                # was never part of the saved filename.
                save_name = new_col
                counter = 0
                while os.path.exists(f'{folder_name}/{save_name}.png'):
                    save_name = f'{new_col}{counter}'
                    counter += 1
                plt.savefig(f'{folder_name}/{save_name}.png', dpi=300, facecolor='w', edgecolor='w',
                            orientation='landscape', format=None, transparent=False, pad_inches=0.1)
                plt.close()
def lifeword_plot(df: pd.DataFrame, file_name: str, lifeword: str):
    """
    Uses a keyword (or several, separated by '-') to search the column names
    and collect every variable that matches.  The matches are saved as
    individual .png plots and then shown together as stacked subplots.

    :param df: DataFrame holding the log data
    :param file_name: log file path; its basename is used in the plot title
    :param lifeword: dash-separated search terms, e.g. 'DO1-DO2-ASDO'
    """
    global folder_name
    if not os.path.exists(folder_name):
        make_local_plots_directory()
    lw_search_list = lifeword.split('-')
    lw_list = []
    # Collect every column whose name contains any of the search words.
    for search_word in lw_search_list:
        for col in df.columns:
            if search_word in col:
                lw_list.append(col)
    lw_df = df[lw_list]
    title_name = file_name.split('/')[-1]
    save_individual_plots_to_png(list_of_cols=lw_list, df=lw_df)
    lw_df.plot(subplots=True, figsize=(16, 8), legend=True, xlabel='Time Date',
               title=f'{lifeword} Lifeword :: {title_name}')
    plt.show()
def plot_a_single_variable(df, get_choices=True, col=None):
    """
    Plot one column of *df* and save it as a .png in the plots folder.

    When *get_choices* is True the available column names are listed and the
    user is prompted to type one; otherwise *col* is plotted directly.

    :param df: DataFrame holding the log data
    :param get_choices: prompt interactively for the column when True
    :param col: column name to plot when get_choices is False
    """
    global folder_name
    if not os.path.exists(folder_name):
        make_local_plots_directory()
    if get_choices:
        os.system('cls')
        for name in df.columns:
            print(name)
        col = input('\nType a variable to inspect: ')
    try:
        df[col].plot(figsize=(16, 8), legend=True, xlabel='Time Date', title=col)
        plt.savefig(f'{folder_name}/{col}.png', dpi=300, facecolor='w', edgecolor='w', orientation='landscape',
                    format=None, transparent=False, pad_inches=0.1)
        plt.show()
    except (KeyError, ValueError):
        # Fixed: a missing column raises KeyError, which the original
        # 'except ValueError' never caught (inspect_a_single_variable
        # already handled this case correctly).
        print(f'{col} does not exist. Typo?')
        abort_protocol()
def inspect_a_single_variable(df):
    """
    List every column of *df*, prompt the user to pick one, and show it as an
    interactive matplotlib plot.  An unknown name aborts the program.
    """
    os.system('cls')
    for name in df.columns:
        print(name)
    chosen = input('\nType a variable to inspect: ')
    try:
        df[chosen].plot(figsize=(16, 8), legend=True, xlabel='Time Date', title=chosen)
        plt.show()
    except KeyError:
        print(f'{chosen} does not exist. Typo?')
        abort_protocol()
def make_bitset_df():
    """
    Creates and returns a Pandas Data Frame with the bitset data.
    Calls user_defined_mvb_list(), which prompts the user to identify an
    appropriate MVB list of type xlsx.
    :return: pd.DataFrame() object
    """
    global global_df
    # NOTE(review): this copy is immediately overwritten by read_excel below;
    # it looks like dead code that can be removed.
    bitsetdf = global_df.copy()
    mvb_list = user_defined_mvb_list()
    cols = ['VarId', 'VarType', 'Comment0', 'Comment1']
    bitsetdf = pd.read_excel(mvb_list, sheet_name='Variables', header=0, usecols=cols)
    # Forward-fill the merged-cell ids so every bit row carries its variable.
    bitsetdf['VarId'].ffill(inplace=True)
    bitsetdf['VarType'].ffill(inplace=True)
    bitsetdf = bitsetdf.dropna(how='all')
    # Keep only BITSET-typed variables.
    bitsetdf = bitsetdf[bitsetdf.VarType.str.contains('BITSET', case=True)]
    # Get indexes of the header rows where Comment0 is the literal 'Bits'
    indexNames = bitsetdf[(bitsetdf['Comment0'] == 'Bits')].index
    # Delete these row indexes from dataFrame
    bitsetdf.drop(indexNames, inplace=True)
    bitsetdf.reset_index(inplace=True)
    return bitsetdf
def map_bitrange_to_value(bitset_variable, bit_df):
    """
    Maps the bitset range of bits to the associated variable name. i.e. {'Bit 1':0, 'Bit 2':1}
    :return: dictionary

    NOTE(review): mixes label-based and positional indexing
    (``bit_df.index[row_num]`` treats a label as a position) - this only
    works because make_bitset_df() resets the index to 0..N-1; confirm.
    """
    bitset_dic = {}
    # Key: bit name (Comment1); value: bit position (Comment0).
    for row_num in bit_df.loc[bit_df['VarId'] == bitset_variable, 'Comment0'].index:
        bitset_dic[bit_df.loc[bit_df.index[row_num], 'Comment1']] = bit_df.loc[bit_df.index[row_num], 'Comment0']
    return bitset_dic
def set_list_of_bitsets(bit_df):
    """
    Store the unique bitset variable ids from *bit_df* in the module-level
    ``list_of_bitsets``.
    """
    global list_of_bitsets
    list_of_bitsets = bit_df['VarId'].unique()
def dic_of_all_bitsets_to_bitrange(bitset_list, bit_df):
    """
    Build {var id: {bit name: bit position}} for every bitset variable.
    i.e. {'Var 1': {'Bit 1': 0, 'Bit 2': 1}, 'Var 2': {...}}

    :param bitset_list: iterable of bitset variable ids
    :param bit_df: DataFrame produced by make_bitset_df()
    :return: dictionary
    """
    return {var_id: map_bitrange_to_value(var_id, bit_df) for var_id in bitset_list}
def update_mvb_dictionary(bitset_list, bit_df):
    """
    Build a fresh bitset dictionary from *bit_df*, save it to a user-chosen
    '.ben' file (default mvb_list.ben), and reload it into the module-level
    ``mvb_dictionary``.
    """
    global list_of_bitsets
    root = tk.Tk()
    root.withdraw()
    file_out = filedialog.asksaveasfilename(defaultextension='.ben',
                                            title='Update MVB list', filetypes=[('Benjo files', '*.ben')],
                                            initialfile='mvb_list.ben')
    root.destroy()
    print('Please wait. This may take a minute or two...')
    # An empty caller-supplied list means "derive it from the spreadsheet".
    if not bitset_list:
        set_list_of_bitsets(bit_df)
        bitset_list = list_of_bitsets
    time.sleep(3)
    mega_mvb_dict = dic_of_all_bitsets_to_bitrange(bitset_list, bit_df)
    print(f'{len(mega_mvb_dict)} variables with corresponding bitsets added.')
    time.sleep(1)
    # file_out = 'mvb_list.ben'
    try:
        # Strip NaN placeholder entries before persisting the repr.
        with open(file_out, 'w') as fout:
            fout.write(str(mega_mvb_dict).replace(', nan: nan', ''))
    except (FileExistsError, PermissionError):
        print(f'Failed to overwrite file {file_out}.')
        abort_protocol()
    print('SUCCESS!')
    set_mvb_dictionary()
def abort_protocol():
    """Print an ABORTING banner, pause briefly, and terminate the program."""
    print("""
 _    ____    ___   ____  _____  ___  _   _   ____
   / \ | __ ) / _ \ |  _ \ |_   _||_ _|| \ | | / ___|
  / _ \ | _ \ | | | || |_) | | |   | |  | \| || | _
 / ___ \ | |_) || |_| ||  _ <  | |   | | | |\ || |_| |
/_/   \_\|____/ \___/ |_| \_\ |_|  |___||_| \_| \____|
    """)
    time.sleep(3)
    quit()
def plot_bitset_data(list_of_cols, df, remove_time_columns=True):
    """
    Save one .png per named bit for every bitset column in *list_of_cols*.

    :param list_of_cols: column names to consider
    :param df: DataFrame holding the log data
    :param remove_time_columns: drop columns containing 'TIME' when True
    """
    global folder_name
    global mvb_dictionary
    set_mvb_dictionary()
    if not os.path.exists(folder_name):
        make_local_plots_directory()
    if remove_time_columns:
        # Fixed: the original removed items from list_of_cols while iterating
        # over it, which silently skips the element following each removal.
        for col in [c for c in list_of_cols if 'TIME' in c]:
            print(f'removing {col}')
            list_of_cols.remove(col)
    print('Populating Timon_Plots folder. This may take some time with big files.')
    for col in list_of_cols:
        if 'Bitset' in col and col.split('(')[0] in mvb_dictionary:
            plot_a_set_of_bitsets(df, mvb_dictionary, col)
            plt.close()
def get_bitset_value(cell_value, bit):
    """
    Returns the bit value associated with the variable.
    Shifts the bit to be examined into the LSB column and masks it.
    (The original round-tripped through bin()/int(), which was needless work.)

    :param cell_value: integer bitset value
    :param bit: zero-based bit position to extract
    :return: int 1 or 0
    """
    return (cell_value >> bit) & 1
def HMI_Screen_populate(df):
    """
    Expand the numeric HMI_SCREEN(Unsigned8) column into one 0/1 indicator
    column per known screen, so each screen can be plotted individually.

    NOTE(review): df.loc with a missing label raises KeyError, not
    IndexError, so the except clause below likely never fires - confirm.
    The per-row loop is also slow on large logs; a vectorised comparison
    would behave the same.

    :param df: DataFrame holding the log data
    :return: the same DataFrame with the indicator columns added
    """
    # Screen id -> human-readable screen name (from the HMI specification).
    HMI_Screen_dic = {1: 'Train Formation Confirmation screen',
                      2: 'Train Formation screen',
                      3: 'Controls screen',
                      4: 'HVAC screen',
                      5: 'Lighting screen',
                      6: 'Toilets screen',
                      7: 'CCTV screen',
                      8: 'Alarms screen',
                      9: 'Brake Status screen',
                      20: 'Password screen (Maintenance)',
                      21: 'Alarms screen (Maintenance)',
                      22: 'Controls screen (Maintenance)',
                      23: 'HVAC screen (Maintenance)',
                      24: 'Lighting screen (Maintenance)',
                      25: 'Buses communications screen (Maintenance)',
                      26: 'Groups screen',
                      27: 'Log fault screen (Maintenance)',
                      28: 'Versions screen (Maintenance)',
                      100: 'Degraded mode screen'}
    # Start every indicator column at 0, then set 1 where the screen is active.
    for col in HMI_Screen_dic.values():
        df[col] = 0
    for cell_index in df.index.values:
        try:
            cell_value = int(df.loc[cell_index, 'HMI_SCREEN(Unsigned8)'])
            if cell_value in HMI_Screen_dic.keys():
                df.loc[cell_index, HMI_Screen_dic[cell_value]] = 1
        except IndexError:
            print(f'Bad index at: {str(cell_index)}')
    return df
def print_group():
    """Clear the console and print the interactive menu of options 1-6."""
    os.system('cls')
    print("""
 __ __
| \/ | ___ _ __ _ _
| |\/| | / _ \| '_ \ | | | |
| | | || __/| | | || |_| |
|_| |_| \___||_| |_| \__,_|
1. Look at data using a keyword search
2. Inspect a single variable
3. Plot all variables to png
4. Create an Excel friendly version
5. Plot the available bitsets
6. Update the MVB list with a new version
[Defaults to V2.34]
    """)
def setup_dataframe():
    """
    Interactive load pipeline: pick a log file, read and clean it, build the
    time-date index, and decode the bitset columns - all into the
    module-level ``global_df``.
    """
    global global_df
    set_timon_log_filename()
    set_global_df(read_and_clean_timon_log())
    create_datetime_column()
    convert_bitsets_to_int()
    # NOTE(review): re-assigning global_df to itself is a no-op; confirm it
    # can be dropped.
    set_global_df(global_df)
def main():
    """Load a TiMon log interactively, show the menu, and dispatch the
    user's choice (1-6) to the matching workflow."""
    global global_df
    global global_file_name
    global list_of_bitsets
    setup_dataframe()
    print_group()
    try:
        # Non-numeric input raises ValueError and is treated as a bad choice.
        choice = int(input('Select an option: '))
        if choice == 1:
            os.system('cls')
            print("""
Input a search word to look for in the timon log.
Here's a few suggestions: FDS, GWE, HMI, PLC, ASDO, EVR, DO1, DO2
If you want to add a few options at once, separate the values using the dash -
eg. DO1-DO2-ASDO
            """)
            search_term = input('Input a search term here: ')
            lifeword_plot(global_df, global_file_name, search_term)
        elif choice == 2:
            plot_a_single_variable(global_df)
        elif choice == 3:
            current_list_of_cols = list(global_df.columns)
            save_individual_plots_to_png(list_of_cols=current_list_of_cols, df=global_df)
        elif choice == 4:
            create_excel_format()
        elif choice == 5:
            current_list_of_cols = list(global_df.columns)
            plot_bitset_data(list_of_cols=current_list_of_cols, df=global_df)
        elif choice == 6:
            update_mvb_dictionary(list_of_bitsets, make_bitset_df())
        else:
            print('No selection made. Aborting.')
            time.sleep(3)
    except ValueError:
        print('Poor selection choice.')
        abort_protocol()
# Splash banner: shown once at import time (it runs before the __main__
# guard), then the interactive menu starts when executed as a script.
os.system('cls')
print("""
 _____ _ _
|_ _|(_) _ __ ___ ___ _ __ __ _ | |_ ___ _ __
| | | || '_ ` _ \ / _ \ | '_ \ / _` || __| / _ \ | '__|
| | | || | | | | || (_) || | | || (_| || |_ | (_) || |
|_| |_||_| |_| |_| \___/ |_| |_| \__,_| \__| \___/ |_|
V2.0 beta, now with more bitsets!
 ______
<((((((\\\\\\
 / . }\\
 ;--..--._|}
 (\ '--/\--' )
 \\\\ | '-' :'|
 \\\\ . -==- .-|
 \\\\ \.__.' \--._
 [\\\\ __.--| // _/'--.
 \ \\\\ .'-._ ('-----'/ __/ \\
 \ \\\\ / __>| | '--. |
 \ \\\\ | \ | / / /
 \ '\ / \ | | _/ /
 \ \ \ | | / /
 \ \ \ /
 ** Ben McGuffog 2021
    """)
time.sleep(3)
# Entry point: run the interactive menu when executed as a script.
if __name__ == "__main__":
    main()
| [
"benjjo@gmail.com"
] | benjjo@gmail.com |
b3824074ac433a21af52219d56f5c1c32c9199db | aa7342ae2e7b786ddfcc2fa290f091ee60b70b98 | /doerry_board.py | 1ed3a8eea3fb49b257f144d35b35f700b0f32ed7 | [] | no_license | Clarissac/cs470_project4 | 3c90e962bd0b4e2129378c6c3d84727018e18657 | 0927d34d718d035d87b1a6c252f2ea4d70607b71 | refs/heads/master | 2021-01-19T23:52:34.657635 | 2017-05-10T02:23:58 | 2017-05-10T02:23:58 | 89,044,682 | 0 | 0 | null | 2017-04-22T04:38:27 | 2017-04-22T04:38:27 | null | UTF-8 | Python | false | false | 4,042 | py | #The following code is Doerry's board code and his comments
#This class is a simple board viewer. You pass it a board object, which is basically just a NxN 2D array.
import tkinter as tk
class BoardView(tk.Frame):
    """
    A simple NxN board viewer.  Pass it a board object (an NxN 2D array);
    clicking tiles places and moves red pieces, with status messages shown
    above the grid.

    Fixed from the transcribed original, which did not parse:
    ``class BoardView(tk, Frame)`` -> ``tk.Frame``; ``selfself`` -> ``self``;
    the unquoted ``empty`` literal; a semicolon after an ``elif``;
    ``self,blanktile`` -> ``self.blanktile``; ``tk.Ridge`` -> ``tk.RIDGE``;
    ``newtext``/``boarsize`` typos; and displayStatus's after-callback uses
    the corrected line from the original's own comment.
    NOTE(review): the "pick up a piece" branch now tags the tile 'moving' so
    the change-your-mind branch is reachable - review against the original.
    """

    def __init__(self, master=None, initboard=[]):
        tk.Frame.__init__(self, master)
        self.grid()
        print("Made a new board")
        self.boardgrid = []
        self.status = []  # will be set to status widget when created
        self.boardsize = len(initboard)
        self.blanktile = tk.PhotoImage(file='empty.gif')
        self.redpiece = tk.PhotoImage(file='red.gif')
        self.redgo = tk.PhotoImage(file='redgo.gif')
        self.redgone = tk.PhotoImage(file='redwent.gif')
        self.moving = False  # the button a piece is being moved from, or False
        self.createboard(initboard)

    def createboard(self, theboard):
        """Build the status bar, the NxN grid of tile buttons, a text entry
        and a quit button, wiring up the click handlers."""
        if not theboard:
            theboard.extend([[0] * 8] * 8)
        self.boardsize = len(theboard)
        self.boardgrid.extend([[0] * self.boardsize] * self.boardsize)

        def displayStatus(alabel, msg):
            # Flash a message, then fall back to an idle message after 2s.
            alabel.config(bg='yellow', fg='red', text=msg)
            alabel.after(2000, lambda: alabel.config(text="...snooze", bg='white', fg='black'))

        def changeButton(theButton, TransImage, newImage, newText):
            # Show a transient image briefly, then settle on the new image/text.
            theButton.config(image=TransImage, text='empty')
            theButton.after(3000, lambda: theButton.config(image=newImage, text=newText))

        def buttonHandler(event):
            button = event.widget
            mystatus = self.status
            if button.cget('text') == 'empty':
                if self.moving:
                    # Complete a move: clear the source tile, fill this one.
                    changeButton(self.moving, self.redgone, self.blanktile, 'empty')
                    displayStatus(mystatus, "nice Move")
                    button.config(image=self.redgo, text='red')
                    changeButton(button, self.redgo, self.redpiece, 'red')
                    self.moving = False
                else:
                    # Place a brand-new red piece on an empty tile.
                    displayStatus(mystatus, "Red like a comie pinko perver!")
                    button.config(image=self.redpiece, text='red')
            elif button.cget('text') == 'red':
                if self.moving:
                    # Moving onto an occupied tile: merge, clearing the source.
                    displayStatus(mystatus, "Eww gross...red pieces melding")
                    changeButton(self.moving, self.redgone, self.blanktile, 'empty')
                    changeButton(button, self.redgo, self.redpiece, 'red')
                    self.moving = False
                else:
                    # Pick the piece up and wait for a destination click.
                    button.config(image=self.redgo, text='moving')
                    displayStatus(mystatus, "CLick a space to move to")
                    self.moving = button
            elif button.cget('text') == 'moving':
                # Clicking the picked-up piece again cancels the move.
                button.config(image=self.redpiece, text='red')
                displayStatus(mystatus, "Aww, you changed your mind")
                self.moving = False

        def entryHandler(event):
            ebox = event.widget
            mystatus = self.status
            if ebox.get() != "":
                displayStatus(mystatus, "Hey! Did someone say: " + ebox.get())
                ebox.delete(0, tk.END)

        status = tk.Label(relief=tk.RIDGE, width=30, text="Welcome to Halma", pady=4,
                          font=('Arial', '16', 'bold'))
        self.status = status
        status.grid(row=0, columnspan=self.boardsize)
        for x in range(1, self.boardsize):
            for y in range(self.boardsize):
                w = tk.Button(width=35, height=35, text='empty', image=self.blanktile, borderwidth=5)
                w.bind("<Button-1>", buttonHandler)
                self.boardgrid[x][y] = w
                w.grid(row=x, column=y)
        tbox = tk.Entry(relief=tk.SUNKEN, width=40)
        tbox.bind("<Return>", entryHandler)
        tbox.grid(row=self.boardsize + 1, columnspan=self.boardsize)
        # Add a quit button for fun.
        qb = tk.Button(text="QUIT", command=lambda: self.quit())
        qb.grid(row=self.boardsize + 2, columnspan=self.boardsize)
        print("created the GUI board of size ", self.boardsize)
# Construct the GUI board; mainloop() on the next line then hands control
# to the Tk event loop until the window is closed.
board=BoardView()
board.mainloop() | [
"clarissacalderon23@gmail.com"
] | clarissacalderon23@gmail.com |
4fe2f24ace7a19b1acc48f98e1b7555884e1392c | 6e2e476c5764d5e75c7afe5a531ac5b890ef0c64 | /Models_barAllExecutionTimes.py | 6dd46654dd04bc45d58214343e8245ce54d8db3f | [] | no_license | BrunoDatoMeneses/PythonPloting | d4611f62f2709465e32d3ab2dc4e0d5cef65e783 | b5bd1c7aa5a50144d2db82f29ab754b01084f230 | refs/heads/master | 2023-05-07T14:08:17.225336 | 2021-06-02T09:06:13 | 2021-06-02T09:06:13 | 297,996,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | import _PLOT
# Plot script: renders a bar chart (with deviation intervals) of the AMAS
# execution times per phase/NCS, using the shared _PLOT/_PARAMS helpers.
from Utils import transpose
import os
import csv
# transpose.transposeFiles()
from _FIG import PLOTTING
from _PARAMS import PARAMETERS
figEndName = "-AllNCS"
#xlabel = 'Learning Cycles (#)'
ylabel = 'Times (ms)'
# NOTE(review): 'ExecuyionTimes' looks like a typo for 'ExecutionTimes';
# it only affects the generated figure's file name, so it is left as-is.
yStringLong ="ExecuyionTimes"
# figVaryingParamString = "learningCycles"
# varyingParamStringValues = ["500","1000","1500","2000"]
# varyingParamStrings = []
# paramlabelString = " Learning Cycles"
# PARAMETERS.learningCycles= "("
# for value in varyingParamStringValues:
#     # precisionRange+= str(int(100*float(label))) + "_"
#     # labelStrings.append(labelString + str(int(100*float(label))) + " %")
#     PARAMETERS.learningCycles += value + "_"
#     varyingParamStrings.append(value + paramlabelString)
#
# PARAMETERS.learningCycles += ")"
PARAMETERS.figSize = (4.5, 3.75)
# Column base names in the result files; the loop below derives the
# _Average/_Deviation/_Min/_Max column names from them.
yStrings = ["perceptsTimeExecution","contextsTimeExecution","headTimeExecution",
            "NCSTimeExecution",
            "NCS_UselessnessTimeExecution","NCS_IncompetendHeadTimeExecution","NCS_ConcurrenceAndConflictTimeExecution",
            "NCS_Create_New_ContextTimeExecution","NCS_OvermappingTimeExecution","NCS_ChildContextTimeExecution","NCS_PotentialRequestTimeExecution"]
yStringsAvg = []
yStringsDev = []
yStringsMin = []
yStringsMax = []
for string in yStrings:
    yStringsAvg.append(string+"_Average")
    yStringsDev.append(string+"_Deviation")
    yStringsMin.append(string+"_Min")
    yStringsMax.append(string+"_Max")
# Short bar labels, one per entry of yStrings (same order).
xLabelStrings = ["Pcts","Ctxt","Head",
            "NCSs",
            "NCS Useless.","NCS Unprod.","NCS Conf. and Conc.",
            "NCS Ctxt Creation","NCS Redun.","NCS Model","NCS Endo."]
logXScale = False
logYScale = False
# for label in labelStrings:
#     yStringLong += label + "_"
# NOTE(review): the loop variables 'min' and 'max' shadow the builtins of
# the same name within this module's top level.
XYDevMinMax = []
for y,yDev,min,max,yString in zip(yStringsAvg, yStringsDev, yStringsMin, yStringsMax,yStrings):
    if(yString == "endoRequests"):
        XYDevMinMax.append([y, yDev, min, max,0.1])
    else:
        XYDevMinMax.append([y, yDev, min, max, 1])
figName = "ToFill_" + yStringLong + "-" + PARAMETERS.getFigName() + figEndName
print(figName)
# Fix the experiment parameters used to locate the result files.
PARAMETERS.isActiveLearning = "false"
PARAMETERS.isSelfLearning = "true"
PARAMETERS.isLearnFromNeighbors = "true"
PARAMETERS.isActiveExploitation = "true"
PARAMETERS.activeExploitationCycles = "4000"
PARAMETERS.learningCycles = "500"
varyingParamStrings=[""]
constrains = []
constrains.append(PARAMETERS.getConstainsLabelsAreYStrings(xLabelStrings, XYDevMinMax))
PLOTTING.ROTATION = 45
# Render the chart twice: linear y-axis, then logarithmic y-axis.
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
                              figName, ylabel, False, False,
                              constrains, 1, 1, PARAMETERS.figSize)
_PLOT.barWithDeviationConstrained(xLabelStrings, varyingParamStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
                              figName, ylabel, False, True,
                              constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
#                                                 figName, xlabel, ylabel, False, logYScale,
#                                                 constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviationWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
#                                                     figName, xlabel, ylabel, True, logYScale,
#                                                     constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWitMinMaxWithFillBetweenConstrained(labelStrings, PARAMETERS.colors, PARAMETERS.intervalColors, PARAMETERS.markers,
#                                                 figName, xlabel, ylabel, True, logYScale,
#                                                 constrains, 1, 1, PARAMETERS.figSize)
# _PLOT.plotWithDeviation(labels, colors, markers, figName, xlabel, ylabel, logXScale, logYScale, xString, yString, deviationString, constrains, 1, 1)
| [
"bruno.dato.meneses@gmail.com"
] | bruno.dato.meneses@gmail.com |
f8fd6bda1cf814b0318a560ee864f0cde1e89a5e | 21b62c71317642b406d0acbd5be74eb1457917a1 | /tests/models/test_position.py | e48a40322e96b0b44f5a27b7422405736e82562c | [] | no_license | paulinobruno/mario-kart | bc09e46f2cb93c4648ef7088b3d6ac947fc7c3b3 | d0cfa0bad3da63fc8135c76a5102ce671511c6ba | refs/heads/master | 2020-06-02T15:13:52.285587 | 2019-02-14T16:44:13 | 2019-02-14T16:44:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | import unittest
from mkart.models.driver import Driver
from mkart.models.position import Position
class TestPosition(unittest.TestCase):
    """Unit tests for the Position model."""
    def setUp(self):
        # Runs before every test; builds the shared Driver fixture.
        self.load_fixtures()
    def load_fixtures(self):
        driver = Driver(1, 'F.MASSA')
        # NOTE(review): 'laps' is assigned twice -- presumably the Driver
        # 'laps' setter accumulates completed laps rather than overwriting;
        # confirm against the Driver model.
        driver.laps = 1
        driver.laps = 2
        self.driver = driver
    def test_create_position(self):
        """A Position keeps its driver, number, finished laps and duration."""
        number = 1
        finished_laps = 2
        duration = 60001
        position = Position(number, self.driver, finished_laps, duration)
        self.assertEqual(type(position), Position)
        self.assertEqual(type(position.driver), Driver)
        self.assertEqual(position.driver.id, self.driver.id)
        self.assertEqual(position.driver.name, self.driver.name)
        self.assertEqual(position.number, number)
        self.assertEqual(position.finished_laps, finished_laps)
        self.assertEqual(position.duration, duration)
    def test_create_position_with_delay_after_winner(self):
        """delay_after_winner can be set after construction and is kept."""
        number = 1
        finished_laps = 2
        duration = 60001
        position = Position(number, self.driver, finished_laps, duration)
        position.delay_after_winner = 60001
        self.assertEqual(type(position), Position)
        self.assertEqual(type(position.driver), Driver)
        self.assertEqual(position.driver.id, self.driver.id)
        self.assertEqual(position.driver.name, self.driver.name)
        self.assertEqual(position.number, number)
        self.assertEqual(position.finished_laps, finished_laps)
        self.assertEqual(position.duration, duration)
        self.assertEqual(position.delay_after_winner, 60001)
| [
"vtrmantovani@gmail.com"
] | vtrmantovani@gmail.com |
f2db0f815309f934b46da888e24855c0aad96a91 | 914b504e13df945a50f35eca4d850eb2c5b52c0b | /test/compute/test_base.py | f8c9bd3ba3a0fd128e1401b5f2e96d9796badcc2 | [
"Apache-2.0"
] | permissive | cloudkick/libcloud | d05c0401bd232279cb38b5abacd3d4c85d7d072f | 9c8605e1518c6b5e2511f0780e1946089a7256dd | refs/heads/master | 2021-01-01T19:51:41.895189 | 2011-03-14T02:34:57 | 2011-03-14T02:34:57 | 258,426 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.base import Response
from libcloud.common.base import ConnectionKey, ConnectionUserAndKey
from libcloud.compute.base import Node, NodeSize, NodeImage, NodeDriver
from test import MockResponse
class FakeDriver(object):
    # Minimal stand-in for a libcloud driver: the base-model constructors
    # below only read the driver's 'type' attribute, so a constant suffices.
    type = 0
class BaseTests(unittest.TestCase):
    """Smoke tests: each base class must be constructible without raising.

    No assertions are made on the resulting objects -- a test passes as
    long as the constructor does not raise.
    """
    def test_base_node(self):
        node = Node(id=0, name=0, state=0, public_ip=0, private_ip=0,
                    driver=FakeDriver())
    def test_base_node_size(self):
        node_size = NodeSize(id=0, name=0, ram=0, disk=0, bandwidth=0, price=0,
                             driver=FakeDriver())
    def test_base_node_image(self):
        node_image = NodeImage(id=0, name=0, driver=FakeDriver())
    def test_base_response(self):
        resp = Response(MockResponse(status=200, body='foo'))
    def test_base_node_driver(self):
        node_driver = NodeDriver('foo')
    def test_base_connection_key(self):
        conn = ConnectionKey('foo')
    def test_base_connection_userkey(self):
        conn = ConnectionUserAndKey('foo', 'bar')
    # def test_drivers_interface(self):
    #     failures = []
    #     for driver in DRIVERS:
    #         creds = ProviderCreds(driver, 'foo', 'bar')
    #         try:
    #             verifyObject(INodeDriver, get_driver(driver)(creds))
    #         except BrokenImplementation:
    #             failures.append(DRIVERS[driver][1])
    #
    #     if failures:
    #         self.fail('the following drivers do not support the \
    #                   INodeDriver interface: %s' % (', '.join(failures)))
    # def test_invalid_creds(self):
    #     failures = []
    #     for driver in DRIVERS:
    #         if driver == Provider.DUMMY:
    #             continue
    #         conn = connect(driver, 'bad', 'keys')
    #         try:
    #             conn.list_nodes()
    #         except InvalidCredsException:
    #             pass
    #         else:
    #             failures.append(DRIVERS[driver][1])
    #
    #     if failures:
    #         self.fail('the following drivers did not throw an \
    #                   InvalidCredsException: %s' % (', '.join(failures)))
# Run the tests when this module is executed directly; the unittest
# result code becomes the process exit status.
if __name__ == '__main__':
    sys.exit(unittest.main())
| [
"tomaz@apache.org"
] | tomaz@apache.org |
8df9b0fa83f0519444478ee62668187e3d56ebc1 | efd6a992f37b66c3f799acc8c0deb01966ee0b28 | /FaceDetectionWebcam/face_detect_cam.py | cedaca7c41ba519359e0af99e5e6f109965cd26e | [] | no_license | filikos/OpenCV | fa5f74229b68d15ab9d20e99cb63c6bf2820edd6 | ea521ed7ac2f1c6ac05381f2594d53f6f6793f79 | refs/heads/master | 2021-10-25T09:56:40.167953 | 2019-04-03T18:37:37 | 2019-04-03T18:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | '''
Created on 15.01.2016
@author: Niko Filippidis
'''
import cv2

# Haar cascade used for frontal-face detection.
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# Initialize the webcam.
video_capture = cv2.VideoCapture(0)

# PERF FIX: the HOG people detector and the display window were being
# re-created on every frame inside the loop; both are loop-invariant,
# so set them up once here.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Create the named window once so the fullscreen flag can be applied.
cv2.namedWindow("Stylebox", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Stylebox", cv2.WND_PROP_FULLSCREEN,
                      cv2.WINDOW_FULLSCREEN)

while True:
    # Get the next frame of the cam.
    ret, frame = video_capture.read()
    # ROBUSTNESS FIX: 'ret' was ignored; a failed grab would have crashed
    # cvtColor with an empty frame.
    if not ret:
        break

    # Convert the frame to a gray picture for faster processing.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Face-detection pass.
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=3,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle and a label above every found face.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, "Face", (x, y), cv2.FONT_HERSHEY_COMPLEX, 1,
                    (0, 200, 200), 2)

    # Print the amount of faces in the camera field.
    cv2.putText(frame, str(len(faces)), (20, 20), cv2.FONT_HERSHEY_COMPLEX,
                1, (0, 200, 200), 2)

    # Full-body detection pass.
    foundLocationsOfBody, foundWeights = hog.detectMultiScale(
        frame,
        winStride=(8, 8),
        padding=(32, 32),
        scale=1.05
    )

    # Draw a rectangle around every found body.  (The former
    # 'if len(...) > 0' guard was redundant: the loop body only runs
    # when there is at least one detection.)
    for x, y, w, h in foundLocationsOfBody:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the result.
    cv2.imshow("Stylebox", frame)

    # Program break condition: quit on 's'.
    if cv2.waitKey(1) & 0xFF == ord('s'):
        break

# Release the webcam and close the display window.
video_capture.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
62926e4236f6d5260463602eb5762e132f57bd9a | 0047686fffa211457af0f284e6cba48dc762bbd5 | /gencmake.py | 11153e02700ed00ef88d3deee9bdf7b4e3033fc9 | [] | no_license | timre13/gencmake | 9ca7bcbf455cbed229a637446b5624ac9b8a38e9 | f02f6102c9bad9e5829dd0bf9b3efcab821b0d4f | refs/heads/master | 2023-07-09T03:05:01.109679 | 2021-08-09T15:55:39 | 2021-08-09T15:55:39 | 394,062,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,164 | py | import sys
import os
from glob import iglob
import subprocess as sp
def printerr(s: str):
    """Print *s* to standard error, terminated by a newline."""
    print(s, file=sys.stderr)
if len(sys.argv) != 2 and len(sys.argv) != 3:
printerr("Usage: "+sys.argv[0]+" <project name> <optional: project type>")
sys.exit(1)
if sys.argv[1] == "--list-project-types".strip() or sys.argv[1].strip() == "-l":
print("Available project types:\n\
default: A project without any libraries.\n\
sdl2: A project with the SDL2 graphical library.\n\
gtkmm-3.0: A project with the GTK-3 binding for C++.\n\
fltk: A project with the FLTK GUI toolkit.")
sys.exit(0)
PROJECT_NAME = sys.argv[1].strip()
PROJECT_TYPE = "default" if len(sys.argv) == 2 else sys.argv[2].lower()
PROJECT_CFLAGS = ["-Wall", "-Wextra", "-Wpedantic", "-g3", "-fno-limit-debug-info"]
PROJECT_LIBS = []
PROJECT_INCLUDE_DIRS = []
CMAKE_MIN_VER = "3.10"
CXX_STANDARD_VER = "17"
FILE_EXTENSIONS = (
".cpp",
".c",
".cxx",
".h",
".hpp",
".H",
)
MAIN_SKELETONS = {
"default":
r"""#include <iostream>
int main()
{
std::cout << "Hello, World!\n";
return 0;
}
""",
"sdl2":
r"""#include <SDL2/SDL.h>
#include <iostream>
#define WINDOW_WIDTH 1500
#define WINDOW_HEIGHT 1000
int main()
{
if (SDL_Init(SDL_INIT_VIDEO))
{
std::cerr << "Failed to initialize SDL2: " << SDL_GetError() << '\n';
return 1;
}
auto window = SDL_CreateWindow("Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WINDOW_WIDTH, WINDOW_HEIGHT, 0);
if (!window)
{
std::cerr << "Failed to create window: " << SDL_GetError() << '\n';
SDL_Quit();
return 1;
}
auto renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
if (!renderer)
{
std::cerr << "Failed to create renderer: " << SDL_GetError() << '\n';
SDL_DestroyWindow(window);
SDL_Quit();
return 1;
}
bool isDone{};
while (!isDone)
{
SDL_Event event;
while (!isDone && SDL_PollEvent(&event))
{
switch (event.type)
{
case SDL_QUIT:
isDone = true;
break;
case SDL_KEYUP:
switch (event.key.keysym.sym)
{
case SDLK_ESCAPE:
isDone = true;
break;
}
break;
}
}
if (isDone)
break;
SDL_SetRenderDrawColor(renderer, 100, 100, 100, 255);
SDL_RenderClear(renderer);
SDL_RenderPresent(renderer);
SDL_Delay(16);
}
SDL_DestroyWindow(window);
SDL_DestroyRenderer(renderer);
SDL_Quit();
return 0;
}
""",
"gtkmm-3.0":
r"""#include <iostream>
int main()
{
std::cout << "Hello, World!\n";
return 0;
}
""",
"fltk":
r"""#include <iostream>
int main()
{
std::cout << "Hello, World!\n";
return 0;
}
"""
}
def getCommandOutput(cmd: str) -> str:
    """Run *cmd* (split on spaces) and return its decoded stdout.

    On any failure -- the command cannot be spawned or exits non-zero --
    the error is reported via printerr() and the process exits with 1.
    """
    try:
        result = sp.run(cmd.split(" "), stdout=sp.PIPE, stderr=sp.PIPE)
    # BUG FIX: a bare 'except:' also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        printerr("Failed to run command: \"{}\"\nexception: {}".format(cmd, sys.exc_info()))
        sys.exit(1)
    if result.returncode:
        printerr("Failed to run command: \"{}\"\nreturn code: {}\nstdout: {}\nstderr: {}".format(
            cmd, result.returncode, result.stdout, result.stderr))
        sys.exit(1)
    return result.stdout.decode("utf-8")
def fetchCFlagsAndIncludeDirs(cmd: str):
    """Run *cmd* and sort its space-separated output into the module-level
    PROJECT_INCLUDE_DIRS ('-I' entries with the prefix stripped) and
    PROJECT_CFLAGS (everything else) lists.
    """
    tokens = getCommandOutput(cmd).strip().split(" ")
    for token in tokens:
        token = token.strip()
        if token.startswith("-I"):
            PROJECT_INCLUDE_DIRS.append(token[2:])
        else:
            PROJECT_CFLAGS.append(token)
def fetchLibs(cmd: str):
    """Run *cmd* and append each space-separated token to the module-level
    PROJECT_LIBS list, dropping the first two characters of every token
    (presumably the '-l' linker prefix -- confirm for each config tool).
    """
    for token in getCommandOutput(cmd).strip().split(" "):
        PROJECT_LIBS.append(token.strip()[2:])
class ProjectFile:
    """Writer for a freshly created CMakeLists.txt in the current directory.

    Each write* method appends one CMake directive to the open file; the
    handle is closed in __del__.
    """
    def __init__(self):
        # Refuse to clobber an existing CMakeLists.txt.
        if os.path.exists("./CMakeLists.txt"):
            raise FileExistsError
        self.file = open("CMakeLists.txt", "w+")
    def writeLine(self, val: str=""):
        # Append an arbitrary line (default: a blank separator line).
        self.file.write(val+"\n")
    def writeCmakeMinVer(self):
        # Uses the module-level CMAKE_MIN_VER constant.
        self.file.write("cmake_minimum_required(VERSION {})\n".format(CMAKE_MIN_VER))
    def writeCxxStandard(self, standard: str):
        self.file.write("set(CMAKE_CXX_STANDARD {})\nset(CMAKE_CXX_STANDARD_REQUIRED true)\n".format(standard))
    def writeExportCCommands(self):
        # Emit compile_commands.json so tooling (clangd etc.) can use it.
        self.file.write("set(CMAKE_EXPORT_COMPILE_COMMANDS true)\n")
    def writeProjectDecl(self, projectName: str, projectVersion: str):
        self.file.write("project({} VERSION {})\n".format(projectName, projectVersion))
    def writeFlags(self, flags: tuple):
        self.file.write("set(CMAKE_CXX_FLAGS \"{}\")\n".format(" ".join(flags)))
    def writeIncludeDirs(self, dirs: tuple):
        self.file.write("include_directories(\n{})\n".format("".join([" "*4+x+"\n" for x in dirs])))
    def writeLinkLibs(self, libs: tuple):
        self.file.write("link_libraries(\n{})\n".format("".join([" "*4+x+"\n" for x in libs])))
    def writeExeInfo(self, exeName: str):
        # Collect every source/header under src/ (extensions listed in the
        # module-level FILE_EXTENSIONS); fall back to src/main.cpp when the
        # tree is empty so the generated file always builds something.
        sourceFiles = []
        for file in iglob("src/**/*", recursive=True):
            if file.endswith(FILE_EXTENSIONS):
                sourceFiles.append(file)
        self.file.write("add_executable({}\n{})\n".format(exeName, "".join([" "*4+x+"\n" for x in sorted(sourceFiles or ["src/main.cpp"], reverse=True)])))
    def __del__(self):
        # Close the handle only if __init__ got far enough to open it.
        try:
            self.file.close()
        except AttributeError:
            pass
print("Generating project \"{}\" of type \"{}\"".format(PROJECT_NAME, PROJECT_TYPE))
if PROJECT_TYPE == "default":
pass
elif PROJECT_TYPE == "sdl2":
fetchCFlagsAndIncludeDirs("sdl2-config --cflags")
fetchLibs("sdl2-config --libs")
elif PROJECT_TYPE == "gtkmm-3.0":
fetchCFlagsAndIncludeDirs("pkg-config --cflags gtkmm-3.0")
fetchLibs("pkg-config --libs gtkmm-3.0")
elif PROJECT_TYPE == "fltk":
fetchCFlagsAndIncludeDirs("fltk-config --cxxflags")
fetchLibs("fltk-config --ldflags")
else:
printerr("Error: Invalid project type. Use `--list-project-types` or `-l` to get the list of available project types.")
sys.exit(1)
try:
file = ProjectFile()
except:
printerr("Error: File already exists")
sys.exit(1)
file.writeCmakeMinVer()
file.writeLine()
file.writeCxxStandard(CXX_STANDARD_VER)
file.writeLine()
file.writeExportCCommands()
file.writeLine()
file.writeProjectDecl(PROJECT_NAME, "1.0")
file.writeLine()
file.writeFlags(tuple(PROJECT_CFLAGS))
file.writeLine()
if PROJECT_INCLUDE_DIRS:
file.writeIncludeDirs(tuple(PROJECT_INCLUDE_DIRS))
if PROJECT_LIBS:
file.writeLinkLibs(tuple(PROJECT_LIBS))
if PROJECT_INCLUDE_DIRS or PROJECT_LIBS:
file.writeLine()
file.writeExeInfo(PROJECT_NAME)
file.writeLine()
print("CMakeLists.txt written")
print("Writing skeleton")
if not os.path.exists("./src"):
print("Creating ./src/")
os.mkdir("./src")
if not os.path.exists("./src/main.cpp"):
print("Writing ./src/main.cpp")
with open("./src/main.cpp", "w+") as file:
file.write(MAIN_SKELETONS[PROJECT_TYPE])
else:
print("src/main.cpp already exists, not writing")
print("Done")
| [
"itorteli13@gmail.com"
] | itorteli13@gmail.com |
32a29493bd6303aa367a5871b7921ff7df25016d | 10c50dd33cbd22a7ef96ace3c5d1a9fa8129b408 | /build/releases/release-0.602/src/lisp-etr.py | d3e74c7d0fb2b45deb537161c4218fcbf5ac483c | [
"Apache-2.0"
] | permissive | farinacci/lispers.net | 5590089ad296a404c7da07a4410b16b5e98db82d | 3c6a4a30b96409406c161e4ad1c3fcaecf4f4ba3 | refs/heads/master | 2023-08-09T05:43:06.456949 | 2023-07-27T21:17:53 | 2023-07-27T21:17:53 | 114,426,834 | 36 | 6 | Apache-2.0 | 2022-11-15T22:15:26 | 2017-12-16T02:15:08 | Python | UTF-8 | Python | false | false | 70,894 | py | #-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import str
import lisp
import lispconfig
import socket
import select
import threading
import time
import struct
from subprocess import getoutput
import os
try:
import pytun
except:
pytun = None
#endtry
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
lisp_register_timer = None
lisp_trigger_register_timer = None
lisp_etr_info_timer = None
lisp_ephem_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None
lisp_send_sockets = [None, None, None]
lisp_raw_socket = None
lisp_l2_socket = None
lisp_mac_header = None
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#
# Test mode. Allows a batch of database-mapping commands to be read from
# lisp.config before any Map-Registers are sent. When an EID 'eid-done' is
# found (which is placed as the last database-mapping command in lisp.config),
# then lisp_build_map_register() is called via the 5-second delay timer.
#
lisp_etr_test_mode = (os.getenv("LISP_ETR_TEST_MODE") != None)
lisp_seen_eid_done = False
#
# When the lisp-itr process discovers decentralized NAT map-cache entries we
# need to send Info-Requests to them to open a hole in our local NAT device.
#
# Indexed by an IP address string where value is a lisp_address().
#
lisp_etr_nat_probe_list = {}
#------------------------------------------------------------------------------
#
# lisp_etr_map_server_command
#
# Configure a Map-Server and trigger ETR functionality.
#
def lisp_etr_map_server_command(kv_pair):
    """Process a "lisp map-server" command clause.

    kv_pair -- dict of keyword/value pairs parsed from the clause.

    Side effects: may start the Info-Request timer (for NAT-traversal),
    send a triggered Map-Register to the new map-server, or start the
    delayed periodic Map-Register timer.
    """
    global lisp_trigger_register_timer
    global lisp_etr_info_timer
    ms = lispconfig.lisp_map_server_command(kv_pair)
    #
    # Trigger an Info-Request if we are doing NAT-traversal if this is the
    # first Map-Server.
    #
    first_ms = (len(lisp.lisp_map_servers_list) == 1)
    if (first_ms):
        ms = list(lisp.lisp_map_servers_list.values())[0]
        lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
            [ms.map_server])
        lisp_etr_info_timer.start()
    else:
        #
        # Trigger Map-Register to newly configured Map-Server.
        #
        # Do not trigger Map-Register if NAT-traversal is configured. We may
        # not have the global RLOC yet from Info-Replies. When the Info-Reply
        # comes in we do trigger Map-Registers to all map-servers.
        #
        if (lisp.lisp_nat_traversal): return
        if (ms and len(lisp.lisp_db_list) > 0):
            lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
        #endif
    #endif
    #
    # Do not start the trigger timer if we are in test-mode. We may already
    # be sending a huge list of Map-Registers after "eid-done".
    #
    if (lisp_etr_test_mode and lisp_seen_eid_done): return
    #
    # Handle case where "lisp database-mapping" comes before "lisp map-server"
    # in configuration file. We have to start periodic timer.
    #
    if (len(lisp.lisp_db_list) > 0):
        if (lisp_trigger_register_timer != None): return
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
    """Process a "lisp database-mapping" command clause; supports adding
    additional RLOCs to an already-existing entry.

    kv_pair -- dict of keyword/value pairs parsed from the clause.

    Side effects: stores the mapping via lispconfig and may start a
    Map-Register timer (immediately in test mode once the "eid-done"
    sentinel entry is seen, otherwise after a 5-second trigger delay).
    """
    global lisp_register_timer, lisp_trigger_register_timer
    global lisp_send_sockets, lisp_seen_eid_done
    global lisp_seen_eid_done_count
    #
    # This is to fix an issue with the same set of database-mappings being
    # sent a second time. Only in test-mode we don't want to dup process for
    # large numbers of entries.
    #
    if (lisp_seen_eid_done): return
    lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port,
        (lisp_etr_test_mode == False))
    #
    # Trigger Map-Register when all database-mappings are configured.
    #
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
    # have the global RLOC yet from Info-Replies. When the Info-Reply comes
    # in we do trigger Map-Registers to all map-servers.
    #
    if (lisp.lisp_nat_traversal): return
    if (lisp_trigger_register_timer != None): return
    #
    # Wait until a large set of database-mapping commands are processed
    # before sending the first set of Map-Registers. Used in test mode only.
    # The batch ends with a distinguished-name entry "eid-done".
    #
    if (lisp_etr_test_mode):
        db_size = len(lisp.lisp_db_list)
        if (db_size % 1000 == 0):
            lisp.fprint("{} database-mappings processed".format(db_size))
        #endif
        db = lisp.lisp_db_list[-1]
        if (db.eid.is_dist_name() == False): return
        if (db.eid.address != "eid-done"): return
        lisp_seen_eid_done = True
        lisp.fprint("Finished batch of {} database-mappings".format(db_size))
        t = threading.Timer(0, lisp_process_register_timer,
            [lisp_send_sockets])
        lisp_register_timer = t
        lisp_register_timer.start()
        return
    #endif
    if (len(lisp.lisp_map_servers_list) > 0):
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
    """Build and return the HTML "show" page for the ETR process.

    clause -- the show-command clause (unused here; kept for the common
              show-command callback signature).

    The page contains: local RLOCs, decap stats, configured map-servers,
    database-mappings, and (when present) ELP/RLE/JSON lists and
    group-mappings.
    """
    #
    # Show local found RLOCs.
    #
    output = lispconfig.lisp_show_myrlocs("")
    #
    # Show decapsulation stats.
    #
    output = lispconfig.lisp_show_decap_stats(output, "ETR")
    #
    # Show configured map-servers.
    #
    dns_suffix = lisp.lisp_decent_dns_suffix
    if (dns_suffix == None):
        dns_suffix = ":"
    else:
        dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
    #endif
    hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
    title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
    title = lisp.lisp_span(title, hover)
    hover = ("P = proxy-reply requested, M = merge-registrations " + \
        "requested, N = Map-Notify requested")
    reg_title = lisp.lisp_span("Registration<br>flags", hover)
    output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
        "xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
        "Map-Notifies<br>Received")
    for ms in list(lisp.lisp_map_servers_list.values()):
        ms.resolve_dns_name()
        # Only show an ms-name line when it is not the default "all".
        ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
        addr_str = ms_name + ms.map_server.print_address_no_iid()
        if (ms.dns_name): addr_str += "<br>" + ms.dns_name
        xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
        # Upper-case letter means the flag is set, lower-case means clear.
        flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
            "M" if ms.merge_registrations else "m",
            "N" if ms.want_map_notify else "n",
            "R" if ms.refresh_registrations else "r")
        registers_sent = ms.map_registers_sent + \
            ms.map_registers_multicast_sent
        output += lispconfig.lisp_table_row(addr_str,
            "sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
            xtr_id, ms.site_id, flags, registers_sent,
            ms.map_notifies_received)
    #endfor
    output += lispconfig.lisp_table_footer()
    #
    # Show database-mappings configured.
    #
    output = lispconfig.lisp_show_db_list("ETR", output)
    #
    # Show ELP configuration, if it exists.
    #
    if (len(lisp.lisp_elp_list) != 0):
        output = lispconfig.lisp_show_elp_list(output)
    #endif
    #
    # Show RLE configuration, if it exists.
    #
    if (len(lisp.lisp_rle_list) != 0):
        output = lispconfig.lisp_show_rle_list(output)
    #endif
    #
    # Show JSON configuration, if it exists.
    #
    if (len(lisp.lisp_json_list) != 0):
        output = lispconfig.lisp_show_json_list(output)
    #endif
    #
    # Show group-mappings, if they exist.
    #
    if (len(lisp.lisp_group_mapping_list) != 0):
        title = "Configured Group Mappings:"
        output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
            "Sources", "Use MS")
        for gm in list(lisp.lisp_group_mapping_list.values()):
            sources = ""
            for s in gm.sources: sources += s + ", "
            if (sources == ""):
                sources = "*"
            else:
                # Drop the trailing ", " separator.
                sources = sources[0:-2]
            #endif
            output += lispconfig.lisp_table_row(gm.group_name,
                gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
        #endfor
        output += lispconfig.lisp_table_footer()
    #endif
    return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
    """Return the crypto-key list display for the ETR; 'parameter' is
    unused but kept for the common show-command callback signature.
    """
    output = lispconfig.lisp_show_crypto_list("ETR")
    return(output)
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
    """Process a "lisp group-mapping" command clause.

    kv_pairs -- dict of keyword/value pairs from the clause. Recognized
    keywords: group-name, group-prefix, instance-id, ms-name, address
    (a list of source addresses), rle-address.

    Side effect: builds a lisp_group_mapping and registers it in the
    global group-mapping list via add_group().
    """
    sources = []
    group_prefix = None
    rle_address = None
    ms_name = "all"
    # ROBUSTNESS FIX: group_name was unbound (UnboundLocalError at the
    # lisp_group_mapping() call) when the clause had no "group-name".
    group_name = None
    # IDIOM FIX: iterate items() instead of keys() plus a second lookup;
    # use 'is None' instead of '== None'.
    for kw, value in kv_pairs.items():
        if (kw == "group-name"):
            group_name = value
        #endif
        if (kw == "group-prefix"):
            if (group_prefix is None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.store_prefix(value)
        #endif
        if (kw == "instance-id"):
            if (group_prefix is None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.instance_id = int(value)
        #endif
        if (kw == "ms-name"):
            ms_name = value[0]
        #endif
        if (kw == "address"):
            # 'value' is a list of source addresses; skip empty entries.
            for source in value:
                if (source != ""): sources.append(source)
            #endfor
        #endif
        if (kw == "rle-address"):
            if (rle_address is None):
                rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            rle_address.store_address(value)
        #endif
    #endfor
    gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
        rle_address)
    gm.add_group()
    return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
    """Build the EID-records (with their RLOC-records) for a Map-Register.

    quiet -- suppress per-record debug printing when True.
    db    -- the database-mapping entry whose rloc_set is registered.
    eid   -- EID address; one EID-record is produced for its instance_id
             and one for each id in its iid_list.
    group -- multicast group address (NULL address for unicast entries).
    ttl   -- record TTL placed in each EID-record.

    Returns the tuple (encoded_eid_records_bytes, eid_record_count).
    """
    #
    # Don't include RTR-list if there is no NAT in the path but nat-traversal
    # is configured and NAT in path is tested. When there is a NAT, include
    # all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
    # array element is None, then the RTR is down and should be excluded in
    # the list to register.
    #
    rtr_list = {}
    for rloc_entry in db.rloc_set:
        if (rloc_entry.translated_rloc.is_null()): continue
        for rtr_str in lisp.lisp_rtr_list:
            rtr = lisp.lisp_rtr_list[rtr_str]
            if (lisp.lisp_register_all_rtrs == False and rtr == None):
                lisp.lprint("  Exclude unreachable RTR {}".format( \
                    lisp.red(rtr_str, False)))
                continue
            #endif
            if (rtr == None): continue
            rtr_list[rtr_str] = rtr
        #endfor
        # Only the first translated RLOC is needed to collect the RTR set.
        break
    #endfor
    count = 0
    eid_records = b""
    # One EID-record per instance-id the EID is registered under.
    for iid in [eid.instance_id] + eid.iid_list:
        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
        eid_record.authoritative = True
        eid_record.record_ttl = ttl
        eid_record.eid.copy_address(eid)
        eid_record.eid.instance_id = iid
        eid_record.eid.iid_list = []
        eid_record.group.copy_address(group)
        eid_records += eid_record.encode()
        if (not quiet):
            prefix_str = lisp.lisp_print_eid_tuple(eid, group)
            decent_index = ""
            if (lisp.lisp_decent_pull_xtr_configured()):
                decent_index = lisp.lisp_get_decent_index(eid)
                decent_index = lisp.bold(str(decent_index), False)
                decent_index = ", decent-index {}".format(decent_index)
            #endif
            lisp.lprint("  EID-prefix {} for ms-name '{}'{}".format( \
                lisp.green(prefix_str, False), db.use_ms_name, decent_index))
            eid_record.print_record("    ", False)
        #endif
        # Encode one RLOC-record per configured RLOC in the entry.
        for rloc_entry in db.rloc_set:
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.store_rloc_entry(rloc_entry)
            rloc_record.local_bit = rloc_entry.rloc.is_local()
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record("    ")
        #endfor
        #
        # If we are doing NAT-traversal, include a set or RTR RLOCs with
        # priority 1. And set the global RLOCs to priority 254.
        #
        for rtr in list(rtr_list.values()):
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record("    RTR ")
        #endfor
        #
        # Return to caller number of EID records written to returned buffer.
        #
        count += 1
    #endfor
    return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
    """
    Build Map-Register messages from each configured "database-mapping"
    entry and send them to the configured map-servers.

    lisp_sockets: socket array passed to lisp.lisp_send_map_register().
    ttl:          record TTL to advertise; None means LISP_REGISTER_TTL.
    eid_only:     when non-None, register only this one (dynamic) EID.
    ms_only:      when non-None, register only with this one map-server.
    refresh:      refresh-bit value used for map-servers that have
                  "refresh-registrations" configured.
    """

    #
    # No database-mapping entries.
    #
    if (eid_only != None):
        db_list_len = 1
    else:
        db_list_len = lisp.lisp_db_list_length()
        if (db_list_len == 0): return
    #endif

    #
    # Fix: the old test-mode conditional printed the identical message in
    # both branches, so it was collapsed into one call.
    #
    lisp.lprint("Build Map-Register for {} database-mapping entries". \
        format(db_list_len))

    #
    # Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
    #
    decent = lisp.lisp_decent_pull_xtr_configured()

    #
    # Go quiet with debug output when there are a lot of EID-records.
    #
    quiet = (db_list_len > 12)

    ms_list = {}
    if (decent):

        #
        # If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs too (and is registered with.
        #
        for db in lisp.lisp_db_list:
            eid = db.eid if db.group.is_null() else db.group
            dns_name = lisp.lisp_get_decent_dns_name(eid)
            ms_list[dns_name] = []
        #endfor
    else:

        #
        # Set up each map-server names so we can decide which EID-prefixes go
        # to which map-servers. [0] is eid_records and [1] is count.
        #
        for ms in list(lisp.lisp_map_servers_list.values()):
            if (ms_only != None and ms != ms_only): continue
            ms_list[ms.ms_name] = []
        #endfor
    #endif

    #
    # Create data structure instances to build Map-Regiser message.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.use_ttl_for_timeout = True
    if (ttl == None): ttl = lisp.LISP_REGISTER_TTL

    #
    # Traverse the databas-mapping associative array.
    #
    mtu = 65000 if (lisp_etr_test_mode) else 1100
    for db in lisp.lisp_db_list:
        if (decent):
            ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
        else:
            ms_dns_name = db.use_ms_name
        #endif

        #
        # Is db entry associated with a map-server name that is not
        # configured?
        #
        if (ms_dns_name not in ms_list): continue

        msl = ms_list[ms_dns_name]
        if (msl == []):
            msl = [b"", 0]
            ms_list[ms_dns_name].append(msl)
        else:
            msl = ms_list[ms_dns_name][-1]
        #endif

        #
        # If dynamic-EIDs are discovered, add each of them to EID-records,
        # unless, we are doing a trigger in which case a single dynamic-EID
        # is built into an EID-record.
        #
        # Otherwise, add static EID-prefixes into EID-records, unless a single
        # one is triggered.
        #
        eid_records = b""
        if (db.dynamic_eid_configured()):
            for dyn_eid in list(db.dynamic_eids.values()):
                eid = dyn_eid.dynamic_eid
                if (eid_only == None or eid_only.is_exact_match(eid)):
                    records, count = lisp_build_map_register_records(quiet, db,
                        eid, db.group, ttl)
                    eid_records += records
                    msl[1] += count
                #endif
            #endfor
        else:
            if (eid_only == None):

                #
                # NOTE(review): a non-zero caller-supplied ttl is replaced by
                # the per-database register_ttl here -- confirm intended.
                #
                if (ttl != 0): ttl = db.register_ttl
                eid_records, count = lisp_build_map_register_records(quiet, db,
                    db.eid, db.group, ttl)
                msl[1] += count
            #endif
        #endif

        #
        # Add EID-records to correct map-server name set. Start a new
        # Map-Register buffer when the current one has 20 records or would
        # exceed the mtu.
        #
        msl[0] += eid_records
        if (msl[1] == 20 or len(msl[0]) > mtu):
            msl = [b"", 0]
            ms_list[ms_dns_name].append(msl)
        #endif
    #endfor

    #
    # Send Map-Register to each configured map-server.
    #
    sleep_time = .500 if (lisp_etr_test_mode) else .001
    count = 0
    for ms in list(lisp.lisp_map_servers_list.values()):
        if (ms_only != None and ms != ms_only): continue

        ms_dns_name = ms.dns_name if decent else ms.ms_name
        if (ms_dns_name not in ms_list): continue

        for msl in ms_list[ms_dns_name]:

            #
            # Build map-server specific fields.
            #
            map_register.record_count = msl[1]
            if (map_register.record_count == 0): continue

            map_register.nonce += 1
            map_register.alg_id = ms.alg_id
            map_register.key_id = ms.key_id
            map_register.proxy_reply_requested = ms.proxy_reply
            map_register.merge_register_requested = ms.merge_registrations
            map_register.map_notify_requested = ms.want_map_notify
            map_register.xtr_id = ms.xtr_id
            map_register.site_id = ms.site_id
            map_register.encrypt_bit = (ms.ekey != None)
            if (ms.refresh_registrations):
                map_register.map_register_refresh = refresh
            #endif
            if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id
            packet = map_register.encode()
            map_register.print_map_register()

            #
            # Append EID-records and encode xtr-ID and site-ID at end of
            # Map-Register.
            #
            trailer = map_register.encode_xtr_id(b"")
            eid_records = msl[0]
            packet = packet + eid_records + trailer

            ms.map_registers_sent += 1
            lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)

            #
            # In test mode, slow down the send rate as volume grows.
            #
            count += 1
            if (count % 100 == 0 and lisp_etr_test_mode):
                sleep_time += .1
                lisp.fprint("Sent {} Map-Registers, ipd {}".format(count,
                    sleep_time))
            #endif
            time.sleep(sleep_time)
        #endfor
        if (lisp_etr_test_mode):
            lisp.fprint("Sent total {} Map-Registers".format(count))
        #endif

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Exit loop if we are triggering a Map-Register to a single
        # Map-Server.
        #
        if (ms_only != None and ms == ms_only): break
    #endfor
    return
#enddef
#
# lisp_etr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# then sending periodic Map-Registers as well as less the the NAT timeout
# value which is usually one minute.
#
def lisp_etr_process_info_timer(ms):
    """
    Periodic Info-Request sender. Runs less often than Map-Register refresh
    but inside the (typically one-minute) NAT timeout so translation state
    stays open. Re-arms itself via threading.Timer.
    """
    global lisp_etr_info_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Send Info-Requests toward the map-server for any database-mappings
    # that carry private RLOCs.
    #
    socket_list = [lisp_ephem_socket, lisp_ephem_socket,
        lisp_ipc_listen_socket]
    lisp.lisp_build_info_requests(socket_list, ms, lisp.LISP_CTRL_PORT)

    #
    # Do the same toward each RTR so the NAT opens state letting RTRs
    # encapsulate to us. Private RTR addresses are skipped when the
    # LISP_RTR_BEHIND_NAT environment variable is set.
    #
    env_set = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
    for rtr in list(lisp.lisp_rtr_list.values()):
        if (rtr == None): continue
        if (env_set and rtr.is_private_address()):
            r = lisp.red(rtr.print_address_no_iid(), False)
            lisp.lprint("Skip over RTR private address {}".format(r))
            continue
        #endif
        lisp.lisp_build_info_requests(socket_list, rtr, lisp.LISP_DATA_PORT)
    #endfor

    #
    # NAT-probe any decentralized-NATed ETRs we know about.
    #
    for etr_str, etr in lisp_etr_nat_probe_list.items():
        lisp.lprint("Send NAT-Probe to ETR {}".format(etr_str))
        lisp.lisp_send_info_request(socket_list, etr, lisp.LISP_DATA_PORT,
            None)
    #endfor

    #
    # Re-arm the periodic timer. This particular timer must be explicitly
    # canceled first (behavior observed on rasp-pi NAT testing, Jul 2015).
    #
    lisp_etr_info_timer.cancel()
    lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
        lisp_etr_process_info_timer, [None])
    lisp_etr_info_timer.start()
    return
#enddef
#
# lisp_process_register_timer
#
# Time to send a periodic Map-Register.
#
def lisp_process_register_timer(lisp_sockets):
    """
    Periodic Map-Register timer handler: registers every database-mapping,
    handles the L2-overlay broadcast join, clears any pending trigger
    timer, and re-arms the periodic timer.
    """
    global lisp_register_timer, lisp_trigger_register_timer
    global lisp_ephem_socket

    lisp.lisp_set_exception()

    #
    # Register everything we have configured.
    #
    lisp_build_map_register(lisp_sockets, None, None, None, True)

    #
    # For L2-overlays, also register a join of the broadcast MAC address.
    #
    if (lisp.lisp_l2_overlay):
        bcast = [None, "ffff-ffff-ffff", True]
        lisp_send_multicast_map_register(lisp_sockets, [bcast])
    #endif

    #
    # If a trigger timer got us here, clear it; it is only re-armed when a
    # new map-server or database-mapping is configured.
    #
    if (lisp_trigger_register_timer != None):
        lisp_trigger_register_timer.cancel()
        lisp_trigger_register_timer = None
    #endif

    #
    # Re-arm the periodic timer.
    #
    if (lisp_register_timer): lisp_register_timer.cancel()
    lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
        lisp_process_register_timer, [lisp_send_sockets])
    lisp_register_timer.start()
    return
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
    """
    Build a Map-Register with a Multicast Info Type LCAF EID-record for
    each entry and an RLE RLOC-record naming this ETR as the replication
    target, then send it to each configured map-server.

    entries: array of (source, group, joinleave) tuples.
    """
    length = len(entries)
    if (length == 0): return

    #
    # Derive address-family from the first group address.
    #
    afi = None
    if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
    if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
    if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
    if (afi == None):
        lisp.lprint("lisp_send_multicast_map_register() invalid group address")
        return
    #endif

    #
    # Find all (*,G) entries in entries array and replace with (S,G) entries
    # from lisp_group_mapping_list. The comment to avoid the source check
    # is there so we can build a g_entry that can validate against group
    # mappings. Have to fix to allow different sources for the same G when
    # (S,G) is reported.
    #
    g_entries = []
    for source, group, joinleave in entries:
        # if (source != None): continue
        g_entries.append([group, joinleave])
    #endfor

    decent = lisp.lisp_decent_pull_xtr_configured()
    ms_list = {}
    entries = []
    for group, joinleave in g_entries:
        ms_gm = lisp.lisp_lookup_group(group)
        if (ms_gm == None):
            lisp.lprint("No group-mapping for {}, could be underlay group". \
                format(group))
            continue
        #endif
        lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
            ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))

        iid = ms_gm.group_prefix.instance_id
        ms_name = ms_gm.use_ms_name
        rle = ms_gm.rle_address

        #
        # To obtain decent-index for a group address, just use group address
        # and no source as part of hash. Because an ITR does not know if (*,G)
        # or (S,G) is registered with the mapping system
        #
        key = ms_name
        if (decent):
            key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
            ms_list[key] = [b"", 0]
        #endif

        if (len(ms_gm.sources) == 0):
            entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
            continue
        #endif
        for s in ms_gm.sources:
            ms_list[key] = [b"", 0]
            entries.append([s, group, iid, key, rle, joinleave])
        #endfor
    #endfor

    length = len(entries)
    if (length == 0): return

    lisp.lprint("Build Map-Register for {} multicast entries".format(length))

    #
    # Build RLE node for RLOC-record encoding. If behind a NAT, we need to
    # insert a global address as the RLE node address. We will do that in
    # the entries for loop.
    #
    rle_node = lisp.lisp_rle_node()
    rle_node.level = 128
    translated_rloc = lisp.lisp_get_any_translated_rloc()
    rle = lisp.lisp_rle("")
    rle.rle_nodes.append(rle_node)

    #
    # Set up each map-server names so we can decide which EID-prefixes go
    # to which map-servers. [0] is eid_records and [1] is count. The ms_list
    # is already setup for when pull-based decent is used.
    #
    if (decent == False):
        for ms in list(lisp.lisp_map_servers_list.values()):
            ms_list[ms.ms_name] = [b"", 0]
        #endfor
    #endif

    rloc_name = None
    if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname

    #
    # Count number of RTRs reachable so we know allocation count.
    #
    rtr_count = 0
    for rtr in list(lisp.lisp_rtr_list.values()):
        if (rtr == None): continue
        rtr_count += 1
    #endfor

    #
    # Run through multicast entry array.
    #
    eid_records = b""
    for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:

        #
        # Is db entry associated with a map-server name that is not configured?
        #
        if (ms_dns_name not in ms_list): continue

        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = 1 + rtr_count
        eid_record.authoritative = True
        eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
        eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
        if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
        eid_record.group = lisp.lisp_address(afi, group, 0, iid)
        if (eid_record.group.is_mac_broadcast() and \
            eid_record.eid.address == 0): eid_record.eid.mask_len = 0

        decent_index = ""
        ms_name = ""
        if (lisp.lisp_decent_pull_xtr_configured()):
            decent_index = lisp.lisp_get_decent_index(eid_record.group)
            decent_index = lisp.bold(str(decent_index), False)
            decent_index = "with decent-index {}".format(decent_index)
        else:
            decent_index = "for ms-name '{}'".format(ms_dns_name)
        #endif

        eid_str = lisp.green(eid_record.print_eid_tuple(), False)
        lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
            decent_index))

        eid_records += eid_record.encode()
        eid_record.print_record(" ", False)
        ms_list[ms_dns_name][1] += 1

        #
        # Build our RLOC entry.
        #
        rloc_record = lisp.lisp_rloc_record()
        rloc_record.rloc_name = rloc_name

        #
        # Decide on RLE address. Have NAT-traversal take precedent, otherwise
        # use configured RLE in group-mapping. If one wasn't configured use
        # lisp_myrlocs IPv4 address.
        #
        if (translated_rloc != None):
            rle_node.address = translated_rloc
        elif (rle_addr != None):
            rle_node.address = rle_addr
        else:
            rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
        #endif

        rloc_record.rle = rle
        rloc_record.local_bit = True
        rloc_record.reach_bit = True
        rloc_record.priority = 255
        rloc_record.weight = 0
        rloc_record.mpriority = 1
        rloc_record.mweight = 100
        eid_records += rloc_record.encode()
        rloc_record.print_record(" ")

        #
        # If we are doing NAT-traversal, include a set or RTR RLOCs with
        # priority 1. And set the global RLOCs to priority 254.
        #
        for rtr in list(lisp.lisp_rtr_list.values()):
            if (rtr == None): continue
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            rloc_record.print_record(" RTR ")
        #endfor

        #
        # Add EID-records to correct map-server name set.
        #
        # NOTE(review): 'eid_records' accumulates across ALL entries while
        # ms_list[...][1] counts per map-server name, and the cumulative
        # buffer is appended here each iteration; the send loop below uses
        # the final 'eid_records'. This agrees only when a single map-server
        # name is in use -- confirm intent before changing.
        #
        ms_list[ms_dns_name][0] += eid_records
    #endfor

    #
    # Build map-server independent fields.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.proxy_reply_requested = True
    map_register.map_notify_requested = False
    map_register.merge_register_requested = True

    #
    # Send Map-Register to each configured map-server.
    #
    for ms in list(lisp.lisp_map_servers_list.values()):
        key = ms.dns_name if decent else ms.ms_name

        #
        # Get EID-records from correct map-server name set.
        #
        if (key not in ms_list): continue

        #
        # Build map-server specific fields. Fix: key_id was previously
        # assigned into alg_id a second time, leaving key_id unset and
        # clobbering alg_id.
        #
        map_register.record_count = ms_list[key][1]
        if (map_register.record_count == 0): continue

        map_register.nonce += 1
        map_register.alg_id = ms.alg_id
        map_register.key_id = ms.key_id
        map_register.xtr_id = ms.xtr_id
        map_register.site_id = ms.site_id
        map_register.encrypt_bit = (ms.ekey != None)
        packet = map_register.encode()
        map_register.print_map_register()

        #
        # Append EID-records and encode xtr-ID and site-ID at end of
        # Map-Register.
        #
        trailer = map_register.encode_xtr_id(b"")
        packet = packet + eid_records + trailer

        ms.map_registers_multicast_sent += 1
        lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)

        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()

        #
        # Go build more EID-records.
        #
        time.sleep(.001)
    #endfor
    return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_etr_data_plane(parms, not_used, packet):
    """
    pcap callback: process a captured packet on the ETR. Handles, in order:
    IGMP reports, RLOC-probe Map-Requests, IPv4 reassembly, looped-back
    decent-push Map-Registers, data-encapsulated control packets, and
    finally decapsulation plus forwarding/bridging of the inner packet.

    parms:    [device-name, raw-socket] as supplied to pcap.loop().
    not_used: unused pcap callback argument.
    packet:   captured bytes starting at the link-layer header.
    """
    global lisp_ipc_listen_socket, lisp_send_sockets
    device = parms[0]
    lisp_raw_socket = parms[1]
    #
    # Jump over MAC header if packet received on interface. There is a 4-byte
    # internal header in any case (loopback interfaces will have a 4 byte
    # header)..
    #
    if (lisp.lisp_is_macos() == False):
        offset = 4 if device == "lo0" else 16
        packet = packet[offset::]
    elif (device == "en0"):
        packet = packet[14::]
    #endif
    #
    # Check IGMP packet (IP protocol 2). lisp_process_igmp_packet() returns
    # a bool on error, otherwise an entries array to register.
    #
    protocol = struct.unpack("B", packet[9:10])[0]
    if (protocol == 2):
        entries = lisp.lisp_process_igmp_packet(packet)
        if (type(entries) != bool):
            lisp_send_multicast_map_register(lisp_send_sockets, entries)
            return
        #endif
    #endif
    #
    # Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
    #
    orig_packet = packet
    packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, device, 0)
    if (orig_packet != packet):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
        return
    #endif
    #
    # First check if we are assembling IPv4 fragments. Do this only when
    # not doing NAT-traversal. Otherwise, the kernel will do it when we
    # receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
    #
    if (struct.unpack("B", packet[0:1])[0] & 0xf0 == 0x40):
        sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
        if (lisp.lisp_nat_traversal and sport == lisp.LISP_DATA_PORT): return
        packet = lisp.lisp_reassemble(packet)
        if (packet == None): return
    #endif
    packet = lisp.lisp_packet(packet)
    status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
    if (status == None): return
    #
    # Print some useful header fields.
    #
    packet.print_packet("Receive", True)
    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    # NOTE(review): 'sport' is only bound in the IPv4 check above -- confirm
    # this branch is reachable only when the outer header is IPv4.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        source = packet.inner_source.print_address_no_iid()
        packet.strip_outer_headers()
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif
    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply. The inner LISP header begins at offset 20+16+28=64
    # (outer-IPv4 + UDP-outer-LISP + inner-IPv4-UDP).
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet[36::]
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
            ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
        #endif
        source = packet.outer_source.print_address_no_iid()
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif
    #
    # Packets are arriving on pcap interface. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif
    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
    #
    # Strip outer headers and start inner header forwarding logic.
    #
    packet.strip_outer_headers()
    f_or_b = lisp.bold("Forward", False)
    #
    # Process inner header (checksum and decrement ttl).
    #
    igmp = False
    L2 = packet.inner_dest.is_mac()
    if (L2):
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        f_or_b = lisp.bold("Bridge", False)
    elif (packet.inner_version == 4):
        igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        if (igmp):
            entries = lisp.lisp_process_igmp_packet(packet.packet)
            if (type(entries) != bool):
                lisp_send_multicast_map_register(lisp_send_sockets, entries)
                return
            #endif
        #endif
        packet.inner_ttl = packet.outer_ttl
    elif (packet.inner_version == 6):
        # NOTE(review): IPv6 path passes the packet object while the IPv4
        # path passes packet.packet bytes -- presumably the lisp_ipv6_input()
        # signature differs; verify in lisp.py.
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    else:
        lisp.dprint("Cannot parse inner packet header")
        return
    #endif
    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
            return
        #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet (through NAT)")
            return
        #endif
    #endif
    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif
    #
    # We are going to forward or bridge the decapsulated packet.
    #
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
        lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))
    #
    # If we are decapsulating a MAC frame, then use the L2 socket where
    # the MAC header is already in packet.
    #
    # NOTE(review): 'db' is only bound on the unicast path above -- confirm
    # a bridged (MAC) destination always takes the unicast branch.
    #
    if (L2):
        packet.bridge_l2_packet(packet.inner_dest, db)
        return
    #endif
    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header.
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif
    #
    # Default to global raw socket otherwise get socket baesd on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket
    #
    # Send out.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is a RTR encapsulated this packet that is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
    """
    Process a packet that arrived on our ephemeral port from source port
    4341, i.e. an RTR-encapsulated packet that traversed a NAT. The kernel
    already stripped the outer IP/UDP headers, so 'packet' begins with the
    LISP data header.

    lisp_raw_socket: raw socket used to forward the decapsulated packet.
    packet:          bytes starting at the LISP data header.
    source:          outer source RLOC address string of the sender.
    """
    global lisp_ipc_listen_socket, lisp_send_sockets
    #
    # Decode LISP header.
    #
    lisp_header = packet
    packet = lisp.lisp_packet(packet[8::])
    if (packet.lisp_header.decode(lisp_header) == False): return
    #
    # Store outer source RLOC address so if we are doing lisp-crypto across
    # NAT-traversal, we can find the decryption key.
    #
    packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
        lisp.LISP_IPV4_HOST_MASK_LEN, 0)
    status = packet.decode(False, lisp_ipc_listen_socket,
        lisp.lisp_decap_stats)
    if (status == None): return
    #
    # Special case to log packets with no outer header but are considered
    # decapsulated when coming through NATs. Since packets are sent from
    # source port 4341, the kernel will strip outer header, so we don't have
    # outer header context in lisp_packet().
    #
    if (lisp.lisp_flow_logging): packet.log_flow(False)
    packet.print_packet("Kernel-decap", False)
    lisp.dprint(packet.lisp_header.print_header(" "))
    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        sport = packet.udp_sport
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif
    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply.
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        inner_ip = packet.packet
        inner_lisp = inner_ip[28::]
        ttl = -1
        if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
            ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
        #endif
        #
        # This could be a Map-Reply that comes from port 4341 to the NAT
        # translated port. We need to get the Map-Reply processed by the
        # lisp-itr process. Send it a packet IPC message.
        #
        if (lisp.lisp_is_rloc_probe_reply(inner_lisp[0:1])):
            sport = socket.ntohs(struct.unpack("H", packet.packet[20:22])[0])
            if (sport == lisp.LISP_DATA_PORT):
                packet = packet.packet[28::]
                packet = lisp.lisp_packet_ipc(packet, source, sport)
                lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-itr")
                return
            #endif
        #endif
        lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
        return
    #endif
    #
    # Packets are arriving on ephemeral socket. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif
    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
    #
    # Check if database-mapping exists for our local destination. When the
    # destination is a multicast address, check if the source is our EID.
    # That means we sent to a group we are members of. If using an RTR,
    # it can't tell since the source RLOC could be rewritten by a NAT so
    # the ETR must process the packet. If it decaps, the ITR on this system
    # will pcap it and encap again. This will happen until the TTL reaches 0.
    #
    # NOTE(review): unlike lisp_etr_data_plane(), the "No database-mapping"
    # case here does NOT return and the packet is still forwarded below --
    # confirm whether this is intentional for the NAT path.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
        #endif
    #endif
    else:
        if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
            lisp.dprint("Discard echoed multicast packet")
            return
        #endif
    #endif
    #
    # If this is a trace packet, lisp_trace_append() will swap addresses
    # and send packet back to source. We have no app to forward this decap'ed
    # packet to, so return.
    #
    if (packet.is_trace()):
        if (lisp.lisp_trace_append(packet, ed="decap") == False): return
    #endif
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format( \
        lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))
    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif
    #
    # Default to global raw socket otherwise get socket baesd on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket
    #
    # Send out on raw socket.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping and send a Map-Register for each configured IPv6
# source for the IPv6 group-prefix found.
#
def lisp_register_ipv6_group_entries(group, joinleave):
    """
    Look up the group-mapping for IPv6 'group' and send one multicast
    Map-Register covering each configured source of the group-prefix.
    """
    mapping = lisp.lisp_lookup_group(group)
    if (mapping == None): return

    sg_entries = [[s, group, joinleave] for s in mapping.sources]
    lisp_send_multicast_map_register(lisp_send_sockets, sg_entries)
    return
#enddef
#
# lisp_etr_join_leave_process
#
# Look at file-system to see if there is a join or leave to be done. This
# function will send joins in the form of building an IP/IGMPv2 packet to
# be passed to lisp_process_igmp_packet(). The groups that are joined are
# ones found as filenames in the current directory as "join-<group>". The
# IGMP Reports wil lbe sent to lisp_process_igmp_packet() every 30 seconds.
#
# For right now, if the group address is IPv6, send a Map-Register directly.
# We will get to MLD support later.
#
# This is used for testing and not meant for production deployment.
#
def lisp_etr_join_leave_process():
    """
    Testing-only thread: every 10 seconds scan the current directory for
    "join-<group>" files and internally join those groups (a matching
    "leave-<group>" file means leave). IPv4 groups are fed through a
    synthetic IGMPv2 packet to lisp_process_igmp_packet(); IPv6 groups are
    registered directly (no MLD support yet).
    """
    global lisp_send_sockets

    lisp.lisp_set_exception()

    #
    # Prebuild the IP header plus leading IGMP payload words, stored in
    # network byte order.
    #
    hton = socket.htonl
    words = (0x46000020, 0x9fe60000, 0x0102d7cc, 0x0acfc15a, 0xe00000fb,
        0x94040000)
    ip_header = b""
    for w in words: ip_header += struct.pack("I", hton(w))

    #
    # Look for files in current directory for "join-<group>" and then send
    # an IGMPv2 report to ourselves.
    #
    while (True):
        listing = getoutput("ls join-*").replace("join-", "")
        for group in listing.split("\n"):
            if (lisp.lisp_valid_address_format("address", group) == False):
                continue
            #endif

            is_ipv6 = (":" in group)

            #
            # A "leave-<group>" file means we are leaving the group.
            #
            leaving = os.path.exists("leave-{}".format(group))
            lisp.lprint("Internal {} group {}".format( \
                "leaving" if leaving else "joining", group))

            if (is_ipv6):
                if ("ff02:" in group.lower()):
                    lisp.lprint("Suppress registration for link-local groups")
                    continue
                #endif
                lisp_register_ipv6_group_entries(group, (leaving == False))
            else:

                #
                # IGMPv2 Leave (0x17) or Report (0x16), followed by group.
                #
                igmp_word = 0x17000000 if leaving else 0x16000000
                report = ip_header + struct.pack("I", hton(igmp_word))

                value = 0
                for octet in group.split("."):
                    value = (value << 8) + int(octet)
                #endfor
                report += struct.pack("I", hton(value))

                sg = lisp.lisp_process_igmp_packet(report)
                if (type(sg) != bool):
                    lisp_send_multicast_map_register(lisp_send_sockets, sg)
                #endif
                time.sleep(.100)
            #endif
        #endfor
        time.sleep(10)
    #endwhile
    return
#enddef
#
# lisp_etr_process
#
# This thread is for receiving encapsulated LISP packets address to destination
# port 4341. As well as IGMP reports. The IGMP reports can be captured on
# Ubuntu and Fedora but not on MacOS. The former supports IGMPv3 and the
# latter supports IGMPv2 if we listen on "en0".
#
def lisp_etr_process():
    """
    ETR capture thread: pcap LISP-encapsulated packets destined to port
    4341, IGMP reports, and related control traffic, handing each capture
    to lisp_etr_data_plane().
    """
    lisp.lisp_set_exception()
    if (lisp.lisp_myrlocs[0] == None): return

    #
    # Include multicast RLEs so underlay multicast packets are captured too.
    #
    rles = lisp.lisp_get_all_multicast_rles()

    #
    # MacOS needs "en0" for IGMPv2 testing; everywhere else listen on "any".
    #
    device = "en0" if lisp.lisp_is_macos() else "any"

    #
    # Build the BPF filter: IGMP, plus traffic for any of our addresses on
    # the LISP/VXLAN data ports, RLOC-probes, and IGMP-ish protocol-17
    # encodings.
    #
    host_terms = ""
    for address in lisp.lisp_get_all_addresses() + rles:
        host_terms += "{} or ".format(address)
    #endfor

    pfilter = "(proto 2) or ((dst host " + host_terms
    pfilter = pfilter[0:-4]
    pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
    pfilter += "(udp src port 4341) or "
    pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
    pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
        "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"

    lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
        device))

    #
    # Receive loop; the pcap binding differs between python2 and python3.
    #
    if (lisp.lisp_is_python2()):
        import pcappy
        pcap = pcappy.open_live(device, 1600, 0, 100)
        pcap.filter = pfilter
        pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
    #endif
    if (lisp.lisp_is_python3()):
        import pcapy
        pcap = pcapy.open_live(device, 1600, 0, 100)
        pcap.setfilter(pfilter)
        while(True):
            header, packet = pcap.next()
            if (len(packet) == 0): continue
            lisp_etr_data_plane([device, lisp_raw_socket], None, packet)
        #endwhile
    #endif
    return
#enddef
#
# lisp_etr_startup
#
# Intialize this LISP ETR process. This function returns no values.
#
def lisp_etr_startup():
    """Initialize the ETR process.

    Discovers local interfaces/addresses, opens the control and raw data
    sockets, optionally creates a tuntap interface for routing
    decapsulated IPv6 packets, and starts the packet-capture and
    join/leave threads.

    Returns True on success, False if no local addresses could be found.
    """
    global lisp_ipc_listen_socket
    global lisp_ephem_socket
    global lisp_send_sockets
    global lisp_raw_socket
    global lisp_l2_socket
    global lisp_mac_header

    lisp.lisp_i_am("etr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ETR starting up")

    #
    # Get local address for source RLOC for encapsulation.
    #
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
    # of using pytun. See below.
    #
    # m = list(lisp.lisp_mymacs.keys())[0]
    # mac = ""
    # for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
    # lisp_mac_header = mac + mac + "\x86\xdd"
    # lisp.dprint("Built MAC header for L2 socket:",
    #     lisp.lisp_format_packet(lisp_mac_header))

    #
    # Used for listening for Info-Replies for NAT-traversal support.
    # The multicast TTL allows Info-Requests to reach off-link RTRs.
    #
    s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
    s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    lisp_ephem_socket = s

    #
    # Open network send socket and internal listen socket.
    #
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
    lisp_send_sockets[0] = lisp_ephem_socket
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_send_sockets[2] = lisp_ipc_listen_socket

    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    # There is a special case where the RTR's lisp_send_sockets array is of
    # size 4 since we need to pass the raw socket through the lisp.py module
    # to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
    # The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    lisp_send_sockets.append(lisp_raw_socket)

    #
    # Open a L2 socket so when we decapsulate and have to route an IPv6
    # packet, we have the kernel receive a MAC frame on the loopback interface.
    # We do this because there is no IP_HDRINCL for IPv6 raw sockets.
    #
    # Disabling this code in favor of using a tuntap tun interface via the
    # pytun module. See code right below.
    #
    # if ("PF_PACKET" in dir(socket)):
    #     interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
    #         "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
    #     if (interface != None):
    #         lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
    #         lisp_l2_socket.bind(("lo", 0x86dd))
    #     #endif
    # #endif

    #
    # Setup tuntap tunnel interface so when we decap IPv6 packets, we can
    # send to kernel to route them. 0x86dd is the IPv6 ethertype.
    #
    if (pytun != None):
        lisp_mac_header = b'\x00\x00\x86\xdd'
        device = "lispers.net"
        try:
            lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
                name=device)
            os.system("ip link set dev {} up".format(device))
        except:
            lisp.lprint("Cannot create tuntap interface")
        #endtry
    #endif

    #
    # Start thread to listen on data socket.
    #
    threading.Thread(target=lisp_etr_process, args=[]).start()

    #
    # Test code to force IGMPv2 joins and leaves on an airplane. ;-)
    #
    threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
    return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():
    """Tear down the ETR process: stop periodic timers and close sockets."""
    global lisp_register_timer
    global lisp_etr_info_timer

    #
    # Cancel the periodic Map-Register and Info timer threads, if running.
    #
    for timer in (lisp_register_timer, lisp_etr_info_timer):
        if (timer): timer.cancel()
    #endfor

    #
    # Release the network send sockets and the internal IPC socket.
    #
    lisp.lisp_close_socket(lisp_send_sockets[0], "")
    lisp.lisp_close_socket(lisp_send_sockets[1], "")
    lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
    return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_discover_eid(ipc):
    """Process a dynamic-EID discovery IPC message from the lisp-itr process.

    The message has the form "learn%<eid-string>%<interface-name>" where
    the interface name is the literal string "None" when the EID went
    away.  Adds (and registers) or removes (and deregisters) the
    dynamic-EID entry in the matching database-mapping, and keeps a /32
    host route in the kernel in sync on non-macOS systems.
    """
    ipc = ipc.split("%")
    eid_str = ipc[1]
    interface = ipc[2]
    if (interface == "None"): interface = None

    eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    eid.store_address(eid_str)

    #
    # Do database-mapping lookup. The EID must fall under a configured
    # dynamic-EID prefix, otherwise ITR and ETR configs disagree.
    #
    db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
    if (db == None or db.dynamic_eid_configured() == False):
        lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
            format(lisp.green(eid_str, False)))
        return
    #endif

    #
    # Do logic checks. That is do not remove an entry if it is not there and
    # don't try to add an entry if it is already cached.
    #
    dyn_eid = None
    if (eid_str in db.dynamic_eids): dyn_eid = db.dynamic_eids[eid_str]
    if (dyn_eid == None and interface == None):
        lisp.lprint("ITR/ETR state mismatch for {}".format( \
            lisp.green(eid_str, False)))
        return
    #endif

    #
    # Check if ITR is changing the interface to the same interface, meaning
    # it is confused. Otherwise, the IPC is an interface change. Don't register
    # in this case.
    #
    if (dyn_eid and interface):
        if (dyn_eid.interface == interface):
            lisp.lprint("ITR sent redundant IPC for {}".format( \
                lisp.green(eid_str, False)))
        else:
            lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
                lisp.green(eid_str, False), dyn_eid.interface, interface))
            dyn_eid.interface = interface
        #endif
        return
    #endif

    #
    # Add new entry and register it.
    #
    if (interface):
        dyn_eid = lisp.lisp_dynamic_eid()
        dyn_eid.dynamic_eid.copy_address(eid)
        dyn_eid.interface = interface
        dyn_eid.get_timeout(interface)
        db.dynamic_eids[eid_str] = dyn_eid

        reg = lisp.bold("Registering", False)
        eid_str = lisp.bold(eid_str, False)
        lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
            lisp.green(eid_str, False), interface, dyn_eid.timeout))

        lisp_build_map_register(lisp_send_sockets, None, eid, None, False)

        #
        # Add /32 to routing table (macOS lacks the "ip route" tool).
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route add {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
        return
    #endif

    #
    # Remove existing entry and deregister it (TTL 0 map-register).
    #
    if (eid_str in db.dynamic_eids):
        interface = db.dynamic_eids[eid_str].interface
        dereg = lisp.bold("Deregistering", False)
        lisp.lprint("{} dynamic-EID {}".format(dereg,
            lisp.green(eid_str, False)))

        lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
        db.dynamic_eids.pop(eid_str)

        #
        # Delete /32 from routing table.
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route delete {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
    #endif
    return
#enddef
#
# lisp_etr_nat_probe
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "nat%<rloc-string>
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
# Add the RLOC to the lisp_etr_nat_probe_list.
#
def lisp_etr_nat_probe(ipc):
    """Process a "nat%<rloc>%<rloc-name>" IPC message from lisp-itr.

    Caches the RLOC in lisp_etr_nat_probe_list, triggers an Info-Request
    the first time an RLOC is seen, and records the hostname/port pair
    from the rloc-name so RLOC-probe replies can traverse the peer's NAT.
    """
    global lisp_etr_nat_probe_list
    global lisp_ephem_socket

    fields = ipc.split("%")
    rloc, rloc_name = fields[-2], fields[-1]

    #
    # Look up (or create) the cached address entry for this RLOC. An
    # Info-Request is only triggered the first time the RLOC shows up.
    #
    first_time = (rloc not in lisp_etr_nat_probe_list)
    if (first_time):
        etr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rloc, 32, 0)
        lisp_etr_nat_probe_list[rloc] = etr
    else:
        etr = lisp_etr_nat_probe_list[rloc]
    #endif

    #
    # Trigger Info-Request for destination RLOC.
    #
    if (first_time):
        sockets = [lisp_ephem_socket, lisp_ephem_socket,
            lisp_ipc_listen_socket]
        lisp.lprint("Trigger NAT-Probe to ETR {}".format(rloc))
        lisp.lisp_send_info_request(sockets, etr, lisp.LISP_DATA_PORT, None)
    #endif

    #
    # Store port info in for Decent-NAT xTR nat-info table so we can RLOC-probe
    # reply to it. The map-request only has the public RLOC address and not the
    # port needed to get through its NAT.
    #
    host_and_port = rloc_name.split(lisp.LISP_TP)
    if (len(host_and_port) != 2):
        lisp.lprint("Invalid NAT IPC rloc-name {}".format(host_and_port))
        return
    #endif
    lisp.lisp_store_nat_info(host_and_port[0], etr, int(host_and_port[1]))
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It is telling the lisp-etr process if
# RLOC-probing has determined if the RTR has gone up or down. And therefore
# if it should be registered to the mapping system.
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_process_rtr_updown(ipc):
    """Process an "rtr%<rtr-address>%<up|down>" IPC message from lisp-itr.

    RLOC-probing in the ITR determines whether an RTR is reachable; this
    updates lisp.lisp_rtr_list accordingly so only reachable RTRs are
    registered.  A "down" RTR is kept in the table with a None value; an
    "up" RTR gets a fresh lisp_address entry.  No-op when all RTRs are
    registered regardless of reachability.
    """
    if (lisp.lisp_register_all_rtrs): return

    opcode, rtr_str, status = ipc.split("%")
    if (rtr_str not in lisp.lisp_rtr_list): return

    lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
        lisp.red(rtr_str, False), lisp.bold(status, False)))

    #
    # Down: keep the key but clear the value so the RTR is not registered.
    # (The previous value is not needed, so it is not fetched first.)
    #
    if (status == "down"):
        lisp.lisp_rtr_list[rtr_str] = None
        return
    #endif

    #
    # Up: store a fresh address entry for the RTR.
    #
    rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
    lisp.lisp_rtr_list[rtr_str] = rtr
    return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process a nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_etr_process_nonce_ipc(ipc):
    """Process a "nonce%<R|E>%<rloc>%<hex-nonce>" IPC message from the ITR.

    Records on the per-RLOC echo-nonce state either the nonce we are
    waiting to see echoed ("R") or the nonce we echoed ourselves ("E").
    """
    _, opcode, rloc_str, nonce_str = ipc.split("%")
    nonce = int(nonce_str, 16)

    # Find existing echo-nonce state for this RLOC, or allocate it.
    echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
    if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)

    if (opcode == "R"):
        echo_nonce.request_nonce_sent = nonce
        msg = "Waiting for echo-nonce 0x{} from {}"
    elif (opcode == "E"):
        echo_nonce.echo_nonce_sent = nonce
        msg = "Sent echo-nonce 0x{} to {}"
    else:
        return
    #endif
    lisp.lprint(msg.format(lisp.lisp_hex_string(nonce),
        lisp.red(echo_nonce.rloc_str, False)))
    return
#enddef
#
# ETR commands processed by this process.
#
# Maps each CLI/config command name to [handler-function, parameter-spec].
# Each parameter spec entry is: name -> [value-required-flag, allowed values
# or numeric min/max].  (Format inferred from usage here; confirm against
# lispconfig.lisp_process_command.)
lisp_etr_commands = {
    "lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "decentralized-nat" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],

    "lisp interface" : [lispconfig.lisp_interface_command, {
        "interface-name" : [True],
        "device" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "dynamic-eid" : [True],
        "dynamic-eid-device" : [True],
        "lisp-nat" : [True, "yes", "no"],
        "dynamic-eid-timeout" : [True, 0, 0xff] }],

    "lisp map-server" : [lisp_etr_map_server_command, {
        "ms-name" : [True],
        "address" : [True],
        "dns-name" : [True],
        "authentication-type" : [False, "sha1", "sha2"],
        "authentication-key" : [False],
        "encryption-key" : [False],
        "proxy-reply" : [False, "yes", "no"],
        "want-map-notify" : [False, "yes", "no"],
        "merge-registrations" : [False, "yes", "no"],
        "refresh-registrations" : [False, "yes", "no"],
        "site-id" : [False, 1, 0xffffffffffffffff] }],

    "lisp database-mapping" : [lisp_etr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "register-ttl" : [True, 1, 0xffffffff],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],

    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],

    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],

    "lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
        "geo-name" : [False],
        "geo-tag" : [False] }],

    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],

    "lisp group-mapping" : [lisp_group_mapping_command, {
        "group-name" : [False],
        "ms-name" : [True],
        "group-prefix" : [False],
        "instance-id" : [True, 0, 0xffffffff],
        "rle-address" : [False],
        "sources" : [],
        "address" : [True] }],

    # Show commands take no parameters.
    "show database-mapping" : [lisp_etr_show_command, { }],
    "show etr-keys" : [lisp_etr_show_keys_command, {}],
    "show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
# Initialize; abort the process if startup fails (e.g. no local addresses).
if (lisp_etr_startup() == False):
    lisp.lprint("lisp_etr_startup() failed")
    lisp.lisp_print_banner("ETR abnormal exit")
    exit(1)
#endif

# Main select loop over the ephemeral (network) socket and the internal
# IPC socket.  An empty source string from lisp_receive() signals shutdown.
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]

while (True):
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break  # interrupted select (e.g. on shutdown) ends the loop

    #
    # Process Info-Reply messages received on ephemeral port.
    #
    if (lisp_ephem_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ephem_socket, False)
        if (source == ""): break

        if (port == lisp.LISP_DATA_PORT):
            lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
        else:
            # RLOC-probe requests are handled by the pcap thread instead.
            if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
                source, port)

            #
            # Info-Reply from map-server has new RTR-list, trigger a
            # Map-Register and a Info-Request to the RTR.
            #
            if (send_register):
                lisp_etr_info_timer = threading.Timer(0,
                    lisp_etr_process_info_timer, [None])
                lisp_etr_info_timer.start()
                lisp_register_timer = threading.Timer(0,
                    lisp_process_register_timer, [lisp_send_sockets])
                lisp_register_timer.start()
            #endif
        #endif
    #endif

    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break

        if (opcode == "command"):
            packet = packet.decode()
            # Dispatch on the IPC message prefix embedded in the payload.
            if (packet.find("learn%") != -1):
                lisp_etr_discover_eid(packet)
            elif (packet.find("nat%") != -1):
                lisp_etr_nat_probe(packet)
            elif (packet.find("nonce%") != -1):
                lisp_etr_process_nonce_ipc(packet)
            elif (packet.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(packet)
            elif (packet.find("rtr%") != -1):
                lisp_etr_process_rtr_updown(packet)
            elif (packet.find("stats%") != -1):
                packet = packet.split("%")[-1]
                lisp.lisp_process_data_plane_decap_stats(packet, None)
            else:
                # Anything else is a configuration/show command.
                lispconfig.lisp_process_command(lisp_ipc_listen_socket,
                    opcode, packet, "lisp-etr", [lisp_etr_commands])
            #endif
        elif (opcode == "api"):
            packet = packet.decode()
            lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile

lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
| [
"farinacci@gmail.com"
] | farinacci@gmail.com |
d3f9ac25806cb4110f7be0924818e2d79b7e1008 | 0af4552d69d8f049f8e8de12895fad9b19740925 | /kim_munse/hw1.py | 2b7edba1ec61892eb7f136235fee61bf6b45df44 | [] | no_license | BangDaeGeon/2020_cphys2-1 | a3990180a6dd5675ef366a2984ec2c4438ee947c | 9a245bc2b511717788a42c83882aaa4ab8d7484c | refs/heads/master | 2023-01-21T01:40:56.561030 | 2020-11-30T03:35:37 | 2020-11-30T03:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | for x in range(1,10):
for y in range(1,10):
print(x*y)
| [
"71931366+joongjae93@users.noreply.github.com"
] | 71931366+joongjae93@users.noreply.github.com |
fb54366e606923e522f506648ec99d8422579fe4 | 6ed8797b868b643c37933cd086ef70b51e5e93c1 | /addTopic.py | 23cb8d91770f6f722d64c1588a713fc7c500c156 | [] | no_license | wumss/add-topic-gui | 1350b16f7a3d188d492c5da104d86769e811865d | f06b89c084b77f8bf68d296b4d291b19410e9794 | refs/heads/master | 2021-01-13T04:26:44.519668 | 2017-01-26T02:01:27 | 2017-01-26T02:01:27 | 79,875,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,612 | py | import json
from tkinter import *
fname = 'suggested-topics.json'
# Add a object to the schedule
def writeNewTopic(topic):
    """Append *topic* to the suggested-topics JSON file and report it."""
    # Load the existing topic list, extend it, then rewrite the file.
    with open(fname) as fp:
        existing = json.load(fp)
    existing.append(topic)
    with open(fname, 'w') as fp:
        json.dump(existing, fp, indent=4)
    print(json.dumps(topic))
    print("Was added")
def nextField():
    """Advance the wizard to the next label/field and rebuild the buttons."""
    global i
    i+=1
    extraField()
def extraField():
    """Add an input field for the current label and refresh the action buttons.

    Re-packs the "add another", "move on", and "submit" buttons so they
    stay below the newly added entry widget.
    """
    global extra
    global moveOn
    global i
    global labels
    # Remove the buttons so they can be re-packed after the new entry.
    extra.pack_forget()
    moveOn.pack_forget()
    add.pack_forget()
    addEntry()
    # rstrip('s') turns plural label names (e.g. "tags") into singular.
    extra["text"] = "Add another " + labels[i].rstrip('s')
    extra.pack(pady=5)
    if(i+1 < len(labels)):
        moveOn["text"] = "Add " + labels[i+1]
        moveOn.pack(pady=5)
    # The submit button only appears once the last label group is reached.
    if(i > 2):
        add.pack(pady=5)
def addEntry():
    """Create and pack a labeled input widget for the current label index.

    Index 1 ("excerpt") gets a multi-line Text widget; every other label
    gets a single-line Entry.  Labels 0 and 1 hold a single widget in
    `entries`, while labels 2+ accumulate a list of widgets.
    """
    global labels
    global i
    global entries
    Label(text=labels[i].rstrip('s')).pack(side="top", anchor="w")
    if(i == 1):
        e = Text(width=75, height=15)
    else:
        e = Entry(width=100)
    e.pack()
    # Thin sunken frame acts as a visual separator between fields.
    Frame(height=2, bd=1, relief=SUNKEN).pack(fill=X, padx=5, pady=5)
    if(i > 1):
        entries[i].append(e)
    else:
        entries[i] = e
def getText(widget):
    """Return the current contents of a Text or Entry-style widget."""
    # Text widgets take an index range ("end-1c" drops the trailing
    # newline); every other widget class has a no-argument get().
    is_text = widget.winfo_class() == "Text"
    return widget.get("1.0", "end-1c") if is_text else widget.get()
def clearText(widget):
    """Erase all content from a Text or Entry-style widget."""
    # Text widgets index content from "1.0"; Entry widgets from 0.
    start = "1.0" if widget.winfo_class() == "Text" else 0
    widget.delete(start, END)
def submitTopic():
    """Collect all field values into a topic dict, save it, and clear the form.

    List-valued labels (tags, references, see-also) gather every
    non-empty entry; scalar labels (topic, excerpt) take the single
    widget's text.  Every widget is cleared after being read.
    """
    global entries
    global master
    global labels
    newTopic = {}
    leng = len(labels)
    for i in range(leng):
        if(isinstance(entries[i], list)):
            toAdd = []
            for e in entries[i]:
                # Skip blanks so the JSON lists contain only real values.
                if(getText(e) != ""):
                    toAdd.append(getText(e))
                clearText(e)
            newTopic[labels[i]] = toAdd
        else:
            newTopic[labels[i]] = getText(entries[i])
            clearText(entries[i])
    writeNewTopic(newTopic)
labels = ["topic", "excerpt", "tags", "references", "see-also"]
i = 0
entries = [None, None, [], [], []]
master = Tk()
extra = Button(master, text="Add another " + labels[2].rstrip('s'), command=lambda: extraField())
moveOn = Button(master, text="Add " + labels[3], command=lambda: nextField())
add = Button(master, text="Submit Topic", command=lambda: submitTopic())
while(i < 3):
addEntry()
i+=1
i = 2
extra.pack(pady=5)
moveOn.pack(pady=5)
mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
825827097adba23db0866260b555a3ed41ff6b70 | e301dc54f11c77d6b721220005ae384f7c9d512e | /Problem Set 6 - Python/Cash/cash.py | bbe343aae240c4484372bd8438fcff47fd7465f4 | [] | no_license | soufian01/CS50-Harvard-University-Introduction-To-Computer-Science- | 6905c6e78ed7dc6af3ef7429f7716b9732e6b9bd | 6afde8776c802e6c484c296ad0e4aec29a0a5431 | refs/heads/master | 2022-08-07T07:08:08.375288 | 2020-05-19T16:22:16 | 2020-05-19T16:22:16 | 259,454,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | from cs50 import get_float # importing the get_float function from the cs50 library.
change = get_float("Change owed: ") # Ask the user for the input.
count = 0
while change < 0:
change = get_float("Change owed: ")
change = round(change * 100)
change = round(change * 100)
while change >= 25:
change -= 25
count += 1
while change >= 10:
change -= 10
count += 1
while change >= 5:
change -= 5
count += 1
while change >= 1:
change -= 1
count += 1
print(count) # print the final result.
| [
"noreply@github.com"
] | noreply@github.com |
dae8dcdee9369cf62771b3d6dee303b90dbe166f | 2294e0e4fb5b3abd206af4739e1a7ec572db9a95 | /enaml_nodegraph/widgets/graphicsview.py | d6904e1dc5b10ce9f08d09801ab3b5b7dea5c5cf | [] | no_license | ulricheck/enaml_nodegraph | 382fa06245aa766abd0325356f329936cef83156 | 14a12f51b10af2934393492c9b591fac3f637742 | refs/heads/master | 2020-03-23T14:26:42.963110 | 2018-09-01T14:13:35 | 2018-09-01T14:13:35 | 141,676,063 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,269 | py | __author__ = 'jack'
import logging
from atom.api import Typed, List, Instance, ForwardTyped, ForwardInstance, set_default
from enaml.widgets.control import Control, ProxyControl
from enaml.core.declarative import d_
from .graphicsitem import GraphicsItem
from .graphicsscene import GraphicsScene
from .edge_item import EdgeItem, EdgeType
from .node_socket import NodeSocket, SocketType
log = logging.getLogger(__name__)
def import_graph_controller_class():
    """Lazily import and return GraphControllerBase.

    Deferred import avoids a circular dependency between this widget
    module and enaml_nodegraph.controller (used by the ForwardInstance
    member below).
    """
    from enaml_nodegraph.controller import GraphControllerBase
    return GraphControllerBase
class ProxyGraphicsView(ProxyControl):
    """ The abstract definition of a proxy QGraphicsView object.

    Toolkit backends implement update() and set_scene() for the
    GraphicsView declaration.
    """
    #: A reference to the GraphicsView declaration.
    declaration = ForwardTyped(lambda: GraphicsView)

    def update(self, *args):
        """Refresh the view. Must be implemented by the toolkit backend."""
        raise NotImplementedError

    def set_scene(self, scene):
        """Attach *scene* to the view. Must be implemented by the backend."""
        raise NotImplementedError
class GraphicsView(Control):
    """ A widget for displaying QGraphicsScene.

    Hosts a GraphicsScene child, forwards it to the toolkit proxy, wires
    a graph controller, and implements the interactive edge-drag
    workflow (start a drag on an OUTPUT socket, finish on an INPUT
    socket).
    """
    #: Controller coordinating node/edge creation (set via declaration).
    controller = d_(ForwardInstance(import_graph_controller_class))
    #: The scene being displayed; auto-assigned from the child hierarchy.
    scene = d_(Typed(GraphicsScene))
    #: Items currently selected in the scene.
    selectedItems = d_(List(GraphicsItem))

    #: An graphicsview widget expands freely in height and width by default.
    hug_width = set_default('ignore')
    hug_height = set_default('ignore')

    #: Temporary edge shown while the user drags from an output socket.
    _dragEdge = Instance(EdgeItem)

    #: A reference to the ProxyGraphicsView object
    proxy = Typed(ProxyGraphicsView)

    #--------------------------------------------------------------------------
    # Content Helpers
    #--------------------------------------------------------------------------
    def child_added(self, child):
        """ Set the scene attribute when a scene child is added """
        super(GraphicsView, self).child_added(child)
        if isinstance(child, GraphicsScene):
            self.scene = child

    def child_removed(self, child):
        """ Reset the scene attribute when a scene child is removed """
        super(GraphicsView, self).child_removed(child)
        if isinstance(child, GraphicsScene):
            self.scene = None

    def activate_top_down(self):
        """Warn about missing scene/controller, then push scene to proxy."""
        if not self.scene:
            log.warning("GraphicsView needs a scene to work properly.")
        if not self.controller:
            log.warning("GraphicsView needs controller to work properly.")

        super(GraphicsView, self).activate_top_down()

        if self.scene is not None:
            self.proxy.set_scene(self.scene)

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    def _observe_scene(self, change):
        # Keep the toolkit proxy in sync when the scene member changes.
        if self.proxy is not None and change['value'] is not None:
            self.proxy.set_scene(change['value'])

    def _observe_controller(self, change):
        # Hand the controller a back-reference to this view and to the scene.
        ctrl = change['value']
        if ctrl is not None:
            ctrl.set_view(self)
            if self.scene is not None:
                self.scene.controller = ctrl

    #--------------------------------------------------------------------------
    # API
    #--------------------------------------------------------------------------
    def edgeDragStart(self, item):
        """Begin an interactive edge drag from an OUTPUT socket *item*."""
        if self.controller is None:
            log.warning("GraphicsView has no controller - ignoring request")
            return
        if isinstance(item, NodeSocket) and item.socket_type == SocketType.OUTPUT:
            edge_typename = self.controller.edge_type_for_start_socket(item.parent.id, item.id)
            self._dragEdge = self.controller.create_edge(edge_typename,
                                                         start_socket=item,
                                                         end_socket=None,
                                                         scene=self.scene)
            self._dragEdge.pos_destination = item.absolute_position
        else:
            log.warning("Invalid edge start: ", item)

    def edgeDragEnd(self, item):
        """Finish an edge drag: connect to an INPUT socket or discard.

        The drag edge is connected only if *item* is a compatible input
        socket per controller.edge_can_connect(); otherwise the
        provisional edge is destroyed.
        """
        if self._dragEdge is None:
            return
        ss = self._dragEdge.start_socket
        if isinstance(item, NodeSocket) and item.socket_type == SocketType.INPUT and \
                self.controller.edge_can_connect(ss.parent.id, ss.id, item.parent.id, item.id):
            self._dragEdge.end_socket = item
            self.controller.edge_connected(self._dragEdge.id)
        else:
            if self._dragEdge is not None:
                self.controller.destroy_edge(self._dragEdge.id)
        self._dragEdge = None

    def updatePoseEdgeDrag(self, pos):
        """Track the mouse: move the drag edge's free end to *pos*."""
        if self._dragEdge is not None:
            self._dragEdge.pos_destination = pos

    def handle_selection_changed(self, items):
        """Record the new selection and notify the controller."""
        self.selectedItems = items
        if self.controller is not None:
            self.controller.itemsSelected(items)

    def getViewportTransform(self):
        """Return the proxy's viewport transform (None without a proxy)."""
        if self.proxy is not None:
            return self.proxy.getViewportTransform()

    def setViewportTransform(self, trans):
        """Apply *trans* as the proxy's viewport transform."""
        if self.proxy is not None:
            self.proxy.setViewportTransform(trans)

    def fitNodesInView(self):
        """Zoom/pan so every node in the scene is visible."""
        self.proxy.fitInView(self.scene.bounding_box_all_nodes())

    def resetViewportTransform(self):
        """Reset zoom/pan to the identity transform."""
        self.proxy.resetViewportTransform()
| [
"ulrich.eck@tum.de"
] | ulrich.eck@tum.de |
2024c5d4f87fca03d6ed98d0117333ae57ac87d9 | b3091e900447227c611337e7f1d09113f1496282 | /projdat/result/result/urls.py | cc606e601770ea29185b2b4b4d0bbf243efd0a25 | [] | no_license | FinancialEngineerLab/documents | 51285a6d13ea3a6a133f9b8af75782632ec46cf6 | 5ea97493d460dada421e7f04ad31a4d9419a44d1 | refs/heads/master | 2022-03-10T17:14:31.473383 | 2014-05-05T15:41:07 | 2014-05-05T15:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from django.conf.urls import patterns, include, url
# Enable the Django admin and auto-register installed apps' ModelAdmins.
from django.contrib import admin
admin.autodiscover()

# URL routes for the todolist app (old-style string view references,
# pre-Django-1.10 `patterns()` syntax).
urlpatterns = patterns('',
    url(r'^$', 'result.todolist.views.home', name='home'),
    url(r'^add_item/$', 'result.todolist.views.add_item', name='add_item'),
    url(r'^delete_item/(?P<item_id>\d+)/$', 'result.todolist.views.delete_item', name='delete_item'),
    url(r'^toggle_item_pressed/(?P<item_id>\d+)/$', 'result.todolist.views.toggle_item_pressed', name='toggle_item_pressed'),
    # Admin site routes.
    url(r'^admin/', include(admin.site.urls)),
)
| [
"pvx884@alumni.ku.dk"
] | pvx884@alumni.ku.dk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.