text stringlengths 38 1.54M |
|---|
#!/usr/bin/python3
class Solution(object):
    def merge(self, intervals):
        """
        Merge overlapping intervals.

        :type intervals: List[List[int]]  -- each interval is [start, end]
        :rtype: List[List[int]]           -- merged, sorted by start

        BUG FIX: the original accessed `.start`/`.end` attributes, but the
        driver below passes plain two-element lists, which raised
        AttributeError.  Index access works for the list form.
        """
        merged = []
        # Sort by start so any overlapping intervals become adjacent.
        for interval in sorted(intervals, key=lambda x: x[0]):
            if merged and interval[0] <= merged[-1][1]:
                # Overlaps the last merged interval: extend its end.
                merged[-1][1] = max(merged[-1][1], interval[1])
            else:
                merged.append(interval)
        return merged
sol = Solution()
output = sol.merge([[1, 3], [2, 6], [8, 10], [15, 18]])
print(output)
import time
from itertools import permutations as perm
def sol1(lim):
    """Find the first triangle number after T_285 = 40755 that is also
    pentagonal and hexagonal (Project Euler 45).

    `lim` bounds the index range searched for all three sequences.
    Prints and returns the number found, or returns None if none exists
    within the limit (returning the value is backward compatible: the
    original implicitly returned None).
    """
    # Set membership is O(1); set comprehensions instead of set([...]).
    pent_nums_set = {(3 * i * i - i) // 2 for i in range(lim)}
    hexa_nums_set = {2 * n * n - n for n in range(lim)}
    for i in range(286, lim):  # start past the known solution T_285
        tri = (i * i + i) // 2
        if tri in pent_nums_set and tri in hexa_nums_set:
            print('next tri pent hex num is {}'.format(tri))
            return tri
    return None
def main():
    """Run sol1 and report the elapsed wall-clock time."""
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented high-resolution replacement for interval timing.
    a = time.perf_counter()
    sol1(100000)
    b = time.perf_counter()
    print('time taken is {:f}'.format(b - a))
main()
|
# Generated by Django 2.2.7 on 2020-01-14 12:04
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated: rewrites the `created_on` default of every target model
    # to the fixed timestamp captured when `makemigrations` ran.
    # NOTE(review): a hard-coded datetime default usually means the model
    # field should use `auto_now_add=True` (or a callable like
    # `django.utils.timezone.now`) instead of a frozen one-off value --
    # TODO confirm against the model definitions.

    dependencies = [
        ('Public_Data_Acquisition_Unit', '0012_auto_20200101_1047'),
    ]

    operations = [
        migrations.AlterField(
            model_name='facebook_target_group',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 854034, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='facebook_target_hashtag',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 854445, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='facebook_target_page',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 853593, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='facebook_target_page',
            name='expired_on',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='facebook_target_person',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 853030, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='facebook_target_search',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 854846, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='instagram_target_person',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 857188, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='instagram_target_search',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 857695, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='linkedin_target_company',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 858605, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='linkedin_target_person',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 858148, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='news_site_target',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 856769, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='twitter_target_hashtag',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 855920, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='twitter_target_person',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 855332, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='twitter_target_search',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2020, 1, 14, 12, 4, 54, 856361, tzinfo=utc)),
        ),
    ]
|
#!/usr/bin/python
# coding:utf-8
# Python 2 script: loads the MovieLens ratings from MySQL into a dense
# user-item matrix and prints the cosine similarity between every user pair.
from numpy import *
import mysql.connector
import Similarity

# Connection settings for the local MovieLens MySQL database.
config = {'host': 'localhost',
          'user': 'root',
          'password': '',
          'port': '3306',
          'database': 'movielens',
          'charset': 'utf8',
          'buffered': True,
          }
try:
    conn = mysql.connector.connect(**config)
except mysql.connector.Error as e:
    # NOTE(review): on connection failure this only prints and falls
    # through, so `conn` is undefined on the next line (NameError) --
    # consider exiting here instead.
    print 'connect failed!{}'.format(e)
cursor = conn.cursor()
users = []
movies = []
try:
    # Collect all movie ids; each cursor row is a 1-tuple.
    query_movies = 'select movieid from movies'
    cursor.execute(query_movies)
    # data = cursor.fetchall()
    # data = []
    for movieid in cursor:
        # line=[movieid,title,genres.split('|')]
        # data.append(line)
        movies.append(movieid[0])
    # print len(data)
    # Collect all user ids the same way.
    query_users = 'select UserID from users'
    cursor.execute(query_users)
    for userID in cursor:
        users.append(userID[0])
except mysql.connector.Error as e:
    print 'connect failed!{}'.format(e)
# print movies[-1][0], users[-1][0]
# Initialize the user-item rating matrix; the +1 sizing assumes ids are
# dense and ascending so the largest id can be used as an index directly.
print users[-1], movies[-1]
dataMat = zeros((users[-1]+1,movies[-1]+1))
try:
    # Fill the matrix: one cell per (user, movie) rating.
    query_ratings = "select UserID, MovieID,Rating from ratings"
    cursor.execute(query_ratings)
    for userid, movieid, rating in cursor:
        # print userid, movieid, rating
        dataMat[userid, movieid]=rating
finally:
    # Always release the DB resources, even if the query fails.
    cursor.close()
    conn.close()
# Result is a matrix of pairwise similarities indexed by user id.
# print Similarity.userSimiliar(mat(dataMat), 1, Similarity.cosSim)
result = Similarity.simBetweenUsers(mat(dataMat), users, Similarity.cosSim)
for user1 in users:
    for user2 in users:
        print user1, user2, result[user1, user2]
|
import numpy as np
import pandas as pd
from mxnet import ndarray as nd
from mxnet import autograd as ag
from mxnet import gluon
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 120
import matplotlib.pyplot as plt
# Load the Kaggle house-prices data.
train = pd.read_csv("all/train.csv")
test = pd.read_csv("all/test.csv")
# Stack train+test features so preprocessing is applied consistently to both.
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
# Standardize the numeric features (zero mean, unit variance).
numeric_feats = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feats] = all_X[numeric_feats].apply(lambda x: (x - x.mean()) / (x.std()))
# One-hot encode categorical columns; dummy_na=True keeps NaN as its own category.
all_X = pd.get_dummies(all_X, dummy_na=True)
# Impute remaining missing values with the per-column mean.
all_X = all_X.fillna(all_X.mean())
# Split back into train/test arrays.
# BUG FIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# .values is the documented replacement.
num_train = train.shape[0]
X_train = all_X[:num_train].values
X_test = all_X[num_train:].values
y_train = train.SalePrice.values
# Convert to NDArray for Gluon.
X_train = nd.array(X_train)
y_train = nd.array(y_train)
# BUG FIX: reshape() returns a new array; the original discarded the result,
# leaving y_train 1-D contrary to the intent of this line.
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
# Loss function: squared error.
square_loss = gluon.loss.L2Loss()
# Evaluation metric: RMSE of the log-prices (Kaggle's scoring metric).
def get_rmse_log(net, X_train, y_train):
    """Root-mean-square error between log-predictions and log-targets.

    Predictions are clipped to [1, inf) so the logarithm is always defined.
    """
    n = X_train.shape[0]
    clipped = nd.clip(net(X_train), 1, float('inf'))
    total = nd.sum(square_loss(nd.log(clipped), nd.log(y_train))).asscalar()
    return np.sqrt(2 * total / n)
# # 定义模型,模型最初版本
# def get_net():
# net = gluon.nn.Sequential()
# with net.name_scope():
# net.add(gluon.nn.Dense(1))
#
# net.initialize()
# return net
# 定义模型
def get_net():
    """Build and initialize the regression MLP: 1024-ReLU -> Dropout(0.5) -> 1."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        for layer in (gluon.nn.Dense(1024, activation='relu'),
                      gluon.nn.Dropout(0.5),
                      gluon.nn.Dense(1)):
            net.add(layer)
    net.initialize()
    return net
# 训练
def train(net, X_train, y_train, X_test, y_test, epochs,
          verbose_epoch, learning_rate, weight_decay):
    """Train `net` with Adam, plot the loss curve(s), return the final loss.

    X_test/y_test may be None to train without validation.  Returns
    (train_rmse, test_rmse) when test data is given, else train_rmse.
    """
    train_loss = []
    if y_test is not None:
        test_loss = []
    batch_size = 100
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate, 'wd': weight_decay})
    # Re-initialize so repeated calls (k-fold) start from fresh weights.
    net.collect_params().initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with ag.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        # PERF FIX: the full-dataset RMSE was recomputed after every batch;
        # only the value from the last batch was ever used, so computing it
        # once per epoch after the batch loop yields the identical number.
        cur_train_loss = get_rmse_log(net, X_train, y_train)
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f " % (epoch, cur_train_loss))
        train_loss.append(cur_train_loss)
        if X_test is not None:
            cur_test_loss = get_rmse_log(net, X_test, y_test)
            test_loss.append(cur_test_loss)
    plt.plot(train_loss)
    # BUG FIX: plt.legend expects a sequence of labels; a bare string is
    # iterated character by character ('t', 'r', 'a', 'i', 'n').
    plt.legend(['train'])
    if X_test is not None:
        plt.plot(test_loss)
        plt.legend(['train', 'test'])
    plt.show()
    if X_test is not None:
        return cur_train_loss, cur_test_loss
    return cur_train_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train, learning_rate, weight_decay):
    """k-fold cross validation; returns (avg train RMSE, avg test RMSE).

    Trains a fresh network per fold; any trailing rows beyond k*fold_size
    are never used as validation data.
    """
    assert k > 1
    fold_size = X_train.shape[0] // k  # integer division: rows per fold
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_i in range(k):
        # Fold `test_i` is held out for validation.
        X_val_test = X_train[test_i * fold_size:(test_i + 1) * fold_size, :]
        y_val_test = y_train[test_i * fold_size:(test_i + 1) * fold_size]
        val_train_defined = False
        for i in range(k):  # concatenate the remaining k-1 folds for training
            if i != test_i:
                X_cur_fold = X_train[i * fold_size:(i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size:(i + 1) * fold_size]
                if not val_train_defined:
                    # First training fold seeds the arrays ...
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    # ... later folds are appended along the sample axis.
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss, test_loss = train(net, X_val_train, y_val_train, X_val_test, y_val_test, epochs,
                                      verbose_epoch, learning_rate, weight_decay)
        train_loss_sum += train_loss
        print("Test Loss: %f " % test_loss)  # validation loss of fold `test_i`
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k
# Hyper-parameters for the k-fold cross-validation run.
k = 5  # number of folds
epochs = 50
verbose_epoch = 45  # per-epoch loss is printed only after this epoch
learning_rate = 0.05
weight_decay = 170
train_loss, test_loss = k_fold_cross_valid(k, epochs, verbose_epoch, X_train
                                           , y_train, learning_rate, weight_decay)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f " % (k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate, weight_decay):
    """Train on the full training set and write predictions to submission.csv.

    BUG FIX: the learning-rate parameter was misspelled `leaening_rate` and
    never used -- the body silently read the module-level `learning_rate`
    global instead, so any value passed in was ignored.
    """
    net = get_net()
    train(net, X_train, y_train, None, None, epochs, verbose_epoch, learning_rate, weight_decay)
    # NOTE(review): predictions are made on the module-level X_test NDArray,
    # not on the `test` DataFrame parameter -- confirm this is intended.
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)  # column-wise concat
    submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate, weight_decay)
|
# Generated by Django 3.0.2 on 2020-03-19 09:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration.
    # NOTE(review): `order_status` is a CharField, but three of its choices
    # are integers (1, 2, 0).  CharField stores strings, so validation and
    # comparisons against the integer choices may never match -- TODO
    # confirm whether the choices should be ('1','1'), ('2','2'), ('0','0').

    dependencies = [
        ('apis', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderpost',
            name='order_status',
            field=models.CharField(blank=True, choices=[(1, 1), (2, 2), (0, 0), ('CANCELLED', 'CANCELLED')], max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='ordertrn',
            name='order_status',
            field=models.CharField(choices=[(1, 1), (2, 2), (0, 0), ('CANCELLED', 'CANCELLED')], max_length=255),
        ),
        migrations.AlterField(
            model_name='verifylog',
            name='created_time',
            field=models.DateTimeField(),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import os
import imageio
from ..composition import SeekableSource
class ImageFileSource(SeekableSource):
    """Seekable source that reads consecutively numbered image files.

    `filename_func(i)` must map an integer position to a file path.  When
    `nb_images` is None the number of images is auto-detected by probing
    files from position 0 upwards until one is missing.
    """

    def __init__(self, filename_func, nb_images=None, **kwargs):
        self.parallel_possible = False
        self.cached = False
        self._filename_func = filename_func
        self._size = nb_images
        self._check_files()
        super(ImageFileSource, self).__init__(name=u"ImageFileSource", **kwargs)

    def _get_data_at(self, position):
        """Read and return the image at `position`."""
        img_file = self._filename_func(position)
        return imageio.imread(img_file)

    def _check_files(self):
        """Count continuously numbered images and probe dtype/shape.

        Raises RuntimeError when fewer files exist than the requested size.
        """
        max_nb = int(1e12) if self._size is None else self._size
        counter = 0
        # Check the bound first so we never stat a file past the limit.
        while counter < max_nb and os.path.isfile(self._filename_func(counter)):
            counter += 1
        if self._size is None:
            self._size = counter
        elif counter < self._size:
            # BUG FIX: the original condition (`self._size < counter`) could
            # never be true because the loop stops at self._size, so missing
            # files were silently accepted.
            raise RuntimeError("Not all image files were found.")
        if self._size > 0:
            # Probe the first image for dtype/shape metadata.
            image = self._get_data_at(0)
            self._dtype = image.dtype
            self._shape = image.shape
        else:
            self._dtype = None
            self._shape = None

    @property
    def dtype(self):
        return self._dtype
|
# Nome: Van
# ID: 2693
# Resposta: Accepted
# Linguagem: Python 3 (Python 3.4.3) [+1s]
# Tempo: 0.028s
# Tamanho: 287 Bytes
# Submissao: 24/10/17 08:11:55
# -*- coding: utf-8 -*-
# Reads Q records per test case ("student region cost"), orders them by
# ascending cost (ties broken by region, then student) and prints the
# student names in that order.  Ends at end of input.
while 1:
    try:
        Q = int(input())
    except (EOFError, ValueError):
        # BUG FIX: the bare `except` also swallowed KeyboardInterrupt and
        # SystemExit; only end-of-input / malformed input should stop here.
        break
    schedule = []
    for i in range(Q):
        student, region, cost = input().split()
        schedule.append((int(cost), region, student))
    schedule.sort()
    print('\n'.join([s[2] for s in schedule]))
|
import Products.PloneGetPaid.browser.portlets.cart
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from getpaid.googlecheckout.browser.button import checkout_button_url
from Products.CMFCore.utils import getToolByName
class Renderer(Products.PloneGetPaid.browser.portlets.cart.Renderer):
    """Cart portlet renderer extended with a Google Checkout button URL."""

    # Custom template overriding the stock PloneGetPaid cart portlet.
    render = ViewPageTemplateFile('../templates/portlet-cart.pt')

    def googleCheckoutButtonUrl(self):
        """Return the Google Checkout button URL for the current portal."""
        portal = getToolByName(self.context, 'portal_url').getPortalObject()
        return checkout_button_url(portal)
|
def max_value(list):
    """Return the largest element of `list`, or 0 when it is empty.

    BUG FIX: the original started the running maximum at 0, so an
    all-negative list wrongly returned 0.  `max(..., default=0)` keeps the
    empty-list behaviour while fixing negatives.  (The parameter name is
    kept for backward compatibility even though it shadows the builtin.)
    """
    return max(list, default=0)
def sum_of_list(list):
    """Return the arithmetic mean of `list`.

    BUG FIX: the original iterated the module-level `notas` instead of its
    parameter, so the argument was silently ignored.  Raises
    ZeroDivisionError for an empty list, mirroring the original's behaviour
    when the global was empty.
    """
    return sum(list) / len(list)
# Main: grade statistics demo.
notas = [9,7,7,10,3,9,6,6,2]  # raw grades
# How many times grade 7 appears.
print('O numero de notas 7 encontradas = ', notas.count(7))
print('A maior nota e:', max_value(notas))      # highest grade
print('A media das notas:', sum_of_list(notas))  # mean grade
notas.sort()  # in-place ascending sort
print('Notas Ordenadas de forma crescente', notas)
|
def create_sieve(size):
    """Sieve of Eratosthenes: return a list of length size+1 where
    result[i] == 1 iff i is prime, else 0.

    BUG FIX: the original left index 1 set to 1, wrongly reporting 1 as
    prime.
    """
    sieve = [1] * (size + 1)
    sieve[0] = 0
    if size >= 1:
        sieve[1] = 0  # 1 is not prime
    # Marking can stop at sqrt(size); start at i*i because smaller
    # multiples were already marked by smaller primes.
    for i in range(2, int(size ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i::i] = [0] * len(sieve[i * i::i])
    return sieve
# BUG FIX: input() returns a string in Python 3; the original passed the raw
# string on and crashed in create_sieve on `size + 1`.
size = int(input('Enter the sieve size: '))
sieve = create_sieve(size)
|
from django.shortcuts import render, get_object_or_404
# used for pagnation
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
#used for my customized created models
from .models import Category, Product
from cart.forms import CartAddProductForm
from .forms import SearchForm
#used for normalizing my search text
import re
def product_list(request, category_slug=None):
    """
    Display all published products and categories, optionally restricted to
    one category (via `category_slug`), paginated with a user-selectable
    page size taken from the bound SearchForm.
    """
    # Categories that have at least one published product (sidebar list).
    categories = Category.objects.filter(products__is_published=True).distinct()
    # All published products (default listing).
    page_list = Product.objects.filter(is_published=True)
    show_count = None
    category = None
    # The page-size preference arrives via the GET query string.
    if request.method == 'GET':
        # Bind the form to the request data.
        form=SearchForm(request.GET)
        if form.is_valid():
            show_count = form.cleaned_data['show_count']
    # Narrow the listing to one category when a slug is given.
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        page_list = Product.objects.filter(categories__in=[category],is_published=True)
    # Pagination: use the requested page size, defaulting to 10.
    if show_count:
        paginator = Paginator(page_list, show_count)
    else:
        paginator = Paginator(page_list, 10)
        show_count = 10
    page = request.GET.get('page')
    try:
        page_list = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver the first page.
        page_list = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver the last page.
        page_list = paginator.page(paginator.num_pages)
    # Strip any existing `page` parameter so paging links don't repeat it.
    url_without_page = request.GET.copy()
    if 'page' in url_without_page:
        del url_without_page['page']
    # Final context variables to send to the template.
    context = {
        'category': category,
        'categories': categories,
        'page_list': page_list,
        'url_without_page':url_without_page,
        'show_count':show_count,
    }
    return render(request,'product/product_list.html', context)
def promotion_list(request, category_slug=None):
    """Render the promotions page: published discounted products plus the
    categories (sidebar) that contain at least one discounted product."""
    categories = Category.objects.filter(products__is_discounted=True).distinct()
    products = Product.objects.filter(is_published=True).filter(is_discounted=True)
    return render(request, 'product/promotion_list.html', {
        'categories': categories,
        'products': products,
    })
def category_list(request):
    """Display every category that contains at least one published product,
    paginated nine per page."""
    page_list = Category.objects.filter(products__is_published=True).distinct()
    paginator = Paginator(page_list, 9)  # nine categories per page
    page = request.GET.get('page')
    try:
        page_list = paginator.page(page)
    except PageNotAnInteger:
        page_list = paginator.page(1)  # non-numeric page -> first page
    except EmptyPage:
        page_list = paginator.page(paginator.num_pages)  # out of range -> last page
    # Drop any existing `page` parameter so paging links don't repeat it.
    url_without_page = request.GET.copy()
    url_without_page.pop('page', None)
    context = {
        'page_list': page_list,
        'url_without_page': url_without_page,
    }
    return render(request, 'product/category_list.html', context)
def product_detail(request, id, slug):
    """Show a single published product with its related products and the
    sidebar category list."""
    categories = Category.objects.filter(products__is_published=True).distinct()
    product = get_object_or_404(Product, id=id, slug=slug, is_published=True)
    # Products sharing at least one category with this one, excluding the
    # product itself, without duplicates.
    shared_categories = product.categories.all()
    related_products = (Product.objects
                        .filter(categories__in=list(shared_categories))
                        .exclude(id=product.id)
                        .distinct())
    cart_product_form = CartAddProductForm()  # "add to cart" widget
    return render(request, 'product/detail.html', {
        'categories': categories,
        'product': product,
        'related_products': related_products,
        'cart_product_form': cart_product_form,
    })
def search_product(request):
    """
    Search view: reads the bound SearchForm from the GET query string, builds
    a product queryset from each combination of criteria (text / category /
    max price), paginates it and renders the results.
    """
    # Categories that have at least one published product (sidebar list).
    categories = Category.objects.filter(products__is_published=True).distinct()
    # Search parameters arrive via GET.
    if request.method == 'GET':
        # Bind the form to the request data.
        form=SearchForm(request.GET)
        # Check if the form is valid:
        if form.is_valid():
            # Map the search-form fields to local variables.
            search_category_object = None
            # Represents the product result set.
            page_list = None
            total_results = 0
            original_search_text = form.cleaned_data['search_query']
            search_text = original_search_text.strip()
            max_price = form.cleaned_data['max_price']
            search_category_name = form.cleaned_data['search_category']
            is_exact_match = form.cleaned_data['is_exact_match']
            show_count = form.cleaned_data['show_count']
            if search_category_name != "None" and search_category_name != "":
                search_category_object = get_object_or_404(Category, name=search_category_name)
            if is_exact_match == False:
                # Replace runs of spaces with '|' so each word is matched separately.
                search_text = re.sub(r' +',"|",search_text)
            # If a search text was entered.
            # NOTE(review): this was probably meant to be
            # `search_text != '' and search_text is not None`; as written a
            # None value would take this branch (though .strip() above would
            # already have failed on None) -- TODO confirm the form field
            # never yields None.
            # NOTE(review): the user text is interpolated into an iregex
            # pattern unescaped; characters like '(' raise re.error --
            # consider re.escape() per word.
            if search_text != '' or search_text == None:
                if search_category_object:
                    if max_price == None:
                        # Search by text + category.
                        page_list = Product.objects.filter(name__iregex=r'('+search_text+r')').filter(is_published=True).filter(categories__in=[search_category_object])
                    else:
                        # Search by text + category + max price.
                        page_list = Product.objects.filter(name__iregex=r'('+search_text+r')').filter(is_published=True).filter(categories__in=[search_category_object]).filter(price__lte=max_price)
                else:
                    if max_price == None:
                        # Search by text only.
                        page_list = Product.objects.filter(name__iregex=r'('+search_text+r')').filter(is_published=True)
                    else:
                        # Search by text + max price.
                        page_list = Product.objects.filter(name__iregex=r'('+search_text+r')').filter(is_published=True).filter(price__lte=max_price)
            # If no search text was entered.
            else:
                if search_category_object:
                    if max_price == None:
                        # Search by category only.
                        page_list = Product.objects.filter(categories__in=[search_category_object],is_published=True)
                    else:
                        # Search by category + max price.
                        page_list = Product.objects.filter(is_published=True).filter(categories__in=[search_category_object]).filter(price__lte=max_price)
                else:
                    if max_price == None:
                        # Nothing selected: prompt the user to start a search.
                        start_search='StartSearch'
                        context = {'categories': categories,'start_search':start_search}
                        return render(request,'product/search.html',context)
                    else:
                        # Search by max price only.
                        page_list = Product.objects.filter(is_published=True).filter(price__lte=max_price)
            # Number of results found.
            if page_list == None:
                total_results = 0
            else:
                total_results = page_list.count()
            # Pagination: use the requested page size, defaulting to 10.
            if show_count:
                paginator = Paginator(page_list, show_count)
            else:
                paginator = Paginator(page_list, 10)
                show_count = 10
            page = request.GET.get('page')
            try:
                page_list = paginator.page(page)
            except PageNotAnInteger:
                # If page is not an integer, deliver the first page.
                page_list = paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999), deliver the last page.
                page_list = paginator.page(paginator.num_pages)
            # Strip any existing `page` parameter so paging links don't repeat it.
            url_without_page = request.GET.copy()
            if 'page' in url_without_page:
                del url_without_page['page']
            # Final context variables to send back to the template.
            context = {
                'categories': categories,
                'page_list': page_list,
                'category':search_category_object,
                'original_search_text':original_search_text,
                'max_price':max_price,
                'is_exact_match':is_exact_match,
                'total_results':total_results,
                'show_count':show_count,
                'url_without_page':url_without_page,
            }
            return render(request,'product/search.html', context)
        else:
            print ("***************** NOT VALID *********************")
            context = {'categories': categories,}
            return render(request,'product/search.html',context)
    else:
        # NOTE(review): a non-GET request returns None, which Django will
        # reject with a ValueError -- TODO confirm non-GET is unreachable.
        print ("***************** NOT GET *********************")
        return
|
from django.db import models
from reversion import revisions as reversion
from geoposition.fields import GeopositionField
# Closed set of site categories exposed as the `type` field's choices.
SITE_TYPES = [
    ('TR', 'Training Site'),
    ('IN', 'Inland Site'),
    ('OF', 'Offshore Site'),
]


@reversion.register()
class Site(models.Model):
    """A dive site with contact details and depth/temperature limits."""

    name = models.CharField(max_length=128)
    type = models.CharField(max_length=8, choices=SITE_TYPES)
    address = models.TextField(blank=True)
    location = GeopositionField(blank=True, null=True)
    phone = models.CharField(max_length=20, blank=True)
    email = models.EmailField(blank=True)
    min_temp = models.IntegerField(blank=True, null=True)
    max_temp = models.IntegerField(blank=True, null=True)
    max_depth = models.IntegerField(blank=True, null=True)
    facilities = models.TextField(blank=True)

    def __str__(self):
        return self.name

    def uid(self):
        """Human-readable identifier derived from the primary key, e.g. 'ST0042'."""
        return "ST{:0>4d}".format(self.pk)
|
import re
import os
import shutil
import yaml
from steam_buddy.config import SHORTCUT_DIR
from PIL import Image, ImageFont, ImageDraw
# Translation table: path-hostile characters become '_'; double quotes are
# removed entirely (None deletes the character in str.translate).
_SANITIZE_TABLE = str.maketrans(
    {'\n': '_', '\r': '_', '/': '_', '\\': '_', '\0': '_', '"': None})


def sanitize(string):
    """Replace filesystem-hostile characters in `string` with '_' and strip
    double quotes; non-str values are returned unchanged.

    Uses one C-level str.translate pass instead of six chained .replace()
    calls (same result, single scan).
    """
    if isinstance(string, str):
        return string.translate(_SANITIZE_TABLE)
    return string
def load_shortcuts(platform):
    """Load the shortcut list for `platform` from its YAML file.

    Returns [] when the file is missing or empty.  Creates SHORTCUT_DIR on
    first use.
    """
    if not os.path.exists(SHORTCUT_DIR):
        os.makedirs(SHORTCUT_DIR)
    shortcuts_file = "{shortcuts_dir}/steam-buddy.{platform}.yaml".format(shortcuts_dir=SHORTCUT_DIR, platform=platform)
    shortcuts = []
    if os.path.isfile(shortcuts_file):
        # BUG FIX: the file handle was previously leaked (open() with no
        # close); the `with` block guarantees it is closed.
        with open(shortcuts_file) as f:
            shortcuts = yaml.load(f, Loader=yaml.Loader) or []
        # NOTE(review): yaml.Loader can construct arbitrary Python objects;
        # if this file could be written by untrusted code, use yaml.safe_load.
    return shortcuts
def delete_file_link(base_dir, platform, name):
    """Remove every '<name>.<ext>' link (or file) inside base_dir/platform.

    Silently does nothing when the directory or matching entries are absent.
    """
    pattern = re.escape(name) + r"\.[^.]+$"
    directory = os.path.join(base_dir, platform)
    if not os.path.isdir(directory):
        return
    matches = [os.path.join(directory, entry)
               for entry in os.listdir(directory)
               if re.match(pattern, entry)]
    for link in matches:
        # Check islink first so dangling symlinks are also removed.
        if os.path.islink(link) or os.path.exists(link):
            os.remove(link)
def is_direct(platform, content_type):
    """True when content for `platform` must be launched from its stored
    path directly (arcade/neo-geo ROM sets have inter-file dependencies)."""
    return platform in ("arcade", "neo-geo") and content_type == "content"
def upsert_file(src_path, base_dir, platform, name, dst_name):
    """Move `src_path` into the content store and (re)create its public link.

    Returns the path the shortcut should launch: the stored file itself for
    "direct" platforms (mame ROM sets), otherwise the symlink.  No-op when
    `src_path` is falsy.
    """
    if not src_path:
        return
    content_type = os.path.basename(base_dir)
    filename = sanitize(dst_name)
    file_dir = "{base_dir}/{platform}/.{name}".format(base_dir=base_dir, platform=platform, name=name)
    # mame ROM files have dependencies on each other, so store them all in a
    # single shared directory per platform.
    if is_direct(platform, content_type):
        file_dir = "{base_dir}/{platform}/.{platform}".format(base_dir=base_dir, platform=platform)
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    # BUG FIX: the destination was built with a literal "(unknown)" instead
    # of the `{filename}` placeholder (note the unused filename= kwarg in
    # the original format call), so every upload clobbered the same file.
    file_path = "{file_dir}/{filename}".format(file_dir=file_dir, filename=filename)
    if os.path.exists(file_path):
        os.remove(file_path)
    shutil.move(src_path, file_path)
    _, ext = os.path.splitext(filename)
    dst = "{base_dir}/{platform}/{name}{ext}".format(base_dir=base_dir, platform=platform, name=name, ext=ext)
    delete_file_link(base_dir, platform, name)
    os.symlink(file_path, dst)
    # mame requires ROM files to keep specific names, so launch the stored
    # file directly instead of the renamed symlink.
    if is_direct(platform, content_type):
        return file_path
    return dst
def strip(string):
    """Drop the surrounding double quotes from `string`, if present.

    Matches the original exactly, including the single-'"' edge case
    (startswith and endswith both succeed on the same character, yielding
    the empty string).
    """
    is_quoted = string.startswith('"') and string.endswith('"')
    return string[1:-1] if is_quoted else string
def delete_file(base_dir, platform, name):
    """Delete the stored content (and any public link) for a shortcut.

    Direct platforms (mame) store files in a shared directory, so the exact
    file is located via the shortcut's dir/params; other platforms get their
    whole private '.name' directory removed.
    """
    if is_direct(platform, os.path.basename(base_dir)):
        # Look the shortcut up to find the stored ROM's real path.
        # NOTE(review): matches[0] raises IndexError when no shortcut
        # matches -- presumably callers guarantee one exists; confirm.
        shortcuts = load_shortcuts(platform)
        matches = [e for e in shortcuts if e['name'] == name and e['cmd'] == platform]
        shortcut = matches[0]
        if 'dir' in shortcut and 'params' in shortcut:
            file_path = os.path.join(strip(shortcut['dir']), strip(shortcut['params']))
            if os.path.exists(file_path):
                os.remove(file_path)
    else:
        # Non-direct platforms own a private hidden directory per shortcut.
        file_dir = "{base_dir}/{platform}/.{name}".format(base_dir=base_dir, platform=platform, name=name)
        if os.path.exists(file_dir):
            shutil.rmtree(file_dir)
    delete_file_link(base_dir, platform, name)
def generate_banner(text, path):
    """Render `text` centered on a black 460x215 banner image saved to `path`."""
    # 460x215 is the thumbnail size used by Steam.
    banner_width = 460
    banner_height = 215
    banner = Image.new('RGB', (banner_width, banner_height), color=(0, 0, 0))
    # NOTE(review): hard-coded font path assumes an Arch-style layout;
    # FreeTypeFont.getsize() was removed in Pillow 10 (use getbbox/getlength)
    # -- TODO confirm the pinned Pillow version.
    font = ImageFont.truetype("/usr/share/fonts/TTF/DejaVuSansMono-Bold.ttf", 24)
    text_width, text_height = font.getsize(text)
    # Shorten the text until it fits: each pass nets one character less
    # (drop 4, add "...").
    while text_width > banner_width:
        text = text[:-4] + "..."
        text_width, text_height = font.getsize(text)
    # Center the text on the banner.
    text_x = int(banner_width / 2 - text_width / 2)
    text_y = int(banner_height / 2 - text_height / 2)
    title = ImageDraw.Draw(banner)
    title.text((text_x, text_y), text, font=font, fill=(255, 255, 255))
    banner.save(path)
|
# Import modules
import ShareYourSystem as SYS

# Define a Viewer instance and trigger its view, then stop the DDP client.
MyViewer=SYS.ViewerClass().view()
MyViewer.MeteoredConcurrentDDPClientVariable.stop()

# Build the attested string representation of the viewer.
SYS._attest(
    [
        'MyViewer is '+SYS._str(
            MyViewer,
            **{
                'RepresentingBaseKeyStrsListBool':False,
                'RepresentingAlineaIsBool':False
            }
        )
    ]
)

# Print
|
import random
import re
import time

book = []  # database; name < 31 chars, phone < 16, address < 31
n = 0  # number of entries currently in the book
# Phone numbers: digits, optionally preceded by '+'.
# Raw strings avoid the invalid-escape-sequence DeprecationWarning.
p = re.compile(r'\+?\d+$')
# Names: one or two words of Latin/Cyrillic letters.
# BUG FIX: the original classes [a-zA-z] and [а-я А-я] also matched
# punctuation such as '[', '_', '^' and '`'; explicit per-case letter
# ranges (plus ё/Ё, which sit outside the Cyrillic а-я range) fix this.
o = re.compile(r'[a-zA-Zа-яА-ЯёЁ]+\s*[a-zA-Zа-яА-ЯёЁ]*$')
# Addresses: word characters (letters or digits).
q = re.compile(r'\w+$')
letters = ['a', 'e', 'i', 'o']
consLet = ['b', 'c', 'd', 'f', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't']
""" generate random database, which will be in the beginning """
for x in range(random.randint(5, 10)):
name = chr(random.randint(65, 90)) + (random.choice(letters) +\
random.choice(consLet)) * random.randint(0, 1)\
+ random.choice(letters) * random.randint(0, 1)\
+ (random.choice(consLet) + random.choice(letters)\
+ random.choice(consLet))\
* random.randint(0, 1)\
+ random.choice(consLet) + ' ' + chr(random.randint(65, 90))\
+ (random.choice(letters) + random.choice(consLet))\
* random.randint(0,1)\
+ random.choice(letters) + chr(random.randint(97, 122))
""" NB! not chr(random.randint(48, 57))*9 because numbers will be same """
mobile = '+79' + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))
home_phone = chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))\
+ chr(random.randint(48, 57)) + chr(random.randint(48, 57))
address = chr(random.randint(65, 90)) + random.choice(letters)\
+ (chr(random.randint(97, 122)) + random.choice(letters)\
+ chr(random.randint(97, 122)))*random.randint(0,1) +\
(random.choice(letters) + chr(random.randint(97, 122)))\
* random.randint(0,1) + random.choice(letters)\
+ chr(random.randint(97, 122)) + random.choice(letters)\
+ chr(random.randint(97, 122)) + ' St.'
book += [[name, mobile, home_phone, address]]
n += 1
# x1 - value to validate; y1 - max allowed length; u - compiled reg.ex.
# Check that x1 is non-empty, no longer than y1 and matches u,
# and ask the user to re-enter x1 until it does.
def RightLen(x1, y1, u):
while (not(x1)) or (len(x1) > y1) or (not(u.match(x1))):
while not(x1):
x1 = input('Введите хоть что-нибудь!\n')
while (len(x1) > y1) or (not(u.match(x1))):
x1 = input('Неправильный формат ввода!\n')
return x1
# little preview, copied from http://spravkaru.net/names/7/812/list16/
print("""\t\tПофамильный телефонный справочник — это справочник, в котором\n\
\t\tпредставлен список фамилий.Выбрав интересующую фамилию, можно\n\
\t\tпросмотреть список людей с такой фамилией, проживающих в данном\n\
\t\tгороде. Такой справочник полезен, когда нужно найти человека с\n\
\t\tопределённой фамилией. Именно с помощью такого справочника\n\
\t\tТерминатор Т-800 нашел Джона Коннора, будущего лидера движения\n\
\t\tСопротивления и помог ему победить в войне человечества против машин.\n\
\t\tТакже, с помощью подобного справочника Марти Макфлай нашел Доктора\n\
\t\tБрауна в 1955, который помог ему восстановить исторический ход\n\
\t\tсобытий в вернуться в будущее.\n\
\t\thttp://spravkaru.net/names/7/812/list16/""")
# Main menu; the user's command is kept as a string in `a`.
a = input(
"""Добро пожаловать в телефонный справочник!\nДоступные функции:
0:выход из справочника
1:вывести весь справочник
2:добавить новую запись
3:поиск номера и адреса человека по его имени, адресу, домашнему
или мобильному номеру
4:сортировка справочника по имени, мобильному телефону, домашнему
телефону или адресу
5:экспорт справочника в файл\n""")
while a != '0':
    if p.match(a):  # command must be purely numeric (p is the digit regex)
        # Option 1: dump the whole phone book as a pseudo-graphics table.
        if int(a) == 1:
            print('+-----+' + '-'*32 + '+' + '-'*18 + '+' + '-'*17 + '+'
                  + '-'*37 + '+' )
            print('| № ' + '|' + ' Name' + ' '*24 + '|' + ' Mobile Phone'
                  + ' '*5 + '|' + ' Home Phone' + ' '*5 + '|' + ' Address'
                  + ' '*26 + '|')
            for x in range(0, n):
                print('+-----+' + '-'*32 + '+' + '-'*18 + '+' + '-'*17 + '+'
                      + '-'*37 + '+')
                # Pad the row number depending on its digit count so the
                # first column keeps a constant width.
                if x+1 < 10:  # check, if numbers of record has 1,2 or 3 letters
                    print('| ' + str(x+1) + ' |', end='')
                elif x<100:
                    print('| ' + str(x+1) + ' |', end='')
                else:
                    print('| ' + str(x+1) + ' |', end='')
                print(' ' + book[x][0] + ' '*(31-len(book[x][0]))
                      + '| ' + book[x][1] + ' '*(16-len(book[x][1]))
                      + ' | ' + book[x][2] + ' '*(16-len(book[x][2]))
                      + '| '+book[x][3] + ' '*(31-len(book[x][3])) + ' |')
            print('+-----+' + '-'*32 + '+' + '-'*18 + '+' + '-'*17
                  + '+' + '-'*37 + '+')
        # Option 2: add a new record (name, mobile, address, home phone),
        # validating each field and enforcing uniqueness where required.
        elif int(a) == 2:
            # input name; must be unique across the book
            name=input('Введите имя(не больше 20 символов!)\n')
            f = 0
            while not(f):
                name = RightLen(name, 21, o)
                # check if name is unique
                for x in range(n):
                    if book[x][0] == name:
                        f = 1
                if f:
                    name = input(
"""На ваше имя уже зарегистрирован номер! Введите какое-нибудь другое имя\n""")
                    f = 0
                else:
                    break
            # input mobile number (optional: empty string skips validation)
            mobile = input(
"""Теперь введите мобильный номер (не больше 15 символов!)\n""");
            f = 0
            while (not(f)) and (mobile):
                mobile = RightLen(mobile, 16, p)
                for x in range(n):
                    if book[x][1] == mobile:
                        f = 1
                if f:
                    mobile = input(
"""Этот номер уже занят! Введите какой-нибудь другой номер!\n""")
                    f = 0
                else:
                    break
            # input address
            address=input('Введите свой адрес(не больше 30 символов!)\n')
            address = RightLen(address,31,q)
            y=int(0)
            for x in range(n):
                if book[x][3]==address:
                    y+=1
            # check numbers of people, that wrote this address
            if int(y)>5:
                print(
"""Предупреждаем! На этот адрес зарегистрировано уже слишком много номеров!
Возможно, к вам придут из УФМС!\n""")
                # this function will be written later
                # call_ufms()
            # input home number; a home phone may be shared, but only by
            # people registered at the same address
            home_phone = input(
"""Теперь введите домашний номер (не больше 15 символов!)\n""");
            f = 0
            while (not(f)) and (home_phone):
                home_phone = RightLen(home_phone, 16, p)
                for x in range(n):
                    if (book[x][2] == home_phone) and (address != book[x][3]):
                        f = 1
                if f:
                    home_phone = input(
"""Этот номер уже занят!Введите какой-нибудь другой номер!\n""")
                    f = 0
                else:
                    break
            # add record in the database
            book += [[name, mobile, home_phone, address]]
            n += 1
        # Option 3: linear search by name, mobile, home phone or address.
        elif int(a) == 3:
            some=input(
"""Введите 1, если хотите осуществить поиск по имени,
2, если хотите осуществить поиск по мобильному номеру,
3, если хотите осуществить поиск по домашнему номеру,
4, если хотите осуществить поиск по адресу\n""")
            # searching for name
            if int(some) == 1:
                name = input('Введите искомое имя\n')
                name = RightLen(name, 21, o)
                f = 0
                for x in range(n):
                    if book[x][0] == name:
                        f = 1
                        print('Номер введенного человека: ',book[x][1],
                              ', его адрес: ', book[x][3],
                              ', его домашний номер: ',book[x][2],'\n')
                if not(f):
                    print('На этого человека не зарегистрирован\
 ни один номер!\n')
            # searching for mobile
            elif int(some) == 2:
                mobile = input('Введите искомый номер\n')
                mobile = RightLen(mobile, 16, p)
                f = 0
                for x in range(n):
                    if book[x][1] == mobile:
                        f = 1
                        print('Имя владельца номера: ', book[x][0],
                              ', его адрес: ', book[x][3],
                              ', его домашний номер: ',book[x][2],'\n')
                if not(f):
                    print('Этого номера нет в данном справочнике!\n')
            # searching for home phone (may print several owners)
            elif int(some) == 3:
                home_phone = input('Введите искомый номер\n')
                home_phone = RightLen(home_phone, 16, p)
                f = 0
                for x in range(n):
                    if book[x][2] == home_phone:
                        f = 1
                        print('Имя владельца номера: ', book[x][0],
                              ', его адрес: ', book[x][3],
                              ', его мобильный номер: ', book[x][1], '\n')
                if not(f):
                    print('Этого номера нет в данном справочнике!\n')
            # searching for address (lists all residents)
            elif int(some) == 4:
                address = input('Введите искомый адрес\n')
                address = RightLen(address, 31, q)
                f = 0
                print('По данному адресу проживают:')
                for x in range(n):
                    if book[x][3] == address:
                        f = 1
                        print('+ Владелец: ', book[x][0],
                              ', мобильный номер: ', book[x][1],
                              ', домашний номер; ', book[x][2], '\n')
                if not(f):
                    print('Никто')
        # Option 4: bubble-sort the book in place by one of its columns
        # (0 name, 1 mobile, 2 home phone, 3 address).
        elif int(a) == 4:
            some = input(
"""Введите 0, если хотите отсортировать справочник по имени,
1, если хотите отсортировать справочник по мобильному номеру,
2, если хотите отсортировать справочник по домашнему номеру,
3, если хотите отсортировать справочник по адресу\n""")
            # NOTE(review): int(some) raises ValueError on non-numeric input.
            while ((int(some) != 1) and (int(some) != 2) and
                   (int(some) != 0) and (int(some) != 3)):
                some = input('Пожалуйста, введите 0,1,2 или 3\n')
            y = input(
"""Введите 1, если хотите отсортировать справочник по возрастанию, или
2, если хотите отсортировать справочник по убыванию\n""")
            # check, if user write right number and ask him again if not
            while (int(y) != 1) and (int(y) != 2):
                y = input('Пожалуйста, введите 1 или 2\n')
            # bubble sort, ascending
            if int(y) == 1:
                for x in range(0, n-1):
                    for k in range(0, n-x-1):
                        if book[k][int(some)] > book[k+1][int(some)]:
                            (book[k],book[k+1]) = (book[k+1], book[k])
            # bubble sort, descending
            if int(y) == 2:
                for x in range(0, n-1):
                    for k in range(0, n-x-1):
                        if book[k][int(some)] < book[k+1][int(some)]:
                            (book[k],book[k+1]) = (book[k+1], book[k])
        # Option 5: export the book as an aligned text table to a file.
        elif int(a) == 5:
            some = input('Введите полный путь к файлу\n')
            while not(some):
                some = input('Введите хоть что-нибудь\n')
            try:
                with open(some,'w') as f:
                    # Pad the record number by its digit count (1-3 digits).
                    for x in range(n):
                        if x+1 < 10:
                            f.write(' ' + str(x+1) + ' ')
                        elif x+1 < 100:
                            f.write(' ' + str(x+1) + ' ')
                        else:
                            f.write(' ' + str(x+1) + ' ')
                        f.write(' ' + book[x][0] + ' '*(31-len(book[x][0]))
                                + ' ' + book[x][1] + ' '*(16-len(book[x][1])) + ' '
                                + book[x][2] + ' '*(16-len(book[x][2])) + ' '
                                + book[x][3] + '\n')
                print('Записываю...')
                time.sleep(2)  # cosmetic pause so "Writing..." is visible
                print('Готово!\n')
            except IOError:
                print('Неправильный путь!\n')
        # Easter egg: quote from "The Hitchhiker's Guide to the Galaxy".
        elif int(a) == 42:
            print(
"""\t\t"Forty-two!" yelled Loonquawl."Is that all you've got to show for
\tseven and a half million years' work?"
\t"I checked it very thoroughly,"said the computer, "and that quite definitelyis
\tthe answer. I think the problem, to be quite honest with you,
\tis that you've never actually known what the question is."
\t"But it was the Great Question! The Ultimate Question of Life, the Universe
\tand Everything!" howled Loonquawl.
\t"Yes," said Deep Thought with the air of one who suffers fools gladly,
\t"but what actually is it?"
\tA slow stupefied silence crept over the men as they stared at the computer
\tand then at each other.
\t"Well, you know, it's just Everything ... Everything ..." offered Phouchg weakly.
\t"Exactly!" said Deep Thought. "So once you do know what the question actually
\tis, you'll know what the answer means."\n""")
        else:
            print('Неизвестная команда!')
    else:
        print('Неизвестная команда!')
    # Show the menu again and read the next command.
    a = input(
"""Желаете выполнить еще какие-нибудь функции?
Доступные функции:
0:выход из справочника
1:вывести весь справочник
2:добавить новую запись
3:поиск номера и адреса человека по его имени, адресу,
домашнему или мобильному номеру
4:сортировка справочника по имени, мобильному телефону,
домашнему телефону или адресу
5:экспорт справочника в файл\n""")
print('Good luck!')
|
# -*- coding: utf-8 -*-
import pandas as pd
import sys
import os
# Python 2 hack: force the process-wide default string encoding to UTF-8 so
# implicit str/unicode conversions below do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
# import jieba
import codecs
import re
import shutil
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import random
# Keras augmentation pipeline used to synthesise extra training images
# (random rotation, shifts, shear, zoom and horizontal flips).
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# Walk a directory tree and report the files it contains.
def load_file_names(dir):
    # NOTE(review): despite the name, this function returns nothing -- the
    # augmentation / name->files dict logic that used `nums` and `fileDict`
    # is commented out, so it only removes .DS_Store files and prints a
    # per-directory file count.  (Python 2 syntax: `print` statements.)
    nums = 6000
    fileDict = {}
    # print dir
    for dir_path, dir_names, file_names in os.walk(dir):
        # Drop macOS Finder metadata from the listing and from disk.
        if '.DS_Store' in file_names:
            file_names.remove('.DS_Store')
            p = os.path.join(dir_path, '.DS_Store')
            os.remove(p)
            print p
        fileNums = len(file_names)
        print dir_path, fileNums
        # (large commented-out block removed: Keras-based augmentation of
        #  under-populated class folders via `datagen.flow`, and building a
        #  class-name -> [file url] dict; recover from version control if
        #  needed.)
pathName = '/home/ylj/tag_sys/PIC_DATA/train'
# NOTE(review): `dict` shadows the builtin, and load_file_names currently
# returns None -- all code consuming the dict below was commented out.
dict = load_file_names(pathName)
# (commented-out experiments removed: iterating the name->files dict,
#  sorting classes by file count, and copying an 80/20 train/valid split
#  into ./data/train and ./data/valid; recover from version control.)
#!/usr/bin/env python
#
# Copyright (c) 2010-2017, David Dittrich <dave.dittrich@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RPC Request Class
See the class documentation below for details.
"""
import json
from rpc.rpc_common import RPC_Frame_Object
class RPC_Request(RPC_Frame_Object):
    """RPC request frame.

    Application data defaults to ``{'program': <program>, 'usage': 'true'}``
    unless an ``argsopts`` keyword argument is supplied by the caller.
    """
    NAME = 'RPC_Request'

    def __init__(self, *args, **kwargs):
        super(RPC_Request, self).__init__(*args, **kwargs)
        # self.program is provided by the RPC_Frame_Object base initializer.
        default_opts = {'program': self.program, 'usage': 'true'}
        self.appdata = kwargs.pop('argsopts', default_opts)
        self.name = self.program

    def get_appdata(self):
        """Return the application data carried by this frame."""
        return self.appdata
|
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from common import constants as const
from common import config
# Human-readable component name used as a prefix in log messages.
__name = 'Upload server readiness status updater'
class HealthStatusUpdaterError(Exception):
    """Raised when usage data for the health status cannot be read/parsed."""
    pass
def _compute_health_status(resident_set, upload_dir_usage):
    """
    Map current RAM and upload-directory usage to a readiness status.

    Returns STATUS_READY while both metrics sit below the NOT_READY
    tolerance of their configured maxima, STATUS_NOT_READY below the
    STOP_RESPONDING tolerance, and STATUS_STOP_RESPONDING otherwise.
    """
    logging.debug('{0}: RAM usage: {1} File usage: {2}'.format(
        __name, resident_set, upload_dir_usage))
    # Check the tiers from healthiest to least healthy; the first tier
    # whose limits both hold determines the status.
    tiers = (
        (const.NOT_READY_TOL, const.STATUS_READY),
        (const.STOP_RESPONDING_TOL, const.STATUS_NOT_READY),
    )
    for tolerance, status in tiers:
        rss_limit = tolerance * config.UPLOAD_SERVER_RSS_MAX_USAGE
        dir_limit = tolerance * config.PENDING_UPLOADS_MAX_USAGE
        if resident_set < rss_limit and upload_dir_usage < dir_limit:
            return status
    return const.STATUS_STOP_RESPONDING
def _read_usage_data():
    """
    Read memory and pending-upload disk usage for the upload server.

    Returns a tuple ``(rss_usage, pending_uploads_usage)``: the resident-set
    size parsed from const.MEM_STAT_FILE and the total size in bytes of the
    files in const.UPLOAD_DIR.

    Raises HealthStatusUpdaterError if either source cannot be read/parsed.
    """
    # Read the file to obtain rss stat
    try:
        with open(const.MEM_STAT_FILE) as f:
            # Strip off newline characters
            lines = [l.strip() for l in f.readlines()]
    except IOError as e:
        msg = '{0} failed to open {1}. Error: {2}'.format(
            __name, const.MEM_STAT_FILE, e)
        raise HealthStatusUpdaterError(msg)
    # Extract the rss stat
    try:
        # Lines are of the form "<metric_name> <value>", we want <value>
        rss_line = [l for l in lines
                    if l.split()[0] == const.RESIDENT_SET_FIELD][0]
        rss_usage = int(rss_line.split()[1])
    except (IndexError, ValueError) as e:
        msg = '{0} error parsing {1} for rss value. Error: {2}'.format(
            __name, lines, e)
        raise HealthStatusUpdaterError(msg)
    # Compute size of pending uploads directory (top level only; the walk
    # is not recursive)
    pending_uploads_usage = 0
    try:
        for fname in os.listdir(const.UPLOAD_DIR):
            fpath = os.path.join(const.UPLOAD_DIR, fname)
            pending_uploads_usage += os.path.getsize(fpath)
    except OSError as e:
        msg = '{0} error obtaining size of {1} dir: {2}'.format(
            __name, const.UPLOAD_DIR, e)
        raise HealthStatusUpdaterError(msg)
    return rss_usage, pending_uploads_usage
def _record_status(status):
    """
    Write the readiness status string to the readiness file.

    Failures to open the file are logged and swallowed (best effort);
    the caller is not notified.
    """
    try:
        with open(const.UPLOAD_SERVER_READINESS_FILE, 'w') as status_file:
            status_file.write(status)
    except IOError:
        logging.error('{0} failed to open {1}'.format(
            __name, const.UPLOAD_SERVER_READINESS_FILE))
def _update_readiness_status():
    """
    Read usage data, derive a health status and persist it.

    Read/parse errors are logged and leave the previously recorded
    status untouched.
    """
    try:
        usage = _read_usage_data()
    except HealthStatusUpdaterError as err:
        logging.error(str(err))
        return
    _record_status(_compute_health_status(*usage))
def main():
    """Periodically recompute and persist the upload server's readiness."""
    logging.basicConfig(level=logging.INFO, format=const.LOG_FMT_S_THREADED)
    # Run forever; this process is expected to be a long-lived sidecar.
    while True:
        _update_readiness_status()
        time.sleep(const.READINESS_UPDATE_INTERVAL)
if __name__ == '__main__':
    main()
|
class Solution:
    # @param board, a 9x9 2D array
    # @return a boolean
    def isValidSudoku(self, board):
        """Check a partially filled 9x9 board for duplicate digits in any
        row, column or 3x3 box.  '.' marks an empty cell."""
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for r in range(9):
            for c in range(9):
                v = board[r][c]
                if v == '.':
                    continue
                # Floor division: the original used '/', which yields a
                # float on Python 3 and breaks list indexing.
                b = r // 3 * 3 + c // 3
                if v in rows[r] or v in cols[c] or v in boxes[b]:
                    return False
                rows[r].add(v)
                cols[c].add(v)
                boxes[b].add(v)
        return True
|
import os
import ray
import time
import pytest
from ray._private.test_utils import (
run_string_as_driver_nonblocking,
run_string_as_driver,
)
from ray.tests.conftest import * # noqa
from ray import workflow
from unittest.mock import patch
driver_script = """
import time
import ray
from ray import workflow
@ray.remote
def foo(x):
time.sleep(1)
if x < 20:
return workflow.continuation(foo.bind(x + 1))
else:
return 20
if __name__ == "__main__":
ray.init()
output = workflow.run_async(foo.bind(0), workflow_id="driver_terminated")
time.sleep({})
"""
def test_workflow_lifetime_1(workflow_start_cluster):
    # Case 1: driver exits normally -- the workflow must keep running to
    # completion (result 20) after the driver process has finished.
    address, storage_uri = workflow_start_cluster
    with patch.dict(os.environ, {"RAY_ADDRESS": address}):
        ray.init()
        # Driver sleeps only 5 s, so it exits well before the workflow ends.
        run_string_as_driver(driver_script.format(5))
        assert workflow.get_output("driver_terminated") == 20
def test_workflow_lifetime_2(workflow_start_cluster):
    # Case 2: driver is killed mid-run -- the workflow must survive the
    # driver's death and still produce the final result.
    address, storage_uri = workflow_start_cluster
    with patch.dict(os.environ, {"RAY_ADDRESS": address}):
        ray.init()
        # Driver would sleep 100 s; kill it after ~10 s instead.
        proc = run_string_as_driver_nonblocking(driver_script.format(100))
        time.sleep(10)
        proc.kill()
        time.sleep(1)
        assert workflow.get_output("driver_terminated") == 20
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
from .operations import *
def calculate_expression(expression):
    """Evaluate a whitespace-separated binary expression, e.g. "3 + 4.5".

    Supported operators: + - * / ^ (power).  Returns the result formatted
    to two decimal places as a string.

    Raises ValueError for an unknown operator (ValueError is a subclass of
    Exception, so existing callers catching Exception keep working), and
    propagates ValueError from float() for malformed operands.
    """
    x, operator, y = expression.split()
    x = float(x)
    # Parse the right operand as float too; the original int() crashed on
    # inputs such as "3 + 2.5" even though the result is formatted as a
    # float anyway.
    y = float(y)
    # Dispatch table instead of an if/elif chain.
    operations = {
        '+': add,
        '-': subtract,
        '*': multiply,
        '/': divide,
        '^': power,
    }
    try:
        operation = operations[operator]
    except KeyError:
        raise ValueError(f'Invalid operator \'{operator}\'')
    result = operation(x, y)
    return f'{result:.2f}'
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statistics as stat
import scipy.stats as scs
import seaborn as sns
# 1: descriptive statistics of a fair six-sided die modelled as a
# discrete random variable.
print('-----1-----')
elements = tuple(range(1, 7))
probabilities = (1 / 6,) * 6
data = scs.rv_discrete(values=(elements, probabilities))
for label, value in (
        ('Mean: ', data.mean()),
        ('Median: ', data.median()),
        ('Variance: ', data.var()),
        ('Standard deviation: ', data.std()),
        ('Expected value: ', data.expect()),
        ('Entropy: ', data.entropy()),
):
    print(label, value)
# 2: draw 100 samples each from Bernoulli, binomial and Poisson
# distributions (Poisson uses p as its rate parameter mu).
print('-----2-----')
p = 0.2
n = 5
bernoulli = scs.bernoulli.rvs(p, size=100)
binomial = scs.binom.rvs(n, p, size=100)
poisson = scs.poisson.rvs(p, size=100)
# 3: sample moments of each distribution.
print('-----3-----')
bernoulli_mean = bernoulli.mean()
bernoulli_var = bernoulli.var()
bernoulli_kur = scs.kurtosis(bernoulli)
bernoulli_skew = scs.skew(bernoulli)
binomial_mean = binomial.mean()
binomial_var = binomial.var()
binomial_kur = scs.kurtosis(binomial)
binomial_skew = scs.skew(binomial)
poisson_mean = poisson.mean()
poisson_var = poisson.var()
poisson_kur = scs.kurtosis(poisson)
poisson_skew = scs.skew(poisson)
print('Bernoulli')
print('Mean: ', bernoulli_mean)
print('Variance: ', bernoulli_var)
# Bug fix: the original printed skewness under the 'Kurtosis: ' label and
# kurtosis under 'Skewness: ' for the Bernoulli sample only.
print('Kurtosis: ', bernoulli_kur)
print('Skewness: ', bernoulli_skew)
print('Binomial')
print('Mean: ', binomial_mean)
print('Variance: ', binomial_var)
print('Kurtosis: ', binomial_kur)
print('Skewness: ', binomial_skew)
print('Poisson')
print('Mean: ', poisson_mean)
print('Variance: ', poisson_var)
print('Kurtosis: ', poisson_kur)
print('Skewness: ', poisson_skew)
# 4: histogram/KDE plots of the three samples.
# NOTE(review): seaborn's distplot is deprecated in favour of
# displot/histplot -- confirm the installed seaborn version still has it.
print('-----4-----')
plot = sns.distplot(bernoulli)
plot.set(xlabel='Bernoulli distribution', ylabel='Frequency')
plt.show()
plot = sns.distplot(binomial)
plot.set(xlabel='Binomial distribution', ylabel='Frequency')
plt.show()
plot = sns.distplot(poisson)
plot.set(xlabel='Poisson distribution', ylabel='Frequency')
plt.show()
# 5: larger binomial sample plus the CDF evaluated at k = n = 20
# (which is the total probability mass, i.e. 1.0).
print('-----5-----')
binomial = scs.binom.rvs(n=20, p=0.4, size=1000)
plot = sns.distplot(binomial)
plot.set(xlabel='Binomial distribution', ylabel='Frequency')
plt.show()
print("Probability sum: ", scs.binom.cdf(k=20, n=20, p=0.4))
# 6: sample a normal distribution (mean 0, std 2) and compare the sample
# statistics with their theoretical values.
print('-----6-----')
norm = scs.norm.rvs(size = 10000, loc=0, scale=2)
print('Mean: expected 0, actual ', norm.mean())
print('Variance: expected 4, actual ', norm.var())
print('Standard deviation: expected 2, actual ', norm.std())
print('Median: expected 0, actual ', scs.norm.median(loc=0, scale=2))
print('Expected value: expected 0, actual ', scs.norm.expect(loc=0, scale=2))
print('Kurtosis: expected 0, actual ', scs.kurtosis(norm))
print('Skewness: expected 0, actual ', scs.skew(norm))
# 7: overlay three normal densities with different means and spreads.
print('-----7-----')
data1 = scs.norm.rvs(size=1000, loc=1, scale=2)
data2 = scs.norm.rvs(size=1000, loc=0, scale=1)
data3 = scs.norm.rvs(size=1000, loc=-1, scale=0.5)
plot = sns.distplot(data1, color='red')
plot = sns.distplot(data2, color='blue')
plot = sns.distplot(data3, color='black')
plt.show()
import torch
import numpy as np
import parameters as pt
from torch.utils.data import Dataset
from numpy.random import normal, randint, permutation
from glob import glob
from skimage import img_as_float
from skimage.io import imread, imsave
from skimage.color import rgb2gray
from skimage.filters import prewitt
from os import makedirs
from os.path import dirname, exists
from scipy.ndimage.filters import convolve
from scipy import fftpack
def _pad_to(image, size):
Hk, Wk = image.shape
Ht, Wt = size
Hp, Wp = (Ht - Hk)//2, (Wt - Wk)//2
return np.pad(image, pad_width=((Hp, Ht - Hk - Hp), (Wp, Wt - Wk - Wp)),
mode='constant')
def _ensure_gray(image):
if image.ndim <= 2:
return image
elif image.shape[2] == 1:
return image[:, :, 0]
else:
return rgb2gray(image)
def _augment(image):
transformed = np.fliplr(image) if randint(2) == 1 else image
transformed = np.rot90(transformed, k=randint(4))
return transformed
def list_image_files(image_dir):
    '''
    Return image files found directly under ``image_dir``.

    Files are collected per extension in the fixed order jpg, png, bmp,
    gif, each group sorted alphabetically (matching the original lookup
    order, so jpg files always come first).
    '''
    collected = []
    for ext in ('jpg', 'png', 'bmp', 'gif'):
        collected.extend(sorted(glob(image_dir + '/*.' + ext)))
    return collected
def load_image(image_file):
    '''
    Read an image, normalize it to float [0, 1] and ensure even H and W.

    The result always carries a channel axis (HxWxC, float32); RGB input
    is converted to grayscale when pt.image_channels == 1.
    '''
    image = imread(image_file)
    image = img_as_float(image)
    if image.ndim == 2:  # grayscale: add the missing channel axis
        image = np.expand_dims(image, axis=-1)
    elif image.shape[2] == 3 and pt.image_channels == 1:
        image = np.expand_dims(rgb2gray(image), axis=-1)
    Hi, Wi, _ = image.shape
    # Trim odd rows/columns so both spatial sizes are even.
    image = image[:2*(Hi//2), :2*(Wi//2), :]
    return image.astype('float32')
def imwrite(image, file_path):
    """Save `image` to `file_path`, creating parent directories as needed.

    Pixel values are clipped to [0, 1] before saving.
    """
    root = dirname(file_path)
    # Guard against a bare filename: dirname('x.png') == '' and the
    # original makedirs('') raised FileNotFoundError.
    if root and not exists(root):
        makedirs(root)
    # Clip without mutating the caller's array (the original clipped in
    # place, silently modifying the input).
    image = np.clip(image, 0, 1)
    imsave(file_path, image)
def load_kernel(kernel_file, bounding_box_size=None):
    '''
    Read a blur-kernel image, normalize it to sum to 1 and ensure odd size.

    When `bounding_box_size` is given, the kernel is instead zero-padded
    and centred inside that box (see _pad_to).
    '''
    kernel = imread(kernel_file, as_gray=True).astype('float32')
    kernel /= kernel.sum()
    if bounding_box_size:
        return _pad_to(kernel, bounding_box_size)
    else:
        Hk, Wk = kernel.shape
        # Pad one extra row/column at the bottom/right when a dimension is
        # even, so the kernel has a well-defined centre pixel.
        return np.pad(kernel, pad_width=((0, (Hk + 1) % 2), (0, (Wk + 1) % 2)),
                      mode='constant')
def to_tensor(array):
    '''
    Convert a numpy array to a float32 torch tensor, moving the channel
    axis to the front (HxWxC -> CxHxW, NxHxWxC -> NxCxHxW).  Arrays of
    other ranks are converted unchanged.
    '''
    if array.ndim == 3:
        array = array.transpose(2, 0, 1)
    elif array.ndim == 4:
        array = array.transpose(0, 3, 1, 2)
    return torch.from_numpy(array.astype('float32'))
def parse(data, device=torch.device('cpu')):
    """Unpack a dataset sample dict into (blurred, image, kernel) tensors,
    moving each onto `device`."""
    return tuple(data[key].to(device)
                 for key in ('blurred', 'image', 'kernel'))
def random_crop(image, patch_size):
    """Crop a random patch of `patch_size` from an HxWxC image.

    NOTE: randint's upper bound is exclusive, so the bottom/right-most
    start position is never drawn -- kept as-is to preserve behaviour.
    """
    height, width, _ = image.shape
    crop_h, crop_w = patch_size
    top = 0 if height == crop_h else randint(0, height - crop_h)
    left = 0 if width == crop_w else randint(0, width - crop_w)
    return image[top: top + crop_h, left: left + crop_w, :]
def convn(image, kernel):
    '''
    Multi-dimensional convolution with 'valid' padding.

    The 2-D kernel is applied independently to every channel; the output
    spatial size shrinks by (kernel_size - 1) along each dimension.
    '''
    kh, kw = kernel.shape
    oh, ow = kh // 2, kw // 2

    def _conv_single(channel):
        # Full-size convolution, then crop away the border that 'valid'
        # padding would not have produced.
        return convolve(channel, kernel, mode='constant')[oh:-oh, ow:-ow]

    if image.ndim < 3:
        return _conv_single(image)
    n_channels = image.shape[2]
    if n_channels == 1:
        return np.expand_dims(_conv_single(image[:, :, 0]), axis=-1)
    return np.stack([_conv_single(image[:, :, c]) for c in range(n_channels)],
                    axis=-1)
def solve_min_laplacian(boundary_image):
    """Reconstruct interior pixel values from the boundary of the image.

    Solves the discrete Laplace equation with the given boundary values
    using a DST-based Poisson solver.  NOTE(review): `boundary_image` is
    modified in place (its interior is zeroed and then overwritten);
    callers must not rely on the input afterwards.
    """
    H, W = boundary_image.shape
    # Laplacian (zero right-hand side: pure boundary-value problem)
    f = np.zeros((H, W))
    # boundary image contains image intensities at boundaries
    boundary_image[1:-1, 1:-1] = 0
    j = np.arange(1, H - 1)
    k = np.arange(1, W - 1)
    f_bp = np.zeros((H, W))
    # 5-point Laplacian stencil applied to the boundary data
    f_bp[np.ix_(j, k)] = -4*boundary_image[np.ix_(j, k)] \
        + boundary_image[np.ix_(j, k + 1)] + boundary_image[np.ix_(j, k - 1)] \
        + boundary_image[np.ix_(j - 1, k)] + boundary_image[np.ix_(j + 1, k)]
    # subtract boundary points contribution
    f1 = f - f_bp  # subtract boundary points contribution
    # DST Sine Transform algo starts here
    f2 = f1[1:-1, 1:-1]
    # compute sine transform (along axis 0; applied twice for 2-D)
    def dst(x):
        return fftpack.dst(x, type=1, axis=0) / 2.0
    def idst(x):
        return np.real(fftpack.idst(x, type=1, axis=0)) / (x.shape[0] + 1.0)
    tt = dst(f2)
    f2sin = dst(tt.T).T
    # compute Eigen Values of the discrete Laplacian in the sine basis
    x, y = np.meshgrid(np.arange(1, W - 1), np.arange(1, H - 1))
    denom = 2*np.cos(np.pi*x/(W-1)) - 2 + 2*np.cos(np.pi*y/(H-1)) - 2
    # divide
    f3 = f2sin / denom
    # compute Inverse Sine Transform
    tt = idst(f3)
    img_tt = idst(tt.T).T
    # put solution in inner points; outer points obtained from boundary image
    img_direct = boundary_image
    img_direct[1:-1, 1:-1] = 0
    img_direct[1:-1, 1:-1] = img_tt
    return img_direct
def edgetaper(blurred, kernel_size):
    """Extend a blurred HxWxC image beyond its borders with smooth padding.

    Pads by roughly the kernel radius with a linear ramp, replaces each
    of the four border bands with the solution of Laplace's equation so
    the padding blends smoothly with the image content, then adds a final
    constant (zero) border of the same radius.
    """
    Hk, Wk = kernel_size
    Hk2, Wk2 = Hk // 2, Wk // 2
    padded = np.pad(blurred, pad_width=((Hk2, Hk - Hk2 - 1),
                                        (Wk2, Wk - Wk2 - 1), (0, 0)),
                    mode='linear_ramp')
    # Smooth the four border bands, channel by channel.
    for c in range(padded.shape[2]):
        padded[:Hk2+1, Wk2:-Wk2, c] = solve_min_laplacian(
            padded[:Hk2+1, Wk2:-Wk2, c])
        padded[-Hk2-1:, Wk2:-Wk2, c] = solve_min_laplacian(
            padded[-Hk2-1:, Wk2:-Wk2, c])
        padded[:, :Wk2+1, c] = solve_min_laplacian(padded[:, :Wk2+1, c])
        padded[:, -Wk2-1:, c] = solve_min_laplacian(padded[:, -Wk2-1:, c])
    padded = np.pad(padded, pad_width=((Hk//2, Hk - Hk//2 - 1),
                                       (Wk//2, Wk - Wk//2 - 1), (0, 0)),
                    mode='constant')
    return padded
class SyntheticDataset(Dataset):
    """Dataset that synthesises blurred/sharp training pairs on the fly.

    Each item is a randomly cropped, flip/rotate-augmented patch from an
    image in `image_dir`, convolved ('valid') with a kernel drawn from
    `kernel_dir`.
    """
    def __init__(self, image_dir, kernel_dir,
                 max_trial=10, grad_thr=0.05, thr_ratio=0.06):
        # max_trial: crop attempts before accepting a (possibly smooth) patch.
        # grad_thr / thr_ratio: edge-density test used to reject flat patches.
        self.image_files = list_image_files(image_dir)
        self.kernel_files = list_image_files(kernel_dir)
        # Fixed random image -> kernel assignment, drawn once per dataset.
        self.kernel_indices = permutation(len(self.kernel_files))
        self.max_trial = max_trial
        self.grad_thr = grad_thr
        self.thr_ratio = thr_ratio
    def __len__(self):
        return len(self.image_files)
    def __getitem__(self, idx):
        Hv, Wv = pt.patch_size  # 'valid' size
        Hk, Wk = pt.bounding_box_size
        Hp, Wp = Hv + Hk - 1, Wv + Wk - 1  # 'same' size
        image = _augment(load_image(self.image_files[idx]))
        # Retry cropping until the patch has enough edge content
        # (over-smooth patches carry little deblurring signal).
        for t in range(self.max_trial):
            patch = random_crop(image, (Hp, Wp))
            # Validate patch: reject it if it is over-smooth
            grad = prewitt(_ensure_gray(patch))
            ratio = np.count_nonzero(grad > self.grad_thr) / float(grad.size)
            if ratio > self.thr_ratio:
                break
        ker_idx = self.kernel_indices[idx]
        kernel = load_kernel(self.kernel_files[ker_idx], pt.bounding_box_size)
        blurred = convn(patch, kernel)
        # (noise injection and edge-tapered/padded variants were removed;
        #  e.g. blurred += normal(scale=pt.noise_stddev, size=blurred.shape)
        #  or blurred = edgetaper(blurred, (Hk, Wk)) before to_tensor.)
        blurred = to_tensor(blurred)
        # Keep only the central region covered by the 'valid' convolution.
        patch = to_tensor(patch[Hk//2:Hk//2 + Hv, Wk//2:Wk//2 + Wv, :])
        return {'blurred': blurred, 'image': patch,
                'kernel': to_tensor(kernel)}
class BlurredImageDataset(Dataset):
    """Dataset of pre-rendered blurred/sharp image pairs stored on disk.

    Expects `data_dir` to contain 'blurred/' and 'sharp/' directories with
    matching file lists, and optionally 'kernel/' with one kernel image
    per pair.
    """
    def __init__(self, data_dir):
        self.blur_image_files = list_image_files(data_dir + '/blurred')
        self.sharp_image_files = list_image_files(data_dir + '/sharp')
        assert len(self.blur_image_files) == len(self.sharp_image_files)
        self.kernel_files = list_image_files(data_dir + '/kernel')
        self.num_kernels = len(self.kernel_files)
        self.num_images = len(self.blur_image_files)
    def __len__(self):
        return self.num_images
    def __getitem__(self, idx):
        Hk, Wk = pt.bounding_box_size
        blurred = load_image(self.blur_image_files[idx])
        image = load_image(self.sharp_image_files[idx])
        # (random-crop / noise / edgetaper preprocessing variants removed;
        #  e.g. blurred = edgetaper(blurred, (Hk, Wk)) before to_tensor.)
        blurred = to_tensor(blurred)
        image = to_tensor(image)
        # NOTE(review): kernel may be None; the default DataLoader collate
        # cannot batch None values -- confirm downstream handling.
        kernel = None if self.num_kernels == 0 else to_tensor(
            load_kernel(self.kernel_files[idx], pt.bounding_box_size))
        return {'blurred': blurred, 'image': image, 'kernel': kernel}
|
#coding:utf-8
'''
折半查找法双排序列表的中位数
'''
def half_find(l1, d):
    """Binary-search the sorted list `l1` for the value `d`.

    Returns `d` when it is present, otherwise None.
    """
    if len(l1) == 0:
        return None
    if len(l1) == 1:
        return d if l1[0] == d else None
    # Value out of the list's range cannot be present.
    if d < l1[0] or d > l1[-1]:
        return None
    # Floor division: the original '/' produced a float index on Python 3.
    mid = len(l1) // 2
    if l1[mid] == d:
        return d
    if l1[mid] < d:
        return half_find(l1[mid + 1:], d)
    return half_find(l1[:mid], d)
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted lists as a float.

        The original divide-and-conquer helper used Python 2 integer
        division and tested parity with `l & 0x10` (bit 4!) instead of
        `l % 2`, producing wrong answers; replaced with a simple merge
        (Timsort on the concatenation is near-linear for sorted runs).
        """
        merged = sorted(nums1 + nums2)
        total = len(merged)
        mid = total // 2
        if total % 2:
            return float(merged[mid])
        return (merged[mid - 1] + merged[mid]) / 2.0
def media_array(l1, l2, k):
    """Return the k-th smallest (0-indexed) element of two sorted lists.

    The original halving recursion had a wrong base case (it returned
    max(l1[-1], l2[-1]) whenever len(l1) == 1, e.g. 3 instead of 1 for
    ([1], [2, 3], k=0)); replaced with a straightforward two-pointer
    merge that stops at the k-th element.
    """
    if not l1:
        return l2[k]
    if not l2:
        return l1[k]
    i = j = 0
    while True:
        # Advance whichever list currently has the smaller head.
        if i < len(l1) and (j >= len(l2) or l1[i] <= l2[j]):
            current = l1[i]
            i += 1
        else:
            current = l2[j]
            j += 1
        if i + j == k + 1:
            return current
def main(l1, l2):
    """Median of two sorted lists via the k-th order statistic helper."""
    total = len(l1) + len(l2)
    half = total // 2
    if total % 2:
        # Odd combined length: a single middle element.
        return media_array(l1, l2, half)
    # Even: average the two central order statistics.
    return (media_array(l1, l2, half - 1) + media_array(l1, l2, half)) / 2.0
# Smoke run (Python 2 `print` statement): median of two all-ones lists.
l1 = [1, 1, 1]
l2 = [1, 1, 1]
C = Solution()
print C.findMedianSortedArrays(l1, l2)
|
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from sklearn.metrics import top_k_accuracy_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ReduceLROnPlateau
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from src.context import ctx
from src.config import params, config
from src.logger import logger
class LSTMModel(pl.LightningModule):
    def __init__(self, num_items):
        """Build the item-embedding + LSTM + linear next-item classifier.

        num_items: size of the item vocabulary; index 0 is the padding id
        (padding_idx=0 keeps its embedding at zero).
        Hyper-parameters come from the project-level `params.lstm.model`.
        """
        super().__init__()
        self.num_items = num_items
        self.item_embedding = nn.Embedding(
            num_embeddings=self.num_items,
            embedding_dim=params.lstm.model.embedding_dim,
            padding_idx=0,
        )
        self.model = nn.LSTM(
            input_size=params.lstm.model.embedding_dim,
            hidden_size=params.lstm.model.hidden_dim,
            num_layers=params.lstm.model.num_layers,
            batch_first=True,
            dropout=params.lstm.model.dropout,
        )
        # Projects each hidden state onto a score per vocabulary item.
        self.linear = nn.Linear(params.lstm.model.hidden_dim, num_items)
    def forward(self, batch):
        """Return per-timestep logits over the item vocabulary.

        batch["sequence"]: padded item-id tensor, batch-first (see
        batch_first=True below); batch["sequence_lengths"]: true lengths.
        Output shape: (batch, padded_len, num_items).
        """
        sequence = batch["sequence"]
        sequence_lengths = batch["sequence_lengths"]
        sequence = self.item_embedding(sequence)
        # Pack so the LSTM skips padded positions; lengths must live on CPU.
        packed_sequence = pack_padded_sequence(sequence, sequence_lengths.cpu(), batch_first=True, enforce_sorted=False)
        hidden_states, last_hidden_state = self.model(packed_sequence)
        # Unpack back to a padded (batch, padded_len, hidden_dim) tensor.
        padded_sequence, _ = pad_packed_sequence(hidden_states, batch_first=True)
        logits = self.linear(padded_sequence)
        return logits
def training_step(self, batch, batch_idx):
sequence = batch["sequence"]
sequence_lengths = batch["sequence_lengths"]
target = batch["target"]
logits = self.forward(batch)
loss = self.criterion(logits, target, sequence_lengths)
logger.log_metric("train_loss", loss.item())
return {"loss": loss}
def training_epoch_end(self, outputs):
train_loss_mean = torch.stack([x["loss"] for x in outputs]).mean()
logs = {
"train_loss": train_loss_mean,
}
for key, value in logs.items():
logger.log_metric(key, value.item(), dvc=True)
return
def validation_step(self, batch, batch_idx):
sequence = batch["sequence"]
sequence_lengths = batch["sequence_lengths"]
target = batch["target"]
last_items = batch["last_item"]
logits = self.forward(batch)
loss = self.criterion(logits, target, sequence_lengths)
last_item_predictions = torch.softmax(logits[:, -1], dim=1)
accuracies = {
f"valid_acc@{k}": top_k_accuracy_score(
last_items.detach().cpu().numpy(),
last_item_predictions.detach().cpu().numpy(),
k=k,
labels=np.arange(self.num_items)
)
for k in [20, 50, 100]
}
return {"valid_loss": loss, **accuracies}
def validation_epoch_end(self, outputs):
valid_loss_mean = torch.stack([x["valid_loss"] for x in outputs]).mean()
logs = {
"valid_loss": valid_loss_mean,
}
for k in [20, 50, 100]:
logs[f"valid_acc@{k}"] = np.mean([x[f"valid_acc@{k}"] for x in outputs])
for key, value in logs.items():
logger.log_metric(key, value.item(), dvc=True)
return
def criterion(self, logits, targets, sequence_lengths):
mask = torch.zeros_like(targets).float()
targets = targets.view(-1)
predictions = torch.log_softmax(logits, dim=2)
predictions = predictions.view(-1, self.num_items)
for row, col in enumerate(sequence_lengths):
mask[row, :col.item()] = 1.
mask = mask.view(-1)
valid_items = int(torch.sum(mask).item())
predictions = predictions[range(predictions.shape[0]), targets] * mask
ce_loss = - torch.sum(predictions) / valid_items
return ce_loss
def configure_optimizers(self):
self.optimizer = Adam(self.model.parameters(), lr=params.lstm.optimizer.learning_rate)
return self.optimizer
|
# -*- coding: utf-8 -*-
import datetime
import calendar
def date_finder(year, month):
    """Return all Saturdays of `month` (an English month name) in `year`.

    Parameters:
        year  -- four-digit year as int.
        month -- full English month name, e.g. 'January'.
    Returns a list of datetime.date objects; an empty list when `month`
    is not a recognized month name (matching the original behavior).
    """
    def allsaturdays(year):
        # Start at Jan 1 and advance to the first Saturday, then step one
        # week at a time. (BUG FIX: the original started at Jan 4 with
        # `5 - weekday`, which skipped a Saturday falling on Jan 1-2 in
        # some years.)
        d = datetime.date(year, 1, 1)
        d += datetime.timedelta(days=(5 - d.weekday()) % 7)
        while d.year == year:
            yield d
            d += datetime.timedelta(days=7)
    # Map 'January' -> 1 ... 'December' -> 12 (entry 0 of month_name is '').
    # Replaces the original 12-branch if/elif chain.
    month_numbers = {name: i for i, name in enumerate(calendar.month_name) if name}
    month_num = month_numbers.get(month)  # None for unknown names -> empty result
    return [d for d in allsaturdays(year) if d.month == month_num]
#eg = date_finder(2020, 'January')
#print(eg)
#print(type(eg[0]))
#print(type(eg))
def end_dates(airdates):
    """Return each air date shifted forward by ten weeks (the run length)."""
    ten_weeks = datetime.timedelta(weeks=10)
    return [start + ten_weeks for start in airdates]
#end_dates(eg)
#test = end_dates(eg)
#print(test) |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
#reverse return url as string. The view takes care of the redirect part
from django.urls import reverse
# Create your models here.
class Post(models.Model):
    """A single blog post authored by a Django auth User."""
    title=models.CharField(max_length=100) #Title of the blog post
    content=models.TextField() #Content inside the blog post
    date_posted=models.DateTimeField(default=timezone.now) #Saves the date on which the content was uploaded
    author=models.ForeignKey(User,on_delete=models.CASCADE) #Name of the author of the post
    def __str__(self):
        """Human-readable representation (used by the admin and shell)."""
        return self.title #Returns the title of the post when object is called
    def get_absolute_url(self):
        """Return the canonical URL for this post via the 'post-detail' route."""
        return reverse('post-detail',kwargs={'pk':self.pk}) #Gets the absolute (not hardcoded) url of the post
#(uses the primary key of the post for display) |
import FWCore.ParameterSet.Config as cms
process = cms.Process("ecalReconstructedPi0")
# Standard sequences, geometry, conditions and analysis-specific modules.
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Geometry.CaloEventSetup.CaloTopology_cfi')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('CondCore.DBCommon.CondDBCommon_cfi');
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Pi0Analysis.ecalReconstructedPi0.ecalflowntp_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
# Run over all events in the input files.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.TFileService = cms.Service("TFileService",
                                   fileName = cms.string("pPb_PAHighPt_SideBand22_28_NoTimingCuts_YesEnergyCorr.root")
)
# Report progress every 1000 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.GlobalTag.globaltag = 'GR_P_V42_AN3::All'
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import *
overrideCentrality(process)
process.HeavyIonGlobalParameters = cms.PSet(
    centralityVariable = cms.string("HFtowersPlusTrunc"),
    nonDefaultGlauberModel = cms.string(""),
    centralitySrc = cms.InputTag("pACentrality"),
    pPbRunFlip = cms.untracked.uint32(99999999)
)
# NOTE(review): collisionEventSelection_cff and HiCentrality_cfi are each
# loaded twice in this file (harmless duplicate loads, worth cleaning up).
process.load('RecoHI.HiCentralityAlgos.HiCentrality_cfi')
process.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')
#process.load('msharma.RpPbAnalysis.PAPileUpVertexFilter_cff')
process.load('RecoHI.HiCentralityAlgos.HiCentrality_cfi')
process.load("HLTrigger.HLTfilters.hltHighLevel_cfi")
# One HLT filter clone per trigger / multiplicity threshold.
process.hltSingleTrigger = process.hltHighLevel.clone()
process.hltSingleTrigger.HLTPaths = ["HLT_PAZeroBiasPixel_SingleTrack_v1"]
process.hltMult100 = process.hltHighLevel.clone()
process.hltMult100.HLTPaths = ["HLT_PAPixelTracks_Multiplicity100_v1",
                               "HLT_PAPixelTracks_Multiplicity100_v2"]
process.hltMult130 = process.hltHighLevel.clone()
process.hltMult130.HLTPaths = ["HLT_PAPixelTracks_Multiplicity130_v1",
                               "HLT_PAPixelTracks_Multiplicity130_v2"]
process.hltMult160 = process.hltHighLevel.clone()
process.hltMult160.HLTPaths = ["HLT_PAPixelTracks_Multiplicity160_v1",
                               "HLT_PAPixelTracks_Multiplicity160_v2"]
process.hltMult190 = process.hltHighLevel.clone()
process.hltMult190.HLTPaths = ["HLT_PAPixelTracks_Multiplicity190_v1",
                               "HLT_PAPixelTracks_Multiplicity190_v2"]
# andOr=True: pass if ANY listed path fired; throw=False: tolerate paths
# missing from the menu instead of aborting the job.
process.hltMult100.andOr = cms.bool(True)
process.hltMult100.throw = cms.bool(False)
process.hltMult130.andOr = cms.bool(True)
process.hltMult130.throw = cms.bool(False)
process.hltMult160.andOr = cms.bool(True)
process.hltMult160.throw = cms.bool(False)
process.hltMult190.andOr = cms.bool(True)
process.hltMult190.throw = cms.bool(False)
# --- ecalReconstructedPi0 analyzer configuration ---
# Input collections.
process.ecalReconstructedPi0.AlCaStreamEBpi0Tag = cms.untracked.InputTag("ecalRecHit","EcalRecHitsEB")
process.ecalReconstructedPi0.pfCandidatesTag = cms.InputTag("particleFlow")
process.ecalReconstructedPi0.srcTowers = cms.InputTag("towerMaker")
process.ecalReconstructedPi0.verbose = cms.untracked.bool(True)
# Track-quality selection.
process.ecalReconstructedPi0.qualityString_ = cms.untracked.string("highPurity")
process.ecalReconstructedPi0.cutDzErrMax_ = cms.untracked.double(3.0)
process.ecalReconstructedPi0.cutDxyErrMax_ = cms.untracked.double(3.0)
process.ecalReconstructedPi0.cutPtErrMax_ = cms.untracked.double(0.1)
# Event-level selections (overridden per multiplicity window below).
process.ecalReconstructedPi0.vertexZMax = cms.double(15.0)
process.ecalReconstructedPi0.cutMultMin = cms.double(0.0)
process.ecalReconstructedPi0.cutMultMax = cms.double(1000.0)
process.ecalReconstructedPi0.cutMinTrack = cms.double(0.4)
process.ecalReconstructedPi0.lowpi0PtCut = cms.double(0.7)
process.ecalReconstructedPi0.highpi0PtCut = cms.double(12.0)
process.ecalReconstructedPi0.ptBins = cms.vdouble(1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 3.0, 3.5, 4.0, 4.5, 5.0,
                                                  5.5, 6.0, 7.0, 8.0, 10.0, 12.0
                                                  )
#process.ecalReconstructedPi0.NptBins = cms.vdouble(0.7, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 5.0, 6.0, 8.0)
process.ecalReconstructedPi0.NptBins = cms.vdouble(0.7, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 5.0, 6.0, 8.0)
process.ecalReconstructedPi0.doEnergyRecalibration = cms.bool(True);
process.ecalReconstructedPi0.useClusterCrystalLimit = cms.bool(False);
process.ecalReconstructedPi0.lowCrystalLimit = cms.int32(1);
process.ecalReconstructedPi0.highCrystalLimit = cms.int32(9);
process.ecalReconstructedPi0.occBins = cms.vdouble(
    0., 20., 40., 60., 80., 100., 120., 140., 160.,
    180., 200., 250., 300., 500.
)
# NOTE(review): the etaBins list below contains 0.94 twice and no 0.84 --
# almost certainly a typo for "..., 0.74, 0.84, 0.94, ..."; confirm the
# intended binning before changing it.
process.ecalReconstructedPi0.etaBins = cms.vdouble(
    -1.46, -1.36, -1.26, -1.16, -1.06, -0.96, -0.86, -0.76, -0.66, -0.56,
    -0.46, -0.36, -0.26, -0.16, -0.06, 0.04, 0.14, 0.24, 0.34, 0.44, 0.54,
    0.64, 0.74, 0.94, 0.94, 1.04, 1.14, 1.24, 1.34, 1.44)
process.ecalReconstructedPi0.occByCentrality = cms.bool(True)
process.ecalReconstructedPi0.diHadronCorrelations = cms.bool(False)
process.ecalReconstructedPi0.pi0HadronCorrelations = cms.bool(True)
process.ecalReconstructedPi0.etaHadronCorrelations = cms.bool(False)
process.ecalReconstructedPi0.rotatedBackground = cms.bool(False)
process.ecalReconstructedPi0.cutByLeadingTrackPt = cms.bool(False)
process.ecalReconstructedPi0.leadingTrackPtMin = cms.double(0.0)
process.ecalReconstructedPi0.leadingTrackPtMax = cms.double(999.0)
process.ecalReconstructedPi0.cutByLeadingPhotonPt = cms.bool(False)
process.ecalReconstructedPi0.leadingPhotonPtMin = cms.double(0.0)
process.ecalReconstructedPi0.leadingPhotonPtMax = cms.double(999.0)
process.ecalReconstructedPi0.swissThreshold = cms.double(0.85)
process.ecalReconstructedPi0.timeThreshold = cms.double(10.0)
# NOTE(review): a boolean wrapped in cms.double evaluates to 0.0; if the
# producer declares avoidIeta85 as a bool this should be cms.bool(False) --
# check ecalflowntp_cfi / the plugin's getParameter type before fixing.
process.ecalReconstructedPi0.avoidIeta85 = cms.double(False)
# Four clones of the analyzer, one per offline-multiplicity window. Note
# the windows sit above the corresponding trigger thresholds (e.g. the
# Multiplicity100-triggered sample is analyzed in the 120-150 window).
process.EcalFlowNtpMult100 = process.ecalReconstructedPi0.clone(
    cutMultMin = cms.double(120),
    cutMultMax = cms.double(150)
)
process.EcalFlowNtpMult130 = process.ecalReconstructedPi0.clone(
    cutMultMin = cms.double(150),
    cutMultMax = cms.double(185)
)
process.EcalFlowNtpMult160 = process.ecalReconstructedPi0.clone(
    cutMultMin = cms.double(185),
    cutMultMax = cms.double(220)
)
process.EcalFlowNtpMult190 = process.ecalReconstructedPi0.clone(
    cutMultMin = cms.double(220),
    cutMultMax = cms.double(260)
)
# Each path: trigger filter -> collision event selection -> centrality -> analyzer.
process.Mult100 = cms.Path(process.hltMult100 *
                           process.PAcollisionEventSelection *
                           #process.siPixelRecHits *
                           #process.pileupVertexFilterCutGplus *
                           process.pACentrality *
                           process.EcalFlowNtpMult100
                           )
process.Mult130 = cms.Path(process.hltMult130 *
                           process.PAcollisionEventSelection *
                           #process.siPixelRecHits *
                           #process.pileupVertexFilterCutGplus *
                           process.pACentrality *
                           process.EcalFlowNtpMult130
                           )
process.Mult160 = cms.Path(process.hltMult160 *
                           process.PAcollisionEventSelection *
                           #process.siPixelRecHits *
                           #process.pileupVertexFilterCutGplus *
                           process.pACentrality *
                           process.EcalFlowNtpMult160
                           )
process.Mult190 = cms.Path(process.hltMult190 *
                           process.PAcollisionEventSelection *
                           #process.siPixelRecHits *
                           #process.pileupVertexFilterCutGplus *
                           process.pACentrality *
                           process.EcalFlowNtpMult190
                           )
process.schedule = cms.Schedule(process.Mult100,process.Mult130,process.Mult160,process.Mult190)
process.source = cms.Source("PoolSource",
                            fileNames = cms.untracked.vstring(
        '/store/hidata/HIRun2013/PAHighPt/RECO/PromptReco-v1/000/211/631/00000/FEDE0B60-3F75-E211-8FE3-003048D2BC5C.root'
    )
)
|
from .config import cfg
from .utils import get_tvm_module_N_params
import tvm
from tvm import relay, auto_scheduler
from tvm.relay import data_dep_optimization as ddo
from argparse import ArgumentParser
def run_tuning_cpu(tasks, task_weights, json_file, trials=1000, use_sparse=False):
    """Auto-schedule the given tasks on the local CPU, logging records to json_file."""
    print("Begin tuning...")
    scheduler = auto_scheduler.TaskScheduler(tasks, task_weights)
    options = auto_scheduler.TuningOptions(
        num_measure_trials=trials,  # raise toward 20000 for best performance
        runner=auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True),
        measure_callbacks=[auto_scheduler.RecordToFile(json_file)],
    )
    if not use_sparse:
        scheduler.tune(options)
        return
    # Sparse workloads need dedicated sketch rules per task.
    from tvm.topi.sparse.utils import sparse_sketch_rules
    policies = [
        auto_scheduler.SketchPolicy(
            task,
            program_cost_model=auto_scheduler.XGBModel(),
            init_search_callbacks=sparse_sketch_rules(),
        )
        for task in tasks
    ]
    scheduler.tune(options, search_policy=policies)
def run_tuning(tasks, task_weights, json_file, trials=1000):
    """Auto-schedule tasks via a local RPC measurement context (GPU-capable).

    Records are appended to `json_file`; `trials` bounds the total number
    of measurement trials across all tasks.
    """
    print("Begin tuning...")
    measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10)
    tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
    tune_option = auto_scheduler.TuningOptions(
        # BUG FIX: honor the `trials` argument (was hard-coded to 200).
        num_measure_trials=trials,
        runner=measure_ctx.runner,
        # BUG FIX: `log_file` was an undefined name (NameError at call time);
        # the intended target is the json_file parameter.
        measure_callbacks=[auto_scheduler.RecordToFile(json_file)],
    )
    tuner.tune(tune_option)
# We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
def make_parser():
    """Build the CLI parser: a mandatory -d/--device flag, 'cpu' or 'gpu'."""
    parser = ArgumentParser(
        # BUG FIX: the original description was a syntax error
        # (f-string closed mid-expression); the module could not even import.
        description=f"usage ./{__file__} -d cpu or gpu",
    )
    # BUG FIX: argparse's keyword is `required`, not `requires`.
    parser.add_argument("-d", "--device", choices=["cpu", "gpu"], required=True)
    return parser
if __name__ == "__main__":
    parser = make_parser()
    args = parser.parse_args()
    print("Get module...")
    # Load the relay module and parameters described by the project config.
    mod, params = get_tvm_module_N_params(
        cfg.model_path,
        input_name=cfg.input_name,
        batch_size=cfg.batch_size,
        input_shape=cfg.input_shape,
        layout=cfg.layout,
        dtype=cfg.dtype,
        use_sparse=cfg.use_sparse,
    )
    print("Extract tasks...")
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, cfg.target)
    for idx, task in enumerate(tasks):
        print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
        print(task.compute_dag)
    # Rule of thumb: ~800 measurement trials per extracted task.
    recommended_trials = 800*len(tasks)
    if args.device == "cpu":
        run_tuning_cpu(tasks, task_weights, cfg.json_file, trials=recommended_trials)
    else:
        # BUG FIX: run_tuning_gpu does not exist in this module (NameError);
        # the RPC-based run_tuning handles the GPU target.
        run_tuning(tasks, task_weights, cfg.json_file, trials=recommended_trials)
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# MapTask Manager
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
# NOC modules
from noc.core.service.client import open_sync_rpc
logger = logging.getLogger(__name__)
class MTManagerImplementation(object):
    """Dispatches SA scripts through the SAE service and waits for the result."""

    def __init__(self, limit=0):
        self.limit = limit

    def run(self, object, script, params=None, timeout=None):
        """
        Run SA script and wait for result
        """
        # Strip any dotted module path, keeping just the trailing script name.
        short_name = script.split(".")[-1] if "." in script else script
        rpc = open_sync_rpc("sae", calling_service="MTManager")
        return rpc.script(object.id, short_name, params, timeout)
# Run single instance
# Module-level singleton shared by all importers of this module.
MTManager = MTManagerImplementation()
|
import keyboard
def myString():
    """Prompt the user for a string and print its reverse.

    Purely interactive: reads from stdin, writes to stdout, returns None.
    """
    print("The Program must have interface as below:")
    # BUG FIX: the prompt used to be printed *after* input() returned, so
    # the user typed into a blank line; pass the prompt to input() instead.
    mystring = input("Please enter string:")
    print("The old string", mystring)
    new = mystring[::-1]  # slice with step -1 reverses the string
    print("The reversed string:", new)
    print("Press enter to continue another reverse, ESC to exit")
    # (Removed a leftover debug print of keyboard.is_pressed('Enter').)
# Event loop: re-run the reverser whenever Enter is pressed; ESC exits.
# NOTE(review): this busy-waits at full CPU (no sleep), and is_pressed
# fires repeatedly while a key is held -- confirm that is intended.
while True:
    if keyboard.is_pressed('Enter'):
        myString()
    if keyboard.is_pressed('ESC'):
        print('You Pressed ESC Key!')
        break
# Runs once more *after* ESC ends the loop -- presumably this call was
# meant to come before the loop as the initial prompt; verify.
myString()
|
# Generated by Django 2.2.9 on 2020-08-06 08:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.9, 2020-08-06).

    Adds default ordering on Post, tightens Group slug/title constraints,
    and attaches verbose names / help texts to Post fields. Generated
    code: do not edit by hand except to squash.
    """
    dependencies = [
        ('posts', '0002_auto_20200715_1221'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'ordering': ('-pub_date',)},
        ),
        migrations.AlterField(
            model_name='group',
            name='slug',
            field=models.SlugField(max_length=40, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='group',
            name='title',
            field=models.CharField(max_length=200, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL, verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='post',
            name='group',
            field=models.ForeignKey(blank=True, help_text='К какой группе относится Ваша запись?', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='posts.Group', verbose_name='Группа'),
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации'),
        ),
        migrations.AlterField(
            model_name='post',
            name='text',
            field=models.TextField(help_text='Текст поста. Пишите сколько хотите, о чём хотите!', verbose_name='Текст'),
        ),
    ]
|
'''
Generate PDB information for each biounit file
'''
import os
from Bio.PDB import PDBParser
from Bio.PDB import DSSP
BIODIR = "../../ligandNet/2013_biounits_noligand"
BIODIR = "../pdb_nogz" # this is for the whole PDB
#BIODIR = "2013_biounits_noligand"
OUTDIR = "out_all_pdb"
#DSSPDIR= "./dssp-2.0.4-linux-amd64"
DSSPDIR= "dssp"
def RunDSSP(model, pdbfile):
    """Run DSSP on one model of `pdbfile`; return per-residue dicts or None.

    Each dict carries the residue object plus secondary structure, solvent
    accessibilities and backbone dihedrals as unpacked from the DSSP
    iteration tuples. Returns None when DSSP fails on the file.
    """
    try:
        dssp = DSSP(model, pdbfile)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; DSSP failures still yield None.
        return None
    reslist = []
    for residue in dssp:
        # NOTE(review): assumes iteration yields
        # (residue, sec_str, ssa, rsa, phi, psi) tuples; Biopython's DSSP
        # API differs between versions -- confirm for the pinned version.
        resinfo = residue[0]
        second_str = residue[1]
        ssa = residue[2]
        rsa = residue[3]
        phi = residue[4]
        psi = residue[5]
        reslist.append({"res_obj": resinfo, "sec_str": second_str, "ssa": ssa, "rsa": rsa, "phi": phi, "psi": psi})
    return reslist
def RunNACCESS(model, pdbfile):
    """Placeholder for a NACCESS-based accessibility calculation (not implemented)."""
    pass
def ProcessDSSP(reslist):
    """Flatten RunDSSP result dicts into per-residue rows for tabular output."""
    rows = []
    for entry in reslist:
        residue = entry["res_obj"]
        full_id = residue.get_full_id()
        # Recover the 4-char PDB code from the structure id (e.g. "pdb10gs.ent" -> "10gs").
        pdb_code = full_id[0].split(".")[-2][-4:]
        # PDBID, model id, chain id, residue name, residue num, secondary structure, ssa, rsa
        rows.append([
            pdb_code,
            full_id[0],
            full_id[1],
            full_id[2],
            residue.resname,
            full_id[3][1],
            entry["sec_str"],
            entry["ssa"],
            entry["rsa"],
            entry["phi"],
            entry["psi"],
        ])
    return rows
def RunEachBioUnit(biounit):
    """Parse one biounit file, run DSSP on each model, return flattened rows.

    Returns None when the PDB file cannot be parsed at all; models whose
    DSSP run fails are silently skipped.
    """
    p = PDBParser(PERMISSIVE = 1)
    pdbname= biounit.split("/")[-1]
    try:
        models = p.get_structure(pdbname, biounit)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; unparseable files still yield None.
        return None
    outlines = []
    for model in models:
        dssp_model = RunDSSP(model, biounit)
        if dssp_model:
            lines = ProcessDSSP(dssp_model)
            outlines += lines
    return outlines
def FileFilter(filelist, exist_dir):
    """Return entries of `filelist` that have no ".out" result in `exist_dir`.

    Output files are named "<input>.out", so stripping the final 4
    characters of each existing output recovers the input name.
    """
    # BUG FIX: the original computed x.split(".")[:-4] -- a (usually empty)
    # *list* slice -- so the membership test compared strings to lists,
    # never matched, and every input was reprocessed on each run.
    done = {name[:-4] for name in os.listdir(exist_dir)}
    return [name for name in filelist if name not in done]
def AllBioUnit(directory):
    """Process every not-yet-processed biounit file in `directory`.

    Writes one tab-separated ".out" file per input into OUTDIR; inputs
    whose output already exists are skipped via FileFilter. An empty
    output file is still created for inputs that yield no rows, marking
    them as done.
    """
    fileleft = FileFilter(os.listdir(directory), OUTDIR)
    for eachbiounit in fileleft:
        biounit = os.path.join(directory, eachbiounit)
        output = eachbiounit + ".out"
        lines = RunEachBioUnit(biounit)
        # Context manager guarantees the handle is closed even if
        # formatting or writing fails (original leaked on exceptions).
        with open(os.path.join(OUTDIR, output), "w") as outobj:
            if lines:
                content = "\n".join(["\t".join(map(str, x)) for x in lines])
                outobj.write(content)
if __name__ == "__main__":
    #EachBioUnit("pdb10gs.ent")
    # Process the whole directory configured at the top of this file.
    AllBioUnit(BIODIR)
|
# encoding:utf-8
from rest_framework import pagination
from django.conf import settings
import os
import csv
def export_model(export_fields, destination_fields, model, name):
    """Dump `model` (an iterable of ORM rows) to MEDIA csv/to_export/<name>.csv.

    `export_fields` are the attribute names read from each row (foreign keys
    are flattened to their primary keys); `destination_fields` supplies the
    CSV header row.

    NOTE(review): settings.MEDIA_URL is used as a *filesystem* path here --
    for a typical '/media/' MEDIA_URL this writes to an absolute '/media'
    directory; MEDIA_ROOT is probably intended. Confirm against deployment.
    NOTE(review): the file is opened in 'wb'; under Python 3 csv.writer
    needs text mode (newline='') -- confirm which interpreter runs this.
    """
    try:
        # Best-effort directory creation; already-existing dirs raise and
        # are deliberately ignored.
        os.mkdir(os.path.join(settings.MEDIA_URL, 'csv'))
        os.mkdir(os.path.join(settings.MEDIA_URL + 'csv', 'to_export'))
    except Exception as msg:
        pass
    outfile_path = os.path.join(
        settings.MEDIA_URL, 'csv', "to_export/%s.csv" % name)
    outfile = open(outfile_path, 'wb')
    writer = csv.writer(outfile)
    writer.writerow(destination_fields)
    for row in model:
        to_write = []
        for v in export_fields:
            value = getattr(row, v)
            try:
                # Foreign-key values are reduced to their primary key;
                # plain values have no .pk and fall through unchanged.
                value = value.pk
            except Exception as msg:
                pass
            to_write.append(value)
        writer.writerow(to_write)
    outfile.close()
|
#Write a program to accept string & a charter or another string and without using count method, count the occurances of second string into first string.
#!/usr/bin/python
def countOccurances(string,char):
    """Count occurrences of `char` in `string` without using str.count.

    Generalized from single characters to substrings of any length, per
    the exercise statement at the top of this file; single-character
    behavior is unchanged. Overlapping matches are counted. Prints the
    total (preserving the original interface) and also returns it.
    An empty needle yields 0, as in the original.
    """
    count=0
    step = len(char)
    if step:
        for i in range(len(string) - step + 1):
            if string[i:i + step] == char:
                count=count+1
    print(count )
    return count
def main():
    """Prompt for a string and a character, then report the occurrence count."""
    text = input("Enter string :")
    needle = input("Enter character :")
    countOccurances(text, needle)
if __name__=="__main__":
    main()
|
def print_parentheses(P):
    """Print each character of P on its own line, indented by nesting depth.

    A ')' that closes the '(' on top of the stack is printed at its
    partner's depth; every other character is pushed and printed at the
    depth it opens.
    """
    depth_stack = []
    for ch in P:
        closes = bool(depth_stack) and depth_stack[-1] == '(' and ch == ')'
        if closes:
            depth_stack.pop()
            indent = len(depth_stack)
        else:
            depth_stack.append(ch)
            indent = len(depth_stack) - 1
        print(' ' * indent + ch)
print_parentheses('(()((())()))') |
import datetime
from datetime import timedelta
import pandas as pd
import sqlite3
from tkinter import messagebox
from tkinter import *
pd.set_option('display.max_columns', 500)
# NOTE(review): hard-coded absolute path to a user-specific SQLite file;
# consider moving this to configuration.
conn = sqlite3.connect(
    'C:\\Users\\chenqi\\polybox\\Qian\\1 Doctoral Research\\16.02-AMF\\P6 GUI development\\data\\Aturm.sqlite')
cur = conn.cursor()
# Load takeoff, schedule, transport and manufacturing tables; normalize the
# date columns to pandas Timestamps for comparison below.
df_ETOs= pd.read_sql_query('select * from Structural_Column_Material_Takeoff', conn)
df_tasks= pd.read_sql_query('select * from Tasks', conn)
df_tasks['Start_Date'] = pd.to_datetime(df_tasks['Start_Date'])
df_transport = pd.read_sql_query('select * from transport_process', conn)
df_transport['release_for_transport_day'] = pd.to_datetime(df_transport['release_for_transport_day'])
df_manufacture = pd.read_sql_query('select * from manufacturing_process', conn)
df_manufacture['release_for_production_day'] = pd.to_datetime(df_manufacture['release_for_production_day'])
# Fixed "today" taken from row 10 of the task schedule, used as the
# reference date for all status comparisons below.
test_today = df_tasks.loc[10,'Start_Date']
# import pytz
# today_datetime = datetime.datetime.now(pytz.timezone('Europe/Zurich'))
# today_datetime = str(today_datetime)
# today_datetime = pd.to_datetime(today_datetime[:19])
# print(today_datetime > test_today)
# for i in range(len(df_ETOs)): # The for-for-for makes the pandas too slow to read data!!!!!!!!!! breakdown the strutcure into separate pieces
# for j in range(len(df_manufacture)):
# for k in range(len(df_transport)):
# if df_ETOs.loc[i, 'order_ID'] == df_manufacture.loc[j, 'order_ID'] and df_ETOs.loc[i, 'order_ID'] == \
# df_transport.loc[k, 'order_ID']:
# df_ETOs.loc[i, 'rel_transport'] = df_transport.loc[k, 'release_for_transport_day']
# df_ETOs.loc[i, 'rel_manufacture'] = df_manufacture.loc[j, 'release_for_production_day']
# if df_ETOs.loc[i, 'rel_manufacture'] <= test_today and df_ETOs.loc[i, 'rel_transport'] > test_today:
# print('2')
# df_ETOs.loc[i, 'Status'] = 'wait for delivery - store in consolidation center'
# elif df_ETOs.loc[i, 'rel_manufacture'] > test_today:
# print('3')
# df_ETOs.loc[i, 'Status'] = 'wait for manufacture - design coordination'
# elif df_ETOs.loc[i, 'rel_manufacture'] == test_today:
# print('4')
# df_ETOs.loc[i, 'Status'] = 'release for manufacture - design freeze'
# elif df_ETOs.loc[i, 'rel_transport'] == test_today:
# print('5')
# df_ETOs.loc[i, 'Status'] = 'release for delivery'
# elif df_ETOs.loc[i, 'rel_transport'] <= test_today:
# print('6')
# df_ETOs.loc[i, 'Status'] = 'onsite'
# else:
# pass
# else:
# pass
# Attach each order's transport release date to df_ETOs.
# NOTE(review): these nested loops are O(len(df_ETOs) * len(df_transport));
# a pandas merge/map keyed on 'order_ID' would do the same in one pass
# (the commented-out triple loop above was already abandoned for speed).
for i in range(len(df_ETOs)):
    for j in range(len(df_transport)):
        if df_ETOs.loc[i, 'order_ID'] == df_transport.loc[j, 'order_ID']:
            df_ETOs.loc[i, 'rel_transport'] = df_transport.loc[j, 'release_for_transport_day']
        else:
            pass
# Same join for the manufacturing release date.
for i in range(len(df_ETOs)):
    for j in range(len(df_manufacture)):
        if df_ETOs.loc[i, 'order_ID'] == df_manufacture.loc[j, 'order_ID']:
            df_ETOs.loc[i, 'rel_manufacture'] = df_manufacture.loc[j, 'release_for_production_day']
        else:
            pass
# Derive a human-readable status per order from the two release dates
# relative to the reference date. Orders with no match above keep NaN
# dates, fail every comparison and are left without a Status.
for i in range(len(df_ETOs)):
    if df_ETOs.loc[i, 'rel_manufacture'] <= test_today and df_ETOs.loc[i, 'rel_transport'] > test_today:
        df_ETOs.loc[i, 'Status'] = 'wait for delivery - store in consolidation center'
    elif df_ETOs.loc[i, 'rel_manufacture'] > test_today:
        df_ETOs.loc[i, 'Status'] = 'wait for manufacture - design coordination'
    elif df_ETOs.loc[i, 'rel_manufacture'] == test_today:
        df_ETOs.loc[i, 'Status'] = 'release for manufacture - design freeze'
    elif df_ETOs.loc[i, 'rel_transport'] == test_today:
        df_ETOs.loc[i, 'Status'] = 'release for delivery'
    elif df_ETOs.loc[i, 'rel_transport'] <= test_today:
        df_ETOs.loc[i, 'Status'] = 'onsite'
    else:
        pass
|
#!/usr/bin/env python
from optparse import OptionParser
import string
import math
def run(opts, args):
    """Read a matrix of probabilities and write per-row Shannon entropies.

    The first input line holds "rows cols"; every following line is a list
    of probabilities whose base-2 entropy (skipping zero entries) is
    written to the output file as one "%f" value per line.
    """
    with open(opts.inf, 'r') as inf, open(opts.outf, 'w') as outf:
        header_pending = True
        for line in inf:
            fields = line.split()
            if header_pending:
                header_pending = False
                rows = int(fields[0])
                cols = int(fields[1])
                print ('rows=%d, cols=%d' % (rows, cols))
                continue
            entropy = 0
            for field in fields:
                prob = float(field)
                if prob > 0:
                    # Zero-probability entries contribute nothing (0*log0 -> 0).
                    entropy -= prob * math.log(prob, 2)
            outf.write('%f\n' % (entropy))
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option('-i', '--inf', default='topic-cel-prob.dat.final', help='the input probability file')
    parser.add_option('-o', '--outf', default='topic-cel-entropy.dat', help='the output entropy file')
    (opts, args) = parser.parse_args()
    # NOTE: the two statements below use the Python 2 print statement, so
    # this CLI entry point only runs under Python 2 (run() itself is
    # version-neutral).
    print 'options: ', opts
    print 'args: ', args
    run(opts, args)
|
import tensorflow as tf
import time
import os
import matplotlib.pyplot as plt
from datetime import datetime
class Pix2Pix:
    def __init__(self, mode, train_dataset=False, test_dataset=False, LAMBDA=100, epochs=25, checkpoint_dir='',
                 restore_check=False, test_samples='', for_tflite=False):
        """Configure the GAN for one of three modes.

        mode 'train'/'test': builds both networks, optimizers and
        checkpointing, and prepares output dirs and TensorBoard writers.
        Any other mode is treated as production inference: networks are
        built (fixed 256px input when for_tflite) and the latest
        checkpoint is restored.

        LAMBDA weights the L1 reconstruction term in the generator loss.
        """
        self.mode = mode
        self.OUTPUT_CHANNELS = 3
        if self.mode == 'train' or self.mode == 'test':
            self.LAMBDA = LAMBDA
            self.test_ds = test_dataset
            self.loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
            self.generator_optimizer, self.discriminator_optimizer = self.optimizers()
            self.generator = self.Generator()
            self.discriminator = self.Discriminator()
            self.checkpoint, self.checkpoint_prefix = self.create_checkpoints(checkpoint_dir)
            self.epochs = epochs
            self.test_samples = test_samples
            self.save_interval = 1
            if self.mode == 'train':
                if not train_dataset:
                    # Cannot train without data; leave the instance partially built.
                    print('No training dataset supplied for train mode.')
                    return
                self.train_ds = train_dataset
                if restore_check:
                    print(f'The model will be trained for {self.epochs} epochs and will restore last saved checkpoint')
                    try:
                        self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
                    except Exception as e:
                        # Restore failures are reported but not fatal; training starts fresh.
                        print('Error while restoring a checkpoint')
                        print(e)
                else:
                    print(f'The model will be trained for {self.epochs} epochs and will not restore last saved checkpoint')
            else:
                # Test mode: a checkpoint is mandatory; expect_partial silences
                # warnings about optimizer slots absent at inference time.
                self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()
            self.make_dirs()
            self.train_summary_writer = self.writers_tensorboard()
        else:
            # This is PRODUCTION
            # for_tflite pins the input size to 256 (TFLite needs static shapes);
            # otherwise the generator accepts dynamic spatial sizes.
            size = 256 if for_tflite else None
            self.loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
            self.generator_optimizer, self.discriminator_optimizer = self.optimizers()
            self.generator = self.Generator(size)
            self.discriminator = self.Discriminator()
            self.checkpoint, self.checkpoint_prefix = self.create_checkpoints(checkpoint_dir)
            self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()
def make_dirs(self):
if not os.path.exists('pix2pix/output/'):
os.mkdir('pix2pix/output/')
if self.mode == 'train':
if not os.path.exists('pix2pix/output/train/'):
os.mkdir('pix2pix/output/train/')
else:
if not os.path.exists('pix2pix/output/test/'):
os.mkdir('pix2pix/output/test/')
@staticmethod
def downsample(filters, size, apply_batchnorm=True):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
kernel_initializer=initializer, use_bias=False))
if apply_batchnorm:
result.add(tf.keras.layers.BatchNormalization())
result.add(tf.keras.layers.Lambda(lambda x: tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)))
result.add(tf.keras.layers.LeakyReLU())
return result
@staticmethod
def upsample(filters, size, apply_dropout=False):
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
result.add(tf.keras.layers.BatchNormalization())
result.add(tf.keras.layers.Lambda(lambda x: tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)))
if apply_dropout:
result.add(tf.keras.layers.Dropout(0.5))
result.add(tf.keras.layers.ReLU())
return result
    def Generator(self, size=None):
        """Build the U-Net generator.

        Eight downsample blocks encode to a 1x1 bottleneck (for 256px
        input); seven upsample blocks with skip connections decode back,
        and a tanh Conv2DTranspose head emits OUTPUT_CHANNELS channels.
        `size=None` keeps the spatial input size dynamic; a fixed size is
        used for the TFLite export path.
        """
        down_stack = [
            self.downsample(64, 4, apply_batchnorm=False),  # (bs, 128, 128, 64)
            self.downsample(128, 4),  # (bs, 64, 64, 128)
            self.downsample(256, 4),  # (bs, 32, 32, 256)
            self.downsample(512, 4),  # (bs, 16, 16, 512)
            self.downsample(512, 4),  # (bs, 8, 8, 512)
            self.downsample(512, 4),  # (bs, 4, 4, 512)
            self.downsample(512, 4),  # (bs, 2, 2, 512)
            self.downsample(512, 4),  # (bs, 1, 1, 512)
        ]
        up_stack = [
            self.upsample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
            self.upsample(512, 4, apply_dropout=True),  # (bs, 4, 4, 1024)
            self.upsample(512, 4, apply_dropout=True),  # (bs, 8, 8, 1024)
            self.upsample(512, 4),  # (bs, 16, 16, 1024)
            self.upsample(256, 4),  # (bs, 32, 32, 512)
            self.upsample(128, 4),  # (bs, 64, 64, 256)
            self.upsample(64, 4),  # (bs, 128, 128, 128)
        ]
        initializer = tf.random_normal_initializer(0., 0.02)
        last = tf.keras.layers.Conv2DTranspose(self.OUTPUT_CHANNELS, 4,
                                               strides=2,
                                               padding='same',
                                               kernel_initializer=initializer,
                                               activation='tanh')  # (bs, 256, 256, 3)
        concat = tf.keras.layers.Concatenate()
        inputs = tf.keras.layers.Input(shape=[size, size, 3])
        x = inputs
        # Downsampling through the model
        skips = []
        for down in down_stack:
            x = down(x)
            skips.append(x)
        # Bottleneck output is not a skip; pair decoder levels with the
        # encoder levels in reverse order.
        skips = reversed(skips[:-1])
        # Upsampling and establishing the skip connections
        for up, skip in zip(up_stack, skips):
            x = up(x)
            x = concat([x, skip])
        x = last(x)
        return tf.keras.Model(inputs=inputs, outputs=x)
    def Discriminator(self):
        """Build the PatchGAN discriminator.

        Takes the input image and the (real or generated) target image
        concatenated on channels, and outputs a 30x30 grid of per-patch
        real/fake logits.
        """
        initializer = tf.random_normal_initializer(0., 0.02)
        inp = tf.keras.layers.Input(shape=[None, None, 3], name='input_image')
        tar = tf.keras.layers.Input(shape=[None, None, 3], name='target_image')
        x = tf.keras.layers.concatenate([inp, tar])  # (bs, 256, 256, channels*2)
        down1 = self.downsample(64, 4, False)(x)  # (bs, 128, 128, 64)
        down2 = self.downsample(128, 4)(down1)  # (bs, 64, 64, 128)
        down3 = self.downsample(256, 4)(down2)  # (bs, 32, 32, 256)
        zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (bs, 34, 34, 256)
        conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                      kernel_initializer=initializer,
                                      use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)
        batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
        leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
        zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)
        # Final 1-channel conv produces the per-patch logits (no activation;
        # the loss object is configured with from_logits=True).
        last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                      kernel_initializer=initializer)(zero_pad2)  # (bs, 30, 30, 1)
        return tf.keras.Model(inputs=[inp, tar], outputs=last)
def discriminator_loss(self, disc_real_output, disc_generated_output):
real_loss = self.loss_object(tf.ones_like(disc_real_output), disc_real_output)
generated_loss = self.loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
total_disc_loss = real_loss + generated_loss
return total_disc_loss
def generator_loss(self, disc_generated_output, gen_output, target):
gan_loss = self.loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
# mean absolute error
l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
total_gen_loss = gan_loss + (self.LAMBDA * l1_loss)
return total_gen_loss
@staticmethod
def optimizers():
generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
return generator_optimizer, discriminator_optimizer
def generate_images(self, model, test_input, tar, epoch, ex):
    """Render input / ground-truth / prediction side by side and save a PNG.

    Output goes to pix2pix/output/train or .../test depending on self.mode;
    `epoch` and `ex` (example index) make the file name unique.
    """
    # the training=True is intentional here since
    # we want the batch statistics while running the model
    # on the test dataset. If we use training=False, we will get
    # the accumulated statistics learned from the training dataset
    # (which we don't want)
    prediction = model(test_input, training=True)
    plt.figure(figsize=(15, 15))
    display_list = [test_input[0], tar[0], prediction[0]]
    title = ['Input Image', 'Ground Truth', 'Predicted Image']
    for i in range(3):
        plt.subplot(1, 3, i + 1)
        plt.title(title[i])
        # getting the pixel values between [0, 1] to plot it
        # (images are assumed to be in [-1, 1] — TODO confirm against the loader)
        plt.imshow(display_list[i] * 0.5 + 0.5)
        plt.axis('off')
    if self.mode == 'train':
        plt.savefig(f'pix2pix/output/train/salida {epoch}_{ex}.png')
    else:
        plt.savefig(f'pix2pix/output/test/salida {epoch}_{ex}.png')
    # Close the figure to avoid accumulating open matplotlib figures.
    plt.close()
@tf.function
def train_step(self, input_image, target):
    """One optimization step for generator and discriminator.

    Runs both models under separate gradient tapes, computes the pix2pix
    losses, and applies one Adam update to each network. Compiled with
    @tf.function for graph execution. Returns (gen_loss, disc_loss).
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = self.generator(input_image, training=True)
        # Discriminator sees (input, real target) and (input, generated) pairs.
        disc_real_output = self.discriminator([input_image, target], training=True)
        disc_generated_output = self.discriminator([input_image, gen_output], training=True)
        gen_loss = self.generator_loss(disc_generated_output, gen_output, target)
        disc_loss = self.discriminator_loss(disc_real_output, disc_generated_output)
    # Gradients are taken outside the tape context (tapes are closed here).
    generator_gradients = gen_tape.gradient(gen_loss, self.generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
    self.generator_optimizer.apply_gradients(zip(generator_gradients, self.generator.trainable_variables))
    self.discriminator_optimizer.apply_gradients(zip(discriminator_gradients, self.discriminator.trainable_variables))
    return gen_loss, disc_loss
def fit(self):
    """Training loop over self.epochs.

    Per epoch: train on every batch, log the LAST batch's losses to
    TensorBoard, render preview images for a few fixed test samples, and
    checkpoint every `save_interval` epochs (plus once at the very end).
    """
    for epoch in range(self.epochs):
        start = time.time()
        # Train
        for input_image, target in self.train_ds:
            gen_loss, disc_loss = self.train_step(input_image, target)
        # NOTE(review): these are the losses of the final batch only, written
        # once per epoch at step=epoch.
        with self.train_summary_writer.as_default():
            tf.summary.scalar('generator loss', gen_loss, step=epoch)
            tf.summary.scalar('discriminator loss', disc_loss, step=epoch)
        # Test on the same image so that the progress of the model can be
        # easily seen.
        for examples, sample in zip(self.test_ds.take(self.test_samples), range(self.test_samples)):
            example_input = examples[0]
            example_target = examples[1]
            self.generate_images(self.generator, example_input, example_target, epoch, sample)
        # saving (checkpoint) the model every `save_interval` epochs
        if (epoch + 1) % self.save_interval == 0:
            self.checkpoint.save(file_prefix=self.checkpoint_prefix)
            print('Model saved\n')
        print(f'Time taken for epoch {epoch + 1} is {time.time() - start} sec\n')
    # Final checkpoint regardless of the interval.
    self.checkpoint.save(file_prefix=self.checkpoint_prefix)
def create_checkpoints(self, checkpoint_dir):
    """Build the training checkpoint.

    Tracks both models and both optimizers so training can be resumed.
    Returns (checkpoint, checkpoint_prefix) with prefix checkpoint_dir/ckpt.
    """
    prefix = os.path.join(checkpoint_dir, "ckpt")
    ckpt = tf.train.Checkpoint(
        generator_optimizer=self.generator_optimizer,
        discriminator_optimizer=self.discriminator_optimizer,
        generator=self.generator,
        discriminator=self.discriminator,
    )
    return ckpt, prefix
def test_model(self):
    """Render predictions for five test batches to the output directory."""
    for index, (inp, tar) in enumerate(self.test_ds.take(5)):
        self.generate_images(self.generator, inp, tar, 0, index)
@staticmethod
def writers_tensorboard():
    """Create a TensorBoard summary writer under a timestamped log directory.

    Logs go to pix2pix/logs/<YYYYmmdd-HHMMSS>/train so each run is separate.
    """
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = 'pix2pix/logs/' + stamp + '/train'
    return tf.summary.create_file_writer(log_dir)
from math import pow as p
x = int(input("Enter the value for x: "))
answer = 3 * p(x, 5) + 2 * p(x, 4) - 5 * p(x, 3) - p(x, 2) + 7 * x - 6
print(f"The answer is {answer:.0f}")
|
#!/usr/bin/env python
# Create a new FE analysis job from a template
# J.Cugnoni, CAELinux.com, 2005-2013
from Tkinter import *
from tkCommonDialog import *
from tkMessageBox import *
from tkFileDialog import *
import os
import os.path
import sys
# Absolute path to the ASTK launcher used to open the generated profile.
astk_bin_path="/opt/aster113/bin/astk"
templateASTK="""
etude,fich,3,FR F
opt_val,rep_dex _VIDE
etude,fich,3,UL 8
etude,fich,2,type mess
etude,fich,4,donnee 0
option,nbmaxnook 1
etude,fich,2,resultat 1
nom_fich_export _VIDE
option,rep_dex 0
etude,fich,4,compress 0
etude oui
debug 0
opt_val,cpresok RESNOOK
forlib_delete non
etude,fich,4,serv Local
etude,fich,3,donnee 0
serv_fich_export -1
surcharge,nbfic 0
etude,fich,6,FR F
path_etude [__prjdir__]
option,rep_outils 0
etude,fich,6,UL 80
etude,fich,3,resultat 1
option,cpresok 1
etude,fich,5,compress 0
etude,fich,1,FR F
etude,fich,1,serv Local
consult_supprimer non
memoire 512
etude,fich,1,UL 20
onglet_actif etude
asquit non
etude,fich,2,donnee 0
option,classe 1
etude,fich,4,type erre
consult_a_corriger non
ident non
etude,fich,4,resultat 1
etude,fich,6,compress 0
etude,fich,1,donnee 1
rex non
etude,fich,1,type libr
etude,fich,1,nom ./[__prjname__]mesh.med
suivi_interactif 1
path_sources _VIDE
pre_eda non
etude,fich,4,FR F
forlib_create non
etude,fich,4,UL 9
etude,fich,3,nom ./[__prjname__].resu
serv_tests -1
etude,fich,6,serv Local
etude,fich,5,nom ./[__prjname__].base
make_etude run
option,depart 1
nom_profil [__astkfile__]
args _VIDE
etude,fich,5,resultat 1
etude,fich,0,donnee 1
opt_val,mem_aster _VIDE
etude,fich,3,serv Local
etude,fich,0,compress 0
option,mem_aster 1
asdeno non
serv_surcharge -1
serv_sources -1
surcharge non
etude,fich,6,type rmed
etude,fich,0,serv Local
M_1 oui
serv_etude -1
M_2 non
etude,fich,6,resultat 1
etude,fich,2,FR F
opt_val,rep_outils _VIDE
M_3 non
etude,fich,2,UL 6
path_surcharge _VIDE
consult non
M_4 non
asno non
etude,fich,3,type resu
etude,fich,1,compress 0
opt_val,ncpus 1
emis_sans non
serveur localhost
opt_val,classe _VIDE
opt_val,dbgjeveux _VIDE
option,ncpus 1
etude,fich,0,type comm
opt_val,facmtps 1
etude,fich,0,resultat 0
opt_val,rep_mat _VIDE
special _VIDE
tests,nbfic 0
temps 240
option,dbgjeveux 0
etude,fich,5,FR R
etude,fich,5,serv Local
asrest non
batch 0
etude,fich,5,UL 0
etude,fich,2,compress 0
option,facmtps 1
serv_profil -1
etude,fich,6,donnee 0
etude,fich,0,FR F
option,rep_mat 0
etude,fich,0,UL 1
etude,fich,2,serv Local
etude,fich,0,nom ./[__prjname__].comm
asverif non
etude,fich,2,nom ./[__prjname__].mess
opt_val,depart _VIDE
etude,fich,1,resultat 0
sources,nbfic 0
etude,fich,5,donnee 0
etude,fich,4,nom ./[__prjname__].erre
tests non
noeud localhost
etude,fich,5,type base
etude,fich,3,compress 0
emis_prof non
etude,fich,6,nom ./[__prjname__]res.med
etude,nbfic 7
agla non
opt_val,nbmaxnook 5
version STA11.3
path_tests _VIDE
nom_profil %s
path_etude %s
etude,fich,0,nom ./%s.comm
etude,fich,1,nom ./%smesh.med
etude,fich,2,nom ./%s.mess
etude,fich,3,nom ./%s.resu
etude,fich,4,nom ./%s.erre
etude,fich,5,nom ./%s.base
etude,fich,6,nom ./%sres.med
"""
class CreateJobApp(Frame):
    """Tk dialog that creates a new Code_Aster FE analysis job from a template.

    Collects a project name, base directory, MED mesh file and COMM template;
    on GO it creates the project directory, copies the inputs, writes an ASTK
    profile from templateASTK and launches astk on it. (Python 2 / Tkinter.)
    """

    def createWidgets(self):
        """Lay out the labelled entry rows, browse buttons and the GO button."""
        irow=0
        # prjname
        irow=irow+1
        self.prjlbl=Label(self,text="Project Name: ")
        self.prjname=Entry(self,width=40)
        self.prjlbl.grid(row=irow,column=1)
        self.prjname.grid(row=irow,column=2)
        # basedir
        irow=irow+1
        self.basedirlbl=Label(self,text="Base directory: ")
        self.basedirname=Entry(self,width=40)
        self.basedirlbl.grid(row=irow,column=1)
        self.basedirname.grid(row=irow,column=2)
        self.basedirbtn=Button(self,text="...",command=self.selectBaseDir)
        self.basedirbtn.grid(row=irow,column=3)
        # MED mesh file
        irow=irow+1
        self.meshlbl=Label(self,text="MED Mesh File: ")
        self.meshname=Entry(self,width=40)
        self.meshlbl.grid(row=irow,column=1)
        self.meshname.grid(row=irow,column=2)
        self.meshbtn=Button(self,text="...",command=self.selectMesh)
        self.meshbtn.grid(row=irow,column=3)
        # template file
        irow=irow+1
        self.tpllbl=Label(self,text="Template File: ")
        self.tplname=Entry(self,width=40)
        self.tpllbl.grid(row=irow,column=1)
        self.tplname.grid(row=irow,column=2)
        self.tplbtn=Button(self,text="...",command=self.selectTpl)
        self.tplbtn.grid(row=irow,column=3)
        # buttons
        irow=irow+1
        self.okbtn=Button(self,text="GO",command=self.validate)
        self.okbtn.grid(row=irow,column=1)

    def __init__(self,master=None):
        # Cached entry values, refreshed by getvars() before validation.
        self.vars={"prjname":"","basedirname":"","meshname":"","tplname":""}
        Frame.__init__(self,master)
        self.pack()
        self.createWidgets()

    def selectBaseDir(self):
        """Browse for the base directory and fill its entry."""
        titl="Select a base directory:"
        self.setText(self.basedirname,askdirectory(title=titl))

    def selectMesh(self):
        """Browse for a MED mesh file and fill its entry."""
        self.setText(self.meshname,askopenfilename(title="Select a MED mesh file",filetypes=(("MED file ","*.med"),)))

    def selectTpl(self):
        """Browse for a COMM template (defaults to Templates/ next to this script)."""
        tpldir=os.path.join(os.path.dirname(sys.argv[0]),"Templates")
        self.setText(self.tplname,askopenfilename(initialdir=tpldir,title="Select a Template COMM file",filetypes=(("COMM file ","*.comm"),)))

    def getvars(self):
        """Copy the current entry texts into self.vars."""
        self.vars["prjname"]=self.prjname.get()
        self.vars["basedirname"]=self.basedirname.get()
        self.vars["meshname"]=self.meshname.get()
        self.vars["tplname"]=self.tplname.get()

    def setText(self,obj,txt):
        """Replace the contents of Entry widget `obj` with `txt`."""
        obj.delete(0,65535)
        obj.insert(0,txt)

    def copyfile(self,filein,fileout):
        # Whole-file text-mode copy; reads everything into memory.
        # NOTE(review): MED meshes are binary — text mode works on Unix but
        # would corrupt the file on Windows; confirm target platform.
        fd1=open(filein,"r")
        fd2=open(fileout,"w")
        fd2.write(fd1.read())
        fd1.close()
        fd2.close()

    def validate(self):
        """Create the project dir, copy inputs, write the ASTK profile, launch astk."""
        # retrieve variables
        self.getvars()
        bdir=self.vars["basedirname"].strip()
        prjname=self.vars["prjname"].strip()
        meshname=self.vars["meshname"].strip()
        tplname=self.vars["tplname"].strip()
        prjdir=os.path.join(bdir,prjname)
        if os.path.exists(prjdir):
            showerror(title="Error",message="Error, project name allready exists in base directory")
        else:
            # project directory
            os.makedirs(prjdir)
            # file names
            commfile=os.path.join(prjdir,prjname + ".comm")
            messfile=os.path.join(prjdir,prjname + ".mess")
            errefile=os.path.join(prjdir,prjname + ".erre")
            resufile=os.path.join(prjdir,prjname + ".resu")
            mmedfile=os.path.join(prjdir,prjname + "mesh.med")
            rmedfile=os.path.join(prjdir,prjname + "res.med")
            basefile=os.path.join(prjdir,prjname + ".base")
            astkfile=os.path.join(prjdir,prjname + ".astk")
            # copy files
            self.copyfile(meshname,mmedfile)
            self.copyfile(tplname,commfile)
            # create ASTK profile; the template's trailing %s slots are filled
            # with (astkfile, prjdir) followed by prjname seven times.
            fd=open(astkfile,"w")
            fd.write(templateASTK % ((astkfile,prjdir,) + (prjname,)*7))
            fd.close()
            # show message and exit
            showinfo(title="Success",message=("New FE analysis project %s created sucessfully. We open it now with ASTK to continue your analysis." % astkfile))
            # Launch astk in the background on the new profile.
            os.system("%s --profil %s &"%(astk_bin_path,astkfile))
            #self.doquit()
# main: build the dialog and hand control to Tk's event loop
app=CreateJobApp()
top=app.master
top.wm_title("Create New Aster Job")
app.mainloop()
|
def int_from_bytes(bytes_, byteorder):
    """Convert a byte string to an unsigned integer.

    bytes_    : str of raw bytes (Python 2 style; each char holds one byte)
    byteorder : 'little' or 'big'

    Raises ValueError for any other byteorder (the original left the
    iterator unbound, producing a confusing NameError).
    """
    if byteorder == 'little':
        ordered = iter(bytes_)
    elif byteorder == 'big':
        # reversed() requires a sequence, not an iterator — the original
        # reversed(iter(bytes_)) raised TypeError on every 'big' call.
        ordered = reversed(bytes_)
    else:
        raise ValueError("byteorder must be 'little' or 'big'")
    return sum(ord(v) << i * 8 for i, v in enumerate(ordered))
def int_to_bytes(n, length, order):
    """Convert unsigned integer *n* to a byte string of *length* chars.

    order: 'little' for least-significant byte first; anything else is
    treated as big-endian (preserving the original's behaviour).
    Uses range() instead of the Python-2-only xrange() so the helper runs
    under both interpreters.
    """
    indexes = range(length) if order == 'little' else reversed(range(length))
    return ''.join(chr(n >> i * 8 & 0xff) for i in indexes)
|
# %%R
import pandas as pd
import numpy
import sys,os
# import ExponentialSmoothing as es
from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing, Holt
import scipy.stats as st
import rpy2
import rpy2.robjects as r
import rpy2.robjects.numpy2ri
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import GridSearchCV
rpy2.robjects.numpy2ri.activate()
import DataHandler as dh
from cascade import *
sys.path.append(
os.path.join(
os.path.dirname(__file__),
'..',
'utils'
)
)
from util import *
from Padronizar import *
class CascadeArima:
    """Hybrid ARIMA + cascade-correlation forecaster.

    An ARIMA model (fitted in R through rpy2/forecast) captures the linear
    structure of the series; a Cascade network is trained on the min-max
    normalised ARIMA residuals, and the two predictions are summed to form
    the final forecast.
    """

    def __init__(self,data,dimension,neurons,testNO, cascadeNumHiddenNodes, lambdaValue):
        # data: full time series; dimension: lag-window (embedding) size
        self.data=data
        self.dimension=dimension
        # fixed 60/40 train/validation split of the non-test portion
        self.trainset=0.6
        self.valset=0.4
        # NOTE(review): `neurons` is stored but never read here — the cascade
        # size comes from cascadeNumHiddenNodes; confirm which is authoritative.
        self.neurons=neurons
        self.testNO=testNO
        self.cascadeNumHiddenNodes = cascadeNumHiddenNodes
        self.lambdaValue = lambdaValue

    def start(self):
        """Run the full pipeline; returns test/validation metrics and predictions."""
        dh2=dh.DataHandler(self.data,self.dimension,self.trainset,self.valset,self.testNO)
        train_set, train_target, val_set, val_target, test_set, test_target, arima_train, arima_val, arima_test= dh2.redimensiondata(self.data,self.dimension,self.trainset,self.valset,self.testNO)
        # self.checkDatadivision(train_set, val_set, test_set, arima_train, arima_val, arima_test)
        # traindats = train + validation series, used later for normalisation
        traindats=[]
        traindats.extend(arima_train)
        traindats.extend(arima_val)
        r.r('library(forecast)')
        arima = r.r('auto.arima')  # R handle: automatic ARIMA order selection
        arimaTest=r.r('Arima')
        ordem = r.r('c')
        #arima_train.extend(arima_val)
        numeric = r.r('as.numeric')
        fit = arima(numeric(arima_train))
        fitted = r.r('fitted')  # R handle: extract fitted values
        predTreino = fitted(fit)  # fitted values on arima_train
        # Re-apply the trained model (fixed orders/coefficients) to val and test.
        fit2 = arimaTest(numeric(arima_val),model=fit)
        fit3 = arimaTest(numeric(arima_test), model=fit)
        predVal = fitted(fit2)  # validation prediction
        predTest = fitted(fit3)  # test prediction
        predTudo=[]
        predTudo.extend(predTreino)
        predTudo.extend(predVal)
        residualTreino=numpy.array(arima_train)-(predTreino)
        predTudo.extend(predTest)
        residual=self.data-predTudo
        # Min-max normalise ALL residuals using the TRAINING residual range only.
        residualNorm= (residual-min(residualTreino))/(max(residualTreino)-min(residualTreino))
        train_set2, train_target2, val_set2, val_target2, test_set2, test_target2, arima_train2, arima_val2, arima_test2 = dh2.redimensiondata(
            residualNorm, self.dimension, self.trainset, self.valset,self.testNO)
        # Train the cascade on train + validation residual windows combined.
        train_set2.extend(val_set2)
        train_target2.extend(val_target2)
        num_hidden_nodes = self.cascadeNumHiddenNodes
        cascade: Cascade = Cascade(num_hidden_nodes, self.lambdaValue)
        # NOTE(review): validation data handed to the cascade is the RAW series
        # (val_set/val_target) while fit() receives the residual series —
        # possibly should be val_set2/val_target2; confirm intent.
        cascade.X_val, cascade.y_val = addBias(val_set), val_target
        cascade.fit(addBias(train_set2),train_target2)
        predRNA = cascade.predict(addBias(test_set2))
        predRNAVal = cascade.predict(addBias(val_set2))
        # print(predRNA)
        # predRNA = np.array(predRNA)[:,0]
        # De-normalise network outputs back to residual scale.
        predRNAD=predRNA*(max(residualTreino)-min(residualTreino))+min(residualTreino)
        predRNADVal=predRNAVal*(max(residualTreino)-min(residualTreino))+min(residualTreino)
        # Hybrid forecast = ARIMA prediction + predicted residual.
        predFinal=numpy.asarray(predTest)+numpy.asarray(predRNAD)
        predFinalVal=numpy.asarray(predVal)+numpy.asarray(predRNADVal)
        # predFinal=numpy.asarray(predTest)
        # Normalise predictions and targets by the train+val series range so
        # the error metrics are scale-free.
        predFinalN=(numpy.asarray(predFinal)-min(traindats))/(max(traindats)-min(traindats))
        predFinalNVal=(numpy.asarray(predFinalVal)-min(traindats))/(max(traindats)-min(traindats))
        testTarget=(numpy.asarray(arima_test)-min(traindats))/(max(traindats)-min(traindats))
        valTarget=(numpy.asarray(arima_val)-min(traindats))/(max(traindats)-min(traindats))
        mapeTest, mseTest, rmseTest = calculateResidualError(testTarget, predFinalN)
        mapeVal, mseVal, rmseVal = calculateResidualError(valTarget, predFinalNVal)
        # mapeVal, mseVal, rmseVal = calculateResidualError(valTarget, predFinalNVal)
        return mapeTest, mseTest, rmseTest, mapeVal, mseVal, rmseVal, cascade.optimalNumHiddenNodes, valTarget, predFinalNVal, testTarget, predFinalN

    def checkDatadivision(self,train_set, val_set, test_set, arima_train, arima_val, arima_test):
        """Debug helper: print the shape of every split, then abort the process."""
        print("numpy.array(train_set).shape")
        print(numpy.array(train_set).shape)
        print("numpy.array(val_set).shape")
        print(numpy.array(val_set).shape)
        print("numpy.array(test_set).shape")
        print(numpy.array(test_set).shape)
        print("numpy.array(arima_train).shape")
        print(numpy.array(arima_train).shape)
        print("numpy.array(arima_val).shape")
        print(numpy.array(arima_val).shape)
        print("numpy.array(arima_test).shape")
        print(numpy.array(arima_test).shape)
        # Intentional hard stop: this helper exists only for one-off inspection.
        sys.exit(-1)
|
def leapyearcheck(n):
    """Print whether year *n* is a leap year (Gregorian rule).

    Leap when divisible by 4, except centuries, which must be divisible
    by 400. Messages are printed in Chinese, matching the original.
    """
    is_leap = n % 4 == 0 and (n % 100 != 0 or n % 400 == 0)
    print('是闰年' if is_leap else '不是闰年')
|
# WTForms / CSRF protection settings
CSRF_ENABLED = True
SECRET_KEY = 'a-different-secret-key' #Sample, change this when deploying code.
# Database config: SQLite file next to this module, plus the migration repo.
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
# Mail server settings: local unauthenticated SMTP on the standard port.
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USERNAME = None
MAIL_PASSWORD = None
# Administrator list (recipients of error notifications).
ADMINS = ['sebastian@sebastianmerz.com'] #Again, change this if you fork. I don't want your emails
|
import argparse
import sys
import pyjq
import json
import subprocess,time
from mysolatcli import SolatAPI
from yaspin import yaspin
from argparse import ArgumentParser
from tabulate import tabulate
# Module-level SolatAPI client; created lazily by init_api() from main().
api = None
# Shared spinner shown while network requests are in flight.
sp = yaspin(text="Fetching Data..", color="green")
def init_api():
    """Initialise the module-level SolatAPI client (called once from main())."""
    global api
    api = SolatAPI()
def format_value(val):
    """
    A helper function to format fields for table output.
    Dicts become multi-line "key: value" strings; other values pass through.
    """
    if not isinstance(val, dict):
        return val
    lines = ["{}: {}".format(key, item) for key, item in val.items()]
    return "\n".join(lines)
def notify(waktu):
subprocess.call("""echo "ffplay ~/bin/azan.mp3" | at """ + waktu, shell=True)
def get_zon(lokasi):
    """Resolve a location name (e.g. 'Gombak') to its prayer-zone code.

    Exits the process with a message when the location matches no zone.
    """
    sp.start()
    zon = api.get_zones()
    # jq query: keep entries whose .lokasi equals the requested location.
    zone = pyjq.all(".results[]|select(.lokasi == $lokasi)", zon, vars={"lokasi": lokasi})
    if zone == []:
        sp.red.fail("✘")
        print("Lokasi not in any zone. Please try with another location")
        sys.exit()
    sp.ok()
    return zone[0]['zone']
def data_for_jadual(data, fields):
    """Project each record in *data* onto *fields*, formatting values for tabulate."""
    return [[format_value(record[field]) for field in fields] for record in data]
def jadual_lokasi(args):
    """Handle the `jadual` subcommand: show (or schedule azan for) prayer times.

    Fetches a week of times with -m/--minggu, otherwise today's; prints a
    table unless -n/--notify was given, in which case azan is queued instead.
    """
    lok = get_zon(args.lokasi.title())
    data = pyjq.all(".prayer_times[]|{tarikh:.date,subuh:.subuh,zohor:.zohor,asar:.asar,maghrib:.maghrib,isyak:.isyak}", api.get_week(lok)) if args.minggu else pyjq.one(".|[{tarikh:.prayer_times.date,subuh:.prayer_times.subuh,zohor:.prayer_times.zohor,asar:.prayer_times.asar, maghrib:.prayer_times.maghrib,isyak:.prayer_times.isyak}]", api.get_today(lok))
    # 'tarikh' is always shown. The original computed `fields`/`data_format`
    # only inside `if args.fields:`, leaving them undefined (NameError at the
    # print below) whenever the list was empty; the `or` fallback was also
    # dead because ["tarikh"] + list is always truthy.
    fields = ["tarikh"] + (args.fields or ["subuh", "zohor", "asar", "maghrib", "isyak"])
    data_format = data_for_jadual(data, fields)
    if args.notify:
        for waktu in args.notify:
            notify(data[0][waktu])
    else:
        print(tabulate(data_format, fields, tablefmt="fancy_grid"))
def info_zon(args, fields=["zone","negeri","lokasi"]):
    """Handle the `zon` subcommand.

    Without -z: print a table of zones (optionally filtered to one state).
    With -z: print today's details for that zone code.
    NOTE(review): `fields` is a mutable default list — safe only while this
    function never mutates it (the -z branch rebinds, not mutates).
    """
    def jadual_negeri(negeri):
        # NOTE(review): the `negeri` parameter is ignored — the closure reads
        # args.negeri directly; confirm before refactoring.
        fetch_state = api.get_negeri(args.negeri) if args.negeri else api.get_negeri()
        states = pyjq.one(".states", fetch_state)
        myzone = []
        sp.start()
        for i in range(len(states)):
            fetch_zon = api.get_negeri(str(states[i]))
            myzone.append(pyjq.all(".results[]", fetch_zon))
            sp.hide()
            sp.write(states[i] + "✅")
        sp.ok()
        # Flatten the per-state lists into one list of zone dicts.
        zon_formatted = pyjq.all(".[][]", myzone)
        data_format = data_for_jadual(zon_formatted,fields)
        print(tabulate(data_format,fields, tablefmt="fancy_grid"))
    if args.zonkod is None:
        jadual_negeri(args.negeri)
    else:
        sp.start()
        fetch_zon = api.get_today(args.zonkod)
        data = pyjq.one("""
        .|{zone,tarikh:.prayer_times.date,locations,azan:{
        subuh: .prayer_times.subuh,
        zohor: .prayer_times.zohor,
        asar: .prayer_times.asar,
        maghrib:.prayer_times.maghrib,
        isyak: .prayer_times.isyak}}
        """,fetch_zon)
        # Two-column key/value layout for the single-zone view.
        fields = data.keys()
        vals = list(map(lambda x: format_value(data[x]), fields))
        items = list(zip(fields, vals))
        sp.ok()
        print(tabulate(items, tablefmt="fancy_grid"))
def show_help(parser, command=None):
    """Print usage for *command* (or the whole CLI) and let argparse exit.

    Appends '-h' unless the user already asked for help on the real argv.
    """
    argv = [command] if command is not None else []
    if "-h" not in sys.argv and "--help" not in sys.argv:
        argv.append('-h')
    print("\n")
    parser.parse_args(argv)
def parse_args():
    """
    Setup the argument parser.
    The parser is setup to use subcommands so that each command can be extended
    in the future with its own arguments. Returns (args, parser).

    (A stray no-op `ArgumentParser()` call that built and discarded a parser
    was removed.)
    """
    parser = ArgumentParser(
        prog="mysolatcli",
        description="Simple CLI tools for Malaysia Prayer Time"
    )
    parser.set_defaults(command=None)
    command_parsers = parser.add_subparsers(title="commands", prog="mysolatcli")
    jadual_parser = command_parsers.add_parser("jadual", help="Prayer time by location/state")
    jadual_parser.add_argument("-l","--lokasi", required=True, metavar="lokasi", type=str, help="Show table based on location (Ex: gombak)")
    jadual_parser.add_argument("-m","--minggu", action="store_true", help="Print out prayer time for week")
    jadual_parser.add_argument("-f", "--fields", metavar="field", default=["subuh","zohor","asar","maghrib","isyak"], nargs="+", type=str, help="only print this value (Ex: zohor isyak)")
    jadual_parser.add_argument("-n", "--notify", metavar="waktu", nargs="+", type=str, help="play azan on prayer time (Ex: zohor isyak)")
    jadual_parser.set_defaults(func=jadual_lokasi, command="jadual")
    zon_parser = command_parsers.add_parser("zon", help="Info for zones")
    zon_parser.add_argument("-n","--negeri", type=str, help="Show zone in the states")
    zon_parser.add_argument("-z","--zonkod", type=str, help="Print info for zone")
    zon_parser.set_defaults(func=info_zon, command="zon")
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse signals bad usage via SystemExit; show contextual help and
        # exit. (The original bare `except:` also swallowed KeyboardInterrupt.)
        subcommand = None
        if len(sys.argv) >= 2:
            subcommand = sys.argv[1]
        show_help(parser, subcommand)
        sys.exit(1)
    return args, parser
def main():
    """CLI entry point: parse arguments, then dispatch to the chosen subcommand."""
    parsed, cli = parse_args()
    # No subcommand given: show help and exit non-zero.
    if parsed.command is None:
        cli.print_help()
        sys.exit(1)
    init_api()
    parsed.func(parsed)
# Run the CLI only when executed as a script (not on import).
if __name__=="__main__":
    main()
|
from telethon import events
from telethon.tl.custom.message import Message
from res import algo
from res.pkg import *
@client.on(events.NewMessage(incoming=True, func=post_photo_filter))
async def posts_handler(event):
    """Repost incoming channel photos, skipping perceptual near-duplicates.

    Downloads the photo, computes an 8-bit image hash, and compares it with
    stored hashes. On a near-match (diff <= 1) an admin is notified and the
    post is dropped; otherwise it is reposted to the main channel and its
    hash recorded in the DB.
    """
    post = event.message
    from_channel = await event.get_sender()
    reference = 'splash.jpg'
    # Serialise the whole download/compare/post sequence across handlers,
    # since they all share the same `reference` file on disk.
    async with stuff_lock:
        if os.path.exists(reference):
            os.remove(reference)
        await client.download_media(message=event.message, file=reference.split('.')[0])
        image_hash = algo.calc_im_hash(reference, bit=8)
        hash_data = await DB.get_hash_data(config.algo_limit)
        if hash_data:
            for b_hash in hash_data:
                diff = algo.compare_hash(image_hash, b_hash[0])
                # diff <= 1 -> treat as a repost of an already-published image.
                if diff <= 1:
                    await client.send_message(config.admins[0],
                                              message=f'Повтор выше от https://t.me/{from_channel.username}/{post.id}\n'
                                                      f'Наш от https://t.me/{config.MainChannel.USERNAME}/'
                                                      f'{b_hash[1]}\n'
                                                      f'Результат: {diff}', file=post)
                    os.remove(reference)
                    logging.warning(f'Repeat: https://t.me/{config.MainChannel.USERNAME}/{b_hash[1]} '
                                    f'and https://t.me/{from_channel.username}/{post.id} Diff: {diff}')
                    return
        posted = await client.send_message("@" + config.MainChannel.USERNAME, file=post, )
        await DB.add_image(image_hash, posted.id)
        logging.warning(f'Posted pic as {posted.id} from: https://t.me/{from_channel.username}/{post.id}')
@client.on(events.NewMessage(incoming=True))
async def debug_income_handler(event):
    # Intentionally empty: placeholder hook for inspecting all incoming messages.
    pass
@client.on(events.NewMessage(chats=config.admins, func=lambda e: e.message.message.startswith(Commands._prefix)))
async def commands_handler(event):
    """Dispatch admin messages that start with the command prefix.

    Looks the command name up in Commands.__dict__, awaits it when it is a
    coroutine function, and replies with the CommandResults payload. Raises
    StopPropagation so later handlers never see command messages.
    """
    msg: Message = event.message
    arguments = msg.message.split(' ')
    # First token minus the prefix character is the command name.
    command = arguments.pop(0)[1:]
    get_command = Commands.__dict__.get(command)
    if get_command:
        if asyncio.iscoroutinefunction(get_command):
            result: CommandResults = await get_command(*arguments)
        else:
            result: CommandResults = get_command(*arguments)
        await client.send_message(msg.from_id,
                                  message=result.text,
                                  file=result.file,
                                  parse_mode=result.parse_mode)
    else:
        await event.reply('Неизвестная команда')
    raise events.StopPropagation
@client.on(events.NewMessage(incoming=True, func=lambda e: e.is_private))
async def message_handler(event):
    """Relay private messages to the admins; let admins answer back.

    When an admin replies to a relayed message, the original sender's chat id
    is parsed from the '<ID>...<ID>' marker embedded in it and the reply text
    is forwarded to that chat.
    """
    msg: Message = event.message
    who = await event.get_sender()
    if msg.reply_to_msg_id and msg.from_id in config.admins:
        mes: Message = await client.get_messages(msg.from_id, ids=msg.reply_to_msg_id)
        try:
            chat_id = int(mes.text.split('<ID>', 2)[1])
            await client.send_message(chat_id, msg.text)
        except IndexError:
            # Replied-to message carries no <ID> marker: nothing to forward.
            pass
        return
    message_contain = f"Сообщение от `{who.first_name}`, <ID>{who.id}<ID>\n\n{msg.text}"
    if msg.text or msg.file:
        for admin in config.admins:
            # config.admins may contain non-int entries; only ints get the relay
            # (grounded by this explicit type check).
            if type(admin) == int:
                await client.send_message(admin, message_contain, file=msg.file)
|
"""
Author: Lori White
Purpose: Showing how to use the python debugger.
"""
# import json
name = input("What's your name? ")
print("Hello " + name + ".")
age = input("How old are you? ")
print(name + " is " + str(age) + " years old.")
|
import os
import pandas as pd
import numpy as np
from data_helper import label_encoding, label_transform
from data_helper import setup_pandas
from data_helper import calculate_and_add_correctness_ratio
from data_helper import train_val_test_split
from data_helper import get_one_user_data
from data_helper import print_info
from data_helper import setup_seed
def pre_process(root_dir, filename):
    """Preprocess a raw Assistments-style interaction CSV.

    Reads <root_dir>/<filename>.csv, cleans and sorts it, label-encodes
    user/skill/problem ids, builds a per-user sequence representation
    (users with <= 15 interactions are dropped), and writes the full
    preprocessed file plus shuffled train/val/test splits back to root_dir.
    """
    # TODO 1: Read
    print('start to process the file : ',filename)
    df = pd.read_csv(
        os.path.join(root_dir, filename) + '.csv',
        header=0,
        usecols=['startTime', 'studentId', 'correct', 'skill','problemId'],
    )
    print('successfully read the file :', filename)
    # TODO 2 : Dropna , DropDupliates, Sort, ResetIndex
    df = df.dropna().drop_duplicates()
    # Chronological order within each student is required for sequence building.
    df = df.sort_values(by=['studentId', 'startTime'])
    df = df.reset_index(drop=True)
    # TODO 3 : LabelTransform: user_id, skill_id, problem_id
    df['user_id'] = label_encoding(df['studentId'])
    df['skill_id'] = label_encoding(df['skill'])
    df['problem_id'] = label_encoding(df['problemId'])
    skill_num = len(df['skill_id'].unique())
    problem_num = len(df['problem_id'].unique())
    print('skill num',skill_num)
    print('problem num',problem_num)
    df = df[['user_id','skill_id','correct','problem_id']]
    # TODO 4: Groupby: user_id
    grouped_df = df.groupby('user_id')
    users_list = list(grouped_df.groups.keys())
    df = pd.DataFrame()
    for user_id in users_list:
        user_df = grouped_df.get_group(user_id)
        # Drop users with too few interactions to form a useful sequence.
        if (len(user_df)) <= 15:
            continue
        # Turn one user's rows into a single list/str-encoded sequence record.
        user_df = get_one_user_data(user_df, skill_num=skill_num, mode='skill states on all')
        # concat
        df = pd.concat([df, user_df])
    print('start to save data in csv file')
    print(df.head(10))
    df.to_csv(
        os.path.join(root_dir, filename + '_preprocessed.csv'),
        mode='w',
        index=False
    )
    # TODO 6: split train, val, Test
    # shuffle
    df = df.sample(frac=1).reset_index(drop=True)
    # split: 70% train+val vs test, then 80/20 train vs val inside that.
    train_df, val_df, test_df = train_val_test_split(
        df,
        train_test_ratio=0.7,
        train_val_ratio=0.8
    )
    train_df.to_csv(
        os.path.join(root_dir, filename + '_preprocessed_train.csv'),
        mode='w',
        index=False
    )
    val_df.to_csv(
        os.path.join(root_dir, filename + '_preprocessed_val.csv'),
        mode='w',
        index=False
    )
    test_df.to_csv(
        os.path.join(root_dir, filename + '_preprocessed_test.csv'),
        mode='w',
        index=False
    )
    print('finish to save data in csv file\n\n')
def run():
    """Driver: fix RNG seed, preprocess Assistment17, then compute PathSim
    similarities on the preprocessed output."""
    setup_seed(41)
    setup_pandas()
    pre_process(
        root_dir='../data/Assistment17',
        filename='anonymized_full_release_competition_dataset'
    )
    # Imported here (not at module top) so PathSim is only needed when run.
    from PathSim import cal_similarity
    cal_similarity(
        root_dir='../data/Assistment17',
        filename='anonymized_full_release_competition_dataset_preprocessed'
    )

# Executed on import: this module is a script, not a library.
run()
|
from django.db import models
class Obd(models.Model):
    """Raw OBD/GPS telemetry record reported by a vehicle tracking device.

    NOTE(review): every field — including Latitude, Longitude and Rpm — is a
    CharField; presumably values arrive as raw strings from the device.
    Confirm before attempting numeric filtering or geo queries.
    """
    OrgID=models.CharField(max_length=70, blank=False, default='')
    SiteID=models.CharField(max_length=70, blank=False, default='')
    VOB_ID=models.CharField(max_length=70, blank=False, default='')
    OBD_TAG_ID=models.CharField(max_length=70, blank=False, default='')
    IMEI=models.CharField(max_length=70, blank=False, default='')
    Latitude=models.CharField(max_length=70, blank=False, default='')
    North_South=models.CharField(max_length=70, blank=False, default='')
    Longitude=models.CharField(max_length=70, blank=False, default='')
    East_West=models.CharField(max_length=70, blank=False, default='')
    Signal_Strength = models.CharField(max_length=70, blank=False, default='')
    timeout = models.CharField(max_length=70, blank=False, default='')
    Device_Status = models.CharField(max_length=70, blank=False, default='')
    Rpm = models.CharField(max_length=70, blank=False, default='')
    Internal_Battery_Level = models.CharField(max_length=70, blank=False, default='')
|
# Deployment settings: target EC2 host and SSH identity.
ec2_address = "ec2-18-236-160-205.us-west-2.compute.amazonaws.com"
user = "ec2-user"
# NOTE(review): path has no leading ~ or /home/... — confirm it resolves.
key_file = "/Documents/License/jjsham_msds694.pem"
# GitHub repository to deploy.
git_repo_owner = "MSDS698"
git_repo_name = "googlemap_week1"
git_user_id = "jacquessham"
# Origin/destination coordinates ("lat,lng") for the directions query.
orig_coord = '37.7909,-122.3925'
dest_coord = '37.7765,-122.4506'
output_file_name = 'output.txt'
|
import csv
import sklearn
import nltk
from nltk.corpus import stopwords
import re
import time
# Word-frequency sentiment baseline over reviews.csv ('label|review' rows).
# Every 5th review ((index+1) % 5 == 0) is held out as the test set; the rest
# train per-word positive/negative counts.
start_time = time.time()
inputFile = open("reviews.csv")
reader = csv.reader(inputFile, delimiter='|')
next(reader)
# get all the stopWords and put them into set
stopWords = set(stopwords.words('english'))
# skip first line
# NOTE(review): this is the SECOND next() on the reader — together they skip
# two rows, so the first data row is dropped unless the file really has two
# header lines. Confirm against reviews.csv.
next(reader)
labels, text = [], []
for row in reader:
    labels.append(row[0])
    # split the reviews using characters except alphabetic letters, numbers and single quote
    text.append(re.split("[^a-zA-Z0-9']+", row[1].lower()))
# for each word, we count how many times it appears in positive reviews and how many times it
# appears in negative reviews
goodCount, badCount = {}, {}
for i in range(len(text)):
    if (i + 1) % 5 == 0: continue
    for word in text[i]:
        if word in stopWords: continue
        if labels[i] == "positive":
            if word in goodCount: goodCount.update({word: goodCount[word] + 1})
            else: goodCount.update({word: 1})
        else:
            if word in badCount: badCount.update({word: badCount[word] + 1})
            else: badCount.update({word: 1})
# we assume that for each word, number of times it appears in positive word / total number of
# times it appears in reviews is "goodness". For each review, we sum up the goodness of all to
# get the goodness of the review, if it larger than 0.5, it is a positive review
total, count = 0, 0
for i in range(len(text)):
    if (i + 1) % 5 ==0:
        total += 1
        goodSum, badSum = 0, 0
        for word in text[i]:
            good = goodCount[word] if word in goodCount else 0
            bad = badCount[word] if word in badCount else 0
            # Words never seen in training contribute nothing.
            if good == 0 and bad == 0: continue
            goodSum += float(good) / (good + bad)
            badSum += float(bad) / (good + bad)
        sentiment = "positive" if goodSum > badSum else "negative"
        if sentiment == labels[i]: count += 1
# Fraction of held-out reviews classified correctly, then elapsed time.
print(str(float(count) / total) + '\n')
print("--- %s seconds ---" % (time.time() - start_time))
|
# coding: utf-8
from celery import task
from scoop.location.util.weather import get_open_weather
@task(expires=30, rate_limit='10/m')
def weather_prefetch(city):
    """Prefetch (warm the cache of) weather information for a city.

    Celery task: expires after 30s if not picked up, rate-limited to 10/min.
    """
    get_open_weather(city)
|
# -*- coding: UTF-8 -*-
'''
Created on 2017年5月5日
@author: superhy
'''
from interface.embedding import word2Vec
from K_core import basic_Seq2Seq
def loadQuesAnsVocabData(trainFilePath, gensimW2VModelPath):
    """Load question/answer training pairs plus the word2vec vocabulary.

    Each line looks like "[q1,q2,...]-[a1,a2,...]"; pairs with an empty side
    or more than 50 tokens on either side are dropped. Returns the corpus,
    vocab, both index maps, the gensim model and the max question/answer
    token lengths. (Python 2: words are decoded from utf-8 bytes.)
    """
    # load file data
    fr_train = open(trainFilePath, 'r')
    trainLines = fr_train.readlines()
    fr_train.close()
    del(fr_train)
    corpus_tuple = []
    ques_token_len = 0
    ans_token_len = 0
    for line in trainLines:
        # Lines without the question/answer separator are skipped.
        if line.find('-') == -1:
            continue
        ques_line = line.split('-')[0]
        ans_line = line.split('-')[1]
        # Extract the comma-separated tokens between the [] brackets.
        ques_words = list(word.decode('utf-8') for word in ques_line[ques_line.find('[') + 1 : ques_line.find(']')].split(','))
        ans_words = list(word.decode('utf-8') for word in ans_line[ans_line.find('[') + 1 : ans_line.find(']')].split(','))
        if len(ques_words) > 0 and len(ques_words) <= 50 and len(ans_words) > 0 and len(ans_words) <= 50:
            ques_token_len = max(ques_token_len, len(ques_words))
            ans_token_len = max(ans_token_len, len(ans_words))
            corpus_tuple.append((ques_words, ans_words))
    # load word vocab indices data
    gensimW2VModel = word2Vec.loadModelfromFile(gensimW2VModelPath)
    words_vocab = gensimW2VModel.vocab.keys()
    vocab_indices = dict((w, i) for i, w in enumerate(words_vocab))
    indices_vocab = dict((i, w) for i, w in enumerate(words_vocab))
    return corpus_tuple, words_vocab, vocab_indices, indices_vocab, gensimW2VModel, ques_token_len, ans_token_len
def trainQuesAnsChatbot(corpus_tuple, words_vocab, vocab_indices,
                        w2v_model,
                        ques_token_len, ans_token_len,
                        network='LSTM_core',
                        frame_path=None):
    '''
    Train the seq2seq chatbot generator and optionally persist its framework.

    network: the chatbot neural network K_core (only LSTM_core now)
    frame_path: the storage path of the neural network framework model
    Returns the trained generator.
    '''
    generator = basic_Seq2Seq.trainer(corpus_tuple, words_vocab, vocab_indices, w2v_model, ques_token_len, ans_token_len)
    # `is not None` instead of `!= None`: identity check is the Python idiom.
    if frame_path is not None:
        basic_Seq2Seq.storageGenerator(generator, frame_path)
        print('Chatbot has been stored in path: {0}.'.format(frame_path))
    return generator
def runChatbot(generator, ques_test_input,
               indices_vocab,
               w2v_model, token_len,
               res_path=None):
    """Generate an answer for *ques_test_input*; optionally append a Q/A log.

    res_path: when given, the question and answer are appended to this file.
    Returns the generated answering context.
    """
    answeringContext = basic_Seq2Seq.chatbot(generator, ques_test_input, indices_vocab, w2v_model, token_len)
    # `is not None` plus a context manager: the original opened the file
    # without `with`, leaking the handle if write() raised.
    if res_path is not None:
        generate_text_str = 'Question: ' + ''.join(ques_test_input) + '\n' + 'Answering: ' + ''.join(answeringContext) + '\n'
        with open(res_path, 'a') as fw:
            fw.write(generate_text_str)
    return answeringContext
if __name__ == '__main__':
    # No standalone behaviour: this module is used via its public functions.
    pass
|
import json

# Load a previously stored name if present; otherwise prompt for it,
# store it, and greet the user.
filename = 'username.json'
try:
    with open("json/" + filename) as f_obj:
        username = json.load(f_obj)
except FileNotFoundError:
    username = input("请问您叫什么名字")
    # BUG FIX: the original reopened the file in the default read-only mode
    # here, so json.dump raised io.UnsupportedOperation — 'w' is required.
    with open("json/" + filename, 'w') as f_obj:
        json.dump(username, f_obj)
    print("我会将你记在心中的, " + username)
else:
    print("Hi, " + username + ", 很高兴又能和你重逢~")
cost = int(input("Enter the bill total: "))
tip1 = cost * .15
tip2 = cost * .2
print("15% tip is $" + str(tip1) + " and 20% tip is $" + str(tip2) + ".")
|
import os
# Application settings module (Flask/SQLAlchemy style).
# Run the application in debug mode.
DEBUG = True
# Project base directory (the directory containing this settings file).
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Database configuration.
# NOTE(review): credentials are hard-coded; consider environment variables.
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@127.0.0.1:3306/test_gov'
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
from django.contrib.auth.models import User
from faker import Factory as FakerFactory
import factory
from collectors.user.models import Friendship
# Shared Faker instance (English locale) used by the factories below.
faker = FakerFactory.create('en')
class UserFactory(factory.DjangoModelFactory):
    # Factory for django.contrib.auth User with a hashed test password.
    email = factory.LazyAttribute(lambda n: faker.email())
    password = factory.PostGenerationMethodCall('set_password', 'test123')
    # NOTE(review): username draws a *separate* random email, so it will not
    # match `email` above — confirm that is intended.
    username = factory.LazyAttribute(lambda n: faker.email())
    class Meta:
        model = User
class FriendshipFactory(factory.DjangoModelFactory):
    # Factory for a Friendship edge; both endpoints get fresh users.
    creator = factory.SubFactory(UserFactory)
    friend = factory.SubFactory(UserFactory)
    class Meta:
        model = Friendship
import datetime
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from polls.models import Polls, Question
from polls.serializers import PollsListSerializers, QuestionSerializers, AnswerSaveSerializers, PollsSerializers
class PollsList(APIView):
    """List of polls available on the current date."""
    def get(self, request):
        # Capture "today" once so both range bounds use the same timestamp
        # (two separate calls could straddle midnight).
        # NOTE(review): datetime.today() is naive local time — confirm the
        # model fields are stored the same way.
        today = datetime.datetime.today()
        objects = Polls.objects.filter(from_date__lte=today,
                                       to_date__gte=today).all()
        serializer = PollsListSerializers(objects, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class PollsDetail(APIView):
    """Questions belonging to one poll (drafts excluded)."""
    def get(self, request, polls_id):
        questions = Question.objects.filter(polls_id=polls_id, draft=False).all()
        payload = QuestionSerializers(questions, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class AnswerSaveView(APIView):
    """Save a user's answer to a question."""
    # Swagger schema documents the expected POST body.
    @swagger_auto_schema(request_body=openapi.Schema(
        type=openapi.TYPE_OBJECT,
        properties={
            'user_id': openapi.Schema(type=openapi.TYPE_INTEGER, description='User Identification'),
            'question': openapi.Schema(type=openapi.TYPE_INTEGER, description='Question id'),
            'answer': openapi.Schema(type=openapi.TYPE_STRING, description='Response text from user'),
        }))
    def post(self, request):
        serializer = AnswerSaveSerializers(data=request.data)
        if serializer.is_valid():
            serializer.save()
            # NOTE(review): returns 200 on creation; 201 would be more RESTful,
            # but changing it would affect existing clients.
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'errors': serializer.errors})
class StatisticUserViewView(APIView):
    """Statistics: all polls in which the given user has answered questions."""
    def get(self, request, user_id):
        # Raw SQL with a bound %s parameter (safe from injection); groups by
        # poll so each poll appears once regardless of how many answers exist.
        polls = Polls.objects.raw(
            """SELECT polls_polls.* FROM polls_polls
            JOIN polls_question ON polls_polls.id = polls_question.polls_id
            JOIN polls_userstatistic pu ON polls_question.id = pu.question_id
            WHERE pu.user_id=%s
            GROUP BY polls_polls.id""", [user_id])
        serializer = PollsSerializers(polls, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
|
from django.db import models
class Company(models.Model):
    # Managing company; referenced by Adress.
    name = models.CharField(max_length=256)
    phone = models.CharField(max_length=256)
    inn = models.CharField(max_length=256)  # presumably the Russian taxpayer id (INN) — confirm
class Adress(models.Model):
    # Building address. ("Adress" spelling kept: renaming requires a migration.)
    name = models.CharField(max_length=256)
    building_type = models.CharField(max_length=256)
    cad_number = models.CharField(max_length=256)  # presumably cadastral number — confirm
    company = models.ForeignKey(Company, on_delete=models.DO_NOTHING)
    def get_company(self):
        # Trivial accessor kept for API compatibility.
        return self.company
    def get_votings(self):
        # All votings held at this address.
        return Voting.objects.filter(adress=self.id)
class Flat(models.Model):
    # A single flat within a building.
    adress = models.ForeignKey(Adress, on_delete=models.CASCADE)
    square = models.IntegerField()  # presumably floor area — confirm units
    number = models.IntegerField()  # flat number within the building
    def get_adress(self):
        # Trivial accessor kept for API compatibility.
        return self.adress
class Person(models.Model):
    # A resident registered to a flat.
    surname = models.CharField(max_length=256)
    name = models.CharField(max_length=256)
    patronymic = models.CharField(max_length=256)
    flat = models.ForeignKey(Flat, on_delete=models.CASCADE)
    # "publick" spelling kept — renaming the column requires a migration;
    # presumably pairs with Answer.signature for verification — confirm.
    publick_key = models.CharField(max_length=512)
    state = models.BooleanField()  # NOTE(review): meaning not evident from this file — confirm
    def get_flat(self):
        # Trivial accessor kept for API compatibility.
        return self.flat
class Voting(models.Model):
    # A voting held at an address, started by one person.
    name = models.CharField(max_length=256)
    initiator = models.ForeignKey(Person, on_delete=models.CASCADE)
    adress = models.ForeignKey(Adress, on_delete=models.CASCADE)
    def get_initiator(self):
        # Trivial accessor kept for API compatibility.
        return self.initiator
    def get_questions(self):
        # All questions attached to this voting.
        return Question.objects.filter(voting=self.id)
class Question(models.Model):
    # One question within a voting.
    name = models.CharField(max_length=256)
    description = models.CharField(max_length=256)
    voting = models.ForeignKey(Voting, on_delete=models.CASCADE)
    def get_answers(self):
        # All answers submitted for this question.
        return Answer.objects.filter(question=self.id)
class Answer(models.Model):
    # A person's signed answer to a question.
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    time = models.DateTimeField()
    answer = models.CharField(max_length=256)
    # presumably a signature over the answer, checked with Person.publick_key — confirm
    signature = models.CharField(max_length=512)
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from weibo import APIClient
import webbrowser
# Python 2 script (uses the `print` statement and raw_input).
# NOTE(review): app credentials are hard-coded; move them out of source control.
APP_KEY = '2401928872'
APP_SECRET = 'a2c6813e42cdcc1f9b762e4eeaf496dc'
CALLBACK_URL = 'https://api.weibo.com/oauth2/default.html'
# Client from the official Weibo SDK.
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
# Get the OAuth2 authorization page URL and open it in a browser.
url = client.get_authorize_url()
print url
webbrowser.open_new(url)
# Ask the user to paste the value after "code=" from the redirected URL.
print '输入url中code后面的内容后按回车键:'
code = raw_input()
r = client.request_access_token(code)
access_token = r.access_token # token returned by Sina, e.g. abc123xyz456
expires_in = r.expires_in # UNIX time at which the token expires
# Install the token on the client for subsequent API calls.
client.set_access_token(access_token, expires_in)
# Timeline of the current user and the (authorized) users they follow.
statuses = client.statuses__friends_timeline()['statuses']
length = len(statuses)
print length
# Print nickname / bio / location / status text for each entry.
for i in range(0, length):
    print u'昵称:' + statuses[i]['user']['screen_name']
    print u'简介:' + statuses[i]['user']['description']
    print u'位置:' + statuses[i]['user']['location']
    print u'微博:' + statuses[i]['text']
|
from django.shortcuts import get_object_or_404, render, redirect, reverse
from django.urls.base import reverse_lazy
from django.http import HttpResponse
from django.views.generic.base import View
from .models import Ad, Comment
from .forms import CreateForm, CommentForm
from .owner import OwnerCreateView, OwnerListView, OwnerDetailView, OwnerDeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
class AdListView(OwnerListView):
    # Generic list of ads; behaviour comes from OwnerListView.
    model = Ad
    # By convention:
    # template_name = "ads/ad_list.html"
class AdDetailView(OwnerDetailView):
    # Detail page for one ad plus its comments and a blank comment form.
    model = Ad
    template_name = "ads/ad_detail.html"
    def get(self,request,pk):
        # NOTE(review): Ad.objects.get raises DoesNotExist (500) for a bad pk;
        # get_object_or_404 would return 404 — confirm which is intended.
        adx = Ad.objects.get(id=pk)
        # newest-first comment list for this ad
        comments = Comment.objects.filter(ad=adx).order_by('-updated_at')
        comment_form = CommentForm()
        ctx = {'ad':adx,'comments':comments,'comment_form':comment_form}
        return render(request, self.template_name, ctx)
class AdCreateView(LoginRequiredMixin, View):
    # Create an ad owned by the logged-in user.
    template_name = 'ads/ad_form.html'
    success_url=reverse_lazy('ads:all')
    def get(self, request, pk=None):
        # Blank form for a new ad.
        form = CreateForm()
        ctx = {'form':form}
        return render(request, self.template_name, ctx)
    def post(self,request, pk=None):
        form = CreateForm(request.POST, request.FILES or None)
        if not form.is_valid():
            # Re-render the form with validation errors.
            ctx = {'form':form}
            return render(request, self.template_name,ctx)
        # Attach the owner before the first save.
        ad = form.save(commit=False)
        ad.owner = self.request.user
        ad.save()
        return redirect(self.success_url)
class AdUpdateView(LoginRequiredMixin,View):
    # Edit an existing ad; lookup is owner-scoped so users can only edit their own.
    template_name = 'ads/ad_form.html'
    success_url=reverse_lazy('ads:all')
    def get(self,request, pk):
        ad = get_object_or_404(Ad,id=pk, owner=self.request.user)
        form = CreateForm(instance=ad)
        ctx = {'form':form}
        return render(request,self.template_name, ctx)
    def post(self,request,pk=None):
        ad = get_object_or_404(Ad, id=pk, owner=self.request.user)
        form = CreateForm(request.POST, request.FILES or None, instance=ad)
        if not form.is_valid():
            # Re-render the form with validation errors.
            ctx = {'form':form}
            return render(request, self.template_name,ctx)
        ad = form.save(commit=False)
        ad.save()
        return redirect(self.success_url)
class AdDeleteView(OwnerDeleteView):
    # Deletion restricted to the owner via OwnerDeleteView.
    model = Ad
def stream_file(request, pk):
    """Serve the binary picture stored on an Ad as an HTTP response."""
    ad = get_object_or_404(Ad, id=pk)
    resp = HttpResponse()
    resp['Content-Type'] = ad.content_type
    resp['Content-Length'] = len(ad.picture)
    resp.write(ad.picture)
    return resp
class CommentCreateView(LoginRequiredMixin, View):
    # Create a comment on an ad from the POSTed 'comment' field, then
    # redirect back to the ad's detail page.
    template_name = 'ads/ad_detail.html'
    def post(self,request, pk):
        adx = get_object_or_404(Ad,id=pk)
        print(request.POST)  # NOTE(review): debug print left in — consider removing
        comment = Comment(text=request.POST['comment'], ad=adx, owner=request.user)
        comment.save()
        return redirect(reverse('ads:ad_detail', args=[pk]))
class CommentDeleteView(OwnerDeleteView):
    model = Comment
    # https://stackoverflow.com/questions/26290415/deleteview-with-a-dynamic-success-url-dependent-on-id
    def get_success_url(self):
        # Redirect back to the detail page of the ad the comment belonged to.
        ad = self.object.ad
        return reverse('ads:ad_detail', args=[ad.id])
|
from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry
import FWCore.ParameterSet.Config as cms
# Common functions and classes for ID definition are imported here:
from RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_tools \
import ( WorkingPoint_V3,
IsolationCutInputs,
ClusterIsolationCutInputs,
HoverECutInputs,
configureVIDCutBasedPhoID_V6 )
#
# Details of the ID values can be found in the following link
# https://indico.cern.ch/event/1204277/#5-update-on-run3-photon-cut-ba
#
#
# First, define cut values
#
# Loose working point Barrel and Endcap
# Loose working point: one cut set for barrel (EB), one for endcap (EE).
idName = "cutBasedPhotonID-RunIIIWinter22-122X-V1-loose"
WP_Loose_EB = WorkingPoint_V3(
idName , # idName
0.011452 , # full5x5_SigmaIEtaIEtaCut
0.12999 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
1.8852 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
0.70379 , # absPFECalClusIsoWithEACut_C1
0.00065204 , # absPFECalClusIsoWithEACut_C2
6.3440 , # absPFHCalClusIsoWithEACut_C1
0.010055 , # absPFHCalClusIsoWithEACut_C2
0.00005783 # absPFHCalClusIsoWithEACut_C3
)
WP_Loose_EE = WorkingPoint_V3(
idName , # idName
0.027674 , # full5x5_SigmaIEtaIEtaCut
0.15343 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
1.6540 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
6.61585 , # absPFECalClusIsoWithEACut_C1
0.00019549 , # absPFECalClusIsoWithEACut_C2
1.8588 , # absPFHCalClusIsoWithEACut_C1
0.01170 , # absPFHCalClusIsoWithEACut_C2
0.00007476 # absPFHCalClusIsoWithEACut_C3
)
# Medium working point Barrel and Endcap
idName = "cutBasedPhotonID-RunIIIWinter22-122X-V1-medium"
WP_Medium_EB = WorkingPoint_V3(
idName , # idName
0.01001 , # full5x5_SigmaIEtaIEtaCut
0.058305 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
0.93929 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
0.22770 , # absPFECalClusIsoWithEACut_C1
0.00065204 , # absPFECalClusIsoWithEACut_C2
2.1890 , # absPFHCalClusIsoWithEACut_C1
0.010055 , # absPFHCalClusIsoWithEACut_C2
0.00005783 # absPFHCalClusIsoWithEACut_C3
)
WP_Medium_EE = WorkingPoint_V3(
idName , # idName
0.02687 , # full5x5_SigmaIEtaIEtaCut
0.005181 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
0.97029 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
1.124 , # absPFECalClusIsoWithEACut_C1
0.00019549 , # absPFECalClusIsoWithEACut_C2
0.033670 , # absPFHCalClusIsoWithEACut_C1
0.01170 , # absPFHCalClusIsoWithEACut_C2
0.00007476 # absPFHCalClusIsoWithEACut_C3
)
# Tight working point Barrel and Endcap
idName = "cutBasedPhotonID-RunIIIWinter22-122X-V1-tight"
WP_Tight_EB = WorkingPoint_V3(
idName , # idName
0.009993 , # full5x5_SigmaIEtaIEtaCut
0.0417588 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
0.31631 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
0.14189 , # absPFECalClusIsoWithEACut_C1
0.00065204 , # absPFECalClusIsoWithEACut_C2
0.39057 , # absPFHCalClusIsoWithEACut_C1
0.0100547 , # absPFHCalClusIsoWithEACut_C2
0.00005783 # absPFHCalClusIsoWithEACut_C3
)
WP_Tight_EE = WorkingPoint_V3(
idName , # idName
0.02687 , # full5x5_SigmaIEtaIEtaCut
0.0025426 , # hOverEWithEACut
# Isolation cuts are generally absIso < C1 + pt*C2, except for HCalClus is < C1 + pt*C2 + pt*pt*C3
0.29266 , # absPFChgHadIsoWithEACut_C1
0.0 , # absPFChgHadIsoWithEACut_C2
1.04269 , # absPFECalClusIsoWithEACut_C1
0.00019549 , # absPFECalClusIsoWithEACut_C2
0.029262 , # absPFHCalClusIsoWithEACut_C1
0.01170 , # absPFHCalClusIsoWithEACut_C2
0.00007476 # absPFHCalClusIsoWithEACut_C3
)
# Second, define where to find the precomputed isolations and what effective
# areas to use for pile-up correction
isoInputs = IsolationCutInputs(
# chHadIsolationMapName
'photonIDValueMapProducer:phoChargedIsolation' ,
# chHadIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_ChgHadronIso_95percentBased.txt",
# neuHadIsolationMapName
'photonIDValueMapProducer:phoNeutralHadronIsolation' ,
# neuHadIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_NeuHadronIso_95percentBased.txt" ,
# phoIsolationMapName
'photonIDValueMapProducer:phoPhotonIsolation' ,
# phoIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_PhotonIso_95percentBased.txt"
)
# Cluster-based isolations (tracker / ECAL PF cluster / HCAL PF cluster).
clusterIsoInputs = ClusterIsolationCutInputs(
# trkIsolationMapName
'photonIDValueMapProducer:phoTrkIsolation' ,
# trkIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_TrackerIso_95percentBased.txt",
# ecalClusIsolationMapName
'photonIDValueMapProducer:phoEcalPFClIsolation' ,
# ecalClusIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_ECalClusterIso_95percentBased.txt",
# hcalClusIsolationMapName
'photonIDValueMapProducer:phoHcalPFClIsolation' ,
# hcalClusIsolationEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_HCalClusterIso_95percentBased.txt"
)
# Effective areas for the cone-based H/E cut.
hOverEInputs = HoverECutInputs(
# hOverEEffAreas
"RecoEgamma/PhotonIdentification/data/RunIII_Winter22/effectiveArea_coneBasedHoverE_95percentBased.txt"
)
#
# Finally, set up VID configuration for all cuts
#
cutBasedPhotonID_RunIIIWinter22_122X_V1_loose = configureVIDCutBasedPhoID_V6 ( WP_Loose_EB, WP_Loose_EE, isoInputs, clusterIsoInputs, hOverEInputs)
cutBasedPhotonID_RunIIIWinter22_122X_V1_medium = configureVIDCutBasedPhoID_V6 ( WP_Medium_EB, WP_Medium_EE, isoInputs, clusterIsoInputs, hOverEInputs)
cutBasedPhotonID_RunIIIWinter22_122X_V1_tight = configureVIDCutBasedPhoID_V6 ( WP_Tight_EB, WP_Tight_EE, isoInputs, clusterIsoInputs, hOverEInputs)
## The MD5 sum numbers below reflect the exact set of cut variables
# and values above. If anything changes, one has to
# 1) comment out the lines below about the registry,
# 2) run "calculateIdMD5 <this file name> <one of the VID config names just above>
# 3) update the MD5 sum strings below and uncomment the lines again.
#
central_id_registry.register(cutBasedPhotonID_RunIIIWinter22_122X_V1_loose.idName,
'57d3fe8d9a1bff37aca5d13887138607233af1b5')
central_id_registry.register(cutBasedPhotonID_RunIIIWinter22_122X_V1_medium.idName,
'114b047ad28e2aae54869847420514d74f2540b8')
central_id_registry.register(cutBasedPhotonID_RunIIIWinter22_122X_V1_tight.idName,
'2e56bbbca90e9bc089e5a716412cc51f3de47cb3')
# Mark all three working points as POG-approved.
cutBasedPhotonID_RunIIIWinter22_122X_V1_loose.isPOGApproved = cms.untracked.bool(True)
cutBasedPhotonID_RunIIIWinter22_122X_V1_medium.isPOGApproved = cms.untracked.bool(True)
cutBasedPhotonID_RunIIIWinter22_122X_V1_tight.isPOGApproved = cms.untracked.bool(True)
|
loop = 1
while loop == 1:
score = float(input("Input Score:"))
if score < 0 or score > 100:
print("Invalid score. Must be between 1 and 100")
elif score > 100:
print("Invalid score. Must be between 1 and 100")
elif score > 50 and score < 90:
print("Pass")
loop = 0
elif score > 90 and score <= 100:
print("Excellent")
loop = 0
else:
print("Fail")
loop = 0 |
#rotated array
a = [9,3,4,5,6,7,8]

def find(l, r):
    """Binary search for the minimum of the rotated sorted array `a` in [l, r]."""
    # Segment is already sorted: its leftmost element is the minimum.
    if a[l] < a[r]:
        return a[l]
    if l == r - 1 or l == r:
        return min(a[l], a[r])
    # BUG FIX (portability): `//` keeps the midpoint an int on Python 3 too;
    # plain `/` would yield a float index there. Behaviour on Python 2 is
    # unchanged.
    m = (l + r) // 2
    if a[m] > a[l]:
        # left half is sorted and all >= a[l]; the rotation point is right of m
        return find(m, r)
    else:
        return find(l, m)

# Parenthesised prints behave identically on Python 2 and 3 for one argument.
print(a)
print(find(0, len(a) - 1))
|
import os.path as osp
from tempfile import mkdtemp
from datetime import datetime
from typing import Iterable, Tuple
import numpy as np
# from sklearn.metrics import confusion_matrix
import tensorflow as tf
from tqdm import tqdm
from ..names import (
X_PLACE,
Y_PLACE,
SAMPLE_WEIGHT_PLACE,
LR_PLACE,
WAVELET_DROPOUT_PLACE,
CONV_DROPOUT_PLACE,
DENSE_DROPOUT_PLACE,
IS_TRAINING_PLACE,
OP_INFERENCE,
OP_LOSS,
OP_TRAIN,
)
from ..utils import (
BatchGenerator,
restore_session,
save_session,
)
def fit_generator(
    sess,
    train_batch_gen,
    evaluate_set: Iterable[Tuple[int, np.ndarray, np.ndarray, bool]],
    max_batches: int = 100000,
    learning_rate: float = 0.001,
    learning_rate_decay_ratio: float = 1 / 3,
    learning_rate_decay_rounds: int = 10,
    early_stopping_rounds: int = 50,
    save_folder: str = './',
    save_best: bool = True,
    model_name_prefix: str = 'some_random_model',
    saver=None,
    wavelet_dropout_prob=0.0,
    conv_dropout_prob=0.0,
    dense_dropout_prob=0.0,
) -> tf.Session:
    """Train the session's graph from a batch generator with periodic evaluation.

    train_batch_gen() must yield (x_batch, y_batch, weight_batch) tuples.
    Each evaluate_set item is (batches_per_round, x, y, is_validation_set);
    validation-set accuracy drives checkpointing, LR decay and early stopping.
    Returns the final session, or the restored best session when save_best.

    NOTE(review): if save_best is True but validation accuracy never exceeds
    the initial 0.0, `best_variable_path` is never assigned and the final
    restore raises NameError — confirm inputs guarantee an improvement.
    """
    # TODO: Check only one validation set
    early_stop = False
    early_stopping_waiting_rounds = 0
    learning_rate_decay_waiting_rounds = 0
    best_validation_accuracy = 0.
    for n_batch, (
        x_batch,
        y_batch,
        weight_batch,
    ) in tqdm(enumerate(train_batch_gen()), total=max_batches):
        if (n_batch > max_batches) or early_stop:
            break
        fit_batch(
            sess,
            x_batch,
            y_batch,
            weight_batch,
            learning_rate,
            wavelet_dropout_prob=wavelet_dropout_prob,
            conv_dropout_prob=conv_dropout_prob,
            dense_dropout_prob=dense_dropout_prob,
        )
        # Evaluate each configured set on its own cadence.
        for eval_set_id, (
            batches_per_round,
            x_evaluate,
            y_evaluate,
            is_validation_set,
        ) in enumerate(evaluate_set):
            if (n_batch + 1) % batches_per_round > 0:
                continue
            prediction, loss = predict_and_evaluate(sess, x_evaluate, y_evaluate, batch_size=16)
            # accuracy from argmax of one-hot labels vs predicted class
            ans = y_evaluate.argmax(axis=1)
            pred_max = prediction.argmax(axis=1)
            accuracy = (ans == pred_max).mean()
            print('N_BATCH {} with eval id {} loss: {}, accuracy: {}'.format(
                n_batch,
                eval_set_id,
                loss,
                accuracy,
            ))
            # print(confusion_matrix(ans, pred_max))
            if is_validation_set:
                if accuracy > best_validation_accuracy:
                    print('Improved!')
                    # reset both patience counters on improvement
                    early_stopping_waiting_rounds = 0
                    learning_rate_decay_waiting_rounds = 0
                    # save best model
                    if save_best:
                        model_name = '{}__batch_{}_at_{}__valacc_{:.4f}.mdl'.format(
                            model_name_prefix,
                            n_batch,
                            datetime.now().strftime('%Y%m%d-%H%M%S'),
                            accuracy,
                        )
                        best_variable_path = osp.join(save_folder, model_name)
                        print('Save the model to: {}'.format(best_variable_path))
                        save_session(sess, best_variable_path, saver)
                    best_validation_accuracy = accuracy
                else:
                    # no improvement: advance early-stop and LR-decay patience
                    if early_stopping_waiting_rounds >= early_stopping_rounds:
                        print('Early Stop')
                        early_stop = True
                    else:
                        early_stopping_waiting_rounds += 1
                    if learning_rate_decay_waiting_rounds >= learning_rate_decay_rounds:
                        print('Reduce learning rate')
                        learning_rate = learning_rate * learning_rate_decay_ratio
                        print('new learning rate: {}'.format(learning_rate))
                        learning_rate_decay_waiting_rounds = 0
                    else:
                        learning_rate_decay_waiting_rounds += 1
    if save_best:
        print('Restore best model from: {}'.format(best_variable_path))
        _, best_sess = restore_session(best_variable_path)
    return best_sess
def fit_batch(sess, x_batch, y_batch, weight_batch, learning_rate, **kwargs):
    """Run one optimisation step; returns the batch loss.

    kwargs must supply 'wavelet_dropout_prob', 'conv_dropout_prob' and
    'dense_dropout_prob' — they are fed into the matching dropout
    placeholders. Placeholders/ops are located by the shared names
    imported from ..names.
    """
    graph = sess.graph
    x_place = graph.get_tensor_by_name(X_PLACE + ':0')
    y_place = graph.get_tensor_by_name(Y_PLACE + ':0')
    sample_weight_place = graph.get_tensor_by_name(SAMPLE_WEIGHT_PLACE + ':0')
    lr_place = graph.get_tensor_by_name(LR_PLACE + ':0')
    is_training_place = graph.get_tensor_by_name(IS_TRAINING_PLACE + ':0')
    loss_tensor = graph.get_tensor_by_name(OP_LOSS + ':0')
    train_op = graph.get_operation_by_name(OP_TRAIN)
    wavelet_dropout_place = graph.get_tensor_by_name(WAVELET_DROPOUT_PLACE + ':0')
    conv_dropout_place = graph.get_tensor_by_name(CONV_DROPOUT_PLACE + ':0')
    dense_dropout_place = graph.get_tensor_by_name(DENSE_DROPOUT_PLACE + ':0')
    _, batch_loss = sess.run(
        [train_op, loss_tensor],
        feed_dict={
            x_place: x_batch,
            y_place: y_batch,
            sample_weight_place: weight_batch,
            lr_place: learning_rate,
            wavelet_dropout_place: kwargs['wavelet_dropout_prob'],
            conv_dropout_place: kwargs['conv_dropout_prob'],
            dense_dropout_place: kwargs['dense_dropout_prob'],
            is_training_place: True,
        },
    )
    return batch_loss
def evaluate(sess, x_val, y_val, batch_size=128):
    """Mean loss of the model in `sess` over (x_val, y_val), in mini-batches."""
    gen = BatchGenerator(
        x=x_val,
        y=y_val,
        batch_size=batch_size,
        shuffle=False,
    )
    batch_losses = [evaluate_batch(sess, xb, yb) for xb, yb in tqdm(gen())]
    return np.mean(batch_losses)
def evaluate_batch(sess, x_batch, y_batch):
    """Loss of a single batch with uniform sample weights (no training step)."""
    graph = sess.graph
    x_place = graph.get_tensor_by_name(X_PLACE + ':0')
    y_place = graph.get_tensor_by_name(Y_PLACE + ':0')
    sample_weight_place = graph.get_tensor_by_name(SAMPLE_WEIGHT_PLACE + ':0')
    loss_op = graph.get_operation_by_name(OP_LOSS)
    loss_tensor = graph.get_tensor_by_name(OP_LOSS + ':0')
    # NOTE(review): the dropout / is_training placeholders are not fed here —
    # presumably they have inference-friendly defaults; confirm.
    _, batch_loss = sess.run(
        [loss_op, loss_tensor],
        feed_dict={
            x_place: x_batch,
            y_place: y_batch,
            sample_weight_place: np.ones(x_batch.shape[0]),
        },
    )
    return batch_loss
def predict(sess, x_test, batch_size=128):
    """Run inference over x_test in mini-batches; returns stacked predictions."""
    gen = BatchGenerator(
        x=x_test,
        y=np.empty_like(x_test),  # labels are unused at prediction time
        batch_size=batch_size,
        shuffle=False,
    )
    chunks = [predict_batch(sess, xb) for xb, _ in tqdm(gen())]
    return np.concatenate(chunks, axis=0)
def predict_batch(sess, x_batch):
    """Inference output for a single batch, with uniform sample weights."""
    graph = sess.graph
    inference_op = graph.get_operation_by_name(OP_INFERENCE)
    inference_tensor = graph.get_tensor_by_name(
        OP_INFERENCE + ':0')
    x_place = graph.get_tensor_by_name(X_PLACE + ':0')
    sample_weight_place = graph.get_tensor_by_name(SAMPLE_WEIGHT_PLACE + ':0')
    _, batch_result = sess.run(
        [inference_op, inference_tensor],
        feed_dict={
            x_place: x_batch,
            sample_weight_place: np.ones(x_batch.shape[0]),
        },
    )
    return batch_result
def predict_and_evaluate(sess, x, y, batch_size=128):
    """Predictions and mean loss over (x, y), computed batch by batch."""
    gen = BatchGenerator(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=False,
    )
    preds = []
    batch_losses = []
    for xb, yb in tqdm(gen()):
        batch_pred, batch_loss = predict_and_evaluate_batch(sess, xb, yb)
        preds.append(batch_pred)
        batch_losses.append(batch_loss)
    return np.concatenate(preds, axis=0), np.mean(batch_losses)
def predict_and_evaluate_batch(sess, x_batch, y_batch):
    """Inference output and loss for one batch, with uniform sample weights."""
    graph = sess.graph
    x_place = graph.get_tensor_by_name(X_PLACE + ':0')
    y_place = graph.get_tensor_by_name(Y_PLACE + ':0')
    sample_weight_place = graph.get_tensor_by_name(SAMPLE_WEIGHT_PLACE + ':0')
    inference_op = graph.get_operation_by_name(OP_INFERENCE)
    inference_tensor = graph.get_tensor_by_name(
        OP_INFERENCE + ':0')
    loss_op = graph.get_operation_by_name(OP_LOSS)
    loss_tensor = graph.get_tensor_by_name(OP_LOSS + ':0')
    _, batch_prediction, _, batch_loss = sess.run(
        [inference_op, inference_tensor, loss_op, loss_tensor],
        feed_dict={
            x_place: x_batch,
            y_place: y_batch,
            sample_weight_place: np.ones(x_batch.shape[0]),
        },
    )
    return batch_prediction, batch_loss
|
import requests
import os
# Download one image into D://pics//, skipping it if already present.
url = ""  # TODO: fill in the image URL before running
root = "D://pics//"
path = root + url.split("/")[-1]
try:
    kv = {"user-agent": "Mozilla/5.0"}
    # exist_ok makes directory creation idempotent (replaces exists()+mkdir,
    # which was racy between check and create)
    os.makedirs(root, exist_ok=True)
    if not os.path.exists(path):
        r = requests.get(url, headers=kv)
        r.raise_for_status()
        # `with` closes the handle automatically; the explicit close() inside
        # the with-block was redundant
        with open(path, 'wb') as f:
            f.write(r.content)
        print("文件保存成功")
    else:
        print("文件已存在")
except Exception:
    # was a bare `except:` (which also swallowed KeyboardInterrupt/SystemExit);
    # keep the best-effort behaviour but only catch real errors
    print("爬取失败")
# -*- coding: utf-8 -*-
from docxtpl import DocxTemplate
from time import time
from zip import *
from enum import Enum
from Generator.settings import BASE_DIR
import shutil
WORK_DIR = 'documents'
TEMPLATES_DIR = 'docs/docx_templates'
class Document(Enum):
    """Document types the generator can render from docx templates."""
    CLAIM = 'claim'
    FORM = 'form'
    MEMO = 'memo'
    REVIEW = 'review'
    @classmethod
    def fromJson(cls, json):
        """Return the documents whose flag in `json` is truthy.

        Order matches the original implementation: claim, memo, form, review.
        Robustness fix: a missing key now counts as False instead of raising
        KeyError; explicit True values behave exactly as before.
        """
        order = (cls.CLAIM, cls.MEMO, cls.FORM, cls.REVIEW)
        return [doc for doc in order if json.get(doc.value)]
def process_all_documents(article, process_pdf=False):
    # type: (Article, bool) -> object
    """Render every document type for `article`; returns the zipped dir name."""
    directory = _prepare_workdir()
    # same order as before: form, claim, memo, review
    for doc in (Document.FORM, Document.CLAIM, Document.MEMO, Document.REVIEW):
        _process_doc(doc, directory, article)
    return _zip_workdir(directory)
def process_documents(documents, article, process_pdf=False):
    # type: (list, Article, bool) -> object
    """Render only the requested documents for `article`; returns the zip name.

    process_pdf is currently unused (kept for interface symmetry).
    """
    directory = _prepare_workdir()
    for document in documents:
        _process_doc(document, directory, article)
    return _zip_workdir(directory)
def _prepare_workdir():
    """
    Create a new working directory named after the current UNIX timestamp
    and return its (relative) name.
    """
    dir_name = str(int(time()))
    # exist_ok makes creation idempotent and removes the check-then-create
    # race the exists()+makedirs pair had
    os.makedirs(_work_dir_path(dir_name), exist_ok=True)
    return dir_name
def _zip_workdir(dir_name):
    """Archive the working directory, then remove it; returns dir_name."""
    # zipdir comes from the star import of `zip`; includeDirInZip=False keeps
    # the archive flat (no top-level folder inside the zip).
    zipdir(_work_dir_path(dir_name), includeDirInZip=False)
    shutil.rmtree(_work_dir_path(dir_name))
    return dir_name
def _process_doc(doc, dir, article):
    # type: (string, string, Article) -> none
    """
    Render one document from its docx template into the working directory.
    doc - document name {claim, memo, form, review} or a Document member
    dir - directory to save
    article - context for template
    :rtype: none
    """
    # BUG FIX: callers pass Document enum members, and '%s' % Document.CLAIM
    # yields 'Document.CLAIM', which matches no 'template_*.docx' file.
    # Use the member's value; plain strings keep working unchanged.
    name = doc.value if isinstance(doc, Document) else doc
    d = DocxTemplate(os.path.join(BASE_DIR, TEMPLATES_DIR, 'template_%s.docx' % name))
    d.render({'article': article})
    d.save(os.path.join(BASE_DIR, WORK_DIR, dir, '%s.docx' % name))
def _work_dir_path(dir):
    # Absolute path of a working directory under WORK_DIR.
    return os.path.join(BASE_DIR, WORK_DIR, dir)
def work_dir_archive_path(zip_name):
    # Absolute path of the zip archive produced for a working directory.
    return os.path.join(BASE_DIR, WORK_DIR, "%s.zip" % zip_name)
from copy import deepcopy as copy
from utils import *
def grid_rotate(grid):
    """Return `grid` rotated 90 degrees clockwise.

    Equivalent to reversing the rows and transposing; result cell [c][r]
    is the original cell [h-1-r][c].
    """
    return [list(column) for column in zip(*grid[::-1])]
def grid_reflect(grid):
    """Mirror `grid` top-to-bottom (vertical reflection is easiest to write)."""
    return list(reversed(grid))
def grid_pad(grid, left, top, width, height):
    """Embed `grid` into a width x height field of zeros at offset (left, top)."""
    src_width = len(grid[0])
    src_height = len(grid)
    padded = []
    for r in range(height):
        if top <= r < top + src_height:
            # source row flanked by zero padding on both sides
            padded.append([0] * left + list(grid[r - top]) + [0] * (width - src_width - left))
        else:
            padded.append([0] * width)
    return padded
def grid_merge(g1, g2):
    """Overlay g1 on g2: take g1's cell unless it is the empty marker ".".

    Assumes both grids have the same dimensions.
    """
    merged = []
    for row1, row2 in zip(g1, g2):
        merged.append([b if a == "." else a for a, b in zip(row1, row2)])
    return merged
def checkCollision(g1, g2):
    """True when any cell is occupied (non-".") in both grids.

    Assumes the two grids share the same dimensions.
    """
    return any(
        a != "." and b != "."
        for row1, row2 in zip(g1, g2)
        for a, b in zip(row1, row2)
    )
def allOrientations(penta):
    """Return every distinct orientation of `penta` (rotations + reflections).

    Collects rotations of the original until one repeats, then mirrors the
    grid and collects rotations of the reflection the same way — at most 8
    distinct orientations.
    """
    found = []
    # NOTE(review): deepcopy looks defensive — grid_rotate already builds new
    # rows — confirm callers rely on independent copies before simplifying.
    current = copy(penta)
    while current not in found:
        cp = copy(current)
        found.append(cp)
        current = grid_rotate(current)
    current = grid_reflect(current)
    while current not in found:
        cp = copy(current)
        found.append(cp)
        current = grid_rotate(current)
    return found
def allPositions(penta, width, height):
    """Every placement of every orientation of `penta` on a width x height board."""
    placements = []
    for shape in allOrientations(penta):
        shape_w = len(shape[0])
        shape_h = len(shape)
        # columns vary in the outer loop, matching the original enumeration order
        for col in range(width - shape_w + 1):
            for row in range(height - shape_h + 1):
                placements.append(grid_pad(shape, col, row, width, height))
    return placements
# Global cache: penta letter -> list of all absolute board placements
# (filled by dictionarySetup, read by tryFit).
pentaPositions = {}
def dictionarySetup(pentas, penta_letters, m1, m2):
    """Fill pentaPositions: letter -> every placement of the matching shape
    on an m1 x m2 board, with occupied cells (1) relabelled to the letter and
    empty cells to "."."""
    for idx in range(len(pentas)):
        letter = penta_letters[idx]
        pentaPositions[letter] = [
            [[letter if cell == 1 else "." for cell in row] for row in placement]
            for placement in allPositions(pentas[idx], m1, m2)
        ]
def tryFit(grid, pentas):
    """Recursively place each penta letter in `pentas` onto `grid`.

    Returns the completed grid on success, or None when no collision-free
    arrangement of the remaining pieces exists.
    """
    if pentas == []:
        return grid  # every piece placed
    head, rest = pentas[0], pentas[1:]
    for placement in pentaPositions[head]:
        if checkCollision(grid, placement):
            continue
        solved = tryFit(grid_merge(grid, placement), rest)
        if solved is not None:
            return solved
    return None
def buildPentaList(numPenta, pentasUsed, remainingPentas, grid):
    # numPenta: remaining number of pentas we want
    # pentasUsed: list of pentas so far
    # remainingPentas: unused pentas
    # grid: grid we want to tile
    # Returns joined letter-strings of piece sets that admit NO tiling of grid.
    impossibles = []
    if numPenta == 0:
        result = tryFit(grid, pentasUsed)
        if result == None:
            return ["".join(pentasUsed)] # found a set with no valid tiling
        # NOTE(review): when a tiling IS found we fall through and keep
        # recursing with numPenta - 1 == -1 until remainingPentas empties;
        # an explicit `return []` here looks intended — confirm before changing.
    if remainingPentas == []:
        return [] # nothing left to pull from, move on
    for i in range(len(remainingPentas)):
        result = buildPentaList(numPenta - 1, pentasUsed + [remainingPentas[i]], remainingPentas[i + 1:], grid)
        if result != None:
            impossibles += result
    return impossibles
|
#!/usr/bin/python3
# *- coding: utf-8 -*-
'''
nom: andy limmois, johann hospice
command: py limmois_hospice.py <n> <k> <l> <d>
'''
import argparse
'''
Outils
'''
def buildParser():
    """Build the CLI parser for the four integer deck parameters n, k, l, d."""
    parser = argparse.ArgumentParser(
        description='Generate a deck of dobble card game')
    for arg_name, help_text in (
        ('n', 'nombre de symboles'),
        ('k', 'nombre de symboles par carte'),
        ('l', "nombre d'apparition maximum pour chaque symboles"),
        ('d', 'nombre de symboles partagés par deux cartes quelconques'),
    ):
        parser.add_argument(arg_name, type=int, help=help_text)
    return parser
def syntax(b=0, d=0, s=""):
    '''
    Format the three output values one per line, matching the assignment's
    required output format.
    '''
    return '{}\n{}\n{}'.format(b, d, s)
def longest(L):
    '''
    Return the longest element of L (the first one on ties), or None when
    L is empty.
    '''
    return max(L, key=len, default=None)
def binStrToArr(S):
    """Parse a space-separated string of binary words into lists of int bits."""
    return [list(map(int, word)) for word in S.split(' ')]
def binArrToStr(A):
    """Inverse of binStrToArr: join bit sequences into a space-separated string."""
    return ' '.join(''.join(str(bit) for bit in card) for card in A)
def intToBin(x, n):
    '''
    Convert integer x into its n-bit binary string, least-significant bit
    first (index j of the result is bit j of x).
    x: integer (decimal value); must satisfy x < 2**n or Exception is raised
    n: integer (length of the binary word)
    '''
    if x >= pow(2, n):
        raise Exception()
    bits = ["0"] * n
    remaining = x
    # greedy subtraction of decreasing powers of two == binary expansion
    for j in reversed(range(n)):
        power = pow(2, j)
        if remaining - power >= 0:
            remaining -= power
            bits[j] = "1"
    return ''.join(bits)
def issubsubset(a, E):
    '''
    Return whether `a` is a subset of at least one element of E.
    E: collection of sets
    a: set
    '''
    return any(a <= e for e in E)
'''
Deck
'''
class Deck:
    def __init__(self, n, k, l, d):
        '''
        Deck specification:
        n symbols (alphabet size)
        k symbols per card (word length)
        each symbol appears at most l times
        any two cards share exactly d symbols
        '''
        self.n = n
        self.k = k
        self.l = l
        self.d = d
    def possibilities(self):
        '''
        Return every candidate card: length-n bit-strings carrying exactly
        k symbols.
        '''
        D = []
        for i in range(pow(2, self.n)):
            c = intToBin(i, self.n)
            if self.checkK(c):
                D += [c]
        return D
    def checkK(self, C):
        '''
        Return whether card C (a bit-string) contains exactly k symbols.
        '''
        ksum = 0
        for s in C:
            if s == "1":
                ksum += 1
            if ksum == self.k:
                return True
        return False
    def checkL(self, P):
        '''
        Return whether no symbol occurs more than l times across pack P.
        P: collection of card bit-strings
        '''
        for i in range(self.n):
            lsum = 0
            for c in P:
                if c[i] == "1":
                    lsum += 1
                if lsum > self.l:
                    return False
        return True
    def checkD(self, P):
        '''
        Return whether every pair of distinct cards in pack P shares exactly
        d symbols.
        P: collection of card bit-strings
        '''
        for c1 in P:
            for c2 in P:
                if c1 != c2: # for two distinct cards of the pack
                    dsum = 0
                    for i in range(self.n):
                        if c1[i] == "1" and c1[i] == c2[i]: # same symbol present on both
                            dsum += 1
                    if dsum != self.d:
                        return False
        return True
    def solutions(self):
        '''
        Return all distinct maximal solution descriptions.
        '''
        def aux(SP, P=set()):
            '''
            Recursive helper; appends maximal valid packs to S, a closure
            variable of the enclosing scope.
            SP: list of remaining candidate cards
            P: the pack built so far
            NOTE(review): the mutable default P=set() is safe here because P
            is never mutated in place — new sets are built via `P | {...}`.
            '''
            if not self.checkL(P) or not self.checkD(P):
                return None
            elif len(SP) == 0:
                return P
            else:
                s1 = aux(SP[1::], P | {SP[0]}) # recurse taking the first candidate
                s2 = aux(SP[1::], P) # recurse skipping it
                if s1 and not issubsubset(s1, S):
                    S.append(s1)
                if s2 and not issubsubset(s2, S):
                    S.append(s2)
                return None
        SP = self.possibilities()
        S = [] # collects every solution found
        aux(SP)
        return S
'''
Main
'''
if __name__ == '__main__':
    # Script entry point.  buildParser(), longest() and syntax() are not
    # visible in this excerpt; presumably defined earlier in the file --
    # TODO confirm.
    args = buildParser().parse_args()
    # Build the deck from the command-line parameters n, k, l, d.
    deck = Deck(args.n, args.k, args.l, args.d)
    solutions = deck.solutions()
    # NOTE(review): this assignment shadows the longest() helper with its
    # own result, so longest() cannot be called again after this line.
    longest = longest(solutions)
    if longest:
        output = syntax(len(longest), len(solutions), binArrToStr(longest))
    else:
        # No solution found: fall back to the no-argument form of syntax().
        output = syntax()
    '''
    display
    '''
    #print("# toutes solutions: \n#\t" + '\n#\t'.join([binArrToStr(s) for s in solutions]))
    print(output)
import securitycenter
from getpass import getpass
import markdown
import re
html_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
body{-webkit-font-smoothing:antialiased;font:normal .8764em/1.5em Arial,Verdana,sans-serif;margin:0}
html>body{font-size:13px}
li{font-size:110%}
li li{font-size:100%}
li p{font-size:100%;margin:.5em 0}
h1{color:#000;font-size:2.2857em;line-height:.6563em;margin:.6563em 0}
h2{color:#111;font-size:1.7143em;line-height:.875em;margin:.875em 0}
h3{color:#111;font-size:1.5em;line-height:1em;margin:1em 0}
h4{color:#111;font-size:1.2857em;line-height:1.1667em;margin:1.1667em 0}
h5{color:#111;font-size:1.15em;line-height:1.3em;margin:1.3em 0}
h6{font-size:1em;line-height:1.5em;margin:1.5em 0}
body,p,td,div{color:#111;font-family:"Helvetica Neue",Helvetica,Arial,Verdana,sans-serif;word-wrap:break-word}
h1,h2,h3,h4,h5,h6{line-height:1.5em}
a{-webkit-transition:color .2s ease-in-out;color:#425363;text-decoration:none}
a:hover{color:#3593d9}
.footnote{color:#0d6ea1;font-size:.8em;vertical-align:super}
#wrapper img{max-width:100%;height:auto}dd{margin-bottom:1em}
li>p:first-child{margin:0}
ul ul,ul ol{margin-bottom:.4em}
caption,col,colgroup,table,tbody,td,tfoot,th,thead,tr{border-spacing:0}
table{border:1px solid rgba(0,0,0,0.25);border-collapse:collapse;display:table;empty-cells:hide;margin:-1px 0 23px;padding:0;table-layout:fixed}
caption{display:table-caption;font-weight:700}
col{display:table-column}
colgroup{display:table-column-group}
tbody{display:table-row-group}
tfoot{display:table-footer-group}
thead{display:table-header-group}
td,th{display:table-cell}
tr{display:table-row}
table th,table td{font-size:1.1em;line-height:23px;padding:0 1em}
table thead{background:rgba(0,0,0,0.15);border:1px solid rgba(0,0,0,0.15);border-bottom:1px solid rgba(0,0,0,0.2)}
table tbody{background:rgba(0,0,0,0.05)}
table tfoot{background:rgba(0,0,0,0.15);border:1px solid rgba(0,0,0,0.15);border-top:1px solid rgba(0,0,0,0.2)}
figure{display:inline-block;margin-bottom:1.2em;position:relative;margin:1em 0}
figcaption{font-style:italic;text-align:center;background:rgba(0,0,0,.9);color:rgba(255,255,255,1);position:absolute;left:0;bottom:-24px;width:98%;padding:1%;-webkit-transition:all .2s ease-in-out}
.poetry pre{display:block;font-family:Georgia,Garamond,serif!important;font-size:110%!important;font-style:italic;line-height:1.6em;margin-left:1em}
.poetry pre code{font-family:Georgia,Garamond,serif!important}
blockquote p{font-size:110%;font-style:italic;line-height:1.6em}
sup,sub,a.footnote{font-size:1.4ex;height:0;line-height:1;position:relative;vertical-align:super}
sub{vertical-align:sub;top:-1px}
p,h5{font-size:1.1429em;line-height:1.3125em;margin:1.3125em 0}
dt,th{font-weight:700}
table tr:nth-child(odd),table th:nth-child(odd),table td:nth-child(odd){background:rgba(255,255,255,0.06)}
table tr:nth-child(even),table td:nth-child(even){background:rgba(0,0,0,0.06)}
@media print{body{overflow:auto}img,pre,blockquote,table,figure,p{page-break-inside:avoid}
#wrapper{background:#fff;color:#303030;font-size:85%;padding:10px;position:relative;text-indent:0}}
@media screen{.inverted #wrapper,.inverted{background:rgba(37,42,42,1)}
.inverted hr{border-color:rgba(51,63,64,1)!important}
.inverted p,.inverted td,.inverted li,.inverted h1,.inverted h2,.inverted h3,.inverted h4,.inverted h5,.inverted h6,.inverted pre,.inverted code,.inverted th,.inverted .math,.inverted caption,.inverted dd,.inverted dt{color:#eee!important}
.inverted table tr:nth-child(odd),.inverted table th:nth-child(odd),.inverted table td:nth-child(odd){background:0}
.inverted a{color:rgba(172,209,213,1)}
#wrapper{padding:20px}
::selection{background:rgba(157,193,200,.5)}
h1::selection{background-color:rgba(45,156,208,.3)}
h2::selection{background-color:rgba(90,182,224,.3)}
h3::selection,h4::selection,h5::selection,h6::selection,li::selection,ol::selection{background-color:rgba(133,201,232,.3)}
code::selection{background-color:rgba(0,0,0,.7);color:#eee}
code span::selection{background-color:rgba(0,0,0,.7)!important;color:#eee!important}
a::selection{background-color:rgba(255,230,102,.2)}
.inverted a::selection{background-color:rgba(255,230,102,.6)}
td::selection,th::selection,caption::selection{background-color:rgba(180,237,95,.5)}}
pre code {
display: block; padding: 0.5em;
color: #000;
background: #f8f8ff
}
pre .comment,
pre .template_comment,
pre .diff .header,
pre .javadoc {
color: #998;
font-style: italic
}
pre .keyword,
pre .css .rule .keyword,
pre .winutils,
pre .javascript .title,
pre .lisp .title,
pre .subst {
color: #000;
font-weight: bold
}
pre .ruby .keyword {
font-weight: normal
}
pre .number,
pre .hexcolor {
color: #40a070
}
pre .string,
pre .tag .value,
pre .phpdoc,
pre .tex .formula {
color: #d14
}
pre .title,
pre .id {
color: #900;
font-weight: bold
}
pre .javascript .title,
pre .lisp .title,
pre .subst {
font-weight: normal
}
pre .class .title,
pre .haskell .label,
pre .tex .command {
color: #458;
font-weight: bold
}
pre .class .params {
color: #000;
}
pre .tag,
pre .tag .title,
pre .rules .property,
pre .django .tag .keyword {
color: #000080;
font-weight: normal
}
pre .attribute,
pre .variable,
pre .instancevar,
pre .lisp .body {
color: #008080
}
pre .regexp {
color: #009926
}
pre .class {
color: #458;
font-weight: bold
}
pre .symbol,
pre .ruby .symbol .string,
pre .ruby .symbol .keyword,
pre .ruby .symbol .keymethods,
pre .lisp .keyword,
pre .tex .special,
pre .input_number {
color: #0086b3
}
pre .ruby .identifier .keyword,
pre .ruby .identifier .keymethods {
color: #0086b3;
}
pre .ruby .constant {
color: #008080;
}
pre .builtin,
pre .built_in,
pre .lisp .title {
color: #0086b3
}
pre .preprocessor,
pre .pi,
pre .doctype,
pre .shebang,
pre .cdata {
color: #999;
font-weight: bold
}
pre .deletion {
background: #fdd
}
pre .addition {
background: #dfd
}
pre .diff .change {
background: #0086b3
}
pre .chunk {
color: #aaa
}
pre .tex .formula {
opacity: 0.5;
}
#header {
background: #425363;
height: 73px;
width: 100%;
}
#logo {
height: 73px;
}
</style>
</head>
<body class="normal">
<div id="header">
<img id="logo" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAABsCAYAAABEmOQaAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAQdxJREFUeNrtnXd8FNXaxzedhNCLgDTpCCIiVYoFBBsqiBS7ghWxYbkovqCgXOWK14ZYQIEEIUBCkg0llNCxIiBKVaQ3KSHJZjfZJO/vCc9wD4eZ3Z3d2WSzmT+eD2TmnDP9fPc81VJUVGQxxRRTTDHFFL1i3gRTTDHFFFOCAyCn7I4wy8zk7pXnpM2DrIVk0P9j4633NktcUdd8aKaYYoopJkAukdOOvEjA4yUAo0hDCiC7IJ8CKH2aJ62IMR+iKaaUPcnJybn67NnM3pmZ525wJ2jXz+l0Vgm0a8jKyq6L8+sBudGD67gJ19GisLAwzASIn+ARMjP5BQkYDshqSLYGUPIhmwCTiS2TVnY0P0xTTCkzAFmJCbUIE6tboXYAyHUBCJDhOL+znlwDX8c0ACTWBIif4VEpPi2n8cLlDyv7T5Ja69tF12HfZMiPLlYouZC4ivHWx1svWtnC/FBNMcUEiAmQIAbIWUdehASPLMDjLld9jtnsUQDK/Wj/JWS3BkwKIUcgnwAog9omr7rM/HBNMcUEiAmQIAEIwSN0VvLzF+AxJy3bHTxkOWLLpdVJK/QfCZkPOaYBFCdkO+TtmHjrTe1SMqLND9kUU0yAmAApgwBRg0ejBekD3PU7nmsPATBqo09vSF/IO5DxgrwNmQ5ZBfkLYtcASh55eQEmL7ZPzbgmGD/S/Pz8KmfOZA7DyzsJMt6NTMAL/kJOjq25OcGZYgLEBEjAAiQzL99jeJy0OyIsMxfdinbvQ76H5LiwgbiSQja6a+07CpkVE2d9rEPq6qZBApAGAEiKjhd8HwBymznBmWICxARIQAKE4BE2K2WUAI8cER6n7I5Qy8xkUkmNgSz3EhZFLmDhiZC663fIR9Fx1gGd0tbUMgFiiikmQEyAlCJA1ODRYEH6QPbEImM6GcZXuFA5LYW8B7kPMgAyR9hvZ+BsErZlQcZB+kEGc98V7K2ltgrJ0zg2uROvi45LfQsw6WkCxBRTTICYAClBgGTl54eL8GBV1D2QahDaflwlaHBnbHzaO00WLu8s2ECiLd8umihM9nmx8daklkkr29P+Qzm5kZZvkgZi+1ZlrIrx1v1tk1cNVcbYl5UTgTYU7f4WZJ/KcUmd9ZuLFYoNklwhLnVUt8VrG5sAMcUUEyAmQPwIj/BZKc9KE/BQyNOSkZtWAccqxadNaLxweUvJeB4BcBAYTikTPcCxDeDor3ZMgCQakHgV7U4r7QGSHwGSNmK7P8/lhFlmJLXF/nchZyQ11q9smP+SDfJa9pPDkG8BlAe6L1lX3QSIKaaYADEB4h94EDAmcYzGhUm40py0PxqxOksyoodaZi66Gm22CO0PAx7PNU9aEeXq2AeybRZApAHaTxODDWPirZ+3S8moKrffcy47yjIjcSjbP8QVyXoI2WVqQh6DpEBOatlPAJLNPZese9AEiCmmmAAxAWIcPJRf9SI4dqiB45TdEQJw1CVDtmiLADhmABw19JwHQBIKkHRE/43CWGcBkifbp2aEa4BkmASSrOi41PGd0tYUt/nj7LlwtLma3YbXsJ3kIphExaWmASQmQEwxxQSICRBdL0i+U4aHorpSDOjHGyxIf0mtL+BBSRVfEOwcToBjdbPEFdfoBIdi6xjLWX0vWTEAInsBkb5q/XdlZhFIHhJycRUAIuu7pK29JE3K9jPnoizTE29Emy8UVRgAkgOAvGsCxBRTTICYANEBj4jZKc9oqXgADyvgUVslJ1Y4wHEn2uxV2sfGpx1q4mF0enFU+jdJrTkqPVEliJBce/dLK6JitRpAktAhdXUbFYhYAJEr0OYbpV+FuNR/AJGHtM5j2+nMRoDJVobIzuuXrq9mAsQUU0yAmA
DxAR606qg/f9kIFXCEhsxMboM2K8XVCuDxPOBRwUVOLIpKv4zdeqereHLRCmZ9xXjr6LbJq66lPvuzbQSZ/oJ94x9RTRUTZ30HILnEPrLzbPFq5GFhNXIOIBnXbfFaNYBYAJC7GCAnAJBhJkBMMcUEiAkQF2JzOsM04FFsJAc8mkvpTEJCZyUTAKYIsRn5leLTFjVeuLymRioT8sa6De3+o5JIkVYUOziBYu82yatU813ty8oRIZLPdpmfhHH+jo6zPt7RuiZMggitRq7E/l+UvoDIPDWIbDmd2QYQKQRA8gGQT0yAuJe8vLwYnEN7nItYJ6IPtl2Vm2sPL8sfGs6/Lq6jp1A7gupD9HA4HDUC4fxsNlsbnE8fuYYF3p/r8B5VLksAyc7OaY5+vYTr6I0xWjudBRHBDpBz57KuwrGul5+jv+qQGAqPyNmpT2uorNLqJSyrIMEjEvAYJSQ/JMhsBzg6SF5Y4ZaZi7pyZHq6aEdhF1padSxkd1ty870e8OjeetHKiq7OV4IIgWcJBxue+5+9w/o9INJZ7vvH2XO1AZJkVxABQOoCINbiVcjs1F8BET9BwlmNCu6oFLChQjeDIRs9fcEhRyH/gnRyURSnCyab6gbAIhRj0Yf+EmQp5JgH50dtEtDvVUzILY26h3a7ozZN5nR9HhQGupkmI/QJ0x7PHoI2nSm/GGQDJNfNdZ2ELEKf4Q5HXi3/giK3Mo7TF8f7P8gCyD8evht5kN8hnwMqIwCV2oECEACjPtq9Blnjwb3eBvkYY95YUFAQVZYBAljUw7hP0TcB2QspcHMO5yDrIe9AqH5SSEAARAseAEI+wPGpFI0eFjYr5TaxrgfaHW20IP1RIZVJC2x/isvaHlVZZfzJctyDlCR7GA6vAix92gqrkmKIzEi6S1Bnkd2kItcdKVTsI9FxqXGd0tZcLkGkEiAyUYRId8HjCgCpCIC8zgD5CwBp5yeA3OTph2iE4Fh7AZB+PoCDgPc4xvrVgPP5gyZdwCTCR4AMwTiZOu7BLPSprAIOWmW8gDZ7fLgmJ8b4BCCpbSA0YjHm7Rg7EZJj0LtAk9V6wOQewKRCaQCEVxrfQmxeXsNBjP0GQFK5LAEE4LiHfxgW+PgMD0FehdQoNYDYnQVaK4+L4JGVn28BOMgtd4GUImQCpD5kCMdr7FCJDP+bRS3FyFoO5CuSotz3cFqTHBVPsG9j4q13dUhdHXkuLz9UgkgSxAJpLqVVuciN9wJEpie+K0BkrgKRLeftIDcwQE4BIE+VZ4AAHFXQdzTGOOKH8/obYz/qLUh8BQivOF7UM5l4IAcw5p0AiS/gaIgx6Jf5QT+/FzsAktsAktCSAAjAUQHbp3mw2vBUduIYdwAkEYEMEIBjAP1oghQa/PyyIeQtGlaiACF4YNJ8Ug0edRKWThXgERk+K2WyoH5yckzGNCk2Q5nEd3Ntjx8EACj10D+D3AxpAnmRI9ML2U2X0rs/JKQnIdvGHZwu5QE2tO+RCk5N4gDBO5TjxMRZF3VdvNZyxpGHFUoirZZ2KucHiOzrkrb2YcF99wJEcC/sAMhUYRXSEvvOACBFAEhSeQQIwGFBn27ou6kEzm01IHJNSQIE8OiCvzf76ZrsGPtBvRABOMLR72Ud6ilDBBD5FBCp5U+AAB6NsO0HP70/HwMiVQINIABHU1bPOf38DElF2UmPasuv8OBAwvsENVQhA0G2Y1BU91xWW93ANo0LNo7Y+LSPmyWuaCYY0e9WAhJj4637WyatvEMljclrSoLEivHWhW2TV7UTvLDaYfunggdWFidavF+wfyR2tK5RjOcxAMlzon0E175OiQcBRKoBFJ8yRDIBkTEMkFrYPodXIb8BImHlCSCARyRPZCV5fgcAkbv8CRDITEgE5FFIpp+vxwGA3K0DHl3RZ1NJ3nMJIpsBkdZGAwQTe3v0uQZ//+Xn+70Kx6oTKAABPO5E36wSfo7kKRvqN4A4CgpCNeBRyCuKaEhLqX65U/
r/skpz0sY0WJDejV15K4TMTP5QiN04CnC8oLjxAhyhAMdVlMSQ92cDHqMAj0iNXFghUhqTHIDkXYAkVLCBVLTMSCI7xVlu8xdHvjtVIKLEg3zNq6Fi+wjuwwyAIeqk3XEZYLGWIXKoT/rGHsdy7aTGepYBcgDtepQXgAAe1TGZfFRKE1keqW4AEn8BZBobobNL6J6vAUSudAMOWun9q6TOyQ1EMgCRZkYBhGU0/0IuifvtEUT8DRDA4wNahZbSc/QIIl7BIzou9QmNCPNjbM/4QoLKAZ50lwAa/erPXxYruvICHO3YKF48ViWAo7EQ/4HJOQbw+JhtHpR9d7qn6Uw4jUknJS0JIHKgXUrGE1JCxcoAiWI4t7Obrl2BiGj3AEhIrdVFso9QwkZyX26hrGqi4lI3ASIWQKQdIEIAyQRAXi0PAAE86mASiSvliYwg8oonEPECIDYfDLfe3vc3tVRZgAe5P88obXBIEJnsyv3XC4CQB1F+Cd7vRYBIzdICSCnDQ4RIiGEAUYGHAgclnuJTMe0Hez/dzZ5WhwCOCMmVNzx0VvJDHPBXgDbrAI6mAjgiLTMXPapMygDHlmaJK9p7M9Huz7ZRmnfKb5XJKUx+Aki6CRChFUYrYdV0jiHixDUvFCHCBvQKaH+/aB9hB4DJDJACAGQGAELeWNvYDrIq2AECeIRj8hgfCJMYznU/ANLfDwApjWv5GQDp4GL1sTDQzhnvwQBAxCiAlMY9Hw2IRJc0QACPsQY6CPgq97iCiOcTVUGhDA8CxCIBJGeF/++um7B0SEFhoSV8VsoDAENRvYRlcRI8LpS1xX57owXp44VcWGGWmck9sW8zpzM5DHA8ZMSEC5DEAiTvK/YRgOS79qkZDYREiliNJH4oXKMmRNj+URmAmCBlG1agepCN+h+wGmvHjcs2VAtWgJDBHJPGqACbBDYBIlcHAUCKAJChLlRYd1IgaIABJIWCWcsqQPi+dwRESgwggEd/f9vVdArZX5r6BBCCBybaEZIN42VhtSHmmnqtTsLSimxEjwJAvgYgnADIvUIsSBTgMUUuaXsaS3SA43IBTLmAx7gmC5dHGTnp7svKoTTvTckOo6w2cH2vdEhdHcoQoUy7AyUouIII2TrIK2y2il3oB4YIAeQwAHJbEAPkBkwavnr+kJvvMshqluWQ3b5619jt9jINEL6O92iF52IV8lWAAaQIALmxjAMkUUuVZTRAAI+qaPenARP+WuH7yYBs9jFmZJ2WPcQbeOTzhBgvAWXuZfOW1pPSuVcEQE4SJACQWnJZW2w/2GBBelOGR1TIzORxbOdwUjqTJhrpTAwECUCRROVu/yhWa8VZ/wJEBnESRU2IdElbq5VEMRQg6SGkOiliD7NZlKEXAMkFQCYEI0AwsVHMQZKX4+5D37G5ubmtNNWnjrzLOY5kixfnfAIAGVxCAKFAvVWQ9yBvQt6ATIX85GsQH84xAfe5notVSGe02erheL+R7z/kbkz0bfPy8qOFtCZ0r+/giOVdPkLkPTVbiMEA2cdR9XS+4yCvQxYaFTRJkfuASEgJAGS+l3YeUndNxvgtMH6IxtixaDOMf5B5A5Phaqosl5OTEyejAg/S+3cUVVkAxxKVpIohgEdbVl9tV4NHQ8CDjhFyPgNvcYEmgIPSmdxUknmA/jyXEw2QjFTcdAGSTV0Xr215hlZEKhCpEJc6XwsiDJImAIkY+Eg2HCvZQQCQHwwGyBV4cabgAY+XhCavD3VOAGcg8ZAxKuORTMSxnsME00wl1uMRL+McPgI4PPZMIUMy+ozUa8RGnwWASBU/AmQTxrjL4XBEuMiHRSlTJqHtaW+9sXCvW7vxxvq3i4nZAfmUJhqME+JZvIaN7vdd3v4yBkBWASBN/QSQWRijk9PpDHeR5uQGjkEq8HEVUsOfAMEE38PLQNSFGLcuxtXjGnwL+p3QG7WvCyA0sVeMtw4XVxk15y65rxBLGfwKF4P/8gCQflr1QAggl89fNp
HsIYBHLyFSvA+7+v7MQDnWaEH6i6WZUG7PuexqAMbXbB+h5I7k+VUdMkCKYylwBREAJEZJY8KSqRjnI2en7gJEGgVTMkVMRs3Rd7nepTbltCLVi1eu5A5HT/TfZcQqxEeAHEbfwRhDT2LFu9HnoDeGdNzra9wApBHarVeZyKejb13vc1fZKmLcxV4A5Cjeww4GA2QeQRDg8Pj8s7OzH+CcY17ZnwCQpn4GyPd6I8wx3vMYz6vsCzheXQ7I1HPMR2WIeAyP6nMXPwB4WACPFnI6EQCkqwpAyP6xgj2sKHK8Dv8Sd7D9RInPyAM8PgI8okoTHgJEQgGRjpy4UVk90OpktFJOl92WiyGikc49Son/kAUA+QcAeSJYAJKXl+/N6iMffd72Fh4CRPpjnDM6ru9LNVuItwChCRV9vfoxAIg8jv5ZOo93GhBwG4OD+/oS2irqm7/wDvSlVaLvGXRtl+kNUmQ7SF+jAELvmqsVhxuI9OL8T94c90XZI8sogPDq44zO8/kvxqrgy/PEOK11rkS2ugVIYWGRKjzYjTccAJnjIUBiAJB8jvKuDFnO7TfzNqWwVItATL/N1QiHKfYR9qxaxvCYx+q8ArVMvBJASO11SPFUA0AKAZBvggggNKnM1vnyWzHJ+fyDAQAheI3VcdyNAEg7IwDC8GjsQ3r3qhgjxU8AqUDXCslAe0NXu3g/RtBqrjQAgvYD9aw6NCDS38PMz/KxVwAgtf0EkC9Yvejxd4pxDEmxz3aRPB33ookmQAgeleLTHhXtGwo88gsKafVRW1HhUEJCLYAAHqERs1O68/6NrK5Skh9S2vadAMegslDHASCJBUjeVOJHeEVFMBklpDW5CCISQA4qbrxK3XRA5BdAJFgA0lWP+yjaHsEEZ1iBLUDkKoy5wcNj5wMgIw3IhbUafVr7eu6AyJt6JlFPAcITdB3cGz/U8bA1w3n8onMVMhbvYrQvAEHbEd6uPFQgMk1vkB6ObwdArjAaIICHRS/QMM4AjGNYfRxKJqlDlfWiuAoR4WEBPAaI8Kj23eIHNeJAaBK9xgVAIgCQN/hX9zhOWlgcsQ14vFx//rKYslQMSEhj8o1gC1nOebsuQETJxCsBhGwfrSQ11p8AyFVBApChOicCQ1Yf0irkHR3H/0hWYxmVzt0LgDyAsU76AyD+rSZoW6TnmfsKEKMrEgIgETxp6l2F9BG9sQwCyB06xziNMaoY+Twx7tM6EjWu0QIIrT4y1eDBACH1VbE6p+bcJT+SMR2TphZAKgAgP7Ka5z02kjsAjtFluaocQFIdIJkp2EJoZdVfgMhcWlmcoISPAEgU7k+f9I3fH7XZCSjbBICcRbtXyjpAAI8YctPUY4wkLyGjrxMQIQDkeXgOaVS3I0AA0gNj/VZSAMG9J2eHe7n40tuCdx15hg3Bs+yCZ1ol2AHiwyrkMQAk0mCAvK8n6hxjfIsxog0GSF09aiwxJuTCIIDHTcqva8BjugQPS0y8tbngjfUMABLhAiAVARCxlgcBxAaAdCzjAIkEQB7nhIlFPZesW3zS7qAgwiFC5DrZiEIoNxYD5CcAhNqMFQBC7ryJQQCQZhg/Xcf4OZjEXvQDQHph7N0ensOvlIK9PACEgwupTG2SjiSL9EuUVFTj8Gxb4h0KCVKAUNnXwzoBMgMAqWYwQJbpSdNOBcswRqTR35AeGwxV7nQHkAddBBPmASD1AJAoNYCw/eN2QX1D6p4NlK4kSACiZCFeg+s/cf3S9e0liORzZcPHos5D5hfqu/nU2Y6UVFGAyNbeyzaElnGAdMX4AZU+w4NrzAJAHg5mgDA4HjOoeNfPeMb98C6FBRlALHqz+/oJIEfK0vcjuPO6BEh/CSBhAIhVUV9xPZAKGgCJBEC+5LG2s+H4JUpnEiQAeZoN6k/S9XdbvPY9TmcSDkAMFbITb5cAcoWkxtoPgFxXxgHSC+OfNAESOAABPBqj3TI/pCVZgvfpimABCEMkTee7sxsAudwogA
AelbwI6AsEeUsNINUFgHwg2z8AEMp7VQCAvO8GINEAyG7B2Ex2k94ASGEQAYSqIBJw9+Ie7FE8sLadzhQhUiQBpDr2TRMAkg2AvFXGAXI7uWmWMYAUASDvBiNASiCh4lFIK04bEgwAmabTfdZogLTF9lNlECAfaRnRt9LkBoD8omH/yAdAbtMCiM3ptAAelwsBeOStRZUB+wUZQCjzcHvI27gH2QDI9YILL1UhjFMBCMHlYdEOAoCkl3GADDUBEhgAATwoJ9rOEriHu/SmYjEBEnQAWaMFkLcUQzkg0oABQvmwBikuuwBIjAuAhAEgzwjxDzZ2Yb09yABCKizK5nuzqMZigFB690kyQP5nB1ko2kF+A0SqmgAxAeILQACPGl6kkinJrLwmQMoBQEQ7yAU3XjagT2T7x26hJroaQMj+kaDku7p8/rLtXBOkfzABBNd9DmBoeMruaGD5asFZAOR7ASCXK6VtVQDSHADZLwDkGAAyxASICRAfAfJuIKdGNwFSDgDCEAkFRC5y5ZUM6OluAELxHyeUVQwAEhfEAGkEgNQALDbi7/2ASAQDpDW2OTUA0hAA2aAY2gEQBwDyYRkGSB+Mn2sCpPQAAnhcie3f+3hfDkA2QuZAUiErOUV6gQkQEyB6AEJqrGS2g5wSALKeoKIY0NUAQokWMSG25slxH9lLAJCXgxggjU9RrfbpiV/g73+uX7r+ihO5xfEeDyj3QAUgtQGQeUqdELaDbDK9sEyA+ACQN7Dd6eX9iOPgwUh1u4qtEtcFWelLPfIABsj6UvbC0p3GpCwA5EVBjUXpNgggh9gD63UNgHShyHRMiGOEKnwOAKRTEAPkCgAkhCLO8XchAHIfR6BP41gQqwpAqgAg/xEi2UmNtQcQaVtO4kAo2nUp5FWNeiN+F1zjZACkdzAAhOM95nsxAfyJfjdTJmUd6Ut66c2BFeBxILrTmQRIHAitNieW1vfD8qgWQMgO0kEAyCgGiBKBfpsLgIRjMlzGrrv0KzsvyAHS5NT5AMI76T4AIEMBEHLV3c7Fsf6tApAoAOR5Ob07APJ4OYlEp1Qmnwbacy3DANGtvqJnjDG6eZkDqwr6f60zajlQAXKnF0kMAyESPQljVAyUb0crnXsW20EWUt4TASDXuQBIFCZDO6fz2FAnYamNy9qGBSNAuqStbcoBhO0tXy0ggEwAQBpytDnFvgx0AxCKq9nBaqwZ5SEXFh9jHRU9MgFiCEBuxLYdeutp+FIbhCsUrggCgMzRM3EHUC4sDFFYLdABMpvtIE7a5g4gkG6QLjwxroKsK0cAuZqhMQMyjO/BdMhQNwA5xO1oFfLTzekbyyJALBh/hM6P0C/5sMopQIZj2ykdE/l29O9gZuPNbkKFtrywn3UCQIxO564rG68/82EZCZDhQnLAG4QkiloA6QGZxOqr/6MaIOUQIPMhn3FddYqbecQNQChoczADZB8A0q2sAcSbeiB8nOUUu2ACxGeAPI9t2Tom8kXo37CsAcQP9UC8WX0ETD0Q8tzCWI301EEvSYBYAJCmAkDGewCQ6yGb+P9XkxG9HAJkBWce/hNSj3JlaQGE1Fa3rNi05mBObgMKLKT07gDISwEAkBNUcc7fFQn5WB/4WtJWHQb2aIwbYwJEdSKfhv7VyiJAfC1nK8BjDMY658X7uhAAqREIFQkFW4jhdZUwdlUSrwHCNdFDAZHfGBxbPADIjdTmsnlLjxQAQFQLpFzZQM4DZDndjxuXbVjN7rzPugHIOgCkEv4+SH8DIAkBAJAiTA6fl0BNdMPqoktp3btgzL2QzzE5mwC5dCJfgf7NyypA+B48B4hU8AEemV66f/uzJrpuNRaPN8XXuugSPCpS9mWIlZynfAXIJ1Ltcy2AkMvqmwyQ7xggk9kLq1ZBYWFIkHthKQWlkrG9AAB510OAbARA0G7hV+fVWCmbAZGQ0gQIv5S/YIJoqg8iec29TKXhQL/3cnNzfQUH1RenAklZfA0nMTkPKw
cA0WVEx7tgR/++ZRkgLOvQtwdAEuYZOHJocpysoyaKfN8dsvrKSIAwRL7XUVZWlIUYt66v6iyMM1IKahwkVh7UCxCyg9wjwOMEx4E8owIQirpeSPsBkCcBkBAGiLOcxIEoub+2YPtxAKSnC4BQHMgHDJAZDJDHGCAHAZBbShsg/KKnY5KoI6006p45c3a6zXbpCoW8erxchSjyI/rfqhckAEdd9JvAWWLla1iCCbqe6cZ7yRhTfVFj4b24Rs85+wkgitAYTwIklTXA0QBtXuGoel+CT78R3Xf9BJA7vF0dkQ0FY4/E2BE6oUHp5O+D7FGBF8G2prcAITtIDQEgrgIJC1nvbwdA6gAgVFDqfuoHgIwoB5Hon/E9OoPtOwGQUAAkXAMgFImewBUJi1Mi//jPmY7n7SApOQDI+EAACEsmp7eglcWvSslLvKiLAREjVyGiUIGfsRjnasAk7FJg5MVg3zVo8xJFw7r7RYm273uiyipvgYTo8y+ME+EFPG7zJl28HwEiCgU5roKs5nf2sFGpWHCeHUXvK38AhCEy35eIf77maThOXxwnTOMYlD7lWch8D+6PVSxf6zFABDXWOiUxIqcySVABSDFEAI8dXA+EViBtGCATy0EurDVKidvuS9alcj6sKA2AUC6sTQBInlITHQBphm3bARCygyw1GCCV8fFONjgVyBYApJ2GKqsvqUkMPB4tqddCfvBSR3wCk/TQYAWIL6lMqDY8VpXNPQTHtVykyquUKSUEEH+lvlFdffgJII28cS92IVn8/azib8kbFdk9rlRZ7gAyQViFFGokUyxWYwEgnwtFpWIAETsAsj7os/FOT6So80JsywJAxrgBCGXjPQyAnAFAhjJAqpJai9VYfwAiDQwESDg+3hcM/qDOACDPagCE4kJGBdgEsNidKqscJ1Ms5AmG1Dz9IL0gN0C6Q+6HvK833UeQAcROK1611Yc/AMIQ6e+DKssfQhCq4Q1AyA7SWzSkAyAODYBQNPoQASBRAEg61wOpALkjSOuB9OHrP457cQgAae8GIKyuSv0TALmKAUJ2kEEMkKMAyGCDVyG9jUy5fj4diW2GC4N6BI43JsAg8r7dbg9KgJSHdO6l+N6MECPPSwIgDJGxeqLTS0BStVRZLi8EEAnjUrYKKPIAkStVAJIPgLwmACQcABnJ9pEhkFuDtCLhOCVeBvfidwDEogUQsSIhALKVbCXKuIBIe7aD5AMgUwwGCOWryjD4w1oHiNRyAZG6OOb0AJoITgAgg4MYIA3pmQQ5QM75aB/Q+858DHhUcfX8/AUQhsgHtAIKoOc4XE2V5Q4goUotEIaBU/HEkm0glNcJEIlmgIilbb+jyn1BWhN9B9s/nN0Wr50jFJVSAwglWvyCDeiJ4rgASGMAZCOvQn4wMq0JPlxSKw03+OPaC4D0dePaGzArEZzvRwBIhWAFSAnVQxdVGnmlAJDbIStK6H1ZBXjUcff8/AkQhshz6JcTCCsQSHVvViBUzvZlYQVSoBjSJS+svbx/U52EpRUUNVb4rJQVXBf9PgCkIMhUWE/wNe/GfaCqhA+6AcgV2PY7JZxUDOgCQGIBkPH+sIMwRBr64I2l9iE4AZAJHsSHkJfQo6X4ERwn92J3nljBABCGyDC0OeLnezoIklbSyRQxoXdCnwr4e4Of4bEIx6rlyfPzN0AYImSTOlhK3w+t+kZ7ZUQXANJJsoOcUIlEv50z0NLfG+sCIgWFRRSBrhRXmgmA5AUJQJ4UkkZSqpe9uA8HAZAmbgDSkSLWAZD9AEh3CSBkSJ/iL4AwRK7Fh3zMwA8tUc2dVwMkV6B9QgmvOr7CBO1Rvq1gAYiwEvnbD/f0CN6fgU6nk46RWFrJFDko8Ft/qLM42tzj9CAlARCGSCT6/1dvuhMfJQ7iNqDY/cRTUBjOBaUu2DuoLkhhUVGkAJCukFqQnRdDpDAGEKEgRBsAYgsSgDyuxH1ADtI9ADy2iu1EgPRJ3/jTUVtxYOFYtn
/8AoBYXKiw/AUQUmV14VQFvr5cOfgY/s9TgAirkY56f73qlJOcxqRNeciF5QIilJ9sjpdum2oAiMP7Uz+QsvECJEOxb7dB780KHKML4BGm5/mVFEAEkND3Y/WzLSieweFRVgxPAEKG9DhJjfUlABKmUtK2Dv7epUAEEgshY3oRAOIAQEaXcYBUB0Bm8vUdg3zCAHnPBUC+B0Do7+1Rs1OdgMcEldWH4sbrl5xYEkiq4oN+y8tfM058BEsBDq9TgjNIWmGsjyCnDXjh6QNeijHvx4TsVYrrYAOIAJKrKd7Dy/gN8gKajnflSvrxEYjp3AERiqR+3ofYCQJHX1eeVoEEEAEkdbgq4W6DgiXpHRmhZefwFSBkSB8h5cWiVUU1wYW3a/EL6yw2ntcVIEK11Ktz++I64ADJKwBJTBkDhwXguALn/40S9wL5hf6m+A8A5E41gHDbHyGt6P8AyAFRfSXCgwGyGwBpUxLXhA+6Cj7sB3mpquXrTy/nAUgCFdMBOC4z8hwwGYZi3BYYn2JH5kK2uPkgT3FQ4XeQZ8hHH5NwaFn+UVISApDUxr16GPdsHj9PtUmH9OzkrTcZ70XfvLz82LJ0jYAJra7Jc2m9CxfYrZAZkEdwPy4HOELK+rMFTK6kbwHyNTsZuAoYJJsGeevNglAKoD7uIs2NAAgVlKolwMPOCRQflQEieGCJEKE073fz/4srHQIiOxssSB9UFh7QrsysWMDjTTacK8kjyWmgL4ERAPkbAKnhAiAHIR8wQNYq6quf/zlTRYLHX/6oCWKKKaaYUmL1QFzYQbbzZHeK//1bDSASRHZzG/q1vpSNznO4nKsTIEkDSDoHKDiiAI5hgnPAAQjVfM+FUALFq1h9tV7uKwGEgHsE8Mi+fun6VwV4TBbqou8z4WGKKaYEK0DIDvKppMYq0gKIApHwWSn1BIhsgxzmvFpkdFcm0DyA5KNGC9KjAuGG7DmXHQpwdMR5pfP5/QN5CqIYz2l7GORVBsjrbgBSxKuP3/ou32g5nJMbK8Oj97IN15kvoymmmBKsACF33kFC0OBOMTodAFE17GXl58sQ2SWodSIglHTxZzayHwNEXixleFQDPL7ma6SVxsdswxlA5xgdl/p398Xrqp+yO2qh3ToA5AwAcr0KQChL7+sCPDIBj9HnvbFMeJhiiinlCCAMkYjo815XBayOuuCVBYAs0fRUuBQiB9gOsrbhgvQISpcSMjP5TjKwF4MkPm1744XLbyrJm/DnuZxoy4ykkVzPvCgmzrqp6+K1Lc848iy8GiF4HOuUtqb4vP44e64FtlME+l4AJEIFIE0AkL+Fe7SFQTRJgMd+Ex6m6Pfcyu1PRbgo0SHVUPEmLbspppQGQMIwiW7kCfAvCg4UqxVC5gIk9dxAZK8MkQaACLU57ciLAkjGcVyJEyBJabJweVN/Xvy+rJxwgKOfYucAOP7qkLp6ENtASJXVkwzluO6TCjzYI+s2Ul91SVu7RAJHKMDRg20+yr05DXkH8roIDzmY0BRTPATIl4r7K6VoAUAqmvcluOTcuaw78XwP8TOeYGTZ2tIESCgm0ncVtRWkB9szRHsIeSi9VidhaUU1iITNKs6PtVdQYxFE1jdakN6MIWKxzEymNou4TW5sfNo4gCTKYHBYLN8kNWWjOB3nXEy89RXAI5RVWQBL4sDzKw/rcQUeDBDyypoEgBQCIKNo2/Yz5yhQsAnaz1axEZHtZ6IJD7cuvZH0i5p/Wb+PifIO876YACmnANklxY9cC4iESG1ewz6K55qA/f20CkgFEkBCAJDOwopjlDAxivmw6P+7AZEh8hiZeZdA5ADbPw4CIheC007ZHWEACf3630z7AZHDzRJXPGTEBe/PtsUCHu/zeeYBHPHtUzMqC3YQAsT/KfDoaF3TW4oJuRz7twEgJ3ouWdf+pN0RCXhMkNycDyhxL5BEwRZywISHJkAqUgZUJXAME+U0876YACmnADkmAWQgABEutTkl7J+C/V
EBDRASR0FBJCBCNT4Ka89bMrewqCgUE+kOniD3sJpGWZUUAAxL6iUs66oBkT+53T5y66VIdUBkvNi2eHKeuehR9oQCSKxbAJL2XoIjEuAYpsRzABw/tUvJ6CbYQUg11Rz7ljM8dgAenVQCCjsL5z2APcvE1cYrwirtsAiP65eu72F+ICZATICY4gYgk5VU7njG+wGHqiptyiRAwgGQxTQhAiDHKH0JANKFPZYKOHV7FcgXoloLcJgKkNQVIRI6K7mBAJHfFDtEpfi0bY0XLu8ggSTG8u2ij9k+kgeQTG+etMKjRHkHsm2hAAclhPydxq8Ybz0AcDwhGdErWmYkvc5wyY+Js84GPCoJ4AgBOMiG8ySPU8gAEe0cz3CRKeWack14mAAxAWKKlxDpjufbB2CI0NhfJgFCdhAljbkDEGkNiIREzk59WvnVXTdh6RMFhUUUSNiK03gUsZrqRP35y15WxjrryJMh8j3HhtjZiJ7aZOHya5X2x3PtoYDIVdiXzO2zAZJRLZNWquaxOZSTS3aORgy14rrugMe7bZNXhQq2kCiAgwpebVcxokcDGndh+4fsRSXWPnEq6ioAdAbgEAXIXWaZnrhWtoFExaUeNOFhAsQEiCkGA6ZMAoTsIC2UlUWteUvGcA6sSEBknjKp1klYOpgN5+Hhs1Luw7ajyuQLkOwASG6WIPIX76cJ+CrBboDVRloaQDJQAEkEQEKpUX5ltdZ+QOQOCR7RgMdrip0D4FgIcLRjVVYI9tFq4jXOqKukWCF4kUcWZc3NYJBdcFXm6PlcoQLhui5pa1uwEb0S4PGuGjx6LlnXMxBfwPz8/JgzZzKp/jK5hI6HvA15HS/jzTk5tlCdkz/ltGqO/k9B3uHxKNfOaMrAa7PlhgUjQOx2e3Oc6wuc2E655lewrZfD4Yj2ERQhGKcnjYt/hzscebUCASB4N+rguM8I10wyCe/SfZRfTf94OdUwHnkdvcFG4eLxsG2w0+ms5ENurMZURQ8yTjjPf9Ov+oKCgqgAAgHlghtDjiM6i02VPYAwRCge5AhNzgDI0gsJ25zOCoCIEmB4FBBpKXhgRQIkk7lGehGnMbE2XJDewllYaAmZmdxQgEgGBxleySCxCdlvpwIYg1stWlnH7iyIBEhGc2qVQu5H+akeUtRLAMeWdikZt1FUPKDRXYCDspKgJI8reFVzXAIAHTcV8rUQAEnxIPsAjoeVawtkeOCD7qvUQ8dL9rPNZqvpdBZEu8vGi7YbMVF0cD/pF6eI78lJ6lxl+/wFY94BkMjQaIXtnpbb/Z4rozFg7JPcvqsOx/1om8d9dmLCv8k1EByUBj1RuA9z5Ky7VFsd22/kpHSuzvcE2r2Bc6iuDQk7TZ4pyjUBEiN5ey/8/aNwHmuw70pvAIJ73paevfR8/6U3fgTvQ0v0s3qQ/XUB3om2cgZfFXDUxHhT3RQbo7IBkwCS6jrAQQWYNro5z2yu/XHRvcvKym6GffuFIlatPDkm+v0p3NsH0S/i4v1ZB4X9U7EtCn2uwt+blfOk4wEClzEcnlXuC7ZvxfbKvP0fD7+VZCnl+316vLTQfqx0/5oaCRCyg1A+qyIAJFcAiJzO/Wi9hGUtJDdeyo+1QIxi52jvKMiFlUil+LQMrDrC8woKLJaZiy7jlPArJDXSSQ5onCaqyhS3XHbRncOqMZvkakxBjUfklCycPfhtSC9II+mYWbju8Z3S1ly4F3+cVYcHVieHAmHlIQKEs9jeC9nmYerpnzFpNHUBDypZ+6qOlNEOWmFgQov1EiDTIC8J55eOifZyF/CwcMDdhZTgmPxfcQOQnmi3/X/tHa9J8IjG9rdcZHtVu4+bcC5XuwMIC1VvfFIuGestQHCv2xgEj/bot01P2n+8G0/g/YvQGK8TxtNTy2M7ubJSMSsX4KCJbxLBQcezWYPJvm5JAgQyBfKEnHXaYICQNuG48He6nhUK2u8R+q4rcpGx1xuAkB1ksAIAQORWESJiJt5K5y
HSWfLCCgVIrsP+n4RJ9yhn962jrB4YImGC6ioEK47a2DcU8h5n+T2skZ9LDHA8yGMeUtlHhvtPYuKtA9unZtSW0pkUKCo5XG8cwHHRZFUMjxmJE9Xg0X3Jul4BoqYSAZLH9ayV/9MveqpZPpTTQS/gdM8XXmh86J/pgAd9JK9D6Nd5D8jN/CIfkCsFKisRTGQN8feHDLd1SvCUUgEPsgyyGm1+xWT+IK7nFmHy3IMJuJ8LgLRAmxXSsWcDAhEuADJSGH8f/u4nrTwmq3ysP7DqajCEUqYnqEwOW3A+XTwACNU0F104D/Pq7b8ASGM9AGF4/CTV5RjjBTwoFfxGYRwbZCrGuhur2Rgch+7zAK7v8o9wrN14Xu1UxrsJ7cX6HZR6fBHkbgitZrvzO7lOrGOCPgcBkNYuAPKNSt0TSu3+H/7hNIKr+u0X051j3GRM+LVLECD7pdUBPeN16PcLJvlqbgBCK4s1kJXSGPTdrKVvha95OKumxR8ibeRYEg143Cbdx3uKvC1pqwEQCybU2sokDIC8p5GJV4FILiByrzzOWUdeZOis5FGsmlJWFeSN1V+JE4mNT1spQkRNjthyFbD0htwEeRjyX1ZV2aRVCx0rDsAY3i4lo5nkiUXVBh9ke0ixnSM6zvp9R+uaS7IFAx5V0faTQIaHCkCUF/xXfMQ3qrQlddTt4i84tF1ts9miVdRWD1xa3jY3RsO2EUtqCKFtDtq+6I0NBJNoR+zbLKix3nQBkGGK+kq8doCgqwY8wrH/S6FtErbVEQDyOLaJH+0R/D0UbdRsI5dj3wLp2F/jnCq5AYgiBM0bcb2R3hjRjYIHT/jDMdYJYZy7tdRTaFsVbWdwu5fQLlzaX4+egXBemXSdWisLQKGf+AOEyiKjbV2Vdk+I8GL14SC0jbi0bbaFoaJM0BMx4VcqQYAosoXAq+ZlpQUQPTYQ7K8lrcbe1/LokgCSIAD2ELaFGWoDUewgSvwHAPK9WiZeMZ071UMHRCaqQCQEECEV1RTBQJ3HxvTd7iByzFa8KqH+D7Bq7Jg0qZMh3Fox3vpy2+RV12rkwQq1zEii7MA7hH5/Ax6PAx5hKnEgLbkeugyPw4EEDzWA4EVbho+4kov24Wj/itB+JwDSQwJIe7TZJk60WvCQ4DBF6LMcfWp4ARCa5P8jTihqVQhZfaVAi6oenuT2+ZjcR2oApIMy6crqK/yfarqnC8c9im13ujaC2yPQLl5c0eG8HnMHEPz9Dq4zwlsvLNxXUrPNNwIePOm/h/GcPM7XeEdquWmPd+hsP7Srp7JvplCFsBge+flOdzaNQbwaVa65jwgc7A+VbHCnqDytK3UXg2QKzrO/WI2wpABCdhBXBnMjAMJt4oTVxAm0iXEDj3rSqmVikZuCU94ChMrZfq5M0pfNW1pHI517HSUwjw3nqfXnL7vkQk878kJDZiZfKUZtswqJAggLAJHVzRJXXCF4Yd2B7V+Jxm2hD6mlJgIafdokr4p2k86EqgzOF+0cMXHWdzqkrr4kcAfwoPQmdwoeZRfBQy0rbyABhF5EfMDNPehzq9DnLwDkFmn18Zjw4u7B/r4eemqR3vt37ncKE91wb7ywMLk+IEygv1FZWA311XI+z8W8nD/Dfb4kdZQKQB5RVhiXqq8cI8TVB/4/Vm3loQKRbqS+Eo3yOLdaWgAhOBIkvXXjZXjMMwoePOl/phzPE4C4McJvF20xgEcFDw3jc8WVHOBQU9g3WlQZUuVF7A/35hxLyIi+ARN5dTceVkYB5GbJXnefqxUFq6BF9ZXbZ+0tQEIwaXYVsvF+5yIbL3lg/UeIBzkKiPRRawuQhFvOZ+bdK9kqlH/3S5N3IU/on8bGW+9tmbSyjoe5sCpxOpMLdo6YeGsCwKFaThbwIJXVh3y8MgEPNS8scsH0oE9X9DmuAZDLsO/bi1VXNk9dfWn1MEH5ODHZTfcSIFRHfBe3c2ISfsmN+orGbEKGRMXDDAC5SoKHhW0xFwz0tO
oQ9n0i7PuJViueueLa5b57cG693HlheQMQNXiQF5iv2XrxzozFODYBSG+SC7gX4zxNPxx4DEwLzo46PKv6sY3tElsI9okrw/XY19Dbay0pLyx37rpGAYTbbRdUUunYFukCICeE9yfOnfrKa4AUf1jOYjXWEmEip1Qmz9RJWNpWBSJhgMjt7OmkRKfPbrAgXbXG9qniFCbJL7A6q0hFLRUPYDzRPGlFSy/SmQwRclVRSpO97VMzVH9F78rMiqDMu0L9E6dQkZHgcSRQ4eEngFxQX523Zdhe0xkv8ogw+aWLaiwdACH3zzjh45lFE7Wa+opWDfj7GYx9wQDOaqynJYC0JnuPMOZkZYXB6qtlwr4vyV7ieTyHfSj6KCq0PJzPECMBgucViXHC/QEPFx5YRyHT8S4MxvGrejjON8JKZhEAUl8HQBqg3x/KfQIkruPtVbHtN/G5YV8VEyAXtXtZUku1VjOK0+pEMszf7Mp4bgRAaBXSUsUdVil7mwBI3FcvYVl9yY1XdI09jjajGy1Ij1aBSIhl5iJq/5HoagtwzPA0jYmUzoTqemwUxjoLeDwJeFwyGXBVwk5CVcIiDiT8XoRHl7S1NwRyoJsfANID+5RfKeQnTt40y9n7w52sZF21oiZaA4A09CaQEJPwE8Ik+hMm0PbCPvLsSla8n0iNxNsfFFRU00Q1FtkzyK7B+45h30BhX0fFFZZtI2/qCwgsVmNtFewgLxgFEDYG14TMluDxNq34DAwevMiQriLk9vkB3o0uLlx3lwnnTQbvTZAMD98dAnimcH23FBQUUixJQ/y9Q9j+qLfqqyAGSIQUa0NOTxEqAEkXVirbsc2j++h7NK6zIDxyduq/Vdxki6TVCamMbuVcWbdzEJ/S5hhA8pIaSE7aHaEACUWnrxHaHwZIngNIotyAg+wcDThWROmbC3B83i4lo6oGODoKad6VVQdVTfykLMGjBADik2DsHQDIjV4CpKegxjqJifh+YR+5+h7mffFkzObtFHOyStFDK2osVlG9c7H6yt6kjADkCyW4Urq38bifdQ2OQL+WHQmcbp7tz3hHOsueWhJAfH13XgJAQgCQ+gpAeGXS2ZdrDEaAcNupwurCjm3REjzqSquUEe6M54YBRIgBoZxYzahWOCRNcIctUjF0/8BR4R9KrraOSvFpHzdeuLyFPD4bzwcKKqQCQGRby6SV/TVyYVE6k1c50WGx2qxivHV52+RVbVU8sWIsM5KGcyChfJ4DxGJQ0XGpR8sCPIIcILIaaxqBgIz8+P+bgpvva5Jq6xI1FkefL7hYfXXR6iSQAeLq/hq6ChFAQPfrOV5Ragbt4T0ZKbrx+hsgah5aJkAutO0sGdOfFu0b7GSi/DCg49b0WyoTj1+0fCfZPSi1yLuS6kgt2G+zkmZdmLx3xcanTWgiwQQgiQZIJgr2EcrOmwSQtGdwkJ2DQLNVGQ/g2A9wDJUM6QSYR7A/RbDNFB87Js76c9fFa/ucceSFYUXytggPsbhUeQaINzYQI3NhYSJ+TlBjrcHE3hDnV0NQX+3CBN1b6nORGougguPeiP//qaa+kgHCbSZ54oElQKK3sFoigDzlB4BQAOgjkFnSRDuc7D9+zItFtpdOHEi5RwLIUbxL16oBRK8NxIVtRFZhPQ+ARJkAUR1zg5CehEIvogSAiMGrUz0xnvsdIJIRPQQwqYlJmCLYpyqp2zUkT0UFRuquuZwAkby/YiFUUyROMq4v5wj1IiFJ4r84VQrZa4Zx0sTNKsfNATi+7pC6ugcb0GPLMjz8BJArsW/D/zypbHNLESA3o51S9vMgJvWbcH6dBfXVHEV9JfTpphiESY2Fv+vguE8JIFoHgLSRAFKZxhI9z2jVogMgrwnjn8Ux7zIYILn4exjFU2C81mIOLYoax/Ma5E+ICICgFd6zoicPjv2CsgrB/o+EeJIjON+rDQAITX4/Cc9mMQBSzwSI6piDlRojgjGd7t8wSX3Vwq/JFI0QSmcSOi
u5OdfXmKtSGtcToVXDSk61nqUCHUqOuMENrEh99k1MvPXO9qkZUYItpGJZh4efAFIV+0S31M3Yf1UpAaS+mPiQ07I8Jqivxqj0oV/MnwkpOV7l3ETK9XyqESMiQiATfw/xEB4Umb1QDKDEOTQ3ECDF8BABYbPlkkPA38Ikvh37ry3BbL1zhZUGufxG83aK3zkmTrTuggg9hMgMRf2CMQsBkLZGAITHuxcwCAsSgESJAZlkP2OALBVWJhvVDOwBBxCV+I+wkJnJXXi1kO4GHPlewEZcyaQBGKPbpWSo+vIzPMb/Dx7WY2URHv4ACEPkVim6faqcZdfFpB+Fc4g1CCD0i/dfwgexmT12FPVVH41+jwsTMLmA/irYRR7XiFLvR8GFouuwnKVXAyDPSgGIk+i8/REHIkWj/5/ya19RGaFdIx8m6bqUksZDgEwRVhoiQOh5bRCzHAAgV+qY3CujfbjKuV0nTfrfASK1PUiH3hbtYtwAZDRgEO3mvOqIOdwCFSCCMV15L8gm0l7Kyj3YU+N5QAFExYU3wjIz+Rb23NrlAgiZnD8rlb2kqJb5eJa3GEg9KsZbu7dNXuU2+EkNHnJNdBMgFwcTkncH2v3bHUQwAZF9YjYlOKSgN1cA4WN/i8nS4qkaS9L9z1FLcSKosX5T6bNeVl9JgYafSu3/i+2RLuDRn8qRukqo6EeA0PlOl2wS09C2mhcritvpHqP/N3g3artpW1GEhAgQ3j9SCSbk8/8RUGjuAcAoueJhLSM59idKz+Z9V/Eg2dnFoKBswGvQthUmfBEIv4uBn9jXwAU8WqHdTunYgQyQ2pIx/W/BdfeI3tVHwAJEcuOleBBKlng/px056sIYTwF/nwEYg1ov8iwqXfDECip4+AsgDJHOaLNPeBELOCdWMxUwxFDSQeWXPo/7oUptEBkgv2NC707uoGQMJs8bMWBQQ42lqb6S1FifqwBkqpr6SkiQSAkK5Rogq7CtH0ASJbRryOPnSOM/K64+/AkQhkgFsgn4ktaEEmmKAZaU2JC9qyqpwKM+OzAU8rGK0K6Xip1kjnQP/0bbIZjwI1XAcA07BmQrWQTQ7gqVdtWw/3e1GjTiuABHNVZznhTVsJjw6wtQSJBcldPJUQBtQoU2jdlzKVvlPSopgCQJ50lxNeSRFguhGjgvo0+kxthLNGqlvKvHeF5mAKLizkulbalc7rMclGhzoeraCJhMbJO86ib38EgadyE6Pc56vKzDw88AoZxYA1SKARVwSnLS+9NE8YuSg0pMX41xH1EBiIWr+4lt7cKH9DMm3A4qaqwJ0gesqb4SVhRPioZoVl896UEFwltEVZbkBbWRr13N7fR9nGuEpwWljCppq1IPxIa/B3pqVKc0NZziJV+6plP8g2AKyyp5MsX7MRnvUmUV0JBTwkqV+0ST+o8cFJnCaqECOaOxmAdLgkh3Uf2kknL/d7nOimDnCBfgcBNH2hepRN9nqLzz8VKQY0kB5L/S9WQK+d5Ook9FjbGHaBSTq+3NHFPmawMfseWGAyjdOK7kZxc2ECUz74vtUjKaCvCoBHhMEeHRIXV1n2Com+wvgAggaYa26Tr89zdiUuvqQs11jVwASeibjQn3aZUVxWAxbTv+vwTt6rkByHWiGotiUgAHj2J7KABRSNToTk6j7QPyyqOkAMIQeUCJslcSRaJPN53eVQ8ouag8EXK0wHtU082Y48SJ143YqYwu4BHtRtVVkzMjFHowJjkg3CPCQ4DIm0LtHC2hVfdkTNTUfk8pAKSGXGtH+l7u0KpCiL47pHv0naeR50EHEJUAQooDoZoi0zkCXst+clQKHAwqeDAMOuFjpl9em/BCfYEPt7oHfchVl5bHP5FBEgDp7KasLamE+nFRoGMqLzP9Wp1FtdY9MbZjEm0jpk5nOUuVADHhVlIBSBO2T/zIqUuecQ8BB6nVxrDhnVQd4wAGj2uLc3Gp63iVlalRwnekmHlXAyBUK+Vdit6mTLW49gEeAO
RlronyK/4dBRhUcL+SyH2ei0Kt4WONR79aOm0hpM56ykUAIUFqJt6dq92VsxXK2jbnFeRWldVBPr+3b2itOlyApA/6fiUWSBPkd3r2mOSruDGO90XbFSrnRe/z5xijsWI7Qds5XACL7m1fGUoAyHwu+ETX8yIm9mg3ABnA6VvWky3LxWqiOReQKpDq1LyvpcLifpOkVeUtnuS9KhcAkVKZhAAmdbhOeqJK3fMiIaniiWCCR2kJJqawM2fOtsSk1c6XcSit+enTZ5tiwmwc6NdMVQ5xri0BjLrl6VkDKh3wrJsBGKHGjJdTG+NR6dpYI88TQMGq+mx9p7PAq/MEIKgQVgeAISpQnwWgcDXOsYYnVQclO8gOVxl6yzVAVLLxhgEobQEMsncs4cJVqwCP/7RPzahqAsAUU0wJZgE8WkmeWE96YzwvlwAxxRRTTCnnAPlcigW5zJfxzJtqiimmmFI+4NFCWn187q3x3ASIKaaYYkpwA6M1IPE15C2uHXNS9EKDtPTWeG4CxBRTTDEluAHyuJRAsUgrpbsJEFNMMcUUU0SATFRxQ6b4kqeMgIcJEFNMMcWU4AXIlYDFWMh4VmONgNQw8hjmjTbFFFNMMcUr+X8sPxwJG1lalwAAAABJRU5ErkJggg==" alt="logo-teal.png" />
</div>
<div id="wrapper">
'''
def main():
    '''Interactive CLI: build a remediation report from Security Center 4.

    Prompts for server address and credentials, lets the user pick a
    repository and an optional asset list, queries plugin 66334 (Patch
    Report) details, writes <name>.md, then renders it to <name>.html
    using the html_head template defined above.
    NOTE(review): Python 2 code (print statements, raw_input, old except
    syntax, the file() builtin).
    '''
    # First we need to get the login information.
    print 'Security Center Remediation Report Generator\n----------------------------------------------'
    sc_address = raw_input('Address : ')
    sc_user = raw_input('Username : ')
    sc_pass = getpass('Password : ')
    try:
        sc = securitycenter.SecurityCenter4(sc_address)
        sc.login(sc_user, sc_pass)
    except:
        # NOTE(review): bare except hides the real cause (DNS, SSL, auth...).
        print 'Invalid Host or Account Information.'
        exit()
    # Next Determine how we want to limit the data.
    data = sc.assets()
    print 'Repositories\n------------'
    for repo in data['repositories']:
        print repo['id'], repo['name']
    repos = raw_input('Repository ID : ').strip()
    print '\nAssets\n------'
    for asset in data['assets']:
        print asset['id'], asset['name']
    asset = raw_input('Asset List ID : ').strip()
    try:
        # Resolve the chosen ids to display names; the asset list is optional.
        reponame = [r['name'] for r in data['repositories'] if r['id'] == repos][0]
        if asset != '':
            assetname = [a['name'] for a in data['assets'] if a['id'] == asset][0]
    except:
        # An unknown id makes the list comprehension empty -> IndexError.
        print 'Either the Repository ID or the Asset ID are invalid.'
        exit()
    report_name = raw_input('Report Output Filename : ')
    # Now to get the data
    # pluginID 66334 is the query filter; assetID is only added when chosen.
    filterset = {
        'pluginID': '66334',
        'repositoryIDs': repos,
    }
    if asset != '':
        filterset['assetID'] = asset
    print '\n* Querying Security Center...'
    try:
        details = sc.query('vulndetails', **filterset)
    except securitycenter.APIError, msg:
        print str(msg).replace('\\n','\n')
        exit()
    report = open('%s.md' % report_name, 'w')
    # Hosts with no remediation entries; excluded from chapter 2 below.
    skipper = []
    # Chapter 1: Discovered Hosts
    print '* Building Chaper 1'
    report.write('# Discovered Systems\n\n')
    report.write(' * __Repository :__ %s\n' % reponame)
    if asset != '':
        report.write(' * __Asset List :__ %s\n' % assetname)
    report.write('\n')
    report.write('|IP|NetBIOS|Operating System|Risk|Patches|Low|Medium|High|Critical|\n')
    report.write('|:-|:------|:---------------|:---|:------|:--|:-----|:---|:-------|\n')
    for item in details:
        info = sc.ip_info(item['ip'], [repos])['records'][0]
        rems = sc.query('sumremediation', repositoryIDs=repos, ip=item['ip'])
        if len(rems) > 0:
            print 'Building Summary for %s...' % item['ip']
            # One table row per host, hyperlinked to its chapter-2 anchor.
            report.write('|[%s](#%s)|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (item['ip'], item['ip'],
                                info['netbiosName'],
                                info['os'].replace('<br/>', ' '),
                                info['score'],
                                len(rems),
                                info['severityLow'],
                                info['severityMedium'],
                                info['severityHigh'],
                                info['severityCritical']))
        else:
            skipper.append(item['ip'])
    # Chapter 2: Detail
    print '* Building Chapter 2'
    # Regexes used to parse the plugin text:
    #   pout    - plugin output between <plugin_output> tags
    #   ms      - "KB..." bullet entries in the output
    #   fix1/2  - patch titles plus the action/impact blocks that follow them
    #   kbids   - numeric KB article ids; msids - "MSyy-nnn" bulletin ids
    #   msvulns - "(N vulnerabilit..." counts per bulletin
    pout = re.compile('<plugin_output>(.*)</plugin_output>')
    ms = re.compile(r'- {0,1}(KB[^\<\:\n]*)')
    fix1 = re.compile(r'- (.* \(\d{5,6}\):)((?:\n(?! -).*){1,})')
    fix2 = re.compile(r'\[ (.*) \]\n\n\+ Action to take: (.*)\n\n\+ Impact: (.*)')
    kbids = re.compile(r'KB(\d{6,8})')
    msids = re.compile(r'(MS\d{2}\-\d{2,3})')
    msvulns = re.compile(r'\((\d{1,3}) vulnerabilit')
    report.write('\n# Remediation Plan\n')
    for item in details:
        if item['ip'] in skipper:
            continue
        # We need to parse out all of the data using regex
        item['pOut'] = pout.findall(item['pluginText'])[0].replace('<br/>','\n')
        item['MSBulletins'] = ms.findall(item['pOut'])
        item['patch'] = fix1.findall(item['pOut'])
        [item['patch'].append(a) for a in fix2.findall(item['pOut'])]
        # And now to write the item in the chapter...
        report.write('\n\n## <a id="%s"></a>Remediation Plan for %s\n\n' % (item['ip'], item['ip']))
        if len(item['MSBulletins']) > 0:
            report.write('\n__To patch the remote system, you need to install the following Microsoft patches:__\n\n')
            for kba in item['MSBulletins']:
                entry = ' * '
                kbid = kbids.findall(kba)
                msid = msids.findall(kba)
                vnum = msvulns.findall(kba)
                if len(kbid) > 0:
                    entry += '[KB%s](http://support.microsoft.com/kb/%s) ' % (kbid[0], kbid[0])
                if len(msid) > 0:
                    entry += '([%s](http://technet.microsoft.com/en-us/security/bulletin/%s))' % (msid[0], msid[0])
                if len(vnum) > 0:
                    vnum = int(vnum[0])
                    if vnum == 1:
                        entry += ' (1 vulnerability)'
                    else:
                        entry += ' (%s vulnerabilities)' % vnum
                # Nothing matched at all: fall back to the raw bullet text.
                if entry == ' * ':
                    entry += kba
                report.write('%s\n' % entry)
        if len(item['patch']) > 0:
            report.write('\n__You need to take the following %d actions:__\n\n' % len(item['patch']))
            for patch in item['patch']:
                # 2-tuples come from fix1, 3-tuples from fix2.
                if len(patch) == 2:
                    report.write(' * %s\n' % patch[0])
                    report.write(' * %s\n' % patch[1].replace('\n', ' '))
                if len(patch) == 3:
                    report.write(' * %s\n' % patch[0])
                    report.write(' * __Action to take :__ %s\n' % patch[1])
                    report.write(' * __Impact :__ %s\n' % patch[2])
    report.close()
    # Now to Generate the PDF...
    print '* Saving HTML File...'
    with file('%s.html' % report_name, 'w') as html:
        html.write(html_head)
        html.write(markdown.markdown(file('%s.md' % report_name).read(), ['tables']))
        html.write('</div></body></html>')
if __name__ == '__main__':
    main()
|
from numpy import *
# n: desired vector length, read from the user ("insira o numero" = "enter the number")
n = int(input("insira o numero:"))
i = 0
v = zeros(n,dtype =int)
if(n==2):
    v = arange(n)
elif(n > 2):
    # NOTE(review): size(n) of a scalar is always 1, so this loop body runs at
    # most once; and v[n] indexes one past the end of v, raising IndexError for
    # any n > 2. Likely v[i] (and `while i < n`) was intended — confirm intent
    # with the author before fixing.
    while(i < size(n)):
        v[n] = v[i] - i
        i = i + 1
print(v[i])
from django.conf.urls import url
from . import views
app_name = 'communications'

# NOTE: the literal 'new/' route must precede the '<uuid>' routes below.
# r'^(?P<uuid>[\w-]+)/$' also matches the string 'new', and Django resolves
# URLs against the first matching pattern, so with 'new/' listed last the
# comm_new view was unreachable (requests for /new/ hit comm_detail with
# uuid='new' instead).
urlpatterns = [
    url(r'^new/$', views.comm_cru, name="comm_new"),
    url(r'^(?P<uuid>[\w-]+)/$', views.comm_detail, name="comm_detail"),
    url(r'^(?P<uuid>[\w-]+)/edit/$', views.comm_cru, name="comm_update"),
    url(r'^(?P<uuid>[\w-]+)/delete/$', views.CommDelete.as_view(), name="comm_delete"),
]
#
# File: run_RSA.py
# Author: Alexander Craig
# Project: An Analysis of the Security of RSA & Elliptic Curve Cryptography
# Supervisor: Maximilien Gadouleau
# Version: 2.2
# Date: 06/04/19
#
# Functionality: utilises other programs to generate and subsequently break RSA
# keys using a variety of algorithms, while collecting diagnostics
# to compare and check the results of each of these algorithms
#
# Instructions: used to run all other files to be run from the command line:
#
# CLI: python3 run_RSA.py -h (to see possible flags)
#
############ IMPORTS #########
# needed for pydocs to correctly find everything
import sys
sys.path.append('Programming/')
import argparse
from RSA import *
############ FUNCTIONS #########
def runSolver(keys, solver, name, verbose):
    """Run a single attack and report whether it recovered the private key.

    Executes solver.solve(), compares the recovered private exponent with
    the known keys.d, and returns a dict holding the outcome ("res"), the
    solver's timing ("time") and its iteration counter ("count").
    """
    if verbose:
        banner = "=" * 10
        print(banner, name, banner)

    solver.solve()                      # attempt to factor n / recover d
    succeeded = solver.d == keys.d      # correct iff the recovered d matches

    if verbose:
        print("Success!" if succeeded else "Fail!")

    return {"res": succeeded,
            "time": solver.time,
            "count": solver.count}
############ MASTER PROGRAM #########
def run(k = 10, brute = True, ferm = True, pRho = True, knj = True, pMinus = True, quad = True, verbose = True):
    """ creates a k-bit RSA key, cracks it with several algorithms, and generates
        statistics to compare their performance

        Args:
            k: bit length of the public modulus to generate.
            brute, ferm, pRho, knj, pMinus, quad: enable each attack.
            verbose: print progress banners and per-attack results.

        Returns:
            A tuple of six result dicts, one per attack in the order above
            (empty dict when that attack was disabled), or False when key
            generation failed a sanity check.
    """
    ############ KEY GENERATION #########
    if verbose:
        print("\n" + "="*10, "GENERATING", "="*10)

    keys = generate_RSA.KeyGen(k, verbose)  # create new instance
    sanity = keys.generateKeys()            # get key and primes

    if not sanity:                          # bad input (e.g. bit size too small)
        if verbose:
            print ("Please fix input and try again")
        return False

    ############ ATTACKS #########
    # One (flag, solver class, banner) entry per attack; order fixed because
    # it determines both execution order and the order of the returned tuple.
    attacks = [
        (brute,  brute_force.BFSolver,        "BRUTE FORCE"),
        (ferm,   fermats.FFSolver,            "FERMAT'S METHOD"),
        (pRho,   pollard_rho.RhoSolver,       "POLLARD'S RHO"),
        (knj,    knj_factorisation.KNJSolver, "KNJ FACTORISATION"),
        (pMinus, pollard_p_minus_1.PSolver,   "POLLARD'S P-1"),
        (quad,   quadratic_sieve.QSolver,     "QUADRATIC SIEVE"),
    ]

    results = []
    for enabled, solverClass, title in attacks:
        res = {}
        if enabled:
            # each solver is constructed from public key info only
            solver = solverClass(keys.n, keys.e, verbose)
            res = runSolver(keys, solver, title, verbose)  # check solver
        results.append(res)

    return tuple(results)
def test(k = 50):
    """Repeatedly run the quadratic sieve attack until it first fails.

    Used to probe for the case / bit size at which the sieve breaks down.
    """
    while True:
        # only the quadratic sieve is enabled; [-1] picks its result dict
        outcome = run(k, False, False, False, False, False, True, verbose = True)[-1]
        if not outcome['res']:
            break
############ COMMAND LINE INTERFACE #########
if __name__ == '__main__':
    # Command-line front end: flags select which attacks to run.
    parser = argparse.ArgumentParser()
    # NOTE(review): -v SUPPRESSES output — it is forwarded as `not args.verbose`
    # below, which is why the help text reads "turns output off".
    parser.add_argument("-v", "--verbose", help="turns output off", action="store_true")
    parser.add_argument("-k", "--bitsize", help="bitlength of public key", action="store", type=int, default=10)
    parser.add_argument("-bf", "--bruteforce", help="turns bruteforce decryption on", action="store_true")
    parser.add_argument("-ff", "--fermats", help="turns fermats decryption on", action="store_true")
    parser.add_argument("-pr", "--pollard_rho", help="turns pollard_rho decryption on", action="store_true")
    parser.add_argument("-knj", "--KNJ_factorisation", help="turns KNJ_factorisation decryption on", action="store_true")
    parser.add_argument("-pp", "--pollard_p_minus_1", help="turns pollard_p_minus_1 decryption on", action="store_true")
    parser.add_argument("-qs", "--quadratic_sieve", help="turns quadratic_sieve decryption on", action="store_true")
    parser.add_argument("-a", "--all", help="turns all on", action="store_true")
    parser.add_argument("-t", "--test", help="runs failure test", action="store_true")
    args = parser.parse_args()
    if args.test:
        # failure-probing mode: loop the quadratic sieve until it fails
        test(args.bitsize)
    elif len(sys.argv) == 1:
        # default run
        run()
    elif args.all:
        # every attack enabled
        run(args.bitsize, True, True, True, True, True, True, not args.verbose)
    else:
        # only the attacks whose flags were given
        run(args.bitsize, args.bruteforce, args.fermats, args.pollard_rho, args.KNJ_factorisation, args.pollard_p_minus_1, args.quadratic_sieve, not args.verbose)
|
a = ["Bayam", "Kangkung", "Wortel", "Selada"]
print("""
MENU :
A. Tambah data Sayur
B. Hapus data Sayur
C. Tampilkan data Sayur""")
masukan = input("Pilihan Anda:")
while masukan != 'C' :
if masukan == 'A' :
input_sayur = input("Masukkan nama sayur yang akan ditambahkan :")
if input_sayur in a :
print("data sudah ada")
else :
a.append(input_sayur)
elif masukan == 'B' :
hapus_sayur = input("Masukkan nama sayur yang akan dihapus :")
if hapus_sayur not in a :
print("data tidak ditemukan")
else :
a.remove(hapus_sayur)
masukan = input("Pilihan Anda:")
else :
if masukan == 'C' :
print(a)
|
"""
script for generating the testvector file for use in CPU_tb.v
Format will be:
{inM[WIDTH], instruction[WIDTH], reset}_{outM[WIDTH], writeM, addressM[WIDTH], pc[WIDTH]}
NOTE that clk and reset should be set internally in the testbench
Algorithm:
- Follow the alu approach of blasting this with random numbers and seeing if both the python
model and the verilog model agree. This begs the question what's testing what? We're also
now running into an unforeseen complication wherein we can only load A-instructions that also
happen to have values that have a c1-c6 that would be a valid ALU function. This is because
of how the ALU logic is hardcoded in the python simulation. The verilog in ALU.v is a high
enough level of abstraction that I'm not sure what it does on an invalid instruction. It would
be interesting to check this model out in that Xilinx tool that allows you to see the block/wiring
diagram and/or a signal-probe simulation, since that might give you more insight into how the high
level verilog is interpreted.
This all makes me consider whether this is moment where formal verification might come into
play -- I've heard that it's used effectively for digital design. In some sense it would also
be a test of whether one's formal verification problem was set up correctly. I imagine that it
might be better because formal verification might make it easier to specify and test all the possible
edge cases. Perhaps I'm being lazy, but trying to do that using my python simulations for a machine as
complex as the CPU is seems extremely daunting.
"""
from simulators.cpu import CPUSimulator
from simulators.alu import ALUSimulator, UnkownALUFunction
from random import randint
OUTPUT_FILE = "tvs/CPU.tv" # expects to be run from directory above this
N = 100000
i = 0
# Flag in case you only want to generate A instructions to help
# narrow down the debugging process
GENERATE_ONLY_A_INSTRUCTIONS = False
cpusim = CPUSimulator()
def gen_random_instruction() -> str:
    """
    Build a random Hack instruction, respecting the rules instructions must
    play by: A-instructions pass through untouched, while C-instructions get
    their two fixed bits forced to "11" and a valid ALU function code spliced
    into the c-bit field, so the python ALU model can interpret them.
    """
    word = cpusim.int_to_bin_str(randint(-32768, 32767), cpusim.WIDTH)

    if GENERATE_ONLY_A_INSTRUCTIONS:
        # Force the opcode bit to 0 so every word is an A-instruction.
        return "0" + word[1:]

    if word[0] == "0":
        # A-instruction: any 15-bit value is legal, return it right away.
        return word

    # C-instruction: bits 1-2 are fixed at "11" per specification 4.2.3
    # The C-Instruction, and bits 4-10 must hold a function the ALU knows.
    fixed = word[0] + "11" + word[3:]
    func = ALUSimulator.funcs[randint(0, len(ALUSimulator.funcs) - 1)]
    return fixed[0:4] + func + fixed[10:]
with open(OUTPUT_FILE, "w") as f:
while i < N:
inM = randint(-32768, 32767)
instruction = gen_random_instruction()
if i == 0:
reset = True
else:
# TODO: make reset = bool(randint(0, 1)), this
# current setup is just easier for debugging purposes
reset = False
try:
f.write(cpusim.build_line(inM, instruction, reset))
i += 1
except UnkownALUFunction as e:
continue
|
import os

# Directory whose entries are listed along with their timestamps.
# Hoisted so the path is written once instead of being re-concatenated
# (original repeated the literal with manual '\\' joining).
BASE_DIR = 'C:\\Users\\Matheus\\Desktop\\psp'

lista = os.listdir(BASE_DIR)
print(lista)
print('Data criação e alteração em segundos')
for nome in lista:
    caminho = os.path.join(BASE_DIR, nome)
    # getctime/getmtime return seconds since the epoch as floats
    print(os.path.getctime(caminho))
    print(os.path.getmtime(caminho))
    print('---------------------')
from fractions import Fraction

# Read a decimal radius and snap it to the nearest 1/64, shown as a fraction.
s = float(input('Decimal Radius? '))
# Nearest 64th: numerator = round(s * 64) over a denominator of 64.
# Equivalent to the original round(s/(1/64), 0)*(1/64) float dance, but exact
# by construction (integer numerator/denominator, auto-reduced by Fraction).
s = Fraction(round(s * 64), 64)
print(s)
l = input('Press Enter to close this script')
|
def twoStrings(s1, s2):
    """Return "YES" if the two strings share at least one character, else "NO"."""
    shared = set(s1) & set(s2)
    return "YES" if shared else "NO"
# Manual smoke test: "hello"/"world" share 'l' and 'o'; "hi"/"world" share none.
if __name__ == '__main__':
    print(twoStrings("hello", "world"))
    print(twoStrings("hi", "world"))
|
''' all routing for accounts app '''
from django.urls import path
from . import views
app_name = 'accounts'  # URL namespace: reverse with 'accounts:<name>'
urlpatterns = [
    path('', views.index, name='index'),
    # Detail view and password change are keyed by the integer account id.
    path('<int:account_id>/', views.accountlist, name='accountlist'),
    path('<int:account_id>/newpass/', views.pass_change, name='pass_change'),
]
|
# Minimal file-backed task manager: tasks are loaded one-per-line from the
# file named by the first CLI argument, edited interactively, and written
# back when the user quits (choice 4).
scelta = 0
task = []
from sys import argv
fp = argv[1]
txt = open(fp)
for strng in txt.read().splitlines():
    task.append(strng)
txt.close()  # done reading; the file is reopened for writing on exit

while scelta != 4:
    print("Task Manager")
    print("1. Insert a new task (a string of text)")
    print("2. Remove a task (by typing a substring of its content)")
    print("3. Show all existing tasks, sorted in alphabetic order")
    print("4. Close the program")
    scelta = int(input("Make your choice: "))
    if scelta == 1:
        task.append(input("Insert task's content: "))
    elif scelta == 2:
        # BUG FIX: test the live list length instead of a separate n_task
        # counter (the original never decremented it after removals).
        if len(task) <= 0:
            print("No tasks")
        else:
            ctrl = input("Insert task's substring: ")
            # BUG FIX: build a filtered list instead of calling remove()
            # while iterating the same list, which skipped the element
            # immediately following each removal.
            task = [strng for strng in task if ctrl not in strng]
    elif scelta == 3:
        if len(task) <= 0:
            print("No tasks")
        else:
            print(sorted(task))
    elif scelta == 4:
        print("The End")
        # Persist the (possibly edited) task list back to the same file.
        with open(fp, "w") as txt:
            for strng in task:
                txt.write(strng + "\n")
# -*- mode: python -*-
# PyInstaller spec file: bundles game_inspector.py into a single console
# executable, shipping the native SDK libraries listed in `datas` plus
# Kivy's SDL2 / GLEW / GStreamer binary dependencies.
from kivy_deps import sdl2, glew, gstreamer
block_cipher = None
# Analysis: what to scan and which extra data/binaries to collect.
a = Analysis(['game_inspector.py'],
             pathex=[''],
             binaries=[],
             datas=[('no_screenshot.png', '.'), ('fps_inspector_sdk\\python\\fps_inspector_sdk\\lib', 'fps_inspector_sdk\\lib'), ('screen_recorder_sdk\\python\\screen_recorder_sdk\\lib', 'screen_recorder_sdk\\lib'), ('game_overlay_sdk\\python\\game_overlay_sdk\\lib', 'game_overlay_sdk\\lib')],
             hiddenimports=['win32timezone'],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# One-file build: scripts, binaries, data and the Kivy dependency trees
# are all packed into the executable itself (runtime_tmpdir=None).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          *[Tree(p) for p in (sdl2.dep_bins + glew.dep_bins + gstreamer.dep_bins)],
          name='game_inspector',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
|
import pytest
from institutionevolution.individual import Individual as Ind
from institutionevolution.deme import Deme as Dem
from institutionevolution.population import Population as Pop
import gc
class TestTechnology(object):
def test_deme_technology_is_right_format(self):
self.pop = Pop(fit_fun='technology', inst='test/test')
self.pop.numberOfDemes = 2
self.pop.initialDemeSize = 3
#self.fakeDeme.publicGood = 20
self.pop.initialPhenotypes = [0.5] * 4
self.pop.createAndPopulateDemes()
assert self.pop.demes[0].technologyLevel is self.pop.initialTechnologyLevel
self.pop.clearDemeInfo()
assert self.pop.demes[0].technologyLevel is not None
assert type(self.pop.demes[0].technologyLevel) is float
assert self.pop.demes[0].technologyLevel >= 0
gc.collect()
def test_deme_has_consensus_policing_level(self):
self.fakeDeme = Dem()
try:
tmp = getattr(self.fakeDeme, "politicsValues")
get = tmp['consensus']
except AttributeError as e:
assert False, "where is the policing consensus?"
gc.collect()
# def test_deme_policing_consensus_of_right_format(self, instantiateSingleIndividualsDemes):
# gc.collect()
# self.fakepop = instantiateSingleIndividualsDemes(2)
# self.fakepop.clearDemeInfo()
# self.fakepop.populationMutationMigration()
# self.fakepop.update()
# for dem in self.fakepop.demes:
# assert dem.policingConsensus is not None, "No value in the policing consensus"
# assert dem.policingConsensus >= 0, "Policing consensus shouldn't be negative"
# assert type(dem.policingConsensus) is float, "Policing consensus should be float, not {0} ({1})".format(type(dem.policingConsensus),dem.policingConsensus)
# if dem.demography > 0:
# assert dem.policingConsensus == dem.meanPhenotypes[1], "Group size: {0}, phenotypes: {1}".format(dem.demography, [i.phenotypicValues for i in self.fakepop.individuals if i.currentDeme == dem.id])
# else:
# assert dem.policingConsensus == 0, "It would seem we have a format issue: deme mean phenotypes are {0}".format(dem.meanPhenotypes)
def test_technology_fitness_function_exists(self, getFitnessParameters):
self.indiv = Ind()
self.indiv.phenotypicValues = [0.5,0.2,0.3]
self.indiv.resourcesAmount = 5
self.indiv.neighbours = [0,2]
try:
self.pars = getFitnessParameters("technology")
self.indiv.reproduce("technology", **{**{'fine':0.4,'investmentReward':0.6},**self.pars})
except KeyError as e:
assert False, "{0}".format(e)
gc.collect()
# def test_individuals_return_goods(self, getFitnessParameters):
# self.indiv = Ind()
# self.pars = getFitnessParameters("technology")
# self.indiv.reproduce("technology", **self.pars)
# assert self.indiv.punishmentFee is not None
# assert type(self.indiv.punishmentFee) is float
# assert self.indiv.punishmentFee >= 0
# def test_returned_goods_get_calculated_and_in_right_format(self, instantiateSingleIndividualsDemes):
# self.fakepop = instantiateSingleIndividualsDemes(2)
# self.fakepop.clearDemeInfo()
# self.fakepop.populationMutationMigration()
# self.fakepop.update()
# for dem in self.fakepop.demes:
# assert dem.returnedGoods is not None, "No value in the effective public good"
# assert dem.returnedGoods >= 0, "Effective public good shouldn't be negative"
# assert type(dem.returnedGoods) is float, "Effective public good should be float, not {0}".format(type(dem.effectivePublicGood))
# # resources = 0
# # for ind in self.fakepop.individuals:
# # if ind.currentDeme == dem:
# # ind.
# # assert dem.returnedGoods ==
# def test_individual_returns_resources(self, getFitnessParameters):
# ndemes = 3
# initdemesize = 2
# pars = getFitnessParameters('technology')
# fitfun = 'technology'
# phen = [0.5] * 3
# ## WHEN THERE IS NO POLICING, NO GOODS ARE RETURNED
# self.fakepopNoPolicing = Pop(fit_fun=fitfun, inst='test')
# self.fakepopNoPolicing.fit_fun = fitfun
# self.fakepopNoPolicing.fitnessParameters = pars
# self.fakepopNoPolicing.nDemes = ndemes
# self.fakepopNoPolicing.initialDemeSize = initdemesize
# self.fakepopNoPolicing.initialPhenotypes = phen
# self.fakepopNoPolicing.migrationRate = 0
# self.fakepopNoPolicing.fitnessParameters.update({'p':0})
# self.fakepopNoPolicing.createAndPopulateDemes()
# self.fakepopNoPolicing.clearDemeInfo()
# self.fakepopNoPolicing.populationMutationMigration()
# self.fakepopNoPolicing.updateDemeInfo()
# collectGoods = [0] * self.fakepopNoPolicing.numberOfDemes
# for ind in self.fakepopNoPolicing.individuals:
# collectGoods[ind.currentDeme] += ind.resourcesAmount * ind.phenotypicValues[0]
# for dem in range(self.fakepopNoPolicing.numberOfDemes):
# assert self.fakepopNoPolicing.fit_fun == 'technology'
# assert self.fakepopNoPolicing.fitnessParameters['p'] == 0
# assert self.fakepopNoPolicing.demes[dem].progressValues['effectivePublicGood'] == self.fakepopNoPolicing.demes[dem].publicGood
# assert self.fakepopNoPolicing.demes[dem].progressValues['effectivePublicGood'] == collectGoods[dem]
# ## WHEN THERE IS POLICING, GOODS MUST BE RETURNED
# self.fakepopPolicing = Pop(fit_fun=fitfun, inst='test')
# self.fakepopPolicing.fitnessParameters = pars
# self.fakepopPolicing.nDemes = ndemes
# self.fakepopPolicing.initialDemeSize = initdemesize
# self.fakepopPolicing.initialPhenotypes = phen
# self.fakepopPolicing.migrationRate = 0
# self.fakepopPolicing.fitnessParameters.update({'p':0.8})
# self.fakepopPolicing.createAndPopulateDemes()
# self.fakepopPolicing.clearDemeInfo()
# self.fakepopPolicing.populationMutationMigration()
# self.fakepopPolicing.updateDemeInfo()
# collectGoods = [0] * self.fakepopPolicing.numberOfDemes
# for ind in self.fakepopPolicing.individuals:
# collectGoods[ind.currentDeme] += ind.resourcesAmount * ind.phenotypicValues[0]
# for dem in range(self.fakepopPolicing.numberOfDemes):
# assert self.fakepopPolicing.demes[dem].progressValues['effectivePublicGood'] > collectGoods[dem] * (1-self.fakepopPolicing.fitnessParameters['p']), "goods are not returned after policing"
# def test_effective_public_good_of_right_format(self, instantiateSingleIndividualsDemes):
# self.fakepop = instantiateSingleIndividualsDemes(2)
# self.fakepop.fit_fun = 'technology'
# self.fakepop.clearDemeInfo()
# self.fakepop.populationMutationMigration()
# self.fakepop.updateDemeInfo()
# for dem in self.fakepop.demes:
# assert dem.progressValues['effectivePublicGood'] is not None, "No value in the effective public good"
# assert dem.progressValues['effectivePublicGood'] >= 0, "Effective public good shouldn't be negative"
# assert type(dem.progressValues['effectivePublicGood']) is float, "Effective public good should be float, not {0}".format(type(dem.effectivePublicGood))
    def test_technology_fitness_fct_returns_value(self, getFitnessParameters):
        """The 'technology' fertility function must return a value (not None)."""
        self.ind = Ind()
        self.ind.resourcesAmount = 5
        self.pars = getFitnessParameters('technology')
        # Deme-level info normally injected by the population before reproduction.
        infoToAdd = {}
        infoToAdd['n'] = 10
        infoToAdd['xmean'] =[0.3]
        infoToAdd['x'] = [0.6]
        infoToAdd['fine'] = 0.2
        infoToAdd['investmentReward'] = 0.4
        try:
            self.ind.fertility('technology',**{**self.pars,**infoToAdd})
        except TypeError as e:
            # A None return from the fitness function surfaces as this exact
            # float() TypeError downstream; anything else is a different bug.
            # NOTE(review): message-string matching is CPython-version fragile.
            if str(e) == "float() argument must be a string or a number, not 'NoneType'":
                assert False, "technology fonction returns nothing!"
            else:
                assert False, str(e)
        gc.collect()
def test_technology_fitness_fct_takes_args(self, getFitnessParameters):
self.ind = Ind()
self.pars = getFitnessParameters('technology')
self.ind.resourcesAmount = 1
try:
self.ind.fertility('technology',**{**{'fine':0.2,'investmentReward':0.4},**self.pars})
except TypeError as e:
assert False, "technology fitness function does not yet take arguments, fix this!"
gc.collect()
def test_initial_deme_technology_is_not_null(self):
self.pop = Pop(inst='test/test')
self.pop.createAndPopulateDemes()
assert type(self.pop.demes[0].technologyLevel) is float, "initial technology level info missing"
assert self.pop.demes[0].technologyLevel > 0, "technology level cannot be null or negative"
gc.collect()
    def test_deme_technology_level_gets_updated_with_individual_investments(self, getFitnessParameters):
        """Running a full lifecycle must change the deme's technology level."""
        self.pars = getFitnessParameters('technology')
        self.pop = Pop(fit_fun='technology', inst='test/test')
        self.pop.numberOfDemes = 2
        self.pop.initialDemeSize = 10
        self.pop.initialPhenotypes = [0.5] * 4
        self.pop.fitnessParameters = self.pars
        self.pop.createAndPopulateDemes()
        # snapshot before the lifecycle; compared after clearing below
        demeTech = self.pop.demes[0].technologyLevel
        self.pop.lifecycle()
        self.pop.clearDemeInfo()
        assert demeTech != self.pop.demes[0].technologyLevel, "the technology level has not changed!"
        gc.collect()
    def test_public_good_gets_updated(self):
        """After production, a deme's publicGood must be a non-negative float."""
        self.pop = Pop(fit_fun='technology', inst='test/test')
        self.pop.numberOfDemes = 2
        self.pop.initialDemeSize = 10
        self.pop.initialPhenotypes = [0.5] * 4
        self.pop.createAndPopulateDemes()
        # NOTE: the call order below mirrors the population's generation cycle
        # (clear -> mutate/migrate -> pre-production update -> produce -> post).
        self.pop.clearDemeInfo()
        self.pop.populationMutationMigration()
        self.pop.updateDemeInfoPreProduction()
        self.pop.populationProduction()
        self.pop.updateDemeInfoPostProduction()
        assert type(self.pop.demes[0].publicGood) is float, "publicGood must be created due to individual investments during reproduction"
        assert self.pop.demes[0].publicGood >= 0, "public good cannot be negative"
        gc.collect()
    def test_technology_updates_with_correct_number(self):
        """Technology must follow tech * (atech + ((1-p)*PG)^(1-betaTech)) / (1 + btech*tech)."""
        self.pop = Pop(fit_fun='technology', inst='test/test')
        self.pop.numberOfDemes = 2
        self.pop.initialDemeSize = 10
        self.pop.fit_fun = 'technology'
        self.pop.initialPhenotypes = [0.5] * 4
        self.pop.createAndPopulateDemes()
        assert self.pop.demes[0].technologyLevel == self.pop.initialTechnologyLevel, "wrong technology level assigned to deme when created"
        self.pop.clearDemeInfo()
        assert self.pop.demes[0].technologyLevel == self.pop.initialTechnologyLevel, "wrong technology level after first clearing"
        self.pop.populationMutationMigration()
        self.pop.updateDemeInfoPreProduction()
        self.pop.populationProduction()
        self.pop.updateDemeInfoPostProduction()
        # calculate new technology level as it should be
        publicGood = self.pop.demes[0].publicGood
        tech = self.pop.demes[0].technologyLevel
        tech_new = tech * (self.pop.fitnessParameters['atech'] + ((1 - self.pop.fitnessParameters['p']) * publicGood) ** (1 - self.pop.fitnessParameters['betaTech'])) / (1 + self.pop.fitnessParameters['btech'] * tech)
        self.pop.populationReproduction()
        # clearDemeInfo() applies the update; compare against the hand calculation
        self.pop.clearDemeInfo()
        assert self.pop.demes[0].technologyLevel == tech_new, "wrong value for new technology level."
        gc.collect()
    def test_individual_can_produce_its_own_resources(self, instantiateSingleIndividualsDemes, getFitnessParameters):
        """produceResources() must exist and strictly increase an individual's resources."""
        self.args = getFitnessParameters('technology')
        self.pop = instantiateSingleIndividualsDemes(2)
        self.pop.fit_fun = 'technology'
        self.pop.fitnessParameters.update(self.args)
        self.pop.initialPhenotypes = [0.5] * 4
        self.pop.individualResources = 0
        # no policing, so production is not taxed away
        self.pop.fitnessParameters['p'] = 0
        self.pop.createAndPopulateDemes()
        # start the focal individual from zero resources
        self.pop.individuals[0].resourcesAmount = 0
        assert hasattr(self.pop.individuals[0], "produceResources"), "put your farmers to work!"
        self.resBEFORE = self.pop.individuals[0].resourcesAmount
        self.pop.clearDemeInfo()
        self.pop.populationMutationMigration()
        self.pop.updateDemeInfoPreProduction()
        self.ind = self.pop.individuals[0]
        self.deme = self.pop.demes[self.ind.currentDeme]
        self.ind.produceResources('technology', **{**self.pop.fitnessParameters,**self.deme.progressValues})
        assert self.ind.resourcesAmount > self.resBEFORE, "that one did not get the point of production: it didn't increase its amount of resources!"
        gc.collect()
    def test_individual_resources_increase_with_technology(self, getFitnessParameters):
        """All else equal, a higher technology level must yield more resources."""
        #up_dict = {'civilianPublicTime': 0, 'labourForce': 10}
        phen = [0.5] * 4
        res = 0
        # First Individual
        self.ind1 = Ind()
        self.pars = getFitnessParameters('technology')
        #self.pars.update({'civilianPublicTime': 0, 'labourForce': 10, 'technologyLevel': 2})
        tech1 = 2.4
        tech2 = 5.9
        # expected production: n^(-alphaResources) * tech^alphaResources
        res1 = (self.pars['n'] ** (-self.pars['alphaResources'])) * (tech1 ** self.pars['alphaResources'])
        res2 = (self.pars['n'] ** (-self.pars['alphaResources'])) * (tech2 ** self.pars['alphaResources'])
        assert res1 < res2
        self.pars.update({'tech': tech1, 'p':0})
        self.ind1.phenotypicValues = phen
        self.ind1.resourcesAmount = res
        self.ind1.pars = self.pars
        self.ind1.produceResources('technology', **self.ind1.pars)
        assert self.ind1.resourcesAmount == res1
        # Second Individual: identical apart from the higher technology level
        self.ind2 = Ind()
        self.pars.update({'tech': tech2})
        self.ind2.phenotypicValues = phen
        self.ind2.resourcesAmount = res
        self.ind2.pars = self.pars
        self.ind2.produceResources('technology', **self.ind2.pars)
        assert self.ind2.resourcesAmount == res2
        assert self.ind1.resourcesAmount < self.ind2.resourcesAmount, "ind1 knows 2 and gets {0}, ind2 knows 5 and gets {1}, when really those with more knowledge should get more resources, all else being equal".format(
            self.ind1.resourcesAmount,self.ind2.resourcesAmount)
        gc.collect()
    def test_group_labour_force_is_calculated_and_given_to_individual_instance(self):
        """Demes must expose a progressValues dict whose entries get filled over a cycle."""
        self.pop = Pop(fit_fun='technology', inst='test/test')
        self.pop.numberOfDemes = 3
        self.pop.initialDemeSize = 20
        self.pop.createAndPopulateDemes()
        self.deme = self.pop.demes[0]
        assert hasattr(self.deme, "progressValues"), "make dict"
        assert type(self.deme.progressValues) is dict
        # keys expected in every deme's progressValues mapping
        progressKeys = ["fine","investmentReward"]
        for key in progressKeys:
            assert key in self.deme.progressValues
        self.pop.clearDemeInfo()
        for pheno in self.pop.demes[0].meanPhenotypes:
            assert pheno is not None, "none phenotypes before migration"
        self.pop.populationMutationMigration()
        for pheno in self.pop.demes[0].meanPhenotypes:
            assert pheno is not None, "none phenotypes before update"
        self.pop.updateDemeInfoPreProduction()
        self.pop.populationProduction()
        self.pop.updateDemeInfoPostProduction()
        # after a full update cycle, the progress values must be populated
        self.demeAFTER = self.pop.demes[0]
        for key in progressKeys:
            assert self.demeAFTER.progressValues[key] is not None
        # deme labour force = total private time: (demography - nleaders)(1-T1) + nleaders(1-T2)
        # where T1 and T2 is effective time spent in debate by civilian and leader respectively
        gc.collect()
def test_production_increase_function(self):
#pars = getFitnessParameters('technology')
self.pop = Pop(fit_fun='technology', inst='test/test')
self.pop.numberOfDemes = 2
self.pop.initialDemeSize = 5
self.pop.createAndPopulateDemes()
self.pop.clearDemeInfo()
self.pop.populationMutationMigration()
self.pop.updateDemeInfoPreProduction()
self.pars = self.pop.fitnessParameters
for ind in self.pop.individuals:
deme = self.pop.demes[ind.currentDeme]
infoToAdd = {}
infoToAdd["tech"] = deme.technologyLevel
infoToAdd["n"] = deme.demography
infoToAdd["xmean"] = deme.meanPhenotypes
infoToAdd["pg"] = deme.publicGood
infoToAdd["x"] = ind.phenotypicValues
# assert deme.progressValues["labourForce"] is not None, "labour force is none!"
# assert deme.progressValues["labourForce"] != 0, "labour force is null!"
assert deme.technologyLevel is not None, "technology is none!"
fine = deme.publicGood * self.pars['p'] / deme.demography
benef = ((deme.publicGood * (1 - self.pars['p'])) ** self.pars["betaTech"]) / deme.demography
resourcesProduced = deme.demography ** (-self.pars['alphaResources']) * infoToAdd['tech'] ** self.pars['alphaResources']
ind.produceResources(self.pop.fit_fun, **{**self.pop.fitnessParameters,**infoToAdd})
assert ind.resourcesAmount == resourcesProduced, "ind produced {0} instead of {1}".format(ind.resourcesAmount, payoff)
gc.collect()
    def test_fitness_function_returns_correct_value(self):
        """fertilityValue must match the hand-computed technology fitness formula."""
        self.pop = Pop(fit_fun='technology', inst='test/test')
        self.pop.numberOfDemes = 3
        self.pop.initialDemeSize = 5
        self.pop.createAndPopulateDemes()
        # full pre-reproduction cycle so demes carry demography / public good info
        self.pop.clearDemeInfo()
        self.pop.populationMutationMigration()
        self.pop.updateDemeInfoPreProduction()
        self.pop.populationProduction()
        self.pop.updateDemeInfoPostProduction()
        for ind in self.pop.individuals:
            #assert self.pop.demes[ind.currentDeme].progressValues['technologyLevel'] > 1, "technology level too low: {0}".format(self.pop.demes[ind.currentDeme].progressValues['technologyLevel'])
            #assert ind.resourcesAmount > 0, "not enough resources to reproduce: {0}".format(ind.resourcesAmount)
            infoToAdd = {}
            infoToAdd['n'] = self.pop.demes[ind.currentDeme].demography
            infoToAdd['xmean'] = self.pop.demes[ind.currentDeme].meanPhenotypes
            infoToAdd['tech'] = self.pop.demes[ind.currentDeme].technologyLevel
            infoToAdd['pg'] = self.pop.demes[ind.currentDeme].publicGood
            infoToAdd['x'] = ind.phenotypicValues
            ind.reproduce('technology', **{**self.pop.fitnessParameters, **infoToAdd, **self.pop.demes[ind.currentDeme].progressValues})
            # Hand calculation mirrored from the model:
            #   fine  : per-capita share of the policed public good
            #   benef : per-capita return on the un-policed public good
            #   payoff: expected net payoff given cooperation level x[0] and audit prob q
            fine = infoToAdd['pg'] * self.pop.fitnessParameters['p'] / infoToAdd['n']
            benef = ((infoToAdd['pg'] * (1 - self.pop.fitnessParameters['p'])) ** self.pop.fitnessParameters["betaTech"]) / infoToAdd['n']
            payoff = (1 - self.pop.fitnessParameters['q']) * (1 - infoToAdd['x'][0]) * ind.resourcesAmount + self.pop.fitnessParameters['q'] * ((1 - infoToAdd['x'][0]) * ind.resourcesAmount - fine) + benef
            w = (self.pop.fitnessParameters['rb'] + payoff) / (1 + self.pop.fitnessParameters['gamma'] * infoToAdd['n'])
            assert ind.fertilityValue == w, "wrong fitness calculation for individual, should return {0}".format(w)
        gc.collect()
def test_individuals_reproduce_after_production(self, getFitnessParameters):
self.params = getFitnessParameters('technology')
self.params.update({'p':0,'tech':10.5})
self.ind = Ind()
self.ind.neighbours = [1,2]
self.ind.phenotypicValues = [0.5] * 3
res = (self.params['n'] ** (-self.params['alphaResources'])) * (self.params['tech'] ** self.params['alphaResources'])
assert res > 0, "no resources produced"
fine = self.params['pg'] * self.params['p'] / self.params['n']
benef = ((self.params['pg'] * (1 - self.params['p'])) ** self.params["betaTech"]) / self.params['n']
payoff = (1 - self.params['q']) * (1 - self.ind.phenotypicValues[0]) * res + self.params['q'] * ((1 - self.ind.phenotypicValues[0]) * res - fine) + benef
f = (self.params["rb"] + payoff) / (1 + self.params["gamma"] * self.params["n"])
self.ind.produceResources('technology',**self.params)
self.ind.reproduce('technology',**{**{'fine':fine,'investmentReward':benef},**self.params})
assert self.ind.fertilityValue == f, "wrong fertility value"
self.ind2 = Ind()
self.ind2.neighbours = [1,2]
self.ind2.phenotypicValues = [0.5] * 3
res2 = (self.params['n'] ** (-self.params['alphaResources'])) * (self.params['tech'] ** self.params['alphaResources'])
fine2 = self.params['pg'] * self.params['p'] / self.params['n']
benef2 = ((self.params['pg'] * (1 - self.params['p'])) ** self.params["betaTech"]) / self.params['n']
payoff2 = (1 - self.params['q']) * (1 - self.ind2.phenotypicValues[0]) * res2 + self.params['q'] * ((1 - self.ind2.phenotypicValues[0]) * res2 - fine2) + benef2
assert self.params['q'] * (self.params['pg'] * self.params['p'])/self.params['n'] == 0
assert (1 - self.params['q'] * self.params['d'] * self.params['p']) == 1
assert (1 - self.ind.phenotypicValues[0]) == 0.5
assert res2 > 0, "no resources produced"
f2 = (self.params["rb"] + payoff2) / (1 + self.params["gamma"] * self.params["n"])
assert f2 == f, "all being equal, the fertility values should be the same"
self.ind2.produceResources('technology',**self.params)
self.ind2.reproduce('technology',**{**{'fine':fine2,'investmentReward':benef2},**self.params})
assert self.ind2.fertilityValue == f, "wrong fertility value"
self.params.update({'p':0.7})
self.ind3 = Ind()
self.ind3.neighbours = [1,2]
self.ind3.phenotypicValues = [0.5] * 3
res3 = (self.params['n'] ** (-self.params['alphaResources'])) * (self.params['tech'] ** self.params['alphaResources'])
fine3 = self.params['pg'] * self.params['p'] / self.params['n']
benef3 = ((self.params['pg'] * (1 - self.params['p'])) ** self.params["betaTech"]) / self.params['n']
payoff3 = (1 - self.params['q']) * (1 - self.ind3.phenotypicValues[0]) * res3 + self.params['q'] * ((1 - self.ind3.phenotypicValues[0]) * res3 - fine3) + benef3
assert res3 > 0, "no resources produced"
f3 = (self.params["rb"] + payoff3) / (1 + self.params["gamma"] * self.params["n"])
self.ind3.produceResources('technology',**self.params)
self.ind3.reproduce('technology',**{**{'fine':fine3,'investmentReward':benef3},**self.params})
assert self.ind3.fertilityValue == f3, "wrong fertility value"
gc.collect() |
#-*- coding: utf-8 -*-
# URL routing for the "usuario" (user) app.
# NOTE(review): `django.conf.urls.defaults`, `patterns()` and string view
# references belong to legacy Django (deprecated in 1.8, removed in 1.10) —
# confirm the project's Django version before modernising these routes.
from django.conf.urls.defaults import *
from basiccrud.views import *
urlpatterns = patterns('usuario',
    ## - User registration and listing (original: "Cadastro e listagem de usuarios")
    url(r'^listagem/$', 'views.usuario_list', name='usuario.listagem'),
    url(r'^cadastro/$', 'views.usuario', name='usuario.cadastro'),
    url(r'^cadastro/(?P<id>\d+)/$', 'views.usuario', name='usuario.cadastro'),
    url(r'^recuperar_senha/$', 'views.recuperar_senha', name='usuario.recuperar_senha'),
)
|
import pymongo
import datetime
class MongoDB:
    """Thin data-access wrapper around the local MongoDB "app_v0" database."""
    # Class-level connection shared by every instance (pymongo clients pool
    # connections and connect lazily).
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = client["app_v0"]
    # Fetch a user account document (original comment in Chinese).
    def getAccount(self, type="lixinren"):
        """Return the LAST account document whose "type" equals `type`, or {}.

        NOTE(review): the loop keeps only the final cursor result — if at most
        one document per type is guaranteed this equals find_one; confirm
        before simplifying.
        """
        account = self.db["account"]
        result = account.find({"type": type})
        json = {}
        for x in result:
            json = x
        return json
    # Update a user account's cookie (original comment in Chinese).
    def updateAccount(self, params):
        """Set the "cookie" field on the account matching params["type"]."""
        account = self.db["account"]
        query = {"type": params["type"]}
        newvalues = {"$set": {"cookie": params["cookie"]}}
        x = account.update_one(query, newvalues)
        # Log message (Chinese): "cookie updated successfully".
        print(x.modified_count, "修改cookie成功")
    # Fetch the default configuration (original comment in Chinese).
    def getDefaultSetting(self):
        """Return the first document of "defaultSetting" (or None if empty)."""
        col = self.db["defaultSetting"]
        result = col.find_one()
        return result
    # Daily 10:00 job: insert the day's index data (original comment in Chinese).
    def updateIndexDataByDay(self, data):
        """Bulk-insert `data` (list of documents) into "indexData" and log counts."""
        col = self.db['indexData']
        result = col.insert_many(data)
        # Log message (Chinese): "insert result ----> done: N rows total, inserted M".
        print(datetime.datetime.now().strftime(
            '%Y-%m-%d %H:%M:%S'), '插入数据结果---->完成:总共'+str(len(data))+'条,插入', len(result.inserted_ids))
    # Fetch and aggregate the periodic-investment records
    # (original Chinese comment says "add" but the method only reads).
    def getInvestRecord(self, parmas):
        """Group all "investRecord" documents by the `sort` key of `parmas`.

        sort == 'code': one entry per fund code with summed money/units and the
        implied average price. NOTE(review): raises ZeroDivisionError if the
        summed unit count is 0 — confirm that cannot occur in the data.
        sort == 'date': one entry per date with the documents, total money and
        the number of funds bought that day.
        Any other sort value returns an empty list.
        """
        col = self.db['investRecord']
        result = col.find()
        json = {}
        # Default grouping key is 'name'; the parameter-name typo (`parmas`)
        # is part of the public interface and is kept.
        sort_type = parmas.get('sort', 'name')
        for i in result:
            # ObjectId is not JSON-serialisable; stringify it up front.
            i['_id'] = str(i['_id'])
            if i.get(sort_type) in json:
                json[i.get(sort_type)].append(i)
            else:
                json[i.get(sort_type)] = []
                json[i.get(sort_type)].append(i)
        arr = []
        if sort_type == 'code':
            for i in json:
                item = json[i][0]
                ijson = {
                    'name': item.get('name'),
                    'code': item.get('code'),
                    'num': 0,
                    'money': 0,
                }
                for j in json[i]:
                    ijson['money'] += float(j.get('money', 0))
                    ijson['num'] += float(j.get('num', 0))
                # Average unit price implied by the accumulated totals.
                ijson['price'] = ijson['money']/ijson['num']
                arr.append(ijson)
        elif sort_type == 'date':
            for i in json:
                item = json[i][0]
                ijson = {
                    'date': i,
                    'code': json[i],
                    'money': 0,
                    'codeNum': len(json[i])
                }
                for j in json[i]:
                    ijson['money'] += float(j.get('money', 0))
                arr.append(ijson)
        return arr
# db = MongoDB()
# print(db.getInvestRecord({'sort': 'date'}))
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
# Reproducible synthetic data: three Gaussian blobs of 50 points each,
# centred at (2, 2), (0, -2) and (-2, 2).
np.random.seed(13579)
blob_offsets = [[2, 2], [0, -2], [-2, 2]]
X = np.concatenate([np.random.randn(50, 2) + off for off in blob_offsets])
print(type(X), X.shape)
print(X[:5])
print(X[50:55])
print(X[100:105])
# Candidate values for the number-of-clusters hyper-parameter.
cluster_counts = [2, 3, 4, 5]
colors = ['c', 'm', 'y', 'k', 'r']
markers = ['.', '*', '^', 'x', 's']
for k in cluster_counts:
    model = KMeans(n_init=1, n_clusters=k)
    model.fit(X)
    # Inertia: total squared distance between each sample and its centre.
    print("inertia: ", model.inertia_)
    # One colour/marker per fitted cluster.
    for label in range(k):
        members = X[model.labels_ == label]
        plt.scatter(members[:, 0], members[:, 1],
                    c=colors[label], marker=markers[label])
    plt.show()
|
import sys
def solve(N):
    """Return the last number counted before falling asleep, or None for INSOMNIA.

    Code Jam 2016 qualification "Counting Sheep": count N, 2N, 3N, ... and
    accumulate every decimal digit seen; the answer is the first multiple at
    which all ten digits 0-9 have appeared. N == 0 never shows a new digit
    (INSOMNIA), signalled here by returning None.
    """
    if N == 0:
        return None
    seen = set()
    k = 0
    while True:
        k += 1
        M = k * N
        # Accumulate the decimal digits of this multiple (M >= 1 here).
        while M:
            seen.add(M % 10)
            M //= 10
        if len(seen) == 10:
            return k * N

def main():
    """Read A-large.in and print one 'Case #i: ...' line per test case.

    BUG FIX: the original body was Python 2 only (`xrange`, integer `/`);
    it is ported to Python 3 (`range` via enumerate, floor division) with
    the per-case logic extracted into solve() so it can be tested.
    """
    with open("A-large.in", "r") as f:
        T = int(f.readline())  # case count; the remaining lines are the cases
        cases = [int(line.strip()) for line in f]
    for i, N in enumerate(cases, start=1):
        last = solve(N)
        if last is None:
            sys.stdout.write("Case #" + str(i) + ": INSOMNIA")
        else:
            sys.stdout.write("Case #" + str(i) + ": " + str(last))
        sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
import pandas as pd
import numpy as np
import json
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
from preprocess.utils import clean_text, lemmatize, important
# Build the cleaned NHI drug table: lemmatise ingredients, reduce each drug
# name to its most informative tokens (by idf), and export the result.
drug = pd.read_csv('./data/nhi_drug.csv', encoding='big5')
drug['Ingredient'] = drug['Ingredient'].apply(lemmatize)
drug['DrugExtract'] = drug['DrugName'].apply(clean_text)
# Bag of words over every token of every extracted drug name.
bow = [token for extract in drug['DrugExtract'] for token in extract.split()]
# Inverse document frequency: log(total token count / per-token count).
idf = np.log(len(bow) / pd.Series(bow).value_counts()).to_dict()
# Keep only the 6 most informative tokens of each name.
drug['DrugExtract'] = drug['DrugExtract'].apply(lambda name: important(name, idf, 6))
drug = drug[['INSORDERID', 'DrugName', 'DrugExtract', 'Ingredient', 'ATCcode', 'GenericName']]
print('Done ! \n')
drug.to_csv('./data/drug.csv', index=False, encoding='big5')
# ID = {
# 'AC33023100',
# 'AC34348100',
# 'A028947212',
# 'A043869277',
# 'AC412691G0',
# 'AC412701G0',
# 'A018062100',
# 'AA58181100',
# 'AB58181100',
# 'AC58181100',
# 'AC59884157',
# 'AC303091G0',
# }
# print(drug.loc[[i for i, k in enumerate(list(drug['INSORDERID'])) if k in ID]])
|
import os, re, sys
import numpy as np
from scipy.ndimage import generate_binary_structure, iterate_structure
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from scipy.interpolate import RectBivariateSpline
from lsst.ts.wep.cwfs.Tool import padArray, extractArray, ZernikeAnnularGrad, ZernikeAnnularJacobian
from lsst.ts.wep.cwfs.lib.cyMath import poly10_2D, poly10Grad
from lsst.ts.wep.cwfs.Image import Image
class CompensationImageDecorator(object):
# Constant
INTRA = "intra"
EXTRA = "extra"
    def __init__(self):
        """
        Instantiate the class of CompensationImageDecorator.
        """
        # Wrapped Image object; unknown attribute lookups are delegated to it
        # through __getattr__.
        self.__image = None
        # Parameters used for transport of intensity equation (TIE)
        self.sizeinPix = None        # stamp dimension in pixels (square image)
        self.fieldX = None           # donut x-position on the focal plane (degree)
        self.fieldY = None           # donut y-position on the focal plane (degree)
        self.image0 = None           # backup of the initial image (see updateImage0)
        self.fldr = None             # field radius hypot(fieldX, fieldY), floored at 1e-8 in setImg
        self.atype = None            # defocal type: "intra" or "extra"
        self.offAxis_coeff = None    # polynomial coefficients for the off-axis correction
        self.offAxisOffset = None    # defocal distance used when fitting offAxis_coeff
        self.caustic = False         # True once the image is detected to be within caustic
        # Mask-related parameters, reset in setImg — presumably the pupil (pMask)
        # and computational (cMask) masks; confirm against the mask-building code.
        self.pMask = None
        self.cMask = None
    def __getattr__(self, attributeName):
        """
        Delegate unknown attribute lookups to the wrapped Image object.

        __getattr__ is only invoked after normal attribute lookup fails, so
        attributes set directly on this decorator (fieldX, atype, ...) are
        served normally and everything else falls through to self.__image.
        Arguments:
            attributeName {[str]} -- Name of attribute or function.
        Returns:
            [object] -- The looked-up attribute of the wrapped image.
        """
        # NOTE(review): if this runs before setImg() populated self.__image
        # (or during copy/pickle, when instance state may be absent), the
        # delegation fails with AttributeError/recursion — confirm acceptable.
        return getattr(self.__image, attributeName)
def setImg(self, fieldXY, image=None, imageFile=None, atype=None):
"""
Set the wavefront image.
Arguments:
fieldXY {[float]} -- Position of donut on the focal plane in degree.
Keyword Arguments:
image {[float]} -- Array of image. (default: {None})
imageFile {[string]} -- Path of image file. (default: {None})
atype {[string]} -- Type of image. It should be "intra" or "extra". (default: {None})
Raises:
TypeError -- Error if the atype is not "intra" or "extra".
"""
# Instantiate the image object
self.__image = Image()
# Read the file if there is no input image
self.__image.setImg(image=image, imageFile=imageFile)
# Make sure the image size is n by n
if (self.__image.image.shape[0] != self.__image.image.shape[1]):
raise RuntimeError("Only square image stamps are accepted.")
elif (self.__image.image.shape[0] % 2 == 1):
raise RuntimeError("Number of pixels cannot be odd numbers.")
# Dimension of image
self.sizeinPix = self.__image.image.shape[0]
# Donut position in degree
self.fieldX, self.fieldY = fieldXY
# Save the initial image if we want the compensator always start from this
self.image0 = None
# We will need self.fldr to be on denominator
self.fldr = np.max((np.hypot(self.fieldX, self.fieldY), 1e-8))
# Check the type of image
if atype.lower() not in (self.INTRA, self.EXTRA):
raise TypeError("Image defocal type must be 'intra' or 'extra'.")
self.atype = atype
# Coefficient to do the off-axis correction
self.offAxis_coeff = None
# Defocal distance (Baseline is 1.5mm. The configuration file now is 1mm.)
self.offAxisOffset = 0
# Check the image has the problem or not
self.caustic = False
# Reset all mask related parameters
self.pMask = None
self.cMask = None
def updateImage0(self):
"""
Update the backup of initial image. This will be used in the outer loop iteration, which
always uses the initial image (image0) before each iteration starts.
"""
# Update the initial image for future use
self.image0 = self.__image.image.copy()
    def imageCoCenter(self, inst, fov=3.5, debugLevel=0):
        """
        Shift the weighting center of donut to the center of reference image with the correction of
        projection of fieldX and fieldY.
        Arguments:
            inst {[Instrument]} -- Instrument to use.
        Keyword Arguments:
            fov {[float]} -- Field of view (FOV) of telescope. (default: {3.5})
            debugLevel {[int]} -- Show the information under the running. If the value is higher,
                                the information shows more. It can be 0, 1, 2, or 3. (default: {0})
        """
        # Calculate the weighting center (x, y) and radius
        x1, y1 = self.getCenterAndR_ef()[0:2]
        # Show the co-center information
        if (debugLevel >= 3):
            print("imageCoCenter: (x, y) = (%8.2f,%8.2f)\n" % (x1, y1))
        # Calculate the center position on image
        # 0.5 is the half of 1 pixel
        sensorSamples = inst.parameter["sensorSamples"]
        stampCenterx1 = sensorSamples/2. + 0.5
        stampCentery1 = sensorSamples/2. + 0.5
        # Shift in the radial direction
        # The field of view (FOV) of LSST camera is 3.5 degree
        offset = inst.parameter["offset"]
        pixelSize = inst.parameter["pixelSize"]
        # NOTE(review): 1e-3 and 10e-6 appear to be reference offset (m) and
        # reference pixel size (m) used in the fit of this scaling — confirm
        # against the instrument configuration before changing.
        radialShift = fov*(offset/1e-3)*(10e-6/pixelSize)
        # Calculate the projection of distance of donut to center
        radialShift = radialShift*(self.fldr/(fov/2))
        # Do not consider the condition out of FOV of lsst
        if (self.fldr > (fov/2)):
            radialShift = 0
        # Calculate the cos(theta) for projection
        I1c = self.fieldX/self.fldr
        # Calculate the sin(theta) for projection
        I1s = self.fieldY/self.fldr
        # Get the projected x, y-coordinate
        stampCenterx1 = stampCenterx1 + radialShift*I1c
        stampCentery1 = stampCentery1 + radialShift*I1s
        # Shift the image to the projected position.
        # np.roll wraps pixels around the stamp edge; the shift is assumed
        # small relative to the stamp size.
        self.__image.updateImage(np.roll(self.__image.image, int(np.round(stampCentery1 - y1)), axis=0))
        self.__image.updateImage(np.roll(self.__image.image, int(np.round(stampCenterx1 - x1)), axis=1))
    def compensate(self, inst, algo, zcCol, model):
        """
        Calculate the image compensated from the affection of wavefront.
        Arguments:
            inst {[Instrument]} -- Instrument to use.
            algo {[Algorithm]} -- Algorithm to solve the Poisson's equation. It can be done
                                by the fast Fourier transform or serial expansion.
            zcCol {[float]} -- Coefficients of wavefront.
            model {[string]} -- Optical model. It can be "paraxial", "onAxis", or "offAxis".
        Raises:
            RuntimeError -- Number of terms of normal/ annular Zernike polynomials does
                        not match the needed number for compensation to use.
        """
        # Check the condition of inputs
        numTerms = algo.parameter["numTerms"]
        if ((zcCol.ndim == 1) and (len(zcCol) != numTerms)):
            raise RuntimeError("input:size",
                            "zcCol in compensate needs to be a %d row column vector. \n" % numTerms)
        # Dimension of image (square stamp guaranteed by setImg, so sn == sm)
        sm, sn = self.__image.image.shape
        # Dimenstion of projected image on focal plane
        projSamples = sm
        # Let us create a look-up table for x -> xp first.
        luty, lutx = np.mgrid[-(projSamples/2 - 0.5):(projSamples/2 + 0.5),
                            -(projSamples/2 - 0.5):(projSamples/2 + 0.5)]
        sensorFactor = inst.parameter["sensorFactor"]
        lutx = lutx/(projSamples/2/sensorFactor)
        luty = luty/(projSamples/2/sensorFactor)
        # Set up the mapping
        lutxp, lutyp, J = self.__aperture2image(inst, algo, zcCol, lutx, luty,
                                                projSamples, model)
        show_lutxyp = self.__showProjection(lutxp, lutyp, sensorFactor,
                                            projSamples, raytrace=False)
        # An empty projection means the mapping collapsed: flag the caustic
        # condition and leave the image untouched.
        if (np.all(show_lutxyp <= 0)):
            self.caustic = True
            return
        # Calculate the weighting center (x, y) and radius
        realcx, realcy = self.__image.getCenterAndR_ef()[0:2]
        # Extend the dimension of image by 20 pixel in x and y direction
        show_lutxyp = padArray(show_lutxyp, projSamples+20)
        # Get the binary matrix of image on pupil plane if raytrace=False.
        # Morphological close (dilate then erode) fills small holes in the
        # projected footprint before re-centering.
        struct0 = generate_binary_structure(2, 1)
        struct = iterate_structure(struct0, 4)
        struct = binary_dilation(struct, structure=struct0, iterations=2).astype(int)
        show_lutxyp = binary_dilation(show_lutxyp, structure=struct)
        show_lutxyp = binary_erosion(show_lutxyp, structure=struct)
        # Extract the region from the center of image and get the original one
        show_lutxyp = extractArray(show_lutxyp, projSamples)
        # Calculate the weighting center (x, y) and radius
        projcx, projcy = self.__image.getCenterAndR_ef(image=show_lutxyp.astype(float))[0:2]
        # Shift the image to center of projection on pupil
        # +(-) means we need to move image to the right (left)
        shiftx = projcx - realcx
        # +(-) means we need to move image upward (downward)
        shifty = projcy - realcy
        self.__image.image = np.roll(self.__image.image, int(np.round(shifty)), axis=0)
        self.__image.image = np.roll(self.__image.image, int(np.round(shiftx)), axis=1)
        # Construct the interpolant to get the intensity on (x', p') plane
        # that corresponds to the grid points on (x,y)
        yp, xp = np.mgrid[-(sm/2 - 0.5):(sm/2 + 0.5), -(sm/2 - 0.5):(sm/2 + 0.5)]
        xp = xp/(sm/2/sensorFactor)
        yp = yp/(sm/2/sensorFactor)
        # Put the NaN to be 0 for the interpolate to use
        lutxp[np.isnan(lutxp)] = 0
        lutyp[np.isnan(lutyp)] = 0
        # Construct the function for interpolation (bilinear: kx=ky=1)
        ip = RectBivariateSpline(yp[:, 0], xp[0, :], self.__image.image, kx=1, ky=1)
        # Construct the projected image by the interpolation.
        # NOTE(review): this evaluates the spline one pixel at a time at
        # Python level — correct but slow for large stamps.
        lutIp = np.zeros(lutxp.shape[0]*lutxp.shape[1])
        for ii, (xx, yy) in enumerate(zip(lutxp.ravel(), lutyp.ravel())):
            lutIp[ii] = ip(yy, xx)
        lutIp = lutIp.reshape(lutxp.shape)
        # Calaculate the image on focal plane with compensation based on flux conservation
        # I(x, y)/I'(x', y') = J = (dx'/dx)*(dy'/dy) - (dx'/dy)*(dy'/dx)
        self.__image.image = lutIp*J
        # Rotate the extra-focal image by 180 degrees to match the intra orientation
        if (self.atype == "extra"):
            self.__image.image = np.rot90(self.__image.image, k=2)
        # Put NaN to be 0
        self.__image.image[np.isnan(self.__image.image)] = 0
        # Check the compensated image has the problem or not.
        # The negative value means the over-compensation from wavefront error
        if (np.any(self.__image.image < 0) and np.all(self.image0 >= 0)):
            print("WARNING: negative scale parameter, image is within caustic, zcCol (in um)=\n")
            self.caustic = True
        # Put the overcompensated part to be 0.
        self.__image.image[self.__image.image < 0] = 0
def __aperture2image(self, inst, algo, zcCol, lutx, luty, projSamples, model):
"""
Calculate the x, y-coordinate on the focal plane and the related Jacobian matrix.
Arguments:
inst {[Instrument]} -- Instrument to use.
algo {[Algorithm]} -- Algorithm to solve the Poisson's equation. It can by done
by the fast Fourier transform or serial expansion.
zcCol {[float]} -- Coefficients of optical basis. It is Zernike polynomials in the
baseline.
lutx {[float]} -- x-coordinate on pupil plane.
luty {[float]} -- y-coordinate on pupil plane.
projSamples {[int]} -- Dimension of projected image. This value considers the
magnification ratio of donut image.
model {[string]} -- Optical model. It can be "paraxial", "onAxis", or "offAxis".
Returns:
[float] -- x, y-coordinate on the focal plane.
[float] -- Jacobian matrix between the pupil and focal plane.
"""
# Get the radius: R = D/2
R = inst.parameter["apertureDiameter"]/2.0
# Calculate C = -f(f-l)/l/R^2. This is for the calculation of reduced coordinate.
if (self.atype == self.INTRA):
l = inst.parameter["offset"]
elif (self.atype == self.EXTRA):
l = -inst.parameter["offset"]
focalLength = inst.parameter["focalLength"]
myC = -focalLength*(focalLength - l)/l/R**2
# Get the functions to do the off-axis correction by numerical fitting
# Order to do the off-axis correction. The order is 10 now.
offAxisPolyOrder = algo.parameter["offAxisPolyOrder"]
polyFunc = self.__getFunction("poly%d_2D" % offAxisPolyOrder)
polyGradFunc = self.__getFunction("poly%dGrad" % offAxisPolyOrder)
# Calculate the distance to center
lutr = np.sqrt(lutx**2 + luty**2)
# Calculated the extended ring radius (delta r), which is to extended the available
# pupil area.
# 1 pixel larger than projected pupil. No need to be EF-like, anything
# outside of this will be masked off by the computational mask
sensorFactor = inst.parameter["sensorFactor"]
onepixel = 1/(projSamples/2/sensorFactor)
# Get the index that the point is out of the range of extended pupil
obscuration = inst.parameter["obscuration"]
idxout = (lutr > 1+onepixel)|(lutr < obscuration-onepixel)
# Define the element to be NaN if it is out of range
lutx[idxout] = np.nan
luty[idxout] = np.nan
# Get the index in the extended area of outer boundary with the width of onepixel
idxbound = (lutr <= 1+onepixel)&(lutr > 1)
# Calculate the extended x, y-coordinate (x' = x/r*r', r'=1)
lutx[idxbound] = lutx[idxbound]/lutr[idxbound]
luty[idxbound] = luty[idxbound]/lutr[idxbound]
# Get the index in the extended area of inner boundary with the width of onepixel
idxinbd = (lutr < obscuration)&(lutr > obscuration-onepixel)
# Calculate the extended x, y-coordinate (x' = x/r*r', r'=obscuration)
lutx[idxinbd] = lutx[idxinbd]/lutr[idxinbd]*obscuration
luty[idxinbd] = luty[idxinbd]/lutr[idxinbd]*obscuration
# Get the corrected x, y-coordinate on focal plane (lutxp, lutyp)
if (model == "paraxial"):
# No correction is needed in "paraxial" model
lutxp = lutx
lutyp = luty
elif (model == "onAxis"):
# Calculate F(x, y) = m * sqrt(f^2-R^2) / sqrt(f^2-(x^2+y^2)*R^2)
# m is the mask scaling factor
myA2 = (focalLength**2 - R**2) / (focalLength**2 - lutr**2 * R**2)
# Put the unphysical value as NaN
myA = myA2.copy()
idx = (myA < 0)
myA[idx] = np.nan
myA[~idx] = np.sqrt(myA2[~idx])
# Mask scaling factor (for fast beam)
maskScalingFactor = algo.parameter["maskScalingFactor"]
# Calculate the x, y-coordinate on focal plane
# x' = F(x,y)*x + C*(dW/dx), y' = F(x,y)*y + C*(dW/dy)
lutxp = maskScalingFactor*myA*lutx
lutyp = maskScalingFactor*myA*luty
elif (model == "offAxis"):
# Get the coefficient of polynomials for off-axis correction
tt = self.offAxisOffset
cx = (self.offAxis_coeff[0, :] - self.offAxis_coeff[2, :]) * (tt+l)/(2*tt) + \
self.offAxis_coeff[2, :]
cy = (self.offAxis_coeff[1, :] - self.offAxis_coeff[3, :]) * (tt+l)/(2*tt) + \
self.offAxis_coeff[3, :]
# This will be inverted back by typesign later on.
# We do the inversion here to make the (x,y)->(x',y') equations has
# the same form as the paraxial case.
cx = np.sign(l)*cx
cy = np.sign(l)*cy
# Do the orthogonalization: x'=1/sqrt(2)*(x+y), y'=1/sqrt(2)*(x-y)
# Calculate the rotation angle for the orthogonalization
costheta = (self.fieldX + self.fieldY)/self.fldr/np.sqrt(2)
if (costheta > 1):
costheta = 1
elif (costheta < -1):
costheta = -1
sintheta = np.sqrt(1 - costheta**2)
if (self.fieldY < self.fieldX):
sintheta = -sintheta
# Create the pupil grid in off-axis model. This gives the x,y-coordinate
# in the extended ring area defined by the parameter of onepixel.
# Get the mask-related parameters
maskCa, maskRa, maskCb, maskRb = self.__interpMaskParam(self.fieldX,
self.fieldY, inst.maskParam)
lutx, luty = self.__createPupilGrid(lutx, luty, onepixel, maskCa,
maskCb, maskRa, maskRb, self.fieldX, self.fieldY)
# Calculate the x, y-coordinate on focal plane
# First rotate back to reference orientation
lutx0 = lutx*costheta + luty*sintheta
luty0 = -lutx*sintheta + luty*costheta
# Use the mapping at reference orientation
lutxp0 = polyFunc(cx, lutx0, y=luty0)
lutyp0 = polyFunc(cy, lutx0, y=luty0)
# Rotate back to focal plane
lutxp = lutxp0*costheta - lutyp0*sintheta
lutyp = lutxp0*sintheta + lutyp0*costheta
# Zemax data are in mm, therefore 1000
sensorSamples = inst.parameter["sensorSamples"]
pixelSize = inst.parameter["pixelSize"]
reduced_coordi_factor = 1e-3/(sensorSamples/2*pixelSize/sensorFactor)
# Reduced coordinates, so that this can be added with the dW/dz
lutxp = lutxp*reduced_coordi_factor
lutyp = lutyp*reduced_coordi_factor
else:
print('Wrong optical model type in compensate. \n')
return
# Obscuration of annular aperture
zobsR = algo.parameter["zobsR"]
# Calculate the x, y-coordinate on focal plane
# x' = F(x,y)*x + C*(dW/dx), y' = F(x,y)*y + C*(dW/dy)
# In Model basis (zer: Zernike polynomials)
if (zcCol.ndim == 1):
lutxp = lutxp + myC*ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dx")
lutyp = lutyp + myC*ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dy")
# Make the sign to be consistent
if (self.atype == "extra"):
lutxp = -lutxp
lutyp = -lutyp
# Calculate the Jacobian matrix
# In Model basis (zer: Zernike polynomials)
if (zcCol.ndim == 1):
if (model == "paraxial"):
J = 1 + myC * ZernikeAnnularJacobian(zcCol, lutx, luty, zobsR, "1st") + \
myC**2 * ZernikeAnnularJacobian(zcCol, lutx, luty, zobsR, "2nd")
elif (model == "onAxis"):
xpox = maskScalingFactor * myA * (1 + \
lutx**2 * R**2. / (focalLength**2 - R**2 * lutr**2)) + \
myC * ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dx2")
ypoy = maskScalingFactor * myA * (1 + \
luty**2 * R**2. / (focalLength**2 - R**2 * lutr**2)) + \
myC * ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dy2")
xpoy = maskScalingFactor * myA * \
lutx * luty * R**2 / (focalLength**2 - R**2 * lutr**2) + \
myC * ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dxy")
ypox = xpoy
J = xpox*ypoy - xpoy*ypox
elif (model == "offAxis"):
xp0ox = polyGradFunc(cx, lutx0, luty0, "dx") * costheta - \
polyGradFunc(cx, lutx0, luty0, "dy") * sintheta
yp0ox = polyGradFunc(cy, lutx0, luty0, "dx") * costheta - \
polyGradFunc(cy, lutx0, luty0, "dy") * sintheta
xp0oy = polyGradFunc(cx, lutx0, luty0, "dx") * sintheta + \
polyGradFunc(cx, lutx0, luty0, "dy") * costheta
yp0oy = polyGradFunc(cy, lutx0, luty0, "dx") * sintheta + \
polyGradFunc(cy, lutx0, luty0, "dy") * costheta
xpox = (xp0ox*costheta - yp0ox*sintheta)*reduced_coordi_factor + \
myC*ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dx2")
ypoy = (xp0oy*sintheta + yp0oy*costheta)*reduced_coordi_factor + \
myC*ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dy2")
temp = myC*ZernikeAnnularGrad(zcCol, lutx, luty, zobsR, "dxy")
# if temp==0,xpoy doesn't need to be symmetric about x=y
xpoy = (xp0oy*costheta - yp0oy*sintheta)*reduced_coordi_factor + temp
# xpoy-flipud(rot90(ypox))==0 is true
ypox = (xp0ox*sintheta + yp0ox*costheta)*reduced_coordi_factor + temp
J = xpox*ypoy - xpoy*ypox
return lutxp, lutyp, J
def __getFunction(self, name):
"""
Decide to call the function of __poly10_2D() or __poly10Grad(). This is to correct
the off-axis distortion. A numerical solution with 2-dimensions 10 order polynomials
to map between the telescope aperature and defocused image plane is used.
Arguments:
name {[string]} -- Function name to call.
Returns:
[float] -- Corrected image after the correction.
Raises:
RuntimeError -- Raise error if the function name does not exist.
"""
# Construnct the dictionary table for calling function.
# The reason to use the dictionary is for the future's extension.
funcTable = dict(poly10_2D = self.__poly10_2D,
poly10Grad = self.__poly10Grad)
# Look for the function to call
if name in funcTable:
return funcTable[name]
# Error for unknown function name
raise RuntimeError("Unknown function name: %s" % name)
def __poly10_2D(self, c, data, y=None):
"""
Correct the off-axis distortion by fitting with a 10 order polynomial
equation.
Arguments:
c {[float]} -- Parameters of off-axis distrotion.
data {[float]} -- x, y-coordinate on aperature. If y is provided,
this will be just the x-coordinate.
Keyword Arguments:
y {[float]} -- y-coordinate at aperature (default: {None}).
Returns:
[float] -- Corrected parameters for off-axis distortion.
"""
# Decide the x, y-coordinate data on aperature
if (y is None):
x = data[0, :]
y = data[1, :]
else:
x = data
# Correct the off-axis distortion
return poly10_2D(c, x.flatten(), y.flatten()).reshape(x.shape)
    def __poly10Grad(self, c, x, y, atype):
        """
        Evaluate the gradient of the 10th-order 2-D polynomial used for the
        off-axis distortion correction.
        Arguments:
            c {[float]} -- Parameters of off-axis distortion.
            x {[float]} -- x-coordinate at aperture.
            y {[float]} -- y-coordinate at aperture.
            atype {[string]} -- Direction of gradient. It can be "dx" or "dy".
        Returns:
            [float] -- Gradient values with the same shape as x.
        """
        # Delegate to the compiled implementation on flattened arrays.
        return poly10Grad(c, x.flatten(), y.flatten(), atype).reshape(x.shape)
    def __createPupilGrid(self, lutx, luty, onepixel, ca, cb, ra, rb, fieldX, fieldY=None):
        """
        Create the pupil grid in off-axis model. This function gives the x,y-coordinate in the
        extended ring area defined by the parameter of onepixel.
        NOTE: lutx and luty are modified in place by the boundary approximation below.
        Arguments:
            lutx {[float]} -- x-coordinate on pupil plane.
            luty {[float]} -- y-coordinate on pupil plane.
            onepixel {[float]} -- Extended delta radius.
            ca {[float]} -- Center of outer ring on the pupil plane.
            cb {float} -- Center of inner ring on the pupil plane.
            ra {[float]} -- Radius of outer ring on the pupil plane.
            rb {[float]} -- Radius of inner ring on the pupil plane.
            fieldX {[float]} -- x-coordinate of donut on the focal plane in degree.
                                If only fieldX is given, this will be fldr = sqrt(2)*fieldX
                                actually.
        Keyword Arguments:
            fieldY {[float]} -- y-coordinate of donut on the focal plane in degree. (default: {None})
        Returns:
            [float] -- x, y-coordinate of extended ring area on pupil plane.
        """
        # Calculate fieldX, fieldY if only input of fieldX (= fldr = sqrt(2)*fieldX actually)
        # is provided
        if (fieldY is None):
            # Input of filedX is fldr actually
            fldr = fieldX
            # Divide fldr by sqrt(2) to get fieldX = fieldY
            fieldX = fldr/np.sqrt(2)
            fieldY = fieldX
        # Rotate the mask center after the off-axis correction based on the position
        # of fieldX and fieldY
        cax, cay, cbx, cby = self.__rotateMaskParam(ca, cb, fieldX, fieldY)
        # Get x, y coordinate of extended outer boundary by the linear approximation
        lutx, luty = self.__approximateExtendedXY(lutx, luty, cax, cay, ra, ra+onepixel, "outer")
        # Get x, y coordinate of extended inner boundary by the linear approximation
        lutx, luty = self.__approximateExtendedXY(lutx, luty, cbx, cby, rb-onepixel, rb, "inner")
        return lutx, luty
def __approximateExtendedXY(self, lutx, luty, cenX, cenY, innerR, outerR, config):
"""
Calculate the x, y-cooridnate on puil plane in the extended ring area by the linear
approxination, which is used in the off-axis correction.
Arguments:
lutx {[float]} -- x-coordinate on pupil plane.
luty {[float]} -- y-coordinate on pupil plane.
cenX {[float]} -- x-coordinate of boundary ring center.
cenY {[float]} -- y-coordinate of boundary ring center.
innerR {[float]} -- Inner radius of extended ring.
outerR {[float]} -- Outer radius of extended ring.
config {[string]} -- Configuration to calculate the x,y-coordinate in the extended ring.
"inner": inner extended ring;
"outer": outer extended ring.
Returns:
[float] -- x, y-coordinate of extended ring area on pupil plane.
"""
# Catculate the distance to rotated center of boundary ring
lutr = np.sqrt((lutx - cenX)**2 + (luty - cenY)**2)
# Define NaN to be 999 for the comparison in the following step
tmp = lutr.copy()
tmp[np.isnan(tmp)] = 999
# Get the available index that the related distance is between innderR and outerR
idxbound = (~np.isnan(lutr)) & (tmp >= innerR ) & (tmp <= outerR)
# Deside R based on the configuration
if (config == "outer"):
R = innerR
# Get the index that the related distance is bigger than outerR
idxout = (tmp > outerR)
elif (config == "inner"):
R = outerR
# Get the index that the related distance is smaller than innerR
idxout = (tmp < innerR)
# Put the x, y-coordiate to be NaN if it is inside/ outside the pupil that is
# after the off-axis correction.
lutx[idxout] = np.nan
luty[idxout] = np.nan
# Get the x, y-coordinate in this ring area by the linear approximation
lutx[idxbound] = (lutx[idxbound]-cenX)/lutr[idxbound]*R + cenX
luty[idxbound] = (luty[idxbound]-cenY)/lutr[idxbound]*R + cenY
return lutx, luty
def __rotateMaskParam(self, ca, cb, fieldX, fieldY):
    """Rotate the mask-related center parameters into the field direction.

    Arguments:
        ca {[float]} -- Mask-related parameter of center.
        cb {[float]} -- Mask-related parameter of center.
        fieldX {[float]} -- x-coordinate of donut on the focal plane in degree.
        fieldY {[float]} -- y-coordinate of donut on the focal plane in degree.

    Returns:
        [float] -- Projected x, y elements (cax, cay, cbx, cby).
    """
    # Field radius; at the field origin there is no preferred direction,
    # so cos/sin are defined to be zero
    fldr = np.sqrt(fieldX**2 + fieldY**2)
    if fldr == 0:
        cosTheta, sinTheta = 0, 0
    else:
        cosTheta = fieldX/fldr
        sinTheta = fieldY/fldr

    # Project both center parameters onto the rotated axes
    return cosTheta*ca, sinTheta*ca, cosTheta*cb, sinTheta*cb
def getOffAxisCorr(self, instDir, order):
    """Map the coefficients of off-axis correction for the x, y-projection
    of intra- and extra-focal images.

    This is for the mapping of coordinate from the telescope aperture to
    the defocal image plane. Results are stored on self.offAxis_coeff and
    self.offAxisOffset.

    Arguments:
        instDir {[string]} -- Path to specific instrument directory.
        order {[int]} -- Up to order-th of off-axis correction.
                         (NOTE(review): currently unused in this method.)

    Raises:
        RuntimeError -- If no data file matches one of the configurations.
    """
    # The four projection configurations: x/y, intra-/extra-focal
    configList = ["cxin", "cyin", "cxex", "cyex"]

    # All regular files in the instrument directory
    fileList = [f for f in os.listdir(instDir)
                if os.path.isfile(os.path.join(instDir, f))]

    temp = []
    for config in configList:
        # Find the data file for this configuration. Bug fix: previously a
        # missing match silently reused the previous configuration's file
        # name (or raised NameError on the first one), and the unescaped
        # "." in ".txt" matched any character.
        matchFileName = None
        for fileName in fileList:
            if re.match(r"\S*%s\S*\.txt" % config, fileName) is not None:
                # Use the actual file name, not m.group(), so trailing
                # characters after ".txt" cannot break the path
                matchFileName = fileName
                break
        if matchFileName is None:
            raise RuntimeError("No off-axis correction file for %r in %r"
                               % (config, instDir))

        # Read the coefficients and the defocal offset from the file
        filePath = os.path.join(instDir, matchFileName)
        corr_coeff, offset = self.__getOffAxisCorr_single(filePath)
        temp.append(corr_coeff)

    # Keep the stacked coefficients and the (last read) defocal offset
    self.offAxis_coeff = np.array(temp)
    self.offAxisOffset = offset
def __getOffAxisCorr_single(self, confFile):
    """Read the image-related parameters for the off-axis distortion.

    Uses the linear approximation over a series of parameters fitted with
    the LSST ZEMAX model.

    Arguments:
        confFile {[string]} -- Path of configuration file.

    Returns:
        [float] -- Coefficients for the off-axis distortion based on the
                   linear response.
        [float] -- Defocal distance in m.
    """
    # Field distance of the donut from the origin (aperture)
    fldr = np.sqrt(self.fieldX**2 + self.fieldY**2)

    # Load the fitted data; the very first entry holds the defocal offset
    cdata = np.loadtxt(confFile)
    offset = cdata[0, 0]

    # Reference parameters start at the second column
    c = cdata[:, 1:]

    # Distance-to-center ruler of the reference data
    # (between 1.51 and 1.84 degree here)
    ruler = np.sqrt(c[:, 0]**2 + c[:, 1]**2)

    # Linearly interpolate the correction coefficients at fldr
    corr_coeff = self.__linearApprox(fldr, ruler, c[:, 2:])
    return corr_coeff, offset
def __interpMaskParam(self, fieldX, fieldY, maskParam):
    """Interpolate the mask-related parameters for the off-axis distortion
    and vignetting correction.

    Uses the linear approximation over a series of parameters fitted with
    the LSST ZEMAX model.

    Arguments:
        fieldX {[float]} -- x-coordinate of donut on the focal plane in degree.
        fieldY {[float]} -- y-coordinate of donut on the focal plane in degree.
        maskParam {[string]} -- Fitted coefficient file for the off-axis
                                distortion and vignetting correction.

    Returns:
        [float] -- ca, ra, cb, rb coefficients based on the linear response.
    """
    # Field distance of the donut from the origin (aperture)
    fldr = np.sqrt(fieldX**2 + fieldY**2)

    # Load the fitted mask parameters
    c = np.loadtxt(maskParam)

    # Distance-to-center ruler of the reference data
    # (between 1.51 and 1.84 degree here)
    ruler = np.sqrt(2)*c[:, 0]

    # Linearly interpolate the parameters at fldr and unpack them
    param = self.__linearApprox(fldr, ruler, c[:, 1:])
    ca, ra, cb, rb = param[0], param[1], param[2], param[3]
    return ca, ra, cb, rb
def __linearApprox(self, fldr, ruler, parameters):
    """Linearly interpolate the off-axis correction parameters at fldr.

    Arguments:
        fldr {[float]} -- Distance from donut to origin (aperture).
        ruler {[float]} -- Series of distances with available parameters
                           for the fitting.
        parameters {[float]} -- Referenced parameters for the fitting.

    Returns:
        [float] -- Fitted parameters based on the linear approximation.
    """
    # Work on the data sorted by distance
    order = np.argsort(ruler)
    ruler = ruler[order]
    parameters = parameters[order, :]

    if fldr > ruler.max():
        # Above the covered range: clamp to the highest boundary
        p1, p2 = 0, parameters.shape[0] - 1
        w1, w2 = 0, 1
    elif fldr < ruler.min():
        # Below the covered range: clamp to the lowest boundary
        p1, p2 = 0, 0
        w1, w2 = 1, 0
    else:
        # In range: take the first reference point at or beyond fldr and
        # weight the two neighbours by their distance to fldr
        p2 = int(np.argmax(ruler >= fldr))
        p1 = p2 - 1
        w1 = (ruler[p2] - fldr)/(ruler[p2] - ruler[p1])
        w2 = 1 - w1

    # Weighted combination of the two neighbouring parameter rows
    return w1*parameters[p1, :] + w2*parameters[p2, :]
def makeMaskList(self, inst, model):
    """Calculate the mask list based on the obscuration and optical model.

    Each row of the mask list is
    [center_x, center_y, radius_of_boundary, 1/0 for outer/inner boundary].

    Arguments:
        inst {[Instrument]} -- Instrument to use.
        model {[string]} -- Optical model. It can be "paraxial", "onAxis",
                            or "offAxis".

    Returns:
        [float] -- 2-D mask list array, one row per boundary.
    """
    obscuration = inst.parameter["obscuration"]
    if model in ("paraxial", "onAxis"):
        if obscuration == 0:
            # Bug fix: keep the array 2-D (one row). The previous 1-D
            # np.array([0, 0, 1, 1]) broke makeMask(), which indexes the
            # result as masklist[ii, col].
            masklist = np.array([[0, 0, 1, 1]])
        else:
            masklist = np.array([[0, 0, 1, 1],
                                 [0, 0, obscuration, 0]])
    else:
        # Off-axis model: interpolate the mask parameters for this field...
        maskCa, maskRa, maskCb, maskRb = self.__interpMaskParam(
            self.fieldX, self.fieldY, inst.maskParam)
        # ...and rotate the boundary centers into the field direction
        cax, cay, cbx, cby = self.__rotateMaskParam(
            maskCa, maskCb, self.fieldX, self.fieldY)
        masklist = np.array([[0, 0, 1, 1], [0, 0, obscuration, 0],
                             [cax, cay, maskRa, 1], [cbx, cby, maskRb, 0]])
    return masklist
def __showProjection(self, lutxp, lutyp, sensorFactor, projSamples, raytrace=False):
    """Calculate the x, y-projection of the image on the pupil.

    This can be used to calculate the center of projection in compensate().

    Arguments:
        lutxp {[float]} -- x-coordinate on pupil plane. NaN means the point
                           is not inside the pupil.
        lutyp {[float]} -- y-coordinate on pupil plane. NaN means the point
                           is not inside the pupil.
        sensorFactor {[float]} -- ? (Need to check the meaning of this.)
        projSamples {[int]} -- Dimension of projected image. This value
                               considers the magnification ratio of the
                               donut image.
        raytrace {[bool]} -- If True, aggregate the photon-hit counts
                             instead of producing a binary image.
                             (default: {False})

    Returns:
        [float] -- Projection of image. Binary image if raytrace=False.
    """
    # Dimension of the pupil image
    n1, n2 = lutxp.shape

    # Accumulator on the pupil; entries may exceed 1 only when raytrace=True
    show_lutxyp = np.zeros([n1, n2])

    # Only points inside the pupil (non-NaN) contribute
    inPupil = (~np.isnan(lutxp)).nonzero()
    for row, col in zip(inPupil[0], inPupil[1]):
        # Projected pixel coordinates; x=0.5 is the center of pixel #1
        xR = int(np.round((lutxp[row, col] + sensorFactor)*projSamples/sensorFactor/2 + 0.5))
        yR = int(np.round((lutyp[row, col] + sensorFactor)*projSamples/sensorFactor/2 + 0.5))

        # Skip projections that fall outside the image bounds
        if not (0 < xR < n2 and 0 < yR < n1):
            continue

        if raytrace:
            # Count every photon hit
            show_lutxyp[yR-1, xR-1] += 1
        elif show_lutxyp[yR-1, xR-1] < 1:
            # Binary image: mark the pixel at most once
            show_lutxyp[yR-1, xR-1] = 1

    return show_lutxyp
def makeMask(self, inst, model, boundaryT, maskScalingFactorLocal):
    """Build the binary masks considering obscuration and off-axis correction.

    Two masks are computed and stored on the instance:
        pMask: padded mask for use at the offset planes.
        cMask: non-padded mask corresponding to the aperture.

    Arguments:
        inst {[Instrument]} -- Instrument to use.
        model {[string]} -- Optical model. "paraxial", "onAxis", or "offAxis".
        boundaryT {[int]} -- Extended boundary in pixel. It defines how far
                             the computation mask extends beyond the pupil
                             mask. In fft, it is also the width of the
                             Neuman boundary where the derivative of the
                             wavefront is set to zero.
        maskScalingFactorLocal {[float]} -- Mask scaling factor (for fast
                                            beam) for local correction.
    """
    sensorSamples = inst.parameter["sensorSamples"]
    # Bug fix: start from a 2-D all-pass mask. The previous 1-D
    # np.ones(sensorSamples) only got the right shape through accidental
    # broadcasting in the loop (and stayed 1-D for an empty mask list).
    self.pMask = np.ones([sensorSamples, sensorSamples], dtype=int)
    self.cMask = self.pMask.copy()

    apertureDiameter = inst.parameter["apertureDiameter"]
    focalLength = inst.parameter["focalLength"]
    offset = inst.parameter["offset"]
    # Hoisted out of the loop: invariant per call
    pixelSize = inst.parameter["pixelSize"]

    # Mask radius on the defocal plane (scaled for the fast beam)
    rMask = apertureDiameter/(2*focalLength/offset)*maskScalingFactorLocal

    # Masklist rows: [center_x, center_y, radius, 1/0 for outer/inner boundary]
    masklist = self.makeMaskList(inst, model)
    for ii in range(masklist.shape[0]):
        # Distance of every sensor sample to this boundary's center
        r = np.sqrt((inst.xSensor - masklist[ii, 0])**2 +
                    (inst.ySensor - masklist[ii, 1])**2)

        # Samples inside this boundary take the pass/block value
        idx = (r <= masklist[ii, 2])

        # Extended boundary for the computation mask; the extension level is
        # decided by boundaryT. In fft, this is also the Neuman boundary
        # where the derivative of the wavefront is set to zero.
        if (masklist[ii, 3] >= 1):
            aidx = np.nonzero(r <= masklist[ii, 2]*(1 + boundaryT*pixelSize/rMask))
        else:
            aidx = np.nonzero(r <= masklist[ii, 2]*(1 - boundaryT*pixelSize/rMask))

        # Initialize both mask elements to the opposite of the pass/block
        # boolean...
        pMaskii = (1 - masklist[ii, 3]) * \
            np.ones([sensorSamples, sensorSamples], dtype=int)
        cMaskii = pMaskii.copy()
        # ...then set the (extended) in-boundary region
        pMaskii[idx] = masklist[ii, 3]
        cMaskii[aidx] = masklist[ii, 3]

        # Multiplicatively combine with the accumulated masks to keep the
        # common pass region only
        self.pMask = self.pMask*pMaskii
        self.cMask = self.cMask*cMaskii
# Library-only module: nothing to run when executed directly.
if __name__ == "__main__":
    pass
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class TodoTask(models.Model):
    """Extend the existing todo.task Odoo model with mail-thread support
    and two extra fields."""
    _name = 'todo.task'
    # Inherit from itself (in-place extension) plus mail.thread for chatter
    _inherit = ['todo.task','mail.thread']
    # User responsible for completing the task
    user_id = fields.Many2one('res.users', 'Responsible')
    # Date by which the task should be done
    date_deadline = fields.Date('Deadline')
#!/usr/bin/env python
import roslib; roslib.load_manifest('leica_ros_sph')
import rospy
import sys
import time
import math
import GeoCom_mod
from geometry_msgs.msg import PointStamped
from optparse import OptionParser
from operator import neg
# ---- Option handling (serial port, baudrate, debug flag) ----
usage = "usage: rosrun leica_interface %prog [options]"
parser = OptionParser(usage=usage)
parser.set_defaults(port="/dev/ttyS0",baudrate=115200, debug=False)
parser.add_option("-p", "--port", action="store", type="string", dest="port", help="specify used port [default: %default]")
parser.add_option("-b", "--baudrate", action="store", type="int", dest="baudrate", help="specify used baudrate [default: %default]")
parser.add_option("-d", "--debug", action="store_true", dest="debug", help="print debug information")
(options, args) = parser.parse_args()

# ---- Total-station setup over the serial connection ----
# COM_OpenConnection returns a truthy first element on failure.
if GeoCom_mod.COM_OpenConnection(options.port, options.baudrate )[0]:
    sys.exit("Can not open Port... exiting")
prism_type = 7
GeoCom_mod.BAP_SetPrismType(prism_type)
GeoCom_mod.AUT_LockIn()
GeoCom_mod.TMC_SetEdmMode(9) #EDM_CONT_FAST = 9, // Fast repeated measurement (geocom manual p.91)
GeoCom_mod.TMC_DoMeasure()
time.sleep(2)
# NOTE: print statements (no parentheses) => this script is Python 2.
print "Leica is set up"

# ---- ROS setup ----
rospy.init_node('leica_node')
point_pub = rospy.Publisher('/leica/worldposition',PointStamped, queue_size=1) #prism location in original total station world frame
point_msg = PointStamped()
print "ROS-node is set up"

# ---- Main loop: poll coordinates from the total station and publish ----
loop_count = 1
while not rospy.is_shutdown():
    try:
        [error, RC, coord] = GeoCom_mod.TMC_GetCoordinate()
        if options.debug: rospy.loginfo( 'Error: '+ str(error) )
        if options.debug: rospy.loginfo( 'Return Code: '+ str(RC) )
        if options.debug: rospy.loginfo( 'Received: '+ str(coord) )
        # RC == 0: valid measurement; coordinates should be ENU (x=E, y=N, z=U)
        if RC==0:
            rospy.loginfo ('Valid data:'+str(coord))
            #should be ENU - XYZ
            point_x = float(coord[0]) #East
            point_y = float(coord[1]) #North
            point_z = float(coord[2]) #Up
        # RC == 1284: measurement usable but accuracy not guaranteed
        elif RC==1284:
            rospy.logwarn( 'Accuracy could not be guaranteed \n' )
            print ('Still sending data:'+str(coord))
            point_x = float(coord[0]) #East
            point_y = float(coord[1]) #North
            point_z = float(coord[2]) #Up
        # RC == 1285: no valid distance; the previous point_* values are reused
        elif RC==1285:
            rospy.logwarn('No valid distance measurement! \n')
        else:
            rospy.logwarn( '\n'+'ERROR, Return code: '+str(RC)+'\n')
        # NOTE(review): if the very first reading yields RC 1285 (or an
        # unknown RC), point_x/y/z are still undefined here and the publish
        # below raises NameError, which falls into the broad except and
        # restarts the measurement — confirm this is acceptable.
        point_msg.header.seq = loop_count
        point_msg.header.stamp = rospy.Time.now()
        point_msg.header.frame_id = 'world'
        point_msg.point.x = point_x
        point_msg.point.y = point_y
        point_msg.point.z = point_z
        point_pub.publish(point_msg)
        loop_count = loop_count + 1
    except ValueError:
        rospy.logwarn( "Non numeric value recieved!" )
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # presumably intentional to keep the node alive — verify.
        rospy.logfatal( "No measurement or drop." )
        # Short break in case the problem was related to the serial connection.
        time.sleep(0.2)
        # Then restart the measurement
        GeoCom_mod.TMC_DoMeasure()
        rospy.loginfo( "Restarted measurements" )

# Closing serial connection, when execution is stopped
GeoCom_mod.COM_CloseConnection()
|
import os
import cv2
import torch
import time
import random
import warnings
import torchvision as tv
import albumentations as albu
import numpy as np
from config import configs
from PIL import ImageFile
from glob import glob
from utils.reader import SegDataset
from utils.losses import *
from utils.optimizers import get_optimizer
from utils.utils import AverageMeter,get_lr,iou_metric,dice_metric,save_checkpoint
from utils.logger import Logger
from utils.metrics import Evaluator
from utils.warmup import GradualWarmupScheduler
from sklearn.model_selection import train_test_split
from albumentations import pytorch as AT
import segmentation_models_pytorch as smp
from progress.bar import Bar
from tensorboardX import SummaryWriter
# Set default configs: tolerate truncated images, silence warnings,
# select the configured GPU(s), and build the evaluator used for metrics.
ImageFile.LOAD_TRUNCATED_IMAGES = True
warnings.filterwarnings("ignore")
os.environ['CUDA_VISIBLE_DEVICES'] = configs.gpu_id
evaluator = Evaluator(configs.num_classes)
# Set random seed
def seed_everything(seed):
    """Seed every RNG (hash, python, numpy, torch) for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels
    torch.backends.cudnn.deterministic = True
# Seed all RNGs once at import time with the configured seed
seed_everything(configs.seed)
# make dir for use
def makdir():
    """Create the output directories (logs, checkpoints, predicted masks).

    NOTE(review): the name keeps its original typo ("makdir") because the
    call below and any external callers depend on it.
    """
    # exist_ok avoids the check-then-create race of the original code
    for path in (configs.log_dir, configs.checkpoints, configs.pred_mask):
        os.makedirs(path, exist_ok=True)
makdir()
# Best validation scores seen so far; updated inside main()
best_iou = 0
best_dice = 0
# Augmentations
def get_training_augmentation():
    """Basic training pipeline: resize, random flips, normalize, to tensor."""
    return albu.Compose([
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        AT.ToTensor(),
    ])
def stong_aug():
    """Strong training pipeline: flips/rotations plus one random geometric
    distortion, then normalize and convert to tensor.

    NOTE(review): the name keeps its original typo ("stong") because main()
    calls it by this name.
    """
    # Pick at most one of these geometric distortions per sample
    distortions = albu.OneOf([
        albu.CenterCrop(p=0.5, height=configs.input_size, width=configs.input_size),
        albu.ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
        albu.GridDistortion(p=0.5),
        albu.OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
    ], p=0.8)
    return albu.Compose([
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomRotate90(p=0.5),
        distortions,
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        AT.ToTensor(),
    ])
def get_valid_augmentation():
    """Validation pipeline: deterministic resize + normalize + to tensor."""
    return albu.Compose([
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        AT.ToTensor(),
    ])
def main():
    """Train a U-Net segmentation model: build the model, data loaders,
    optimizer and scheduler, then run the epoch loop and checkpoint the
    best weights (by IoU-type metric and by Dice)."""
    global best_iou
    global best_dice
    # Model: U-Net with the configured encoder/weights/classes/activation
    model = smp.Unet(
        encoder_name=configs.encoder,
        encoder_weights=configs.encoder_weights,
        classes=configs.num_classes,
        activation=configs.activation)
    # Multi-GPU if several device ids are configured
    # NOTE(review): `nn` is not imported explicitly in this file — presumably
    # pulled in via `from utils.losses import *`; confirm.
    if len(configs.gpu_id)>1:
        model = nn.DataParallel(model)
    model.cuda()
    # Dataset sample names are derived from the mask directory
    filenames = glob(configs.dataset+"masks/*")
    filenames = [os.path.basename(i) for i in filenames]
    # Random 80/20 train/validation split
    train_files, val_files = train_test_split(filenames, test_size=0.2)
    # Choose the augmentation pipelines
    if configs.use_strong_aug:
        transform_train = stong_aug()
    else:
        transform_train = get_training_augmentation()
    transform_valid = get_valid_augmentation()
    # Data loaders for train and validation
    train_dataset = SegDataset(train_files,phase="train",transforms = transform_train)
    valid_dataset = SegDataset(val_files,phase="valid",transforms = transform_valid)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=configs.bs, shuffle=True, num_workers=configs.workers)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=configs.bs, shuffle=False, num_workers=configs.workers)
    optimizer = get_optimizer(model)
    loss_func = get_loss_func(configs.loss_func)
    criterion = loss_func().cuda()
    # tensorboardX writer
    writer = SummaryWriter(configs.log_dir)
    # Select the learning-rate scheduler
    if configs.lr_scheduler == "step":
        scheduler_default = torch.optim.lr_scheduler.StepLR(optimizer,step_size=10,gamma=0.1)
    elif configs.lr_scheduler == "on_loss":
        scheduler_default = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=5, verbose=False)
    elif configs.lr_scheduler == "on_iou":
        scheduler_default = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2, patience=5, verbose=False)
    elif configs.lr_scheduler == "on_dice":
        scheduler_default = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2, patience=5, verbose=False)
    elif configs.lr_scheduler == "cosine":
        scheduler_default = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, configs.epochs-configs.warmup_epo)
    else:
        # Fallback scheduler when the configured name is unknown
        scheduler_default = torch.optim.lr_scheduler.StepLR(optimizer,step_size=6,gamma=0.1)
    # Optionally wrap the scheduler with a warmup phase
    if configs.warmup:
        scheduler = GradualWarmupScheduler(optimizer, multiplier=configs.warmup_factor, total_epoch=configs.warmup_epo, after_scheduler=scheduler_default)
    else:
        scheduler = scheduler_default
    # Epoch loop: train, validate, step the scheduler, checkpoint the best
    for epoch in range(configs.epochs):
        print('\nEpoch: [%d | %d] LR: %.8f' % (epoch + 1, configs.epochs, optimizer.param_groups[0]['lr']))
        train_loss,train_dice,train_iou = train(train_loader,model,criterion,optimizer,epoch,writer)
        # `eval` here is the validation function defined below
        # (it shadows the builtin eval)
        valid_loss,valid_dice,valid_iou = eval(valid_loader,model,criterion,epoch,writer)
        # Step the scheduler with the quantity it monitors
        if configs.lr_scheduler == "step" or configs.lr_scheduler == "cosine" or configs.warmup:
            scheduler.step(epoch)
        elif configs.lr_scheduler == "on_iou":
            scheduler.step(valid_iou)
        elif configs.lr_scheduler == "on_dice":
            scheduler.step(valid_dice)
        elif configs.lr_scheduler == "on_loss":
            scheduler.step(valid_loss)
        # Track and save the best model (by IoU metric and by Dice)
        is_best_iou = valid_iou > best_iou
        is_best_dice = valid_dice > best_dice
        best_iou = max(valid_iou, best_iou)
        best_dice = max(valid_dice,best_dice)
        print("Best {}: {} ,Best Dice: {}".format(configs.metric,best_iou,best_dice))
        save_checkpoint({
            'state_dict': model.state_dict(),
        },is_best_iou,is_best_dice)
def train(train_loader, model, criterion, optimizer, epoch,writer):
    """Run one training epoch and return (avg loss, avg dice, avg IoU)."""
    # switch to train mode
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    Dice_coeff = AverageMeter()
    Iou = AverageMeter()
    end = time.time()
    evaluator.reset()
    bar = Bar('Training: ', max=len(train_loader))
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        inputs, targets = inputs.cuda(), targets.cuda()
        # NOTE(review): Variable is a no-op on modern PyTorch; kept as-is
        inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
        outputs = model(inputs)
        # compute the training loss
        loss = criterion(outputs, targets)
        # per-batch IoU
        # NOTE(review): iou_batch is computed but never used below
        iou_batch = iou_metric(outputs,targets,classes = [str(i) for i in range(configs.num_classes)])
        # per-batch dice metric
        dice_batch = dice_metric(outputs,targets)
        # update running averages
        losses.update(loss.item(), inputs.size(0))
        Dice_coeff.update(dice_batch.item(), inputs.size(0))
        # accumulate the confusion matrix on argmax predictions
        target = targets.cpu().numpy()
        pred = outputs.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)
        target = np.argmax(target, axis=1)
        evaluator.add_batch(target, pred)
        if configs.metric == "mIoU":
            iou_value = evaluator.Mean_Intersection_over_Union()
        else:
            iou_value = evaluator.Frequency_Weighted_Intersection_over_Union()
        Iou.update(iou_value, inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        # clip gradient (currently disabled)
        #torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0, norm_type=2)
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Dice_coeff: {Dice_coeff: .4f} | {metric}: {Iou: .4f}'.format(
            batch=batch_idx + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            Dice_coeff=Dice_coeff.avg,
            metric=configs.metric[0],
            Iou=Iou.avg,
        )
        # NOTE(review): these scalars are (re)written every batch with the
        # same `epoch` step — confirm whether per-epoch logging was intended
        writer.add_scalar("Train-Loss",losses.avg,epoch)
        writer.add_scalar("Train-%s"%configs.metric,Iou.avg,epoch)
        writer.add_scalar("Train-Dice",Dice_coeff.avg,epoch)
        bar.next()
    bar.finish()
    return (losses.avg, Dice_coeff.avg, Iou.avg)
def eval(valid_loader, model, criterion, epoch,writer):
    """Run one validation epoch and return (avg loss, avg dice, avg IoU).

    NOTE(review): the name shadows the builtin `eval`; main() calls it by
    this name, so it is kept.
    """
    # switch to evaluation mode
    model.eval()
    global best_dice
    global best_iou
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    Dice_coeff = AverageMeter()
    Iou = AverageMeter()
    end = time.time()
    evaluator.reset()
    bar = Bar('Validing: ', max=len(valid_loader))
    # No gradients are needed for validation
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(valid_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            inputs, targets = inputs.cuda(), targets.cuda()
            # NOTE(review): Variable is a no-op on modern PyTorch; kept as-is
            inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
            outputs = model(inputs)
            # compute the validation loss
            loss = criterion(outputs, targets)
            # per-batch IoU
            # NOTE(review): iou_batch is computed but never used below
            iou_batch = iou_metric(outputs,targets,classes = [str(i) for i in range(configs.num_classes)])
            # per-batch dice metric
            dice_batch = dice_metric(outputs,targets)
            # update running averages
            losses.update(loss.item(), inputs.size(0))
            Dice_coeff.update(dice_batch.item(), inputs.size(0))
            # accumulate the confusion matrix on argmax predictions
            target = targets.cpu().numpy()
            pred = outputs.data.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            target = np.argmax(target, axis=1)
            evaluator.add_batch(target, pred)
            if configs.metric == "mIoU":
                iou_value = evaluator.Mean_Intersection_over_Union()
            else:
                iou_value = evaluator.Frequency_Weighted_Intersection_over_Union()
            Iou.update(iou_value, inputs.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Dice_coeff: {Dice_coeff: .4f} | {metric}: {Iou: .4f}'.format(
                batch=batch_idx + 1,
                size=len(valid_loader),
                data=data_time.val,
                bt=batch_time.val,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                Dice_coeff=Dice_coeff.avg,
                metric=configs.metric[0],
                Iou=Iou.avg,
            )
            bar.next()
            # NOTE(review): these scalars are (re)written every batch with the
            # same `epoch` step — confirm whether per-epoch logging was intended
            writer.add_scalar("Valid-Loss",losses.avg,epoch)
            writer.add_scalar("Valid-%s"%configs.metric,Iou.avg,epoch)
            writer.add_scalar("Valid-Dice",Dice_coeff.avg,epoch)
    bar.finish()
    return (losses.avg, Dice_coeff.avg, Iou.avg)
# Entry point: run the full training loop
if __name__ == "__main__":
    main()
|
# Write a list comprehension that results in a list of every letter in the
# word smog-tether capitalized.
str_to_parse = "smog-tether"
# Keep only the alphanumeric characters and upper-case each one
capitalized = [letter.upper() for letter in str_to_parse if letter.isalnum()]
print(capitalized)
# -*- coding: utf-8 -*-
from mbp.models import portal_user
from Library.mailhelper import sendMail,sendMail_Nosync
import requests
def sendsmscode(user_code=None, code=None):
    """Send the verification code to the mobile number of the given user.

    Looks up the portal user by user_code and, if found, posts the code and
    phone number to the local SMS gateway.

    :param user_code: Portal user code used to look up the mobile number.
    :param code: Verification code to deliver.
    """
    user = portal_user.query.filter(portal_user.user_code == user_code).first()
    if user:
        # Legacy mail-based delivery, kept for reference:
        #sendstr = 'MSG#{0}#{1}'.format(user.user_mobile, code)
        #sendMail_Nosync(sendstr,sendstr)
        data = {'smscode': code, 'phone': user.user_mobile}
        # Bug fix: without a timeout, a hung SMS gateway blocks this call
        # (and its caller) forever
        requests.post('http://127.0.0.1:6999', data=data, timeout=10)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 10:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the Language model to SignLanguage and update the fields and
    ordering that referenced it."""

    dependencies = [
        ('dictionary', '0007_auto_20170707_1349'),
    ]

    operations = [
        # Model rename: Language -> SignLanguage
        migrations.RenameModel(
            old_name='Language',
            new_name='SignLanguage',
        ),
        # Dialects now order by the renamed foreign-key field
        migrations.AlterModelOptions(
            name='dialect',
            options={'ordering': ['signlanguage', 'name']},
        ),
        # Field renames on Dialect and Gloss: language -> signlanguage
        migrations.RenameField(
            model_name='dialect',
            old_name='language',
            new_name='signlanguage',
        ),
        migrations.RenameField(
            model_name='gloss',
            old_name='language',
            new_name='signlanguage',
        ),
    ]
|
from app.main.util.response import response_object
from app.main import db
from app.main.model.special_skills_model import SpecialSkillsModel
def get_all_company():
    # NOTE(review): despite the name, this returns all special skills, not
    # companies — presumably copied from a company service; verify callers
    # before renaming.
    return SpecialSkillsModel.query.all()
def get_a_skills_by_name(name, is_main_skill):
    """Return hard (non-soft) skills, optionally filtered by name.

    :param name: Substring to match against the skill name; None or blank
                 means no name filter.
    :param is_main_skill: "true"/"True" restricts the result to skills whose
                          is_main flag is set (arrives as a string from the
                          query string).
    :return: List of SpecialSkillsModel rows.
    """
    # Base query: hard skills only (leftover debug prints removed)
    query = SpecialSkillsModel.query.filter(SpecialSkillsModel.is_soft == False)
    # Optional name filter
    if name is not None and name.strip() != "":
        query = query.filter(SpecialSkillsModel.name.contains(name))
    # Optional main-skill filter; the flag is a string, not a bool
    if is_main_skill in ("true", "True"):
        query = query.filter(SpecialSkillsModel.is_main.isnot(None))
    return list(query)
def get_soft_skills_by_name(name):
    """Return soft skills, optionally filtered by a name substring.

    :param name: Substring to match against the skill name; None or blank
                 means no name filter.
    :return: List of SpecialSkillsModel rows.
    """
    # Base query: soft skills only (leftover debug print removed)
    query = SpecialSkillsModel.query.filter(SpecialSkillsModel.is_soft == True)
    if name is not None and name.strip() != "":
        query = query.filter(SpecialSkillsModel.name.contains(name))
    return list(query)
def add_new_skill(name):
    """Create a hard skill with the given name and persist it.

    :param name: Name of the skill to create.
    :return: response_object with the created skill on success, or with
             data=None on failure.
    """
    skill = SpecialSkillsModel(
        name=name
    )
    try:
        db.session.add(skill)
        db.session.commit()
        return response_object(200, "Add skill success|Thêm skill thành công", data=skill.to_json())
    except Exception:
        # Bug fix: roll back the failed transaction so the session stays
        # usable for subsequent requests
        db.session.rollback()
        return response_object(200, "Add skill fail|Thêm skill thất bại", data=None)
def add_new_soft_skill(name):
    """Create a soft skill with the given name and persist it.

    :param name: Name of the soft skill to create.
    :return: response_object with the created skill on success, or with
             data=None on failure.
    """
    skill = SpecialSkillsModel(
        name=name,
        is_soft=True
    )
    try:
        db.session.add(skill)
        db.session.commit()
        return response_object(200, "Add skill success|Thêm skill thành công", data=skill.to_json())
    except Exception:
        # Bug fix: roll back the failed transaction so the session stays
        # usable for subsequent requests
        db.session.rollback()
        return response_object(200, "Add skill fail|Thêm skill thất bại", data=None)
# Sample tariff-outline fixtures. list1 is the clean baseline; list2 and
# list3 add progressively messier OCR-style variants (each deviation from
# the baseline format is marked "Oops! new case" inline).
list1 = ["TITLE 1: FOOD",
         "1.a) ANIMAL FOOD",
         "1. Fish",
         "(Tariff number 30). Kippers in boxes",
         "(Tariff number 31). Pickled herring",
         "2. Meat",
         "(Tariff number 45). Cow",
         "1.b) VEGETABLE FOOD",
         ]
# Adds a misspelled "(Tarifi ...", a comma-separated heading, a "*"-prefixed
# tariff line, and a lowercase title.
list2 = ["TITLE 1: FOOD",
         "1.a) ANIMAL FOOD",
         "1. Fish",
         "(Tariff number 30). Kippers in boxes",
         "(Tariff number 31). Pickled herring",
         "2. Meat",
         "(Tariff number 45). Cow",
         "(Tarifi number 46). Pig", # Oops! new case
         "1.b) VEGETABLE FOOD",
         "3, Oil", # Oops! new case
         "*Tariff number 480). Oil in bottles", # Oops! new case
         "title 2. MACHINES", # Oops! new case
         ]
# Further adds an unterminated tariff entry and mixed-case subsection labels.
list3 = ["TITLE 1: FOOD",
         "1.a) ANIMAL FOOD",
         "1. Fish",
         "(Tariff number 30). Kippers in boxes",
         "(Tariff number 31). Pickled herring",
         "2. Meat",
         "(Tariff number 45). Cow",
         "(Tarifi number 46). Pig",
         "1.b) VEGETABLE FOOD",
         "3, Oil",
         "*Tariff number 480). Oil in bottles",
         "(Tariff number 485 Cotton oil", # Oops! new case
         "title 2. MACHINES",
         "2a) ELECTRICAL MACHINES", # Oops! new case
         "2B) MECHANICAL MACHINES" # Oops! new case
         ]
|
# -*- coding: utf-8 -*-
"""
Clamor
~~~~~~
The Python Discord API Framework.
:copyright: (c) 2019 Valentin B.
:license: MIT, see LICENSE for more details.
"""
from .meta import *
from .rest import *
import logging
fmt = '[%(levelname)s] %(asctime)s - %(name)s:%(lineno)d - %(message)s'
logging.basicConfig(format=fmt, level=logging.INFO)
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
# Read a string n and an integer k from stdin.
n, k = input(), int(input())
lenn = len(n)
# 3-D table of shape (len(n)+1) x 2 x (k+1); each innermost row for flag i
# (0 or 1) is filled with k+1 copies of i.
# NOTE(review): seeding every DP cell with the flag value i looks like a
# placeholder initialization — confirm the intended base values.
dp = [[[i] * (k + 1) for i in range(2)] for j in range(lenn + 1)]
print(dp)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.