index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,400 | 90fc6590dab51141124ca73082b8d937008ae782 | """Файл, который запускается при python qtester
""" |
7,401 | f024b0736f5fcdebede8d5b0985cf9d7170db8fc | api_key = "your_key"
|
7,402 | 810e9e4b18ff8cb388f9e16607b8ab3389a9831d | def add_route_distance(routes, cities, source):
c = source.split()
citykey = c[0] + ':' + c[2]
cities.add(c[0])
routes[citykey] = c[4]
def get_route_distance(routes, source, dest):
    """Return the stored distance between source and dest, trying the
    forward key first and the reversed key otherwise (raises KeyError
    when neither direction was recorded)."""
    forward = source + ":" + dest
    if forward in routes:
        return routes[forward]
    return routes[dest + ":" + source]
# Build the route table from input.txt; each line is expected to look like
# "CityA to CityB = 123" (see add_route_distance above).
filename = "input.txt"
citylist = set()
# NOTE(review): the "0:0" -> "0" seed entry looks like a sentinel/debug
# value — confirm whether it is still needed.
routes = {"0:0": "0"}
with open(filename) as inputfile:
    for line in inputfile:
        add_route_distance(routes, citylist, line)
print('Cities to visit')
print(citylist)
print("Routes")
print(routes)
print("Distance from Straylight to Norrath")
print(get_route_distance(routes, "Straylight", "Norrath"))
|
7,403 | 34acb6da1dc9403a311ce3bca0a828a77b7b36da | """Some random mathematical helper functions.
"""
from __future__ import division, print_function
import math
# STATISTICS
def mean(L):
    """Arithmetic mean of the values in L."""
    total = sum(L)
    return total / len(L)
def variance(L, is_sample=0):
    """Population variance of L (sample variance when is_sample=1)."""
    center = sum(L) / len(L)
    squared_deviations = [(value - center) ** 2 for value in L]
    return sum(squared_deviations) / (len(L) - is_sample)
def std_dev(L, is_sample=0):
    """Standard deviation of L (sample standard deviation when is_sample=1)."""
    center = sum(L) / len(L)
    spread = sum((value - center) ** 2 for value in L) / (len(L) - is_sample)
    return math.sqrt(spread)
def z_score(num, mean, std_dev):
    """Standard score of num for a distribution with the given mean and
    standard deviation."""
    deviation = num - mean
    return deviation / std_dev
# COMBINATORICS
def fac(n):
    """Factorial of n for n >= 0.

    Fixed: the old base case returned ``n`` for n <= 2, so fac(0) was 0
    instead of 1 (which also made over(n, n) divide by zero).  Now
    iterative as well, so large n cannot hit the recursion limit.
    """
    assert n >= 0
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def over(n, k):
    """Binomial coefficient "n over k" (n choose k).

    Fixed: the old body computed fac(n) // fac(n-k) — the falling
    factorial (permutations), not the binomial coefficient that the name
    and docstring promise — and crashed for k == n because fac(0) was 0.
    The only in-file caller (pick_grom_group) uses a ratio of two of
    these values in which the k! factor cancels, so it is unaffected.
    Uses math.factorial instead of the local fac helper.
    """
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
def coin(coins, heads):
    """Probability for given number of heads (or tails) when throwing given
    number of fair coins, as an exact fractions.Fraction.

    Fixed: the old body referenced undefined names (Fraction was never
    imported, and it used ``c``/``n`` instead of the declared parameters),
    so every call raised NameError.
    """
    from fractions import Fraction
    # Number of outcomes with exactly `heads` heads: C(coins, heads).
    ways = math.factorial(coins) // (math.factorial(coins - heads) * math.factorial(heads))
    return Fraction(ways, 2 ** coins)
def pick_grom_group(group, other, selected):
    """When selecting 'selected' number of individuums from 'group' and
    'other', return the probability that all of them come from 'group',
    as an exact fractions.Fraction.

    Fixed: the old body called the undefined name 'Faction' and so raised
    NameError on every call.  (The 'grom' typo in the function name is
    kept: renaming would break existing callers.)
    """
    from fractions import Fraction
    # C(group, selected) / C(group + other, selected), with the selected!
    # terms cancelled out.
    favourable = math.factorial(group) * math.factorial(group + other - selected)
    total = math.factorial(group - selected) * math.factorial(group + other)
    return Fraction(favourable, total)
def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):
    """Probability of drawing one coin from a bag of fair and unfair coins
    and then seeing heads on every one of heads_needed flips.

    percent_unfair is the unfair coin's heads probability in percent.
    """
    weight_fair = (num_coins - num_unfair) / num_coins
    weight_unfair = num_unfair / num_coins
    all_heads_fair = 0.5 ** heads_needed
    all_heads_unfair = (percent_unfair / 100) ** heads_needed
    # Law of total probability over the two coin types.
    return weight_fair * all_heads_fair + weight_unfair * all_heads_unfair
# GEOMETRY
def herons_formula(a, b, c):
    """Area of the triangle with side lengths a, b and c (Heron's formula);
    also prints the formula as a reminder."""
    print("sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2")
    semi = (a + b + c) / 2
    product = semi * (semi - a) * (semi - b) * (semi - c)
    return math.sqrt(product)
def area_equilat(side):
    """Area of an equilateral triangle: half the base times the height."""
    half = side / 2
    return half * math.sqrt(side ** 2 - half ** 2)
# LINEAR ALGEBRA
def inv(a, b, c, d):
    """Inverse of the 2x2 matrix [[a, b], [c, d]].

    Returns an iterator of the four entries (row-major, each scaled by
    1/det) as exact fraction strings.  Raises ZeroDivisionError when the
    matrix is singular (det == 0).

    Fixed: the module never imported 'fractions', so every call raised
    NameError; the import is now local to the function.
    """
    import fractions
    det = a * d - b * c
    scale = lambda x: fractions.Fraction(x, det)
    return map(str, map(scale, [d, -b, -c, a]))
def det2(m):
    """Determinant of a 2x2 matrix given as two rows."""
    top, bottom = m
    return top[0] * bottom[1] - top[1] * bottom[0]
def det3(m):
    """Determinant of a 3x3 matrix via cofactor expansion along the
    first row (2x2 minors computed inline)."""
    (a, b, c), (d, e, f), (g, h, i) = m
    minor_a = e * i - f * h
    minor_b = d * i - f * g
    minor_c = d * h - e * g
    return a * minor_a - b * minor_b + c * minor_c
# SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER
def series(r, n):
    """Sum of the geometric series 1 + r + r^2 + ... + r^(n-1).

    Fixed: r == 1 used to raise ZeroDivisionError; the sum is then
    simply n (n terms, each equal to 1).
    """
    if r == 1:
        return n
    return (1 - r ** n) / (1 - r)
def quad_form(a, b, c):
    """Real roots x of ax^2 + bx + c = 0, smaller root first.

    Raises ValueError (math domain error) when the discriminant is
    negative and ZeroDivisionError when a == 0.
    """
    root = math.sqrt(b ** 2 - 4 * a * c)
    denominator = 2 * a
    return ((-b - root) / denominator, (-b + root) / denominator)
def master_method(a, b, d):
    """Estimate the complexity of T(n) = a*T(n/b) + O(n^d) via the
    Master Method and print the result (returns None)."""
    threshold = b ** d
    if a == threshold:
        print("Case 1: a = b^d")
        print("-> O(n^%d log n)" % d)
    elif a < threshold:
        print("Case 2: a < b^d")
        print("-> O(n^%d)" % d)
    else:
        # a > b^d: recursion dominated by the leaves.
        print("Case 3: a > b^d")
        print("-> O(n^log%d(%d))" % (b, a))
        print(" = O(n^%.2f)" % math.log(a, b))
|
7,404 | b111d799b9e71cf36253c37f83dc0cdc8887a32e | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <nicola.malcontenti@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class StockPicking(orm.Model):
    """stock.picking extension: include a shipping address in the values
    used to create invoices from pickings.

    NOTE(review): written against the old OpenERP osv/orm API (cr/uid
    signatures) — confirm the target server version.
    """
    _inherit = "stock.picking"
    #def _get_invoice_vals(self, cr, uid, key, inv_type,
    # journal_id, origin, context=None):
    # invoice_vals = super(StockPicking, self)._get_invoice_vals(
    # cr, uid, key, inv_type, journal_id, origin, context=context)
    # if context.get('active_id'):
    # picking_id = int(context['active_id'])
    # partner_id = self.browse(cr, uid, picking_id, context=context).partner_id
    # if partner_id:
    # invoice_vals['address_shipping_id'] = partner_id.id
    # return invoice_vals
    def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, origin, context=None):
        """Build the values dict used to create an invoice from a picking.

        key is the (partner, currency_id, company_id, user_id) grouping
        tuple used by the invoicing flow.  The receivable account and the
        customer payment term are used for customer invoices/refunds; the
        payable account and supplier payment term otherwise.
        """
        if context is None:
            context = {}
        partner, currency_id, company_id, user_id = key
        if inv_type in ('out_invoice', 'out_refund'):
            account_id = partner.property_account_receivable.id
            payment_term = partner.property_payment_term.id or False
        else:
            account_id = partner.property_account_payable.id
            payment_term = partner.property_supplier_payment_term.id or False
        return {
            # NOTE(review): 'origin' is used both as an object carrying a
            # picking_id and as a plain fallback value — confirm what type
            # callers actually pass here.
            'origin': origin and origin.picking_id.name or origin,
            'date_invoice': context.get('date_inv', False),
            # Shipping address defaults to the invoice partner itself (the
            # commented-out variant above took it from the picking instead).
            'address_shipping_id': partner.id,
            'user_id': user_id,
            'partner_id': partner.id,
            'account_id': account_id,
            'payment_term': payment_term,
            'type': inv_type,
            'fiscal_position': partner.property_account_position.id,
            'company_id': company_id,
            'currency_id': currency_id,
            'journal_id': journal_id,
        }
|
7,405 | 83ebebbb6191295adcb58b003bf1c3bcc6fb189f | from selenium import webdriver
import time
def test_check_error_page_1():
    """Fill in and submit the registration form on registration1.html and
    assert that the success banner appears.

    Fixed: the driver is now created before the try block — previously,
    if webdriver.Chrome() itself failed, the finally clause raised a
    NameError on the undefined 'browser', masking the real error.

    NOTE(review): find_element_by_* is removed in Selenium 4 — migrate to
    find_element(By.XPATH, ...) when upgrading.
    """
    link = "http://suninjuly.github.io/registration1.html"
    browser = webdriver.Chrome()
    try:
        browser.get(link)
        # First name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/input')
        first_input.send_keys('Иван')
        # assert field_text=='First name*'
        # Last name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/input')
        first_input.send_keys('Петров')
        # assert field_text == 'Last name*'
        # Email* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/input')
        first_input.send_keys('email@email.test')
        # assert field_text == 'Email*'
        # Submit the completed form
        button = browser.find_element_by_css_selector("button.btn")
        button.click()
        # Wait for the next page to load
        time.sleep(1)
        # The h1 element holds the welcome message
        welcome_text_elt = browser.find_element_by_tag_name("h1")
        welcome_text = welcome_text_elt.text
        # Check that registration succeeded
        assert "Congratulations! You have successfully registered!" == welcome_text
    finally:
        # Pause so the outcome can be inspected visually
        time.sleep(10)
        # Always shut the browser down
        browser.quit()
def test_check_error_page_2():
    """Fill in and submit the registration form on registration2.html and
    assert that the success banner appears.

    Fixed: the driver is now created before the try block — previously,
    if webdriver.Chrome() itself failed, the finally clause raised a
    NameError on the undefined 'browser', masking the real error.

    NOTE(review): find_element_by_* is removed in Selenium 4 — migrate to
    find_element(By.XPATH, ...) when upgrading.
    """
    link = "http://suninjuly.github.io/registration2.html"
    browser = webdriver.Chrome()
    try:
        browser.get(link)
        # First name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/input')
        first_input.send_keys('Иван')
        # assert field_text=='First name*'
        # Last name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/input')
        first_input.send_keys('Петров')
        # assert field_text == 'Last name*'
        # Email* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/input')
        first_input.send_keys('email@email.test')
        # assert field_text == 'Email*'
        # Submit the completed form
        button = browser.find_element_by_css_selector("button.btn")
        button.click()
        # Wait for the next page to load
        time.sleep(1)
        # The h1 element holds the welcome message
        welcome_text_elt = browser.find_element_by_tag_name("h1")
        welcome_text = welcome_text_elt.text
        # Check that registration succeeded
        assert "Congratulations! You have successfully registered!" == welcome_text
    finally:
        # Pause so the outcome can be inspected visually
        time.sleep(10)
        # Always shut the browser down
        browser.quit()
7,406 | c1bcce809aa073ecd6e64dfa65ead9bd48aee3ff | from ui.pages import BasePage
from ui.locators.login_page_locators import LoginPageLocators
class LoginPage(BasePage, LoginPageLocators):
    """Page object for the login screen."""

    def __init__(self, driver=None):
        super(LoginPage, self).__init__(driver=driver)
        self.identifier = self.IDENTIFIER

    def login(self, email=None, password=None, remember_me=False):
        """Navigate to the page and sign in.

        Falls back to the standard credentials when email/password are not
        given.  Returns True on success, False as soon as any step fails.
        """
        self.navigate()
        if not self.wait_for_page_to_load():
            return False
        user = email or self.std.login_user
        secret = password or self.std.login_password
        steps = [
            lambda: self.set_text(self.USERNAME_INPUT, user),
            lambda: self.set_text(self.PASSWORD_INPUT, secret),
        ]
        if remember_me:
            steps.append(lambda: self.click(self.REMEMBER_CHECKBOX))
        steps.append(lambda: self.click(self.SIGN_IN_BTN))
        for step in steps:
            if not step():
                return False
        return True

    def get_error_messages(self):
        """Return (password_error, username_error) texts, or False when
        neither validation message is shown."""
        user_error = self.get_text(self.USERNAME_MISSING_DIV)
        pass_error = self.get_text(self.PASSWORD_MISSING_DIV)
        if pass_error or user_error:
            return pass_error, user_error
        return False

    def forgot_password(self):
        """Click the 'forgot password' link; True when the click succeeded."""
        return bool(self.click(self.FORGOT_PASSWORD_LINK))
|
7,407 | a62dd287f9fc6f79ef95a3de83f52c794efe00a7 |
import math
import turtle
# Screen setup: light-blue canvas for the drawing.
wn = turtle.Screen()
wn.bgcolor('lightblue')
# Rough value of pi; adequate for the 10-degree angle steps used below.
PI=3.14
# NOTE(review): these two radii are not referenced by cycloid() below —
# confirm whether they are leftovers from an earlier version.
R_outer=50
R_inner=200
# Pen used for the drawing; speed(99999) is clamped to "fastest".
fred = turtle.Turtle()
fred.speed(99999)
def cycloid(r, k, nos_cycle, direction):
    """Draw a cycloid with rolling radius r over nos_cycle arches, using
    the module-level turtle 'fred' and the module-level PI constant.

    Parameters k and direction are accepted for interface compatibility
    but are unused by the active formula (direction only matters for the
    commented-out epicycloid variant).

    Fixed: removed the dead initial assignments x = 1, y = 0, which were
    overwritten before their first use.
    """
    steps_per_cycle = 36
    angle = 2 * PI / steps_per_cycle
    for i in range(nos_cycle * steps_per_cycle):
        beta = i * angle
        # Parametric cycloid equations.
        x = r * (beta - math.sin(beta))
        y = r * (1 - math.cos(beta))
        ### equally valid (epicycloid-style) variant:
        ###x = ((r)*math.cos(beta) + r*beta)
        ###y = direction*(r)*math.sin(beta)
        fred.goto(x, y)
# Draw 100 arches with rolling radius 10 (the k and direction arguments
# are ignored by the current cycloid formula).
cycloid(10, 0.1, 100, -1)
# Keep the window open until it is clicked.
wn.exitonclick()
|
7,408 | 34a7fd66a9e2eae25994336f22a76c24c11a6e1b |
from django.urls import path
from admin_panel import views
# URL routes for the admin panel app.
urlpatterns = [
    path('admin_panel/', views.AdminPanel.as_view(), name='admin_panel'),
    # The same view serves the connection page with and without a
    # department selected in the URL.
    path('admin_panel/connection/', views.Connection.as_view(), name='connect_group-teacher'),
    # NOTE(review): no trailing slash here, unlike the routes above —
    # confirm this is intentional.  The 'choiced_departament' spelling is
    # part of the URL-kwarg contract and must match the view's parameter.
    path('admin_panel/connection/<str:choiced_departament>', views.Connection.as_view(), name='connect_group-teacher')
]
|
7,409 | c420fb855fbf5691798eadca476b6eccec4aee57 | points_dict = {
'+': 5,
'-': 4,
'*': 3,
'/': 2,
'(': -1,
}
op_list = ['+','-','*','/']
def fitness(x1, op, x2):
    # Score for joining expression x1 with operator op and operand x2:
    # the operator's bonus minus the distance of the result from 24, with
    # a flat +10 reward for hitting 24 exactly.  A division by zero is
    # the worst possible outcome.
    try:
        value = eval(f"{x1} {op} {x2}")
    except ZeroDivisionError:
        return float("-inf")
    score = points_dict[op] - abs(24 - value)
    return score + 10 if abs(24 - value) == 0 else score
def calc_points(expr):
    # Total score of a finished expression: the sum of per-character
    # operator bonuses minus the distance of its value from 24.
    value = eval(expr)
    bonuses = sum(points_dict.get(ch, 0) for ch in expr)
    return bonuses - abs(24 - value)
def solve(bil):
    """Greedily build a 24-game expression from the four numbers in bil.

    Starts from the largest number and repeatedly appends the operator and
    operand with the best fitness.  Returns (expression, points).

    Fixed: the caller's list is no longer mutated — the old code sorted it
    in place and .remove()d the chosen operands from it.
    """
    remaining = sorted(bil, reverse=True)
    expr = str(remaining[0])
    remaining = remaining[1:]
    for _ in range(3):
        best_fitness = float("-Inf")
        best_op = best_operand = None
        for candidate in remaining:
            for op in op_list:
                score = fitness(expr, op, candidate)
                # Strict > keeps the first-seen candidate on ties, matching
                # the original tie-breaking behaviour.
                if score > best_fitness:
                    best_fitness = score
                    best_op = op
                    best_operand = candidate
        expr += f" {best_op} {best_operand}"
        remaining.remove(best_operand)
    return (expr, calc_points(expr))
def main():
    """Sweep all 13^4 draws of four card values, solve each one, and report
    the average score and how many distinct expressions evaluate to 24.

    Fixed: the old duplicate check tested 'expre not in solves' while
    'solves' held (expression, points) tuples, so the membership test never
    matched and every expression was appended and counted; a separate set
    of seen expressions now makes the deduplication effective.
    """
    points = 0
    solves = []
    seen = set()
    for a in range(1, 14):
        for b in range(1, 14):
            for c in range(1, 14):
                for d in range(1, 14):
                    expre, point = solve([a, b, c, d])
                    if expre not in seen:
                        seen.add(expre)
                        solves.append((expre, point))
                        points += point
                        print(f"{(a-1)*13*13*13+(b-1)*13*13+(c-1)*13+d} : {expre}")
    avg_points = points / (13 ** 4)
    print(f"Average points : {avg_points}")
    # Count the distinct expressions that hit 24 exactly.
    count24 = sum(1 for expr, _ in solves if eval(expr) == 24)
    print(f"24 Count : {count24}")


if __name__ == "__main__":
    main()
|
7,410 | ea4e4c8067d9e910b8d4c6a1c4c01f1ef70d7341 | /home/pushkar/anaconda3/lib/python3.6/_bootlocale.py |
7,411 | 9dfb3f58127b30467651ac4209277cd947643c65 | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.contrib import messages
# Create your views here.
from User.models import User, check_if_auth_user
from .models import Chat
# def recv_chat(request, id = None):
# check = check_if_auth_user(request)
# if not check:
# messages.error(request, "Perform login first to start chatting")
# return redirect("home:welcome")
# current_user = User.objects.filter(user_id = check)[0]
# other_user = get_object_or_404(User, auto_id = id)
# message = request.POST.get('chat_msg')
# try:
# if current_user and other_user and message:
# #sql = """INSERT INTO User_user( name, user_id, user_pwd, contact, address)
# # Values(%s,%s,%s,%s,%s)""" % ( name, email, pwd, con, add)
# chat = Chat(
# chat_sender = current_user.user_id,
# chat_reciever = other_user.user_id,
# message = message)
# chat.save()
# return redirect(chat.get_return_url())
# except Exception,error:
# messages.error(request, "Some Internal Error. Try again")
# return redirect(chat.get_return_url())
def begin_chat(request, id = None):
    """Render the chat page between the logged-in user and user 'id'.

    Redirects to the welcome page when no user is authenticated.

    Fixed: the raw SQL used to be built with str.format, interpolating the
    user ids straight into the statement (SQL injection); it now uses
    parameterized placeholders via Chat.objects.raw(sql, params).
    """
    check = check_if_auth_user(request)
    if not check:
        messages.error(request, "Perform login first to start chatting")
        return redirect("home:welcome")
    current_user = User.objects.filter(user_id = check)[0]
    other_user = get_object_or_404(User, auto_id = id)
    # Messages in either direction between the two users.
    # NOTE(review): table name 'chat_start_chat' — confirm it matches the
    # Chat model's db_table.
    sql = """SELECT * FROM chat_start_chat
            WHERE chat_sender=%s and chat_reciever=%s
            OR chat_sender=%s and chat_reciever=%s;"""
    chat_list = Chat.objects.raw(
        sql,
        [current_user.user_id, other_user.user_id,
         other_user.user_id, current_user.user_id])
    context_data = {
        "user" : current_user,
        "other_user" : other_user,
        "chatmessage_list": chat_list,
    }
    return render(request, "chat.html",context_data)
|
7,412 | db9068e54607e9df48328435ef07f15b4c25a6db | # %matplotlib inline
import tensorflow as tf
#import tensorflow.keras as K
import numpy as np
import math
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
# from keras import backend as K
from keras.models import Sequential, load_model
# from K.models import Sequential, load_model
from keras.layers import InputLayer, Input, Dense, Dropout
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.backend import clear_session
## pip install h5py scikit-optimize
## once you have that installed, you can run the following code.
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
matplotlib.use('GTKAgg')
from skopt.plots import plot_convergence
matplotlib.use('GTKAgg')
from skopt.plots import plot_objective, plot_evaluations
matplotlib.use('GTKAgg')
import csv
from timeit import default_timer as timer
#from skopt.plots import plot_histogram, plot_objective_2D
from skopt.utils import use_named_args
from sklearn.metrics import roc_auc_score ## Computer Area Under the Curve
from datetime import datetime ## time the Optimization time
## Load the dataset: whitespace-delimited text matrices, samples as rows
## of features and labels as one value per row.
train_samples = np.loadtxt("data/train_samples.txt", delimiter=' ', comments='# ', encoding=None)
train_labels = np.loadtxt("data/train_labels.txt", delimiter=' ', comments='# ', encoding=None)
valid_samples = np.loadtxt("data/valid_samples.txt", delimiter=' ', comments='# ', encoding=None)
valid_labels = np.loadtxt("data/valid_labels.txt", delimiter=' ', comments='# ', encoding=None)
## To set up the search space, first define one dimension per hyperparameter
## we are going to explore.
##
## The learning rate is any real number between 1e-6 and 1e-2, but the search
## walks it on a log-uniform scale rather than uniformly within the bounds.
learning_rate_dim = Real(low=1e-6, high=1e-2, prior='log-uniform', name='learning_rate')
## The number of layers, on the other hand, is explored with integer steps.
dense_layers_dim = Integer(low=1, high=5, name='dense_layers')
## Also try different numbers of nodes per layer.
nodes_dim = Integer(low=5, high=512, name='nodes')
## Finally a Categorical dimension; the choices must be listed explicitly
## because scikit-optimize cannot invent them.
activation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')
## Combine all the parameters into a list, so that we can pass it to a function.
dimensions = [learning_rate_dim,
              dense_layers_dim,
              nodes_dim,
              activation_dim]
## To kick off, it helps to start the search from a set of hyperparameters
## that we intuitively know performs reasonably well.  These defaults are
## not horrible, but they do not perform great either.
default_parameters = [1e-5, 1, 16, 'relu']
## Helper used to log the performance of each trained model.
def log_dir_name(learning_rate, dense_layers, nodes, activation):
    """Build the TensorBoard log-dir name encoding the given hyperparameter
    set, so each trained model logs to its own directory."""
    pattern = "./2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/"
    return pattern.format(learning_rate, dense_layers, nodes, activation)
## This function is adapted from my previous solution with GridSearchCV.
def create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):
    """Build and compile a dense binary classifier.

    learning_rate: learning rate for the Adam optimizer.
    dense_layers:  number of hidden Dense layers.
    nodes:         number of nodes in each hidden layer.
    activation:    activation function for the hidden layers.
    dropout_rate:  dropout applied after every hidden layer.

    Fixed: removed the dead 'input_shape' computation and the unnecessary
    'global train_samples' statement (reading a module-level name needs no
    global declaration).
    """
    model = Sequential()
    ## Input shape is (features,); the batch size is excluded.
    model.add(InputLayer(input_shape=(len(train_samples[0]),)))
    for i in range(dense_layers):
        ## Name each layer so Keras reports unique, readable names.
        name = 'layer_dense_{0}'.format(i+1)
        model.add(Dense(nodes, activation=activation, name=name))
        model.add(Dropout(dropout_rate))
    ## Single sigmoid output, as is usual for binary classification.
    model.add(Dense(1, activation='sigmoid'))
    optimizer = Adam(lr=learning_rate)
    ## Compile the model
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model
## Before training any model, fix the path where the best-performing model
## will be stored.
best_model_path = '19_best_model.keras'
## Global running best validation AUC (updated inside fitness()).
best_auc = 0.0
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, dense_layers, nodes, activation):
    """Train a model with the given hyperparameters and return -AUC
    (gp_minimize minimizes, so a better AUC yields a lower value).

    Saves the model to best_model_path whenever it beats the best AUC
    seen so far.

    Fixed: the learning rate is now printed in scientific notation —
    the old '{0:.1f}' format rendered every rate in [1e-6, 1e-2] as 0.0.
    """
    # Print the selected hyperparameters.
    print('learning rate: {0:.1e}'.format(learning_rate))
    print('num_dense_layers:', dense_layers)
    print('num_nodes:', nodes)
    print('activation:', activation)
    print("")
    ## Create the neural network with these hyperparameters.
    model = create_model(learning_rate, dense_layers, nodes, activation)
    ## Train with Keras; batch size is a quarter of the training set.
    history = model.fit(x=train_samples,
                        y=train_labels,
                        epochs=10,
                        batch_size=int(4010/4))
    ## Score on the validation set using Area Under the ROC Curve.
    predictions = model.predict(valid_samples)
    auc = roc_auc_score(valid_labels, predictions)
    print('')
    print("AUC = : {0:.2%}".format(auc))
    print('')
    ## Save the model if it improves on the best-found performance; the
    ## global keyword lets us update the module-level tracker.
    global best_auc
    if auc > best_auc:
        model.save(best_model_path)
        best_auc = auc
    ## Free the model and reset the Keras session between runs.
    del model
    clear_session()
    return -auc
## Run the fitness function once with the default hyperparameters; thanks to
## the @use_named_args decorator it accepts the whole list via x=...
fitness(x=default_parameters)
## Bayesian optimization over the search space, seeded with the defaults.
search_result = gp_minimize(func=fitness,
                            dimensions=dimensions,
                            acq_func='EI', # Expected Improvement.
                            n_calls=40,
                            x0=default_parameters)
## Report Result of the optimizer.
print("Best serach results:")
print(search_result.x)
print(search_result.space)
print("Lowest fitness value:")
print(search_result.fun)
## Pair each evaluated fitness value with its hyperparameters, best first.
zipped = sorted(zip(search_result.func_vals, search_result.x_iters))
print(zipped)
## Write sorted results to csv file for exporting
of = open('output_bayesian_optimization.csv', 'w')
header="Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\n"
of.write(header)
for i in zipped:
    row = "{0}; {1}; {2}; {3}; {4};\n".format(i[0], i[1][0], i[1][1], i[1][2], i[1][3])
    of.write(row)
of.close()
## Plot results of optimizer
dim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']
plot_objective(search_result, dimensions=dim_names)
plot_evaluations(search_result)
|
7,413 | e839eba2514c29a8cfec462f8d5f56d1d5712c34 | #!/usr/bin/env python
import argparse
import http.server
import os
class SimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler variant that suppresses request logging."""
    def log_message(self, *args, **kwargs):
        # Intentionally drop every log line instead of writing to stderr.
        pass
# Command-line interface: a single optional port argument; the PORT
# environment variable (falling back to 8000) provides the default.
parser = argparse.ArgumentParser()
parser.add_argument('port', action='store',
                    # default=8000, type=int,
                    default=int(os.environ.get("PORT", "8000")), type=int,
                    nargs='?',
                    help='Specify alternate port [default: 8000]')
args = parser.parse_args()
# Serve the current directory on the chosen port (blocks until interrupted).
http.server.test(HandlerClass=SimpleHTTPRequestHandler, port=args.port)
|
7,414 | 48270f70a9d69d15f808f22ec2d11d337b2c4845 | def densenet(D,DT,F,model):
import scipy.io as sio
import time
import os
import math
import numpy as np
import matplotlib.pyplot as plt
Dataset = D
if DT == 'org':
data_type = 'original'
else:
data_type = 'augmented'
fs = model.fs
fm1 = model.fm1
batch_size = model.batch_size[0]
learn_rate = model.learn_rate
num_layers = model.num_layers
k_fm = model.k_fm
bottleneck = model.bottleneck
dropout_prob = model.dropout_prob
num_of_test = model.num_of_test
###############
# load training / testing set from CrossVal folder,
# names for training set, 'D1_1st_fold_train.mat', 'Augmented_D1_1st_fold_train.mat'
# name for testing set, 'D1_1st_fold_test.mat'
###############
if F == 1:
file_name = '1st_fold'
elif F == 2:
file_name = '2nd_fold'
elif F == 3:
file_name = '3rd_fold'
elif F == 4:
file_name = '4th_fold'
elif F == 5:
file_name = '5th_fold'
path = os.path.join('CrossVal', 'D'+Dataset)
print("path " ,path)
if data_type == 'original':
Train =sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_train.mat'))
else:
Train =sio.loadmat(os.path.join(path, 'Augmented_D'+Dataset+'_'+file_name+'_train.mat'))
Test = sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_test.mat'))
if Dataset == '1':
number_of_classes = 24
num_of_ep = 50
num_of_test = 20
if data_type == 'augmented':
train_imgs = 526190
else:
train_imgs = 52619
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
elif Dataset == '2':
number_of_classes = 36
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 20120
else:
train_imgs = 2012
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
else:
number_of_classes = 10
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 16000
else:
train_imgs = 1600
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
iteration_to_display = int(iteration / num_of_test)
list_to_display = []
for i in range(num_of_test):
if i !=num_of_test:
list_to_display.append(int(iteration_to_display*(i+1)))
del i
total_fm_Block_1 = fm1+(num_layers*k_fm)
total_fm_Block_2 = total_fm_Block_1+(num_layers*k_fm)
total_fm_Block_3 = total_fm_Block_2+(num_layers*k_fm)
fc_nodes = [total_fm_Block_3 ]
Train_Images = Train['trainImages']
Train_Labels = Train['trainLabels2']
total_trainImages = len(Train_Images[0,2])
print(total_trainImages)
Train_Images = Train_Images.reshape(784,total_trainImages).transpose().astype('float32')
Train_Labels = Train_Labels.transpose().astype('float64')
Test_Images = Test['testImages']
Test_Labels = Test['testLabels2']
total_testImages = len(Test_Images[0,2])
Test_Images = Test_Images.reshape(784,total_testImages).transpose().astype('float32')
Test_Labels = Test_Labels.transpose().astype('float64')
Target_labels = np.argmax(Test_Labels,axis=1)
del Test
del Train
import tensorflow as tf
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
tf.set_random_seed(1)
def weight_variable(shape,n):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial,name=n)
def bias_variable(shape,n):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial,name=n)
def avg_pool(input, s):
return tf.nn.avg_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def max_pool(input, s):
return tf.nn.max_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def conv2d_1(input, in_features, out_features, kernel_size, name="W", with_bias=False):
W = weight_variable([ kernel_size, kernel_size, in_features, out_features], name)
conv = tf.nn.conv2d(input, W, [ 1, 1, 1, 1 ], padding='SAME')
if with_bias:
return conv + bias_variable([ out_features ])
return conv
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob, idx, scope='conv_block'):
with tf.variable_scope(scope):
current = tf.layers.batch_normalization(current, scale=True, training=is_training)
current = tf.nn.relu(current)
current = conv2d_1(current, in_features, out_features, kernel_size, name="W"+str(idx))
current = tf.nn.dropout(current, keep_prob)
return current
def block(input, layers, in_features, growth, is_training, keep_prob, name="Block_"):
with tf.name_scope(name):
with tf.variable_scope(name):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth, fs, is_training, keep_prob, idx+1, scope='conv_block_'+str(idx+1))
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
# --- Graph inputs: flattened 28x28 images, one-hot labels, runtime switches ---
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
training = tf.placeholder(tf.bool)
# --- DenseNet body: stem conv, then 3 dense blocks separated by transitions ---
current = conv2d_1(x_image, 1, fm1, fs, name="W1", with_bias=False)
current, features = block(current, num_layers, fm1, k_fm, training, keep_prob, name="Block_1")
b1_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr"):
    #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_1')
    # 1x1 bottleneck followed by an fs x fs conv, then spatial downsampling.
    current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_1')
    t1_b_conv_printop = tf.Print(current, [current])
    current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_1_1')
    t1_conv_printop = tf.Print(current, [current])
    current = max_pool(current, 2)
    #current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_2")
b2_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_2"):
    #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_2')
    current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_2')
    t2_b_conv_printop = tf.Print(current, [current])
    current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_2_1')
    t2_conv_printop = tf.Print(current, [current])
    current = max_pool(current, 2)
    #current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_3")
b3_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_3"):
    #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_3')
    current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_3')
    t3_b_conv_printop = tf.Print(current, [current])
    current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_3_1')
    t3_conv_printop = tf.Print(current, [current])
    # Global 7x7 average pool, then flatten for the classifier head.
    current = avg_pool(current, 7)
    current = tf.reshape(current, [tf.shape(current)[0], -1])
with tf.name_scope("Dense_Last_lyr"):
    W_fc3 = weight_variable([fc_nodes[0], number_of_classes],"w_fc3")
    b_fc3 = bias_variable([number_of_classes],"b_fc3")
    y_conv = tf.matmul(current, W_fc3) + b_fc3
    prediction_prob = tf.nn.softmax(y_conv)
    prediction_prob_printop = tf.Print(prediction_prob, [prediction_prob])
with tf.name_scope("Xent"):
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope("train"):
    # Run batch-norm moving-average updates before each optimizer step.
    extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(extra_update_ops):
        train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)
with tf.name_scope("accuracy"):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    wrong_prediction_printop = tf.Print(wrong_prediction, [wrong_prediction])
    predicted_labels = tf.argmax(y_conv, 1)
    predicted_labels_printop = tf.Print(predicted_labels, [predicted_labels])
# Mini-batch cursor over the training set (wraps around at the end).
index = 0
index_end = index + batch_size
remaining = 0
start_time = time.time()
costs = []
accuracy_list = []
list_of_predicted_list = []
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer(),tf.set_random_seed(0))
    for i in range(iteration):
        if index_end > total_trainImages:
            # Wrap around: take the tail of the data plus a slice from the front.
            remaining = total_trainImages - (index_end-batch_size)
            images = Train_Images[(index_end-batch_size):total_trainImages, :]
            labels = Train_Labels[(index_end-batch_size):total_trainImages, :]
            index = 0
            index_end = index + batch_size - remaining
            images = np.vstack((images, Train_Images[index:index_end, :]))
            labels = np.vstack((labels, Train_Labels[index:index_end, :]))
            batch = (images, labels)
            index = index_end
            index_end = index + batch_size
        else:
            batch = (Train_Images[index:index_end, :], Train_Labels[index:index_end, :])
            index = index + batch_size
            index_end = index_end + batch_size
        if i in list_to_display:
            # Periodic full test-set evaluation (one image at a time) plus
            # activation dumps for the first few images.
            elapsed_time = time.time() - start_time
            print('Elapsed Time Before for loop: %f secs' % elapsed_time)
            Accuracy = 0
            itrt_index = i
            print('debug: %d & %d' % (iteration,i))
            if Dataset == '1':
                if file_name == '5th_fold':
                    num_test = 13154
                else:
                    num_test = 13155
            elif Dataset == '2':
                num_test = 503
            elif Dataset == '3':
                num_test = 400
            print(num_test)
            for img_index in range(num_test):
                t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
                t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
                test_acc = accuracy.eval(feed_dict={
                    x: t_image, y_: t_label,
                    keep_prob: 1.0, training:False})
                Accuracy += test_acc
                # NOTE(review): this rebinds `prediction_prob`, shadowing the
                # graph tensor of the same name; the .mat file below therefore
                # stores only the LAST image's probabilities — confirm intended.
                wrong, predicted, prediction_prob = sess.run([wrong_prediction_printop,
                    predicted_labels_printop,prediction_prob_printop],
                    feed_dict={
                    x: t_image, y_: t_label,
                    keep_prob: 1.0, training:False})
                if img_index <= 3:
                    b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
                        t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
                        feed_dict={
                        x: t_image, y_: t_label,
                        keep_prob: 1.0, training:False})
                    if img_index == 0:
                        b1_list = b1
                        b2_list = b2
                        b3_list = b3
                        t1_list = t1
                        t2_list = t2
                        t3_list = t3
                        t1_b_list = t1_b
                        t2_b_list = t2_b
                        t3_b_list = t3_b
                    else:
                        b1_list = np.append(b1_list,b1,axis=0)
                        b2_list = np.append(b2_list,b2,axis=0)
                        b3_list = np.append(b3_list,b3,axis=0)
                        t1_list = np.append(t1_list,t1,axis=0)
                        t2_list = np.append(t2_list,t2,axis=0)
                        t3_list = np.append(t3_list,t3,axis=0)
                        t1_b_list = np.append(t1_b_list,t1_b,axis=0)
                        t2_b_list = np.append(t2_b_list,t2_b,axis=0)
                        t3_b_list = np.append(t3_b_list,t3_b,axis=0)
                if img_index == 0 :
                    wrong_list_1 = wrong
                    predicted_list_1 = predicted
                    prediction_prob_1 = prediction_prob
                else:
                    wrong_list_1 = np.append(wrong_list_1,wrong,axis=0)
                    predicted_list_1 = np.append(predicted_list_1,predicted,axis=0)
                    prediction_prob_1 = np.append(prediction_prob_1, prediction_prob)
            Accuracy = Accuracy/num_test
            accuracy_list.append(Accuracy)
            list_of_predicted_list.append(predicted_list_1)
            print('Average test accuracy: %g' % Accuracy)
            epoch_around = math.ceil((itrt_index * batch_size) / total_trainImages)
            sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list_1, 'predicted_list': predicted_list_1, 'Target_labels':Target_labels,
                'prediction_prob':prediction_prob, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
                't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
            elapsed_time = time.time() - start_time
            print('Elapsed Time: %f secs' % elapsed_time)
            print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages))
            print('learning_rate : %g ' % learn_rate)
            print('1st conv FMaps : %d ' % fm1)
            print('number of layers in dense block : %d ' % num_layers)
            print('growth rate(k_fm) : %d ' % k_fm)
            print('filter size : %d ' % fs)
            print('bottleneck : %d' % bottleneck)
            print('dropout prob : %g ' % dropout_prob)
            print('data_type :', data_type)
            print('file_name :', file_name)
            print('FC nodes : %d' % fc_nodes[0])
            epoch_around = (itrt_index * batch_size) / total_trainImages
            print('Number of epochs : %f ' % epoch_around)
            # plot the cost
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations (per tens)')
            plt.title("Learning rate =" + str(learn_rate))
            plt.show()
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1],
                keep_prob: 1.0, training:False})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        # One optimizer step with dropout enabled.
        _, loss = sess.run([train_step, cross_entropy],
                           feed_dict={x: batch[0], y_: batch[1],
                                      keep_prob: dropout_prob, training:True})
        iteration_cost = 0 # Defines a cost related to an epoch
        num_minibatches = int(total_trainImages / batch_size) # number of minibatches of size minibatch_size in the train set
        iteration_cost += loss / num_minibatches
        costs.append(iteration_cost)
        if i % 100 == 0:
            print ('Loss: ',loss)
    # --- Final full test-set evaluation after the training loop -------------
    Accuracy = 0
    training_time = time.time() - start_time
    print('Training Time: %f secs' % training_time)
    if Dataset == '1':
        if file_name == '5th_fold':
            num_test = 13154
        else:
            num_test = 13155
    elif Dataset == '2':
        num_test = 503
    elif Dataset == '3':
        num_test = 400
    print(num_test)
    for img_index in range(num_test):
        t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
        t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
        test_acc = accuracy.eval(feed_dict={
            x: t_image, y_: t_label,
            keep_prob: 1.0, training:False})
        Accuracy += test_acc
        wrong, predicted = sess.run([wrong_prediction_printop, predicted_labels_printop], feed_dict={
            x: t_image, y_: t_label,
            keep_prob: 1.0, training:False})
        if img_index <= 3:
            b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
                t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
                feed_dict={
                x: t_image, y_: t_label,
                keep_prob: 1.0, training:False})
            if img_index == 0:
                b1_list = b1
                b2_list = b2
                b3_list = b3
                t1_list = t1
                t2_list = t2
                t3_list = t3
                t1_b_list = t1_b
                t2_b_list = t2_b
                t3_b_list = t3_b
            else:
                b1_list = np.append(b1_list,b1,axis=0)
                b2_list = np.append(b2_list,b2,axis=0)
                b3_list = np.append(b3_list,b3,axis=0)
                t1_list = np.append(t1_list,t1,axis=0)
                t2_list = np.append(t2_list,t2,axis=0)
                t3_list = np.append(t3_list,t3,axis=0)
                t1_b_list = np.append(t1_b_list,t1_b,axis=0)
                t2_b_list = np.append(t2_b_list,t2_b,axis=0)
                t3_b_list = np.append(t3_b_list,t3_b,axis=0)
        if img_index == 0 :
            wrong_list = wrong
            predicted_list = predicted
        else:
            wrong_list = np.append(wrong_list,wrong,axis=0)
            predicted_list = np.append(predicted_list,predicted,axis=0)
    Accuracy = Accuracy/num_test
    print('Average test accuracy: %g' % Accuracy)
    accuracy_list.append(Accuracy)
    list_of_predicted_list.append(predicted_list)
    elapsed_time = time.time() - start_time
    print('Elapsed Time: %f secs' % elapsed_time)
    print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages))
    print('learning_rate : %g ' % learn_rate)
    print('1st conv FMaps : %d ' % fm1)
    print('number of layers in dense block : %d ' % num_layers)
    print('growth rate(k_fm) : %d ' % k_fm)
    print('filter size : %d ' % fs)
    print('bottleneck : %d' % bottleneck)
    print('dropout prob : %g ' % dropout_prob)
    print('data_type :', data_type)
    print('file_name :', file_name)
    print('FC nodes : %d' % fc_nodes[0])
    epoch_around = math.ceil((iteration * batch_size) / total_trainImages)
    if epoch_around == 51:
        epoch_around = 50
    print('Number of epochs : %f ' % epoch_around)
    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learn_rate))
    plt.show()
    # Persist predictions, per-checkpoint accuracies and activation dumps.
    sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list, 'predicted_list': predicted_list, 'Target_labels':Target_labels, 'accuracy_list':accuracy_list, 'list_of_predicted_list':list_of_predicted_list, 'costs':costs, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
        't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
class MyModel:
    """Hyper-parameter bundle handed to densenet() runs."""

    # Architecture
    num_layers = 4       # layers per dense block
    k_fm = 24            # growth rate (feature maps added per layer)
    fs = 3               # convolution filter size
    fm1 = 32             # feature maps produced by the stem convolution
    bottleneck = 4       # bottleneck width multiplier for transition layers
    # Training
    dropout_prob = 0.8
    batch_size = [16]
    learn_rate = 0.001
    num_of_test = 40
# Run the full experiment grid: 3 datasets x {original, augmented} data x
# 5 cross-validation folds, in the same order as the original 30 explicit
# calls (dataset-major, then data type, then fold).
model = MyModel()
for _dataset in ('1', '2', '3'):
    for _data_type in ('org', 'aug'):
        for _fold in (1, 2, 3, 4, 5):
            densenet(_dataset, _data_type, _fold, model)
|
# This simulation obtains dose on a cylindrical disk phantom at various
# distances from a Co60 gamma source (two lines at 1.1732 and 1.3325 MeV,
# as built below). Dose in microsieverts per hour is found and can be
# compared to the yearly limit.
# The model is built to have human tissue, and human height and volume,
# which is typically referred to as a phantom.
# source details based on https://file.scirp.org/pdf/OJMSi_2014011414370625.pdf
import openmc
import math
import matplotlib.pyplot as plt
# Tissue Equivalent, MS20 from PNNL (atom fractions)
mat_tissue = openmc.Material()
mat_tissue.add_element("O", 0.079013)
mat_tissue.add_element("C", 0.32948)
mat_tissue.add_element("H", 0.546359)
mat_tissue.add_element("N", 0.008619)
mat_tissue.add_element("Mg", 0.036358)
mat_tissue.add_element("Cl", 0.000172)
mat_tissue.set_density("g/cm3", 1.0)
# Dry air filling the rest of the simulated sphere.
mat_air = openmc.Material()
mat_air.add_element("C", 0.00015)
mat_air.add_element("N", 0.784431)
mat_air.add_element("O", 0.210748)
mat_air.add_element("Ar", 0.004671)
mat_air.set_density("g/cm3", 0.001205)
my_materials = openmc.Materials([mat_tissue, mat_air])
all_dose = []
distances_to_simulate = [50, 1000, 2000, 4000, 6000]
for distance_from_source in distances_to_simulate:  # units of cm
    # representing a human as a cylindrical phantom
    # average human is 62,000cm3 volume
    # average human height = 169.75
    # resulting cylinder radius = 10.782
    cylinder_surface = openmc.ZCylinder(r=10.782, x0=distance_from_source)
    phantom_upper_surface = openmc.ZPlane(z0=169.75)
    phantom_lower_surface = openmc.ZPlane(z0=0)
    outer_surface = openmc.Sphere(r=10000000, boundary_type="vacuum")
    phantom_region = -cylinder_surface & -phantom_upper_surface & +phantom_lower_surface
    # void region is below the outer surface and not the phantom region
    void_region = -outer_surface & ~phantom_region
    void_cell = openmc.Cell(region=void_region)
    void_cell.fill = mat_air
    phantom_cell = openmc.Cell(region=phantom_region)
    phantom_cell.fill = mat_tissue
    my_geometry = openmc.Geometry([phantom_cell, void_cell])
    # Instantiate a Settings object (fixed-source photon transport run).
    my_settings = openmc.Settings()
    my_settings.output = {"tallies": False}
    my_settings.batches = 2
    my_settings.inactive = 0
    my_settings.particles = 500000
    my_settings.photon_transport = True
    my_settings.run_mode = "fixed source"
    # Create a gamma point source at the origin.
    source = openmc.Source()
    source.space = openmc.stats.Point((0, 0, 0))
    source.angle = openmc.stats.Isotropic()
    # This is a Co60 source, see the task on sources to understand it
    source.energy = openmc.stats.Discrete([1.1732e6, 1.3325e6], [0.5, 0.5])
    source.particle = "photon"
    my_settings.source = source
    # volume of cylinder V=πr^2h
    # openmc native units for length are cm so volume is in cm3
    phantom_volume = math.pi * math.pow(10.782, 2) * 169.75
    # geometry argument refers to irradiation direction (AP = front-facing)
    # https://academic.oup.com/view-large/figure/119655666/ncx112f01.png
    energy_bins_p, dose_coeffs_p = openmc.data.dose_coefficients(
        particle="photon", geometry="AP"
    )
    energy_function_filter_p = openmc.EnergyFunctionFilter(energy_bins_p, dose_coeffs_p)
    energy_function_filter_p.interpolation = "cubic"  # cubic interpolation is recommended by ICRP
    photon_particle_filter = openmc.ParticleFilter("photon")
    cell_filter = openmc.CellFilter(phantom_cell)
    # Create tally to score dose (flux weighted by the dose coefficients)
    dose_cell_tally = openmc.Tally(name="photon_dose_on_cell")
    # note that the EnergyFunctionFilter is included as a filter
    dose_cell_tally.filters = [
        cell_filter,
        photon_particle_filter,
        energy_function_filter_p,
    ]
    dose_cell_tally.scores = ["flux"]
    my_tallies = openmc.Tallies([dose_cell_tally])
    model = openmc.Model(my_geometry, my_materials, my_settings, my_tallies)
    statepoint_filename = model.run()
    with openmc.StatePoint(statepoint_filename) as statepoint:
        photon_tally_result = statepoint.get_tally(
            name="photon_dose_on_cell"
        ).mean.flatten()[0]
    photons_per_second = 740000000000  # units of photons per second
    # converts units from pSv-cm3/source_photon to pSv-cm3/second
    dose = photon_tally_result * photons_per_second
    # converts from pSv-cm3/second to pSv/second
    dose = dose / phantom_volume
    # converts from (pico) pSv/second to (micro) uSv/second
    dose = dose * 1e-6
    # converts from uSv/second to uSv/hour
    dose = dose * 60 * 60
    all_dose.append(dose)
# Plot dose fall-off with distance on a log scale.
plt.plot(distances_to_simulate, all_dose, label="dose on phantom")
plt.xlabel("Distance between photon source and phantom")
plt.ylabel("Dose [uSv per hour]")
plt.title("Dose on a phantom as a function of distance from a Co60 source\n")
plt.yscale("log")
plt.grid(True)
plt.show()
|
# Given a positive integer n, generate a square matrix containing all the
# elements from 1 to n^2, arranged in clockwise spiral order.
#
# DEMO:
# Input: 3
# Output:
# [
#  [ 1, 2, 3 ],
#  [ 8, 9, 4 ],
#  [ 7, 6, 5 ]
# ]
class Solution:
    def generateMatrix(self, n):
        """Fill an n x n grid with 1..n*n in clockwise spiral order.

        Walks the grid cell by cell, turning right whenever the next
        cell would leave the grid or is already filled.

        :type n: int
        :rtype: List[List[int]]
        """
        grid = [[0] * n for _ in range(n)]
        # right, down, left, up — in clockwise order.
        deltas = ((0, 1), (1, 0), (0, -1), (-1, 0))
        row = col = heading = 0
        for value in range(1, n * n + 1):
            grid[row][col] = value
            next_row = row + deltas[heading][0]
            next_col = col + deltas[heading][1]
            blocked = (not (0 <= next_row < n and 0 <= next_col < n)
                       or grid[next_row][next_col] != 0)
            if blocked:
                heading = (heading + 1) % 4
                next_row = row + deltas[heading][0]
                next_col = col + deltas[heading][1]
            row, col = next_row, next_col
        return grid
|
import os
def mini100(videopath, minipath, mod='train'):
    """Write the annotation lines whose class label is < 100 to a new file.

    Each line of ``videopath`` is expected to look like ``<path>,<label>``.
    Matching lines are copied unchanged to ``minipath``; a per-class tally
    and the grand total are printed.  ``mod`` is kept for interface
    compatibility and is unused.
    """
    count = [0] * 100
    # Stream the source file instead of loading it all with readlines().
    with open(videopath, 'r') as video_f:
        with open(minipath, 'w') as f:
            for video in video_f:
                if not video.strip():
                    continue  # tolerate blank/trailing lines
                # rsplit guards against commas inside the path itself
                label = int(video.rsplit(',', 1)[1])
                if label < 100:
                    count[label] += 1
                    f.write(video)
    for cls, i in enumerate(count):
        print("{} class have : {}".format(cls, i))
    print("total {}".format(sum(count)))
def mini200(videopath, minipath, mod='train'):
    """Write the annotation lines whose class label is < 200 to a new file.

    Each line of ``videopath`` is expected to look like ``<path>,<label>``.
    Matching lines are copied unchanged to ``minipath``; a per-class tally
    and the grand total are printed.  ``mod`` is kept for interface
    compatibility and is unused.
    """
    count = [0] * 200
    # Stream the source file instead of loading it all with readlines().
    with open(videopath, 'r') as video_f:
        with open(minipath, 'w') as f:
            for video in video_f:
                if not video.strip():
                    continue  # tolerate blank/trailing lines
                # rsplit guards against commas inside the path itself
                label = int(video.rsplit(',', 1)[1])
                if label < 200:
                    count[label] += 1
                    f.write(video)
    for cls, i in enumerate(count):
        print("{} class have : {}".format(cls, i))
    print("total {}".format(sum(count)))
def exist_or_not(ann,):
    """Print every annotation entry whose video file does not exist.

    ``ann`` is an annotation file with ``<path>,<label>`` lines; the path
    of each missing file is printed, followed by "all done!".
    """
    with open(ann, 'r') as f:
        entries = f.readlines()
    for entry in entries:
        video_path = entry.split(',')[0]
        if not os.path.isfile(video_path):
            print(video_path)
    print("all done!")
# Expose mini100 / mini200 / exist_or_not as CLI commands via python-fire,
# e.g. `python this_file.py mini100 src.txt dst.txt`.
if __name__ == "__main__":
    import fire
    fire.Fire()
|
7,418 | 8aa35bcaa4e564306125b37c70a8a92f26da736d |
import pickle
from absl import flags
from absl import app
from absl import logging
import time
import numpy as np
FLAGS = flags.FLAGS
# Command-line configuration: where the per-chunk sent2vec outputs live,
# how many chunk files to merge, and where to write the merged result.
flags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')
flags.DEFINE_integer('num_chunks', 36, 'how many files')
flags.DEFINE_string('out_dir', '2020-04-10/', 'out path')
logging.set_verbosity(logging.INFO)
def load_all_vectors(num_chunks):
    """L2-normalise and concatenate every sent2vec chunk file.

    Reads ``chunk_<i>.vectors.npy`` / ``chunk_<i>.sentences.pkl`` pairs from
    FLAGS.sent2vec_dir and returns ``(vectors, metadata)`` where ``vectors``
    is one float32 array of unit-norm rows and ``metadata`` is the
    concatenated list of (doc_id, section_id, sentence_id, sentence) tuples.
    """
    normalised_chunks = []
    metadata = []
    for idx in range(num_chunks):
        logging.info('Processing file %s', idx)
        started = time.time()
        vecs = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' % idx).astype(np.float32)
        with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % idx, 'rb') as fin:
            metadata.extend(pickle.load(fin))
        norms = np.linalg.norm(vecs, axis=1, keepdims=True)
        norms[norms == 0] = 1.0  # keep zero vectors instead of dividing by zero
        normalised_chunks.append(vecs / norms)
        logging.info('Finished processing chunk %s in %s seconds',
                     idx, str(time.time() - started))
    merged = np.concatenate(normalised_chunks)
    logging.info('Concatenated shape %s' % str(merged.shape))
    return merged, metadata
def main(argv):
    """Merge every chunk and write all.npy / all.pkl into FLAGS.out_dir."""
    logging.info('Running reduce vecs with args %s', str(argv))
    logging.info('Running on %s files', str(FLAGS.num_chunks))
    vectors, metadata = load_all_vectors(FLAGS.num_chunks)
    np.save('%s/all.npy' % FLAGS.out_dir, vectors)
    with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
        pickle.dump(metadata, fout)
# Entry point: absl parses the flags and passes remaining argv to main().
if __name__ == "__main__":
    app.run(main)
|
from .checklist_mixin import ChecklistMixin
from .citation_mixin import CitationMixin
from .license_mixin import LicenseMixin
from .registry_mixin import RegistryMixin
from .repository_mixin import RepositoryMixin
# Public API of the mixins package (controls `from ... import *`).
__all__ = [
    "RepositoryMixin",
    "LicenseMixin",
    "RegistryMixin",
    "CitationMixin",
    "ChecklistMixin"
]
|
"""
Created on Fri Aug 4 19:19:31 2017
@author: aw1042
"""
import requests
import threading
import sys
import re
import xml.etree.ElementTree as ET
import smtplib
from credentials import *
argsObj = {}
for arg1, arg2 in zip(sys.argv[:-1], sys.argv[1:]):
if arg1[0] == '-':
argsObj[arg1] = arg2
posts = []
requestsNumber = 0
try:
url = argsObj['-url']
except AttributeError:
url = 'http://denver.craigslist.org/search/sss?format=rss'
if 'format=rss' not in url:
url = url + ('?' if '?' not in url else '&') + 'format=rss'
print 'set url to ' + url
pollInterval = 900
class Post():
    """A single craigslist RSS item (title, link and description)."""

    # Class-level defaults, overridden per instance in __init__.
    title = ''
    link = ''
    description = ''

    def __init__(self, title, link, description):
        self.title, self.link, self.description = title, link, description
def set_interval(func, sec):
    """Invoke ``func`` every ``sec`` seconds via a chain of threading.Timers.

    Returns the first Timer; cancelling it only stops the chain if done
    before the first tick fires (each tick re-arms a fresh Timer).
    """
    def rearm():
        set_interval(func, sec)
        func()
    timer = threading.Timer(sec, rearm)
    timer.start()
    return timer
def requestUrl():
    """Fetch the RSS feed once, append unseen posts, and email any new ones.

    Skips emailing on the very first request (requestsNumber == 0) so the
    initial backlog is not sent.
    """
    global requestsNumber
    global url
    req = requests.get(url)
    cdata = req.content.strip()
    prevLength = len(posts)
    # Each child of the RSS root is one <item>; appendPost de-duplicates.
    parsedData = ET.fromstring(cdata)
    for itemTag in parsedData:
        appendPost(itemTag)
    if len(posts) > prevLength and requestsNumber > 0:
        print 'Aggregated', len(posts[prevLength:]), 'new posts'
        emailPosts(posts[prevLength:])
    requestsNumber += 1
    print 'Number of requests: ', requestsNumber
def appendPost(xmlItem):
    """Append a Post built from an RSS <item> element to the global list,
    skipping duplicates (same link) and the feed's own header entry."""
    newPost = Post(xmlItem[0].text, xmlItem[1].text, xmlItem[2].text)
    already_seen = any(existing.link == newPost.link for existing in posts)
    if not already_seen and not re.match('craigslist.+\|.+search', newPost.title):
        posts.append(newPost)
def emailPosts(newPostsArray):
    """Email a plain-text digest of the given posts to ourselves.

    `email` and `password` come from the star-import of `credentials`.
    NOTE(review): the message has no From/To/Subject headers, and
    SMTP_SSL() followed by connect() is the Python 2 era calling pattern —
    confirm against the smtplib version in use.
    """
    msg = ''
    for post in newPostsArray:
        msg += post.title + '\n'
        if (post.description):
            msg += post.description + '\n'
        msg += post.link + '\n \n \n'
    toPass = password
    server = smtplib.SMTP_SSL()
    server.connect("smtp.gmail.com", 465)
    server.ehlo()
    server.login(email, toPass)
    # Sender and recipient are the same account; encode for non-ASCII posts.
    server.sendmail(email, email, msg.encode('utf-8'))
    server.quit()
# Prime the post list once, then poll the feed every pollInterval seconds.
requestUrl()
set_interval(requestUrl, pollInterval)
|
# Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
# @author David Purdy <david@radioretail.co.za>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplifies the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
from Queue import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.2 2008/07/20 22:16:50 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
ACK_WAIT = 0.2 # Maximum amount of time to wait for an ack
ACK_WARN = 0.2 # Warn if acks take longer than this to arrive
def list2hex(v):
    """Render a sequence of byte values as space-separated two-digit hex."""
    return " ".join("%02x" % byte for byte in v)
class Error(Exception):
    """Base error class for this module; every tos exception derives from it."""
    pass
class TimeoutError(Error):
    """Thrown when a serial operation (read or write) times out."""
    pass
class ReadError(Error):
    """Base class for read error exceptions."""
    pass
class WriteError(Error):
    """Base class for write error exceptions."""
    pass
class ReadTimeoutError(TimeoutError, ReadError):
    """Thrown when a serial read operation times out."""
    pass
class ReadCRCError(ReadError):
    """Thrown when a received packet fails its CRC check."""
    pass
class BadAckSeqnoError(ReadError):
    """Thrown if an ack packet has an unexpected sequence number."""
    pass
class WriteTimeoutError(TimeoutError, WriteError):
    """Thrown when a serial write operation times out waiting for its ack."""
    pass
class SimpleSerial:
"""
A SimpleSerial object offers a way to send and data using a HDLC-like
formating.
Use SimpleSerial objects for basic low-level serial communications. Use
Serial objects for higher level logic (retry sends, log printfs, etc).
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
    def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
                 timeout=None):
        """Open `port` at `baudrate` and initialise queues and counters.

        If `flush` is true, the port is drained for one second (discarding
        any half-transmitted packets) and then reopened with `timeout`.
        """
        self._debug = debug
        self._in_queue = []
        self._qsize = qsize
        self._ack = None
        self._write_counter = 0
        self._write_counter_failures = 0
        self._read_counter = 0
        self._ts = None
        self.timeout = timeout # Public attribute
        self._received_packet_filters = [] # filter functions for received packets
        # Remember sent (and unacknowledged) seqno numbers for 15 seconds:
        self._unacked_seqnos = SeqTracker(15.0)
        self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
        self._s.flushInput()
        if flush:
            print >>sys.stdout, "Flushing the serial port",
            endtime = time.time() + 1
            while time.time() < endtime:
                try:
                    self._read()
                except ReadError:
                    pass
                sys.stdout.write(".")
            if not self._debug:
                sys.stdout.write("\n")
            self._s.close()
            self._s = serial.Serial(port, baudrate, rtscts=0, timeout=timeout)
        # Add a filter for received 'write ack' packets
        self.add_received_packet_filter(self._write_ack_filter)
    # Returns the next incoming serial packet
    def _read(self, timeout=None):
        """Wait for a packet and return it as a RawPacket.

        Throws:
        - ReadCRCError if a CRC check fails
        - ReadTimeoutError if the timeout expires.
        """
        # Developer notes:
        #
        # Packet data read from Serial is in this format:
        # [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
        #
        # [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte
        # values cannot occur within it. When [Escaped data] has been
        # unescaped, the last 2 bytes are a 16-bit CRC of the earlier
        # part of the packet (excluding the initial HDLC_FLAG_BYTE
        # byte)
        #
        # It's also possible that the serial device was half-way
        # through transmitting a packet when this function was called
        # (app was just started). So we also neeed to handle this case:
        #
        # [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
        #
        # In this case we skip over the first (incomplete) packet.
        #
        # NOTE(review): the pyserial timeout is only updated when `timeout`
        # is not None, so a previous non-default timeout persists — confirm
        # this is intended.
        if self._s.timeout != timeout and timeout != None:
            if self._debug:
                print "Set the timeout to %s, previous one was %s" % (timeout, self._s.timeout)
            self._s.timeout = timeout
        try:
            # Read bytes until we get to a HDLC_FLAG_BYTE value
            # (either the end of a packet, or the start of a new one)
            d = self._get_byte(timeout)
            ts = time.time()
            if self._debug and d != self.HDLC_FLAG_BYTE:
                print "Skipping incomplete packet"
            while d != self.HDLC_FLAG_BYTE:
                d = self._get_byte(timeout)
                ts = time.time()
            # Store HDLC_FLAG_BYTE at the start of the retrieved packet
            # data:
            packet = [d]
            # Is the next byte also HDLC_FLAG_BYTE?
            d = self._get_byte(timeout)
            if d == self.HDLC_FLAG_BYTE:
                # Yes. This means that the previous byte was for
                # the end of the previous packet, and this byte is for
                # the start of the next packet.
                # Get the 2nd byte of the new packet:
                d = self._get_byte(timeout)
                ts = time.time()
            # We are now on the 2nd byte of the packet. Add it to
            # our retrieved packet data:
            packet.append(d)
            # Read bytes from serial until we read another
            # HDLC_FLAG_BYTE value (end of the current packet):
            while d != self.HDLC_FLAG_BYTE:
                d = self._get_byte(timeout)
                packet.append(d)
            # Done reading a whole packet from serial
            if self._debug:
                print "SimpleSerial:_read: unescaped", packet
            # Decode the packet, and check CRC:
            packet = self._unescape(packet)
            crc = self._crc16(0, packet[1:-3])
            packet_crc = self._decode(packet[-3:-1])
            if crc != packet_crc:
                print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
                raise ReadCRCError
            if self._debug:
                if self._ts == None:
                    self._ts = ts
                else:
                    print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
                self._ts = ts
            # Packet was successfully retrieved, so return it in a
            # RawPacket wrapper object (but leave out the
            # HDLC_FLAG_BYTE and CRC bytes)
            return RawPacket(ts, packet[1:-3])
        except socket.timeout:
            raise ReadTimeoutError
    def _write_ack_filter(self, packet):
        """Filter for received write acknowledgement packets.

        Stores the ack in self._ack and swallows the packet (returns None)
        so it is not buffered; non-ack packets pass through unchanged.
        """
        ack = AckFrame(packet.data)
        if ack.protocol == self.SERIAL_PROTO_ACK:
            if self._debug:
                print "_filter_read: got an ack:", ack
            self._ack = ack
            packet = None # No further processing of received ack packet
        return packet
    def _filter_read(self, timeout=None):
        """Read a packet from the serial device, perform filtering, and return
        the packet if it hasn't been processed yet.

        Returns None when a registered filter consumed the packet.
        """
        p = self._read(timeout)
        self._read_counter += 1
        if self._debug:
            print "_filter_read: got a packet(%d): %s" % (self._read_counter, p)
        # Pass the received packet through the filter functions:
        if p is not None:
            for filter_func in self._received_packet_filters:
                p = filter_func(p)
                # Stop now if the packet doesn't need further processing:
                if p is None:
                    break
        # Return the packet (if there was no timeout and it wasn't filtered)
        return p
    def _get_ack(self, timeout, expected_seqno):
        """Get the next ack packet

        Read packets from the serial device until we get the next ack (which
        then gets stored in self._ack), or the timeout expires. Non-ack
        packets are buffered in self._in_queue.

        Throws:
        - ReadTimeoutError if the timeout expires.
        - BadAckSeqnoError if an ack with a bad sequence number is received
        """
        endtime = time.time() + timeout
        while time.time() < endtime:
            # Read a packet over serial
            self._ack = None
            remaining = endtime - time.time()
            # NOTE(review): `remaining` is computed but `timeout` is passed,
            # so the overall deadline can overshoot — looks like
            # _filter_read(remaining) was intended; confirm.
            p = self._filter_read(timeout)
            # Was the packet filtered?
            if p:
                # Got an unfiltered packet; buffer it, dropping the oldest
                # entry when the queue is full.
                if len(self._in_queue) >= self._qsize:
                    print "Warning: Buffer overflow"
                    self._in_queue.pop(0)
                self._in_queue.append(p)
            else:
                # Packet was filtered. Was it an ack?
                if self._ack is not None:
                    # The packet was an ack, so remove it from our
                    # 'unacknowledged seqnos' list (or raise a BadAckSeqnoError
                    # error if it isn't in the list)
                    self._unacked_seqnos.seqno_acked(self._ack.seqno)
                    # Stop reading packets if it's the ack we are waiting for:
                    if self._ack.seqno == expected_seqno:
                        return
        # Timed out
        raise ReadTimeoutError
    def close(self):
        """Close the underlying serial device; the object is unusable after."""
        self._s.close()
def read(self, timeout=None):
"""Read a packet, either from the input buffer or from the serial
device.
Returns a RawPacket object, otherwise None if the packet was filtered
(by eg: Serial's printf-filtering function)
Does not retry reads if the first one fails. Use Serial.read() for
that.
"""
if self._in_queue:
return self._in_queue.pop(0)
else:
return self._filter_read(timeout)
    def write(self, payload, seqno, timeout=0.2):
        """
        Write a packet. If the payload argument is a list, it is
        assumed to be exactly the payload. Otherwise the payload is
        assumed to be a Packet and the real payload is obtained by
        calling .payload().
        Only attempts to write once, and times out if an ack packet is not
        received within [timeout] seconds. Use Serial.write() if you want
        automatic write retries.
        seqno should be an integer between 0 and 99 which changes each time you
        send a new packet. The value should remain the same when you are
        retrying a packet write that just failed.
        Raises WriteTimeoutError if the write times out (ack packet doesn't
        arrive within [timeout] seconds).
        """
        if type(payload) != type([]):
            # Assume this will be derived from Packet
            payload = payload.payload()
        # Build the serial data frame around the payload:
        packet = DataFrame();
        packet.protocol = self.SERIAL_PROTO_PACKET_ACK
        packet.seqno = seqno
        packet.dispatch = 0
        packet.data = payload
        packet = packet.payload()
        # Append the CRC (low byte first) and HDLC-frame the escaped bytes:
        crc = self._crc16(0, packet)
        packet.append(crc & 0xff)
        packet.append((crc >> 8) & 0xff)
        packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
        # Write the packet:
        self._unacked_seqnos.seqno_sent(seqno)  # Keep track of sent seqno's
        self._put_bytes(packet)
        self._write_counter += 1
        # Wait for an ack packet:
        if self._debug:
            print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
            print "Wait for ack %d ..." % (seqno)
        try:
            self._get_ack(timeout, seqno)
        except ReadTimeoutError:
            # Re-raise read timeouts (of ack packets) as write timeouts (of
            # the write operation)
            self._write_counter_failures += 1
            raise WriteTimeoutError
        # Received an ack packet, with the expected sequence number
        if self._debug:
            print "Wait for ack %d done. Latest ack:" % (seqno), self._ack
            print "The packet was acked."
            print "Returning from SimpleSerial.write..."
    def add_received_packet_filter(self, filter_func):
        """Register a received packet-filtering callback function.

        _filter_read() calls all of the registered filter functions for each
        packet received over serial. Registered filter functions are called in
        the order they were registered.
        Filter functions are called like this: filter_func(packet)
        When a filter function recognises and handles a received packet it
        should return a None value to indicate that no further processing
        is required for the packet.
        When a filter function skips a packet (or for some reason you want
        further processing to happen on a packet you've just processed), the
        function should return the packet that was passed to it as an argument.
        """
        self._received_packet_filters.append(filter_func)
    def remove_received_packet_filter(self, filter_func):
        """Remove a filter function added with add_received_packet_filter().

        Raises ValueError (from list.remove) if the function was never
        registered.
        """
        self._received_packet_filters.remove(filter_func)
    def _format_packet(self, payload):
        """Render a raw payload as a human-readable string for debugging.

        Acks render as their seqno; everything else is parsed as an
        ActiveMessage and shown field-by-field with a hex dump of the data.
        """
        f = NoAckDataFrame(payload)
        if f.protocol == self.SERIAL_PROTO_ACK:
            rpacket = AckFrame(payload)
            return "Ack seqno: %d" % (rpacket.seqno)
        else:
            rpacket = ActiveMessage(f.data)
            return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
                (rpacket.destination, rpacket.source,
                 rpacket.length, rpacket.group, rpacket.type,
                 list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
    def _get_byte(self, timeout=None):
        """Read one byte from the serial device.

        Raises socket.timeout when the underlying read times out, so callers
        can treat serial and socket transports uniformly.  The commented-out
        code would apply *timeout* as a temporary per-call override of the
        device timeout; currently the *timeout* argument is ignored.
        """
        # old_timeout = self._s.timeout
        # if timeout is not None:
        #     self._s.timeout = timeout
        try:
            r = struct.unpack("B", self._s.read())[0]
            return r
        except struct.error:
            # Serial port read timeout
            raise socket.timeout
        # finally:
        #     self._s.timeout = old_timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
    def debug(self, debug):
        """Enable or disable verbose debug printing on this connection."""
        self._debug = debug
class SeqTracker:
    """Class for keeping track of unacknowledged packet sequence numbers.

    SeqTracker is used by SimpleSerial to keep track of sequence numbers
    which have been sent with write packets, but not yet acknowledged by
    received write ack packets.

    The internal queue holds (seqno, sent_time) tuples in send order, so
    the oldest entry is always at the front.
    """

    def __init__(self, keep_for):
        """Initialise a SeqTracker object.

        args:
        - keep_for is the length of time (seconds) for which unacknowledged
          sequence numbers should be remembered. After this period has
          elapsed, the sequence numbers are forgotten; if the sequence
          number is acknowledged later, it will be treated as unknown.
        """
        self._keep_for = keep_for
        self._queue = []

    def seqno_sent(self, seqno):
        """Register that a packet with the specified sequence number was just
        sent."""
        self._gc()
        self._queue.append((seqno, time.time()))

    def seqno_acked(self, seqno):
        """Register that a sequence number was just acknowledged.

        Find the oldest-known occurrence of seqno in the queue and remove it.
        If not found then raise a BadAckSeqnoError to inform applications
        that the sequence number is not known.
        """
        self._gc()
        for item in self._queue:
            if item[0] == seqno:
                # Found seqno
                self._queue.remove(item)
                return
        # seqno not found!
        raise BadAckSeqnoError

    def get_seqno_sent_times(self, seqno):
        """Return the times when packets with the given sequence number were
        sent."""
        self._gc()
        return [sent_time for (s, sent_time) in self._queue if s == seqno]

    def __contains__(self, seqno):
        """Return True if the seqno was sent recently (and not acknowledged
        yet)."""
        self._gc()
        return any(s == seqno for (s, _) in self._queue)

    def _gc(self):
        """Remove expired items from the front of the queue.

        The queue is in send order, so every expired entry is a prefix of
        the list.  (Bug fix: the previous implementation called
        list.remove() while iterating the same list, which skips the
        element following each removal and could leave stale entries
        behind.)
        """
        remove_before = time.time() - self._keep_for
        while self._queue and self._queue[0][1] < remove_before:
            self._queue.pop(0)
class Serial:
    """
    Wraps a SimpleSerial object, and provides some higher-level functionality
    like retrying writes and logging printf packets.
    """
    def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
                 timeout=None):
        """Initialise a Serial object.

        *timeout* is the default read/write timeout (None = retry forever).
        """
        self._debug = debug
        self.timeout = timeout  # Public attribute
        self._seqno = 0
        self._simple_serial = SimpleSerial(port, baudrate, flush, debug, qsize,
                                           timeout)
        # Setup automatic logging of received printf packets:
        self._printf_msg = ""
        self._simple_serial.add_received_packet_filter(self._printf_filter)
    def close(self):
        """Close the serial device"""
        self._simple_serial.close()
    def read(self, timeout=None):
        """Read a packet from the serial port.
        Retries packet reads until the timeout expires.
        Throws ReadTimeoutError if a packet can't be read within the timeout.
        """
        if timeout is None:
            timeout = self.timeout
        # endtime == None means "retry forever".
        endtime = None
        if timeout is not None:
            endtime = time.time() + timeout
        while endtime is None or time.time() < endtime:
            remaining = None
            if endtime is not None:
                remaining = endtime - time.time()
            try:
                p = self._simple_serial.read(remaining)
            except ReadError:
                if self._debug:
                    print "Packet read failed. Try again."
            else:
                # Was the packet filtered?
                if p is not None:
                    # Not filtered, so return it.
                    # In the current TinyOS the packets from the mote are
                    # always NoAckDataFrame
                    return NoAckDataFrame(p.data)
        # Read timeout expired
        raise ReadTimeoutError
    def write(self, payload, timeout=None):
        """Write a packet to the serial port
        Keeps retrying endlessly, unless a timeout is set. If the timeout
        expires then WriteTimeoutError is thrown.
        """
        if timeout is None:
            timeout = self.timeout
        endtime = None
        if timeout is not None:
            endtime = time.time() + timeout
        # Generate the next sequence number (reused across retries):
        self._seqno = (self._seqno + 1) % 100
        while endtime is None or time.time() < endtime:
            try:
                # Each attempt waits at most ACK_WAIT (or the remaining
                # time budget, whichever is smaller) for the ack.
                ackwait = ACK_WAIT
                if endtime is not None:
                    remaining = endtime - time.time()
                    ackwait = min(ACK_WAIT, remaining)
                before = time.time()
                self._simple_serial.write(payload, self._seqno, ackwait)
                length = time.time() - before
                if length >= ACK_WARN:
                    print "Warning: Packet write took %.3fs!" % (length)
                return True
            except Error:
                if self._debug:
                    print "The packet was not acked. Try again."
        # Write operation timed out
        raise WriteTimeoutError
    def _printf_filter(self, packet):
        """Filter for received printf packets (AM type 100).

        Accumulates printf payload text and prints each complete line
        prefixed with "PRINTF:".  Returns None for printf packets so they
        are not passed on to the application.
        """
        ampkt = ActiveMessage(NoAckDataFrame(packet.data).data)
        if ampkt.type == 100:
            self._printf_msg += "".join([chr(i) for i in ampkt.data]).strip('\0')
            # Split printf data on newline character:
            # (last string in the split list doesn't have a newline after
            # it, so we keep it until next time)
            lines = self._printf_msg.split('\n')
            for line in lines[:-1]:
                print "PRINTF:", line
            self._printf_msg = lines[-1]
            packet = None  # No further processing for the printf packet
        return packet
class SFClient:
    """Client for a serial-forwarder style TCP server.

    A background thread reads length-prefixed packets from the socket into
    a bounded queue; read() pops packets from that queue.
    """
    def __init__(self, host, port, qsize=10):
        self._in_queue = Queue(qsize)
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        # Handshake: both sides exchange the 2-byte string "U ".
        data = self._s.recv(2)
        if data != 'U ':
            print "Wrong handshake"
        self._s.send("U ")
        print "Connected"
        thread.start_new_thread(self.run, ())
    def run(self):
        """Background reader: one length byte, then that many payload bytes.

        The first payload byte is stripped before queueing; the oldest
        packet is dropped when the queue is full.
        """
        while True:
            length = ord(self._s.recv(1))
            data = self._s.recv(length)
            data = [ord(c) for c in data][1:]
            #print "Recv %d bytes" % (length), ActiveMessage(data)
            if self._in_queue.full():
                print "Warning: Buffer overflow"
                self._in_queue.get()
            p = RawPacket()
            p.data = data
            self._in_queue.put(p, block=False)
    def read(self, timeout=0):
        """Return the next buffered packet (blocks; *timeout* is ignored)."""
        return self._in_queue.get()
    def write(self, payload):
        """Send *payload* with a length prefix and a leading zero byte.

        Lists are sent as-is; anything else is assumed to be a Packet and
        serialised via .payload().  Always returns True.
        """
        print "SFClient: write:", payload
        if type(payload) != type([]):
            # Assume this will be derived from Packet
            payload = payload.payload()
        payload = [0] + payload
        self._s.send(chr(len(payload)))
        self._s.send(''.join([chr(c) for c in payload]))
        return True
class AM:
    """Thin adapter that reads/writes ActiveMessage packets over a
    Serial-compatible transport *s*."""
    def __init__(self, s):
        # s must provide read(timeout) -> packet and write(packet, timeout).
        self._s = s
    def read(self, timeout=None):
        """Read the next packet and wrap its data as an ActiveMessage."""
        return ActiveMessage(self._s.read(timeout).data)
    def write(self, packet, amid, timeout=None):
        """Send *packet* as an ActiveMessage with AM id *amid*."""
        return self._s.write(ActiveMessage(packet, amid=amid), timeout=timeout)
class SimpleSerialAM(SimpleSerial):
    """A derived class of SimpleSerial so that apps can read and write using
    higher-level packet structures.
    Serves a similar purpose to the AM class, but for SimpleSerial objects
    instead of Serial.
    """
    def read_am(self, timeout=None):
        """Read a RawPacket object (or None), convert it to ActiveMessage
        (or None), and return to the caller"""
        # Get a tos.RawPacket (or None, if filtered) object
        p = self.read(timeout)
        if p is not None:
            assert isinstance(p, RawPacket)
            # Convert tos.RawPacket object into an ActiveMessage
            # (strip the NoAckDataFrame header first):
            p = NoAckDataFrame(p.data)
            p = ActiveMessage(p.data)
        # Return the ActiveMessage (or None) packet:
        return p
    def write_am(self, packet, amid, seqno, timeout=2.0):
        """Convert app packet format to ActiveMessage, and write the
        ActiveMessage packet to serial"""
        # Convert from app-specific packet to ActiveMessage:
        p = ActiveMessage(packet, amid=amid)
        # Write to the serial device
        self.write(p, seqno, timeout)
class Packet:
    """
    The Packet class offers a handy way to pack and unpack binary data
    based on a given pattern.

    A packet is described by a list of (name, type, size) field
    descriptors. Supported types:
      - 'int':    big-endian integer of `size` bytes
      - 'bint':   bit field of `size` bits, packed MSB-first into bytes
      - 'string': `size` raw characters
      - 'blob':   raw byte list; `size` may be positive (fixed length),
                  negative (all but the last -size bytes), or None
                  (computed from the trailing fixed-size fields)

    Fields are accessible both as attributes (p.name) and as mapping
    items (p['name']).
    """

    def _decode(self, v):
        """Decode the big-endian byte list *v* into an integer.

        (Changed from a ``long(0)`` seed: ints auto-promote in Python 2
        and ``long`` does not exist in Python 3.)
        """
        r = 0
        for i in v:
            r = (r << 8) + i
        return r

    def _encode(self, val, dim):
        """Encode *val* as a big-endian list of *dim* bytes."""
        output = []
        for i in range(dim):
            output.append(int(val & 0xFF))
            val = val >> 8
        output.reverse()
        return output

    def __init__(self, desc, packet = None):
        offset = 0
        boffset = 0
        # Turn a single None-sized field into a negative size meaning
        # "everything except the trailing fixed-size fields".
        # (Renamed the accumulator from `sum`, which shadowed the builtin.)
        total = 0
        for i in range(len(desc)-1, -1, -1):
            (n, t, s) = desc[i]
            if s == None:
                if total > 0:
                    desc[i] = (n, t, -total)
                break
            total += s
        # Bypass our own __setattr__, which maps names to field values.
        self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
        self.__dict__['_names'] = [n for (n, t, s) in desc]
        self.__dict__['_values'] = []
        if type(packet) == type([]):
            # Unpack a raw byte list according to the schema.
            for (t, s) in self._schema:
                if t == 'int':
                    self._values.append(self._decode(packet[offset:offset + s]))
                    offset += s
                elif t == 'bint':
                    doffset = 8 - (boffset + s)
                    self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
                    boffset += s
                    if boffset == 8:
                        offset += 1
                        boffset = 0
                elif t == 'string':
                    self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
                    offset += s
                elif t == 'blob':
                    if s:
                        if s > 0:
                            self._values.append(packet[offset:offset + s])
                            offset += s
                        else:
                            self._values.append(packet[offset:s])
                            offset = len(packet) + s
                    else:
                        self._values.append(packet[offset:])
        elif type(packet) == type(()):
            # Initialise directly from a tuple of field values.
            for i in packet:
                self._values.append(i)
        else:
            # No payload: all fields start out undefined.
            for v in self._schema:
                self._values.append(None)

    def __repr__(self):
        return self._values.__repr__()

    def __str__(self):
        r = ""
        for i in range(len(self._names)):
            r += "%s: %s " % (self._names[i], self._values[i])
        for i in range(len(self._names), len(self._values)):
            r += "%s" % self._values[i]
        return r

    # Implement the map behavior
    def __getitem__(self, key):
        return self.__getattr__(key)

    def __setitem__(self, key, value):
        self.__setattr__(key, value)

    def __len__(self):
        return len(self._values)

    def keys(self):
        return self._names

    def values(self):
        # Bug fix: this previously returned self._names (a copy-paste of
        # keys()), so dict(packet)-style uses saw names twice.
        return self._values

    # Implement the struct behavior
    def __getattr__(self, name):
        # Integer "names" index into the field names; strings look up the
        # corresponding field value.
        if type(name) == type(0):
            return self._names[name]
        else:
            return self._values[self._names.index(name)]

    def __setattr__(self, name, value):
        if type(name) == type(0):
            self._values[name] = value
        else:
            self._values[self._names.index(name)] = value

    def __ne__(self, other):
        if other.__class__ == self.__class__:
            return self._values != other._values
        else:
            return True

    def __eq__(self, other):
        if other.__class__ == self.__class__:
            return self._values == other._values
        else:
            return False

    def __nonzero__(self):
        # Python 2 truth hook: packets always test true.
        return True;

    # Custom
    def names(self):
        return self._names

    def sizes(self):
        return self._schema

    def payload(self):
        """Pack the field values back into a flat byte list."""
        r = []
        boffset = 0
        for i in range(len(self._schema)):
            (t, s) = self._schema[i]
            if t == 'int':
                r += self._encode(self._values[i], s)
                boffset = 0
            elif t == 'bint':
                doffset = 8 - (boffset + s)
                if boffset == 0:
                    r += [self._values[i] << doffset]
                else:
                    r[-1] |= self._values[i] << doffset
                boffset += s
                if boffset == 8:
                    boffset = 0
            elif self._values[i] != []:
                r += self._values[i]
        # Extra values beyond the schema are appended verbatim.
        for i in self._values[len(self._schema):]:
            r += i
        return r
class RawPacket(Packet):
    """Timestamped raw packet: 4-byte timestamp followed by the payload."""
    def __init__(self, ts = None, data = None):
        Packet.__init__(self,
                        [('ts' , 'int', 4),
                         ('data', 'blob', None)],
                        None)
        self.ts = ts;
        self.data = data
class AckFrame(Packet):
    """Serial ack frame: protocol byte followed by the acked seqno."""
    def __init__(self, payload = None):
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('seqno', 'int', 1)],
                        payload)
class DataFrame(Packet):
    """Acknowledged serial data frame: protocol, seqno, dispatch, data."""
    def __init__(self, payload = None):
        if payload != None and type(payload) != type([]):
            # Assume is a Packet
            payload = payload.payload()
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('seqno', 'int', 1),
                         ('dispatch', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
class NoAckDataFrame(Packet):
    """Unacknowledged serial data frame: protocol, dispatch, data (no seqno)."""
    def __init__(self, payload = None):
        if payload != None and type(payload) != type([]):
            # Assume is a Packet
            payload = payload.payload()
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('dispatch', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
class ActiveMessage(Packet):
    """TinyOS Active Message: destination, source, length, group and AM
    type header followed by the data payload."""
    def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
        if type(gpacket) == type([]):
            payload = gpacket
        else:
            # Assume this will be derived from Packet
            payload = None
        Packet.__init__(self,
                        [('destination', 'int', 2),
                         ('source', 'int', 2),
                         ('length', 'int', 1),
                         ('group', 'int', 1),
                         ('type', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
        if payload == None:
            # Building an outgoing message: fill in header defaults and
            # derive data/length from gpacket when one was given.
            self.destination = dest
            self.source = 0x0000
            self.group = 0x00
            self.type = amid
            self.data = []
            if gpacket:
                self.data = gpacket.payload()
            self.length = len(self.data)
|
7,422 | 76d2c3f74e8fae160396b4015ccec478dba97b87 | # -*- coding: utf-8 -*-
"""
Created on Tue Mai 15 11:34:22 2018
@author: Diogo Leite
"""
from SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL
class Dataset_conf_ds(object):
    """
    This class represents the dataset/configuration connection object as it
    exists in the DATASET_CONF_DS table of the database.
    NOTE: it is an N-to-N connection class linking each dataset with a given
    configuration.
    By default, all FKs are in the last positions in the parameter declarations.
    """
    def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1):
        """
        Constructor of the Dataset_conf_ds object. All the parameters have a default value.

        :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown
        :param value_configuration: value of the bins - -1 if unknown
        :param FK_id_configuration_DCT_DCD: FK of the configuration (see table DATASET_CONFIGURATIONS_TYPES) - -1 if unknown
        :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)
        :type id_ds_conf_ds: int - not required
        :type value_configuration: int - not required
        :type FK_id_configuration_DCT_DCD: int - required
        :type FK_id_dataset_DS_DCD: int - required
        """
        self.id_ds_conf_ds = id_ds_conf_ds
        self.value_configuration = value_configuration
        self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD
        self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD

    @staticmethod
    def get_all_datasets_conf_ds():
        """
        Return an array with all the dataset configurations in the database.

        Bug fix: this method was declared without ``self``, so calling it on
        an instance raised TypeError. It uses no instance state, so it is now
        a @staticmethod (still callable as
        ``Dataset_conf_ds.get_all_datasets_conf_ds()``).

        :return: array of dataset configurations
        :rtype: list(Dataset_conf_ds)
        """
        listOfDatasetDSConfig = []
        sqlObj = _DS_config_DS_SQL()
        results = sqlObj.select_all_DDI_DB()
        for element in results:
            listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))
        return listOfDatasetDSConfig

    def create_ds_config_ds(self):
        """
        Insert this dataset configuration in the database (if missing) and
        return its id, which is also stored on the object.

        Uses value_configuration, FK_id_configuration_DCT_DCD and
        FK_id_dataset_DS_DCD.

        :return: id of the Dataset_conf_ds row
        :rtype: int
        """
        sqlObj = _DS_config_DS_SQL()
        value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)
        self.id_ds_conf_ds = value_id_ds_conf_ds
        return value_id_ds_conf_ds
7,423 | 7a0e7ede263727ef303ba23dff1949c3a7031360 | #!/usr/bin/python
import argparse
import os
import pipes
import sys
import rospy
import std_msgs.msg
import actionlib
import time
import datetime
from geometry_msgs.msg import Pose, Point, Quaternion
from actionlib import *
from location_provider.srv import GetLocationList
from std_srvs.srv import Trigger
try:
import rosplan_interface as planner
import state_machine.msg
from name_provider.srv import GetRealName, CreateNewPerson
except:
pass
from constructs import *
from parser import FnHintParser
name = "sprinkles"  # robot's spoken name, used as the command hot-word
tts = rospy.Publisher("tosay", std_msgs.msg.String, queue_size=5)  # text-to-speech output
valid_in = rospy.Publisher("heard", std_msgs.msg.String, queue_size=5)  # echo of accepted input
loc = rospy.ServiceProxy('/get_location_list', GetLocationList)
locations = None  # location names; filled in by get_cmd()
# Polite filler phrases that may optionally wrap a command:
nicegeneric = S('please') | 'will you'
nicepre = nicegeneric | S('go ahead and')
nicepost = nicegeneric | (~S('right') + 'now')
master = FnHintParser()  # command grammar / dispatch parser
face_last_seen = None  # datetime of the most recent face detection
currently_speaking = False  # True while TTS output is active
TIME_TOLERANCE = 5  # seconds a face detection is considered "active"
def voice_callback(msg):
    """Track whether TTS output is in progress.

    A short sleep when speech just ended gives the recognizer time to
    drain before commands are accepted again.
    """
    global currently_speaking
    speaking = msg.data
    if not speaking:
        time.sleep(2)
    currently_speaking = speaking
def face_callback(_):
    """Record the time a face was last detected (message content ignored)."""
    global face_last_seen
    face_last_seen = datetime.datetime.now()
def face_active():
    """Return True if a face was seen within the last TIME_TOLERANCE seconds."""
    if face_last_seen is None:
        return False
    cutoff = datetime.datetime.now() - datetime.timedelta(seconds=TIME_TOLERANCE)
    return face_last_seen > cutoff
@master.register_fn(keywords='dir')
def go(place):
    """Announce navigation to *place* and return the planner goal predicate."""
    global currently_speaking
    # Mark speech active immediately so the interaction loop waits for TTS.
    currently_speaking = True
    _say("Okay. I will go to the " + str(place))
    return planner.gen_predicate('robotat', x=place)
@master.register_fn()
def say(info):
    """Speak a canned response selected by *info*; silent for unknown keys."""
    canned = {
        "name": "I am " + name,
        "identification": "I am a people bot",
        "id": "I am a people bot",
        "hello": "Hello there.",
        "hi": "Hello there.",
        "love": "Not particularly",
        "friend": "I am a people bot. I am a friend to everyone.",
    }
    if info == 'joke':
        # Pull a random riddle, stripping the Q:/A: markers and newlines.
        msg = os.popen('fortune riddles | sed "s/Q://" | sed "s/A://" | tr "\n\t" " "').read()
    else:
        msg = canned.get(info, "")
    if msg:
        _say(msg)
def _say(msg):
    """Log *msg* and publish it to the text-to-speech topic."""
    rospy.loginfo("Saying: " + msg)
    tts.publish(std_msgs.msg.String(msg))
@master.register_fn()
def halt():
    """Cancel all planner goals and announce the stop."""
    planner.cancel()
    _say("Stopping")
@master.register_fn()
def spin(direction='around'):
    """Spinning is intentionally disabled; the robot just refuses."""
    _say('No')
    return
@master.register_fn()
def bring_msg(source, dest):
    """Create a message instance held by *source* and return the goal
    predicate of *dest* having it."""
    msg_name = planner.add_instance('message')
    planner.add_predicate('has_message', person=source, msg=msg_name)
    return planner.gen_predicate('has_message', person=dest, msg=msg_name)
class InteractServer(object):
    """Actionlib server driving one voice interaction with a detected person.

    While active, recognized speech is parsed into planner goals; the
    interaction ends when no face has been seen for TIME_TOLERANCE seconds.
    """
    def __init__(self, name):
        self.active = False           # True while an interaction is running
        self._action_name = name
        self.name = None              # resolved name of the current person
        self.nl_listen = False        # True while the NL name-listener owns audio
        self.goals = []               # planner goals collected this interaction
        self._feedback = state_machine.msg.interactFeedback()
        self._server = SimpleActionServer("interact",
                                          state_machine.msg.interactAction,
                                          execute_cb=self.execute_cb,
                                          auto_start = False)
        self._server.start()
        rospy.loginfo( "Interact Server started")
    def speech_callback(self, topic_data, parser):
        """Parse recognized speech into goals while interaction is active
        (and the robot is neither speaking nor listening for a name)."""
        rospy.loginfo("============")
        rospy.loginfo('%s, speaking: %s' % (topic_data.data, str(currently_speaking)))
        if self.active and not currently_speaking and not self.nl_listen and topic_data.data:
            valid_in.publish(std_msgs.msg.String(str(topic_data.data)))
            rospy.loginfo("Interpreting...")
            goal_s = parser.parse_and_run(topic_data.data)
            rospy.loginfo("Result: %s", str(goal_s))
            # parse_and_run may yield a single goal, a list of goals, or None.
            if hasattr(goal_s, '__iter__'):
                self.goals.extend(goal_s)
            elif goal_s is not None:
                self.goals.append(goal_s)
        rospy.loginfo("============")
    def check_name(self, id):
        """Look up person *id* in the name database and greet them; fall
        back to asking for (and registering) a new name."""
        self.name = None
        rospy.loginfo("Checking name... %d" % id)
        if not hasattr(self, '_name_service'):
            self._name_service = rospy.ServiceProxy('/get_real_name', GetRealName)
        try:
            rospy.wait_for_service('/get_real_name', 10)
        except:
            rospy.logwarn("Timeout waiting for person db")
            return
        res = self._name_service(id)
        if res.found_name:
            self.name = res.name
            _say("Hello %s." % self.name)
        else:
            _say("I do not recognize you.")
            self.set_name(id)
    def set_name(self, id=None, *args, **kwargs):
        """Ask the person to state their name and register it for *id*.

        Returns the new name on success, False on any timeout or
        recognition failure.
        """
        if id is None:
            id = self.id
        if not hasattr(self, '_nl_service'):
            self._nl_service = rospy.ServiceProxy('/nl_recognizer/nl_listen', Trigger)
        if not hasattr(self, '_mkname_service'):
            self._mkname_service = rospy.ServiceProxy('/create_new_person', CreateNewPerson)
        try:
            rospy.wait_for_service('/nl_recognizer/nl_listen', 10)
        except:
            rospy.logwarn("Timeout waiting for listener")
            return False
        try:
            rospy.wait_for_service('/create_new_person', 10)
        except:
            rospy.logwarn("Timeout waiting for person creation")
            return False
        _say("Please state your name.")
        rospy.sleep(4)
        # Hand the microphone to the natural-language listener while it runs.
        self.nl_listen = True
        newname = self._nl_service()
        self.nl_listen = False
        if newname.success:
            valid_in.publish(std_msgs.msg.String(str(newname.message)))
            self._mkname_service(newname.message, id)
            self.name = newname.message
            _say("Hello %s." % self.name)
            return newname.message
        else:
            _say("I didn't catch that. Continuing on.")
            return False
    def execute_cb(self, goal):
        """Run one interaction: greet, collect spoken goals while a face is
        visible, then report the goals as the action result."""
        self.id = goal.personID
        #print goal.goal_id
        if self.active:
            # NOTE: preemption handling is disabled (the `if False` guard).
            if False and self._server.is_preempt_requested():
                rospy.loginfo('%s: Preempted' % self._action_name)
                self._server.set_preempted()
                self.active = False
                return
        rospy.loginfo("interacting")
        self.active = True
        #self._feedback.isInteracting = True
        # _say("Hello there.")
        self.check_name(goal.personID)
        _say("Please say a command")
        #self._server.publish_feedback(self._feedback)
        time.sleep(3)
        # Keep listening while a face remains in view.
        while face_active():
            time.sleep(.5)
        if not self.active:
            # We're dead, don't send bad info
            return
        self.active = False
        rospy.loginfo("done interacting")
        res = state_machine.msg.interactResult()
        res.action = self.goals
        self.goals = []
        _say('Goodbye')
        if res.action:
            self._server.set_succeeded(res)
        else:
            self._server.set_aborted()
def get_cmd():
    """Build the spoken-command grammar once the location list is available.

    Returns the top-level parser construct; also caches the location
    names in the module-level `locations`.
    """
    global locations
    rospy.wait_for_service('/get_location_list')
    locations = loc().output
    # One alternation over all known location names.  The pop() runs
    # before reduce() iterates, so the popped name seeds the alternation
    # and the remaining names are OR-ed onto it (no duplication).
    loc_syntax = (reduce(lambda x, y: x | y, locations, S(locations.pop())))
    cmd = ~S('okay') + S(name) + ~nicepre + (
        (S('change my name') % 'set_name') |
        ((S('move') | 'go' | 'drive') % 'go' +
         ((S('to') + ~S('the') +
           loc_syntax % 'place'))) |
        (S('stop') | 'halt' | 'exit') % 'halt' |
        ((S('spin') % 'spin' | S('turn') % 'go') + (S('around') | 'left' | 'right') % 'direction') |
        ((S('say') | 'tell me' | 'speak' | 'what is' | 'what\'s') % 'say' + ~(S('your') | 'a') +
         (S('name') | 'identification' | 'id' | 'hello' | 'hi' | 'joke') % 'info') |
        (S("where are you going") % 'where') |
        ((S('take')|S('bring')|S('give')|S('send')) + ~S('a') + S("message") % 'bring_msg' + ~(S('from') + ~S('the') + loc_syntax % 'source') + S('to') + ~S('the') + loc_syntax % 'dest')
    )
    return cmd
if __name__ == '__main__':
    # Wire the speech interpreter node: grammar, planner, action server
    # and the three input topics (speech, face detections, TTS state).
    rospy.init_node('speech_interpreter')
    master.register_syntax(get_cmd())
    speech_topic = rospy.get_param('~speech_in', '/recognizer/output')
    active_topic = rospy.get_param('~active', '/face_finder/closest_face')
    planner.init()
    srv = InteractServer(rospy.get_name())
    master.register_fn(srv.set_name)
    textin = rospy.Subscriber(speech_topic, std_msgs.msg.String, callback=srv.speech_callback, callback_args=master)
    check_active = rospy.Subscriber(active_topic, rospy.msg.AnyMsg, callback=face_callback)
    check_voice = rospy.Subscriber('/is_speaking', std_msgs.msg.Bool, callback=voice_callback)
    rospy.spin()
7,424 | c7d51f6448400af5630bdc0c29493320af88288e | import pytesseract
from PIL import Image
import tensorflow as tf
from keras.models import load_model
from tensorflow import Graph
import os
import json
import cv2
import numpy as np
global class_graph
def classify(img, c_model):
    """Classify a single image array with the Keras model *c_model*.

    The image is resized to 128x128, scaled to [0, 1] and given a batch
    dimension before prediction.  Evaluation happens inside the
    module-level `class_graph`, since Keras models must run in the
    TensorFlow graph they were loaded in.

    Returns the model's prediction vector for the image.
    (Fixed the docstring, which previously claimed this classifies a
    folder of images, and removed dead commented-out code.)
    """
    im_size = 128
    img = cv2.resize(img, (im_size, im_size))
    img = img.astype("float") / 255.0
    img = np.expand_dims(img, axis=0)
    with class_graph.as_default():
        predictions = c_model.predict(img)[0]
    return predictions
if __name__ == '__main__':
    # Demo: load the credit-card classifier and run it on one image.
    im_name = "data/demo/images(1).jpg"
    # load model
    model_path = "data/credit-card.model"
    class_model = load_model(model_path)
    # classify() evaluates inside this graph.
    class_graph=tf.get_default_graph()
    crop_img = cv2.imread(im_name)
    predictions = classify(crop_img, class_model)
    print(predictions)
7,425 | 310e6e693cdce6ff71d06eac86214a21bef236d4 | """Produce a multi-panel figure of each output lead time in a forecast
"""
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert
from irise.plot.util import add_map
from myscripts import plotdir
from myscripts.models.um import case_studies
columns = 3
def main(forecast, name, levels, *args, **kwargs):
    """Plot each lead time of *forecast* as one panel of a multi-panel figure.

    The field *name* is extracted on the given *levels* from each set of
    cubes; extra args/kwargs are forwarded to pcolormesh.  The figure is
    saved under `plotdir` and nothing is returned.
    """
    nt = len(forecast)
    # Integer division: subplot2grid requires integer grid coordinates.
    # (Under Python 3 the original "/" produced floats and failed.)
    rows = (nt // columns) + 1
    fig = plt.figure(figsize=(18, 10 * float(rows) / columns))
    for n, cubes in enumerate(forecast):
        row = n // columns
        column = n - row * columns
        print(row, column)
        ax = plt.subplot2grid((rows, columns), (row, column))
        cube = convert.calc(name, cubes, levels=levels)[0]
        im = iplt.pcolormesh(cube, *args, **kwargs)
        add_map()
        # NOTE(review): column + 1 can exceed the grid width for panels in
        # the last column -- confirm intended colorbar placement.
        ax = plt.subplot2grid((rows, columns), (row, column + 1))
        cbar = plt.colorbar(im, cax=ax, orientation='horizontal')
    plt.savefig(plotdir + name + '_' + str(levels[0]) +
                '_' + str(levels[1][0]) + '.png')
    return
if __name__ == '__main__':
    # Plot PV on the 320 K isentropic surface for the Nov 2013 case study.
    forecast = case_studies.generate_season_forecast(2013, 11, 1)
    name = 'ertel_potential_vorticity'
    levels = ('air_potential_temperature', [320])
    main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')
7,426 | 50be2cbdaec6ed76e5d9367c6a83222f9153db82 | '''
Please Note:
Note: It is intended for some problems to be ambiguous. You should gather all requirements up front before implementing one.
Please think of all the corner cases and clarifications yourself.
Validate if a given string is numeric.
Examples:
1."0" => true
2." 0.1 " => true
3."abc" => false
4."1 a" => false
5."2e10" => true
Return 0 / 1 ( 0 for false, 1 for true ) for this problem
Clarify the question using “See Expected Output”
1.Is 1u ( which may be a representation for unsigned integers valid?
For this problem, no.
2.Is 0.1e10 valid?
Yes
3.-01.1e-10?
Yes
4.Hexadecimal numbers like 0xFF?
Not for the purpose of this problem
5. 3. (. not followed by a digit)?
No
6.Can exponent have decimal numbers? 3e0.1?
Not for this problem.
7.Is 1f ( floating point number with f as prefix ) valid?
Not for this problem.
8.How about 1000LL or 1000L ( C++ representation for long and long long numbers )?
Not for this problem.
9.How about integers preceded by 00 or 0? like 008?
Yes for this problem
'''
class Solution:
    """Validate whether a string is numeric.

    A valid number is an optional-signed decimal (one optional dot, at
    least one digit overall) optionally followed by 'e' and a signed
    integer exponent.  Surrounding spaces are ignored.
    """

    # @param A : string
    # @return an integer
    def isNumber(self, A):
        """Return 1 if A is a valid number, 0 otherwise."""
        s = A.strip(' ')
        if not s:
            return 0
        # Reject any character outside digits, '.', 'e' and signs.
        allowed = set('0123456789.e-+')
        if any(c not in allowed for c in s):
            return 0
        if 'e' in s:
            parts = s.split('e')
            # Exactly one 'e': mantissa may contain a dot, exponent may not.
            if len(parts) != 2:
                return 0
            return int(self.isnum(parts[0], 0) and self.isnum(parts[1], 1))
        return int(self.isnum(s, 0))

    def isnum(self, A, i):
        """Check A is a signed number; i == 1 forbids a decimal point."""
        if A == '':
            return False
        if i == 1 or '.' not in A:
            # Pure (signed) integer.
            if A[0] in ('+', '-'):
                A = A[1:]
                if A == '':
                    return False
            return all(c in '0123456789' for c in A)
        parts = A.split('.')
        # Bug fix: the original only examined the first two parts of the
        # split, so strings like "1.2.3" validated as numeric.
        if len(parts) != 2:
            return False
        # Integer part may be empty (".5") but not the fractional part ("3.").
        return (self.isnum(parts[0], 1) or parts[0] == '') and self.isnum(parts[1], 1)
|
7,427 | 7b6e73744d711188ab1a622c309b8ee55f3eb471 | # Python : Correct way to strip <p> and </p> from string?
s = s.replace('<p>', '').replace('</p>', '')
|
7,428 | eec2b818ea9d50161bad60e8bf83dcb7ce9bf9fa | from plone import api
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
from zope.configuration import xmlconfig
class OiRAFixture(PloneSandboxLayer):
    """Plone test layer installing the OiRA stack (membrane, euphorie,
    osha.oira) on top of the base Plone fixture."""
    defaultBases = (PLONE_FIXTURE,)
    def setUpZope(self, app, configurationContext):
        # Load ZCML for each dependency; order follows the dependency chain.
        z2.installProduct(app, 'Products.membrane')
        z2.installProduct(app, 'Products.statusmessages')
        import Products.statusmessages
        xmlconfig.file('configure.zcml',
                       Products.statusmessages,
                       context=configurationContext)
        import Products.membrane
        xmlconfig.file('configure.zcml',
                       Products.membrane,
                       context=configurationContext)
        import euphorie.client.tests
        xmlconfig.file("configure.zcml",
                       euphorie.client.tests,
                       context=configurationContext)
        import osha.oira
        xmlconfig.file('configure.zcml',
                       osha.oira,
                       context=configurationContext)
    def setUpPloneSite(self, portal):
        # Apply the installation profiles against the default workflow.
        wftool = api.portal.get_tool(name='portal_workflow')
        wftool.setDefaultChain('plone_workflow')
        applyProfile(portal, 'euphorie.content:default')
        applyProfile(portal, 'euphorie.client:default')
        applyProfile(portal, 'euphorie.deployment:default')
        applyProfile(portal, 'osha.oira:default')
OIRA_FIXTURE = OiRAFixture()
# Integration layer: shared fixture, per-test transaction isolation.
OIRA_INTEGRATION_TESTING = \
    IntegrationTesting(
        bases=(OIRA_FIXTURE,),
        name="osha.oira:Integration"
    )
# Robot/acceptance layer: functional fixture with autologin and a live ZServer.
OIRA_SUITE_ROBOT = FunctionalTesting(
    bases=(OIRA_FIXTURE,
           AUTOLOGIN_LIBRARY_FIXTURE,
           z2.ZSERVER_FIXTURE),
    name="OIRA_SUITE_ROBOT")
7,429 | 90b9dcd2dfc28446d1979d58ed49a12a85ce5b98 | # Generated by Django 3.1.7 on 2021-03-24 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Products_Table model.

    Migration files are historical records; do not edit the operations
    retroactively -- create a new migration for schema changes.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Products_Table',
            fields=[
                # NOTE(review): auto_created=True on a plain IntegerField PK
                # is unusual (AutoField is the conventional auto PK) --
                # confirm the model definition matches this.
                ('product_id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
                ('product_name', models.CharField(max_length=50)),
                ('product_details', models.TextField()),
                ('product_price', models.IntegerField()),
                ('product_release_date', models.DateTimeField()),
            ],
        ),
    ]
|
7,430 | 71662ff8c68559bf08e1da7f1a1504bfe842c950 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-04 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the custom User model (email-unique login,
    optional link to an enterprise.Organisation)."""
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        ('enterprise', '0002_auto_20160804_1616'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(blank=True, max_length=255, unique=True, verbose_name='Электронная почта')),
                ('username', models.CharField(db_index=True, max_length=40, unique=True, verbose_name='Идентификатор')),
                ('created_at', models.DateField(auto_now_add=True, verbose_name='Дата регистрации')),
                ('is_active', models.BooleanField(default=True, verbose_name='Активен')),
                ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP адрес')),
                ('surname', models.CharField(blank=True, max_length=50, verbose_name='Фамилия')),
                ('first_name', models.CharField(blank=True, max_length=25, verbose_name='Имя')),
                ('middle_name', models.CharField(blank=True, max_length=25, verbose_name='Отчество')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('organisation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enterprise.Organisation', verbose_name='Сотрудник организации')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'Пользователь',
                'verbose_name_plural': 'Пользователи',
            },
        ),
    ]
|
7,431 | acf3d188bd6c99774ddf538dcc83f99ad56c7057 | from mpi4py import MPI
from random import random
# Each rank contributes its own rank number; gather (root=0 by default)
# collects them into a list on rank 0; all other ranks receive None.
comm = MPI.COMM_WORLD
mydata = comm.rank
data = comm.gather(mydata)
if comm.rank == 0:
    print("Data = ", data)
|
7,432 | 2019a2a5588e57164ff4226ef3bcbbc506f2b315 | """
==============================
Visualize Cylinder with Wrench
==============================
We apply a constant body-fixed wrench to a cylinder and integrate
acceleration to twist and exponential coordinates of transformation
to finally compute the new pose of the cylinder.
"""
import numpy as np
from pytransform3d.transformations import (
transform_from_exponential_coordinates)
import pytransform3d.visualizer as pv
def spatial_inertia_of_cylinder(mass, length, radius):
    """Return the 6x6 spatial inertia matrix of a solid cylinder.

    The top-left 3x3 block is the rotational inertia about the principal
    axes (x/y transverse, z along the cylinder axis); the bottom-right
    3x3 block is mass times identity.
    """
    transverse = 0.25 * mass * radius ** 2 + 1.0 / 12.0 * mass * length ** 2
    axial = 0.5 * mass * radius ** 2
    return np.diag([transverse, transverse, axial, mass, mass, mass])
def animation_callback(
        step, cylinder, cylinder_frame, prev_cylinder2world,
        Stheta_dot, inertia_inv):
    """Advance the simulation one step and update the drawn cylinder.

    Integrates the acceleration caused by a constant body-fixed wrench to a
    twist, and the scaled twist (via the exponential map) to a pose delta
    composed with the previous pose.  `prev_cylinder2world` and `Stheta_dot`
    are mutated in place so the state survives between invocations.
    """
    if step == 0:  # Reset cylinder state at the start of each animation loop
        prev_cylinder2world[:, :] = np.eye(4)
        Stheta_dot[:] = 0.0
    # Apply constant wrench
    wrench_in_cylinder = np.array([0.1, 0.001, 0.001, 0.01, 1.0, 1.0])
    dt = 0.0005
    # Twist acceleration: inverse spatial inertia times the wrench.
    Stheta_ddot = np.dot(inertia_inv, wrench_in_cylinder)
    Stheta_dot += dt * Stheta_ddot
    cylinder2world = transform_from_exponential_coordinates(
        dt * Stheta_dot).dot(prev_cylinder2world)
    # Update visualization
    cylinder_frame.set_data(cylinder2world)
    cylinder.set_data(cylinder2world)
    prev_cylinder2world[:, :] = cylinder2world
    return cylinder_frame, cylinder
fig = pv.figure()
# Definition of cylinder
mass = 1.0
length = 0.5
radius = 0.1
inertia_inv = np.linalg.inv(
    spatial_inertia_of_cylinder(mass=mass, length=length, radius=radius))
# State of cylinder: pose and twist, mutated in place by animation_callback.
cylinder2world = np.eye(4)
twist = np.zeros(6)
cylinder = fig.plot_cylinder(length=length, radius=radius, c=[1, 0.5, 0])
cylinder_frame = fig.plot_transform(A2B=cylinder2world, s=0.5)
# Static world frame for reference.
fig.plot_transform(A2B=np.eye(4), s=0.5)
fig.view_init()
# When run as a script, show the interactive animation; without __file__
# (e.g. sphinx-gallery build) render a single image instead.
if "__file__" in globals():
    fig.animate(
        animation_callback, n_frames=10000,
        fargs=(cylinder, cylinder_frame, cylinder2world, twist, inertia_inv),
        loop=True)
    fig.show()
else:
    fig.save_image("__open3d_rendered_image.jpg")
|
7,433 | 9a982e0ab7fff882767a98ed01f5ed68bd710888 | import turtle
def draw_square(side=200):
    """Draw a square with a red turtle.

    Args:
        side: Edge length in pixels.  Defaults to 200, the original
            hard-coded size, so existing callers are unaffected.
    """
    conrad = turtle.Turtle()
    conrad.shape("turtle")
    conrad.color("red")
    conrad.speed(3)
    for _ in range(4):  # four sides, turning 90 degrees closes the square
        conrad.forward(side)
        conrad.right(90)
def draw_circle(radius=50):
    """Draw a circle with a default turtle.

    Args:
        radius: Circle radius in pixels (default 50, the original size).
    """
    niki = turtle.Turtle()
    niki.circle(radius)
def draw_triangle(side=135):
    """Draw a closed equilateral triangle.

    Bug fix: the original turned 145 degrees per corner; three turns add up
    to 435 degrees instead of the 360 required to close a polygon, so the
    path never closed.  An equilateral triangle needs 120-degree exterior
    turns (3 * 120 = 360).

    Args:
        side: Edge length in pixels (default 135, the original size).
    """
    tri = turtle.Turtle()
    tri.shape("turtle")
    for _ in range(3):
        tri.forward(side)
        tri.right(120)
def main():
    """Open a blue window, draw the three shapes, and close on click."""
    window = turtle.Screen()
    window.bgcolor("blue")
    draw_square()
    draw_circle()
    draw_triangle()
    window.exitonclick()
main() |
7,434 | 7c06bd52c924d3e401f50625109c5b8b489df157 | def tort(n, a, b):
return min(n*a, b)
def main():
    """Read n, a, b from stdin and print the result of tort(n, a, b)."""
    n, a, b = map(int, input().split())
    print(tort(n, a, b))
if __name__ == '__main__':
    main()
|
7,435 | 0f3ecd0a7189f57fdbda2360f6e39bd6101e2fdb | from LinkedList import LinkedList
class ListNode(object):
    """Node of a singly linked list (LeetCode-style)."""
    def __init__(self, x):
        self.val = x  # payload; used as a decimal digit by Solution.addTwoNumbers
        self.next = None  # successor node, or None at the tail
class Solution(object):
    """Add two numbers stored as linked lists, most significant digit first."""
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        def to_int(node):
            # Fold the digit chain into a plain (arbitrary precision) integer.
            total = 0
            while node:
                total = total * 10 + node.val
                node = node.next
            return total
        # Add as integers, then rebuild a digit list from the decimal string.
        digits = str(to_int(l1) + to_int(l2))
        sentinel = ListNode(0)
        tail = sentinel
        for ch in digits:
            tail.next = ListNode(int(ch))
            tail = tail.next
        return sentinel.next
# Smoke test: 7243 + 564 = 7807, printed as a linked list below.
l11, l22 = [7,2,4,3] , [5,6,4]
l1 = LinkedList(l11).getHead()
l2 = LinkedList(l22).getHead()
sl = Solution()
head = sl.addTwoNumbers( l1, l2 )
LinkedList([1]).printLinkedList(head) |
7,436 | 2b579c3def4c2d02d365f019518e8e0b25664460 | import pandas as pd
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from cftime import num2date
import os
import numpy as np
from datetime import datetime, timedelta, date
def plot_temperatures_by_country(values, country, start, end):
    """
    Plot average temperature for `country` between `start` and `end`
    (ISO date strings), together with a fixed 1973-1974 reference series.
    `values` must have columns 'dt', 'Country' and 'AverageTemperature'.
    """
    filtered = values.loc[(values['Country'] == country) &
                          (values['dt'] >= start) &
                          (values['dt'] <= end)]
    # x axis values
    x1 = filtered['dt']
    # corresponding y axis values
    y1 = filtered['AverageTemperature']
    # plotting the points
    plt.plot(x1, y1, label = "line 1")
    # NOTE(review): this second series is hard-coded to 1973-1974 regardless
    # of the requested range -- confirm the comparison window is intentional.
    filtered = values.loc[(values['Country'] == country) &
                          (values['dt'] >= '1973-01-01') &
                          (values['dt'] <= '1974-01-01')]
    # x axis values
    x2 = filtered['dt']
    # corresponding y axis values
    y2 = filtered['AverageTemperature']
    # plotting the points
    plt.plot(x2, y2, label="line 2")
    # naming the x axis
    plt.xlabel('x - axis - date')
    # naming the y axis
    plt.ylabel('y - axis - temperature')
    plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)
    # function to show the plot
    plt.show()
def temperatures_by_city_till2013():
    """
    Print summary info (row count, number of distinct countries, country
    list) for the by-city temperature dataset, part 1 - from 1743 to 2013.
    """
    # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude
    temperatures = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv")
    # 8 599 212 rows
    print(len(temperatures))
    countries = temperatures['Country'].unique()
    print(len(countries))
    print(sorted(countries))
def temperatures_by_country_till2013():
    """
    Print summary info (row count, number of distinct countries, country
    list) for the by-country temperature dataset, part 1 - from 1743 to 2013.
    """
    # Columns: dt, AverageTemperature, AverageTemperatureUncertainty, Country
    temperatures = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv")
    # 577 462 rows
    print(len(temperatures))
    countries = temperatures['Country'].unique()
    print(len(countries))
    print(sorted(countries))
def plot_co2_by_country(values, country, start, end):
    """
    Plot CO2 values for `country` from `start` to `end` (years).
    `values` must have columns 'Country', 'Year' and 'CO2'.
    """
    filtered = values.loc[(values['Country'] == country) &
                          (values['Year'] >= start) &
                          (values['Year'] <= end)]
    # x axis values
    x1 = filtered['Year']
    # corresponding y axis values
    y1 = filtered['CO2']
    # plotting the points
    plt.plot(x1, y1, label = "line 1")
    # naming the x axis
    plt.xlabel('x - axis - year')
    # naming the y axis
    plt.ylabel('y - axis - co2')
    # giving a title to my graph
    plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)
    # function to show the plot
    plt.show()
def co2_by_country_till2019():
    """
    Reshape the wide per-year CO2 emission table into a long
    (Country, Year, CO2) CSV - data from 1751 to 2017.
    """
    co2_messy = pd.read_csv("CO2/emission data.csv")
    # melt: one row per (Country, Year) instead of one column per year.
    co2 = pd.melt(co2_messy, id_vars=["Country"], var_name="Year", value_name="CO2")
    df = pd.DataFrame()
    df['Country'] = co2['Country']
    df['Year'] = co2['Year']
    df['CO2'] = co2['CO2']
    # NOTE(review): hard-coded absolute Windows path; consider parameterizing.
    df.to_csv(r'C:\Users\stoja\Desktop\EmissionCO2.csv', index=False)
def get_lat_lon():
    """
    Return parallel lists (latitudes, longitudes, cities, countries) for every
    distinct (latitude, longitude) pair in the by-city temperature dataset.

    Coordinate strings such as '57.05N' are converted to floats by dropping
    the trailing hemisphere letter (the sign is not applied, matching the
    original behaviour).

    Performance fix: the original tested membership in a *list* of pairs,
    making this loop O(n^2) over ~8.6M rows; a set makes each test O(1).
    """
    # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude
    temperatures = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv")
    Latitude = temperatures['Latitude']
    Longitude = temperatures['Longitude']
    City = temperatures['City']
    Country = temperatures['Country']
    lat_array = []
    long_array = []
    cities_array = []
    countries_array = []
    seen = set()
    for i, j, city, country in zip(Latitude, Longitude, City, Country):
        if (i, j) not in seen:
            seen.add((i, j))
            lat_array.append(float(i[:-1]))
            long_array.append(float(j[:-1]))
            cities_array.append(city)
            countries_array.append(country)
    return lat_array, long_array, cities_array, countries_array
def make_dataset_temperatures(filename, points):
    """
    Convert a netCDF4 temperature grid into a flat CSV (one row per city and
    time step) and return it as a DataFrame.

    Args:
        filename: netCDF4 file with 'longitude', 'latitude', 'date_number'
            and 'temperature' variables.
        points: number of time steps stored in the file (rows per city).

    Fixes vs. the original: ``dtype=np.float`` (removed in NumPy 1.24) is
    replaced by plain floats, and the per-element append loops are replaced
    by list.extend with the same values.
    """
    ds = Dataset(filename)
    lats, lons, cities, countries = get_lat_lon()
    # total lat,lon pairs: 1366
    print('The number of rows is ' + str(len(lats) * points))
    lon_array = ds.variables['longitude'][:]
    lat_array = ds.variables['latitude'][:]
    time_array = ds.variables['date_number'][:]
    temperature = ds.variables['temperature']
    # 'date_number' stores fractional years (e.g. 1950.5); convert each to a
    # calendar date by scaling the fraction to that year's length in seconds.
    dates = []
    for time in time_array[:]:
        year = int(time)
        rem = time - year
        base = datetime(year, 1, 1)
        year_seconds = (base.replace(year=base.year + 1) - base).total_seconds()
        dates.append((base + timedelta(seconds=year_seconds * rem)).date())
    dateResult = []
    temperatureResult = []
    latitudeResult = []
    longitudeResult = []
    cityResult = []
    countryResult = []
    for latitude, longitude, city, country in zip(lats, lons, cities, countries):
        # Indexes of the grid cell closest to this city's coordinates.
        i = np.abs(lon_array - longitude).argmin()
        j = np.abs(lat_array - latitude).argmin()
        dateResult.extend(dates)
        temperatureResult.extend(temperature[:, j, i])
        latitudeResult.extend([float(latitude)] * points)
        longitudeResult.extend([float(longitude)] * points)
        cityResult.extend([city] * points)
        countryResult.extend([country] * points)
        print('iteration no:' + str(i))
    df = pd.DataFrame()
    df['date'] = dateResult
    df['temperature'] = temperatureResult
    df['latitude'] = latitudeResult
    df['longitude'] = longitudeResult
    df['city'] = cityResult
    df['country'] = countryResult
    # NOTE(review): hard-coded absolute Windows path; consider parameterizing.
    df.to_csv(r'C:\Users\stoja\Desktop\Temperatures.csv', index=False)
    return df
def model():
    """Debug helper: print the netCDF file's metadata, its time units and
    every decoded timestamp."""
    # Info for netCDF4 file
    # 1416
    ds = Dataset('air.mon.mean.v501.nc')
    print(ds)
    time = ds.variables['time']
    print(time.units)
    time_array = time[:]
    for t in time_array[:]:
        print(num2date(t, units=time.units))
if __name__ == '__main__':
    print('Start')
    # Making the CO2 dataset
    co2_by_country_till2019()
    # Making the temperatures dataset (1416 time steps)
    df1 = make_dataset_temperatures('air.mon.mean.v501.nc', 1416)
    print(df1.head())
    # Making the temperatures anomalies dataset (3652 time steps)
    df2 = make_dataset_temperatures('Complete_TAVG_Daily_LatLong1_2010.nc', 3652)
    print(df2.head())
|
7,437 | ee0ed255b6851696dc57c01100cd67f5f959cf01 | from typing import List
import pandas as pd
import numpy as np
import pickle
from catboost import CatBoostRegressor
from sklearn.preprocessing import MinMaxScaler
def calculate_probable_age(usersEducationFeatures):
    """Estimate user ages from education records.

    A user who finished school in year Y is assumed to have been 17 then; a
    user whose earliest listed graduation was in year Y is assumed to have
    been 22 (school takes precedence).  Returns two dicts keyed by uid:
    the estimated age (only for users with data) and the count of non-null
    education entries (for every row).
    """
    prob_age = {}
    grads_count = {}
    school_age = 17  # assumed age at school graduation
    degree_age = 22  # assumed age at university graduation
    grad_columns = ["graduation_%d" % i for i in range(1, 8)]
    frame = usersEducationFeatures
    for index in frame.index:
        uid = frame.at[index, "uid"]
        school_year = frame.at[index, "school_education"]
        if not pd.isnull(school_year):
            prob_age[uid] = 2021 + school_age - school_year
        else:
            # Fall back to the first non-null graduation year.
            for column in grad_columns:
                year = frame.at[index, column]
                if not pd.isnull(year):
                    prob_age[uid] = 2021 + degree_age - year
                    break
        entries = 0 if pd.isnull(school_year) else 1
        entries += sum(
            0 if pd.isnull(frame.at[index, column]) else 1
            for column in grad_columns
        )
        grads_count[uid] = entries
    return prob_age, grads_count
def get_prob_age(uids, prob_age) -> List[int]:
    """Map each uid to its estimated age, defaulting to 0 for unknown uids.

    Bug fix: the original used dict.setdefault, which silently inserted a
    0 entry into `prob_age` for every unknown uid; dict.get returns the same
    values without mutating the input mapping.
    """
    return [prob_age.get(uid, 0) for uid in uids]
def get_grads_count(uids, grads_count) -> List[int]:
    """Map each uid to its education-entry count, defaulting to 0.

    Bug fix: replaces dict.setdefault (which mutated the input mapping by
    inserting 0 for every missing uid) with dict.get.
    """
    return [grads_count.get(uid, 0) for uid in uids]
def get_groups_count(uids, usersGroups):
    """Return, for each uid, how many group rows it has in `usersGroups`.

    `usersGroups` is a DataFrame with 'uid' and 'gid' columns; uids with no
    rows yield 0.  The original bare ``except:`` is narrowed to KeyError
    (raised by .at for missing uids) so unrelated errors are not swallowed.
    """
    per_uid = usersGroups.groupby("uid").count()
    groups_count = [0] * len(uids)
    for i, uid in enumerate(uids):
        try:
            groups_count[i] = per_uid.at[uid, "gid"]
        except KeyError:
            continue
    return groups_count
def get_mean_and_median_group(uids, gid2age, uid_groups):
    """Return per-uid mean and median of the ages of the uid's groups.

    `uid_groups` maps uid -> list of gids; `gid2age` maps gid -> age.
    Uids without groups, gids without a known age, or empty group lists all
    yield 0.0 -- matching the original bare-except behaviour, but now only
    the two expected exceptions (KeyError from the lookups,
    ZeroDivisionError from an empty list) are swallowed.
    """
    mean_group = [0.0] * len(uids)
    median_group = [0.0] * len(uids)
    for i, uid in enumerate(uids):
        try:
            ages = [gid2age[gid] for gid in uid_groups[uid]]
            mean_group[i] = sum(ages) / len(ages)
            median_group[i] = np.median(ages)
        except (KeyError, ZeroDivisionError):
            continue
    return mean_group, median_group
def get_mean_and_median_friends(uids, uid2age, uid_friends):
    """Return three per-uid lists: mean and median age of direct friends,
    and mean age of friends-of-friends.  Only computed for users with fewer
    than 42 friends; everything else defaults to 0.0."""
    mean_friends = [0.0] * len(uids)
    median_friends = [0.0] * len(uids)
    mean_friends2 = [0.0] * len(uids)
    for i, uid in enumerate(uids):
        # NOTE(review): bare except silently turns any failure into 0.0 --
        # consider narrowing to KeyError/ZeroDivisionError.
        try:
            tmp = []
            if uid in uid_friends and len(uid_friends[uid]) < 42:
                # Second-degree circle: ages of friends-of-friends,
                # excluding the user itself.
                for friend in uid_friends[uid]:
                    if friend in uid_friends:
                        for f2 in uid_friends[friend]:
                            if f2 != uid and f2 in uid2age:
                                tmp.append(uid2age[f2])
                mean_friends2[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0
                # First-degree circle: ages of direct friends with known age.
                tmp = [uid2age[x] for x in uid_friends[uid] if x in uid2age]
                mean_friends[i] = sum(tmp) / len(tmp) if len(tmp) != 0 else 0.0
                median_friends[i] = np.median(tmp) if len(tmp) != 0 else 0.0
        except:
            continue
    return mean_friends, median_friends, mean_friends2
def main():
    """Recreate the training-time features for the test users and write
    CatBoost age predictions to /var/log/result."""
    # Pre-computed lookup tables and fitted artefacts from training.
    with open("gid2age.pkl", "rb") as fin:
        gid2age = pickle.load(fin)
    with open("uid2age.pkl", "rb") as fin:
        uid2age = pickle.load(fin)
    with open("uid_friends.pkl", "rb") as fin:
        uid_friends = pickle.load(fin)
    with open("scaler.pkl", "rb") as fin:
        scaler = pickle.load(fin)
    model = CatBoostRegressor()
    model.load_model("model")
    test = pd.read_csv("/tmp/data/test.csv")
    testEducationFeatures = pd.read_csv("/tmp/data/testEducationFeatures.csv")
    testGroups = pd.read_csv("/tmp/data/testGroups.csv")
    # Number of direct friends per test user.
    test["cfriends"] = 0
    for index in test.index:
        uid = test.at[index, "uid"]
        if uid in uid_friends:
            test.at[index, "cfriends"] = len(uid_friends[uid])
        else:
            test.at[index, "cfriends"] = 0
    # Education-based age estimate and entry count.
    prob_age, grads_count = calculate_probable_age(testEducationFeatures)
    test["prob_age"] = get_prob_age(test.uid, prob_age)
    test["grads_count"] = get_grads_count(test.uid, grads_count)
    test["groups_count"] = get_groups_count(test.uid, testGroups)
    # uid -> list of its group ids.
    uid_groups = {}
    for index in testGroups.index:
        uid = testGroups.at[index, "uid"]
        uid_groups[uid] = uid_groups.setdefault(uid, []) + [testGroups.at[index, "gid"]]
    test["mean_group_age"], test["median_group_age"] = get_mean_and_median_group(test.uid, gid2age, uid_groups)
    test["mean_friends_age"], test["median_friends_age"], test["mean_friends2_age"] = get_mean_and_median_friends(
        test.uid, uid2age, uid_friends
    )
    # Indicator columns: 0 means "no data", so flag where an estimate exists.
    test["is_prob_age"] = test.prob_age != 0
    test["is_group_age"] = test.mean_group_age != 0
    test["is_friends_age"] = test.mean_friends_age != 0
    X_test = scaler.transform(test.drop(["uid"], axis=1))
    y_pred = model.predict(X_test)
    res = pd.DataFrame({"uid": test.uid, "age": y_pred})
    res.to_csv("/var/log/result", header=True, index=False)
if __name__ == "__main__":
main()
|
7,438 | ee49ce63951721458cb98b370285d04231bb2c20 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def drop_duplicate(data, sub_set):
    """Drop duplicate rows (judged on the columns in `sub_set`) in place,
    keeping the first occurrence, and print how many rows were removed."""
    print('Before drop shape:', data.shape)
    rows_before = data.shape[0]
    data.drop_duplicates(sub_set, keep='first', inplace=True)
    data.reset_index(drop=True, inplace=True)
    print('After drop shape:', data.shape)
    print('Total Duplicate:', rows_before - data.shape[0])
def rmse(predictions, targets):
    """Root-mean-square error between predictions and targets."""
    squared_error = (predictions - targets) ** 2
    return np.sqrt(np.mean(squared_error))
class predict(object):
    """Train and evaluate several regressors on the "predict future sales" data.

    Bug fixes relative to the original:
    * the XGBoost model was created as ``self._xgb`` but used as
      ``self.__xgb`` (AttributeError on train/test) -- the attribute name is
      now consistent;
    * LightGBM training called the non-existent ``lgb.dataset``, referenced
      an undefined module-level ``train_labels`` and predicted through
      ``lgb.predict`` -- it now builds an ``lgb.Dataset`` and keeps the
      booster returned by ``lgb.train`` for prediction.
    """
    def __init__(self, trainfile, testfile):
        self.trainfile = trainfile
        self.testfile = testfile
        self.__lr = LinearRegression()
        # LightGBM training parameters (MSE objective, RMSE metric).
        self.lgb_params = {
            'feature_fraction': 1,
            'metric': 'rmse',
            'min_data_in_leaf': 16,
            'bagging_fraction': 0.85,
            'learning_rate': 0.03,
            'objective': 'mse',
            'bagging_seed': 2 ** 7,
            'num_leaves': 32,
            'bagging_freq': 3,
            'verbose': 0
        }
        self.__lgb_model = None  # booster set by trainLightGBM
        self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38, random_state=50)
        self.__xgb = XGBRegressor(max_depth=8, n_estimators=1000, min_child_weight=300,
                                  colsample_bytree=0.9, subsample=0.9, eta=0.15, seed=42)
        # Populated by trainingdata()/testingdata().
        self.train_data = None
        self.train_labels = None
        self.train_data1 = None
        self.train_labels1 = None
        self.val_data = None
        self.val_labels = None
        self.test_data = None
        self.predicted_labels = None
        self.x_train_val = None  # small tail of the training split used as an
        self.y_train_val = None  # early-stopping eval set for XGBoost
    def trainingdata(self):
        """Load and clean the training CSV, derive day/month/year features
        and split into 70/30 train/validation sets."""
        parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
        df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=parser)
        df = df.dropna()
        df = df.loc[df['item_cnt_day'] > 0]
        subset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']
        drop_duplicate(df, sub_set=subset_train)
        # Replace the known negative price with the median of comparable rows.
        median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()
        df.loc[df.item_price < 0, 'item_price'] = median
        # Clip outliers.
        df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
        df['item_price'] = df['item_price'].clip(0, 300000)
        # Merge duplicated shop ids.
        df.loc[df.shop_id == 0, 'shop_id'] = 57
        df.loc[df.shop_id == 1, 'shop_id'] = 58
        df.loc[df.shop_id == 10, 'shop_id'] = 11
        df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
        df['day'] = df['day'].astype('int64')
        df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
        df['month'] = df['month'].astype('int64')
        df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
        df['year'] = df['year'].astype('int64')
        df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price', 'item_cnt_day']]
        df['item_id'] = np.log1p(df['item_id'])
        self.train_labels1 = df['item_cnt_day']
        self.train_data1 = df.drop(columns='item_cnt_day')
        self.train_data, self.val_data, self.train_labels, self.val_labels = train_test_split(
            self.train_data1, self.train_labels1, test_size=0.3)
        self.x_train_val = self.train_data[-100:]
        self.y_train_val = self.train_labels[-100:]
    def testingdata(self):
        """Load and clean the test CSV with the same shop merging and
        day/month/year feature engineering."""
        parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
        df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=parser)
        subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
        drop_duplicate(df, sub_set=subset_test)
        df.loc[df.shop_id == 0, 'shop_id'] = 57
        df.loc[df.shop_id == 1, 'shop_id'] = 58
        df.loc[df.shop_id == 10, 'shop_id'] = 11
        df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
        df['day'] = df['day'].astype('int64')
        df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
        df['month'] = df['month'].astype('int64')
        df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
        df['year'] = df['year'].astype('int64')
        df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]
        df['item_id'] = np.log1p(df['item_id'])
        self.test_data = df
    def data(self):
        """Prepare both the training and the test datasets."""
        self.trainingdata()
        self.testingdata()
    def trainLinearRegression(self):
        """Fit the linear-regression baseline on the training split."""
        self.__lr.fit(self.train_data, self.train_labels)
    def testLinearRegression(self):
        """Print the validation RMSE of the linear-regression baseline."""
        self.predicted_labels = self.__lr.predict(self.val_data)
        print("Linear Regression score " + str(rmse(self.predicted_labels, self.val_labels)))
    def trainExtraTreeRegressor(self):
        """Fit the extra-trees model on the training split."""
        self.__tree_reg.fit(self.train_data, self.train_labels)
    def testExtraTreeRegressor(self):
        """Print the validation RMSE of the extra-trees model."""
        self.predicted_labels = self.__tree_reg.predict(self.val_data)
        print("ExtraTreeRegressor score " + str(rmse(self.predicted_labels, self.val_labels)))
    def trainLightGBM(self):
        """Train a LightGBM booster for 300 rounds and keep it for testing."""
        train_set = lgb.Dataset(self.train_data, label=self.train_labels)
        self.__lgb_model = lgb.train(self.lgb_params, train_set, 300)
    def testLightGBM(self):
        """Print the validation RMSE of the trained LightGBM booster."""
        self.predicted_labels = self.__lgb_model.predict(self.val_data)
        print("LightGBM score " + str(rmse(self.predicted_labels, self.val_labels)))
    def trainXGBoost(self):
        """Fit XGBoost with RMSE early stopping on a held-out tail."""
        self.__xgb.fit(self.train_data, self.train_labels, eval_metric="rmse",
                       eval_set=[(self.train_data, self.train_labels),
                                 (self.x_train_val, self.y_train_val)],
                       verbose=True, early_stopping_rounds=10)
    def testXGBoost(self):
        """Print the validation RMSE of the fitted XGBoost model."""
        self.predicted_labels = self.__xgb.predict(self.val_data)
        print("XGBoost score " + str(rmse(self.predicted_labels, self.val_labels)))
if __name__ == "__main__":
train_data_name = sys.argv[1]
test_data_name = sys.argv[2]
model = predict(train_data_name,test_data_name)
model.data()
# model.trainLinearRegression()
# model.testLinearRegression()
# model.trainExtraTreeRegressor()
# model.testExtraTreeRegressor()
# model.trainLightGBM()
# model.testLightGBM()
# model.trainXGBoost()
# model.testXGBoost()
# plotConfusionMatrix(model.test_labels,model.predicted_labels)
# model.trainDecesionTree()
# model.testDecesionTree()
# model.trainRandomForrest()
# model.testRandomForrest()
# model.trainSVM()
# model.testSVM()
|
7,439 | adff75857a1de24267e771c599e4d89486a6ad32 | # Generated by Django 2.0.5 on 2018-07-12 11:08
import assessment.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Swap in the custom SeasonManager and adjust several assessment
    field definitions (verbose names are in Persian)."""
    dependencies = [
        ('assessment', '0006_auto_20180712_1428'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='season',
            managers=[
                ('objects', assessment.models.SeasonManager()),
            ],
        ),
        migrations.AlterField(
            model_name='punishmentreward',
            name='method',
            field=models.TextField(verbose_name='روش'),
        ),
        migrations.AlterField(
            model_name='scaleanswer',
            name='carried_on',
            field=models.BooleanField(default=False, verbose_name='انجام\u200cشده'),
        ),
        migrations.AlterField(
            model_name='scaleanswer',
            name='qualitativeAnswer',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کیفی'),
        ),
        migrations.AlterField(
            model_name='scaleanswer',
            name='quantitativeAnswer',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='پاسخ کمی'),
        ),
    ]
|
7,440 | 8e34b5e15c5b6107d6841e7b567abf967c631f1b | # coding=utf-8
from __future__ import print_function
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
basedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
from gen.gen_captcha import gen_dataset, load_templates, candidates
from gen.img_process import grey_to_binary, clear_paper_noise
from model.nn import load_model_nn
from model.common import find_model_ckpt
import tensorflow as tf
from gen.utils import vec2str
import numpy as np
from PIL import Image
from downloader import download
def show_im(dataset):
    """Display the first sample of `dataset` as a 30x96 black-and-white image.

    Samples are flat 0/1 vectors; scaling by 255 makes them visible.
    """
    pixels = np.uint8(dataset[0]).reshape((30, 96)) * 255
    Image.fromarray(pixels).show()
def test_model(captcha):
    """Preprocess a downloaded captcha image, run the trained network on it
    and print the predicted 4-character string."""
    im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
    im = im.convert('L')  # greyscale
    im = grey_to_binary(im)
    im = clear_paper_noise(im, 5)
    # im.show()
    # templates = load_templates(os.path.join('trainer', 'templates'))
    model = load_model_nn()
    x = model['x']
    keep_prob = model['keep_prob']
    saver = model['saver']
    prediction = model['prediction']
    graph = model['graph']
    # Most recent checkpoint of the trained model.
    model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
    # print("Used the model:", model_ckpt_path)
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        saver.restore(session, model_ckpt_path)
        # dataset, labels = gen_dataset(1, templates) # generate one image
        dataset = []
        # Flatten the 30x96 binary image to the [0, 1] range the net expects.
        dataset.append(np.asarray(im.convert("L")).reshape([30 * 96]) / 255)
        label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0}, session=session)[0]
        string = ''
        # Each of the 4 outputs is an offset from the character '0'.
        for i in range(4):
            string += chr(label[i] + ord('0'))
        print(string)
if __name__ == "__main__":
    # With no argument, download one fresh captcha; otherwise use the given file.
    if len(sys.argv) <= 1:
        captcha = download(1)[0]
    else:
        captcha = sys.argv[1]
    test_model(captcha)
|
7,441 | 4d2cb3e0bdd331a1de7f07eb0109f02c9cf832a8 | import logging
import os
import time
import urllib
from collections import namedtuple
from statistics import mean
from urllib.request import urlopen
import bs4
import regex as re
from tika import parser
from scipy.stats import ks_2samp
import config
from TFU.trueformathtml import TrueFormatUpmarkerHTML
from TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX
from helpers.str_tools import remove_ugly_chars
class PaperReader:
    """Multimedial extractor: reads text from papers in PDFs, URLs and HTML.

    Formatting makes processing harder: text is cluttered with publisher
    remarks on every page, page and line numbers and other noise that must be
    ignored, especially when joining the texts of different pages where
    sentences continue.

    Prose is detected by comparing the letter distribution of extracted
    chunks to that of normal prose (Kolmogorov-Smirnov two-sample test).

    Bug fixes vs. the original ``just_extract_text_from_html``: the decoded
    response string was used as a context manager (``with url.read().decode()
    as ...``), which always raised, and BeautifulSoup was given the invalid
    keyword ``parent=`` instead of ``features=``.
    """
    def __init__(self, _threshold=0.001, _length_limit=20000):
        # Words of length >= 4 from the configured word list (used by
        # get_only_real_words; the filtering there is currently disabled).
        with open(config.wordlist, 'r') as f:
            self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
        self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
        self.tfu_html = TrueFormatUpmarkerHTML()
        self.length_limit = _length_limit
        self.threshold = _threshold
        # Reference letter distribution of ordinary English prose for the
        # KS test in analyse().
        self.normal_data = list(
            'used are variants of the predicate calculus. He even says, Lately '
            'those who think they ought to be so regarded seem to be winning. '
            'Under these circumstances, it does seem odd for McDermott to devote '
            'much space to complaining about the logical basis of a book whose '
            'very title proclaims it is about logical foundations. In any '
            'case, given such a title, it wouldnt seem necessary that readers '
            'should be warned that the foundations being explored are not '
            'In competition with this diversity is the idea of a unified model '
            'of inference. The desire for such a model is strong among those '
            'who study declarative representations, and Genesereth and Nilsson '
            'are no exception. As are most of their colleagues, they are drawn '
            'to the model of inference as the derivation of conclusions that '
            'are entailed by a set of beliefs. They wander from this idea in a '
            'few places but not for long. It is not hard to see why: Deduction '
            'is one of the fews kinds of inference for which we have an '
            'interesting general theory. '.lower()
        )
    def just_extract_text_from_html(self, adress):
        """Fetch `adress` (URL or local file path) and return its visible text."""
        logging.info(f"extracting text from {adress}")
        try:
            # urlopen raises ValueError for non-URL strings (e.g. local
            # paths), which falls through to the file branch below.
            markup = urlopen(adress).read().decode('utf-8')
            soup = bs4.BeautifulSoup(markup, features='lxml')
            return self.get_only_real_words(soup.get_text(), self.wordlist)
        except ValueError:
            with open(adress, "r") as fdoc:
                soup = bs4.BeautifulSoup(fdoc, features='lxml')
                return self.get_only_real_words(soup.get_text(), self.wordlist)
    def parse_file_format(self, adress):
        """Convert a PDF or HTML file to indexed HTML + JSON + plain text.

        Side effects: writes the indexed HTML, its JSON word index and a
        plain-text copy (for topic modelling), copies the HTML to the Apache
        directory, and stores the extracted text on ``self.text``.
        """
        if adress.endswith('pdf'):
            paths = self.pdfpath2htmlpaths(adress)
            if config.parse_pdf2htmlEX:
                os.system(f"pdf2htmlEX "
                          f"--optimize-text 1 "
                          f"--fit-width {config.reader_width} "
                          f"\"{adress}\" \"{paths.html_before_indexing}\"")
            tfu = self.tfu_pdf
        elif adress.endswith('html'):
            tfu = self.tfu_html
            paths = self.htmlpath2htmlpaths(adress)
            logging.warning("trying with html...")
        else:
            logging.error(f"File '{adress}' could not be processed")
            return None
        tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing)
        tfu.save_doc_json(paths.json_path)
        os.system(f"cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"")
        self.text = " ".join(list(tfu.indexed_words.values()))
        # needed for topic modelling
        with open(paths.txt_path, "w") as f:
            f.write(self.text)
        logging.debug(paths)
        self.paths = paths
        time.sleep(2)
        logging.info(f"extracted text: {self.text[100:]}")
        return None
    def load_url(self, adress):
        """Download `adress` and extract its text with Tika into ``self.text``."""
        response = urllib.request.urlopen(adress)
        data = response.read()  # a `bytes` object
        self.text = parser.from_buffer(data)
    def analyse(self):
        """
        Extract prose text from the loaded texts, which may contain line
        numbers, addresses, journal links etc.

        :return str: prose text, truncated to ``self.length_limit``
        """
        logging.info("transferring text to CorpusCook...")
        paragraphs = self.text.split('\n\n')
        print("mean length of splitted lines", (mean([len(p) for p in paragraphs])))
        # If TIKA resolved '\n' (long paragraphs), undo hyphenation and
        # hard line breaks before joining.
        if (mean([len(p) for p in paragraphs])) > 80:
            paragraphs = [re.sub(r"- *\n", '', p) for p in paragraphs]
            paragraphs = [p.replace('\n', " ") for p in paragraphs]
            paragraphs = [p.replace(';', " ") for p in paragraphs]
            joiner = " "
        else:
            # If TIKA did not
            joiner = " "
        # Keep only paragraphs whose letter distribution resembles prose.
        processed_text = joiner.join([p
                                      for p in paragraphs
                                      if
                                      p and
                                      ks_2samp(self.normal_data, list(p)).pvalue > self.threshold
                                      ]
                                     )
        return processed_text.strip()[:self.length_limit]
    # Bundle of all file-system locations derived from one input document.
    DocPaths = namedtuple("DocPaths", ["html_before_indexing",
                                       "html_after_indexing",
                                       "apache_path",
                                       "json_path",
                                       "txt_path"])
    def pdfpath2htmlpaths(self, adress):
        """Derive all output paths for a PDF input file."""
        filename = os.path.basename(adress)
        html_before_indexing = config.appcorpuscook_docs_document_dir + filename + ".html"
        filename = remove_ugly_chars(filename)
        html_after_indexing = config.appcorpuscook_docs_document_dir + filename + ".pdf2htmlEX.html"
        json_path = config.appcorpuscook_docs_json_dir + filename + ".json"
        txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt"
        apache_path = config.apache_dir_document + filename + ".html"
        return self.DocPaths(
            html_before_indexing,
            html_after_indexing,
            apache_path,
            json_path,
            txt_path)
    def get_only_real_words(self, text, wordlist):
        """Return `text` unchanged (word-list filtering is disabled)."""
        return text  # " ".join([word for word in text.split() if word in wordlist])
    def htmlpath2htmlpaths(self, adress):
        """Derive all output paths for an HTML input file."""
        filename = os.path.basename(adress)
        html_before_indexing = config.appcorpuscook_diff_document_dir + filename
        filename = remove_ugly_chars(filename)
        html_after_indexing = config.appcorpuscook_diff_html_dir + filename + ".pdf2htmlEX.html"
        json_path = config.appcorpuscook_diff_json_dir + filename + ".json"
        txt_path = config.appcorpuscook_docs_txt_dir + filename + ".txt"
        apache_path = config.apache_dir_document + filename + ".html"
        return self.DocPaths(
            html_before_indexing,
            html_after_indexing,
            apache_path,
            json_path,
            txt_path)
|
7,442 | a5b7f565a1797e5f326bcf26ff7c8ad2469dca70 | #!/usr/bin/env python
import argparse
import pymssql
import json
#get the lcmMediaId from DB.
def getMediaId(contentProviderMediaName):
    """Return the lcmMediaId for a content-provider media name, or None.

    Fixes vs. the original: the connection is now always closed (it leaked
    on every call), and the pointless while/break around fetchone() is gone.
    """
    # test db -- NOTE(review): credentials are hard-coded; move to config/env.
    conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user='TravCatalog',
                           password='travel', database='LodgingCatalogMaster_Phoenix')
    # prod db
    # conn = pymssql.connect(host='LodgingCatalogMaster.ch.expeso.com', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')
    try:
        cur = conn.cursor()
        cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
                    contentProviderMediaName)
        row = cur.fetchone()
        # First column of the media row is the id; None when no row matched.
        return row[0] if row else None
    finally:
        conn.close()
def main(messages_file, records):
    """Re-emit JSON messages from *messages_file*, enriching each one with
    its lcmMediaId looked up by fileName.

    Processing stops after *records* messages (records <= 0 means all).
    Lines starting with '> ' are diagnostic echoes and are skipped without
    counting towards the limit.
    """
    print ('> Messages: %s; Records: %d' % (messages_file, records))
    message_number = 0
    with open(messages_file, 'r') as msgs_file:
        for message in msgs_file:
            if records > 0 and message_number >= records:
                break
            if message.startswith('> '):
                continue
            try:
                parsed = json.loads(message)
                media_id = getMediaId(parsed['fileName'])
                if media_id is not None:
                    parsed['domainFields']['lcmMediaId'] = str(media_id)
                print (json.dumps(parsed))
            except (RuntimeError, TypeError, NameError):
                print ('> %s error' % message_number)
            message_number += 1
# CLI entry point: positional messages file plus optional --records cap
# (default -1 = process every message).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'messages_file', help='File with the messages to write. One message per line'
    )
    parser.add_argument(
        '--records', default=-1, help='Number of messages to read'
    )
    args = parser.parse_args()
    main(args.messages_file, int(args.records))
|
7,443 | d517c1e2eb4d37a2584f1603c704efce6834df92 | # Author: Charse
# Examples of Python list usage
import copy
name = ["111", "222", "333", "444", "555"]
# Reading elements from the list
print(name[0], name[2])  # 111 333
print(name[1:3])  # slicing -> ['222', '333']
print(name[:3])  # ['111', '222', '333'] -- same as starting at index 0
print(name[0:3])  # ['111', '222', '333']
print(name[-2:])  # ['444', '555'] -- the last two elements
# Adding elements to the list
name.append("666")  # append at the end
name.insert(1, "999")  # insert "999" at index 1; later elements shift right
print(name)
# Modifying an element
name[0] = "000"
print(name)
# Removing elements
name.pop()  # without an argument, removes the last element
print(name)
name.pop(2)
print(name)
# Finding the index of a given element
print(name.index("999"))
# Reverse in place (mutates the list)
name.reverse()
print(name)
# In-place sort; order: special characters, digits, uppercase, lowercase
name.sort()
print(name)
# name.clear() remove all items
# Copying the list
name2 = name.copy()  # shallow copy: nested lists are shared with the original
print(name2)
name[1] = "xxx"  # rebinding a top-level slot does NOT affect name2
names = ["1", [1, 2], "2"]
names[1][0] = 9
print(names)
names1 = copy.copy(names)  # shallow copy, same as list.copy(): copies references only
names3 = name[:]
print("name3:", names3)
# Deep copy
names2 = copy.deepcopy(names)
# Mutating the nested list changes names and names1 alike,
# but not names2 (the deep copy)
names[1][1] = 3
print(names)
print(names1)
print(names2)
# Iterating over the list
for i in names2:
    print(i)
# Stride printing: from index 0 up to (but excluding) the last element, step 2
print(name[0:-1:2])
# The 0 and -1 bounds can be omitted (note: [::2] also includes the last element)
print(name[::2])
'''
Shallow vs. deep copy
'''
|
7,444 | 88e1eb4cbfe346c663cca23836c23346e18a8488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from twython import Twython
import random
tweetStr = "None"  # fallback summary text when nothing has been tweeted
#twitter consumer and access information goes here
# NOTE(review): apiKey/apiSecret/accessToken/accessTokenSecret are not
# defined anywhere in this file -- they must be supplied above this line.
api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)
timeline = api.get_user_timeline()
lastEntry = timeline[0]
# id of our newest tweet: used as since_id so searches only see newer tweets
sid = str(lastEntry['id'])
def searchTwitter(sString, perP):
    """Search tweets matching *sString* that are newer than our last tweet
    (module-level ``sid``), with *perP* results per page."""
    return api.search(q=sString, since_id=sid, rpp=perP)
def oneGif(twitSearch, sString, gifName):
for tweet in twitSearch['statuses']:
user = tweet["user"]["screen_name"]
text = tweet['text']
id = str(tweet['id'])
print text.encode('utf-8')
if sString in text.lower():
statushead = "@" + user + " "
if "RT" not in text:
api.create_favorite(id=id)
photo = open('/home/pi/gifs/' + gifName, 'rb')
response = api.upload_media(media=photo, media_type='image/gif')
api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
tweetStr = statushead + gifName
return "Tweeted: " + tweetStr
def threeGif(twitSearch, sString, gifOne, gifTwo, gifThree):
for tweet in twitSearch['statuses']:
user = tweet["user"]["screen_name"]
text = tweet['text']
id = str(tweet['id'])
print text.encode('utf-8')
if sString in text.lower():
statushead = "@" + user + " "
if "RT" not in text:
api.create_favorite(id=id)
number = random.randrange(1,4)
if number == 1:
photo = open('/home/pi/gifs/' + gifOne, 'rb')
response = api.upload_media(media=photo, media_type='image/gif')
api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
tweetStr = statushead + gifOne
if number == 2:
photo = open('/home/pi/gifs/' + gifTwo, 'rb')
response = api.upload_media(media=photo, media_type='image/gif')
api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
tweetStr = statushead + gifTwo
if number == 3:
photo = open('/home/pi/gifs/' + gifThree, 'rb')
response = api.upload_media(media=photo, media_type='image/gif')
api.update_status(status=statushead, media_ids=[response['media_id']], in_reply_to_status_id=id)
tweetStr = statushead + gifThree
return "Tweeted: " + tweetStr
|
7,445 | dd936839d71b97b3a21115498092d8984de0e3f1 | questions = ('Какой язык мы учим?', 'Какой тип данных имеет целая переменная?', 'Какой тип данных имеет вещественная переменная?', 'Какой тип данных имеет логическая переменная?', 'Какой тип данных имеет символьная переменная?')
answers = ('Python', 'Integer', 'Float', 'Bool', 'String')

# Ask each question once; the user's answer is normalised with
# str.capitalize() before comparison, so case does not matter.
count_answers = 0
for question, answer in zip(questions, answers):
    if input('{}...'.format(question)).capitalize() == answer:
        count_answers += 1

print('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.format(i=len(questions), count_answers=count_answers))
|
7,446 | 7b459cf321f351e1485a9aef0ca23067f411e430 | """Wrapper over the command line migrate tool to better work with
config files."""
import subprocess
import sys
from alembic.migration import MigrationContext
from ..lib.alembic import bootstrap_db
from ..lib.sqla import create_engine
from ..models import DBSession as db
def main():
    """CLI dispatcher: ``PROG CONFIG_URI bootstrap`` initialises the DB;
    any other arguments are forwarded to alembic as ``alembic -c CONFIG_URI
    <args>`` after verifying the database has a revision stamp.
    """
    if len(sys.argv) < 3:
        sys.stderr.write('Usage: %s CONFIG_URI {bootstrap | ALEMBIC_OPTS}\n'
                         % sys.argv[0])
        sys.exit(1)
    # pop(1) removes CONFIG_URI so sys.argv[1:] below is exactly the
    # remaining alembic argument list.
    config_uri = sys.argv.pop(1)
    if sys.argv[1] == 'bootstrap':
        bootstrap_db(config_uri)
    else:
        engine = create_engine(config_uri)
        db.configure(bind=engine)
        # Read the alembic revision currently stamped on the database.
        context = MigrationContext.configure(engine.connect())
        db_version = context.get_current_revision()
        if not db_version:
            sys.stderr.write('Database not initialized.\n'
                             'Try this: "sortie-db-manage %s bootstrap"\n'
                             % config_uri)
            sys.exit(2)
        cmd = ['alembic', '-c', config_uri] + sys.argv[1:]
        print(subprocess.check_output(cmd))
|
7,447 | 56b4262e88793be366d8ffe0fe4427fdb2a99bd7 | from app import create_app, db
import unittest
import json
class Test(unittest.TestCase):
    """End-to-end tests of the /api/users endpoints against a blank DB.

    Each test runs on a freshly created schema (setUp) that is dropped
    again afterwards (tearDown).
    """
    def setUp(self):
        """Before each test, set up a blank database"""
        self.app = create_app("configmodule.TestingConfig")
        self.app.testing = True
        self.client = self.app.test_client()
        with self.app.app_context():
            db.drop_all()
            db.create_all()
    # Called after every test: drop the schema so tests stay independent.
    def tearDown(self):
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
    def test_user(self):
        # Create user; the response must not leak the password.
        rv = self.client.post(
            "/api/users/",
            data=json.dumps({"email": "test@test.se", "password": "abc123"}),
        )
        rv_dict = json.loads(rv.data.decode())
        assert rv.status_code == 200
        assert rv_dict["id"] == 1
        assert "password" not in rv_dict
        assert rv_dict["email"] == "test@test.se"
        # Login with wrong password -> 401
        rv = self.client.post("/api/users/login", data=json.dumps({"email": "test@test.se", "password": "abc1234"}))
        assert rv.status_code == 401
        # Login with wrong email -> 401
        rv = self.client.post("/api/users/login", data=json.dumps({"email": "test1@test.se", "password": "abc1234"}))
        assert rv.status_code == 401
        # Login with correct credentials -> 200 plus an access token
        rv = self.client.post("/api/users/login", data=json.dumps({"email": "test@test.se", "password": "abc123"}))
        rv_dict = json.loads(rv.data.decode())
        assert rv.status_code == 200
        headers = {"Authorization": "Bearer " + rv_dict["access_token"]}
        # Fetch the current (authenticated) user
        rv = self.client.get("/api/users/", headers=headers)
        rv_dict = json.loads(rv.data.decode())
        assert rv.status_code == 200
        assert rv_dict["email"] == "test@test.se"
        # Update the name; the API is expected to title-case it.
        rv = self.client.put("/api/users/", data=json.dumps({"name": "carl carlsson"}), headers=headers)
        rv_dict = json.loads(rv.data.decode())
        assert rv.status_code == 200
        assert rv_dict["name"] == "Carl Carlsson"
    def test_empty(self):
        # Login attempt without any users in the database -> 401
        rv = self.client.post("/api/users/login", data=json.dumps({"email": "test@test.se", "password": "abc123"}))
        assert rv.status_code == 401
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
7,448 | ae5ec7919b9de4fbf578547c31837add32826f60 |
class Graph:
    """Directed graph over integer vertices, backed by an adjacency list."""

    def __init__(self, num_vertices):
        # One empty outgoing-edge list per vertex 0 .. num_vertices-1.
        self.adj_list = {vertex: [] for vertex in range(num_vertices)}

    def add_vertice(self, source):
        # (Re)register source with no outgoing edges.
        self.adj_list[source] = []

    def add_edge(self, source, dest):
        # Directed edge source -> dest (duplicates allowed).
        self.adj_list[source].append(dest)

    def print_graph(self):
        print(self.adj_list)
        print(self.adj_list.keys())

    def topo_order(self):
        """Print one topological ordering (reversed DFS post-order)."""
        seen = set()
        order = []

        def visit(vertex):
            seen.add(vertex)
            for neighbour in self.adj_list[vertex]:
                if neighbour not in seen:
                    visit(neighbour)
            order.append(vertex)

        for vertex in self.adj_list.keys():
            if vertex not in seen:
                visit(vertex)
        order.reverse()
        print(order)
# Demo: build a 6-vertex DAG and print its adjacency list and one
# topological ordering.
g = Graph(6)
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(2, 3)
g.add_edge(3, 1)
g.add_edge(4, 1)
g.add_edge(4, 0)
g.print_graph()
g.topo_order()
|
7,449 | 885fd32c9520dfdc2becd6b1a3d0c0f5f5397112 | from setuptools import setup, find_packages
# Packaging metadata for the `champ` channel-modeling library.
setup(
    name="champ",
    version="0.0.1",
    description='Channel modeling in Python',
    url='https://github.com/sgherbst/champ',
    author='Steven Herbst',
    author_email='sherbst@stanford.edu',
    packages=['champ'],
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies (scikit-rf provides the RF network tooling).
    install_requires=[
        'numpy',
        'scipy',
        'matplotlib',
        'mpltools',
        'scikit-rf'
    ]
)
|
7,450 | 7c4709eaa5123b44e6355c6a60932f286e3b1cf5 | #!/usr/bin/env python
#-*- coding:utf8 -*-
# Power by null 2018-09-19 18:41:17
from codebase.mod.mod_test import test_f
|
7,451 | e38149f0d421a43f6aa34a977eee89fe29021b85 | #!/usr/bin/python
# This IDAPython code can be used to de-obfuscate strings generated by
# CryptoWall version 3, as well as any other malware samples that make use of
# this technique.
'''
Example disassembly:
.text:00403EC8 mov ecx, 'V'
.text:00403ECD mov [ebp+var_1C], cx
.text:00403ED1 mov edx, 'e'
.text:00403ED6 mov [ebp+var_1A], dx
.text:00403EDA mov eax, 'r'
.text:00403EDF mov [ebp+var_18], ax
.text:00403EE3 mov ecx, 's'
.text:00403EE8 mov [ebp+var_16], cx
.text:00403EEC mov edx, 'i'
.text:00403EF1 mov [ebp+var_14], dx
.text:00403EF5 mov eax, 'o'
.text:00403EFA mov [ebp+var_12], ax
.text:00403EFE mov ecx, 'n'
'''
pos = here()            # start at the cursor position inside IDA
original_pos = pos      # first instruction of the current string build
out = ""                # characters recovered so far
while True:
    # `mov e?x, imm` loads one character of the stack-built string.
    if GetMnem(pos) == "mov" and GetOpnd(pos, 0)[0] == "e" and GetOpnd(pos, 0)[2] == "x":
        out += chr(GetOperandValue(pos,1))
    # The paired `mov [ebp+var_x], ?x` store carries no new data -- skip it.
    elif GetMnem(pos) == "mov" and "[ebp" in GetOpnd(pos, 0):
        None
    # An xor marks the end of one string build: annotate and start over.
    elif GetMnem(pos) == "xor":
        MakeComm(original_pos, out)
        print "Making String: %s" % out
        out = ""
        original_pos = pos
    # Any other instruction terminates the scan.
    else:
        break
    pos = NextHead(pos)
|
7,452 | 1c1cd0eeea4dbf446aa4582f42ef1f3b5a4e8875 | # Generated by Django 3.2.2 on 2021-05-11 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: switch Event.start/Event.end to
    auto_now_add timestamps.  Do not hand-edit generated migrations beyond
    what `makemigrations` produced."""
    dependencies = [
        ('meeting', '0004_auto_20210511_0947'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='end',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='start',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
7,453 | 862c5794a4da794678de419f053ae15b11bca6e7 | class GameOfLife:
@staticmethod
def simulate(board):
for row in range(len(board)):
for col in range(len(board[0])):
ones = GameOfLife.countOnes(board, row, col)
if board[row][col] and (ones == 2 or ones == 3):
board[row][col] |= 2
elif not board[row][col] and ones == 3:
board[row][col] |= 2
for row in range(len(board)):
for col in range(len(board[0])):
board[row][col] >>= 1
@staticmethod
def countOnes(board, row, col):
total = 0
total += GameOfLife.isOne(board, row - 1, col - 1)
total += GameOfLife.isOne(board, row - 1, col)
total += GameOfLife.isOne(board, row - 1, col + 1)
total += GameOfLife.isOne(board, row, col - 1)
total += GameOfLife.isOne(board, row, col + 1)
total += GameOfLife.isOne(board, row + 1, col - 1)
total += GameOfLife.isOne(board, row + 1, col)
total += GameOfLife.isOne(board, row + 1, col + 1)
return total
@staticmethod
def isOne(board, row, col):
if row >= len(board) or row < 0:
return 0
if col >= len(board) or col < 0:
return 0
return board[row][col] & 1
|
7,454 | 445ae195edfe9fe9ee58c6c5a14ec787719d698c |
def get_ecgs_by_query(json_data, query):
    """Return the ids of every case in *json_data* accepted by *query*.

    *query* must expose ``is_query_ok(case_record) -> bool``.  Each case id
    is printed as a progress trace while scanning.
    """
    matching_ids = []
    for case_id, case_record in json_data.items():
        print(case_id)
        if query.is_query_ok(case_record):
            matching_ids.append(case_id)
    return matching_ids
def save_new_dataset_by_ids(old_json, ecg_ids_to_save, name_new_dataset):
    """
    Saves json only with selected (by id) patients.
    :param old_json: initial dataset dict
    :param ecg_ids_to_save: which patients we want to keep for the new dataset
    :param name_new_dataset: name of file, string, ends with .json
    :return: None (writes the filtered dataset to disk)
    """
    import json
    import os
    from settings import PATH_TO_METADATASETS_FOLDER
    # Keep only the selected patients.
    new_json_data = {ecg_id: record for ecg_id, record in old_json.items()
                     if ecg_id in ecg_ids_to_save}
    # os.path.join instead of the original hard-coded "\\" separator, so
    # the path is built correctly on every OS, not only Windows.
    result_file_path = os.path.join(PATH_TO_METADATASETS_FOLDER, name_new_dataset)
    with open(result_file_path, 'w') as outfile:
        json.dump(new_json_data, outfile)
|
7,455 | 9540319cf192add1fb24375a35d70ea8e3031a72 | __author__ = 'aniket'
import freenect
import cv2
import numpy as np
# BUG FIX: the original read `nfrp.ones(...)` -- `nfrp` is undefined and is
# a typo for the numpy alias `np`.  A stray bare `freenect.C` attribute
# access on the next line (dead, incomplete code) is removed as well.
kernel = np.ones((5, 5), np.uint8)
def grayscale():
    """Grab one Kinect RGB frame and black out pixels whose depth is
    unknown (reported as 0) or farther than 1000 mm.

    NOTE(review): despite the name, the returned image is 3-channel RGB,
    not greyscale.
    """
    maske = np.zeros((480,640,3))  # unused -- presumably leftover scratch; verify before removing
    a = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
    mask = a == 0        # pixels with no depth reading
    a[mask] = 8000       # push unknown depth beyond the cut-off
    mask1 = a > 1000     # everything farther than 1 m
    b = freenect.sync_get_video()[0]
    ab = cv2.cvtColor(b, cv2.COLOR_BGR2RGB)
    ab[mask1,:] = 0      # black out far/unknown pixels
    return ab
# Show the masked feed until any key is pressed.
while(True):
    cv2.imshow('gray',grayscale())
    #cv2.imshow('color',colored())
    if cv2.waitKey(1) != -1:
        break
cv2.destroyAllWindows()
|
7,456 | afd184962e8e69843ca518e140d5fdde3d7c9ed2 | from django.views.generic import TemplateView, FormView, CreateView, ListView
from .models import Order
from .form import OrderForm
class OrdersListView(ListView):
    """List every Order; template receives the queryset as `order_list`."""
    template_name = 'orders/index.html'
    queryset = Order.objects.all()
    context_object_name = 'order_list'
class OrderCreateView(CreateView):
    """Render/process the OrderForm; redirect to the index on success."""
    template_name = 'orders/form.html'
    form_class = OrderForm
    success_url = '/'
|
7,457 | 605d8144d18207314981872ec57cec6cb2510601 | # def qs(li):
# n = len(li)
# if n <= 1:
# return li
# pivot = li[n - 1]
# left = []
# right = []
# for i in li[:n - 1]:
# if i <= pivot:
# left.append(i)
# else:
# right.append(i)
# left = qs(left)
# right = qs(right)
# return left + [pivot] + right
def qs(li):
n, p = len(li), len(li)//2 - 1
if n <= 1:
return li
# Demo runs: mixed values, duplicates, and a binary 0/1 input.
print(qs([11, 45, 23, 81, 28, 34]))
print(qs([11, 45, 22, 81, 23, 34, 99, 22, 17, 8]))
print(qs([1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))
|
7,458 | 3222dd7c2d19d86f2e085cb489ab4a48307ba132 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test1.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated by pyuic5 from test1.ui -- do not edit by hand; regenerate
    from the .ui file instead (manual changes will be lost on regeneration).
    """
    def setupUi(self, Dialog):
        # Widget tree: an OK button, a line edit with its "Name" label, and
        # an initially-empty bold label for the result text.
        Dialog.setObjectName("Dialog")
        Dialog.resize(670, 483)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(190, 240, 101, 31))
        self.pushButton.setObjectName("pushButton")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(260, 150, 201, 41))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(100, 330, 421, 91))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        self.label_1 = QtWidgets.QLabel(Dialog)
        self.label_1.setGeometry(QtCore.QRect(90, 150, 81, 41))
        self.label_1.setObjectName("label_1")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Apply all translatable strings via Qt's translation machinery.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "OK"))
        self.label_1.setText(_translate("Dialog", "Name"))
|
7,459 | 45b46a08d8b304ac12baf34e0916b249b560418f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
from app import Node
from dbm2 import filemanager
# Module-level singletons: persistence layer, blockchain node, HTTP app.
fm = filemanager()
node = Node(fm)
app = Flask(__name__)
# GET: whether the pending-transaction pool is full.
@app.route("/transactions/isfull",methods=['GET'])
def isFull():
    return jsonify(node.isFull()), 200
# POST: validate a transaction against the (validated) chain; echoes the
# transaction back on success, JSON `false` otherwise (both 200).
@app.route("/transactions/new",methods=["POST"])
def newTransaction():
    transaction = request.get_json()
    if node.isValidTxn(node.isValidChain(),transaction):
        return transaction, 200
    else:
        return jsonify(False), 200
# GET: the most recent block.
@app.route("/chain/last",methods=["GET"])
def last_block():
    return jsonify(node.getLastBlock()), 200
# GET: the full chain.
@app.route("/chain",methods=["GET"])
def get_chain():
    return jsonify(node.chain), 200
# POST: register up to 10 peer-node addresses; responds with which were
# added and which were rejected.  Non-list payloads get a 400.
@app.route("/pnodes/register",methods=["POST"])
def register_pnodes():
    nodes = request.get_json()
    print(nodes)
    if type(nodes)==list:
        if len(nodes)>10 and nodes!=[]:
            nodes = nodes[:10]
        s = [] #succeed
        f = [] #failed
        for addr in nodes:
            if node.addPNode(addr):
                s.append(addr)
            else:
                f.append(addr)
        resp = {"Added PNodes":s,
                "Not added pnodes":f}
        return jsonify(resp), 200
    resp = {"Error":"Input format error"}
    return jsonify(resp), 400
# GET: number of registered peer nodes.
@app.route("/pnodes/size",methods=["GET"])
def pnodes_size():
    return jsonify(len(node.pnodes)), 200
# GET: the known node addresses.
@app.route("/nodes",methods=["GET"])
def get_nodes():
    nodes = list(node.nodes)
    return jsonify(nodes), 200
# GET: ask the node to discover peers; 400 with a plain-text body when
# nothing new was added.
@app.route("/nodes/resolve",methods=["GET"])
def resolve_nodes():
    added_nodes = node.resolveNodes()
    if added_nodes:
        return jsonify(added_nodes), 200
    else:
        return "0 nodes added",400
# GET: run the consensus/conflict-resolution algorithm.
@app.route("/chain/resolve",methods=["GET"])
def resolve_chain():
    r = node.resolveConflicts()
    if r:
        return jsonify(r), 200
    else:
        print("Nothing")
        return jsonify(r), 400
# GET: mine one block and return it.
@app.route("/mine",methods=["GET"])
def mine():
    mb = node.mine()
    resp = {"Mined_block":mb}
    return jsonify(resp), 200
# CLI entry point: optional -p/--port overrides the node's default port;
# host "" binds on all interfaces.
if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-p","--port",default=node.DEFAULT_PORT,type=int,help='port to listen on')
    args = parser.parse_args()
    port = args.port
    node.port=port
    app.run(host="",port=port)
|
7,460 | 2e9d71b8055e1bab107cedae69ca3bc4219e7d38 | import joblib
import os
import shutil
import re
from scipy import stats
from functools import partial
import pandas as pd
from multiprocessing import Process, Pool
from nilearn import masking, image
import nibabel as nib
import numpy as np
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
sns.set()
def get_paths(debug, dataset):
    """Resolve the (code dir, data dir, output "sink" dir) triple for *dataset*.

    debug=True  -> running locally: paths relative to the CWD and $HOME.
    debug=False -> running inside the container: code lives under /code.
    Datasets with no on-disk data here (BOSTON, the freesurfer CSV sets)
    return None for data and/or sink.

    :raises ValueError: for unhandled debug/dataset combinations (note that
        debug=True with 'freesurf_combined' also falls through to here).
    """
    if debug and dataset == 'OASIS':
        project_wd = os.getcwd()
        project_data = os.path.join(project_wd, 'data')
        project_sink = os.path.join(project_data, 'output')
    elif debug and dataset == 'BANC':
        project_wd = os.getcwd()
        project_data = os.path.join(os.getenv('HOME'), 'NaN', 'BANC_2016')
        project_sink = os.path.join(project_data, 'output')
    elif debug and dataset == 'BOSTON':
        project_wd = os.getcwd()
        project_data = None
        project_sink = None
    elif debug and dataset == 'BANC_freesurf':
        project_wd = os.getcwd()
        project_data = os.path.join(os.getenv('HOME'), 'BayOptPy',
                                    'freesurfer_preprocess')
        project_sink = None
    elif debug and dataset == 'UKBIO_freesurf':
        project_wd = os.getcwd()
        project_data = os.path.join(os.getenv('HOME'), 'BayOptPy',
                                    'freesurfer_preprocess')
        project_sink = None
    elif not debug and dataset == 'OASIS':
        project_wd = '/code'
        project_data = os.path.join(os.sep, 'NaN', 'data')
        project_sink = os.path.join(project_data, 'output')
    elif not debug and dataset == 'BANC':
        project_wd = '/code'
        project_data = os.path.join(os.sep, 'data', 'NaN', 'BANC_2016')
        project_sink = os.path.join(project_data, 'output')
    elif not debug and dataset == 'BOSTON':
        project_wd = '/code'
        project_data = None
        project_sink = None
    # All three freesurfer variants share one container-side location.
    elif not debug and (dataset == 'BANC_freesurf' or
                        dataset == 'UKBIO_freesurf' or
                        dataset == 'freesurf_combined'
                        ):
        project_wd = '/code'
        project_data = os.path.join(os.sep, 'code', 'BayOptPy',
                                    'freesurfer_preprocess')
        project_sink = None
    else:
        raise ValueError('Analysis for this dataset is not yet implemented!')
    print('Code Path: %s' %project_wd)
    print('Data Path: %s' %project_data)
    print('Data Out: %s' %project_sink )
    return project_wd, project_data, project_sink
def get_output_path(model, analysis, ngen, random_seed, population_size, debug,
                    mutation, crossover, predicted_attribute):
    """Return (creating it if needed) the per-random-seed output directory
    nested inside the experiment-wide path from get_all_random_seed_paths.
    """
    base_path = get_all_random_seed_paths(model, analysis, ngen, population_size,
                                          debug, mutation, crossover,
                                          predicted_attribute)
    seed_dir = os.path.join(base_path, 'random_seed_%03d' % random_seed)
    os.makedirs(seed_dir, exist_ok=True)
    return seed_dir
def get_all_random_seed_paths(model, analysis, ngen, population_size, debug, mutation,
                              crossover, predicted_attribute):
    """Return (creating it if needed) the output directory shared by every
    random seed of one TPOT experiment.

    Layout: ``[/code/]BayOptPy/tpot_<model>/Output/<analysis>/
    <predicted_attribute>/...`` with analysis-specific tail components:
    generations only; population size then generations; or generations then
    the mutation/crossover rates.

    :raises IOError: for an unknown *analysis*.
    """
    simple_analyses = ('vanilla', 'feat_selec', 'feat_combi', 'vanilla_combi',
                       'random_seed', 'ukbio', 'summary_data', 'uniform_dist')
    if analysis in simple_analyses:
        tail = ['%03d_generations' % ngen]
    elif analysis == 'population':
        tail = ['%05d_population_size' % population_size,
                '%03d_generations' % ngen]
    elif analysis == 'mutation':
        tail = ['%03d_generations' % ngen,
                '%.01f_mut_%.01f_cross' % (mutation, crossover)]
    else:
        raise IOError('Analysis path not defined. Passed analysis was %s'
                      % analysis)
    # Inside the container code lives under /code; debug runs use a path
    # relative to the current working directory.
    root = [] if debug else [os.sep, 'code']
    output_path = os.path.join(*root, 'BayOptPy', 'tpot_%s' % model, 'Output',
                               analysis, predicted_attribute, *tail)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    return output_path
def get_uniform_dist_data(debug, dataset, resamplefactor, raw, analysis):
    """
    This function gets the original dataset and transforms it into a uniformly
    distributed dataset.

    It keeps at most 14 subjects per integer age and drops the ages that do
    not have 14 representatives, so every remaining age bin carries equal
    weight.  Returns the filtered (demographics, dataframe) pair.
    """
    project_wd, project_data, project_sink = get_paths(debug, dataset)
    demographics, imgs, dataframe = get_data(project_data, dataset,
                                             debug, project_wd,
                                             resamplefactor,
                                             raw=raw,
                                             analysis=analysis)
    # transform age into ints
    demographics['age_int'] = demographics['age'].astype('int32', copy=False)
    # Select 14 subjects for all ages that have 14 representatives.
    age_range = np.arange(demographics['age'].min(), demographics['age'].max())
    # remove entry where you don't have 14 subjects
    max_n = 14
    # Hand-curated list of under-represented ages (fewer than 14 subjects).
    age_to_remove = [35, 36, 39, 42, 78, 79, 80, 81, 82, 83, 85, 89]
    age_range = np.setdiff1d(age_range, age_to_remove)
    # iterate over the dataframe and select 14 subjects for each age range
    ids_to_use = []
    for age in age_range:
        ids_to_use.append(demographics.index[demographics['age_int'] ==
                                             age].tolist()[:max_n])
    # flatten ids_to_use
    ids_to_use = [item for sublist in ids_to_use for item in sublist]
    # Filter the demographics dataframe
    demographics = demographics[demographics.index.isin(ids_to_use)]
    # set subject's id as index
    # filter dataset using index of the subjects
    dataframe = dataframe.loc[demographics['id']]
    # Print some diagnosis
    print('Shape of the new demographics:')
    print(demographics.shape)
    print('Oldest %d and youngest %d subject' %(demographics['age_int'].max(),
                                                demographics['age_int'].min()))
    print('Number of age bins %d' %len(demographics['age_int'].unique()))
    return demographics, dataframe
def get_best_pipeline_paths(model, analysis, ngen, random_seed, population_size, debug,
                            mutation, crossover, predicted_attribute):
    """Return a fresh, empty ``checkpoint_folder`` inside the run's output
    directory.

    Any pre-existing checkpoint folder is deleted first so a new run never
    mixes its checkpoint files with those of a previous run.
    """
    run_dir = get_output_path(model, analysis, ngen, random_seed, population_size,
                              debug, mutation, crossover,
                              predicted_attribute)
    checkpoint_path = os.path.join(run_dir, 'checkpoint_folder')
    if os.path.exists(checkpoint_path):
        shutil.rmtree(checkpoint_path)
        print('Deleted pre-exiting checkpoint folder')
    os.makedirs(checkpoint_path)
    print('Creating checkpoint folder')
    return checkpoint_path
def drop_missing_features(dataframe):
    """Return *dataframe* without the freesurfer columns that are repeated
    or missing on the BIOBANK dataset, plus columns that are zero-valued or
    carry no biological meaning (surface holes, ventricles, choroid plexus,
    hypointensities, ...).
    """
    missing_features = (
        # Repeated or missing on the BIOBANK dataset
        'BrainSegVolNotVent', 'BrainSegVolNotVent.1', 'BrainSegVolNotVent.2',
        'eTIV', 'eTIV.1',
        # Zero-valued or biologically meaningless
        'SurfaceHoles', 'rhSurfaceHoles', 'lhSurfaceHoles',
        'BrainSegVolNotVentSurf', 'BrainSegVol', 'Optic-Chiasm',
        'Right-non-WM-hypointensities', 'Left-non-WM-hypointensities',
        'non-WM-hypointensities', 'Right-WM-hypointensities',
        'Left-WM-hypointensities', 'WM-hypointensities', '5th-Ventricle',
        'Right-choroid-plexus', 'Left-choroid-plexus',
        'Left-Lateral-Ventricle', 'Right-Lateral-Ventricle',
        'Left-Inf-Lat-Vent', 'Right-Inf-Lat-Vent',
    )
    return dataframe.drop(list(missing_features), axis=1)
def get_data_covariates(dataPath, rawsubjectsId, dataset):
    """Load demographic covariates for *dataset* and keep only subjects
    that appear both on disk (*rawsubjectsId*) and in the demographics CSV.

    For OASIS, subjects with dementia (CDR > 0) are additionally excluded.
    Returns (demographics_dataframe, selected_subject_ids).

    :raises ValueError: for datasets other than 'OASIS' and 'BANC'.
    """
    if dataset == 'OASIS':
        # Load the demographic details from the dataset
        demographics = pd.read_csv(os.path.join(dataPath, 'oasis_cross-sectional.csv'))
        # sort demographics by ascending id
        demographics = demographics.sort_values('ID')
        # Check if there is any subject for which we have the fmri data but no demographics
        missingsubjectsId = list(set(demographics['ID']) ^ set(rawsubjectsId))
        # remove the demographic data from the missing subjects
        demographics = demographics.loc[~demographics['ID'].isin(missingsubjectsId)]
        # list of subjects that do not have dementia (CDR > 0)
        selectedSubId = demographics.loc[(demographics['CDR'] == 0) | (demographics['CDR'].isnull()), 'ID']
        # filter demographics to exclude those with CDR > 0
        demographics = demographics.loc[demographics['ID'].isin(selectedSubId)]
    elif dataset == 'BANC':
        # Load the demographic details from the dataset
        column_names = ['ID', 'original_dataset', 'sex', 'Age']
        demographics = pd.read_csv(os.path.join(dataPath,'original_dataset',
                                                'BANC',
                                                'BANC_2016.csv'), names=column_names)
        # Check if there is any subject for which we have the fmri data but no demographics
        missingsubjectsId = list(set(demographics['ID']) ^ set(rawsubjectsId))
        # remove the demographic data from the missing subjects
        demographics = demographics.loc[~demographics['ID'].isin(missingsubjectsId)]
        selectedSubId = rawsubjectsId
    else:
        raise ValueError('Analysis for this dataset is not yet implemented!')
    # do some sanity checks
    # Check if you have the same number of selectedsubjectsid as the demographic information
    assert(len(selectedSubId) == len(demographics))
    return demographics, selectedSubId
def _multiprocessing_resample(img, target_affine):
    """Pool worker: resample *img* onto *target_affine* using
    nearest-neighbour interpolation (nilearn's image.resample_img)."""
    resampled_img = image.resample_img(img, target_affine=target_affine,
                                       interpolation='nearest')
    return resampled_img
def _load_nibabel(filePath):
    """Pool worker: open one NIfTI image proxy from *filePath*.

    The original wrapped the already-complete path in a single-argument
    os.path.join, which is a no-op; load the path directly.
    """
    return nib.load(filePath)
def get_config_dictionary():
    """Return a TPOT regressor configuration mirroring the "TPOT light"
    defaults, but with the preprocessing operators removed (regressors and
    feature selectors only).  Keys are import paths, values are the
    hyper-parameter grids TPOT may search over."""
    # Define the same default pipeline as TPOT light but without the preprocessing operators
    regressor_config_dic = {
        'sklearn.linear_model.ElasticNetCV': {
            'l1_ratio': np.arange(0.0, 1.01, 0.05),
            'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
        },
        'sklearn.tree.DecisionTreeRegressor': {
            'max_depth': range(1, 11),
            'min_samples_split': range(2, 21),
            'min_samples_leaf': range(1, 21)
        },
        'sklearn.neighbors.KNeighborsRegressor': {
            'n_neighbors': range(1, 101),
            'weights': ["uniform", "distance"],
            'p': [1, 2]
        },
        'sklearn.linear_model.LassoLarsCV': {
            'normalize': [True, False]
        },
        'sklearn.svm.LinearSVR': {
            'loss': ["epsilon_insensitive", "squared_epsilon_insensitive"],
            'dual': [True, False],
            'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
            'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
            'epsilon': [1e-4, 1e-3, 1e-2, 1e-1, 1.]
        },
        # RidgeCV is included with its default hyper-parameters only.
        'sklearn.linear_model.RidgeCV': {
        },
        # Selectors
        'sklearn.feature_selection.SelectFwe': {
            'alpha': np.arange(0, 0.05, 0.001),
            'score_func': {
                'sklearn.feature_selection.f_regression': None
            }
        },
        'sklearn.feature_selection.SelectPercentile': {
            'percentile': range(1, 100),
            'score_func': {
                'sklearn.feature_selection.f_regression': None
            }
        },
        'sklearn.feature_selection.VarianceThreshold': {
            'threshold': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
        }
    }
    return regressor_config_dic
def get_mean_age(df):
    """Print the mean +/- standard deviation of *df*'s 'Age' column."""
    print('Mean Age %.2f +- %.2f' % (df['Age'].mean(), df['Age'].std()))
def get_data(project_data, dataset, debug, project_wd, resamplefactor, raw,
             analysis):
    ''' Load the csv files (or nifti images) for the requested dataset.
    :param project_data: path to the folder with the data for this project
    :param dataset: dataset to analyse ('OASIS', 'BANC', 'BANC_freesurf',
    'UKBIO_freesurf' or 'freesurf_combined')
    :param debug: if True, dump the computed brain mask to disk
    :param project_wd: path to the project root directory
    :param resamplefactor: factor by which the voxel size is multiplied
    :param raw: Which type of freesurfer data should we analyse (the raw, where both
    datasets have not been matched or the not raw where the number of columns
    between dataset is the same)
    :param analysis: type of analysis; 'summary_data' selects the reduced
    summary-metric freesurfer files
    :return: demographics: DataFrame with the subjects' demographics
    :return: imgs: list of nibabel image proxies (None for freesurfer data)
    :return: dataframe/np.array: the numeric data (subjects x features)
    '''
    if dataset == 'freesurf_combined' and raw == True:
        # BUG FIX: a print() statement directly after this raise was
        # unreachable dead code and has been removed.
        raise ValueError('The combined analysis cannot use the raw dataset')
    elif dataset == 'OASIS':
        # remove the file end and get list of all used subjects
        fileList = os.listdir(project_data)
        rawsubjectsId = [re.sub(r'^smwc1(.*?)\_mpr-1_anon.nii$', '\\1', file) for file in fileList if file.endswith('.nii')]
        # TODO: Change this. For testing purpose select just the first 5 subjects
        #rawsubjectsId = rawsubjectsId[:25]
        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(project_data, rawsubjectsId, dataset)
        # print subjects mean age
        get_mean_age(demographics)
        # Load image proxies
        imgs = [nib.load(os.path.join(project_data, 'smwc1%s_mpr-1_anon.nii' %subject)) for subject in tqdm(selectedSubId)]
    elif dataset == 'BANC':
        # For now, performing analysis on White Matter.
        project_data_path = os.path.join(project_data, 'wm_data')
        # remove the file end and get list of all used subjects
        fileList = os.listdir(project_data_path)
        rawsubjectsId = [file[5:12] for file in fileList if file.endswith('.nii.gz')]
        # TODO: select only a set of 5 subjects
        # rawsubjectsId = rawsubjectsId[:5]
        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(project_data, rawsubjectsId, dataset)
        # print subjects mean age
        get_mean_age(demographics)
        # Get the file path of the selected subjects
        subjectsFile = [os.path.join(project_data_path, file) for file in fileList if file[5:12] in selectedSubId]
        # Load image proxies in parallel (nibabel load is I/O bound)
        with Pool() as p:
            imgs = list(tqdm(p.imap(_load_nibabel, subjectsFile), total=len(selectedSubId)))
    elif (dataset == 'BANC_freesurf' and raw==True):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'original_dataset',
                                               'BANC',
                                               'aparc_aseg_stats_BANC.csv'), delimiter=',', index_col=0)
        rawsubjectsId = freesurf_df.index
        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(project_data, rawsubjectsId, 'BANC')
        # return numpy array of the dataframe
        # Rename columns to maintain consistency withe ukbio
        demographics.rename(index=str, columns={'ID':'id', 'Age': 'age'}, inplace=True)
        return demographics, None, freesurf_df
    elif (dataset == 'UKBIO_freesurf' and raw==False and not
          analysis=='summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_UKBIO.csv'), delimiter=',')
        # Read the full matrix to get the demographics information
        ukbio_full_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                                 'freesurfer_preprocess',
                                                 'original_dataset',
                                                 'UKBIO',
                                                 'UKB_10k_FS_4844_combined.csv'),
                                    delimiter=',',
                                    index_col=False)
        demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        freesurf_df = freesurf_df.set_index('id')
        return demographics, None, freesurf_df
    elif (dataset == 'BANC_freesurf' and raw==False and not
          analysis=='summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_BANC.csv'), delimiter=',', index_col=0)
        rawsubjectsId = freesurf_df.index
        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(project_data, rawsubjectsId, 'BANC')
        # return numpy array of the dataframe
        # Rename columns to maintain consistency withe ukbio
        demographics.rename(index=str, columns={'ID':'id', 'Age': 'age'}, inplace=True)
        return demographics, None, freesurf_df
    elif (dataset == 'UKBIO_freesurf' and raw==True and not
          analysis=='summary_data'):
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'original_dataset',
                                               'UKBIO',
                                               'UKB_10k_FS_4844_combined.csv'), delimiter=',')
        freesurf_df = freesurf_df.drop(columns='id.4844')
        demographics = freesurf_df[['age', 'sex', 'id']].copy()
        freesurf_df = freesurf_df.set_index('id')
        return demographics, None, freesurf_df
    elif (dataset == 'UKBIO_freesurf' and raw==False and
          analysis=='summary_data'):
        # This dataset contains only 21 feature that represent summary metrics
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_UKBIO_summary.csv'), delimiter=',')
        # Read the full matrix to get the demographics information
        ukbio_full_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                                 'freesurfer_preprocess',
                                                 'original_dataset',
                                                 'UKBIO',
                                                 'UKB_10k_FS_4844_combined.csv'), delimiter=',')
        demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        return demographics, None, freesurf_df
    elif (dataset == 'BANC_freesurf' and raw==False and
          analysis=='summary_data'):
        # This dataset contains only 21 feature that represent summary metrics
        freesurf_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                               'freesurfer_preprocess',
                                               'matched_dataset',
                                               'aparc_aseg_BANC_summary.csv'),
                                  delimiter=',', index_col=0)
        rawsubjectsId = freesurf_df.index
        # Load the demographics for each subject
        demographics, selectedSubId = get_data_covariates(project_data, rawsubjectsId, 'BANC')
        # Rename columns to maintain consistency withe ukbio
        demographics.rename(index=str, columns={'ID':'id', 'Age': 'age'}, inplace=True)
        return demographics, None, freesurf_df
    elif (dataset == 'freesurf_combined'):
        ukbio_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                            'freesurfer_preprocess',
                                            'matched_dataset',
                                            'aparc_aseg_UKBIO.csv'),
                               delimiter=',', index_col=0)
        banc_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                           'freesurfer_preprocess',
                                           'matched_dataset',
                                           'aparc_aseg_BANC.csv'),
                              delimiter=',', index_col=0)
        ukbio_full_df = pd.read_csv(os.path.join(project_wd, 'BayOptPy',
                                                 'freesurfer_preprocess',
                                                 'original_dataset',
                                                 'UKBIO',
                                                 'UKB_10k_FS_4844_combined.csv'), delimiter=',')
        rawsubjectsId = banc_df.index
        # Load the demographics for each subject
        banc_demographics, selectedSubId = get_data_covariates(project_data,
                                                               rawsubjectsId,
                                                               'BANC')
        ukbio_demographics = ukbio_full_df[['age', 'sex', 'id']].copy()
        # Concatenate both freesurfeer datasets
        freesurfer_df = pd.concat([ukbio_df, banc_df])
        # Concatenate demographics information (Age and Sex)
        tmp = banc_demographics.drop('original_dataset', axis=1)
        tmp.rename(index=str, columns={'ID':'id', 'Age': 'age'}, inplace=True)
        # transform M/F into male/female
        tmp['sex'] = tmp['sex'].map({'F': 'female', 'M': 'male'})
        # Add column to specify dataset
        tmp['dataset'] = 'banc'
        ukbio_demographics['dataset'] = 'ukbio'
        demographics = pd.concat([ukbio_demographics, tmp], sort=False)
        # TODO: For now assume that the index in the BIOBANK correspond to th
        # Stratify subjects. Divide them into classes <30, 30<40, 40<50, 50<60,
        # 60<70, 70<80, 80<90, 90<100. Each age will be then further stratified
        # into F/M.
        bins = (17, 30, 40, 50, 60, 70, 80, 90)
        group_labels = range(1,len(bins))
        demographics['age_band'] = pd.cut(demographics['age'], bins,
                                          labels=group_labels)
        sex_age_group = demographics.groupby(['sex', 'age_band'])
        # Note that the following groups are created:
        # ('female', 1), ('female', 2), ('female', 3), ('female', 4), ('female', 5),
        # ('female', 6), ('female', 7), ('male', 1), ('male', 2), ('male', 3),
        # ('male', 4), ('male', 5), ('male', 6), ('male', 7)]
        # This will label the groups cited above in a crescent order. In total
        # you will have 1-14 groups, grouped according to their age and sex
        demographics['stratify'] = sex_age_group.grouper.group_info[0] + 1
        #same order between both fines
        return demographics, None, freesurfer_df
    else:
        raise ValueError('Analysis for this dataset is not yet implemented!')
    # --- Only the image-based branches (OASIS, BANC) reach this point ---
    print('Resample the dataset by a factor of %d' %resamplefactor)
    print('Original image size: %s' %(imgs[0].shape,))
    # resample dataset to a lower quality. Increase the voxel size by two
    resampleby2affine = np.array([[resamplefactor, 1, 1, 1],
                                  [1, resamplefactor, 1, 1],
                                  [1, 1, resamplefactor, 1],
                                  [1, 1, 1, 1]])
    target_affine = np.multiply(imgs[0].affine, resampleby2affine)
    print('Resampling Images')
    with Pool() as p:
        args = partial(_multiprocessing_resample, target_affine=target_affine)
        resampledimgs = list(tqdm(p.imap(args, imgs), total=len(imgs)))
    print('Resampled image size: %s' %(resampledimgs[0].shape,))
    # Use nilearn to mask only the brain voxels across subjects
    print('Compute brain mask')
    #The lower and the upper_cutoff represent the lower and the upper fraction of the histogram to be discarded
    MeanImgMask = masking.compute_multi_epi_mask(resampledimgs, lower_cutoff=0.001, upper_cutoff=.85, opening=False)
    # Apply the group mask on all subjects.
    # Note: The apply_mask function returns the flattened data as a numpy array
    maskedData = [masking.apply_mask(img, MeanImgMask) for img in resampledimgs]
    # If debug option is set, save an nifti image of the image.
    # Note: if you resampled the image you will not be able to overlay it on the original brain
    if debug:
        mask_path = os.path.join(project_wd, 'BayOptPy', 'tpot')
        print('Saving brain mask: %s' %mask_path)
        nib.save(MeanImgMask, os.path.join(mask_path, 'mask_%s.nii.gz' %dataset))
    print('Applied mask to the dataset')
    # Transform the imaging data into a np array (subjects x voxels)
    maskedData = np.array(maskedData)
    return demographics, imgs, maskedData
def get_mae_for_all_generations(dataset, random_seed, generations, config_dict,
                                tpot_path):
    '''
    Collect train/test MAE and pipeline complexity for every generation.

    Generations missing from the dump did not change the score, so their
    values are carried forward from the previous generation.
    :return: (all_mae_test, all_mae_train, pipeline_complexity)
    '''
    # Load the scores for the best models
    dump_name = 'tpot_%s_%s_%03dgen_pipelines.dump' % (dataset, config_dict,
                                                       generations)
    saved_path = os.path.join(tpot_path, 'random_seed_%03d' % random_seed,
                              dump_name)
    logbook = joblib.load(saved_path)
    log = logbook['log']
    recorded_gens = list(log.keys())
    print('There are %d optminal pipelines' % len(recorded_gens))
    print('These are the best pipelines')
    for recorded in recorded_gens:
        print(log[recorded]['pipeline_name'])

    # Walk over every generation; whenever the current generation has a
    # recorded entry, take its values, otherwise repeat the previous ones.
    all_mae_test = []
    all_mae_train = []
    pipeline_complexity = []
    cursor = 0
    for generation in range(generations):
        if generation == recorded_gens[cursor]:
            entry = log[recorded_gens[cursor]]
            all_mae_test.append(abs(entry['pipeline_test_mae']))
            all_mae_train.append(abs(entry['pipeline_score']))
            # complexity = number of steps in the sklearn pipeline
            pipeline_complexity.append(
                len(entry['pipeline_sklearn_obj'].named_steps.keys()))
            if len(recorded_gens) > 1 and len(recorded_gens) > cursor + 1:
                cursor += 1
        else:
            # no entry for this generation: repeat the same last value
            all_mae_test.append(all_mae_test[-1])
            all_mae_train.append(all_mae_train[-1])
            pipeline_complexity.append(pipeline_complexity[-1])
    # numpy array allows fancy indexing downstream
    return all_mae_test, all_mae_train, np.array(pipeline_complexity)
def set_publication_style():
    """Configure matplotlib/seaborn styling for publication figures."""
    # Paper-sized fonts, Times New Roman
    plt.style.use(['seaborn-white', 'seaborn-talk'])
    matplotlib.rc("font", family="Times New Roman")
    # White style without the top/right spines
    spine_config = {"axes.spines.top": False,
                    "axes.spines.right": False,
                    "axes.labelsize": 'large'}
    sns.set_style('white', spine_config)
def create_age_histogram(df, dataset, save_dir='/code/BayOptPy/tpot'):
    '''
    Plot and save the age histogram for the analysed sample.

    :param df: DataFrame with an 'age' column
    :param dataset: dataset name, used in the output file name
    :param save_dir: directory where the .eps figure is written; defaults to
        the previously hard-coded location for backward compatibility
    '''
    # Define plot style
    set_publication_style()
    plt.figure()
    path_to_save = os.path.join(save_dir, 'age_histogram_%s.eps' % dataset)
    min_age = df['age'].min()
    max_age = df['age'].max()
    plt.hist(df['age'], bins=65, range=(min_age, max_age))
    plt.xlabel('Age')
    plt.ylabel('# of Subjects')
    # NOTE: the previous plt.legend() call was dropped -- no artist carries a
    # label, so it only produced a "No handles with labels" warning.
    plt.savefig(path_to_save)
    plt.close()
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        title = ('Normalized confusion matrix' if normalize
                 else 'Confusion matrix, without normalization')
    # Compute confusion matrix and keep only the labels present in the data
    cm = confusion_matrix(y_true, y_pred)
    labels = [int(lab) for lab in unique_labels(y_true, y_pred)]
    classes = classes[labels]
    if normalize:
        # Row-normalize so every true class sums to 1
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    fig, ax = plt.subplots()
    image = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(image, ax=ax)
    # Show every tick, labelled with the class names
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate every cell, white text on dark cells for contrast
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for row, col in np.ndindex(cm.shape):
        cell_colour = "white" if cm[row, col] > thresh else "black"
        ax.text(col, row, format(cm[row, col], fmt),
                ha="center", va="center", color=cell_colour)
    fig.tight_layout()
    return ax, cm
def plot_confusion_matrix_boosting(cm_mean, cm_std,
                                   classes,
                                   title=None,
                                   cmap=plt.cm.Blues):
    """
    Plot a confusion matrix where every cell is annotated with its
    mean ± standard deviation across boosting runs.
    """
    fig, ax = plt.subplots()
    image = ax.imshow(cm_mean, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(image, ax=ax)
    # Show every tick, labelled with the class names
    ax.set(xticks=np.arange(cm_mean.shape[1]),
           yticks=np.arange(cm_mean.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate every cell, white text on dark cells for contrast
    cell_template = '{0:.2f} ± {1:.2f}'
    threshold = cm_mean.max() / 2.
    for row, col in np.ndindex(cm_mean.shape):
        cell_colour = "white" if cm_mean[row, col] > threshold else "black"
        ax.text(col, row,
                cell_template.format(cm_mean[row, col], cm_std[row, col]),
                ha="center", va="center", color=cell_colour)
    fig.tight_layout()
    return ax
def plot_predicted_vs_true(true_y, predicted_y, save_path, metric):
    """Scatter predicted vs. true values with the identity line and save."""
    fig = plt.figure()
    plt.scatter(true_y, predicted_y, alpha=.5)
    plt.ylabel('Predicted %s' % metric)
    plt.xlabel('True %s' % metric)
    # Dashed identity line for reference
    identity = np.arange(min(true_y), max(true_y))
    plt.plot(identity, identity, alpha=.3, linestyle='--', color='b')
    if metric == 'Age':
        # Ticks every 10 years over the combined true/predicted range
        lower = min(min(true_y), min(predicted_y))
        upper = max(max(true_y), max(predicted_y))
        plt.xticks(np.arange(lower, upper, step=10))
        plt.yticks(np.arange(lower, upper, step=10))
    plt.savefig(save_path)
    plt.close()
def load_cognitive_data(project_data):
    """Load the UK Biobank cognitive/BMI spreadsheet, indexed by subject ID."""
    csv_path = os.path.join(project_data, 'cog_ukbio', 'UKB_10k_cog_bmi.csv')
    return pd.read_csv(csv_path).set_index('ID')
def ttest_ind_corrected(performance_a, performance_b, k=10, r=10):
    """Corrected repeated k-fold cv test.
    The test assumes that the classifiers were evaluated using cross validation.
    The naive paired t-test underestimates the variance because the k*r
    estimates share training data; the corrected variance term is
    (1/(k*r) + 1/(k-1)) * sigma^2 (Nadeau & Bengio correction).
    Ref:
    Bouckaert, Remco R., and Eibe Frank. "Evaluating the replicability of significance tests for comparing learning
    algorithms." Pacific-Asia Conference on Knowledge Discovery and Data Mining. Springer, Berlin, Heidelberg, 2004
    Args:
        performance_a: performances from classifier A
        performance_b: performances from classifier B
        k: number of folds
        r: number of repetitions
    Returns:
        t: t-statistic of the corrected test.
        prob: p-value of the corrected test.
    """
    df = k * r - 1
    x = performance_a - performance_b
    m = np.mean(x)
    sigma_2 = np.var(x, ddof=1)
    # BUG FIX: '1 / k * r' parses as (1/k)*r == r/k; the corrected variance
    # term requires 1/(k*r).
    denom = np.sqrt((1 / (k * r) + 1 / (k - 1)) * sigma_2)
    # Guard against denom == 0 (identical performances) without warnings
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(m, denom)
    prob = stats.t.sf(np.abs(t), df) * 2
    return t, prob
|
7,461 | de7515cb71c8e30018b14baf8846648d0c76a592 | #!/usr/bin/env python
# Sanjaya Gajurel, Computational Scientist, Case Western Reserve University, April 2015
import vtk
# ------------------------------------------------------------------------------
# Script Entry Point
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    print("vtkGraph: Building a graph using Unstructured Grid & dumping it in a vtk file, vertex.vtu, to be visualized using ParaView")

    point_source = vtk.vtkPointSource()
    point_source.Update()

    # Vertex degrees, stored as an integer point-data array linked to the
    # vertex ids.
    vertex_degrees = [2, 1, 3, 3, 4, 2, 1]
    degree = vtk.vtkIntArray()
    degree.SetNumberOfComponents(1)
    degree.SetName("degree")
    degree.SetNumberOfTuples(len(vertex_degrees))
    for vertex_id, vertex_degree in enumerate(vertex_degrees):
        degree.SetValue(vertex_id, vertex_degree)
    point_source.GetOutput().GetPointData().AddArray(degree)

    # Vertex coordinates.
    coordinates = [(0, 1, 0), (0, 0, 0), (1, 1, 0), (1, 0, 0),
                   (2, 1, 0), (2, 0, 0), (3, 0, 0)]
    Points = vtk.vtkPoints()
    for xyz in coordinates:
        Points.InsertNextPoint(*xyz)

    # Edges as pairs of vertex ids, stored in a cell array.
    edges = [(0, 1), (0, 2), (2, 3), (2, 4),
             (3, 4), (3, 5), (4, 5), (4, 6)]
    line = vtk.vtkCellArray()
    line.Allocate(len(edges))
    for tail, head in edges:
        line.InsertNextCell(2)
        line.InsertCellPoint(tail)
        line.InsertCellPoint(head)

    # Assemble the vertices and edges into an unstructured grid.
    G = vtk.vtkUnstructuredGrid()
    G.GetPointData().SetScalars(degree)
    G.SetPoints(Points)
    G.SetCells(vtk.VTK_LINE, line)

    # Dump the graph in VTK XML unstructured format (.vtu).
    gw = vtk.vtkXMLUnstructuredGridWriter()
    gw.SetFileName("vertex.vtu")
    gw.SetInputData(G)
    gw.Write()
    print('---> ')
    print("Feed the vertex.vtu file in ParaView/VisIt.")
|
7,462 | cab45a823e319bd504b3db68cf70bff315f44fc6 | import random
import numpy as np
class Board:
    """A 2048-style sliding-tile game on an nrows x ncols numpy grid.

    Swipes pack tiles towards one edge, merging equal neighbours at most
    once per swipe; every merge adds the merged tile's value to the score.
    """

    def __init__(self, nrows, ncols, random_seed=42):
        self.nrows = nrows
        self.ncols = ncols
        self.random = random.Random()
        self.random.seed(random_seed)
        self.board = np.zeros((nrows, ncols))
        self.score = 0
        self.__add_new_numbers()

    # Initialize with 1/8 of the board filled, with 90% chance of filling
    # with 2, and 10% chance of filling with 4
    def __add_new_numbers(self):
        """Drop up to (nrows*ncols)//8 new tiles onto random empty cells."""
        num_zeros = (self.board == 0).sum()
        for i in range(min((self.nrows*self.ncols)//8, num_zeros)):
            random_row = self.random.randint(0, self.nrows-1)
            random_col = self.random.randint(0, self.ncols-1)
            # re-draw until an empty cell is found (terminates: the loop
            # never runs for more iterations than there are zeros)
            while self.board[random_row, random_col] != 0:
                random_row = self.random.randint(0, self.nrows-1)
                random_col = self.random.randint(0, self.ncols-1)
            if self.random.random() < 0.9:
                self.board[random_row, random_col] = 2
            else:
                self.board[random_row, random_col] = 4

    def __swap_on_board(self, pos1, pos2):
        """Swap the tiles at two (row, col) positions."""
        val = self.board[pos1]
        self.board[pos1] = self.board[pos2]
        self.board[pos2] = val

    def __left_swipe_on_row(self, row_index):
        """Pack row `row_index` to the left and merge equal neighbours.

        Returns True when any tile moved or merged.
        """
        left_index = 0
        collapsed = False
        # Pass 1: slide every tile left over the gaps
        for i in range(self.ncols):
            if self.board[row_index, i] != 0:
                if left_index != i:
                    collapsed = True
                self.__swap_on_board((row_index, left_index), (row_index, i))
                left_index += 1
        # Pass 2: merge adjacent equal tiles (left one doubles).
        # BUG FIX: empty cells must be skipped -- previously two adjacent
        # zeros compared equal, so any row with two trailing gaps reported a
        # move (and broke the game-over detection in play()).
        for i in range(1, self.ncols):
            if (self.board[row_index, i] != 0 and
                    self.board[row_index, i] == self.board[row_index, i-1]):
                self.board[row_index, i-1] *= 2
                self.board[row_index, i] = 0
                collapsed = True
                self.score += self.board[row_index, i-1]
        # Pass 3: close the gaps opened by the merges
        left_index = 0
        for i in range(self.ncols):
            if self.board[row_index, i] != 0:
                self.__swap_on_board((row_index, left_index), (row_index, i))
                left_index += 1
        return collapsed

    def __up_swipe_on_col(self, col_index):
        """Pack column `col_index` upwards and merge equal neighbours.

        Returns True when any tile moved or merged.
        """
        top_index = 0
        collapsed = False
        # Pass 1: slide every tile up over the gaps
        for i in range(self.nrows):
            if self.board[i, col_index] != 0:
                if top_index != i:
                    collapsed = True
                self.__swap_on_board((top_index, col_index), (i, col_index))
                top_index += 1
        # Pass 2: merge adjacent equal tiles (upper one doubles).
        # BUG FIX: skip empty cells, see __left_swipe_on_row.
        for i in range(1, self.nrows):
            if (self.board[i, col_index] != 0 and
                    self.board[i, col_index] == self.board[i-1, col_index]):
                self.board[i-1, col_index] *= 2
                self.board[i, col_index] = 0
                collapsed = True
                self.score += self.board[i-1, col_index]
        # Pass 3: close the gaps opened by the merges
        top_index = 0
        for i in range(self.nrows):
            if self.board[i, col_index] != 0:
                self.__swap_on_board((top_index, col_index), (i, col_index))
                top_index += 1
        return collapsed

    def __left_swipe(self):
        """Left-swipe every row; True when anything changed."""
        collapsed = False
        for i in range(self.nrows):
            if self.__left_swipe_on_row(i):
                collapsed = True
        return collapsed

    def __right_swipe(self):
        """Right swipe = mirror each row, left-swipe it, mirror back."""
        collapsed = False
        for i in range(self.nrows):
            for j in range(self.ncols//2):
                self.__swap_on_board((i, j), (i, -j-1))
            if self.__left_swipe_on_row(i):
                collapsed = True
            for j in range(self.ncols//2):
                self.__swap_on_board((i, j), (i, -j-1))
        return collapsed

    def __up_swipe(self):
        """Up-swipe every column; True when anything changed."""
        collapsed = False
        for i in range(self.ncols):
            if self.__up_swipe_on_col(i):
                collapsed = True
        return collapsed

    def __down_swipe(self):
        """Down swipe = flip each column, up-swipe it, flip back."""
        collapsed = False
        for i in range(self.ncols):
            for j in range(self.nrows//2):
                self.__swap_on_board((j, i), (-j-1, i))
            if self.__up_swipe_on_col(i):
                collapsed = True
            for j in range(self.nrows//2):
                self.__swap_on_board((j, i), (-j-1, i))
        return collapsed

    def __display(self):
        """Print the board and the current score."""
        print(self.board)
        print(f"Current score: {self.score}")

    def reset(self):
        """Clear the board and reset the score to zero."""
        self.score = 0
        for i in range(self.nrows):
            for j in range(self.ncols):
                self.board[i, j] = 0

    def play(self):
        """Run an interactive WASD game loop; returns the final score."""
        moves = [self.__up_swipe, self.__left_swipe, self.__down_swipe, self.__right_swipe]
        movement_mapping = {char: moves[pos] for pos, char in enumerate('WASD')}
        board_copy = Board(self.nrows, self.ncols)
        lost = False
        while self.board.max() < 2048:
            self.__display()
            raw_input_value = input("Play with WASD: ").upper()
            while len(raw_input_value) == 0:
                raw_input_value = input("Play with WASD: ").upper()
            direction = raw_input_value[0]
            while direction not in movement_mapping:
                raw_input_value = input("Play with WASD: ").upper()
                while len(raw_input_value) == 0:
                    raw_input_value = input("Play with WASD: ").upper()
                direction = raw_input_value[0]
            did_move = movement_mapping[direction]()
            if did_move:
                self.__add_new_numbers()
                # TODO: Make this more efficient
                # BUG FIX: the game previously probed only up/left swipes,
                # with the left probe running on the already up-swiped copy.
                # A position can be stuck up/left but still movable
                # down/right, so probe all four directions on fresh copies.
                can_move = False
                probes = (board_copy.__up_swipe, board_copy.__left_swipe,
                          board_copy.__down_swipe, board_copy.__right_swipe)
                for probe in probes:
                    board_copy.board = self.board.copy()
                    if probe():
                        can_move = True
                        break
                if not can_move:
                    lost = True
                    break
            else:
                print(f"'{direction}'" + ' is an INVALID MOVE')
        if not lost:
            print('GAME WON')
        else:
            print('GAME LOST')
        self.__display()
        output = self.score
        self.reset()
        return output
if __name__ == "__main__":
b = Board(4, 4)
b.play() |
7,463 | b38c9357030b2eac8298743cfb4d6c4d58c99ed4 | import redis
# Smoke-test basic redis counter operations against a local server.
client = redis.StrictRedis()
client.set("counter", 40)
print(client.get("counter"))
print(client.incr("counter"))
print(client.incr("counter"))
print(client.get("counter"))
7,464 | abe53120a485f608431142c6b9452666fcd72dbf | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 17:05:12 2018
@author: Shane
"""
import math
import scipy.integrate as integrate
import random
import numpy as np
import sympy as sym
'''
Question 1
plug and play into formula for VC generalization
'''
print('Question 1')
error = 0.05
# Sample sizes from 400k to 480k in 20k steps; the growth function is
# bounded by (2N)^10 here.
for N in range(400000, 480001, 20000):
    print(4* (2*N)**10 * math.exp(-(1/8)*error**2 * N))
"""
Question 2
evaluate the formulae with given quantities, then compare
mH(N) is bounded by dVC, so we substitute
"""
print('Question 2')
# d = VC dimension, sigma = confidence parameter delta, N = sample size
d=50
sigma=.05
N=10000
# Bound 1: sqrt(8/N * ln(4*(2N)^d / delta))
print(math.sqrt((8/N)*math.log((4*(2*N)**d) /sigma)))
# Bound 2: sum of three separate terms (two square roots plus 1/N)
print(math.sqrt((2/N)*math.log((2*N*(N)**d))) + math.sqrt((2/N)*math.log(1/sigma)) + 1/N)
# Bound 3 has epsilon on both sides, so solve for it symbolically
e = sym.Symbol('e')
f = sym.Eq(sym.sqrt((1/N) * (2*e + sym.log(6/sigma * (2*N)**d))),e)
print(sym.solve(f, e))
#bound4 cannot calculate with exact fidelity very easily, so we get an approximation
# scan epsilon upwards in 1e-6 steps until it exceeds the bound's right side
test_range = 1000000
for i in range(test_range):
    e = i/test_range
    if e < math.sqrt(1/(2*N) * (4*e*(1+e) + math.log(4/sigma) + d*math.log(N**2))):
        continue
    else:
        print(e)
        break
"""
Question 3
same as question 2 but with different N
however, since mH(N) is not bounded by dVC, we cannot substitute and instead the growuth function should be 2**N
"""
print('Question 3')
# Same bounds as Question 2 but N = 5 and the growth function is 2**N
d=50
sigma=.05
N=5
# Bound 1
print(math.sqrt((8/N)*math.log((4*2**(2*N)) /sigma)))
# Bound 2.
# BUG FIX: a misplaced parenthesis nested the second sqrt term and the 1/N
# term inside the first sqrt; the bound is a sum of three terms, matching
# the structure used in Question 2.
print(math.sqrt((2/N)*math.log((2*N*2**N))) + math.sqrt((2/N)*math.log(1/sigma)) + 1/N)
# Bound 3 has epsilon on both sides, so solve for it symbolically
e = sym.Symbol('e')
f = sym.Eq(sym.sqrt((1/N) * (2*e + sym.log(6/sigma * 2**(2*N)))),e)
print(sym.solve(f, e))
#bound4 cannot calculate with exact fidelity very easily, so we get an approximation
# scan epsilon upwards in 1e-6 steps until it exceeds the bound's right side
test_range = 1000000
for i in range(test_range):
    e = i/test_range
    if e < math.sqrt(1/(2*N) * (4*e*(1+e) + math.log(4/sigma) + math.log(2**N**2))):
        continue
    else:
        print(e)
        break
"""
Question 4
For a given sine curve, we can select 2 points (c,d) between -1 and 1. It seems like the solution is to evaluate it experimentally rather than mathematically, so will run it over a number of iterations
"""
print('Question 4')
# Monte Carlo estimate: fit h(x) = a*x through two random points of
# sin(pi*x) and average the fitted slopes over many runs.
nruns = 1000
a = []
for run in range(nruns):
    # two sample points drawn uniformly from [-1, 1]
    c = random.uniform(-1,1)
    d = random.uniform(-1,1)
    #solve for best slope
    # least squares via the normal equations: beta = y X' (X X')^-1
    # NOTE(review): np.matrix is deprecated in modern numpy -- consider
    # np.ndarray with @ if this script is ever modernised.
    y = np.array([math.sin(math.pi*c), math.sin(math.pi*d)])
    X = np.matrix([c, d])
    betas = y * X.T * np.linalg.inv(X*X.T)
    a.append(betas[[0]])
# a_bar = average fitted slope over all runs
a_bar = np.average(a)
print(a_bar)
"""
Question 5
To calculate the bias, need to evaluate the sum of squared difference (assuming this cost function) over the domain
"""
print('Question 5')
# Bias: squared difference between the average hypothesis a_bar*x and the
# target sin(pi*x); the /2 is the uniform density on [-1, 1], so the
# integral is an expectation over x.
g = lambda x: ((a_bar*x - math.sin(math.pi*x))**2)/2
print(integrate.quad(g, -1, 1))
"""
Question 6
To calculate the variance, can evaluate experimentally using the a that we have generated
"""
print('Question 6')
variance = []
for slope in a:
    #evaluate variance on one point set of data
    # squared difference between this run's hypothesis and the average one,
    # again weighted by the uniform density 1/2 on [-1, 1]
    h = lambda x: ((slope*x - a_bar*x)**2)/2
    variance.append(integrate.quad(h, -1, 1)[0])
print(np.average(variance))
"""
Question 7
We can do this empirically again, as evaluating it explicitly for two points is quite challenging to get the correct expectations
"""
print('Question 7')
# Compare the out-of-sample error of five hypothesis sets fitted to two
# random points of sin(pi*x), averaged over many runs.
errors = []
for run in range(nruns):
    c = random.uniform(-1,1)
    d = random.uniform(-1,1)
    #solve for best slope using different equations
    y = np.array([math.sin(math.pi*c), math.sin(math.pi*d)])
    # Design matrices for the five hypothesis sets:
    # h(x)=b, h(x)=a*x, h(x)=b+a*x, h(x)=a*x^2, h(x)=b+a*x^2
    X1 = np.matrix([1, 1])
    X2 = np.matrix([c, d])
    X3 = np.matrix([[1, c], [1, d]])
    X4 = np.matrix([c**2, d**2])
    X5 = np.matrix([[1, c**2], [1, d**2]])
    #calculate betas
    # least squares via the normal equations: beta = y X' (X X')^-1
    beta1 = y * X1.T * np.linalg.inv(X1*X1.T)
    beta2 = y * X2.T * np.linalg.inv(X2*X2.T)
    beta3 = y * X3.T * np.linalg.inv(X3*X3.T)
    beta4 = y * X4.T * np.linalg.inv(X4*X4.T)
    beta5 = y * X5.T * np.linalg.inv(X5*X5.T)
    #evaluate out of sample error for each hypothesis
    # squared error vs sin(pi*x), weighted by the uniform density 1/2
    f1 = lambda x: ((beta1[0] - math.sin(math.pi*x))**2)/2
    f2 = lambda x: ((beta2[0]*x - math.sin(math.pi*x))**2)/2
    f3 = lambda x: ((beta3[0,0] + beta3[0,1]*x - math.sin(math.pi*x))**2)/2
    f4 = lambda x: ((beta4*x**2 - math.sin(math.pi*x))**2)/2
    f5 = lambda x: ((beta5[0,0] + beta5[0,1]*x**2 - math.sin(math.pi*x))**2)/2
    error = []
    error.append(integrate.quad(f1, -1, 1)[0])
    error.append(integrate.quad(f2, -1, 1)[0])
    error.append(integrate.quad(f3, -1, 1)[0])
    error.append(integrate.quad(f4, -1, 1)[0])
    error.append(integrate.quad(f5, -1, 1)[0])
    errors.append(error)
# column-wise average: one mean out-of-sample error per hypothesis set
print(np.average(errors, 0))
"""
Question 8
We solve explicitly for Dvc by showing when mH(N) is no loinger equal to 2^N
"""
print('Question 8')
print('Observe that as long as q > N, the equality holds as the combinatorial term goes to 0')
print('Hence, once N = q, then the equality fails. This occurs for mH(N+1)')
print('The largest N where the equality holds is therefore q')
"""
Question 9
First, we evaluate which statements are true.
a) is true because the intersection of all the sets cannot be larger than the size of any given set
b) is true for the same reason, and therefore is tighter than a
c) is also true for this reason, but is looser than b
d) is false because we already established that the min is a tight upper bound
e) is false for the same reason
"""
print('Question 9')
print('b')
"""
Question 10
First, we evaluate which statements are true.
a) is false, because assuming the sets were completely distinct, the additional constant term in the VC dimension calcuation would cause the sum of the separate VCs to exceed the VC of the union (for example, the perceptron)
b) is true if we can assume that there is only ever a single constant term added in the VC analysis (not rigourously shown, but it is a result from the slides)
c) is false for the same reason as a
d) is false for the same reason as a
e) is true and tighter as the union must at least be the size of the largest set, and the VC dimension would follow accordingly
"""
print('Question 10')
print('e')
|
7,465 | 8f9d823785d42d02a0a3d901d66b46a5cd59cdd7 | import json
import glob
import sys
# Shard name prefix comes from the command line.
searchAreaName = sys.argv[1]
# searchAreaName = "slovenia_177sqkm_shards/20161220-162010-c9e0/slovenia_177sqkm_predicted/predict_slovenia_177sqkm_shard"
# Collect every two-digit shard prediction file for this search area.
shard_pattern = './{0}_??.txt'.format(searchAreaName)
print(shard_pattern)
all_predicts = glob.glob(shard_pattern)
def getBboxes(bboxes):
    """Keep only the bounding boxes whose coordinates are not all zero."""
    non_empty = []
    for candidate in bboxes:
        if sum(candidate) > 0.0:
            non_empty.append(candidate)
    return non_empty
print(all_predicts)
# Merge every shard's non-empty bbox lists into a single mapping.
bboxes = {}
for predict_file in all_predicts:
    with open(predict_file) as json_data:
        shard = json.load(json_data)
    for key, value in shard["outputs"].items():
        bbox_list = value["bbox-list"]
        # a first box of all zeros marks an empty detection result
        if sum(bbox_list[0]) > 0.0:
            bboxes[key] = getBboxes(bbox_list)
#print outputs
summary_name = '{0}_summary.json'.format(searchAreaName)
with open(summary_name, 'w') as fp:
    json.dump(bboxes, fp, indent=2)
print("wrote to {0}_summary.json".format(searchAreaName))
|
7,466 | e899b093152ee0923f1e5ad3b5719bbf9eb4339c | from .login import LoginTask
from .tag_search import TagSearchTask
from .timeline import TimelineTask
from .get_follower import GetFollowerTask
from .followback import FollowBackTask
from .unfollow import UnFollowTask
|
7,467 | 64d955d568a6bfec50aad36c9c4f1e36998e4d74 | import csv
import boto3
import pytz
import time
from datetime import datetime, timedelta
# current_time = int(datetime.now())
from boto3.dynamodb.conditions import Key, Attr
def lambda_handler(event, context):
    """Lambda entry point.

    Scans the DynamoDB table for entries whose formData contains yesterday's
    date; returns them when found, otherwise backs the table up to S3 and
    deletes yesterday's entries.

    :param event: Lambda event payload (unused)
    :param context: Lambda context object (unused)
    :return: the matching items when any exist for yesterday
    """
    current_date = datetime.now(pytz.timezone('US/Central'))
    # BUG FIX: 'timedleta' was a typo that raised NameError at runtime.
    yesterday_date = current_date - timedelta(days=1)
    yesterday_date_string = yesterday_date.strftime("%Y-%m-%dT")
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('AppStreamDynamoDB1')
    response = table.scan(
        FilterExpression=Attr('formData').contains(yesterday_date_string)
    )
    items = response['Items']
    print(items) # it should print out the values
    print("testing")
    print(yesterday_date_string)
    if len(items) != 0:
        print(items) # it should print null
        return items
    saving_backup()
    delete_entires()
def saving_backup():
    """Upload a JSON backup object to S3, keyed by today's date.

    :return: True on success, False when the upload failed.
    """
    # Function-scope imports: json and logging were referenced below but
    # never imported at module level.
    import json
    import logging

    s3_client = boto3.client('s3')
    key = datetime.now(pytz.timezone('US/Central')).strftime("%Y-%m-%dT")
    bucket = 'REPLACE_WITH_BUCKET_NAME'
    # NOTE(review): the payload is an empty list -- presumably the scanned
    # table items should be serialized here; confirm against the caller.
    data = []
    serializedData = json.dumps(data)
    try:
        # response = s3_client.upload_file(file_name, bucket, object_name)
        # BUG FIX: 's3' was undefined; the client created above is
        # 's3_client'. ClientError was also never imported -- use the
        # exception class exposed on the boto3 client instead.
        response = s3_client.put_object(Bucket=bucket, Key=key, Body=serializedData)
    except s3_client.exceptions.ClientError as e:
        logging.error(e)
        return False
    return True
def delete_entires(table=None, yesterday_date_string=None):
    """Back up the table to S3 and then delete yesterday's entry.

    :param table: boto3 DynamoDB Table resource. BUG FIX: the original body
        referenced a 'table' name that only existed as a local variable of
        lambda_handler (NameError); it is now an explicit, optional
        parameter to stay call-compatible.
    :param yesterday_date_string: partition-key value of the item to delete
        (same issue as above).
    """
    # BUG FIX: 'saving_backup() == True' computed a comparison and discarded
    # it; actually guard the delete on a successful backup.
    if not saving_backup():
        print("Backup failed, not deleting any entries")
        return
    if table is None or yesterday_date_string is None:
        raise ValueError('table and yesterday_date_string must be provided')
    #----------------------Delete Items inside the dynamo db---------------------------------------------
    print("Attempting a conditional delete...")
    try:
        response = table.delete_item(
            Key={
                'date': yesterday_date_string ,
            },
            # ConditionExpression="info.rating <= :val",
            # ExpressionAttributeValues= {
            #     ":val": decimal.Decimal(5)
            # }
        )
    # ClientError was never imported; use the class exposed by the table's
    # underlying boto3 client.
    except table.meta.client.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ConditionalCheckFailedException":
            print(e.response['Error']['Message'])
        else:
            raise
    else:
        print("DeleteItem succeeded:")
        # print(json.dumps(response, indent=4, cls=DecimalEncoder))
|
7,468 | 18eed41cbc419ecbb215f77235be99f15f86ea9a | '''
Author: Allen Chen
This is an example of entry point to CORE. Pay close attention to the import syntax - they're relative to this repo.
Don't try to run this by doing 'python3 main.py' under this directory. Try to add your Target in Makefile under the root dir,
and call './run YOUR_TARGET_NAME' from root.
'''
from src.CORE.class_TradeBot import TradeBot
from src.util.logging import log_ok, log_info, log_error
# Create a bot and exercise each log level once.
bot = TradeBot()
init_message = f"Just initialized a bot named {bot.name}"
cash_message = f"Bot is given cash: {bot.cash}"
log_info(init_message)
log_ok(cash_message)
log_error("Nothing else to do ! :(")
7,469 | 109ca06685eece74034f77a98b1d7172a17aca21 | import random
import re
from datetime import datetime, timedelta
from threading import Lock
from telegram.ext import run_async
from src.models.user import UserDB
from src.models.user_stat import UserStat
from src.utils.cache import cache, USER_CACHE_EXPIRE
from src.utils.logger_helpers import get_logger
logger = get_logger(__name__)
class PidorWeekly:
    """Weekly "pidor of the week" tracker for a chat.

    Counts, per chat and per ISO week (keyed by that week's Monday), how
    often each user writes -- or is replied to / mentioned in -- a message
    matching the trigger regexes, and picks the week's "winner" from those
    counts. Counters live in the shared cache.
    """
    # Guards read-modify-write cycles on the cached per-week counters.
    lock = Lock()
    # Whole-word trigger list (Russian slang / meme words).
    re_words = re.compile(
        r"\b(ге[йяи]|геев|анал|аналы|аналь\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\S*|член\S*|пизд\S+|гомос\S+|гомик\S*|\S+сексуал\S*|климов\S*|педерас\S+|пидор\S*|пидар\S*|педик\S+|подвор\S+|iphone\S*|айфон\S*|samsung|самсунг\S*|смузи|барбер\S*|рокет\S*|хипстер\S*|лгбт\S*|бабочк\S+|м[ао]к[ао]син\S*|ахтунг\S*|толерант\S+|политкорр?ект\S+|стрижк\S+|бород\S+|аниме\S*|саратов\S*|фемк\S+|\S+изм\S*|dtf|дтф|в[еэ]йп\S*|гироскутер\S*|мизог\S+|козел|козл\S+|муда[кч]\S*|сволоч\S+|ресторан\S*|кача[лт]\S+|мыло|читер\S*|читы?|культур\S+|сра[тл]\S+|насра[тл]\S+|гад\S*|блогг?ер\S*)\b",
        re.IGNORECASE)
    # Substring fallback for the key word in any inflection.
    re_inside = re.compile(r"п[еи]д[оа]р\S*", re.IGNORECASE)

    @classmethod
    def get_top_pidor(cls, cid, date=None):
        """Return the uid of the week's winner for chat `cid`.

        Uses the ratio of trigger hits to total messages per active user;
        falls back to a random roster member if nobody scored, and returns
        None when the chat has no stats at all.
        """
        monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
        db = cls.__get_db(monday, cid)
        stats = UserStat.get_chat_stats(cid, date)

        # Rank everyone by trigger-word hits relative to their message count
        pidor_by_count = {}
        for user_stat, user in stats:
            count = user_stat.all_messages_count
            # Only consider users with at least 30 messages and 500 words
            if count < 30 or user_stat.words_count < 500:
                continue
            if user.uid not in db:
                continue
            pidor_by_count[user.uid] = db[user.uid] / count

        if len(pidor_by_count) > 0:
            uid, _ = cls.__sort_dict(pidor_by_count)[0]
        elif len(stats) == 0:
            return None
        else:
            # Nobody triggered this week: pick a random active user
            _, user = random.choice(stats)
            uid = user.uid
        return uid

    @classmethod
    @run_async
    def parse_message(cls, message):
        """Score a Telegram message: credit the author (weight 1) and any
        replied-to or mentioned users (weight 0.4) when it matches."""
        msg = message.text
        if msg is None:
            return
        uid = message.from_user.id
        cid = message.chat_id
        entities = message.parse_entities()

        if not cls.__has_pidor(msg):
            return
        cls.__add(uid, cid)

        if message.reply_to_message is not None:
            to_uid = message.reply_to_message.from_user.id
            cls.__add(to_uid, cid, replay=True)

        for entity, entity_text in entities.items():
            if entity.type == 'mention':
                username = entity_text.lstrip('@').strip()
                try:
                    mentioned_user_uid = UserDB.get_uid_by_username(username)
                    if mentioned_user_uid:
                        cls.__add(mentioned_user_uid, cid, replay=True)
                except Exception:
                    # Unknown username -- best-effort, ignore
                    pass
                continue
            if entity.type == 'text_mention':
                cls.__add(entity.user.id, cid, replay=True)
                continue

    @classmethod
    def __has_pidor(cls, msg):
        """True if the message matches either trigger regex (ё folded to е)."""
        msg_lower = msg.lower().replace('ё', 'е')
        if cls.re_words.search(msg_lower):
            return True
        if cls.re_inside.search(msg_lower):
            return True
        return False

    @classmethod
    def __add(cls, uid, cid, date=None, replay=False):
        """Add a hit for `uid` in chat `cid` for the given week.

        Direct messages count 1.0; replies/mentions count 0.4. The whole
        read-update-write of the cached dict is done under the class lock.
        """
        monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
        logger.debug(f'lock {cid}:{uid}')
        with cls.lock:
            db = cls.__get_db(monday, cid)
            value = 1
            if replay is True:
                value = 0.4
            if uid in db:
                db[uid] += value
            else:
                db[uid] = value
            cls.__set_db(db, monday, cid)

    @staticmethod
    def __sort_dict(d):
        # Highest score first
        return sorted(d.items(), key=lambda x: x[1], reverse=True)

    @staticmethod
    def __get_cache_key(monday, cid):
        return f'pidorweekly:{monday.strftime("%Y%m%d")}:{cid}'

    @staticmethod
    def __get_date_monday(date):
        """Midnight of the Monday of the week containing `date`."""
        monday = date - timedelta(days=date.weekday())
        return monday.replace(hour=0, minute=0, second=0, microsecond=0)

    @classmethod
    def __get_current_monday(cls):
        return cls.__get_date_monday(datetime.today())

    @classmethod
    def __get_db(cls, monday, cid):
        """Fetch the {uid: score} dict for the week, or an empty one."""
        cached = cache.get(cls.__get_cache_key(monday, cid))
        if cached:
            return cached
        return {}

    @classmethod
    def __set_db(cls, newdb, monday, cid):
        cache.set(cls.__get_cache_key(monday, cid), newdb, time=USER_CACHE_EXPIRE)
|
7,470 | b74c759b51fb6591477757e2ff54b545f225991c | import json
from examtool.api.database import get_exam, get_roster
from examtool.api.extract_questions import extract_questions
from examtool.api.scramble import scramble
from google.cloud import firestore
import warnings
warnings.filterwarnings("ignore", "Your application has authenticated using end user credentials")
db = firestore.Client()
exams = [x.id for x in db.collection("exams").stream()]

# For every exam, compare each student's scrambled option order against the
# template and flag questions whose options ended up in a different order.
for exam in exams:
    print("checking", exam)
    exam_json = json.dumps(get_exam(exam=exam))
    roster = get_roster(exam=exam)
    flagged = set()
    for email, _ in roster:
        template_questions = extract_questions(json.loads(exam_json))
        student_questions = list(
            extract_questions(scramble(email, json.loads(exam_json), keep_data=True))
        )
        student_question_lookup = {q['id']: q for q in student_questions}
        for question in template_questions:
            if question["id"] not in student_question_lookup:
                continue
            if question["type"] not in ["multiple_choice", "select_all"]:
                continue
            if question["id"] in flagged:
                continue
            # Tag each template option with its original position, then
            # compare the text-sorted option lists position by position.
            for i, option in enumerate(question["options"]):
                option["index"] = i
            s = lambda options: sorted(options, key=lambda q: q["text"])
            for a, b in zip(s(question["options"]), s(student_question_lookup[question["id"]]["options"])):
                if a["index"] != b.get("index", a["index"]):
                    flagged.add(question["id"])
                    # NOTE(review): this `continue` only advances to the next
                    # option pair; `break` may have been intended -- confirm.
                    continue
    if flagged:
        print(exam, flagged)
|
7,471 | a12f9435eb4b090bc73be14ad64fdf43c5caa4d2 | from netsec_2017.Lab_3.packets import RequestItem, RequestMoney, RequestToBuy, FinishTransaction, SendItem, SendMoney
from netsec_2017.Lab_3.PLS.client import PLSClient, PLSStackingTransport
from netsec_2017.Lab_3.peepTCP import PeepClientTransport, PEEPClient
import asyncio
import playground
import random, logging
from playground import getConnector
from playground.network.packet import PacketType
from playground.network.packet.fieldtypes import UINT32, STRING, UINT16, UINT8, BUFFER
from playground.network.packet.fieldtypes.attributes import Optional
from playground.network.common.Protocol import StackingProtocol, StackingProtocolFactory, StackingTransport
import zlib
import sys
class ShopClientProtocol(asyncio.Protocol):
    """Client side of a five-packet shop transaction over a playground
    transport: RequestToBuy -> (RequestItem) -> SendItem -> (RequestMoney)
    -> SendMoney -> (FinishTransaction).
    """
    # Progress through the exchange: 0 = awaiting RequestItem,
    # 1 = awaiting RequestMoney, 2 = awaiting FinishTransaction.
    # NOTE(review): declared on the class but incremented via
    # `self.clientstate += 1`, which creates an instance attribute on first
    # use -- confirm no cross-instance state is intended.
    clientstate = 0

    def __init__(self, loop):
        #self.loop = loop
        self.transport = None
        self.loop = loop
        self.deserializer = PacketType.Deserializer()

    def connection_made(self, transport):
        """Start the transaction as soon as the connection is up."""
        print("ShopClient connection_made is called\n")
        self.transport = transport

        # PACKET 1 - Request to Buy packet
        startbuy = RequestToBuy()
        print("Sending Request to Buy")
        self.transport.write(startbuy.__serialize__())

    def data_received(self, data):
        """Feed bytes to the deserializer and advance the state machine;
        any out-of-order packet closes the connection."""
        print("ShopClient Data_received is called")
        self.deserializer.update(data)
        #print(data)
        for pkt in self.deserializer.nextPackets():
            #print("Client <------------{}------------- Server".format(pkt.DEFINITION_IDENTIFIER))
            if isinstance(pkt, RequestItem) and self.clientstate == 0:
                self.clientstate += 1

                # PACKET 3 - Send Item packet
                item = "Butter"
                response = SendItem()
                response.Item = item
                print("Sent SendItem")
                self.transport.write(response.__serialize__())
            elif isinstance(pkt, RequestMoney) and self.clientstate == 1:
                self.clientstate += 1

                # PACKET 5 - Send Money packet (echo the requested amount)
                response = SendMoney()
                response.Cash = pkt.Amount
                print("Sent SendMoney")
                self.transport.write(response.__serialize__())
            elif isinstance(pkt, FinishTransaction) and self.clientstate == 2:
                self.transport.close()
            else:
                print(pkt.Type)
                print("Client Received Incorrect Packet. Closing Connection. Try Again!")
                self.transport.close()

    def connection_lost(self,exc):
        """Tear down the transport and stop the event loop."""
        print('\nThe ShopServer sent a connection close to the client')
        self.transport.close()
        self.transport = None
        self.loop.stop()
class initiate():
    """Factory wrapper whose bound method is handed to playground's
    create_playground_connection as the protocol factory."""
    #1
    def __init__(self, loop):
        self.loop = loop

    def send_first_packet(self):
        """Build the client protocol for a new connection.

        BUG FIX: the original did `self.loop = loop` and passed the
        module-level `loop` global to ShopClientProtocol, silently ignoring
        the loop given to __init__. Use the stored loop instead.
        """
        return ShopClientProtocol(self.loop)
if __name__ == "__main__":
    loop = asyncio.get_event_loop()

    #logging.getLogger().setLevel(logging.NOTSET) # this logs *everything*
    #logging.getLogger().addHandler(logging.StreamHandler()) # logs to stderr

    # Stack PLS (security) over PEEP (reliability) for the client side.
    f = StackingProtocolFactory(lambda:PLSClient(), lambda: PEEPClient(loop))
    ptConnector = playground.Connector(protocolStack=f)
    playground.setConnector("passthrough", ptConnector)
    go = initiate(loop)
    # Connect to the shop server at the playground address/port below.
    coro = playground.getConnector('passthrough').create_playground_connection(go.send_first_packet, '20174.1.1.1', 8888)
    client = loop.run_until_complete(coro)
    # Serve requests until Ctrl+C is pressed
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    loop.close()
|
7,472 | e59bd92a94399d4a81687fc5e52e9ae04b9de768 | from django.db import models
from colorfield.fields import ColorField
from api import settings
from os.path import splitext
from datetime import datetime, timedelta
from PIL import Image
def saveTaskPhoto(instance,filename):
    """Upload-path builder for Task.thumbnail: 'tasks/task_<id><ext>'.

    NOTE(review): for a brand-new Task, instance.id is still None when the
    file is first saved, yielding 'tasks/task_None<ext>' -- confirm intended.
    """
    taskId = instance.id
    name,ext = splitext(filename)
    return f'tasks/task_{taskId}{ext}'
class Task(models.Model):
    """A recurring user task: to be done `repeat` times, once every
    `frequency` days, optionally time-boxed to `duration` minutes."""
    difficulty = models.IntegerField(default=0)
    category = models.SmallIntegerField(default=0)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    title = models.CharField(max_length=150)
    repeat = models.IntegerField() # Set when a task is created and will not change
    frequency = models.IntegerField() # number of days
    duration = models.IntegerField(null=True,blank=True) # number of minutes
    thumbnail = models.ImageField(null=True,blank=True,upload_to=saveTaskPhoto)
    description = models.TextField(null=True,blank=True)
    begin = models.DateField(auto_now_add=True)
    lastBegin = models.DateTimeField(null=True,blank=True)
    done = models.IntegerField(default=0) # how many times the task has been done

    class Meta:
        # A user cannot have two tasks with the same title.
        unique_together = ('title', 'user',)

    def save(self, *args, **kwargs):
        """Save the row, then shrink any uploaded thumbnail to <= 50x50 px."""
        super(Task, self).save(*args, **kwargs)
        if(self.thumbnail):
            image = Image.open(self.thumbnail.path)
            output_size = (50, 50)
            image.thumbnail(output_size)
            image.save(self.thumbnail.path)

    @property
    def points(self):
        # NOTE(review): `difficulty` is an IntegerField, so
        # `self.difficulty.points` will raise AttributeError at runtime --
        # presumably `difficulty` was once a FK to a model exposing
        # `points`; confirm and fix the field or this property.
        return self.done * self.difficulty.points

    @property
    def finish(self):
        # True once the task has been done the planned number of times.
        return self.done == self.repeat

    @property
    def predictedEnd(self):
        """Projected completion date given the remaining repetitions."""
        today = datetime.today()
        delta = (self.repeat - self.done)*self.frequency
        end = today + timedelta(days=delta)
        return end.date()

    @property
    def state(self):
        """Return 'to do', 'doing' or 'done' for the current window."""
        if self.lastBegin is None :
            return "to do"
        now = datetime.now()
        delta = now - self.lastBegin
        if delta.days >= self.frequency :
            # The frequency window has elapsed: due again.
            return "to do"
        else :
            # Within the window: 'doing' while the duration is running.
            if self.duration and self.lastBegin + timedelta(minutes=self.duration) > now :
                return "doing"
            else :
                return "done"

    def __str__(self):
        return f'{self.title} / {self.id}'
|
7,473 | 24b6d33849f034b9f61ffd4aaff90a0f428085fe | from pybot import usb4butia
# Poll button 6 of the USB4Butia board forever and echo its state.
# (Python 2 source: note the print statement.)
u4b = usb4butia.USB4Butia()
while True:
    bot = u4b.getButton(6)
    print bot
|
7,474 | 2d0d73c0ea20d6736c10d5201abcfa9d561ef216 | import random
import matplotlib.pyplot as plt
import numpy as np
def dado(n):
    """Simulate n rolls of two fair dice and bar-plot the sum counts."""
    # cont[k] holds how many times the sum (k + 2) appeared, for sums 2..12.
    cont = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    for _ in range(n):
        suma = random.randint(1, 6) + random.randint(1, 6)
        cont[suma - 2] += 1
    print("la suma de dos se repitio", cont[0])
    print("la suma de tres se repitio", cont[1])
    print("la suma de cuatro se repitio", cont[2])
    print("la suma de cinco se repitio", cont[3])
    print("la suma de seis se repitio", cont[4])
    print("la suma de siete se repitio", cont[5])
    print("la suma de ocho se repitio", cont[6])
    print("la suma de nueve se repitio", cont[7])
    print("la suma de diez se repitio", cont[8])
    print("la suma de once se repitio", cont[9])
    print("la suma de doce repitio", cont[10])
    print("fin")
    etiqueta = ['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.xticks(range(len(cont)), etiqueta)
    plt.xlabel('Sumas')
    plt.ylabel('Probabilidad')
    plt.title('Simulación suma Dados')
    plt.bar(range(len(cont)), cont)
    plt.show()
# Run the simulation at three sample sizes to show convergence.
dado(100)
dado(1000)
dado(10000)
|
7,475 | 7639b80c9e6e1b2e1e55a47a862c433b64168cf6 | # 代码3-14 pandas累积统计特征函数、移动窗口统计函数示例
import pandas as pd
D = pd.Series(range(0, 20))  # build a Series holding the 20 integers 0..19
print(D.cumsum())  # running (prefix) sums
print(D.rolling(2).sum())  # pairwise sums of adjacent elements (first is NaN)
|
7,476 | a3239bbe4f85c9f0e1bc845245f024c3feb64923 | # Generated by Django 3.2.3 on 2021-06-01 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the `well_status` choice field
    (drilling / development / idle / downtime states) to the `well` model."""

    dependencies = [
        ('info', '0002_auto_20210531_1958'),
    ]

    operations = [
        migrations.AddField(
            model_name='well',
            name='well_status',
            field=models.CharField(choices=[('0', 'Бурение'), ('1', 'Освоение'), ('2', 'Бездействие'), ('3', 'Простой')], default='2', max_length=15, verbose_name='Статус скважины'),
        ),
    ]
|
7,477 | 8ad47bf292e0046550cc0ef6f6bb75cf179ebd4b | def group(arr):
low, mid, high = 0, 0, len(arr)-1
while mid <= high:
print(arr)
if arr[mid] == 'R' :
arr[low], arr[mid] = arr[mid], arr[low]
low += 1
mid += 1
elif arr[mid] == 'G':
mid += 1
else:
arr[high], arr[mid] = arr[mid], arr[high]
high -= 1
return arr
# Read space-separated R/G/B letters from stdin and print them grouped.
*arr, = map(str, input("enter the list of R, G, B").split())
print(group(arr))
|
7,478 | 82556291c456b9e43e4e589ea4a77d320430344b | data_dir = "../data"
output_dir = './'

# Collect every id present in the validation-set info files per category.
valid_id = dict()
for category in ("beauty", "fashion", "mobile"):
    with open("%s/%s_data_info_val_competition.csv" % (data_dir, category), "r") as infile:
        next(infile)  # skip the CSV header row
        for line in infile:
            curr_id = line.strip().split(',')[0]
            valid_id[curr_id] = True

# This is the new output submission file containing 977987 rows
with open("submission_977.csv", "w") as outfile:
    outfile.write("id,tagging\n")
    # Please change the file below to your current submission filename containing 1174802 rows
    # with open("submission-in.csv", "r") as infile:
    with open("%s/submission_2103.csv" % output_dir, "r") as infile:
        next(infile)
        for line in infile:
            # Keep only rows whose id (the part before '_') is in the
            # validation set collected above.
            curr_id = line.strip().split('_')[0]
            if curr_id in valid_id:
                outfile.write(line.strip() + '\n')
7,479 | 96ea9b2b4d892ac88f7fac9594a6d2ad5d69a7c7 | # -*- coding: utf-8 -*-
import os
import logging
import subprocess
import json
import sys
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_PATH)
from src.datafactory.common import json_util
from src.datafactory.config import constant
class SegmentProcess(object):
    """Wrapper around a remote NLP word-segmentation service.

    Builds a curl command, POSTs the sentence to the service and parses the
    JSON reply into a list of segmented words.
    """
    def do_nlp_seg(self, sentence):
        """POST `sentence` to the segmentation endpoint via curl.

        Returns the raw response (stdout of curl); retries once if the
        subprocess call itself fails.
        """
        cmd = "curl -d '{\"lang_id\":1,\"lang_para\":0,\"query\":\"%s\"}" \
              "' %s?username=%s\&app=%s\&encoding=utf8" % (
            sentence,
            constant.SEGMENT_URL,
            constant.SEGMENT_USERNAME,
            constant.SEGMENT_APP
        )
        try:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            retn = p.communicate()[0]
        except Exception as e:
            # One retry on failure, e.g. a transient spawn error.
            logging.critical("segment(%s) failed and try again:%s" % (sentence, e))
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            retn = p.communicate()[0]
        return retn

    def deparser(self, segment_result_str):
        """Parse the service's JSON reply.

        Returns the list of words found under scw_out.wordsepbuf
        (tab-separated); returns an empty list on any parse error.
        """
        segment_result = []
        try:
            segment_result_dict = json.loads(segment_result_str,
                                             object_hook=json_util._decode_dict)
            if "scw_out" in segment_result_dict and "wordsepbuf" in segment_result_dict["scw_out"]:
                wordsepbuf = segment_result_dict["scw_out"]["wordsepbuf"]
                wordsepbuf_split = wordsepbuf.strip("\t").split("\t")
                for word in wordsepbuf_split:
                    segment_result.append(word)
            else:
                logging.critical("segment result(%s) error without wordsepbuf"
                                 % segment_result_str)
        except ValueError as e:
            logging.critical("deparser segment result(%s) failed: %s" % (segment_result_str, e))
        return segment_result
def get_segment(ori_data):
    """Segment `ori_data` via the remote service and return the word list."""
    seg = SegmentProcess()
    result = seg.do_nlp_seg(ori_data)
    segment_result = seg.deparser(result)
    return segment_result
if __name__ == "__main__":
    # Smoke test against the live service.
    # FIX: use print as a function -- the Python-2-only `print expr`
    # statement made this file a SyntaxError under Python 3; the
    # parenthesized form behaves identically under Python 2 as well.
    print(get_segment("同意 写论文的时候 用百度查一个庭园方面的术语\\\"ll"))
|
7,480 | 8a9feae4ce209def2c98b7bed993f9b5c019a533 | from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import random
import numpy as np
import timeit
def main():
    """Build the Tk sorting-demo window: generate random data, run one of
    four sorting routines on it, and display each routine's run time."""
    root = tk.Tk()
    # root.geometry('800x500')
    root.resizable(width=False, height=False)
    root.title('Tugas Algoritma')
    canvas = tk.Canvas(root, height=500, width=800)
    canvas.pack()
    bg = tk.PhotoImage(file='bg.png')
    bl = tk.Label(root, image=bg)
    bl.place(relwidth=1, relheight=1)
    # root.iconbitmap('icons/pypad.ico')
    ########################################################################

    def about():
        messagebox.showinfo("About", "Simulasi Algoritma Sorting")

    def help_box(event=None):
        messagebox.showinfo(
            "Help", "For help email to hai@irfnrdh.com", icon='question')

    def exit_editor():
        if messagebox.askokcancel("Quti", "Do you really want to quit?"):
            root.destroy()
    root.protocol('WM_DELETE_WINDOW', exit_editor)

    def donothing():
        print("Nothing to do :v")

    ########################################################################
    # Bubble Sort
    def bbsort(angka):
        for i in range(len(angka)-1, 0, -1):
            # swap = False
            for j in range(i):
                if angka[j] > angka[j+1]:
                    tampung = angka[j]
                    angka[j] = angka[j+1]
                    angka[j+1] = tampung
                    # swap = True
            # if not swap:
            #     break

    # Selection Sort
    def sssort(angka):
        for i in range(len(angka)):
            min_idx = i
            for j in range(i, len(angka)):
                if angka[j] < angka[min_idx]:
                    min_idx = j
            tampung = angka[i]
            angka[i] = angka[min_idx]
            angka[min_idx] = tampung

    # Insertion Sort
    def issort(angka):
        # BUG FIX: the original inner loop ran `range(i-1, 0, -1)` (never
        # reaching index 0) and overwrote slots without shifting, so the
        # list came back unsorted/corrupted. Standard insertion sort below.
        for i in range(1, len(angka)):
            idx = angka[i]
            j = i - 1
            while j >= 0 and angka[j] > idx:
                angka[j+1] = angka[j]
                j -= 1
            angka[j+1] = idx

    # Bubble+Insertion+Selection Sort Optimize
    def bisort(angka):
        # NOTE(review): currently identical to sssort (selection sort);
        # kept as-is so existing timing comparisons are unchanged.
        for i in range(len(angka)):
            min_idx = i
            for j in range(i, len(angka)):
                if angka[j] < angka[min_idx]:
                    min_idx = j
            tampung = angka[i]
            angka[i] = angka[min_idx]
            angka[min_idx] = tampung

    ########################################################################
    # Each *_respon handler copies the sample list, times one algorithm on
    # it, fills the result listbox, and shows the elapsed time label.
    def bbs_respon():
        if len(listbox_widget.get(0, tk.END)) == 0:
            print("Data Lu mane?")
        else:
            print("#################################################### BUBLE SORT ")
            hasil_listbox_widget.delete(0, tk.END)
            angka = list(listbox_widget.get(0, tk.END))
            print("Data Sample \n", angka)
            start = timeit.default_timer()
            bbsort(angka)
            stop = timeit.default_timer()
            runtime = stop - start
            print("\n Hasil Sorting \n", angka)
            print('RunTime : ', runtime)
            print('Jumlah data : ', len(angka))
            for hasil_entry in angka:
                hasil_listbox_widget.insert(tk.END, hasil_entry)
            bbs_time.config(text="% .12f" % runtime)
            bbs_time.place(x=420, y=185)

    def iss_respon():
        if len(listbox_widget.get(0, tk.END)) == 0:
            print("Data Lu mane?")
        else:
            print("#################################################### INSERTION SORT ")
            hasil_listbox_widget.delete(0, tk.END)
            angka = list(listbox_widget.get(0, tk.END))
            print("Data Sample \n", angka)
            start = timeit.default_timer()
            issort(angka)
            stop = timeit.default_timer()
            runtime = stop - start
            print("\n Hasil Sorting \n", angka)
            print('RunTime : ', runtime)
            print('Jumlah data : ', len(angka))
            for hasil_entry in angka:
                hasil_listbox_widget.insert(tk.END, hasil_entry)
            iss_time.config(text="% .12f" % runtime)
            iss_time.place(x=545, y=185)

    def sss_respon():
        if len(listbox_widget.get(0, tk.END)) == 0:
            print("Data Lu mane?")
        else:
            print("#################################################### SELECTION SORT ")
            hasil_listbox_widget.delete(0, tk.END)
            angka = list(listbox_widget.get(0, tk.END))
            print("Data Sample \n", angka)
            start = timeit.default_timer()
            sssort(angka)
            stop = timeit.default_timer()
            runtime = stop - start
            print("\n Hasil Sorting \n", angka)
            print('RunTime : ', runtime)
            print('Jumlah data : ', len(angka))
            for hasil_entry in angka:
                hasil_listbox_widget.insert(tk.END, hasil_entry)
            sss_time.config(text="% .12f" % runtime)
            sss_time.place(x=670, y=185)

    def bsi_respon():
        if len(listbox_widget.get(0, tk.END)) == 0:
            print("Data Lu mane?")
        else:
            print("#################################################### BSI")
            hasil_listbox_widget.delete(0, tk.END)
            angka = list(listbox_widget.get(0, tk.END))
            print("Data Sample \n", angka)
            start = timeit.default_timer()
            bisort(angka)
            stop = timeit.default_timer()
            runtime = stop - start
            print("\n Hasil Sorting \n", angka)
            print('RunTime : ', runtime)
            print('Jumlah data : ', len(angka))
            for hasil_entry in angka:
                hasil_listbox_widget.insert(tk.END, hasil_entry)
            bsi_time.config(text="% .12f" % runtime)
            bsi_time.place(x=570, y=333)

    def generate(entry):
        """Fill the sample listbox with `entry` random ints in 1..1000."""
        listbox_widget.delete(0, tk.END)
        l = int(entry)
        listrandom = []
        for i in range(l):
            value = random.randint(1, 1000)
            listrandom.append(value)
            listbox_widget.insert(tk.END, value)
        angka = listrandom
        # print(listrandom)
        # listbox_entries = random.sample(range(100), int(entry))
        # for entry in listbox_entries:
        #     listbox_widget.insert(tk.END, entry)
        #angka = listbox_widget.get(0, tk.END)

    def cls():
        # Clear the result listbox and "scroll away" the console output.
        hasil_listbox_widget.delete(0, tk.END)
        print("\n" * 100)
        # print [ listbox_widget.get(i) for i in listbox_widget.curselection()]

    ########################################################################
    menubar = Menu(root)
    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label="Generate Random Number", command=donothing)
    filemenu.add_command(label="Close", command=exit_editor)
    filemenu.add_separator()
    filemenu.add_command(label="Exit", command=root.quit)
    menubar.add_cascade(label="File", menu=filemenu)
    aboutmenu = Menu(menubar, tearoff=0)
    menubar.add_cascade(label="About", menu=aboutmenu)
    aboutmenu.add_command(label="About", command=about)
    aboutmenu.add_command(label="Help", command=help_box)
    root.config(menu=menubar)

    ########################################################################
    # DATA SAMPLING ------------------------------
    frame_data = tk.Frame(root)
    frame_data.place(relx=0.128, rely=0.140, relwidth=0.18,
                     relheight=0.65, anchor='n')
    listbox_widget = tk.Listbox(
        frame_data, selectmode="BROWSE", height=20, width=20, background='white')
    listbox_widget_scrl = Scrollbar(frame_data, orient=VERTICAL)
    listbox_widget.config(yscrollcommand=listbox_widget_scrl.set)
    listbox_widget_scrl.configure(command=listbox_widget.yview)
    listbox_widget.grid(row=1, sticky=W)
    listbox_widget_scrl.grid(row=1, column=1, sticky=NS)

    # DATA HASIL ------------------------------
    frame_hasil = tk.Frame(root)
    frame_hasil.place(relx=0.34, rely=0.140, relwidth=0.18,
                      relheight=0.65, anchor='n')
    hasil_listbox_widget = tk.Listbox(
        frame_hasil, selectmode="BROWSE", height=20, width=20, background='white')
    hasil_listbox_widget_scrl = Scrollbar(frame_hasil, orient=VERTICAL)
    hasil_listbox_widget.config(yscrollcommand=hasil_listbox_widget_scrl.set)
    hasil_listbox_widget_scrl.configure(command=hasil_listbox_widget.yview)
    # hasil_listbox_entries = random.sample(range(100), 10)
    # for hasil_entry in hasil_listbox_entries:
    #     hasil_listbox_widget.insert(tk.END, hasil_entry)
    hasil_listbox_widget.grid(row=1, sticky=W)
    hasil_listbox_widget_scrl.grid(row=1, column=1, sticky=NS)

    # Entry
    entry = tk.Entry(root, font=40, width=7)
    entry.place(x=105, y=450)

    # BUTTON
    bbs_button = tk.Button(root, text="START", font=40,
                           command=bbs_respon).place(x=434, y=140)
    iss_button = tk.Button(root, text="START", font=40,
                           command=iss_respon).place(x=555, y=140)
    sss_button = tk.Button(root, text="START", font=40,
                           command=sss_respon).place(x=680, y=140)
    bsi_button = tk.Button(root, text="START", font=40,
                           command=bsi_respon).place(x=466, y=330)

    # GENERATE DATA SAMPLING
    gen_button = tk.Button(root, text="GENERATE", font=40,
                           command=lambda: generate(entry.get()))
    gen_button.place(x=180, y=447)
    cls_button = tk.Button(root, text="CLEAN", font=40,
                           command=cls).place(x=295, y=447)

    # RESPON TIME
    bbs_time = ttk.Label(root, background="#6367c8",
                         foreground="#fff")
    bbs_time['text'] = "Respon Time"
    bbs_time.place(x=429, y=185)
    iss_time = tk.Label(root,
                        background="#6367c8", foreground="#fff")
    iss_time['text'] = "Respon Time"
    iss_time.place(x=555, y=185)
    sss_time = tk.Label(root,
                        background="#6367c8", foreground="#fff")
    sss_time['text'] = "Respon Time"
    sss_time.place(x=680, y=185)
    bsi_time = tk.Label(root,
                        background="#6367c8", font=40, foreground="#fff")
    bsi_time['text'] = "Respon Time"
    bsi_time.place(x=570, y=333)

    ########################################################################
    root.mainloop()
main()
|
7,481 | 3031f695d57492cf3b29694fecd0a41c469a3e00 | botName = "firstBot"
# Reddit bot credentials (used together with botName above).
# NOTE(review): secrets are hard-coded in version control; prefer
# environment variables or an ignored config file.
username = "mrthemafia"
password = "oblivion"
client_id = "Y3LQwponbEp07w"
client_secret = "R4oyCEj6hSTJWHfWMwb-DGUOBm8"
|
7,482 | abfff0901e5f825a473119c93f53cba206609428 | # -*- coding: utf-8 -*-
import io
import urllib.request
from pymarc import MARCReader
class Item:
    """
    Represents an item from our
    Library catalogue (https://www-lib.soton.ac.uk)

    Usage:
    #>>> import findbooks
    #>>> item = findbooks.Item('12345678')
    #>>> item.getMarcFields()
    #>>> print(item.title)
    """
    # Webcat endpoint that returns the MARC record for a barcode.
    webcat = "http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid="

    def __init__(self, barcode):
        self.barcode = barcode
        self.marc = None      # raw MARC text fetched from webcat
        self.record = None    # parsed pymarc Record
        self.title = None
        self.author = None
        self.year = None

    def _get_marc(self):
        """Fetch the raw MARC text for this barcode; leaves self.marc as
        None when the barcode is unknown."""
        with urllib.request.urlopen(Item.webcat + self.barcode) as response:
            html = response.read().decode("utf-8")
        # Strip the surrounding markup tags and leading/trailing newlines.
        marc = html[html.find(">")+1:html.rfind("<")].strip('\n')
        if "Barcode not found" not in marc:
            self.marc = marc

    def _get_title(self):
        """Short title from MARC field 245$a, trimmed of punctuation."""
        if self.record['245']:
            title = self.record['245']['a'].strip(' /:,.')
            return title

    def _get_long_title(self):
        """Full title as assembled by pymarc, trimmed of punctuation."""
        title = self.record.title().strip(' /:,.')
        return title

    def _get_author(self):
        """Author from field 100, 110 or 111 (first one present)."""
        if self.record['100']:
            return self.record['100']['a']
        elif self.record['110']:
            return self.record['110']['a']
        elif self.record['111']:
            return self.record['111']['a']
        else:
            return None

    def _get_year(self):
        """Return the publication year as a 4-digit string, or None."""
        date = self.record.pubyear()
        if date:
            # dates should only have numbers
            nums = '1234567890'
            new_date = ''
            for ch in date:
                if ch in nums:
                    new_date += ch
            # BUG FIX: guard against a pubyear value with no digits at all --
            # the original indexed new_date[0] and raised IndexError.
            if not new_date:
                return None
            # dates should have '1' as the first char
            # NOTE(review): this rejects years >= 2000 -- confirm intended.
            if not new_date[0] == "1":
                return None
            # dates should be 4 chars long
            if not len(new_date) == 4:
                return None
            return new_date
        else:
            return None

    def get_marc_fields(self, len_title):
        """Fetch and parse the MARC record, populating title/author/year.

        `len_title` selects "short" (245$a only) or the full pymarc title.
        """
        self._get_marc()
        if self.marc:
            with io.BytesIO(self.marc.encode('utf-8')) as fh:
                reader = MARCReader(fh)
                for record in reader:
                    self.record = record
                    self.title = self._get_title() if len_title == "short" else self._get_long_title()
                    self.author = self._get_author()
                    self.year = self._get_year()
# item = Item('59571478')
# item.get_marc_fields()
# print(item.title)
|
7,483 | 5e2fcc6379a8ecee0378d26108e4deab9d17dba6 | # All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework import viewsets
from rest_framework.exceptions import APIException
from NSDManagement.serializers import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.utils import json
from rest_framework.decorators import action
from VnfPackageManagement.models import VnfPkgInfo
from utils.file_manipulation import remove_file, decompress_zip
from utils.format_tools import set_request_parameter_to_string
from utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created
from utils.process_package.ns_descriptor import NetworkServiceDescriptor
class NSDescriptorsViewSet(viewsets.ModelViewSet):
    """CRUD + content upload for Network Service Descriptor (NSD) resources.

    State rules enforced (ETSI NFV-SOL style): content upload requires the
    CREATED onboarding state, update requires ON-BOARDED, deletion requires
    DISABLED operational state and NOT_IN_USE usage state.
    """
    queryset = NsdInfo.objects.all()
    serializer_class = NsdInfoSerializer

    def create(self, request, *args, **kwargs):
        """Create an NsdInfo record, storing self/content links derived
        from the request URL."""
        set_request_parameter_to_string(request, 'userDefinedData')
        request.data['_links'] = {'self': request.build_absolute_uri(),
                                  'nsd_content': request.build_absolute_uri()}
        return super().create(request)

    def get_success_headers(self, data):
        # Location header points at the freshly created resource.
        return {'Location': data['_links']['self']}

    def list(self, request, *args, **kwargs):
        """List all descriptors.

        NOTE(review): this raises when the queryset is EMPTY, yet the
        message reads like a success statement -- the condition or wording
        looks inverted; confirm intent.
        """
        if self.get_queryset().__len__() < 1:
            raise APIException(detail='One or more individual NS descriptor resource have been created')
        return super().list(request)

    def update(self, request, *args, **kwargs):
        """Change nsdOperationalState; only valid once on-boarded, and only
        to ENABLED or DISABLED."""
        instance = self.get_object()
        if on_boarded != instance.nsdOnboardingState:
            raise APIException(detail='NSD nsdOnboardingState is not {}'.format(on_boarded))
        if disabled != request.data['nsdOperationalState'] and enabled != request.data['nsdOperationalState']:
            raise APIException(detail='ValueError: invalid operationalState',
                               code=status.HTTP_409_CONFLICT)
        # Echo back the request payload (before string coercion) as the body.
        response = request.data.copy()
        set_request_parameter_to_string(request, 'userDefinedData')
        super().update(request)
        return Response(response, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        """Delete a descriptor and its on-disk content."""
        instance = self.get_object()
        if disabled != instance.nsdOperationalState:
            raise APIException(detail='NSD nsdOperationalState is not {}'.format(disabled),
                               code=status.HTTP_409_CONFLICT)
        if not_in_use != instance.nsdUsageState:
            raise APIException(detail='NSD nsdUsageState is not {}'.format(not_in_use),
                               code=status.HTTP_409_CONFLICT)
        remove_file('{}{}'.format(nsd_base_path, instance.id))
        super().destroy(request)
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(detail=True, methods=['PUT'], url_path='nsd_content')
    def upload_content(self, request, **kwargs):
        """Accept a zipped NSD, unpack and parse it, and link the
        constituent VNF packages; requires the CREATED onboarding state and
        an application/zip Accept header."""
        instance = self.get_object()
        if created != instance.nsdOnboardingState:
            raise APIException(detail='NSD nsdOnboardingState is not {}'.format(created),
                               code=status.HTTP_409_CONFLICT)
        if 'application/zip' not in request.META['HTTP_ACCEPT']:
            raise APIException(detail='HEAD need to have application/zip value')
        network_service_path = decompress_zip(
            request.data["file"], '{}{}'.format(nsd_base_path, instance.id) + '/nsd_content/')
        network_service_descriptor = NetworkServiceDescriptor(path=network_service_path)
        nsd_content = network_service_descriptor.processing_data()
        # Resolve each constituent VNFD id to the latest matching package id.
        vnf_pkg_ids_list = list()
        for vnfd in network_service_descriptor.get_constituent_vnfd():
            vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(vnfdId__iexact=vnfd['vnfd_id']).last().id))
        nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
        serializer = self.get_serializer(instance, data=nsd_content)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_202_ACCEPTED)
|
7,484 | b9fe758d5fe12b5a15097c0e5a33cb2d57edfdd2 | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Group,SQLlist
from .forms import GroupForm
from .oraConnect import *
from .utils import IfNoneThenNull
########################### Группы ############################
def group_list(request):
    """Render the list of all groups."""
    groups = Group.objects.all()
    return render(request, 'group_list.html', {'groups': groups})
def group_detail(request, pk):
    """Render the detail page for one group (404 if missing)."""
    group = get_object_or_404(Group, pk=pk)
    return render(request, 'group_detail.html', {'group': group})
def group_add(request):
    """Show the group form (GET) or create a new group (POST)."""
    if request.method == "POST":
        form = GroupForm(request.POST)
        if form.is_valid():
            group = form.save(commit=False)
            # Model-level add() persists the group on behalf of the user.
            group.add(group.group_name,request.user)
            return redirect('group_detail', pk=group.pk)
    else:
        form = GroupForm()
    return render(request, 'group_edit.html', {'form': form})
def group_edit(request, pk):
    """Edit a group; only allowed when the current user has it checked out.

    Raises (and catches) NameError to signal check-out violations, printing
    a diagnostic and redirecting back to the detail page.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by is not None and group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        if group.parent is None:
            # NOTE(review): grpSQL is built here but never used in this
            # view -- confirm whether it is leftover or missing a call.
            grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,'',IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)
        if request.method == "POST":
            form = GroupForm(request.POST, instance=group)
            if form.is_valid():
                group = form.save(commit=False)
                group.edit(group.group_name,request.user)
                return redirect('group_detail', pk=group.pk)
        else:
            form = GroupForm(instance=group)
        return render(request, 'group_edit.html', {'form': form})
    except NameError as err:
        if err.args[0] == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s'%(group.group_name,group.checked_out_by))
        elif err.args[0] == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение'%(group.group_name))
        return redirect('group_detail', pk=group.pk)
def group_del(request, pk):
    """Mark a Group removed; allowed only for the user who checked it out."""
    group = get_object_or_404(Group, pk=pk)
    try:
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by is not None and group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        group.remove(group.group_name)
    except NameError as err:
        reason = err.args[0]
        if reason == 'OtherCheckoutUser':
            print('"%s" Взято на изменение пользователем %s' % (group.group_name, group.checked_out_by))
        elif reason == 'NoneCheckoutUser':
            print('"%s" сначала необходимо взять на изменение' % (group.group_name))
    return render(request, 'group_detail.html', {'group': group})
def group_import(request):
    """Import Group rows from Oracle into the local Django table.

    SECURITY: the Oracle credentials are hard-coded here (and in
    group_checkin); they should be moved to settings/environment config.
    """
    con = OraConnection('DM_SKB', 'DWHLegator000000', 'XE')
    try:
        grpSQL = SQLlist.objects.filter(sql_name='Group').first().get_checkout_sql_all()
        grpCur = con.get_cursor(grpSQL)
        for rec in con.get_data(grpCur):
            # rec layout: (group_name, parent_name, strg_period, strg_period_type)
            Group.objects.create(group_name=rec[0],
                                 parent=Group.objects.filter(group_name=rec[1]).first(),
                                 strg_period=rec[2],
                                 strg_period_type=rec[3])
    finally:
        # always release the Oracle connection, even when the import fails
        con.close()
    groups = Group.objects.all()
    return render(request, 'group_list.html', {'groups': groups})
def group_truncate(request):
    """Delete every Group row one at a time (per-instance delete keeps model hooks)."""
    for existing in Group.objects.all():
        existing.delete()
    return render(request, 'group_list.html', {'groups': Group.objects.all()})
def group_checkin(request, pk):
    """Check a Group back in: push its state to Oracle, then release the lock.

    Only the user who checked the record out may check it in.  When the
    group is flagged deleted, the local row is removed and the list page
    is rendered; otherwise we redirect back to the detail page.
    """
    group = get_object_or_404(Group, pk=pk)
    try:
        # NameError is (ab)used as a local signal for check-out state.
        if group.checked_out_by is None:
            raise NameError('NoneCheckoutUser')
        if group.checked_out_by is not None and group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
        # Build the Oracle check-in statement; parent name is empty for roots.
        # NOTE(review): %-interpolating values straight into SQL -- injection
        # risk if group names are user-controlled; prefer bind parameters.
        if group.parent is None:
            grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,'',IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)
        else:
            grpSQL = SQLlist.objects.filter(sql_name = 'Group').first().get_checkin_sql()%(group.group_name,group.parent.group_name,IfNoneThenNull(group.strg_period),IfNoneThenNull(group.strg_period_type,''),group.deleted)
        try:
            # NOTE(review): hard-coded credentials; move to settings/env.
            con = OraConnection('DM_SKB','DWHLegator000000','XE')
            con.exec(grpSQL);
            group.checkin(group.group_name)
        except NameError as err:
            print(err)
    except NameError as err:
        if err.args[0] == 'OtherCheckoutUser':
            print('Взято на изменение пользователем %s'%(group.checked_out_by))
        elif err.args[0] == 'NoneCheckoutUser':
            print('Сначала необходимо взять на изменение')
        else:
            print(err)
    # Deleted groups are purged locally after the Oracle check-in.
    if group.deleted == 1:
        group.delete()
        groups = Group.objects.all()
        return render(request, 'group_list.html', {'groups': groups})
    else:
        return redirect('group_detail', pk=pk)
def group_checkout(request, pk):
    """Take a Group for editing (check-out); refuse if another user holds it."""
    group = get_object_or_404(Group, pk=pk)
    # Held by someone else: report it and fall through without taking the lock.
    try:
        if group.checked_out_by is not None and group.checked_out_by != request.user.username:
            raise NameError('OtherCheckoutUser')
    except NameError:
        print('Взято на изменение пользователем %s'%(group.checked_out_by))
    # BUG FIX: checked_out_by stores a username string (compared with
    # request.user.username everywhere else in this module), but the original
    # compared it to the User object itself, which is never equal -- so a
    # re-checkout by the holding user always failed silently.
    if group.checked_out_by is None or group.checked_out_by == request.user.username:
        group.checkout(group.group_name, request.user)
    return render(request, 'group_detail.html', {'group': group})
########################### Окончание Группы ############################
|
7,485 | 3eb40dfe68573b93c544a2279ac5c8728ae9601f | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from tests import unittest
from kepler.descriptors import *
class DescriptorsTestCase(unittest.TestCase):
    """Behavioural tests for the Enum descriptor."""

    def testEnumDefaultsToNoopMapper(self):
        # Without an explicit mapper the assigned value is stored untouched.
        class Record(object):
            cat = Enum(name='cat', enums=['Lucy Cat', 'Hot Pocket'])

        record = Record()
        record.cat = 'Lucy Cat'
        self.assertEqual(record.cat, 'Lucy Cat')

    def testEnumAppliesProvidedMapper(self):
        # A supplied mapper normalises the value before it is stored.
        class Record(object):
            cat = Enum(name='cat', enums=['LUCY CAT', 'HOT POCKET'],
                       mapper=lambda value: value.upper())

        record = Record()
        record.cat = 'Hot Pocket'
        self.assertEqual(record.cat, 'HOT POCKET')
|
7,486 | 1b43125c2ebffd0a268a4a0ffdbbf407de7b0374 | ''' Compress images '''
from PIL import Image
def resizeImage(image_file):
    """Shrink *image_file* in place so its longest side is at most 1000 px.

    Smaller images are left untouched; the original file is overwritten.
    The file was a mix of Python 2 print statements and Python 3 print()
    calls (it could not parse under either cleanly) -- normalised to
    print() functions, and the bare except narrowed to I/O errors.
    """
    try:
        img = Image.open(image_file)
        width, height = img.size
        max_dim = max(img.size)
        if max_dim > 1000:
            # scale both sides by the same factor to preserve aspect ratio
            factor = 1000. / max_dim
            new_width = int(width * factor)
            new_height = int(height * factor)
            # NOTE(review): Image.ANTIALIAS is deprecated in Pillow >= 9.1
            # (removed in 10); on modern Pillow use Image.LANCZOS instead.
            resized_image = img.resize((new_width, new_height), Image.ANTIALIAS)
            print(width, height, new_width, new_height)
            # save over the existing file
            resized_image_file = image_file
            resized_image.save(resized_image_file)
            print("%s resized" % resized_image_file)
    except (IOError, OSError):
        # missing file or unreadable image -- report and continue
        print('Cannot open ' + image_file)
# Resize every downloaded image (1.jpg .. 2713.jpg) in place.
for image_no in range(1, 2714):
    resizeImage("static/images/download/%d.jpg" % image_no)
7,487 | fdf76ff20260c25d95a9bf751fa78156071a7825 | class Helper:
def __init__(self):
self.commands = ["help",
"lottery",
"poll",
"polling",
"prophecy",
"roll",
"team",
"ub"]
self.commands_full = ["help [command]",
"lottery [seconds]",
"poll",
"polling [poll number]",
"prophecy",
"roll [number]",
"team [type]",
"ub [role]"]
self.command_dict = {}
self.init_command_info()
# noinspection PyMethodMayBeStatic
def display_help(self):
result = "Help has been requested?\n\n" \
"__**Syntax to Summon Me**__\n" \
"Arise! [command] [options (optional)]\n\n" \
"__**Available Commands**__\n"
for com in self.commands_full:
result += f"{com}\n"
result += "\nIf you want more info on a specific command, " \
"use the command \"help\" followed by a command of your choice. " \
"**For example: Arise! help roll**" \
"\nI'm sure Azir will be glad to help you out... I love him so much..."
return result
# noinspection PyMethodMayBeStatic
def display_command(self, command):
if command not in self.commands:
return "That command doesn't exist :/"
result = f"__**Command: {command[0].upper()}{command[1:]}**__\n\n"
result += self.command_dict[command]
return result
# noinspection PyMethodMayBeStatic
def init_command_info(self):
self.command_dict["help"] = "Did somebody say recursion?"
self.command_dict["lottery"] = "**Syntax:** Arise! lottery [seconds]\n\n" \
"__**Description**__\n" \
"Azir's lottery selects one lucky winner from a pool. To enter the pool, " \
"react to the lottery message with ***any*** emoji. I do not discriminate. " \
"The default timer is **60 seconds**. Upon request, a different number of " \
"seconds may be allowed."
self.command_dict["poll"] = "**Syntax:** Arise! poll\n\n" \
"__**Description**__\n" \
"You have questions and I'll help you set them up. Follow the step-by-step " \
"instructions. When you have finished them all, use the polling command to " \
"ask away."
self.command_dict["polling"] = "**Syntax:** Arise! polling [poll number]\n\n" \
"__**Description**__\n" \
"This command allows you to use the poll you've created. If you have multiple " \
"polls, you may enter a number to specify which poll. The default is the first."
self.command_dict["prophecy"] = "Prepare yourself."
self.command_dict["roll"] = "**Syntax:** Arise! roll [number]\n\n" \
"__**Description**__\n" \
"Azir needs random numbers *all* the time so he thought you may need some too. " \
"This command produces a random number from 1 to the default value of **10**. " \
"If you want to roll up to a different number, let me know."
self.command_dict["team"] = "**Syntax:** Arise! team [type]\n\n" \
"__**Description**__\n" \
"Do you want to play a team with a theme? The Shuriman Empire has just the " \
"solution for you! With 25 different groupings (wow. Wow! WOW!!), you'll be " \
"having fun forever :) The default value for [type] is **0** in which you'd " \
"get any random team. To select a team based on location, use **1**. To select " \
"a *funner* team, use **2**."
self.command_dict["ub"] = "**Syntax:** Arise! ub [role]\n\n" \
"__**Description**__\n" \
"Oh, how I love Ultimate Bravery. No one is as good at this game mode as Azir. " \
"**NO ONE!**... Ahem... So basically, you are given a random champion and a build. " \
"Here are the general guidelines:\n\n" \
"1. Don't play this alone. Azir forbids it.\n" \
"2. No rerolling if the champion or build is undesirable.\n" \
"3. Okay, rerolling is allowed is the majority of the group agrees.\n" \
"4. Feel free to use any rune page. Choose wisely.\n" \
"5.a) Build the items in the order that they've been delivered.\n" \
"5.b) The first two items are interchangeable.\n" \
"6. Try your best to win. That's the whole point of this game.\n\n" \
"The default value for [role] is **1**. To select a jungle specific build, " \
"use **2**. To select a support specific build, use **3**."
|
7,488 | 5ad8db85f4f705173cf5d0649af6039ebe1544b2 | text=open('mytext.txt','w')
x=text.write("I like coding\nit is a new part\nof my life!!!")
text=open('mytext.txt')
read=text.readlines()
i=0
counter=0
total=0
print("number of lines :"+str(len(read)))
while i<=len(read)-1:
counter=counter+read[i].count('\n') + read[i].count(' ')
total+=len(read[i])-read[i].count('\n') - read[i].count(' ')
i+=1
counter+=1
print('Number of words is :'+str(counter))
print('total number of letters are :' +str(total)) |
7,489 | 973a58013160cbc71ca46f570bde61eaff87f6a7 | from Adafruit_LSM9DS0 import Adafruit_LSM9DS0
import math
imu = Adafruit_LSM9DS0()
pi = 3.14159265358979323846  # kept for compatibility; math.pi is equivalent
r2d = 57.2957795  # 1 radian in degrees
loop = 0.05  # presumably the control-loop period in seconds -- TODO confirm
tuning = 0.98  # Constant for tuning Complimentary filter


def accel_to_angles(ax, ay, az):
    """Convert raw accelerometer components to tilt angles in radians.

    Returns (xAngle, yAngle, zAngle).  The original code had placeholder
    assignments (``ax = #x``) that did not parse and called ``sqrt``
    without the ``math.`` prefix; both are fixed by taking the readings
    as parameters.  Multiply by ``r2d`` (or use math.degrees) for degrees.
    """
    xAngle = math.atan(ax / (math.sqrt(ay**2 + az**2)))
    yAngle = math.atan(ay / (math.sqrt(ax**2 + az**2)))
    zAngle = math.atan(math.sqrt(ax**2 + ay**2) / az)
    return xAngle, yAngle, zAngle
|
7,490 | 7ae328bcfdec2d17fceb5d707f13cf495fde4469 | import os
import re
import time
import numpy as np
import pandas as pd
from sklearn.cluster import AgglomerativeClustering
import math
import edlib
from progress.bar import IncrementalBar as Bar
from multiprocessing import Pool
import argparse
# Command-line interface and module-level lookup tables for the aligner.
parser = argparse.ArgumentParser()
parser.add_argument("--pools",
                    default=4,
                    type=int,
                    help="Number of threads to use in aligning. Default 4. Optional."
                    )
parser.add_argument("--misses",
                    default=5,
                    type=float,
                    help="Number of allowed substitutions/insertions/deletions in aligning a sequence of length k. "
                         "For longer sequences, this is scaled. "
                    )
parser.add_argument("--aligned",
                    default=None,
                    type=str,
                    help="Path to the output aligned directory. Required."
                    )
parser.add_argument("--overview",
                    default=None,
                    type=str,
                    help="Path to the output description csv. Required. Pairs with <--aligned> directory."
                    )
parser.add_argument("--k",
                    default=-1,
                    type=int,
                    help="Size of the k-mer created by BCALM. Required."
                    )
parser.add_argument("--input",
                    default=None,
                    type=str,
                    help="Path to the input file."
                    )
parser.set_defaults(all_sqs_result=False)
# When imported (no __file__ in globals), parse an empty argv so defaults apply.
args = parser.parse_args([] if "__file__" not in globals() else None)
# Base -> profile-row index; row 4 ('-') represents a gap.
bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
# Monotonic id handed to each AlignmentProfile created below.
global_alignment_ident_no = 0
# Edit-operation cost table -- NOTE(review): defined but unused in this file.
operations = {
    '.' : 0,
    '-' : 1,
    '|' : 0
}
class AlignmentProfile:
    """Positional base-count profile (5 x width: A,C,G,T,'-') for one cluster.

    Maintains a representative sequence (argmax per column) and, per added
    sequence, an insertion/deletion mask in ``seq_alignments``.
    """
    def __init__(self, width, df, identifier):
        self.ident = identifier
        self.profile = np.zeros((5, width))  # rows indexed by module-level `bases`
        self.repre_sq = ""
        self.seq_alignments = None # this will be a pandas df
        self.seq_align_counter = -1  # negative column labels for inserted gap columns
        self.calculate_profile(df)
    def calculate_profile(self, df):
        """Seed the profile from a DataFrame with 'sq' and per-position 'count'."""
        # one row per source sequence: (df index, zero mask per column)
        self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])
        # split sequences into one column per character; drop the empty edges
        unwrapped_sq = df['sq'].str.split('', expand=True)
        unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])
        counts = np.stack(df['count'].values)
        for base in bases:
            # mask out positions that are not `base`, then sum counts per column
            a = unwrapped_sq != base
            newX = np.ma.array(counts, mask=a)
            new_counts = newX.sum(axis=0)
            self.profile[bases[base], :] += new_counts
        # repre_sq: most frequent symbol per column
        maxs = np.argmax(self.profile, axis=0)
        self.repre_sq = "".join([rev_bases[x] for x in maxs])
    def add_sequence(self, new_sq, new_counts, nice, sq_index):
        """Fold an edlib 'nice' alignment of new_sq into the profile.

        `nice` is edlib.getNiceAlignment output; gaps in target_aligned mean
        insertions into the profile, gaps in query_aligned mean deletions.
        """
        # locate where the aligned target substring starts within repre_sq
        offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
        x = self.profile
        # padding with the following number of observed positions (sum of all bases)
        # pad profile with insertions
        insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
        for i, index in enumerate(insertions):
            # NOTE(review): `x.shape[1] >= index` is almost always true, so
            # value is nearly always 0 -- looks suspicious; verify intent.
            if x.shape[1] >= index:
                value = 0
            else:
                value = x[:, index].sum()
            x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
            self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)
            self.seq_align_counter -= 1
        # pad new counts with deletions (duplicate the neighbouring count)
        aligned_query = np.array(list(nice['query_aligned']))
        deletions = np.where(aligned_query == '-')[0]
        for i, index in enumerate(deletions):
            value = new_counts[index]
            new_counts = np.insert(new_counts, index, value, axis=0)
        # accumulate the aligned query's counts into the profile
        i = offset
        for base, count in zip(aligned_query, new_counts):
            x[bases[base], i] += count
            i += 1
        self.profile = x
        # store new sequence alignment mask (1 = insertion column, 0 = match)
        added_alignment = -np.ones(self.profile.shape[1])
        for i, char in enumerate(nice['target_aligned']):
            if char == '-':
                added_alignment[offset + i] = 1
            else:
                added_alignment[offset + i] = 0
        self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row
        self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index
        # recalculate repre_sq -- the most probable one
        maxs = np.argmax(self.profile, axis=0)
        self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq
def dst_func(x, y):
    """Hamming-style distance: number of positions where *x* and *y* differ."""
    mismatches = np.array(x) != np.array(y)
    return mismatches.sum()
def read_alignment(filename):
    """Yield (sequence, counts-array, raw-count-string) per line of *filename*.

    Each line looks like ``SEQ;c1,c2,...``.  The file is now opened with a
    context manager so the handle is closed deterministically (the original
    leaked it until garbage collection).
    """
    with open(filename) as handle:
        for line in handle:
            sq, count = line.strip('\n').split(';')
            yield sq, np.array([int(x) for x in count.split(',')]), count
def cluster_group(df_group, l, dst=dst_func):
    """Complete-linkage agglomerative clustering of the group's sequences.

    Builds a symmetric pairwise distance matrix with *dst* and cuts the
    dendrogram at the module-level ``threshold`` scaled by length *l*.
    Returns one cluster label per sequence.
    """
    sequences = df_group.reset_index()['sq']
    total = len(sequences)
    if total <= 1:
        # nothing to cluster -- a single (or no) sequence gets label 0
        return np.zeros(total)
    distances = np.zeros((total, total))
    for row in range(total):
        for col in range(row):
            value = dst(sequences[row], sequences[col])
            distances[row, col] = value
            distances[col, row] = value
    model = AgglomerativeClustering(distance_threshold=threshold * l,
                                    n_clusters=None,
                                    linkage='complete',
                                    affinity='precomputed')
    return model.fit_predict(distances)
# ---- Pipeline setup: resolve CLI options and cluster the longest sequences.
aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools
# allowed edit fraction per base of sequence length
threshold = misses / k
if args.aligned is None:
    output_profile_dir = aligned_sqs_file + "_profiles"
else:
    output_profile_dir = args.aligned
if args.overview is None:
    output_csv_file = aligned_sqs_file + "_overview.csv"
else:
    output_csv_file = args.overview
# read
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()
# df['alignment'] = -1 # every aligned sq has an alignment identification
groups = df.groupby(by='length')
# process length groups from longest to shortest
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []  # all AlignmentProfile instances built so far
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters
alignments = {
}
# one profile per cluster of the longest sequences
for cluster, cluster_df in df_group.groupby(by='cluster'):
    alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
    alignments[global_alignment_ident_no] = alignment
    global_alignment_ident_no += 1
    against.append(alignment)
    # df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident
# to each sequence
start = time.time()
# print(df.groupby(by='length').get_group(longest))
# print("running on shorter")
# ---- Main loop: align each shorter length group against existing profiles,
# cluster whatever could not be aligned, and grow `against` as we go.
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
    for length in unique_lengths[1:]:
        bar.next()
        df_group = groups.get_group(length).copy()
        def getDistanceAndAlignment(sq):
            """Return (best edit distance, profile ident) for sq, or (inf, None)."""
            # this is a fallback, it should not happen
            maxval = np.floor(threshold * len(sq))
            min = np.inf
            min_target = None
            if maxval < 1:
                return min,min_target
            for target in against:
                align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
                if align_res['editDistance'] != -1:
                    if min > align_res['editDistance']:
                        # perfect match -- cannot do better, stop early
                        if align_res['editDistance'] == 0:
                            return align_res['editDistance'], target.ident
                        min = align_res['editDistance']
                        min_target = target
            if min_target is not None:
                min_target = min_target.ident
            return min, min_target
        x = length * threshold
        if length * threshold >= 1:
            # try align
            with Pool(pools) as pool:
                result = pool.map(getDistanceAndAlignment, df_group['sq'])
            df_group['aligned'] = result
            # add aligned to profiles
            aligned = df_group[df_group['aligned'] != (np.inf, None)]
            for index, row in aligned.iterrows():
                to = alignments[row['aligned'][1]]
                align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
                nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
                to.add_sequence(row.sq, row['count'], nice, index)
                # df.loc[df['sq'] == row.sq, 'alignment'] = to.ident
            # cluster unaligned, add to against
            unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
            clusters = cluster_group(unaligned, length)
            unaligned['cluster'] = clusters
            for cluster, cluster_df in unaligned.groupby(by='cluster'):
                alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
                alignments[global_alignment_ident_no] = alignment
                global_alignment_ident_no += 1
                against.append(alignment)
        else:
            # threshold is less than one, no clustering nor alignment takes place
            df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
            unaligned = df_group.copy()
            unaligned["cluster"] = list(range(len(unaligned)))
            # print(f"pseudoclustering elapsed: {time.time() - s}")
            s = time.time()
            # every sequence becomes its own single-row profile
            for i, row in unaligned.iterrows():
                cluster_df = pd.DataFrame(row).T
                alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
                alignments[global_alignment_ident_no] = alignment
                global_alignment_ident_no += 1
                against.append(alignment)
            # print(f"alignment elapsed: {time.time() - s}")
print(f"{aligned_sqs_file} elapsed: {time.time() - start}")
# ---- Output: dump each profile matrix and a per-sequence overview CSV.
print(f"{aligned_sqs_file} writing...")
os.makedirs(output_profile_dir, exist_ok=True)
for alignment in against:
    filename = f"{output_profile_dir}/{alignment.ident}.prf"
    np.save(filename, alignment.profile)
# get actual alignment for each sq
all_alignments = []
for alignment in against:
    itemized = alignment.seq_alignments
    num_cols = itemized.columns[1:]
    # index_col = itemized.columns[0]
    # translate to sth readable
    for col in num_cols:
        itemized[col] = itemized[col].astype(int).apply(str)
    itemized['alignment_actual'] = itemized[num_cols].agg(','.join, axis=1)  # todo maybe cigar?
    itemized = itemized.drop(columns=num_cols)
    itemized.columns = ['index_df', 'alignment_actual']
    itemized['alignment'] = alignment.ident
    all_alignments.append(itemized)
all_alignments = pd.concat(all_alignments)
# join alignment info back onto the original sequence rows by index
merged = pd.merge(all_alignments, df, left_on='index_df', right_index=True)
# write sequences in df
merged.drop(columns=['count', 'index_df']).to_csv(output_csv_file, index=False)
print(f"{aligned_sqs_file} done")
|
7,491 | 763d448bc447b88d5f2de777a475a1dd50906527 | class Solution:
def jump(self, nums: List[int]) -> int:
l = len(nums)
jump = 0
curEnd = 0
curFarthest = 0
for i in range(l-1):
curFarthest= max(curFarthest,i+nums[i])
if i==curEnd:
jump+=1
curEnd = curFarthest
return jump |
7,492 | 24b1afb18e1cfdc8d5a62f5ee0147b2d73bc10d8 | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Concurrent executor provides concurrent executing function either in
a thread pool or a process pool
"""
import splunktalib.concurrent.process_pool as pp
import splunktalib.concurrent.thread_pool as tp
class ConcurrentExecutor:
    """Run callables concurrently: a thread pool for I/O-bound work and an
    optional process pool for compute-bound work."""

    def __init__(self, config):
        """
        :param config: dict like object, contains thread_min_size (int),
               thread_max_size (int), daemonize_thread (bool),
               process_size (int)
        """
        self._io_executor = tp.ThreadPool(
            config.get("thread_min_size", 0),
            config.get("thread_max_size", 0),
            config.get("task_queue_size", 1024),
            config.get("daemonize_thread", True),
        )
        # process pool is only created when a positive process_size is given
        self._compute_executor = None
        if config.get("process_size", 0):
            self._compute_executor = pp.ProcessPool(config.get("process_size", 0))

    def start(self):
        """Start the I/O thread pool."""
        self._io_executor.start()

    def tear_down(self):
        """Stop the thread pool and, when configured, the process pool."""
        self._io_executor.tear_down()
        if self._compute_executor is not None:
            self._compute_executor.tear_down()

    def run_io_func_sync(self, func, args=(), kwargs=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :return whatever the func returns
        """
        return self._io_executor.apply(func, args, kwargs)

    def run_io_func_async(self, func, args=(), kwargs=None, callback=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :callback: when func is done and without exception, call the callback
        :return whatever the func returns
        """
        return self._io_executor.apply_async(func, args, kwargs, callback)

    def enqueue_io_funcs(self, funcs, block=True):
        """
        run jobs in a fire and forget way, no result will be handled
        over to clients
        :param funcs: tuple/list-like or generator like object, func shall be
        callable
        """
        return self._io_executor.enqueue_funcs(funcs, block)

    def run_compute_func_sync(self, func, args=(), kwargs=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :return whatever the func returns
        """
        assert self._compute_executor is not None
        # BUG FIX: default was a mutable dict ({}); normalized to None here and
        # converted back so the process pool still receives a dict as before.
        return self._compute_executor.apply(
            func, args, kwargs if kwargs is not None else {})

    def run_compute_func_async(self, func, args=(), kwargs=None, callback=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :callback: when func is done and without exception, call the callback
        :return whatever the func returns
        """
        assert self._compute_executor is not None
        return self._compute_executor.apply_async(
            func, args, kwargs if kwargs is not None else {}, callback)
|
7,493 | abbefb1e426408b32fa9e125c78b572de22dbb8c | import unittest
from unittest.mock import patch
from fsqlfly.db_helper import *
from fsqlfly.tests.base_test import FSQLFlyTestCase
class MyTestCase(FSQLFlyTestCase):
    """Cascade-delete and config-inheritance tests for the fsqlfly models."""

    def test_positive_delete(self):
        # Deleting a Namespace must cascade to Transforms that reference it.
        namespace = Namespace(name='iii')
        self.session.add(namespace)
        self.session.commit()
        t = Transform(name='test', sql='select 1;', namespace=namespace)
        self.session.add(t)
        self.session.commit()
        self.session.delete(namespace)
        self.session.commit()
        self.assertEqual(self.session.query(Transform).count(), 0)

    def get_create_object(self):
        # Helper: one Connection with a full chain of dependent objects.
        connection = Connection(name='a', url='#', type='hive', connector='text')
        schema = SchemaEvent(name='test', connection=connection, version=1)
        schema2 = SchemaEvent(name='test2', connection=connection, version=2)
        r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)
        t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,
                                  schema_version=schema)
        v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,
                                 resource_name=r_name, schema_version=schema)
        return connection, schema, schema2, r_name, t_name, v_name

    def test_positive_delete_connection(self):
        # Deleting the Connection cascades to every dependent table.
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.session.delete(connection)
        self.session.commit()
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        self.assertEqual(self.session.query(Connection).count(), 0)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        self.assertEqual(self.session.query(SchemaEvent).count(), 0)

    def test_positive_delete_connection_by_db_helper(self):
        # Same cascade, but driven through the DBDao facade with save-mode off.
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.assertEqual(self.session.query(Connection).count(), 1)
        DBSession.init_engine(self.engine)
        with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
            res = DBDao.delete('connection', pk=connection.id)
            self.assertEqual(res.success, True)
        # re-open the session so we observe the committed state
        self.session.close()
        self.session = self.get_session()
        self.assertEqual(self.session.query(Connection).count(), 0)
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        self.assertEqual(self.session.query(SchemaEvent).count(), 0)

    def test_positive_delete_other(self):
        # Deleting one SchemaEvent removes its dependents but keeps the
        # Connection and the unrelated second SchemaEvent.
        connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
        self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
        self.session.commit()
        self.session.delete(schema)
        self.session.commit()
        self.assertEqual(self.session.query(Connection).count(), 1)
        self.assertEqual(self.session.query(ResourceName).count(), 0)
        self.assertEqual(self.session.query(Connection).count(), 1)
        self.assertEqual(self.session.query(ResourceVersion).count(), 0)
        self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
        self.assertEqual(self.session.query(SchemaEvent).count(), 1)

    def test_get_connection_and_resource_name_config(self):
        # ResourceName config overrides Connection config key-by-key.
        connection_config = """
[jdbc]
insert_primary_key = false
        """
        resource_name_config = """
[jdbc]
insert_primary_key = true
        """
        connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)
        schema = SchemaEvent(name='test', connection=connection)
        r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,
                              config=resource_name_config)
        self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
        self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
        self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)
        self.assertTrue(r_name.get_config('example11') is None)
        self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
        self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))


if __name__ == '__main__':
    unittest.main()
|
7,494 | ab0c3cf3e43f34874dd94629b746ca1237c3349a | import time
import numpy as np
import matplotlib.pyplot as plt #tutorial: http://pybonacci.org/2012/05/19/manual-de-introduccion-a-matplotlib-pyplot-ii-creando-y-manejando-ventanas-y-configurando-la-sesion/
import threading
from random import shuffle
T = 1
eps = 0.000000001
agilityMin = 1/T
'''------------GOVERMENT'''
class Goverment:
    ''' manage population: owns the cell registry and the global clock '''
    def __init__(self ):
        self.listID = []      # cell ids; an entry is zeroed when the cell retires
        self.listCells = []   # MotherCell instances, parallel to listID
        self.globalTime = 0   # advanced by clock() in steps of T

    def createPopulation(self, position, map, agility, smellInstinct):
        """Create one cell at *position* on *map*; False if the spot is taken."""
        if map.createCell(position) == False:
            return False
        else:
            IDx = len(self.listID)
            self.listID.append(IDx)
            # BUG FIX: use self.globalTime instead of the module-level
            # goverment_i singleton, so the method works on any instance.
            self.listCells.append(MotherCell(IDx, self.globalTime, position, agility, smellInstinct, 5, 5, [10, 10], 5))
            #(ID, time, position, agility, smellInstinct, reproduction, mutability, feeds, mortality)
            return True

    def retirePopulation (self, IDx):
        # mark the slot as retired; the MotherCell instance itself is kept
        self.listID[IDx] = 0

    def clock(self):
        # advance global time by one tick (module-level T)
        self.globalTime += T
'''------------MAP'''
class Map:
    '''Grid world: tracks cell occupancy, handles bounds/collision checks,
    and continuously plots feeds (from nature_i) plus the occupancy grid.'''

    def __init__(self, size, num_feeds):
        self.size = size
        # occupancy grid: 1 where a cell stands, 0 otherwise
        self.map_cells = np.zeros((self.size, self.size))

    def available(self, position):
        """True when (row, col) is inside the grid and unoccupied."""
        row, col = position[0], position[1]
        if row < 0 or row > (self.size - 1):
            return False
        if col < 0 or col > (self.size - 1):
            return False
        return not (self.map_cells[row, col] == 1)

    def moveInMap(self, actual_position, position):
        """Move a cell; a move onto its own square is a no-op success."""
        if actual_position == position:
            return True
        if not self.available(position):
            return False
        self.map_cells[position[0]][position[1]] = 1
        self.map_cells[actual_position[0]][actual_position[1]] = 0
        return True

    def createCell(self, pos):
        """Occupy *pos* for a newly created cell; False when already taken."""
        if self.map_cells[pos[0]][pos[1]] == 1:
            return False
        self.map_cells[pos[0]][pos[1]] = 1
        return True

    def ploting(self):
        """Redraw both feed maps and the cell grid forever (blocking loop)."""
        plt.ion()
        plt.figure()
        while True:
            feed_ax_a = plt.subplot2grid((2, 2), (0, 0))
            feed_ax_b = plt.subplot2grid((2, 2), (0, 1))
            cells_ax = plt.subplot2grid((2, 2), (1, 0), colspan=2)
            feed_ax_a.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0, vmax=nature_i.abundance)
            feed_ax_b.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0, vmax=nature_i.abundance)
            cells_ax.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)
            plt.draw()
            plt.pause(0.05)
'''------------NATURE'''
class Nature:
    '''manage feed seeds: builds the per-feed maps and adds/removes feed units'''

    def __init__(self, abundance, num_feeds, size):
        self.abundance = abundance   # upper bound for the random initial feed per tile
        self.num_feeds = num_feeds   # number of distinct feed types
        self.feeds = 0               # NOTE(review): never read anywhere visible
        self.size = size
        map_feed_size = np.zeros((self.num_feeds, self.size, self.size))
        self.map_size = map_feed_size.shape
        # random initial feed amount in [0, abundance) per feed type and tile
        self.map_feeds = np.random.randint(0, self.abundance, size=self.map_size)

    def deleteFeed(self, position, feed):
        """Consume one unit of *feed* at *position*.

        BUG FIX: the original mutated ``map_i.map_feeds`` -- the Map object
        has no ``map_feeds`` attribute (the feed maps live on Nature, as
        Map.ploting's use of nature_i.map_feeds confirms), so this raised
        AttributeError at runtime.
        """
        self.map_feeds[feed][position[0]][position[1]] -= 1

    def createFeed(self, position, feed):
        """Add one unit of *feed* at *position* (same fix as deleteFeed)."""
        self.map_feeds[feed][position[0]][position[1]] += 1
'''------------CELLS'''
class MotherCell:
    '''One simulated cell: smells nearby feeds, moves toward them, eats,
    burns food over time, and dies when its live bar hits mortality.

    Intended life-cycle per tick (several steps are not wired up yet):
    1/ update skills: hungry / mutability / reproductibility / mortality
    2/ check reproduction (not implemented)
    3/ check food: when hungry, smell for feed; eat at distance 0, else move
    4/ check dead: retire through the goverment when liveBar == mortality
    '''
    def __init__(self,ID, time, position, agility, smellInstinct, reproduction, mutability, feeds, mortality):
        self.ID = ID
        # age relative to the global clock at creation time
        self.localTime = goverment_i.globalTime - time
        self.position = position
        # Skills
        self.agility = agilityMin * agility  # movement speed factor, 0--5
        self.smellInstinct = smellInstinct  # radius (tiles) of smelled cells, 0--10
        self.mutability = mutability  # 0--10
        self.mortality = mortality  # 0--10
        # NOTE(review): this numeric attribute shadows the reproduction()
        # method defined below -- calling self.reproduction later fails.
        self.reproduction = reproduction
        self.feeds = feeds  # per-feed stock, e.g. [10, 10]
        # NOTE(review): this also shadows the sweep() method with its result
        # (a list of offsets); sweep() can only be called once, in __init__.
        self.sweep = self.sweep()
        self.moving = False
        self.virtualPos = self.position  # candidate position while moving
    '''------------------------'''
    def updateStates(self):
        # states
        self.liveBar = sum(self.feeds) / len(self.feeds)  # liveBar == mortality => dead
        self.hungry = self.liveBar - self.mortality
        self.burnFood()
        # NOTE(review): the three calls below do not match the method
        # signatures (food/reproduction/dead take no arguments), and
        # self.instinct is never defined -- this method raises if invoked.
        self.food(self.feeds, self.instinct, self.hungry)
        self.reproduction(self.mutability, self.feeds)
        self.dead(self.liveBar, self.mortality, self.ID)
    def reproduction(self):
        # mutability, feeds, time? -- not implemented yet
        pass
    def food(self):
        # feeds, instinct: go looking for food when hungry enough
        if self.hungry >= 4:
            self.smell()
        else:
            pass
    def burnFood(self):
        # burn one unit of every feed each whole time unit
        if self.localTime % 1 == 0:
            for i, x in enumerate(self.feeds):
                self.feeds[i] = x - 1
    def dead(self):
        # mortality: retire this cell when the live bar reaches mortality
        if self.liveBar - self.mortality == 0:
            goverment_i.retirePopulation(self.ID)
    '''------------------------'''
    def smell(self):
        """Scan the sweep offsets around the cell; move toward and eat the
        first feed found, then stop for this tick."""
        for smellingPos in self.sweep:
            pos = (self.position[0] + smellingPos[0], self.position[1] + smellingPos[1])
            # skip offsets that fall outside the map
            if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos[1] >= map_i.size):
                for i in range(len(self.feeds)):
                    feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
                    if feeds != 0:
                        self.move(pos)
                        if map_i.moveInMap(self.position, self.virtualPos) is not True:
                            return
                        else:
                            self.eat((i, pos[0], pos[1]), nature_i)
                            self.position = self.virtualPos
                            # print('position: {}, virtualPos: {}feed({}) remain: {}. sweep: {}'.format(
                            #     self.position,self.virtualPos, i, feeds, smellingPos))
                            time.sleep(0.0005)
                            return
    def move(self, position_smelled):
        # manage agility: step a fraction (T * agility) toward the target
        direct = (position_smelled[0] - self.position[0], position_smelled[1] - self.position[1])
        self.virtualPos = (self.position[0] + (T * self.agility)* direct[0],
                           self.position[1] + (T * self.agility)* direct[1])
        self.virtualPos = int(round(self.virtualPos[0],0)), int(round(self.virtualPos[1],0))
    def eat(self, food, nature_i):#food = (feed, pos, pos)
        # take one unit of the feed from the world into the cell's stock
        self.feeds[food[0]] += 1
        nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
    def sweep(self):
        """Build the spiral list of (row, col) offsets to smell, with the
        first ring shuffled so scanning order is not deterministic."""
        sweep = []
        signo = 1;
        SW = (0, 1);
        j = 1;
        sweep = [(0, 0), (0, 1)]
        # spiral side length grows every second turn
        iterations = (self.smellInstinct*2) + 1
        iterations = (iterations * 2) + ((iterations - 2) * 2)
        for i in range(1, iterations):
            if i % 2 != 0:
                signo = signo * (-1)
                row = 1;
                col = 0
                row = row * signo;
                col = col * signo
                for x in range(j):
                    SW = (SW[0] + row, SW[1] + col)
                    sweep.append(SW)
            if i % 2 == 0:
                j = j + 1
                row = 0;
                col = 1;
                row = row * signo;
                col = col * signo
                for x in range(j):
                    SW = (SW[0] + row, SW[1] + col)
                    sweep.append((SW))
        # shuffle the innermost ring (indices 1..7) only
        shuff = sweep[1:8]
        shuffle(shuff)
        sweep = [sweep[0]] + shuff + sweep[8:]
        return sweep
'''-----------MAIN'''
if __name__ == '__main__':
    # World setup: goverment (population/clock), nature (feed sources) and
    # the shared map grid.
    goverment_i = Goverment()
    num_feeds = 2
    size = 70
    abundance = 3
    nature_i = Nature(3, num_feeds, size)  # abundance and number of feeds
    map_i = Map(size, num_feeds)  # size, num of feeds
    goverment_i.clock()
    # Seed three cells: (position, agility, smellInstinct).
    for seed_pos, seed_agility, seed_instinct in ((5, 5), 1, 5), ((20, 20), 2, 2), ((40, 40), 3, 4):
        goverment_i.createPopulation(seed_pos, map_i, seed_agility, seed_instinct)
    t_map_feeds = threading.Thread(target=map_i.ploting)
    print("Iniciada la vida")
    for idx in range(3):
        print("Cell position: ", goverment_i.listCells[idx].position)
    t_map_feeds.start()
    time.sleep(1)
    # Main loop: let each cell sniff for food, then pause briefly.
    for _ in range(30000):
        for idx in range(3):
            goverment_i.listCells[idx].smell()
        time.sleep(0.005)
|
7,495 | f3b697e20f60e51d80d655ddf4809aa9afdfcd69 | # -*- coding: utf-8 -*-
"""
Editor de Spyder
Este es un archivo temporal.
"""
def largo(l, n):
    """Measure the height between the values located by two consecutive
    scans for equal-neighbour pairs in `l`.

    The first scan walks `l` until n+1 equal-neighbour pairs have been
    counted, remembering the last matched value; the second scan continues
    from there counting up to n more pairs. Returns the absolute difference
    of the two remembered values. As in the original, the first scan may
    raise IndexError if the list runs out of pairs first.
    """
    i = 0
    primera = 0    # value recorded by the first scan
    segunda = 0    # value recorded by the second scan
    repetidos = 1

    # First scan: exit once the counter passes n+1 matches.
    while repetidos <= n + 1:
        if l[i] == l[i + 1]:
            repetidos += 1
            primera = l[i]
        i += 1

    # Second scan: resume where the first stopped; the bounds guard only
    # applies once the counter has reached n (same precedence as before).
    repetidos = 1
    while repetidos < n or (repetidos == n and i < len(l) - 1):
        if l[i] == l[i + 1]:
            repetidos += 1
            segunda = l[i]
        i += 1

    return abs(primera - segunda)
def hayBorde(l, n, h):
    """Return True if the step height measured by largo(l, n) equals h.

    Fix: replaced the redundant `if cond: return True / else: return False`
    pattern with a direct boolean return; behavior is unchanged.
    """
    return largo(l, n) == h
# Smoke test: for this list the two scans land on the 6s and the 10s
# (height |6 - 10| = 4), so this prints True.
print(hayBorde([2,4,4,4,6,6,6,10,10],2,4))
|
7,496 | 1066f86d3a35e892ca2a7054dfc89fe79f1d32c8 | from django.db import models
from helpers.models import BaseAbstractModel
from Auth.models import Profile
# from Jobs.models import UserJob
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Notification(BaseAbstractModel):
    """An in-app notification delivered to one or more user profiles."""
    title = models.CharField(max_length=200)
    body = models.TextField()
    # Reverse accessors: profile.notifications / filter by 'notification'.
    recipients = models.ManyToManyField(to=Profile,
                                        related_name='notifications',
                                        related_query_name='notification')
    # Set once at creation time.
    time_stamp = models.DateTimeField(auto_now_add=True)
    # NOTE(review): `read` is a single flag on the notification itself, not
    # per-recipient, despite the many-to-many recipients — confirm intent.
    read = models.BooleanField(default=False)
# @receiver(post_save, sender=UserJob)
# def job_handler(sender, instance, **kwargs):
# if instance.is_active:
# profile_list = instance.author.profile.all()
# subscribed_users = profile_list.filter(
# Q(user__notification_subscription__in_app_notifications=True) | Q(
# user__notification_subscription__email_notifications=True))
# email_subscribed_users = profile_list.filter(
# user__notification_subscription__email_notifications=True)
# if(subscribed_users.count() >= 1):
# notification = Notification.objects.create(
# title="New Job on Twous",
# body=re.sub(' +', ' ', "{} has published another job \
# titled {}".format(
# instance.author.first_name.capitalize(),
# instance.title)))
# notification.recipients.add(*subscribed_users)
# if(email_subscribed_users.count() >= 1):
# send_emails_to_recipients(notification, email_subscribed_users)
# notification.save()
|
7,497 | 18dc01f3e1672407800e53d80a85ffc8d5b86c17 | #
# Copyright (C) 2020 RFI
#
# Author: James Parkhurst
#
# This code is distributed under the GPLv3 license, a copy of
# which is included in the root directory of this package.
#
import logging
import numpy
from maptools.util import read, write
# Get the logger
logger = logging.getLogger(__name__)
def array_rebin(data, shape):
    """
    Rebin a multidimensional array by summing over equal-sized blocks.

    Args:
        data (array): The input array (3 dimensions)
        shape (tuple): The new shape; each entry must evenly divide the
            corresponding entry of data.shape

    Returns:
        array: The rebinned array with the requested shape
    """
    # Each output dimension must evenly divide the input dimension
    assert data.ndim == len(shape)
    assert data.shape[0] % shape[0] == 0
    assert data.shape[1] % shape[1] == 0
    assert data.shape[2] % shape[2] == 0

    # Interleave (new_dim, bin_factor) pairs so the reshape exposes one
    # extra "factor" axis immediately after each output axis.
    interleaved = []
    for new_dim, old_dim in zip(shape, data.shape):
        interleaved.append(new_dim)
        interleaved.append(old_dim // new_dim)

    # Collapse the factor axes one at a time, starting from the last.
    binned = data.reshape(interleaved)
    for k in range(len(shape)):
        binned = binned.sum(-(k + 1))
    return binned
def mapfile_rebin(input_filename, output_filename, shape=None):
    """
    Rebin the map

    Args:
        input_filename (str): The input map filename
        output_filename (str): The output map filename
        shape (tuple): The new shape of the map
    """
    # Open the input file
    infile = read(input_filename)

    # Get the data
    data = infile.data

    # Remember the original shape: the voxel-size scale factors below must be
    # computed against it. (Bug fix: previously they used data.shape AFTER
    # rebinning, which equals `shape`, so every factor was 1 and the voxel
    # size was never updated.)
    original_shape = data.shape

    # Rebin the data
    logger.info("Resampling map from shape %s to %s" % (original_shape, tuple(shape)))
    data = array_rebin(data, shape)

    # Write the output file
    outfile = write(output_filename, data, infile=infile)

    # Scale the voxel size by the binning factor along each axis
    outfile.voxel_size = (
        outfile.voxel_size["z"] * original_shape[0] // shape[0],
        outfile.voxel_size["y"] * original_shape[1] // shape[1],
        outfile.voxel_size["x"] * original_shape[2] // shape[2],
    )
def rebin(*args, **kwargs):
    """
    Rebin the map.

    Dispatches to mapfile_rebin when called with a filename (positionally or
    via the `input_filename` keyword) and to array_rebin when called with an
    array.
    """
    # Bug fix: the original compared type(args[0]) to the *string* "str",
    # which is never equal to the type object, so filename calls were
    # misdispatched to array_rebin. Use isinstance and explicit parentheses.
    if (len(args) > 0 and isinstance(args[0], str)) or "input_filename" in kwargs:
        func = mapfile_rebin
    else:
        func = array_rebin
    return func(*args, **kwargs)
|
7,498 | 9184779731d6102498934d77b6d3c0283fc594d9 | from pwn import *
# Connection settings for the pwnable.kr "input2" challenge; used only by
# the commented-out ssh block inside main().
hostname = "pwnable.kr"  # remote host
portnum = 2222  # ssh port
username = "input2"  # challenge account
passwd = "guest"  # shared guest password
def main():
    """Build the 100-entry argv for ./input (program name plus 99 "AA"
    arguments), echo the equivalent command line, then print the argv list
    on the same line."""
    args = ["./input"] + ["AA"] * 99
    # Same bytes as printing "./input" followed by 99 " AA" pieces, no newline.
    print("./input" + " AA" * 99, end="")
    print(args)
    '''
    s = ssh(host=hostname,
            port=portnum,
            user=username,
            password=passwd)
    p = s.process(args)
    p.interactive()
    '''


if __name__ == "__main__":
    main()
|
7,499 | 1ce7b292f89fdf3f978c75d4cdf65b6991f71d6f | # Generated by Django 2.2.1 on 2019-05-05 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.1).

    Alters two CharFields on the `divida` model: `id_cliente`
    (max_length=10) and `motivo` (max_length=100).
    """

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='divida',
            name='id_cliente',
            field=models.CharField(max_length=10),
        ),
        migrations.AlterField(
            model_name='divida',
            name='motivo',
            field=models.CharField(max_length=100),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.