text stringlengths 38 1.54M |
|---|
import os, sys
# Make tflib/ and language_helpers importable from the working directory.
sys.path.append(os.getcwd())
import time
import numpy as np
import tensorflow as tf
import language_helpers
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv1d
import tflib.plot
from keras.utils import to_categorical
import copy
import pickle
# Generator hyperparameters (must match the values used at training time).
BATCH_SIZE = 64  # samples produced per session.run
SEQ_LEN = 33     # generated sequence length in characters
DIM = 256        # convolution channel width
LAMBDA = 10      # NOTE(review): unused here -- presumably the WGAN-GP penalty weight
############
# Load the char <-> index vocabularies produced at training time.
try:
    with open("../dictionary.pickle", 'rb') as f:
        charmap = pickle.load(f)
    with open("../inverted_dictionary.pickle", 'rb') as f:
        inv_charmap = pickle.load(f)
except Exception as e:
    print(e)
    # exit() is intended for interactive sessions; sys.exit also sets a
    # nonzero status code so callers can detect the failure.
    sys.exit(1)
#####################################################################
def softmax(logits):
    """Softmax over the vocabulary axis, preserving the input's shape."""
    flat = tf.reshape(logits, [-1, len(charmap)])
    probs = tf.nn.softmax(flat)
    return tf.reshape(probs, tf.shape(logits))
def make_noise(shape):
    """Sample standard-normal noise of the given shape (generator latent input)."""
    return tf.random_normal(shape)
def ResBlock(name, inputs):
    """Residual block: ReLU -> Conv1D(k=5) -> ReLU -> Conv1D(k=5), damped skip.

    `name` scopes the two conv layers' parameters; channel width stays DIM.
    """
    output = inputs
    output = tf.nn.relu(output)
    output = lib.ops.conv1d.Conv1D(name+'.1', DIM, DIM, 5, output)
    output = tf.nn.relu(output)
    output = lib.ops.conv1d.Conv1D(name+'.2', DIM, DIM, 5, output)
    # The residual branch is scaled by 0.3 before being added back.
    return inputs + (0.3*output)
def Generator(n_samples, prev_outputs=None):
    """Map latent noise to a (n_samples, SEQ_LEN, len(charmap)) softmax tensor.

    NOTE(review): prev_outputs is accepted but never used.
    """
    output = make_noise(shape=[n_samples, 128])
    output = lib.ops.linear.Linear('Generator.Input', 128, SEQ_LEN*DIM, output)
    output = tf.reshape(output, [-1, DIM, SEQ_LEN])
    output = ResBlock('Generator.1', output)
    output = ResBlock('Generator.2', output)
    output = ResBlock('Generator.3', output)
    output = ResBlock('Generator.4', output)
    output = ResBlock('Generator.5', output)
    # Project conv channels to vocabulary size, then transpose to
    # (batch, seq, vocab) before the per-position softmax.
    output = lib.ops.conv1d.Conv1D('Generator.Output', DIM, len(charmap), 1, output)
    output = tf.transpose(output, [0, 2, 1])
    output = softmax(output)
    return output
# Build the sampling graph: soft character distributions plus argmax indices.
fake_inputs = Generator(BATCH_SIZE)
fake_inputs_discrete = tf.argmax(fake_inputs, fake_inputs.get_shape().ndims-1)
###########################
saver = tf.train.Saver()
with tf.Session() as session:
    # Restore trained weights if a checkpoint exists; otherwise fall back to
    # random initialisation (generated text will then be meaningless).
    ckpt = tf.train.get_checkpoint_state("./model")
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        session.run(tf.initialize_all_variables())
        print("No model found")

    def generate_samples():
        """Run the generator once and decode the batch into tuples of chars."""
        samples,fake_inputs_discretez = session.run([fake_inputs,fake_inputs_discrete])
        # Decode via argmax over the softmax outputs.
        # NOTE(review): the fetched fake_inputs_discrete tensor is unused.
        samples = np.argmax(samples, axis=2)
        decoded_samples = []
        for i in range(len(samples)):
            decoded = []
            for j in range(len(samples[i])):
                decoded.append(inv_charmap[samples[i][j]])
            decoded_samples.append(tuple(decoded))
        return decoded_samples

    # Draw 10 batches and write every sample, one joined string per line.
    samples = []
    for i in range(10):
        sample=generate_samples()
        samples.extend(sample)
    with open("TESTING.txt", 'w') as f:
        for s in samples:
            s = "".join(s)
            f.write(s + "\n")
import logging
import socket
import sys
import time
import threading
# Send INFO-and-above log records to stdout for simple console tracing.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
class DroneManager(object):
    """Minimal Tello-style drone controller speaking the UDP text protocol."""

    def __init__(self, host_ip='192.168.10.2', host_port=8889,
                 drone_ip='192.168.10.1', drone_port=8889):
        self.host_ip = host_ip
        self.host_port = host_port
        self.drone_ip = drone_ip
        self.drone_port = drone_port
        self.drone_address = (drone_ip, drone_port)
        # UDP socket (SOCK_DGRAM), bound locally so we can receive replies.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.bind((self.host_ip, self.host_port))
        # Latest status/response message from the drone (None = nothing pending).
        self.response = None
        # Event used to tell the receiver thread to stop.
        self.stop_event = threading.Event()
        # Background thread that keeps draining responses from the socket.
        self._response_thread = threading.Thread(target=self.receive_response,
                                                 args=(self.stop_event,))
        self._response_thread.start()
        # Enter SDK command mode and enable the video stream.
        self.send_command('command')
        self.send_command('streamon')

    def receive_response(self, stop_event):
        """Receiver loop: stash each UDP reply in self.response until stopped."""
        while not stop_event.is_set():
            try:
                self.response, ip = self.socket.recvfrom(3000)
                logger.info({'action': 'receive_response', 'response': self.response})
            except socket.error as ex:
                logger.error({'action': 'receive_response', 'ex': ex})
                break

    def __del__(self):
        # BUG FIX: this was misspelled __dell__, so cleanup never ran on GC.
        self.stop()

    def stop(self):
        """Stop the receiver thread, then close the socket."""
        self.stop_event.set()
        # Wait up to ~9s for the receiver to exit before closing the socket;
        # closing first would make recvfrom fail with "bad file descriptor".
        retry = 0
        # is_alive(): camelCase isAlive() was removed in Python 3.9.
        while self._response_thread.is_alive():
            time.sleep(0.3)
            if retry > 30:
                break
            retry += 1
        self.socket.close()

    def send_command(self, command):
        """Send a text command and poll briefly (~1.2s) for the drone's reply.

        Returns the decoded reply string, or None on timeout.
        """
        logger.info({'action': 'send_command', 'command': command})
        self.socket.sendto(command.encode('utf-8'), self.drone_address)
        retry = 0
        while self.response is None:
            time.sleep(0.3)
            if retry > 3:
                break
            retry += 1
        if self.response is None:
            response = None
        else:
            response = self.response.decode('utf-8')
        self.response = None
        return response

    def takeoff(self):
        return self.send_command('takeoff')

    def land(self):
        return self.send_command('land')
# Run a short demo flight when executed directly (not when imported).
if __name__ == '__main__':
    drone_manager = DroneManager()
    drone_manager.takeoff()
    time.sleep(5)  # hover for five seconds
    drone_manager.land()
    drone_manager.stop()
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from data_models.object_detection_models import ImageData
from settings import MATPLOTLIB_COLORS
import cv2
def display_img_data(img_data: ImageData, class_mapping=None):
    """Show an image with its bounding boxes drawn as colored rectangles."""
    if class_mapping is None:
        class_mapping = {'Person': 0, 'Car': 1}
    img = cv2.imread(img_data.img_file_path)
    loaded_height, loaded_width = img.shape[:2]
    if loaded_height != img_data.img_height or loaded_width != img_data.img_width:
        print('need to resize image')
        img = cv2.resize(img, (img_data.img_width, img_data.img_height))
    # OpenCV loads BGR; matplotlib expects RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    fig: Figure = plt.figure()
    axes: Axes = plt.axes()
    axes.imshow(img)
    for bbox in img_data.bboxes:
        rect = Rectangle(
            (bbox.x1, bbox.y1),
            width=bbox.get_width(),
            height=bbox.get_height(),
            linewidth=2,
            edgecolor=MATPLOTLIB_COLORS[class_mapping[bbox.class_name]],
            facecolor='none',
        )
        axes.add_patch(rect)
    fig.add_axes(axes)
    plt.show()
import requests
from bs4 import BeautifulSoup
def kino(k,i,j):
    """Send the i-th movie title and description from the NGS afisha page to Telegram chat k.

    k: chat id; i: which listing to send; j: cache marker -- any value other
    than 'кино' forces a fresh download of the listings page into test.html.

    NOTE(review): the bot token is hard-coded in the URLs below; it should be
    moved to configuration and the exposed token revoked.
    """
    if (j != 'кино'):
        # First use for this session: cache the afisha page to disk.
        url = 'http://afisha.ngs.ru/afisha/1/'
        r = requests.get(url)
        with open('test.html', 'w') as f:
            f.write(r.text)
        j = 'кино'
    # Movie titles.
    with open('test.html', 'r') as f:
        soup = BeautifulSoup(f,'lxml')
    text = soup.find_all('a', {'class': 'poster-title'})
    requests.post('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/sendMessage?chat_id='+str(k)+'&text=Как насчёт этого?')
    requests.post('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/sendMessage?chat_id='+str(k)+'&text='+text[i].text)
    # Movie descriptions (same cached page, parsed again).
    with open('test.html', 'r') as f:
        soup = BeautifulSoup(f,'lxml')
    text = soup.find_all('a',{'class':'desc_text'})
    requests.post('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/sendMessage?chat_id='+str(k)+'&text='+text[i].text)
    requests.post('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/sendMessage?chat_id='+str(k)+'&text=Если вам не понравился фильм, то повторите команду /cinema ещё раз')
# Naive long-polling loop: reads the raw getUpdates response and extracts the
# last message_id / chat_id / text by string scanning instead of JSON parsing.
# NOTE(review): fragile -- any field-order or formatting change in the API
# response breaks the scan; parsing text1.json() would be robust.
old_message_id = 0
i = 0            # which movie listing to send next (per chat)
old_chat_id = 0
while (1 == 1):
    chat_id = ''
    chat_text = ''
    message_id = ''
    text1 = requests.get('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/getUpdates')
    #message_id
    # Copy digits after the last `"message_id":` until the closing comma.
    r = text1.text.rfind('message_id')
    r = r + 12
    while (text1.text[r] != ','):
        message_id = message_id + text1.text[r]
        r = r + 1
    print(message_id)
    #chat_id
    # Copy digits after the last `"id":` until the closing comma.
    r = text1.text.rfind('id')
    r = r + 4
    while (text1.text[r] != ','):
        chat_id = chat_id + text1.text[r]
        r = r + 1
    # A new chat resets the listing counter.
    if (old_chat_id != chat_id):
        i = 0
        old_chat_id = chat_id
    print(chat_id)
    #text
    # Copy the message text after the last `"text":"` until the closing quote.
    r = text1.text.rfind('text')
    r = r + 7
    while (text1.text[r] != '"'):
        chat_text = chat_text + text1.text[r]
        r = r + 1
    #message
    # Only react once per message (old_message_id deduplicates).
    if (chat_text == '/start')and(old_message_id != message_id):
        requests.post('https://api.telegram.org/bot534921788:AAF1aUYd5S62PQY9pKDKK4y7j4SSFbXm9Es/sendMessage?chat_id='+str(chat_id)+'&text=Команды: /cinema')
    if (chat_text == '/cinema')and(old_message_id != message_id):
        kino(chat_id,i,'')
        i = i+1
    old_message_id = message_id
|
import numpy as np
import matplotlib.pyplot as plt
# • the thermal coupling constant C = 9.96 × 10 6 ,
# • the emissivity = 0.62 (corresponding to the fact that a part of the
# outgoing radiation is hold back in the atmosphere due to the green-
# house effect),
# • the Boltzmann constant σ = 5.67 × 10 −8
# • the solar constant S = 136
class Exercise1:
    """Explicit-Euler integration of a zero-dimensional energy balance model."""

    def __init__(self, year, steps):
        # Thermal coupling constant C.
        self.c = 9.96 * (10**6)
        # Emissivity (part of the outgoing radiation is held back by the
        # greenhouse effect).
        self.e = 0.62
        # NOTE(review): named `alpha` but holds the Stefan-Boltzmann constant
        # sigma = 5.67e-8; energy_balance_model ALSO uses it as the albedo in
        # (1 - alpha). That double duty looks like a mix-up -- confirm whether
        # a separate albedo constant was intended.
        self.alpha = 5.67 * (10 ** -8)
        # Solar constant S (value per the exercise sheet).
        self.s = 136
        self.steps = steps
        self.n = steps                      # number of Euler steps
        self.delet_t = int(year/steps)      # step size Δt (truncated to int)
        self.T = year                       # total simulated time
        self.times = np.arange(0,self.T,self.delet_t)

    def c1(self):
        # Coefficient of the incoming-radiation term: 1 / (4C).
        return 1/(4*self.c)

    def c2(self):
        # Coefficient of the outgoing-radiation term: sigma * epsilon / C.
        return (self.alpha*self.e)/self.c

    # ẏ(t) = c1*S(1 − α) − c2*y(t)^4 =: f (y(t), t)
    # Energy balance model
    def energy_balance_model(self, y_k):
        """Right-hand side f(y) of the energy balance ODE."""
        return (self.c1() * (self.s * (1-self.alpha))) - (self.c2() * (y_k ** 4))

    # Explicit Euler: y_{k+1} = y_k + Δt * f(y_k, t_k)
    def simulate(self,init_temparature):
        """Integrate from the given initial temperature; returns the trajectory."""
        temperature = np.zeros(int(self.n))
        temperature[0] = init_temparature
        for k in range(1, self.n):
            y_k = temperature[k-1]
            temperature[k] = y_k + self.delet_t * self.energy_balance_model(y_k)
        return temperature

    def plots_data(self,intital_temparatures):
        """Plot one trajectory per initial temperature and save to graph1.png."""
        fig,graph = plt.subplots()
        graph.set_xlabel("Time")
        graph.set_ylabel("Temperature")
        for v in intital_temparatures:
            temp = self.simulate(v)
            graph.plot(self.times,temp)
        fig.savefig("graph1.png")
# Run the exercise: total time 10e6 in 100 Euler steps, sweeping the initial
# temperature from 50 to 850 in steps of 50.
T = 10e6
n = 100
obj = Exercise1(T,n)
intital_temparatures = np.arange(50,900,50)
obj.plots_data(intital_temparatures)
"""
This ``urls.py`` is only used when running the tests via ``runtests.py``.
As you know, every app must be hooked into your main ``urls.py`` so that
you can actually reach the app's views (provided it has any views, of course).
"""
from django.conf.urls.defaults import include, patterns, url
from django.contrib import admin
from django.views.generic import UpdateView
from test_app.forms import DummyProfileModelForm
from test_app.models import DummyProfileModel
# NOTE(review): django.conf.urls.defaults and patterns() were removed in
# modern Django (1.6 / 1.10) -- this urlconf targets a legacy release.
admin.autodiscover()
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    # Generic update view for a dummy profile, selected by primary key.
    url(r'(?P<pk>\d+)/',
        UpdateView.as_view(
            model=DummyProfileModel, form_class=DummyProfileModelForm),
        name='dummy_profile_update')
)
|
import random
import gym
from gym import spaces
import numpy as np
def tempoIntToStr(tempo):
    """Convert an integer millisecond count to an 'HH:MM:SS.mmm' string."""
    MS = 1000
    horas, resto = divmod(tempo, 3600 * MS)
    minutos, resto = divmod(resto, 60 * MS)
    segundos, ms = divmod(resto, MS)
    return '{:02d}:{:02d}:{:02d}.{:03d}'.format(
        int(horas), int(minutos), int(segundos), ms)
# Number of feature rows in each observation window.
colunas = 8

class StockTradingEnv(gym.Env):
    """A stock trading environment for OpenAI gym"""
    metadata = {'render.modes': ['human']}

    def __init__(self, df, comeco_dias, passos):
        super(StockTradingEnv, self).__init__()
        self.passos = passos            # observation window length in steps
        self.df = df                    # price/indicator time series
        self.comeco_dias = comeco_dias  # row indices where trading days start
        # Per-column maxima (and the price minimum) used to scale features
        # into [0, 1] for observations and to de-normalise prices for P&L.
        self.max_preco = df['preco'].max()
        self.min_preco = df['preco'].min()
        self.max_hr_int = df['hr_int'].max()
        self.max_preco_pon = df['preco_pon'].max()
        self.max_qnt_soma = df['qnt_soma'].max()
        self.max_max = df['max'].max()
        self.max_min = df['min'].max()
        self.max_IND = df['IND'].max()
        self.max_ISP = df['ISP'].max()
        self.custo = 1.06/2             # transaction cost charged per trade
        # Discrete actions {0,1,2}, mapped to {-1: sell, 0: hold, +1: buy}
        # inside _take_action.
        self.action_space = spaces.Discrete(3)
        # Observation: `colunas` scaled feature rows over `passos` steps.
        self.observation_space = spaces.Box(
            low=0, high=1, shape=(colunas, passos), dtype=np.float16)

    def _next_observation(self):
        # Slice the trailing `passos` rows of each feature, scaled to [0, 1].
        steps_atras = self.current_step - self.passos + 1
        obs = np.array([
            self.df.loc[steps_atras: self.current_step, 'preco'].values / self.max_preco,
            self.df.loc[steps_atras: self.current_step, 'hr_int'].values / self.max_hr_int,
            self.df.loc[steps_atras: self.current_step, 'preco_pon'].values / self.max_preco_pon,
            self.df.loc[steps_atras: self.current_step, 'qnt_soma'].values / self.max_qnt_soma,
            self.df.loc[steps_atras: self.current_step, 'max'].values / self.max_max,
            self.df.loc[steps_atras: self.current_step, 'min'].values / self.max_min,
            self.df.loc[steps_atras: self.current_step, 'IND'].values / self.max_IND,
            self.df.loc[steps_atras: self.current_step, 'ISP'].values / self.max_ISP,
        ])
        return obs

    def _take_action(self, action):
        """Apply the action and return the instantaneous reward."""
        valor_cheio = 0
        self.ncont_anterior = self.ncont  # remember the previous position
        acao = action - 1                 # map {0,1,2} -> {-1,0,+1}
        # Only allow opening when flat (ncont 0 -> 1) or closing when long
        # (ncont 1 -> 0); every other combination leaves the position as-is.
        if (acao == 1 and self.ncont == 0) or (acao == -1 and self.ncont == 1):
            self.ncont += acao
        if self.valor != 0:
            # De-normalise the stored (scaled) entry price.
            valor_cheio = ( self.valor * ( self.max_preco - self.min_preco ) + self.min_preco )
        preco = self.df.loc[self.current_step, "preco"]
        preco_ant = self.df.loc[self.current_step - 1, "preco"]
        delta_preco = preco - preco_ant
        # Difference between the current de-normalised price and the entry price.
        dp = ( preco * ( self.max_preco - self.min_preco ) + self.min_preco ) - valor_cheio
        # Instantaneous result: position P&L (x10) minus cost on actual trades.
        posicao = self.ncont_anterior * dp * 10 - self.custo * abs(acao)
        # Track the entry price across the trade's lifetime.
        if ( self.ncont_anterior == 0 and acao != 0 ):  # position just opened
            self.valor = preco
        elif ( self.ncont == 0 ):                       # flat again
            self.valor = 0
        if(self.ncont != self.ncont_anterior):
            return posicao       # position changed: realised P&L minus cost
        elif(self.ncont == 1):
            return delta_preco   # long: reward is the bar-to-bar price change
        elif(self.ncont == 0):
            return -delta_preco  # flat: reward is the negated price change

    def step(self, action):
        # Execute one time step within the environment.
        reward = self._take_action(action)
        self.reward = reward
        self.sum_rewards += reward
        self.current_step += 1
        done = False
        # Episode ends at the end of the data or at a day boundary ('dt' change).
        if(self.current_step == self.df.shape[0] - 1):
            done = True
        elif (self.df.iloc[self.current_step]['dt'] != self.df.iloc[self.current_step + 1]['dt']):
            done = True
        obs = self._next_observation()
        return obs, reward, done, {}

    def reset(self):
        # Flat position, zeroed accounting, and a random day start offset by
        # the window length so the first observation is fully populated.
        self.ncont = 0
        self.ncont_anterior = 0
        self.valor = 0
        self.reward = 0
        self.sum_rewards = 0
        self.current_step = self.comeco_dias[random.randrange(len(self.comeco_dias))] + self.passos - 1
        return self._next_observation()

    def render(self, mode='human', close=False):
        # Human-readable dump of the current step's state.
        print('{0} {1}'.format(self.df.iloc[self.current_step]['dt'], tempoIntToStr(self.df.iloc[self.current_step]['hr_int'])))
        print('Preco: {0}'.format(self.df.iloc[self.current_step]['preco']))
        print('ncont: {0}'.format(self.ncont))
        print('reward: {0}'.format(self.reward))
        print('ganho: {0}'.format(self.sum_rewards))
        print('---------------------------')
|
# Read two lines of two integers each and decide the derby winner:
# first by combined totals, then by a head-to-head comparison, else penalties.
p1, s1 = map(int,input().split())
s2, p2 = map(int, input().split())
if p1+p2 > s1+s2:
    print("Persepolis")
elif p1+p2 < s1+s2:
    print("Esteghlal")
# Totals are equal: tiebreak compares s1 vs p2 -- presumably an away-goals
# style rule; confirm against the problem statement.
elif s1 > p2:
    print("Esteghlal")
elif s1 < p2:
    print("Persepolis")
else:
    print("Penalty")
"""Process responses for the route functions."""
def okay(obj, filetype, updir="media"):
    """Return okay python dict for one uploaded media object.

    NOTE(review): `filetype` is accepted but unused; kept for interface
    compatibility.
    """
    entry = {
        "url": "/" + updir + "/" + obj.filename,
        "message": "OK.",
        "type": obj.mediaType,
        "uid": obj.uid,
        "lat": obj.lat,
        "lon": obj.lon,
    }
    return {"code": 200, "data": [entry]}
def notOkay(code, message):
    """Return failed python dict with the given status code and message."""
    return {"code": code, "message": message, "data": []}
|
from apollo.events.event_handler import EventHandler
from kafka import KafkaProducer
from apollo.configurations import kafka_bootstrap_server
from apollo.monitoring.tracing import print_publish_message
import json
from datetime import datetime
def datetime_handler(x):
    """json.dumps `default=` hook: render datetimes as strings.

    BUG FIX: the original returned None for any non-datetime value, which
    json encodes as null -- silently dropping unserializable data. Raising
    TypeError matches json's documented contract for `default` callables.
    """
    if isinstance(x, datetime):
        return str(x)
    raise TypeError("Object of type %s is not JSON serializable" % type(x).__name__)
class KafkaPublisher(EventHandler):
    """Publishes event envelopes to Kafka with at-least-once delivery settings."""

    _producer = None  # KafkaProducer, created in __init__

    def __init__(self):
        # max_in_flight=1 + effectively-infinite retries + acks='all'
        # preserves ordering and durability at the cost of throughput.
        self._producer = KafkaProducer(
            bootstrap_servers=kafka_bootstrap_server,
            max_in_flight_requests_per_connection=1,
            retries=2147483647,
            acks='all')

    def handle(self, envelope, success_callback, fail_callback):
        """Serialize the envelope body to JSON and send it asynchronously.

        The callbacks receive the envelope's log metadata as `checkpoint`.
        """
        topic = envelope.topic
        # `is not None` is the idiomatic None test (was `!= None`).
        partition_key = str.encode(envelope.partition_key) if envelope.partition_key is not None else None
        json_body = json.dumps(envelope.body.to_dict(), sort_keys=True, default=datetime_handler)
        future = self._producer.send(topic, str.encode(json_body), partition_key)
        future.add_callback(success_callback, checkpoint=envelope.log_metadata)
        future.add_errback(fail_callback, checkpoint=envelope.log_metadata)
from django.db import models
from datetime import datetime, timedelta
import re
class UserManager(models.Manager):
    """Validation helpers for user registration form data."""

    def validator(self, postData):
        """Return a field -> message dict of registration errors (empty = valid)."""
        errors = {}
        if len(postData['first_name']) < 2:
            errors['first_name'] = "First Name must be at least 2 characters."
        if len(postData['last_name']) < 2:
            errors['last_name'] = "Last Name must be at least 2 characters."
        if len(postData['usertype']) == 0:
            errors['usertype'] = "You must select a user type."
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(postData['email']):
            errors['email'] = "Invalid email address."
        elif User.objects.filter(email=postData['email']):
            # Non-empty queryset means the address is already taken.
            errors['email'] = "That email is already registered."
        if len(postData['password']) < 8:
            errors['password'] = "Password must be at least 8 characters."
        elif postData['password'] != postData['pw_confirm']:
            errors['password'] = "Password does not match confirmation."
        return errors
class ItemManager(models.Manager):
    """Validation helpers for item listing form data."""

    def validator(self, postData):
        """Return a field -> message dict of item errors (empty = valid)."""
        errors = {}
        if len(postData['title'])<1:
            errors['title'] = "Title must be provided!"
        if len(postData['description'])<1:
            errors['description'] = "Description must be provided!"
        # BUG FIX: was `< 0`, which is never true for a length, so an empty
        # price always passed validation.
        if len(postData['price'])<1:
            errors['price'] = "Price must be provided."
        return errors
class AddressManager(models.Manager):
    """Validation helpers for address form data."""

    def validator(self, postData):
        """Return a field -> message dict of address errors (empty = valid)."""
        errors = {}
        if len(postData['address'])<1:
            errors['address'] = "Address cannot be blank!"
        if len(postData['city'])<1:
            errors['city'] = "City must be provided."
        # BUG FIX: both checks below used `< 0`, which a length can never
        # satisfy, so blank state/zipcode always passed validation.
        if len(postData['state'])<1:
            errors['state'] = "State must be provided."
        if len(postData['zipcode'])<1:
            errors['zipcode'] = "Zipcode must be provided."
        return errors
class CreditCardManager(models.Manager):
    """Validation helpers for credit card form data."""

    def validator(self, postData):
        """Return a field -> message dict of card errors (empty = valid)."""
        errors = {}
        # BUG FIX: the original used `== 16` / `== 3`, which flagged an error
        # exactly when the length was CORRECT and accepted every wrong length.
        if len(postData['number'])!=16:
            errors['number'] = "Credit card number must be 16 numbers long."
        if len(postData['security_code'])!=3:
            errors['security_code'] = "Security code must be 3 numbers."
        if len(postData['first_name'])<1:
            errors['first_name'] = "First name must be provided."
        if len(postData['last_name'])<1:
            errors['last_name'] = "Last name must be provided."
        return errors
class Address(models.Model):
    """A postal address, referenced by users and credit cards."""
    address = models.CharField(max_length=45)
    address2 = models.CharField(max_length=45)  # second address line
    city = models.CharField(max_length=45)
    state = models.CharField(max_length=2)      # two-letter state code
    # NOTE(review): an IntegerField drops leading zeros in US zipcodes --
    # a CharField would be safer.
    zipcode = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    objects = AddressManager()
class User(models.Model):
    """A site account (custom model, not django.contrib.auth)."""
    first_name = models.CharField(max_length = 45)
    last_name = models.CharField(max_length = 45)
    usertype = models.CharField(max_length=64)
    aboutyou = models.TextField(max_length=300, blank=True )
    email = models.EmailField(max_length = 254)
    # NOTE(review): length 60 matches a bcrypt digest; confirm passwords are
    # hashed before being stored here.
    password = models.CharField(max_length = 60)
    image = models.ImageField(upload_to='profile_image', blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    address = models.ForeignKey(Address, related_name="user",null=True, on_delete=models.CASCADE)
    objects = UserManager()
class Item(models.Model):
    """A listing created by a user."""
    image = models.ImageField(upload_to='item_pic')
    title = models.CharField(max_length=255)
    description = models.TextField(max_length=255)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    creator = models.ForeignKey(User, related_name='creator', on_delete=models.CASCADE)
    # NOTE(review): related_name 'jobs' reads oddly for items -- confirm intent.
    users = models.ManyToManyField(User, related_name='jobs')
    date_created = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = ItemManager()
class Cart(models.Model):
    """A shopping cart; its items hang off CartItem.cart."""
    total = models.DecimalField(max_digits=8, decimal_places=2)  # cart total price
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
class CartItem(models.Model):
    """Join row linking one Item into one Cart."""
    item = models.ForeignKey(Item, on_delete=models.CASCADE)
    cart = models.ForeignKey(Cart, related_name="cart_items", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
class CreditCard(models.Model):
    """A payment card on file for a user, with its billing address."""
    # NOTE(review): storing raw card numbers/CVVs is a PCI-DSS problem; an
    # IntegerField also drops leading zeros. Flagged for review.
    number = models.IntegerField()
    security_code = models.IntegerField()
    expiration_date = models.DateField()
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    address = models.ForeignKey(Address, related_name="card", on_delete = models.CASCADE)
    user = models.ForeignKey(User, related_name="credit_cards", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    objects = CreditCardManager()
class Order(models.Model):
    """A placed order: one cart, paid by a user with a credit card."""
    status = models.CharField(max_length=45)
    cart = models.OneToOneField(Cart,on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name="orders", on_delete=models.CASCADE)
    credit_card = models.ForeignKey(CreditCard, related_name="orders", on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)

    def __str__(self):
        # BUG FIX: previously referenced self.title and self.price, which do
        # not exist on Order (apparently copied from Item) and raised
        # AttributeError. Use the order's own fields instead.
        return "Order #" + str(self.pk) + " (" + str(self.status) + "): $" + str(self.cart.total)
# Binary-search number guessing game. Uses raw_input, so this targets Python 2.
print("Please think of a number between 0 and 100!")
low = 0
middle = 50
high = 100

def _ask(value):
    """Prompt once with the current guess and return the player's answer."""
    return raw_input("Is your number " + str(value) + "?\nEnter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. ")

guess = _ask(middle)
while guess != "c":
    if guess == "l":
        # Too low: search the upper half.
        low = middle
        middle = int((middle + high) / 2)
    elif guess == "h":
        # Too high: search the lower half.
        high = middle
        middle = int((middle + low) / 2)
    else:
        print("Please enter l, h, or c.")
    guess = _ask(middle)
print("Game Over. Your secret number was " + str(middle))
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score,f1_score,roc_curve,auc
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping,TensorBoard
from keras.optimizers import Adagrad
def format_example(wordlist):
    """Join words into a single string, each word prefixed by a space.

    The leading space of the original implementation is preserved for
    output compatibility (e.g. format_example(['a','b']) == ' a b').
    """
    # ''.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(' ' + word for word in wordlist)
def binary_conversion(decimal_list:list,threshold:float):
    """Threshold a list of scores into 0/1 labels.

    Scores >= threshold map to 1; scores < threshold map to 0. A value that
    compares false to both (e.g. NaN) is skipped, matching the original.
    """
    labels = []
    for score in decimal_list:
        if score >= threshold:
            labels.append(1)
        if score < threshold:
            labels.append(0)
    return labels
#Dataset loading
# Map text labels to binary targets: Dementia -> 1, Control -> 0.
df = pd.read_pickle('pitt_dataframe.pickle')
numeric_label = []
for string in df.label:
    if string == 'Dementia':
        numeric_label.append(1)
    if string == 'Control':
        numeric_label.append(0)
# NOTE(review): assumes only those two labels occur -- any other label would
# desynchronise numeric_label from df.text.
X_train, X_test, y_train, y_test = train_test_split(df.text, np.array(numeric_label), test_size=0.2, random_state=42)
#Tokenize and create sequence.
### Create sequence
vocabulary_size = 30000
sequence_len = 250
# Fit the tokenizer on the training split only, then pad/truncate both
# splits to a fixed length of 250 tokens.
tokenizer = Tokenizer(num_words= vocabulary_size)
tokenizer.fit_on_texts(X_train)
train_sequences = tokenizer.texts_to_sequences(X_train)
train_sequences = pad_sequences(train_sequences, maxlen=sequence_len)
test_sequences = tokenizer.texts_to_sequences(X_test)
test_sequences = pad_sequences(test_sequences, maxlen=sequence_len)
#Word embeddings initialization
# Parse GloVe 100-d vectors into a word -> vector dict.
embeddings_index = dict()
f = open('glove.6B/glove.6B.100d.txt')  # NOTE(review): consider encoding='utf-8'
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
# Build the embedding matrix for the tokenizer's vocabulary; rows for words
# without a GloVe vector remain zero.
embedding_matrix = np.zeros((vocabulary_size, 100))
for word, index in tokenizer.word_index.items():
    if index > vocabulary_size - 1:
        break
    else:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[index] = embedding_vector
#Callbacks
# Stop when validation loss plateaus; log training curves to ./logs.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10, verbose=1, mode='auto')
tensor_borad = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
optimizer = Adagrad(lr=0.001, epsilon=None, decay=0.0)
## create model
# Frozen GloVe embeddings -> LSTM(40) -> sigmoid binary classifier.
model_glove = Sequential()
model_glove.add(Embedding(vocabulary_size, 100, input_length=sequence_len, weights=[embedding_matrix], trainable=False))
model_glove.add(LSTM(40))
model_glove.add(Dense(1, activation='sigmoid'))
model_glove.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
## Fit train data
# Train only if no saved weights exist; otherwise reuse them.
if not os.path.isfile("model_weights/glove_embeddings_classifier_weights.h5"):
    print("[LOG] Training the model...")
    model_glove.fit(train_sequences,y_train , validation_split=0.2, epochs = 300,callbacks=[early_stopping,tensor_borad])
    model_glove.save_weights("model_weights/glove_embeddings_classifier_weights.h5")
    print("[LOG] Saved weights to disk")
else:
    print("[LOG] Loading weights from disk...")
    model_glove.load_weights("model_weights/glove_embeddings_classifier_weights.h5")
# Evaluate on the held-out split with a 0.5 decision threshold.
result = model_glove.predict(test_sequences)
y_score = binary_conversion(result,0.5)
test_precision = precision_score(y_test,y_score)
test_f1 = f1_score(y_test,y_score)
print("Test precision: {}, Test F1 score: {}, with classification threshold 0.5".format(test_precision,test_f1))
#Compute the ROC curve for the classifier
# ROC uses the raw predicted probabilities, not the thresholded labels.
fpr, tpr, _ = roc_curve(y_test, result)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2  # line width for both curves
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
# Diagonal = chance-level classifier.
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic GloVe classifier')
plt.legend(loc="lower right")
plt.savefig('roc_curve.png')
selection_list = []  # NOTE(review): never used afterwards
#Printing some examples of correctly classified patients:
#Here there are some index of samples in the test set that have been correctly classifieds by our model or not (they have been hand picked).
true_positive = 272
true_negative = 140
false_negative = 547
false_positive = 81
# NOTE(review): X_test is a pandas Series keeping the original dataframe
# index, so X_test[272] is a label lookup, not positional -- confirm these
# hand-picked indices exist in the test split.
print('False Negative: {}'.format(format_example(X_test[false_negative])))
print('True Negative: {}'.format(format_example(X_test[true_negative])))
print('True Positive: {}'.format(format_example(X_test[true_positive])))
print('False Positive: {}'.format(format_example(X_test[false_positive])))
|
from ansible.plugins.callback import CallbackBase
import os
import sys
import logging
import json
import shutil
class ResultsCollector(CallbackBase):
    """Ansible callback that dumps each task result as pretty-printed JSON."""

    def __init__(self, *args, **kwargs):
        super(ResultsCollector, self).__init__(*args, **kwargs)
        # NOTE(review): these dicts are never populated in this snippet;
        # kept for interface compatibility with callers that may read them.
        self.host_ok = {}
        self.host_unreachable = {}
        self.host_failed = {}

    def _dump_result(self, result):
        """Shared printer: remember the host and emit {host: result} as JSON."""
        self.host = result._host
        print(json.dumps({self.host.name: result._result}, indent=4))

    def v2_runner_on_unreachable(self, result,**kwargs):
        self._dump_result(result)

    def v2_runner_on_ok(self, result, *args, **kwargs):
        self._dump_result(result)

    def v2_runner_on_failed(self, result, *args, **kwargs):
        self._dump_result(result)
|
"""
Lab4: Q1B
-Create new security group
"""
import boto3
from botocore.exceptions import ClientError
# Create the security group; boto3 raises ClientError on failure
# (e.g. a duplicate group name), which we just print.
ec2 = boto3.client('ec2')
try:
    message = ec2.create_security_group(GroupName="TheWizardIsHere",
                                        Description="Only level 20 wizards allowed")
    print("Success", message)  # typo fix: was "Sucess"
except ClientError as e:
    print(e)
|
from tree import Tree
tree = Tree()
# Build a small tree. The original comments labelled every call "root node",
# which is wrong -- only the first insertion can be the root.
tree.add_node(1)    # first node inserted (the root)
# NOTE(review): the two-argument form is presumably (value, parent) or a
# pair of children -- confirm against tree.Tree's API.
tree.add_node(2,3)
tree.add_node(4,5)
tree.add_node(6,7)
tree.display(1)     # render the tree starting from node 1
from dcbase.tests.unit import UnitTestCase
from django.core.urlresolvers import reverse
class TestUserView(UnitTestCase):
    """Tests for the public user-profile view."""

    def setUp(self):
        # Create a user and fetch their profile page once; every test then
        # asserts against self.response.
        super().setUp()
        self.user = self.createUser()
        self.url = self._createUrl(self.user.username)
        self.response = self.client.get(self.url)

    def _createUrl(self, username):
        """Reverse the profile URL for the given username."""
        return reverse('account_profile_user', kwargs={'username': username})

    def test_rendersProfileTemplate(self):
        self.assertTemplateUsed(self.response, 'dcbase/profile.html')

    def test_contextContainsUserAsGivenInUrl(self):
        self.assertEqual(self.user, self.response.context['profileUser'])

    def test_returnsNotFoundStatusIfUserNameIsInvalid(self):
        # A random, unregistered username should yield a 404.
        url = self._createUrl(self.randStr())
        response = self.client.get(url)
        self.assertResponseStatusIsNotFound(response)
|
from flask import Flask
from flask import Response, request
import json
import sys
#import file from main path
# Make the sensor project's modules importable before importing them.
sys.path.insert(0, '/home/pi/Desktop/Flow_sensor')
import function
import db_connection
app = Flask(__name__)

@app.route('/')
def index():
    """Health-check / landing endpoint."""
    return 'Flow Sensor API! '
#Get sensor data
@app.route('/sensorData/<sensorId>', methods=['GET'])
def getSensorData(sensorId):
    """Return the cached JSON readings file for one sensor."""
    sensor = function.getSensor(int(sensorId))
    sensorData = json.loads(sensor)  # function.getSensor returns a JSON string
    sensorName = sensorData['name']
    # Reading files are stored on disk as "<sensor name><id>.json".
    fileName = sensorName + sensorId
    sensorData_ = function.file("/home/pi/Desktop/Flow_sensor/json/" + fileName + ".json")
    return sensorData_
#Add new Sensor
@app.route('/sensors', methods=['POST'])
def addSensor():
    """Create a sensor from the JSON request body and return the stored record."""
    # request.get_json already yields plain dicts/lists; the original's
    # json.dumps/json.loads round-trip was a no-op and has been removed.
    resp = request.get_json(silent=True)
    sensor = function.addSensor(
        resp['name'],
        resp['type'],
        resp['diameter'],
        resp['description'],
        int(resp['input']),
        int(resp['output']),
        int(resp['status']),
    )
    return sensor
#Find by Id Sensor
@app.route('/sensors/<sensorId>', methods=['GET'])
def getSensor(sensorId):
    """Return one sensor by id."""
    sensor_ = function.getSensor(int(sensorId))
    return sensor_

#Find All Sensors
@app.route('/sensors', methods=['GET'])
def getSensors():
    """Return all sensors."""
    sensors = function.getSensors()
    return sensors
#Update by Id Sensor
@app.route('/sensors/<sensorId>', methods=['PUT'])
def updateSensors(sensorId):
    """Update a sensor from the JSON body, then return it."""
    sensor_ = request.get_json(silent=True)
    json_str = json.dumps(sensor_)
    resp = json.loads(json_str)
    # NOTE(review): the sensorId path parameter is never passed to
    # function.updateSensor -- the record is selected by resp['inputPin'],
    # which is inconsistent with the route. Confirm which key identifies
    # the row.
    sensor = function.updateSensor(
        int(resp['inputPin']),
        resp['name'],
        resp['type'],
        resp['diameter'],
        resp['description']
    )
    return getSensor(sensorId)
#Delete Sensor
@app.route('/sensors/<sensorId>', methods=['DELETE'])
def deleteSensors(sensorId):
    """Delete a sensor by id."""
    sensors = function.deleteSensor(int(sensorId))
    return sensors

#Find by Pin number
@app.route('/pinSensor/<pin>', methods=['GET'])
def pinSensorId(pin):
    """Look up a sensor by pin number."""
    # NOTE(review): this calls function.getSensor (lookup by id), not a
    # pin-specific query -- presumably a bug; confirm against `function`.
    sensor = function.getSensor(int(pin))
    return sensor
#Login user
@app.route('/login', methods=['POST'])
def loginUser():
    """Authenticate a user from the JSON request body."""
    # request.get_json already yields plain dicts; the original's
    # json.dumps/json.loads round-trip was a no-op and has been removed.
    resp = request.get_json(silent=True)
    user = function.loginUser(
        resp['email'],
        resp['password'])
    return user

#Add new User
@app.route('/users', methods=['POST'])
def addUser():
    """Create a user from the JSON request body."""
    resp = request.get_json(silent=True)  # same round-trip removal as loginUser
    user = function.addUser(
        resp['first_name'],
        resp['last_name'],
        resp['email'],
        resp['password']
    )
    return user
#Find by Id User
@app.route('/users/<userId>', methods=['GET'])
def getUser(userId):
    """Return the user record with the given id."""
    return function.getUser(int(userId))
#Find all Users
@app.route('/users', methods=['GET'])
def getUsers():
    """Return all user records."""
    return function.getUsers()
#Update by Id User
@app.route('/users/<userId>', methods=['PUT'])
def updateUser(userId):
    """Update the user with the given id, then return the refreshed record."""
    # request.get_json() already returns a parsed dict; the original
    # json.dumps/json.loads round-trip was a no-op and has been removed.
    resp = request.get_json(silent=True)
    function.updateUser(
        int(userId),
        resp['first_name'],
        resp['last_name'],
        resp['email'],
        resp['password']
    )
    return getUser(userId)
#Delete User
@app.route('/users/<userId>', methods=['DELETE'])
def deleteUser(userId):
    """Delete the user with the given id and return the helper's result."""
    return function.deleteUser(int(userId))
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows remote code execution — disable outside local development.
    app.run(debug=True, host='192.168.2.200')
|
from heapq import heappop, heappush
w = [  # pairwise vertex distances; -1 means "no edge" (infinity)
    [0, 7, 9, -1, -1, 14],
    [7, 0, 10, 15, -1, -1],
    [9, 10, 0, 11, -1, 2],
    [-1, 15, 11, 0, 6, -1],
    [-1, -1, -1, 6, 0, 9],
    [14, -1, 2, -1, 9, 0]
]
def dijkstra(start, dest, graph=None):
    """Shortest path between two vertices via Dijkstra's algorithm.

    Generalized from the original: the hard-coded vertex count (6) and
    the implicit module-level matrix are now derived from ``graph``.

    :param start: index of the start vertex
    :param dest: index of the destination vertex
    :param graph: adjacency matrix of distances with -1 meaning "no edge";
                  defaults to the module-level ``w``
    :return: (distance, path string) if the destination is reachable,
             otherwise None
    """
    if graph is None:
        graph = w  # module-level demo graph
    n = len(graph)
    visited = set()  # vertices whose shortest distance is final (O(1) lookup)
    q = [(0, start, str(start))]  # (path length, vertex index, path string)
    while q:
        d, u, p = heappop(q)
        if u == dest:
            print(d, p)
            return d, p  # stop as soon as the destination is settled
        if u not in visited:
            visited.add(u)
            for v in range(n):
                # relax every unvisited neighbour, extending the path string
                if graph[u][v] != -1 and v not in visited:
                    heappush(q, (d + graph[u][v], v, ''.join((p, '->', str(v)))))
def main():
    """Demo entry point: shortest path from vertex 0 to vertex 5."""
    # start/dest could instead be read interactively:
    #   start, dest = int(input()), int(input())
    dijkstra(0, 5)


if __name__ == '__main__':
    main()
|
import numpy as np
from scipy.interpolate import CubicSpline
def DA_Jitter(X, sigma=0.05):
    """Data augmentation: add i.i.d. Gaussian noise N(0, sigma) to X."""
    noise = np.random.normal(loc=0, scale=sigma, size=X.shape)
    return X + noise
def DA_Scaling(X, sigma=0.1):
    """Data augmentation: scale each channel of X by one factor from N(1, sigma)."""
    factors = np.random.normal(loc=1.0, scale=sigma, size=(1, X.shape[1]))
    # replicate the per-channel factor down every row before multiplying
    scale_matrix = np.matmul(np.ones((X.shape[0], 1)), factors)
    return X * scale_matrix
def GenerateRandomCurves(X, sigma=0.2, knot=4):
    """Generate one smooth random curve per channel of X.

    Knot y-values are drawn from N(1, sigma) at evenly spaced timesteps
    and interpolated with a cubic spline; result has the same shape as X.
    """
    knot_x = (np.ones((X.shape[1], 1)) *
              (np.arange(0, X.shape[0], (X.shape[0] - 1) / (knot + 1)))).transpose()
    knot_y = np.random.normal(loc=1.0, scale=sigma, size=(knot + 2, X.shape[1]))
    t = np.arange(X.shape[0])
    curves = [CubicSpline(knot_x[:, c], knot_y[:, c])(t) for c in range(X.shape[1])]
    return np.array(curves).transpose()
def DA_MagWarp(X, sigma):
    """Data augmentation: multiply X elementwise by smooth random curves."""
    curves = GenerateRandomCurves(X, sigma)
    return X * curves
def DA_TimeWarp(X, sigma=0.2):
    """Data augmentation: warp X along the time axis with a smooth random distortion."""
    def _distorted_timesteps(X, sigma):
        # treat smooth random samples (around 1) as per-step time intervals
        intervals = GenerateRandomCurves(X, sigma)
        cum = np.cumsum(intervals, axis=0)
        # rescale each channel so its final timestep lands on X.shape[0]-1
        for c in range(X.shape[1]):
            cum[:, c] = cum[:, c] * ((X.shape[0] - 1) / cum[-1, c])
        return cum

    warped_t = _distorted_timesteps(X, sigma)
    t = np.arange(X.shape[0])
    out = np.zeros(X.shape)
    for c in range(X.shape[1]):
        out[:, c] = np.interp(t, warped_t[:, c], X[:, c])
    return out
def DA(X, y, iterations=10):
    """Augment dataset (X, y) with ``iterations`` rounds of MagWarp + TimeWarp.

    Each round appends one warped copy of every ORIGINAL sample (rows
    0..N-1 of ``features`` stay the originals across rounds).

    :param X: samples, indexed by the first axis
    :param y: 1-D labels, concatenated round by round
    :param iterations: number of augmentation rounds
    :return: (features, labels) enlarged by iterations * len(X) samples
    """
    features = X
    labels = y
    for _ in range(iterations):  # 'iter' shadowed the builtin; renamed
        batch = [DA_TimeWarp(DA_MagWarp(features[i], 0.001))
                 for i in range(X.shape[0])]
        batch_labels = [y[i] for i in range(X.shape[0])]
        features = np.vstack((features, np.array(batch)))
        labels = np.concatenate((labels, np.array(batch_labels)))
    return features, labels
def DA_wc(X, y, iterations=10):
    """Augment dataset (X, y) with MagWarp + TimeWarp; labels stacked 2-D.

    Identical to :func:`DA` except labels are combined with ``np.vstack``
    (for 2-D, e.g. one-hot, labels) instead of ``np.concatenate``.

    :param X: samples, indexed by the first axis
    :param y: 2-D labels, vstacked round by round
    :param iterations: number of augmentation rounds
    :return: (features, labels) enlarged by iterations * len(X) samples
    """
    features = X
    labels = y
    for _ in range(iterations):  # 'iter' shadowed the builtin; renamed
        batch = [DA_TimeWarp(DA_MagWarp(features[i], 0.001))
                 for i in range(X.shape[0])]
        batch_labels = [y[i] for i in range(X.shape[0])]
        features = np.vstack((features, np.array(batch)))
        labels = np.vstack((labels, np.array(batch_labels)))
    return features, labels
def DA_3cla(X, y, iterations=10):
    """Augment a 3-class dataset, oversampling class 2.

    Like :func:`DA`, but every class-2 sample gets four EXTRA warped
    copies per round to rebalance the class distribution.

    :param X: samples, indexed by the first axis
    :param y: 1-D integer class labels
    :param iterations: number of augmentation rounds
    :return: (features, labels) with augmented samples appended
    """
    features = X
    labels = y
    for _ in range(iterations):  # 'iter' shadowed the builtin; renamed
        batch = []
        batch_labels = []
        for i in range(X.shape[0]):
            batch.append(DA_TimeWarp(DA_MagWarp(features[i], 0.001)))
            batch_labels.append(y[i])
            if y[i] == 2:
                # class 2 is oversampled: four additional warped copies
                for _extra in range(4):
                    batch.append(DA_TimeWarp(DA_MagWarp(features[i], 0.001)))
                    batch_labels.append(y[i])
        features = np.vstack((features, np.array(batch)))
        labels = np.concatenate((labels, np.array(batch_labels)))
    return features, labels
from calcFunctions import *
from plot import *
from tkinter import *
import webbrowser
def path(f):
    """Interactively define the path {z=x+iy: x0<x<x1, y=f*(x)} and plot f over it.

    :param f: complex function applied to the randomly sampled points
    """
    #we ask for the shape properties
    print("Image of a shape: {z=x+iy: x0<x<x1, y=f*(x)}")
    x0=float(ask('x0=', '0'))
    x1=float(ask('x1=', '1'))
    print("Please type in the (real) function for the path of the plot:")
    func1=ask('f*(x)=', '0')
    # SECURITY: eval() of user-typed text — acceptable for a local interactive
    # tool, but never expose this to untrusted input.
    f1=lambda x: eval(func1)
    #we ask for the computer boundaries
    print("""As I am a computer, \
I will create N "random" points""")
    # NOTE(review): the default here is the int 10000 while the other ask()
    # defaults are strings — confirm ask() accepts both.
    N=int(ask('N=', 10000))
    #creating the list of points
    listz=createRandomFromPath(x0, x1, f1, N)
    #plot function
    myPlot(f, listz)
class Path(Frame):
    """Tk window that plots the image of {z=x+iy: x0<x<x1, y=f*(x)} under f(z).

    Entry widgets collect x0, x1, the path function f*(x), the sample
    count N and the complex function f(z); the GRAPH button validates
    them and draws the plot.
    """
    def __init__(self, boss=None, **arg):
        Frame.__init__(self, boss, **arg)
        #name the window:
        boss.title("Images of complicated shapes by a complex function")
        # plot parameters, populated by updateVals()
        self.x0 = None
        self.x1 = None
        self.fPath = None
        self.N = None
        self.f = None
        self.f1 = None  # path function; initialized so graph() never sees a missing attr
        ##graph objects
        Label(self, text='{ z = x + iy : x0 < x < x1 & y = f*(x) }', \
              font=('Times', -30, 'bold'), fg='red').pack(pady=15)
        #website link (explanation of the shape)
        Button(self, text='Shapes explanations', \
               command=lambda e=None: webbrowser.open_new("shapes.html")).pack()
        #x0
        Label(self, text='Lower bound for x (x0)', \
              font=('Times', -20, 'bold')).pack()
        self.x0Input = Entry(self, width=25)
        self.x0Input.pack(anchor='w')
        self.x0Input.insert(0, '-1.5')
        Label(self, text='(x0 should be real)').pack(anchor='w')
        #x1
        Label(self, text='Upper bound for x (x1)', \
              font=('Times', -20, 'bold')).pack()
        self.x1Input = Entry(self, width=25)
        self.x1Input.pack(anchor='w')
        self.x1Input.insert(0, '2')
        Label(self, text='(x1 should be real)').pack(anchor='w')
        #f*
        Label(self, text='Function of y in terms of x ( f*(x) )', \
              font=('Times', -20, 'bold')).pack()
        self.func1Input = Entry(self, width=25)
        self.func1Input.pack(anchor='w')
        self.func1Input.insert(0, '-x*x*x + x*x')
        Label(self, text='(f*(x) should be real, \
I will take the modulus if not)').pack(anchor='w')
        #N
        Label(self, text='Number of points randomly generated (N)', \
              font=('Times', -20, 'bold')).pack()
        self.NInput = Entry(self, width=25)
        self.NInput.pack(anchor='w')
        self.NInput.insert(0, '10000')
        #func
        Label(self, text='Complex function to graph ( f(z) )', \
              font=('Times', -20, 'bold')).pack()
        self.funcInput = Entry(self, width=40)
        self.funcInput.pack(anchor='w')
        self.funcInput.insert(0, '-1j-2*z')
        Label(self, text='Functions that you can use are listed here:', \
              justify='left').pack(anchor='w')
        url = "https://docs.python.org/3.5/library/cmath.html"
        Button(self, text='Mathematical functions for complex numbers', \
               command=lambda e=None: webbrowser.open_new(url)).pack(anchor='w')
        #info label
        self.info = Label(self, text=">>> click 'GRAPH !' to draw the graph <<<", \
                          font=('Times', -20, 'bold'), fg='grey')
        self.info.pack(pady=10)
        # validation button ('GARPH !' typo fixed to match the info label above)
        Button(self, text='GRAPH !', bg='green', \
               command=self.graph).pack(pady=10)

    def graph(self):
        """Validate every field; on success sample the path and plot it."""
        if self.updateVals():
            #creating the list of points
            listz = createRandomFromPath(self.x0, self.x1, \
                                         self.f1, self.N)
            #plot function
            myPlot(self.f, listz)

    def updateVals(self, event=None):
        """Parse all entry fields into attributes (guard-clause style).

        Shows which field failed in the info label; returns True only
        when every field parsed successfully.
        """
        try:
            self.x0 = complex(self.x0Input.get())
        except Exception:
            self.info.config(text="Invalid x0", fg='red')
            return False
        try:
            self.x1 = complex(self.x1Input.get())
        except Exception:
            self.info.config(text="Invalid x1", fg='red')
            return False
        try:
            self.N = int(self.NInput.get())
        except Exception:
            self.info.config(text="Invalid N", fg='red')
            return False
        try:
            # SECURITY: eval of user-typed text — local tool only
            self.f1 = lambda x: eval(self.func1Input.get())
        except Exception:
            self.info.config(text="Invalid function f*", fg='red')
            return False
        try:
            self.f = lambda z: eval(self.funcInput.get())
        except Exception:
            self.info.config(text="Invalid function f", \
                             fg='red')
            return False
        self.info.config(text="Everything is valid", \
                         fg='green')
        return True
if __name__=='__main__':
    # Build the Tk root window, mount the Path frame, and enter the event loop.
    root=Tk()
    Path(root).pack(anchor='w', padx=5, pady=5)
    root.mainloop()
|
from schematics.types import StringType, IntType, IPv4Type
from model import SbbModel
class IpAddress(SbbModel):
    """Schematics model describing a tracked IP address."""
    ipaddress = IPv4Type(primary_key=True)  # the address itself is the primary key
    status = IntType()  # numeric status code; semantics defined by callers — confirm
    #history = Map(Integer, Integer)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from controllers.baseengine import RoundGameEngine, readInput
from controllers.statsengine import StatsEngine, ParticularStatsEngine
from controllers.db import db
class Phase10Engine(RoundGameEngine):
    """Round-based game engine for the card game Phase 10.

    Completed phases per player are read from
    ``self.match.phasesCleared`` (player -> list of phase numbers).
    """

    def __init__(self):
        super(Phase10Engine, self).__init__()
        self.game = "Phase10"

    def getPhases(self):
        """Return the phase descriptions stored in GameExtras, in key order."""
        # NOTE(review): query built with str.format — safe only because
        # self.game is an internal constant; a parameterized query would
        # be more robust against future changes.
        cur = db.execute(
            "Select key,value from GameExtras "
            "where Game_name='{}' and key like 'Phase %' "
            "order by key asc".format(self.game))
        return [row['value'] for row in cur]

    def getRemainingPhasesFromPlayer(self, player):
        """Return the phase numbers (1-10) ``player`` has not completed yet."""
        remaining = list(range(1, 11))
        if player in self.match.phasesCleared:
            for phase in self.match.phasesCleared[player]:
                remaining.remove(phase)
        return remaining

    def getCompletedPhasesFromPlayer(self, player):
        """Return the phases ``player`` has completed (empty list if none)."""
        if player in self.match.phasesCleared:
            return self.match.phasesCleared[player]
        return list()

    def hasPhaseCompleted(self, player, phase):
        """True when ``player`` already completed ``phase``."""
        return phase in self.getCompletedPhasesFromPlayer(player)

    def hasPhaseRemaining(self, player, phase):
        """True when ``phase`` is still open for ``player``."""
        return phase in self.getRemainingPhasesFromPlayer(player)

    def printExtraPlayerStats(self, player):
        """Print the list of phases ``player`` has completed."""
        print("Phases completed: {}".format(
            self.getCompletedPhasesFromPlayer(player)))

    def printExtraStats(self):
        """Print the numbered phase descriptions plus a notation legend."""
        print("Phases:")
        print("====================")
        for n, phase in enumerate(self.getPhases(), start=1):
            print(" Phase {0:02}: {1}".format(n, phase))
        print("====================")
        print(" Quick desc: s=set, r=run, c=colour, cr=colour run")
        print(" Example: 2s4 = 2 sets of 4 cards")

    def runStubRoundPlayer(self, player, winner):
        """Interactively record one round's result for ``player``.

        The winner scores 0 and implicitly completes the aimed phase;
        other players enter a score and, if it is 50+, whether they
        completed their phase.
        """
        score = 0
        cleared = 1
        if self.getPhasesInOrderFlag():
            # phases must be played 1..10 in order: aim at the next one
            try:
                a_phase = self.getCompletedPhasesFromPlayer(player)[-1] + 1
            except IndexError:
                a_phase = 1  # nothing completed yet: start at phase 1
        else:
            a_phase = readInput("{} aimed phase number: ".format(player), int,
                                lambda x: x > 0 and self.hasPhaseRemaining(
                                    player, x),
                                "Sorry, phase not valid or already completed.")
        if not winner == player:
            score = readInput("{} round score: ".format(
                player), int, lambda x: x > 0, "Sorry, invalid score number.")
            if score >= 50:
                cleared = readInput(
                    "Did {} complete phase {}?[1/0]: ".format(player, a_phase),
                    int, lambda x: x in [0, 1])
        self.addRoundInfo(
            player, score, {'aimedPhase': a_phase, 'isCompleted': cleared})

    def extraStubConfig(self):
        """Ask whether phases must be completed in order and store the flag."""
        pio = readInput(
            "Follow phases in order? [1/0]: ", int, lambda x: x in (0, 1))
        self.setPhasesInOrderFlag(bool(pio))

    def getPhasesInOrderFlag(self):
        return self.match.getPhasesInOrderFlag()

    def setPhasesInOrderFlag(self, flag):
        self.match.setPhasesInOrderFlag(flag)
class Phase10MasterEngine(Phase10Engine):
    """Phase 10 'Master' variant: same rules, distinct game name in the DB."""
    def __init__(self):
        super(Phase10MasterEngine, self).__init__()
        self.game = "Phase10Master"  # overrides the name set by the base class
if __name__ == "__main__":
game = readInput('Game to play (Phase10/Phase10Master): ',
str, lambda x: x in ['Phase10', 'Phase10Master'])
if game == 'Phase10':
pe = Phase10Engine()
else:
pe = Phase10MasterEngine()
pe.gameStub()
class Phase10StatsQueries(object):
    """SQL used by the Phase 10 stats engines.

    Both queries are plain strings so subclasses can rewrite their WHERE
    clauses (see Phase10ParticularStatsEngine).
    """
    # Per (game, nick): the minimum number of phases completed in any
    # single finished match (state = 1).
    worst_phases = """
    SELECT game, nick, min(pc) AS min_phases from (
    SELECT Match.Game_name as game,Match.idMatch AS match,
    rs.nick AS nick ,count(value) AS pc
    FROM RoundStatistics AS rs,Match
    WHERE rs.idMatch = Match.idMatch
    AND key = "PhaseCompleted"
    AND value <> 0
    AND state = 1
    GROUP BY game, Match.idMatch, rs.nick
    ) AS temp
    GROUP BY game, nick
    """
    # Per (game, player, phase): how many times the phase was aimed at.
    damned_phases = """
    SELECT Game_name AS game, nick AS player, value AS phase,
    COUNT(value) AS times
    FROM Match,RoundStatistics
    WHERE
    Match.idMatch = RoundStatistics.idMatch
    AND key="PhaseAimed"
    GROUP BY game, player, phase
    ORDER BY game, player, phase
    """
class Phase10StatsEngine(StatsEngine):
    """Stats engine enriched with Phase 10 specific aggregates."""

    def __init__(self):
        super(Phase10StatsEngine, self).__init__()
        queries = Phase10StatsQueries()
        self._worst_phases = queries.worst_phases
        self._damned_phases = queries.damned_phases

    def update(self):
        """Refresh base stats, then merge worst/most-attempted phase data."""
        super(Phase10StatsEngine, self).update()
        # merge each player's minimum completed-phase count into the
        # matching general stats row
        self.wphases = db.queryDict(self._worst_phases)
        for wrow in self.wphases:
            for stats_row in self.generalplayerstats:
                if (stats_row['nick'] == wrow['nick']
                        and stats_row['game'] == wrow['game']):
                    stats_row['min_phases'] = wrow['min_phases']
                    break
        # tally, per game and player, how often each phase was aimed at
        attempts = {}
        for arow in db.queryDict(self._damned_phases):
            per_game = attempts.setdefault(arow['game'], {})
            counts = per_game.setdefault(arow['player'], [0] * 10)
            counts[int(arow['phase']) - 1] = arow['times']
        # the player's "damned" phase is the one attempted most often
        for stats_row in self.generalplayerstats:
            counts = attempts.get(stats_row['game'], {}).get(stats_row['nick'])
            if counts is not None:
                stats_row['damned_phase'] = counts.index(max(counts)) + 1
class Phase10ParticularStatsEngine(Phase10StatsEngine, ParticularStatsEngine):
    """Phase 10 stats restricted to a particular set of players."""

    def updatePlayers(self, players):
        """Narrow both Phase 10 queries to the given players' matches."""
        super(Phase10ParticularStatsEngine, self).updatePlayers(players)
        if not players:
            return
        queries = Phase10StatsQueries()
        # splice the player-restriction clause into each query's WHERE
        clause = "WHERE {} AND".format("Match." + self._newclause)
        self._worst_phases = queries.worst_phases.replace('WHERE', clause)
        self._damned_phases = queries.damned_phases.replace('WHERE', clause)
|
#!/usr/bin/env python
import sys
sys.path.append(".")
import numpy
import geometry_msgs.msg
#from interactive_markers.interactive_marker_server import *
#from interactive_markers.menu_handler import *
import trajectory_msgs.msg
#import moveit_commander
import moveit_msgs.srv
import rospy
import sensor_msgs.msg
import sys
import tf
#from visualization_msgs.msg import InteractiveMarkerControl
#from visualization_msgs.msg import Marker
from sensor_msgs.msg import JointState
from copy import deepcopy
from threading import Thread, Lock
from obstacle_generator import ObstacleGenerator
from obstacle_generator import convert_to_message
import time
def convert_to_trans_message(T):
    """Convert a 4x4 homogeneous matrix into a geometry_msgs Transform."""
    tx, ty, tz = tf.transformations.translation_from_matrix(T)
    qx, qy, qz, qw = tf.transformations.quaternion_from_matrix(T)
    msg = geometry_msgs.msg.Transform()
    msg.translation.x = tx
    msg.translation.y = ty
    msg.translation.z = tz
    msg.rotation.x = qx
    msg.rotation.y = qy
    msg.rotation.z = qz
    msg.rotation.w = qw
    return msg
def convert_from_message(msg):
    """Convert a geometry_msgs Pose into a 4x4 homogeneous matrix."""
    quat = (msg.orientation.x, msg.orientation.y,
            msg.orientation.z, msg.orientation.w)
    trans = (msg.position.x, msg.position.y, msg.position.z)
    # compose translation * rotation
    return numpy.dot(tf.transformations.translation_matrix(trans),
                     tf.transformations.quaternion_matrix(quat))
def is_same(matrix0, matrix1):
    """True when two homogeneous matrices agree within 2e-2 absolute tolerance.

    Each input is copied and normalized by its [3, 3] entry first.
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1, 0, 2e-2)
class Grader(object):
    """Grades motion-planning runs (ROS, Python 2).

    Publishes a goal transform, then polls TF until the end-effector
    reaches the goal (awarding points), a collision is detected (0), or
    a timeout expires.
    """
    def __init__(self, og):
        self.og = og  # ObstacleGenerator managing planning obstacles
        self.mutex = Lock()  # guards self.joint_state between callback and poll loop
        # Publisher to send commands
        self.pub_command = rospy.Publisher("/motion_planning_goal", geometry_msgs.msg.Transform,
                                           queue_size=1)
        self.listener = tf.TransformListener()
        # Subscribes to information about what the current joint values are.
        rospy.Subscriber("/joint_states", sensor_msgs.msg.JointState,
                         self.joint_states_callback)
        # Subscriber to slighly reduce obstacle size once trajectory is sent
        rospy.Subscriber("/joint_trajectory", trajectory_msgs.msg.JointTrajectory,
                         self.execute_motion_cb)
        # Publisher to set robot position
        self.pub_reset = rospy.Publisher("/joint_command", JointState, queue_size=1)
        rospy.sleep(0.5)
        # Wait for validity check service
        rospy.wait_for_service("check_state_validity")
        self.state_valid_service = rospy.ServiceProxy('check_state_validity',
                                                      moveit_msgs.srv.GetStateValidity)
        self.reset_robot()
    def joint_states_callback(self, joint_state):
        # Cache the latest joint state under the lock.
        # NOTE(review): self.joint_state does not exist until the first message
        # arrives; goto_pose assumes at least one callback has fired.
        self.mutex.acquire()
        self.joint_state = joint_state
        self.mutex.release()
    def execute_motion_cb(self, data):
        # A trajectory was published: shrink the planning obstacles slightly.
        self.og.remove_planning_obs()
    def check_validity(self, joint_state):
        """Ask the MoveIt service whether joint_state is valid for 'lwr_arm'."""
        req = moveit_msgs.srv.GetStateValidityRequest()
        req.group_name = "lwr_arm"
        req.robot_state = moveit_msgs.msg.RobotState()
        req.robot_state.joint_state = joint_state
        res = self.state_valid_service(req)
        return res.valid
    #Resets the robot to a known pose
    def reset_robot(self):
        cmd = JointState()
        #cmd.position.append(0.35)
        #cmd.position.append(2.04)
        #cmd.position.append(-1.35)
        #cmd.position.append(1.03)
        #cmd.position.append(-0.53)
        #cmd.position.append(1.34)
        #cmd.position.append(1.64)
        # 7 joint values for the known start pose
        cmd.position.append(-1.33)
        cmd.position.append(-0.3)
        cmd.position.append(3)
        cmd.position.append(-1)
        cmd.position.append(1.6)
        cmd.position.append(-1.6)
        cmd.position.append(-1.75)
        self.pub_reset.publish(cmd)
        rospy.sleep(1.0)
    def goto_pose(self, name, T, timeout, points):
        """Publish goal T and poll; return ``points`` if reached, else 0.

        Breaks out (implicitly scoring 0) on a collision; returns 0 on
        timeout.
        """
        self.pub_command.publish(convert_to_trans_message(T))
        print 'Goal published'
        start_time = time.time()
        done = False
        while not done and not rospy.is_shutdown():
            # snapshot the latest joint state under the lock
            self.mutex.acquire()
            last_joint_state = deepcopy(self.joint_state)
            self.mutex.release()
            if not self.check_validity(last_joint_state):
                print name + ": COLLISION: 0"
                break
            try:
                (trans,rot) = self.listener.lookupTransform('world_link','lwr_arm_7_link',
                                                            rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                print "TF Exception!"
                continue
            # current end-effector pose from TF
            TR = numpy.dot(tf.transformations.translation_matrix(trans),
                           tf.transformations.quaternion_matrix(rot))
            if (is_same(T, TR)):
                print name + ": PASSED: ", points
                return points
                done = True  # NOTE(review): unreachable after return
            if (time.time() - start_time > timeout) :
                print name + ": Robot took too long to reach desired pose. Grader timed out"
                return 0
                done = True  # NOTE(review): unreachable after return
            else:
                rospy.sleep(0.05)
if __name__ == '__main__':
    # Grading script (Python 2 / ROS): runs four scenarios of increasing
    # difficulty, awarding points for each goal pose reached.
    rospy.init_node('mp_grader', anonymous=True)
    og = ObstacleGenerator()
    grade = 0
    extra_credit = 0
    g = Grader(og)
    rospy.sleep(1.0)
    # Scenario 1: no obstacles (2 points)
    og.no_obs()
    rospy.sleep(0.5)
    Ttrans = tf.transformations.translation_matrix((0.5,0.0,0.5))
    Rtrans = tf.transformations.rotation_matrix(0,(0,0,1))
    #Ttrans = tf.transformations.translation_matrix((0.6,0.5,0.6))
    #Rtrans = tf.transformations.rotation_matrix(1.57,(0,1,0))
    T = numpy.dot(Ttrans,Rtrans)
    grade_none = g.goto_pose("No Obstacle", T, 15, 2)
    # Scenario 2: simple obstacle (4 points)
    og.simple_obs()
    rospy.sleep(0.5)
    Ttrans = tf.transformations.translation_matrix((0.45,0.45,0.35))
    Rtrans = tf.transformations.rotation_matrix(1.57,(0,1,0))
    #Ttrans = tf.transformations.translation_matrix((0.5,-0.03,0.24))
    #Rtrans = tf.transformations.rotation_matrix(1.57,(0,1,0))
    T = numpy.dot(Ttrans,Rtrans)
    grade_simple = g.goto_pose("Simple Obstacle", T, 60, 4)
    # Scenario 3: complex obstacle (4 points); goal depends on whether the
    # simple scenario succeeded (robot's current position differs)
    og.complex_obs()
    rospy.sleep(0.5)
    if grade_simple !=0:
        Ttrans = tf.transformations.translation_matrix((0.51,0.04,0.55))
        Rtrans = tf.transformations.rotation_matrix(0,(0,1,0))
    else:
        Ttrans = tf.transformations.translation_matrix((0.4,0.5,0.3))
        Rtrans = tf.transformations.rotation_matrix(1.57,(0,1,0))
    T = numpy.dot(Ttrans,Rtrans)
    grade_hard = g.goto_pose("Hard Obstacle", T, 150, 4)
    # Scenario 4 (extra credit, 1 point): reset first, then super obstacle
    g.reset_robot()
    og.super_obs()
    rospy.sleep(0.5)
    #Ttrans = tf.transformations.translation_matrix((0.56,0.0,0.56))
    #Rtrans = tf.transformations.rotation_matrix(1.57,(0,1,0))
    #Ttrans = tf.transformations.translation_matrix((0.478,0.018,0.551))
    #Rtrans = tf.transformations.rotation_matrix(-2.56699,(0,-1,0))
    Ttrans = tf.transformations.translation_matrix((0.443729966879, 0.0396669730544 ,0.548577845097))
    Rtrans = tf.transformations.rotation_matrix(-2.3612393753033585,(-0.08182223, -0.97748883, 0.19447547))
    T = numpy.dot(Ttrans,Rtrans)
    extra_credit = g.goto_pose("Super Hard Obstacle", T, 260, 1)
    grade = grade_none + grade_simple + grade_hard
    print "Grade: ", grade
    print "Extra credit: ", extra_credit
|
""" Classes and functions for adjusting strain data.
"""
# Copyright (C) 2015 Ben Lackey, Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import numpy
import pycbc
import scipy
class Calibration:
    """ Class for adjusting time-varying calibration parameters of strain data.

    Docstrings that contain LaTeX like ``\\Delta`` are raw strings below —
    the originals used ``\D`` inside normal strings, which is an invalid
    escape sequence (a SyntaxWarning on modern Python).
    """
    def __init__(self, freq=None, fc0=None, invc0=None, c0=None, d0=None,
                 a_tst0=None, a_pu0=None):
        """ Initialize the class with the transfer functions for a given epoch
        that starts at time t0.
        Parameters
        ----------
        freq : array
            The frquencies corresponding to the values of c0, d0, a0 in Hertz.
        fc0 : float
            Coupled-cavity (CC) pole at time t0 when c0=c(t0) and a0=a(t0)
            are measured.
        invc0 : array
            Initial inverse sensing function 1/c0 at t0 for the frequencies.
            Supply either invc0 or c0.
        c0 : array
            Initial sensing function at t0 for the frequencies.
            Supply either invc0 or c0.
        d0 : array
            Digital filter for the frequencies.
        a_tst0 : array
            Initial actuation function for the test mass at t0 for the
            frequencies.
        a_pu0 : array
            Initial actuation function at the penultimate mass stage at t0 for
            the frequencies.
        """
        # cast frequencies to real numbers
        self.freq = numpy.real(freq)
        # set initial sensing function and its inverse
        # (if both invc0 and c0 are given, c0 takes precedence)
        if invc0 is not None:
            self.invc0 = invc0
            self.c0 = 1.0 / invc0
        if c0 is not None:
            self.invc0 = 1.0 / c0
            self.c0 = c0
        # set the other initial transfer functions
        self.d0 = d0
        self.a_tst0 = a_tst0
        self.a_pu0 = a_pu0
        self.fc0 = fc0
        # calculate initial open loop gain
        self.g0 = self.c0 * self.d0 * (self.a_tst0 + self.a_pu0)
        # calculate initial response function
        self.r0 = (1.0 + self.g0) / self.c0
        # calculate the residual of c0 after factoring out the CC pole fc0
        # if using the new calibration convention where fc0 is defined
        if fc0 is not None:
            self.c_res = self.c0 * (1 + 1.0j * self.freq / fc0)

    def update_c(self, delta_fc=0.0, kappa_c=1.0):
        r""" Calculate the sensing function c(f,t) given the new parameters
        kappa_c(t), kappa_a(t), and \Delta f_c(t) = f_c(t) - f_c(t_0).
        Parameters
        ----------
        delta_fc : float
            Change in coupled-cavity (CC) pole at a time t:
            \Delta f_c(t) = f_c(t) - f_c(t_0).
        kappa_c : float
            Scalar correction factor for sensing function at time t.
        Returns
        -------
        c : numpy.array
            The new sensing function c(f,t).
        """
        fc = self.fc0 + delta_fc
        return self.c_res * kappa_c / (1 + 1.0j * self.freq / fc)

    def update_g(self, delta_fc=0.0, kappa_c=1.0,
                 kappa_tst_re=1.0, kappa_tst_im=0.0,
                 kappa_pu_re=1.0, kappa_pu_im=0.0):
        r""" Calculate the open loop gain g(f,t) given the new parameters
        kappa_c(t), kappa_a(t), and \Delta f_c(t) = f_c(t) - f_c(t_0).
        Parameters
        ----------
        delta_fc : float
            Change in coupled-cavity (CC) pole at a time t:
            \Delta f_c(t) = f_c(t) - f_c(t_0).
        kappa_c : float
            Scalar correction factor for sensing function c at time t.
        kappa_tst_re : float
            Real part of scalar correction factor for actuation function
            a_tst0 at time t.
        kappa_pu_re : float
            Real part of scalar correction factor for actuation function
            a_pu0 at time t.
        kappa_tst_im : float
            Imaginary part of scalar correction factor for actuation function
            a_tst0 at time t.
        kappa_pu_im : float
            Imaginary part of scalar correction factor for actuation function
            a_pu0 at time t.
        Returns
        -------
        g : numpy.array
            The new open loop gain g(f,t).
        """
        c = self.update_c(delta_fc=delta_fc, kappa_c=kappa_c)
        a_tst = self.a_tst0 * (kappa_tst_re + 1.0j * kappa_tst_im)
        a_pu = self.a_pu0 * (kappa_pu_re + 1.0j * kappa_pu_im)
        return c * self.d0 * (a_tst + a_pu)

    def update_r(self, delta_fc=0.0, kappa_c=1.0,
                 kappa_tst_re=1.0, kappa_tst_im=0.0,
                 kappa_pu_re=1.0, kappa_pu_im=0.0):
        r""" Calculate the response function R(f,t) given the new parameters
        kappa_c(t), kappa_a(t), and \Delta f_c(t) = f_c(t) - f_c(t_0).
        Parameters
        ----------
        delta_fc : float
            Change in coupled-cavity (CC) pole at a time t:
            \Delta f_c(t) = f_c(t) - f_c(t_0).
        kappa_c : float
            Scalar correction factor for sensing function c at time t.
        kappa_tst_re : float
            Real part of scalar correction factor for actuation function
            a_tst0 at time t.
        kappa_pu_re : float
            Real part of scalar correction factor for actuation function
            a_pu0 at time t.
        kappa_tst_im : float
            Imaginary part of scalar correction factor for actuation function
            a_tst0 at time t.
        kappa_pu_im : float
            Imaginary part of scalar correction factor for actuation function
            a_pu0 at time t.
        Returns
        -------
        r : numpy.array
            The new response function r(f,t).
        """
        c = self.update_c(delta_fc=delta_fc, kappa_c=kappa_c)
        g = self.update_g(delta_fc=delta_fc, kappa_c=kappa_c,
                          kappa_tst_re=kappa_tst_re, kappa_tst_im=kappa_tst_im,
                          kappa_pu_re=kappa_pu_re, kappa_pu_im=kappa_pu_im)
        return (1.0 + g) / c

    def adjust_strain(self, strain, delta_fc=0.0, kappa_c=1.0,
                      kappa_tst_re=1.0, kappa_tst_im=0.0,
                      kappa_pu_re=1.0, kappa_pu_im=0.0):
        r"""Adjust the TimeSeries strain by changing the time-dependent
        calibration parameters kappa_c(t), kappa_a(t), and
        \Delta f_c(t) = f_c(t) - f_c(t_0).
        Parameters
        ----------
        strain : TimeSeries
            The strain that you want to adjust.
        delta_fc : float
            Change in coupled-cavity (CC) pole at a time t:
            \Delta f_c(t) = f_c(t) - f_c(t_0)
        kappa_c : float
            Scalar correction factor for sensing function c0 at time t.
        kappa_tst_re : float
            Real part of scalar correction factor for actuation function
            A_{tst0} at time t.
        kappa_tst_im : float
            Imaginary part of scalar correction factor for actuation function
            A_tst0 at time t.
        kappa_pu_re : float
            Real part of scalar correction factor for actuation function
            A_{pu0} at time t.
        kappa_pu_im : float
            Imaginary part of scalar correction factor for actuation function
            A_{pu0} at time t.
        Returns
        -------
        strain_adjusted : TimeSeries
            The adjusted strain.
        """
        # convert time series to frequency domain
        strain_tilde = strain.to_frequencyseries()
        # get the "true" and "adjusted" transfer functions
        r_true = self.r0
        r_adjusted = self.update_r(delta_fc=delta_fc, kappa_c=kappa_c,
                                   kappa_tst_re=kappa_tst_re,
                                   kappa_tst_im=kappa_tst_im,
                                   kappa_pu_re=kappa_pu_re,
                                   kappa_pu_im=kappa_pu_im)
        # get the error function to apply to the strain in the frequency-domain
        k = r_adjusted / r_true
        # decompose into amplitude and unwrapped phase
        k_amp = numpy.abs(k)
        k_phase = numpy.unwrap(numpy.angle(k))
        # convert to a FrequencySeries by interpolating then resampling
        order = 1
        k_amp_off = scipy.interpolate.UnivariateSpline(self.freq, k_amp,
                                                       k=order, s=0)
        k_phase_off = scipy.interpolate.UnivariateSpline(self.freq, k_phase,
                                                         k=order, s=0)
        # interpolation/vector operations are much faster if you cast
        # FrequencySeries to numpy.array
        freq_even = strain_tilde.sample_frequencies.numpy()
        k_even_sample = k_amp_off(freq_even) * \
            numpy.exp(1.0j * k_phase_off(freq_even))
        strain_tilde_adjusted = pycbc.types.FrequencySeries(
            strain_tilde.numpy() * k_even_sample,
            delta_f=strain_tilde.delta_f)
        # IFFT to get time series
        strain_adjusted = strain_tilde_adjusted.to_timeseries()
        strain_adjusted.start_time = strain.start_time
        return strain_adjusted
|
"""Implementation of Bayesian Quadrature."""
class BayesianQuadrature:
    """Abstract base for Bayesian quadrature methods.

    A Bayesian quadrature method models the integrand from function
    evaluations and returns a belief over the integral's value on a
    domain with respect to a measure.

    Parameters
    ----------
    fun0
        Stochastic process modelling function to be integrated.
    """

    def __init__(self, fun0):
        self.fun0 = fun0

    def integrate(self, fun, domain, measure, nevals):
        """Integrate ``fun`` over ``domain`` against ``measure``.

        Parameters
        ----------
        fun :
            Function to be integrated.
        domain :
            Domain to integrate over.
        measure :
            Measure to integrate against.
        nevals :
            Number of function evaluations.

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        """
        raise NotImplementedError
|
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator
from detectron2 import model_zoo
from utils import get_my_cfg
from visdrone import register_one_set
import os
if __name__ == '__main__':
    # Register the VisDrone train/val splits with detectron2's dataset catalog.
    train_dataset = "VisDrone2019-DET-train"
    val_dataset = "VisDrone2019-DET-val"
    register_one_set(train_dataset)
    register_one_set(val_dataset)
    # Start from COCO-pretrained Faster R-CNN (R50-FPN, 1x schedule) weights.
    cfg = get_my_cfg()
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
    cfg.DATASETS.TRAIN = (train_dataset, )
    cfg.DATASETS.TEST = (val_dataset, )
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # Train from the pretrained weights (resume=False starts a fresh run),
    # then evaluate COCO-style bbox AP on the validation split.
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
    evaluator = COCOEvaluator(val_dataset, ("bbox",), False, output_dir=os.path.join("output", "evaluate"))
    trainer.test(cfg, trainer.model, evaluator)
""" This set of functions are various random things needed somewhere in the project. """
import numpy as np
from adcsim import transformations as tr
def random_dcm():
    """Return a random direction cosine matrix.

    Method: draw a random principal rotation vector (random unit axis
    plus an angle in [0, pi)) and convert it to the corresponding DCM.

    :return: random DCM
    """
    axis = 2 * np.random.random(3) - 1
    axis = axis / np.linalg.norm(axis)  # random unit vector
    # random angle between 0 and 180 (-180 to 180 would also be fine?)
    angle = np.pi * np.random.random()
    return tr.prv_to_dcm(angle, axis)
def cross_product_operator(vec):
    """Return the 3x3 skew-symmetric 'cross product operator' of a 3D vector.

    Satisfies cross_product_operator(a) @ b == np.cross(a, b).
    See Part1/3_Directional-Cosine-Matrix-_DCM_.pdf page 14

    :param vec: any 3D vector
    :return: 3x3 cross product operator
    """
    x, y, z = vec[0], vec[1], vec[2]
    return np.array([[0, -z, y],
                     [z, 0, -x],
                     [-y, x, 0]])
def align_z_to_nadir(pos_vec):
    """Return one DCM (inertial -> body) whose body z-axis points along pos_vec.

    The remaining two axes are completed from a random direction, so this
    is one of the infinitely many DCMs satisfying the alignment.

    :param pos_vec: position vector from spg4
    :return: DCM matrix
    """
    z_axis = pos_vec / np.linalg.norm(pos_vec)
    rand_dir = np.random.random(3)
    rand_dir = rand_dir / np.linalg.norm(rand_dir)
    # x-axis: any direction perpendicular to z (via a random vector)
    x_axis = cross_product_operator(z_axis) @ rand_dir
    x_axis = x_axis / np.linalg.norm(x_axis)
    # y-axis completes the right-handed frame
    y_axis = cross_product_operator(z_axis) @ x_axis
    return np.array([x_axis, y_axis, z_axis])
def initial_align_gravity_stabilization(pos_vec, vel_vec):
    """Return the DCM (inertial -> body) for gravity-gradient stabilization.

    The body z-axis aligns with the position vector and the body y-axis
    aligns with the cross-track direction (perpendicular to both nadir
    and the velocity track).

    :param pos_vec: position vector from spg4
    :param vel_vec: velocity vector from spg4
    :return: DCM matrix
    """
    radial = pos_vec / np.linalg.norm(pos_vec)
    along = vel_vec / np.linalg.norm(vel_vec)
    # cross-track axis, perpendicular to both position and velocity
    cross_track = cross_product_operator(radial) @ along
    cross_track = cross_track / np.linalg.norm(cross_track)
    # re-orthogonalized along-track axis
    along_corrected = cross_product_operator(cross_track) @ radial
    return np.array([along_corrected, cross_track, radial])
def inertial_to_orbit_frame(pos_vec, vel_vec):
    """Return the DCM translating the inertial frame to the orbit frame.

    The orbit frame has one axis pointing straight nadir, one axis
    perpendicular to both nadir and the velocity direction, and a third
    completing the system (equal to the velocity direction for a
    perfectly circular orbit).

    :param pos_vec: position vector from spg4
    :param vel_vec: velocity vector from spg4
    :return: DCM matrix
    """
    radial = pos_vec / np.linalg.norm(pos_vec)
    along = vel_vec / np.linalg.norm(vel_vec)
    # cross-track axis (note the v x p ordering here)
    cross_track = cross_product_operator(along) @ radial
    cross_track = cross_track / np.linalg.norm(cross_track)
    # re-orthogonalized along-track axis
    along_corrected = cross_product_operator(radial) @ cross_track
    # last row is -radial: the frame's third axis points nadir
    return np.array([along_corrected, cross_track, -radial])
|
import qsim.evolution.hamiltonian
from qsim.graph_algorithms.graph import line_graph, ring_graph
from qsim.tools import tools
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import expm_multiply, eigsh, expm
from scipy.special import comb
from scipy.special import jv, iv
from qsim.codes.quantum_state import State
from scipy.fft import fft
import matplotlib.pyplot as plt
import dill
from os import path
def disorder_hamiltonian(states, h=1., subspace=None):
    """Generate a random on-site (sigma_z) disorder term.

    :param states: for ``subspace=None``, a (dim, n) array of occupation
        bitstrings (one row per basis state); for ``subspace='all'``, the
        shape passed straight to ``np.random.random`` (typically the qubit
        count)
    :param h: disorder strength; fields are drawn uniformly from [-h/2, h/2)
    :param subspace: ``None`` returns a diagonal sparse matrix over
        ``states``; ``'all'`` returns the raw field array; anything else
        falls through and returns ``None``
    :return: sparse diagonal Hamiltonian, raw field array, or ``None``
    """
    if subspace is None:
        disorder = (np.random.random(size=states.shape[1]) - 1 / 2) * h
        # Diagonal matrix: each basis state accumulates +-field per site,
        # with (1/2 - bit) * 2 mapping bit {0, 1} onto {+1, -1}.
        return sparse.csc_matrix(
            (np.sum((1 / 2 - states) * 2 * disorder, axis=1), (np.arange(states.shape[0]),
                                                               np.arange(states.shape[0]))),
            shape=(states.shape[0], states.shape[0]))
    # BUG FIX: was ``subspace is 'all'`` -- identity comparison with a string
    # literal is implementation-dependent; use equality.
    if subspace == 'all':
        return (np.random.random(size=states) - 1 / 2) * h
def matvec_heisenberg(heisenberg: qsim.evolution.hamiltonian.HamiltonianHeisenberg, disorder, state: State):
    """Apply the Heisenberg Hamiltonian plus a diagonal sigma_z disorder field.

    :param heisenberg: Hamiltonian object providing ``left_multiply``
    :param disorder: per-qubit disorder strengths, indexable by qubit number
        (assumed length >= state.number_logical_qudits -- TODO confirm)
    :param state: state vector to act on
    :return: ``H @ state`` where H is the Heisenberg part plus the disorder
        terms accumulated below
    """
    temp = np.zeros_like(state)
    # For each logical qubit
    state_shape = state.shape
    for i in range(state.number_logical_qudits):
        ind = 2 ** i
        out = np.zeros_like(state, dtype=np.complex128)
        # Reshape (Fortran order) so axis 1 indexes qubit i's basis value.
        # Note index start from the right (sN,...,s3,s2,s1)
        state = state.reshape((-1, 2, ind), order='F')
        out = out.reshape((-1, 2, ind), order='F')
        out[:, [0, 1], :] = state[:, [0, 1], :]
        # sigma_z action: the |1> component picks up -disorder[i],
        # the |0> component picks up +disorder[i].
        out[:, 1, :] = -disorder[i] * out[:, 1, :]
        out[:, 0, :] = disorder[i] * out[:, 0, :]
        # Restore the original shape before the next iteration / return.
        state = state.reshape(state_shape, order='F')
        out = out.reshape(state_shape, order='F')
        temp = temp + out
    return heisenberg.left_multiply(state) + temp
def matvec_heisenberg_imag(heisenberg: qsim.evolution.hamiltonian.HamiltonianHeisenberg, disorder, state: State):
    """Apply -1j * (Heisenberg + sigma_z disorder) to ``state``.

    Identical to :func:`matvec_heisenberg` except for the overall ``-1j``
    factor, i.e. the right-hand side of the Schrodinger equation.

    :param heisenberg: Hamiltonian object providing ``left_multiply``
    :param disorder: per-qubit disorder strengths, indexable by qubit number
    :param state: state vector to act on
    :return: ``-1j * H @ state``
    """
    temp = np.zeros_like(state)
    # For each logical qubit
    state_shape = state.shape
    for i in range(state.number_logical_qudits):
        ind = 2 ** i
        out = np.zeros_like(state, dtype=np.complex128)
        # Reshape (Fortran order) so axis 1 indexes qubit i's basis value.
        # Note index start from the right (sN,...,s3,s2,s1)
        state = state.reshape((-1, 2, ind), order='F')
        out = out.reshape((-1, 2, ind), order='F')
        out[:, [0, 1], :] = state[:, [0, 1], :]
        # sigma_z action: -disorder[i] on |1>, +disorder[i] on |0>.
        out[:, 1, :] = -disorder[i] * out[:, 1, :]
        out[:, 0, :] = disorder[i] * out[:, 0, :]
        state = state.reshape(state_shape, order='F')
        out = out.reshape(state_shape, order='F')
        temp = temp + out
    return -1j * (heisenberg.left_multiply(state) + temp)
def matvec(A, v):
    """Apply ``A`` to ``v``.

    ``A`` may be a callable (LinearOperator-style) or any object exposing a
    ``.dot`` method, e.g. a sparse matrix or ndarray.
    """
    return A(v) if callable(A) else A.dot(v)
def chebyshev(A, v, tau, eps, p_check, verbose=False):
    """Approximate ``expm(tau * A) @ v`` by a Chebyshev polynomial expansion.

    BUG FIX: removed leftover debugging code (``print(chebs(...))`` followed
    by an unconditional ``raise Exception``) that made the function raise
    before doing any work, plus per-iteration debug prints; diagnostic output
    is now gated on ``verbose``.

    :param A: anti-Hermitian operator (so that ``1j * A`` is Hermitian for
        the eigsh spectral-range estimates) -- sparse matrix, LinearOperator,
        or callable accepted by :func:`matvec`
    :param v: start vector
    :param tau: propagation time
    :param eps: target (relative) truncation error
    :param p_check: how often (in iterations) to test for convergence
    :param verbose: print diagnostic progress information
    :return: the approximated vector ``expm(tau * A) @ v``
    """
    def max_eig_estimate(A):
        # Largest algebraic eigenvalue. The infinity-norm would be cheaper;
        # Lanczos (eigsh) is more accurate but slower.
        eigval = eigsh(A, k=1, which='LA', return_eigenvectors=False)
        return eigval[0]

    def min_eig_estimate(A):
        # Smallest algebraic eigenvalue and its eigenvector.
        eigval, eigvec = eigsh(A, k=1, which='SA')
        return eigval[0], eigvec

    norm = np.linalg.norm(v)
    # First estimate the spectral range of the Hermitian operator 1j * A.
    lambda_1, v_1 = min_eig_estimate(1j * A)
    c1 = np.dot(v, v_1)
    lambda_n = max_eig_estimate(1j * A)
    # Map the spectrum onto [-1, 1]: half-width l1 and centre ln.
    l1 = (lambda_n - lambda_1) / 2
    ln = (lambda_n + lambda_1) / 2

    def truncation_m(k0, eps, s_norm, v):
        # Actual truncation point of the Chebyshev iteration: smallest
        # m >= k0 + 1 whose remaining coefficients are negligible.
        m = k0 + 1
        while True:
            if np.sum(np.abs(chebs_upper[m:])) * norm / s_norm < eps:
                return m
            else:
                m += 1

    def chebs(m, l1, ln, tau):
        # First m + 1 Chebyshev coefficients from Bessel-function values.
        bessels = np.zeros(m + 1, dtype=np.complex128)
        bessels[m] = bessel_asymptotic(m, tau * l1)
        bessels[m - 1] = bessel_asymptotic(m - 1, tau * l1)
        for i in range(m - 2, -1, -1):
            bessels[i] = bessel_asymptotic(i, tau * l1)
        # Convert to Chebyshev coefficients with the exponential prefactor;
        # the k = 0 term carries half weight in a Chebyshev series.
        bessels *= 2 * np.exp(tau * ln)
        bessels[0] *= 0.5
        return bessels

    def bessel_asymptotic(nu, z):
        # Modified Bessel function of the first kind.
        return iv(nu, z)

    def bessel_approximate(nu, z, order=5):
        # Asymptotic expansion from Abramowitz and Stegun, 9.7.
        # What is a good order to choose?
        mu = 4 * nu ** 2

        def term(n):
            tmp = 1
            for i in range(n):
                tmp *= mu - (2 * i + 1) ** 2
            return (-1) ** n * tmp / (np.math.factorial(n) * (8 * z) ** n)

        subleading = 0
        for n in range(order):
            subleading += term(n)
        return np.exp(z) / (2 * np.pi * z) ** 0.5 * subleading

    def m_max(eps, tau, lambda_1, lambda_n):
        # Conservative upper bound on the number of expansion terms.
        # (An exact search using E(m) below is possible but unused.)
        bound = eps * np.abs(c1) / (4 * norm)
        if ((0.5) ** (np.exp(1) * tau * (lambda_n - lambda_1) / 2) < bound):
            return int(np.exp(1) * tau * (lambda_n - lambda_1) / 2)
        else:
            return int(np.log(1 / bound) / np.log(2))

    def E(m, tau, lambda_1, lambda_n):
        # Error bound E(m) for the truncated expansion (kept for reference).
        b = 0.618
        d = 0.438  # TODO: pull more accurate values later
        rho = lambda_n - lambda_1
        if m > tau * rho:
            return d ** m / (1 - d)
        else:
            return np.exp(-b * (m + 1) ** 2 / (tau * rho)) * (1 + np.sqrt(np.pi * tau * rho / (4 * b))) + \
                   d ** (tau * rho) / (1 - d)

    # Compute conservative upper bound for the number of terms needed.
    if verbose:
        print('beginning')
    m_upper = m_max(eps, tau, lambda_1, lambda_n)
    if verbose:
        print('m_upper:{}'.format(m_upper))
    # A_k coefficients in the expansion.
    chebs_upper = chebs(m_upper, l1, ln, -1j * tau)
    if verbose:
        print('chebs:')
        print(chebs_upper)
    # Initialize vectors for the three-term Chebyshev recurrence.
    v_old = v
    v_new = matvec(A, v) / l1 - ln / l1 * v
    s_current = chebs_upper[1] * v_new + chebs_upper[0] * v_old
    s_norms = [np.linalg.norm(chebs_upper[0] * v), np.linalg.norm(s_current)]
    exit_test = False
    norm_test = False
    k = 1
    while not exit_test:
        v_old = 2 * (matvec(A, v_new) / l1 - ln / l1 * v_new) - v_old
        s_current = s_current + chebs_upper[k + 1] * v_old
        s_norms.append(np.linalg.norm(s_current))
        if k % p_check == 0 and not norm_test:
            # Convergence test: partial-sum norms have stabilized, so the
            # truncation point can be tightened below the conservative bound.
            r = np.linalg.norm(s_current) / s_norms[k + 1 - p_check]
            if np.abs(r - 1) < 0.1:
                k0 = k + 1
                norm_test = True
                m_upper = truncation_m(k0, eps, s_norms[k + 1 - p_check], v)
                if verbose:
                    print('r={}, m_upper_new={}'.format(r, m_upper))
        if verbose:
            print('k={}'.format(k))
        k += 1
        exit_test = (k == m_upper)
        v_new, v_old = v_old, v_new
    return s_current
def chebs_real(m, l1, ln, tau):
# first m chebyshev coefficients
# first find the needed Bessel function values by backwards recursion
bessels = np.zeros(m + 1, dtype=np.complex128)
bessels[m] = iv(m, tau * l1)
bessels[m - 1] = iv(m - 1, tau * l1)
for i in range(m - 2, -1, -1):
bessels[i] = iv(i, tau * l1)
# bessels[i] = 2*(i+1) / (-1j*tau * l1) * bessels[i + 1] + bessels[i + 2]
# Now convert to Chebyshev coefficient with exponential prefactor:
bessels *= 2 * np.exp(-tau * ln)
bessels[0] *= 0.5
return bessels
def chebs_imag(m, l1, ln, tau):
# first m chebyshev coefficients
# first find the needed Bessel function values by backwards recursion
bessels = np.zeros(m + 1, dtype=np.complex128)
bessels[m] = jv(m, tau * l1) * (1j) ** (-m)
bessels[m - 1] = jv(m - 1, tau * l1) * (1j) ** (-(m - 1))
for k in range(m - 2, -1, -1):
# bessels[k] = (1j)**(-k)*jv(k, tau * l1)
bessels[k] = 2 * (k + 1) / (-1j * tau * l1) * bessels[k + 1] + bessels[k + 2]
# Now convert to Chebyshev coefficient with exponential prefactor:
bessels *= 2 * np.exp(1j * tau * ln)
bessels[0] *= 0.5
return bessels
"""print(chebs_real(5, .8, 2., -1j*1.2))
print(chebs_imag(5, .8, 2., 1.2))
raise Exception
d = 10
v = np.random.random(d)
v = np.array(v/np.linalg.norm(v), dtype=np.complex128)
A = np.random.random((d, d))
A = -1j*(A+A.T)
tau = 1
correct = expm_multiply(A*tau, v)
result = chebyshev(A, v, tau, 1e-6, 7, verbose=True)
#print(correct)
#print(result)
#print(np.linalg.norm(result))
#print((np.abs(correct)-np.abs(result))/np.abs(correct))"""
"""
def generate_time_evolved_states(graph, times, verbose=False, h=1):
import dill
if verbose:
print('beginning')
heisenberg = qsim.evolution.hamiltonian.HamiltonianHeisenberg(graph, subspace=0, energies=(1/4, 1/2))
#print(heisenberg.hamiltonian.todense(), heisenberg.hamiltonian.shape)
dill.dump(heisenberg, open('heisenberg_'+str(graph.n)+'.pickle', 'wb'))
#heisenberg = dill.load(open('heisenberg_18.pickle', 'rb'))
dim = int(comb(graph.n, graph.n // 2))
hamiltonian = heisenberg.hamiltonian + disorder_hamiltonian(heisenberg.states, h=h)
print('hi', eigsh(hamiltonian, k=1, which='SA'))
raise Exception
#hamiltonian_exp = np.exp(hamiltonian.todense)
if verbose:
print('initialized Hamiltonian')
if isinstance(times, list):
states_list = []
states_exp_list = []
z_mag_list = []
z_mag_exp_list = []
for time in times:
n_times = len(time)
hamiltonian_exp = expm(-1j *hamiltonian * (time[1] - time[0]))
state = np.zeros((dim, 1))
state[-1, 0] = 1
states = np.zeros((dim, n_times), dtype=np.complex128)
states_exp = np.zeros((dim, n_times), dtype=np.complex128)
states[..., 0] = state.flatten()
states_exp[..., 0] = state.flatten()
for i in range(n_times - 1):
if verbose:
print(i)
states[..., i + 1] = expm_multiply(-1j * hamiltonian * (time[i + 1] - time[i]), states[..., i])
states_exp[..., i + 1] = hamiltonian_exp @ states_exp[...,i]
z_mag = ((np.abs(states) ** 2).T @ (heisenberg.states - 1 / 2) * 2).real/2
z_mag_exp = ((np.abs(states_exp) ** 2).T @ (heisenberg.states - 1 / 2) * 2).real / 2
print(z_mag)
states_list.append(states)
states_exp_list.append(states_exp)
z_mag_list.append(z_mag)
z_mag_exp_list.append(z_mag)
return states_list, states_exp_list, z_mag_list, z_mag_exp_list
else:
n_times = len(times)
#hamiltonian_exp = expm(-1j *hamiltonian * (times[1] - times[0]))
state = np.zeros((dim,1))
state[-1,0] = 1
states = np.zeros((dim, n_times), dtype=np.complex128)
states[...,0] = state.flatten()
#states_exp = np.zeros((dim, n_times), dtype=np.complex128)
states_cheb = np.zeros((dim, n_times), dtype=np.complex128)
states = np.zeros((dim, n_times), dtype=np.complex128)
states[..., 0] = state.flatten()
#states_exp[..., 0] = state.flatten()
states_cheb[..., 0] = state.flatten()
for i in range(n_times-1):
if verbose:
print(i)
states[...,i+1] = expm_multiply(-1j*hamiltonian*(times[i+1]-times[i]), states[...,i])
#states_exp[..., i + 1] = hamiltonian_exp @ states_exp[..., i]
states_cheb[..., i + 1] = chebyshev(-1j*hamiltonian, states_cheb[...,i], times[i+1]-times[i], 1e-6, 7)
z_mag = ((np.abs(states)**2).T @ (heisenberg.states-1/2)*2).real/2
#z_mag_exp = ((np.abs(states_exp) ** 2).T @ (heisenberg.states - 1 / 2) * 2).real/2
z_mag_cheb = ((np.abs(states_cheb) ** 2).T @ (heisenberg.states - 1 / 2) * 2).real / 2
return states, states_cheb, states_cheb, z_mag, z_mag_cheb, z_mag_cheb
def magnetization(graph, h=1):
t_final = 3
n_times = 10**t_final
n=graph.n
times = np.linspace(1, 10**t_final, n_times)
states, states_exp, states_cheb, z_mag, z_mag_exp, z_mag_cheb = generate_time_evolved_states(graph, times, verbose=True, h=h)
# Compute z magnetization
fig, ax = plt.subplots(2,graph.n//2, sharey=True)
for i in range(graph.n//2):
ax[0][i].semilogx(times, (z_mag[:,i].flatten()), label='Pade')
ax[1][i].semilogx(times, (z_mag[:, i+graph.n//2].flatten()))
ax[0][i].semilogx(times, (z_mag_cheb[:, i].flatten()), label='Chebyshev')
ax[1][i].semilogx(times, (z_mag_cheb[:, i + graph.n // 2].flatten()))
ax[0][i].text(-0.1, 1.05, '$x=$'+str(i), transform=ax[0][i].transAxes,
size=10, weight='bold')
ax[1][i].text(-0.1, 1.05, '$x=$' + str(i+graph.n//2), transform=ax[1][i].transAxes,
size=10, weight='bold')
ax[0][0].legend()
ax[0][0].set_ylabel(r'$\langle S_z(x)\rangle $')
ax[1][0].set_ylabel(r'$\langle S_z(x)\rangle $')
fig.text(0.5, 0.04, 'Time $(1/J)$', ha='center')
fig.suptitle(r'$J=J_z=1, h_{\mathrm{max}}=$'+str(h))
plt.show()
z_mag = np.abs(fft(z_mag, axis=1))
fig, ax = plt.subplots(2,graph.n//2, sharey=True)
for i in range(graph.n//2):
ax[0][i].semilogx(times, (z_mag[:, i].flatten()))
ax[1][i].semilogx(times, (z_mag[:, i + graph.n // 2].flatten()))
ax[0][i].text(-0.1, 1.05, '$k=$' + str(i), transform=ax[0][i].transAxes,
size=10, weight='bold')
ax[1][i].text(-0.1, 1.05, '$k=$' + str(i + graph.n // 2), transform=ax[1][i].transAxes,
size=10, weight='bold')
ax[0][0].set_ylabel(r'$\langle S_z(k)\rangle $')
ax[1][0].set_ylabel(r'$\langle S_z(k)\rangle $')
ax[1][graph.n//4].set_xlabel('Time $(1/J)$')
plt.show()
#generate_time_evolved_states(line_graph(8), np.linspace(0, 1000, 1), verbose=True)
n = 28
print(2**n)
import time
t0 = time.time()
print('beginning')
heisenberg = qsim.evolution.hamiltonian.HamiltonianHeisenberg(line_graph(n), subspace='all', energies=(1/4, 1/2))
disorder = disorder_hamiltonian(n, subspace='all', h=1.)
print(disorder)
state = State(np.random.random((2**n, 1)))
state = state/np.linalg.norm(state)
#state = State(np.ones((2**n, 1)))
#print(state)
state = matvec_heisenberg(heisenberg, disorder, state)
print(state)
print(time.time()-t0)
"""
def return_probability(graph, times, verbose=False, h=1, exact=True):
    # For random product states in the computational basis, time evolve then compute
    # the spin-spin correlator <S_z(t) S_z(0)>, averaged over the zero-total-
    # magnetization basis states.
    # NOTE(review): ``exact`` is accepted but never used in this body.
    # NOTE(review): if graph.n is odd, no basis state has zero magnetization,
    # num stays 0 and the final division raises -- confirm callers use even n.
    if verbose:
        print('beginning')
    czz_tot = np.zeros((len(times), graph.n))
    num = 0  # number of zero-magnetization basis states averaged over
    for _ in range(1):
        for k in range(2 ** graph.n):
            print(k)
            # Compute the total magnetization
            z_mags_init = 2 * (1 / 2 - tools.int_to_nary(k, size=graph.n))
            if np.sum(z_mags_init) == 0:
                num += 1
                if verbose:
                    print(z_mags_init)
                subspace = np.sum(z_mags_init)
                # Cache the (expensive) Hamiltonian construction on disk,
                # keyed by system size and magnetization subspace.
                if path.exists('heisenberg_' + str(graph.n) + '_' + str(subspace) + '.pickle'):
                    heisenberg = dill.load(open('heisenberg_' + str(graph.n) + '_' + str(subspace) + '.pickle', 'rb'))
                else:
                    heisenberg = qsim.evolution.hamiltonian.HamiltonianHeisenberg(graph, subspace=subspace,
                                                                                 energies=(1 / 4, 1 / 2))
                    dill.dump(heisenberg, open('heisenberg_' + str(graph.n) + '_' + str(subspace) + '.pickle', 'wb'))
                if verbose:
                    print('initialized Hamiltonian')
                # For every computational basis state,
                dim = int(comb(graph.n, int((graph.n + subspace) / 2)))
                print(dim, subspace)
                hamiltonian = heisenberg.hamiltonian + disorder_hamiltonian(heisenberg.states, h=h)
                n_times = len(times)
                # hamiltonian_exp = expm(-1j *hamiltonian * (times[1] - times[0]))
                # Locate the subspace index of the basis state matching
                # z_mags_init (row with zero elementwise difference).
                ind = np.argwhere(np.sum(np.abs(2 * (1 - heisenberg.states - 1 / 2) - z_mags_init), axis=1) == 0)[0, 0]
                state = np.zeros((dim, 1))
                state[ind, 0] = 1
                states = np.zeros((dim, n_times), dtype=np.complex128)
                states[:, 0] = state.flatten()
                # Step-by-step Krylov propagation between successive times.
                for i in range(n_times - 1):
                    if verbose:
                        print(i)
                    states[..., i+1] = expm_multiply(-1j * hamiltonian * (times[i + 1] - times[i]), states[..., i])
                # Per-site <S_z(t)> from state populations.
                z_mags = ((np.abs(states) ** 2).T @ (1 - heisenberg.states - 1 / 2) * 2).real
                # Correlate with the initial magnetization pattern.
                czz = z_mags * z_mags_init
                czz_tot = czz_tot + czz
    print(num)
    return czz_tot / num
#print(10 ** np.linspace(0, 3, 5))
#print(np.mean(return_probability(line_graph(6), np.concatenate([[0], 10 ** np.linspace(0, 3, 10)]), h=1), axis=1))
|
import csv
import json
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from 臺灣言語工具.音標系統.台語 import 新白話字
from 臺灣言語工具.音標系統.閩南語.臺灣閩南語羅馬字拼音 import 臺灣閩南語羅馬字拼音通行韻母表
def _main():
    """Count distinct syllables (with and without tone marks) in the CSV
    corpus and write the totals plus the sorted syllable lists to disk."""
    tsuanpooji = set()       # syllables with all tone digits stripped
    # NOTE(review): only leading '0'/'1' characters are stripped here, unlike
    # the full digit strip above -- confirm this asymmetry is intentional.
    tsuanpoojitiau = set()
    for 字物件 in csvtsuliau():
        tailo = 新白話字(字物件.型)
        # Keep only characters whose romanization parses and whose rime is in
        # the accepted Taiwanese Minnan romanization rime table.
        if (
            tailo.音標 is not None and
            tailo.韻 in 臺灣閩南語羅馬字拼音通行韻母表
        ):
            tsuanpooji.add(字物件.看分詞().strip('0123456789'))
            tsuanpoojitiau.add(字物件.看分詞().lstrip('01'))
    # Summary counts (keys mean: syllable kinds with tone / without tone).
    with open('tsonghong.json', 'w') as tong:
        json.dump(
            {
                '音節加調種類': len(tsuanpoojitiau),
                '音節無調種類': len(tsuanpooji),
            },
            tong, ensure_ascii=False, sort_keys=False, indent=2
        )
    with open('tsuanpoojitiau.txt', 'w') as tong:
        print('\n'.join(sorted(tsuanpoojitiau)), file=tong)
    with open('tsuanpooji.txt', 'w') as tong:
        print('\n'.join(sorted(tsuanpooji)), file=tong)
def csvtsuliau():
    """Yield per-character objects parsed from column 1 of 'docker.csv',
    skipping the header row."""
    with open('docker.csv', encoding='utf-8') as 詞表檔:
        for ho, 資料 in enumerate(csv.reader(詞表檔)):
            if ho != 0:  # skip the CSV header line
                yield from 拆文分析器.建立句物件(資料[1]).篩出字物件()


# Runs immediately when the module is executed/imported.
_main()
|
class Config:
    """Hyper-parameter and run-configuration constants for the CQA model.

    NOTE(review): field meanings below are inferred from their names;
    confirm against the training code that consumes this class.
    """
    lr = 1e-4                  # optimizer learning rate
    dropout = 0.2              # dropout rate -- presumably drop probability
    qs_max_len = 20            # max length: question subject (presumably tokens)
    qb_max_len = 95            # max length: question body (presumably tokens)
    ct_max_len = 150           # max length: candidate/comment text (presumably)
    char_max_len = 16          # max characters per word (char-level encoder)
    epochs = 50                # training epochs
    batch_size = 30
    char_dim = 15              # character embedding dimension
    l2_weight = 0              # L2 regularization weight (0 = disabled)
    patience = 5               # early-stopping patience
    k_fold = 0                 # cross-validation folds (0 presumably disables)
    categories_num = 2         # number of output classes
    period = 50                # logging/evaluation period (presumably steps)
    need_punct = False         # keep punctuation during preprocessing?
    wipe_num = 0
    word_trainable = False     # fine-tune word embeddings?
    concat_q = True            # concatenate question subject and body?
    need_shuffle = True        # shuffle training data
    use_char_level = True      # enable character-level features
    load_best_model = False    # restore best checkpoint before evaluation?
    model_dir = './models/CQAModel'
    log_dir = './models/CQAModel'
    glove_filename = 'word2vec_dim200_domain_specific.pkl'
    # Populated elsewhere at runtime (class-level mutable state).
    train_list = []
    dev_list = []
    test_list = []
|
import os.path
def load_data(sys_name, file_index):
    """Load one simulation trajectory saved as a text file.

    File layout: line 0 = dimension, line 1 = particle count, then for each
    frame and each particle `dim` positions followed by `dim` velocities,
    one value per line.

    :param sys_name: subdirectory of '../data' holding the system's files
    :param file_index: index into the (os.listdir-ordered) file list
    :return: ``data[frame][particle] == [x..., v...]`` on success,
        ``False`` when the index is out of range, the file is not a .txt,
        or the file length is inconsistent
    """
    filename_list = os.listdir('../data/{0}'.format(sys_name))
    if file_index >= len(filename_list):
        return False
    if filename_list[file_index][-3:] != 'txt':
        return False
    filename = filename_list[file_index]
    filepath = '../data/{0}/'.format(sys_name) + filename
    # 'with' guarantees the handle is closed even if a parse error raises.
    with open(filepath, 'r') as fd:
        data_raw = [float(line.strip()) for line in fd]
    dim = int(data_raw[0])
    n_particles = int(data_raw[1])
    # mass_list = data_raw[2:2+n_particles]
    # Sanity-check the payload length (must divide into position/velocity
    # pairs). NOTE(review): the divisor is (2*dim) while frames are sized
    # (2*dim*n_particles); a file truncated mid-frame but at a particle
    # boundary passes this check -- confirm whether that is acceptable.
    if (len(data_raw) - 2) % (2 * dim):
        print("invalid file length: file no{1} of {0}".format(sys_name, file_index))
        return False
    n_frames = (len(data_raw) - 2) // (2 * dim * n_particles)
    data = []
    for frame_index in range(n_frames):
        datum = []
        for particle_index in range(n_particles):
            start = 2 + frame_index * (dim * 2 * n_particles) + particle_index * (dim * 2)
            # Positions then velocities for this particle in this frame.
            datum.append(data_raw[start:start + dim] + data_raw[start + dim:start + dim * 2])
        data.append(datum)
    return data
def load_prediction(sys_name, file_index):
    """Load one predicted trajectory from '../data_prediction'.

    Identical format and return contract to :func:`load_data`, but reads
    from the prediction output directory instead of the raw data directory.

    :param sys_name: subdirectory of '../data_prediction'
    :param file_index: index into the (os.listdir-ordered) file list
    :return: ``data[frame][particle] == [x..., v...]`` or ``False`` on error
    """
    filename_list = os.listdir('../data_prediction/{0}'.format(sys_name))
    if file_index >= len(filename_list):
        return False
    if filename_list[file_index][-3:] != 'txt':
        return False
    filename = filename_list[file_index]
    filepath = '../data_prediction/{0}/'.format(sys_name) + filename
    # 'with' guarantees the handle is closed even if a parse error raises.
    with open(filepath, 'r') as fd:
        data_raw = [float(line.strip()) for line in fd]
    dim = int(data_raw[0])
    n_particles = int(data_raw[1])
    # mass_list = data_raw[2:2+n_particles]
    # Same length check caveat as load_data: divisor is (2*dim), not
    # (2*dim*n_particles) -- confirm intent.
    if (len(data_raw) - 2) % (2 * dim):
        print("invalid file length: file no{1} of {0}".format(sys_name, file_index))
        return False
    n_frames = (len(data_raw) - 2) // (2 * dim * n_particles)
    data = []
    for frame_index in range(n_frames):
        datum = []
        for particle_index in range(n_particles):
            start = 2 + frame_index * (dim * 2 * n_particles) + particle_index * (dim * 2)
            datum.append(data_raw[start:start + dim] + data_raw[start + dim:start + dim * 2])
        data.append(datum)
    return data
def comp_data(sys_name, file_index, comp_rate):
    """Re-sample (compress) a trajectory file by keeping every other frame
    and write the result under '../data_comp/<sys_name>/<comp_rate>'.

    Reads from the half-rate directory (``comp_rate // 2``), so compression
    is applied progressively.

    :param sys_name: system subdirectory name
    :param file_index: index into the (os.listdir-ordered) file list
    :param comp_rate: target compression rate (also names the output dir)
    :return: ``True`` on success, ``False`` on any validation failure
    """
    # filename_list = os.listdir('../data/{0}'.format(sys_name))
    filename_list = os.listdir('../data_comp/{0}/{1:06d}'.format(sys_name, comp_rate // 2))
    if file_index >= len(filename_list):
        return False
    if filename_list[file_index][-3:] != 'txt':
        return False
    filename = filename_list[file_index]
    filepath = '../data_comp/{0}/{1:06d}/'.format(sys_name, comp_rate // 2) + filename
    # 'with' guarantees the handle is closed even if a parse error raises.
    with open(filepath, 'r') as fd:
        data_raw = [float(line.strip()) for line in fd]
    dim = int(data_raw[0])
    n_particles = int(data_raw[1])
    # mass_list = data_raw[2:2 + n_particles]
    # Same length-check caveat as load_data: divisor is (2*dim).
    if (len(data_raw) - 2) % (2 * dim):
        print("invalid file length: file no{1} of {0}".format(sys_name, file_index))
        return False
    n_frames = (len(data_raw) - 2) // (2 * dim * n_particles)
    data = []
    frame_index = 0
    while frame_index < n_frames:
        datum = []
        for particle_index in range(n_particles):
            start = 2 + frame_index * (dim * 2 * n_particles) + particle_index * (dim * 2)
            datum.append(data_raw[start:start + dim] + data_raw[start + dim:start + dim * 2])
        data.append(datum)
        # NOTE(review): stride is hard-coded to 2; the commented-out intent
        # was ``frame_index += comp_rate`` -- confirm which is wanted.
        # frame_index += comp_rate
        frame_index += 2
    # makedirs with exist_ok covers both the parent and leaf directories.
    out_dir = '../data_comp/{0}/{1:06d}'.format(sys_name, comp_rate)
    os.makedirs(out_dir, exist_ok=True)
    filepath_comp = out_dir + '/' + filename
    # Assemble the output with a join instead of repeated string +=.
    lines = ["{0}\n{1}\n".format(dim, n_particles)]
    # for mass in mass_list:
    #     lines.append(str(mass) + '\n')
    for datum_comp in data:
        for ptl_state in datum_comp:  # except its mass
            for value in ptl_state:
                lines.append(str(value) + '\n')
    with open(filepath_comp, 'w') as fd_comp:
        fd_comp.write(''.join(lines))
    return True
# 한 번의 상황으로 여러 개의 학습 데이터를 만들 수 있다.?
def make_batch(states):
    """Build (input, target) training pairs from a state trajectory.

    For each frame index i >= 2, the input encodes the previous frame
    relative to its first particle (reference velocity + its change, then
    each other particle's relative position and velocity), and the target
    is the reference particle's state change plus its acceleration terms.

    :param states: sequence of frames; each frame is a sequence of particle
        states laid out as [x, y, vx, vy]
    :return: (input_batch, target_batch)
    """
    input_batch = []
    target_batch = []
    # The first two frames only seed the finite differences below.
    for idx in range(2, len(states)):
        cur = states[idx]
        prev = states[idx - 1]
        prev2 = states[idx - 2]
        # Reference: the first particle of the previous frame.
        ref_pos = prev[0][0:2]
        ref_vel = prev[0][2:4]
        # First input entry: reference velocity and its change (finite diff).
        dv_prev = [prev[0][c] - prev2[0][c] for c in (2, 3)]
        frame = [prev[0][2:4] + dv_prev]
        # Remaining entries: every other particle, relative to the reference.
        for p_idx, particle in enumerate(prev):
            if p_idx == 0:
                continue
            rel = [particle[c] - ref_pos[c] for c in (0, 1)]
            rel += [particle[c] - ref_vel[c - 2] for c in (2, 3)]
            frame.append(rel)
        input_batch.append(frame)
        # Target: reference particle's state delta plus acceleration terms.
        d_xv = [cur[0][j] - prev[0][j] for j in range(len(cur[0]))]
        d_xv_prev = [prev[0][j] - prev2[0][j] for j in range(len(cur[0]))]
        d_a = [d_xv[c] - d_xv_prev[c] for c in (2, 3)]
        target_batch.append(d_xv + d_a)
    return input_batch, target_batch
# 새로 쓸 make_batch 함수
def make_batch_rev(states):
    """Pair each state with its successor: inputs are all states but the
    last, targets are all states but the first."""
    inputs = states[:-1]
    targets = states[1:]
    return inputs, targets
# Copyright (c) Code Written and Tested by Ahmed Emad in 24/03/2020, 14:36.
from rest_framework import permissions
from recipes.models import RecipeModel, RecipeImageModel, RecipeReviewModel
class IsOwner(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Read-only requests (GET/HEAD/OPTIONS) are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # isinstance instead of type() equality: the idiomatic check, and it
        # also covers proxy/subclass instances. Check order preserved.
        if isinstance(obj, (RecipeModel, RecipeReviewModel)):
            return obj.user == request.user
        if isinstance(obj, RecipeImageModel):
            # Images are owned through their parent recipe.
            return obj.recipe.user == request.user
        return False
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
this is a unicode file test
"""
#this is the code use utf-8
# Codec used for both encoding on write and decoding on read.
CODEC = 'utf-8'
FILE = 'unicode.txt'
# Write: the unicode string must be encoded to UTF-8 bytes before writing.
hello_out = u'Hello KEL,中文测试\n'
bytes_out = hello_out.encode('utf-8')
f = open(FILE,'w')
f.write(bytes_out)
f.close()
# Read: the raw data read back must be decoded with the same codec.
f = open(FILE,'r')
hello_in = f.read()
bytes_in = hello_in.decode(CODEC)
f.close()
print bytes_in,
|
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from guild import click_util
from . import remote_support
from . import runs_support
def runs_stop_params(fn):
    """Append the shared options/arguments for the 'stop' command to *fn*.

    Used as a decorator so the same parameter set can be reused across
    commands; returns *fn* with the click parameters attached.
    """
    click_util.append_params(
        fn,
        [
            click.Option(
                ("-y", "--yes"), help="Do not prompt before stopping.", is_flag=True
            ),
            click.Option(
                ("-n", "--no-wait"),
                help="Don't wait for remote runs to stop.",
                is_flag=True,
            ),
            click.Option(
                ("--force",),
                help=(
                    "Forceably stop the runs after a period of time (specified by "
                    "'--timeout')."
                ),
                is_flag=True,
            ),
            click.Option(
                ("--timeout",),
                type=click.IntRange(min=0),
                metavar="N",
                default=30,
                help="Timeout in seconds to wait for a run to stop (default is 30)."
            ),
            # Shared RUN selection arguments / filters and remote support.
            runs_support.runs_arg,
            runs_support.common_filters,
            remote_support.remote_option("Stop remote runs."),
        ],
    )
    return fn
@click.command(name="stop")
@runs_stop_params
@click.pass_context
@click_util.use_args
@click_util.render_doc
# NOTE: the docstring below is rendered by click_util.render_doc as the CLI
# help text ({{ ... }} placeholders are expanded) -- do not edit casually.
def stop_runs(ctx, args):
    """Stop one or more runs.
    Runs are stopped by specifying one or more RUN arguments. See
    SPECIFYING RUNS and FILTER topics for information on specifying
    runs to be stopped.
    Only runs with status of 'running' are considered for this
    operation.
    If `RUN` is not specified, the latest selected run is stopped.
    {{ runs_support.runs_arg }}
    If a `RUN` argument is not specified, ``1`` is assumed (the most
    recent run with status 'running').
    {{ runs_support.common_filters }}
    ### Stop Remote Runs
    To stop remote runs, use `--remote`.
    {{ remote_support.remote_option }}
    """
    # Imported lazily so that loading the CLI command table stays fast.
    from . import runs_impl
    runs_impl.stop_runs(args, ctx)
|
from random import choice
class RandomWalk:
    """Generate a two-dimensional random walk."""

    def __init__(self, num_points=5000):
        """Prepare a walk of *num_points* positions, starting at the origin."""
        self.num_points = num_points
        # Every walk begins at (0, 0).
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Take steps until the walk contains ``num_points`` positions."""
        while len(self.x_values) < self.num_points:
            x_step = self.get_step()
            y_step = self.get_step()
            # Skip null moves so stationary points don't clutter the plot.
            if x_step or y_step:
                self.x_values.append(self.x_values[-1] + x_step)
                self.y_values.append(self.y_values[-1] + y_step)

    def get_step(self):
        """Return a signed step: random direction times random distance."""
        direction = choice([1, -1])      # left/right or up/down
        distance = choice([0, 1, 2, 3, 4])  # how far to travel
        return distance * direction
|
from scipy import signal
import matplotlib.pyplot as plt
# Component values -- presumably an RC low-pass / high-pass pair (confirm
# against the circuit this models).
r_l = 66
r_h = 1600
c_l, c_h = 1e-6, 1e-6
# First-order system with a 1 ms time constant.
twof = signal.TransferFunction([1], [0.001, 1])
threec = signal.TransferFunction([r_h * c_h, 0], [r_l * r_h * c_l * c_h, r_l * c_l + r_h * c_h, 1])
w, mag, phase = signal.bode(twof)
w2, mag2, phase2 = signal.bode(threec)
plt.figure()
plt.semilogx(w, mag)
plt.title('Frequency vs. Magnitude')
# BUG FIX: signal.bode returns angular frequency in rad/s, not Hz --
# the axis labels previously claimed Hz.
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Magnitude [dB]')
plt.figure()
plt.semilogx(w, phase)
plt.title('Frequency vs. Phase')
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Phase [deg]')
plt.figure()
plt.semilogx(w2, mag2)
plt.xlim(1e-1, 1e+9)
plt.title('Frequency vs. Magnitude')
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Magnitude [dB]')
plt.figure()
plt.semilogx(w2, phase2)
plt.xlim(1e-1, 1e+9)
plt.title('Frequency vs. Phase')
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Phase [deg]')
plt.show()
|
# Generated by Django 3.0.4 on 2020-06-01 15:33
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: sets the human-readable verbose_name of the
    # 'profil' model. Do not hand-edit beyond what makemigrations produced.
    dependencies = [
        ('partage_repas', '0002_profil'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='profil',
            options={'verbose_name': 'profil'},
        ),
    ]
|
#!/usr/bin/python3
""" Top ten """
import json
import requests
def top_ten(subreddit):
    """ Print the titles of the first 10 hot posts of a subreddit,
    or None when the subreddit does not exist (HTTP 404) """
    url = 'https://www.reddit.com/r/{}/hot.json?limit=10'.format(subreddit)
    # Custom User-Agent avoids reddit's rate limiting of default agents.
    headers = {'User-Agent': 'My User Agent 1.0'}
    request = requests.get(url, headers=headers)
    if request.status_code == 404:
        print(None)
    else:
        # BUG FIX: .json() was previously called before the status check,
        # which raised on non-JSON error pages instead of printing None.
        req = request.json()
        hot = req.get('data').get('children')
        for count in hot:
            print(count.get('data').get('title'))
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
import socket
# Resolve this machine's primary IP once at startup; the bytes are echoed
# back in every POST response by Handler.
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
ipBytes = ip.encode()
class Handler(BaseHTTPRequestHandler):
    """Echo handler: replies to POSTs with this host's IP and the body."""

    def do_POST(self):
        # Read exactly the declared body length from the request stream.
        # NOTE(review): assumes Content-Length is present; a missing header
        # raises here -- confirm that is acceptable for this toy server.
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        self.send_response(200)
        self.end_headers()
        # Assemble the reply in memory: b"<server-ip>: Says:<body>".
        response = BytesIO()
        response.write(ipBytes)
        response.write(b': Says:')
        response.write(body)
        self.wfile.write(response.getvalue())
# NOTE(review): binding port 80 usually requires elevated privileges.
print('Server listening on port 80...')
httpd = HTTPServer(('0.0.0.0', 80), Handler)
httpd.serve_forever()
|
# WARNING(review): hardcoded test credentials and an API key are checked into
# source control -- rotate them and load from environment/config for anything
# beyond throwaway tests.
my_mail = 'vol1@test.test'
my_pass = 'vol1'
api_key = "5402354a55655803bd34fe6b16344855c6056d34b7746bae11b2e990"
#api_key = ""
base_url = 'https://petfriends1.herokuapp.com/' |
##############################################################################
#
# Copyright (c) 2000-2009 Jens Vagelpohl and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" LDAP-based CMF member data tool
$Id$
"""
# Python imports
import os
from copy import deepcopy
# General Zope imports
from AccessControl import ClassSecurityInfo
from Acquisition import aq_base
from App.class_init import default__class_init__ as InitializeClass
from App.Common import package_home
from ZPublisher.HTTPRequest import HTTPRequest
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
# CMF imports
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.permissions import ManagePortal
from Products.CMFCore.MemberDataTool import MemberDataTool
from Products.CMFCore.MemberDataTool import MemberData
# Sentinel object distinguishing "no value supplied" from None.
_marker = []
# Directory holding the ZMI page templates shipped with this product.
_wwwdir = os.path.join(package_home(globals()), 'www')
class LDAPMemberDataTool(MemberDataTool):
""" This tool wraps user objects, making them act as Member objects. """
security = ClassSecurityInfo()
meta_type = 'LDAP Member Data Tool'
title = 'LDAP Member Data Tool'
security.declareProtected(ManagePortal, 'manage_showContents')
manage_showContents = PageTemplateFile('cmfldap_contents.pt', _wwwdir)
security.declareProtected(ManagePortal, 'manage_memberProperties')
manage_memberProperties = PageTemplateFile( 'cmfldap_memberProperties.pt'
, _wwwdir
)
manage_options = ( ( { 'label' : 'Member Properties'
, 'action' : 'manage_memberProperties'
}
,
)
+ MemberDataTool.manage_options
)
    def __init__(self, id='portal_memberdata'):
        # Tool id defaults to the canonical CMF member data tool id.
        self.id = id
        MemberDataTool.__init__(self)
        # LDAP attribute names exposed as member properties, in display order.
        self._sorted_attributes = ()
def wrapUser(self, u):
"""
If possible, returns the Member object that corresponds
to the given User object.
"""
id = u.getUserName()
members = self._members
if not members.has_key(id):
base = aq_base(self)
members[id] = LDAPMemberData(base, id)
wrapper = members[id].__of__(self).__of__(u)
# We set the MemberData global options if we found values
# in the UserData. (for instance the 'email')
try:
global mapped_attrs
mapped_attrs = self.acl_users.getMappedUserAttrs()
for MappedUserAttr in mapped_attrs:
try:
# get the property value from LDAPUser object, if it is
# empty then the method will raise an exception
PropertyValue = u.getProperty(MappedUserAttr[1])
# now read the value from the wrapper
WrapperPropertyValue = wrapper.getProperty(MappedUserAttr[1])
# redefine the wrapper value if it differ
if ( PropertyValue is not None and
PropertyValue != '' and
PropertyValue != WrapperPropertyValue ):
setattr(wrapper, MappedUserAttr[1], PropertyValue)
except:
# the exception may be thrown if PropertyValue is empty
pass
except:
pass
# Return a wrapper with self as containment and
# the user as context.
return wrapper
#################################################################
# CMFLDAP-specific API used in the ZMI only
#################################################################
security.declareProtected(ManagePortal, 'getAvailableMemberProperties')
def getAvailableMemberProperties(self):
""" Return a list of attributes that have not been assigned yet """
uf_schema = deepcopy(self.acl_users.getSchemaConfig())
return [uf_schema[x] for x in uf_schema.keys()
if x not in self._sorted_attributes]
    security.declarePublic('getSortedMemberProperties')
    def getSortedMemberProperties(self):
        """ Return a sorted sequence of dictionaries describing the properties
            available for portal members
        This method is declared Public because it is used for the join_form
        as well.
        """
        sorted_schema = []
        uf_schema = deepcopy(self.acl_users.getSchemaConfig())
        uf_login = self.acl_users.getProperty('_login_attr')
        # _sorted_attributes holds the admin-chosen display order.
        for property_id in self._sorted_attributes:
            property_info = uf_schema.get(property_id, None)
            # Filtering out those properties that are either invalid
            # or provided already by the default machinery.
            if ( property_info is not None and
                 property_id not in (uf_login, 'mail') ):
                sorted_schema.append(property_info)
        return tuple(sorted_schema)
security.declareProtected(ManagePortal, 'addMemberProperty')
def addMemberProperty(self, property_id):
""" Add a new property. The property_id represents the true LDAP
attribute name
"""
if property_id in self._sorted_attributes:
return
if property_id not in self.acl_users.getSchemaConfig().keys():
return
sorted = list(self._sorted_attributes)
sorted.append(property_id)
self._sorted_attributes = tuple(sorted)
security.declareProtected(ManagePortal, 'manage_addMemberProperty')
def manage_addMemberProperty(self, property_id, REQUEST=None):
""" ZMI wrapper for addMemberProperty """
self.addMemberProperty(property_id)
if REQUEST is not None:
msg = 'Property %s added.' % property_id
return self.manage_memberProperties(manage_tabs_message=msg)
security.declareProtected(ManagePortal, 'removeMemberProperty')
def removeMemberProperty(self, property_id):
""" Remove a member property. The property_id represents the true
LDAP attribute name
"""
if property_id not in self._sorted_attributes:
return
sorted = list(self._sorted_attributes)
sorted.remove(property_id)
self._sorted_attributes = tuple(sorted)
security.declareProtected(ManagePortal, 'manage_removeMemberProperty')
def manage_removeMemberProperty(self, property_id=None, REQUEST=None):
""" ZMI wrapper for removeMemberProperty """
if property_id is None:
msg = 'Please select a property.'
else:
self.removeMemberProperty(property_id)
msg = 'Property %s removed.' % property_id
if REQUEST is not None:
return self.manage_memberProperties(manage_tabs_message=msg)
security.declareProtected(ManagePortal, 'moveMemberPropertyUp')
def moveMemberPropertyUp(self, property_id):
""" Move a member property up in the sort ranking. The property_id
represents the true LDAP attribute name.
"""
if property_id not in self._sorted_attributes:
return
sorted = list(self._sorted_attributes)
property_idx = sorted.index(property_id)
prior_idx = property_idx - 1
if property_idx > 0:
current_occupier = sorted[prior_idx]
sorted[prior_idx] = property_id
sorted[property_idx] = current_occupier
self._sorted_attributes = tuple(sorted)
security.declareProtected(ManagePortal, 'manage_moveMemberPropertyUp')
def manage_moveMemberPropertyUp(self, property_id=None, REQUEST=None):
""" ZMI wrapper for moveMemberPropertyUp """
if property_id is None:
msg = 'Please select a property.'
else:
self.moveMemberPropertyUp(property_id)
msg = 'Property %s moved.' % property_id
if REQUEST is not None:
return self.manage_memberProperties(manage_tabs_message=msg)
security.declareProtected(ManagePortal, 'moveMemberPropertyDown')
def moveMemberPropertyDown(self, property_id):
""" Move a member property down in the sort ranking. The property_id
represents the true LDAP attribute name.
"""
if property_id not in self._sorted_attributes:
return
sorted = list(self._sorted_attributes)
property_idx = sorted.index(property_id)
next_idx = property_idx + 1
if property_idx < len(sorted) - 1:
current_occupier = sorted[next_idx]
sorted[next_idx] = property_id
sorted[property_idx] = current_occupier
self._sorted_attributes = tuple(sorted)
security.declareProtected(ManagePortal, 'manage_moveMemberPropertyDown')
def manage_moveMemberPropertyDown(self, property_id=None, REQUEST=None):
""" ZMI wrapper for moveMemberPropertyDown """
if property_id is None:
msg = 'Please select a property.'
else:
self.moveMemberPropertyDown(property_id)
msg = 'Property %s moved.' % property_id
if REQUEST is not None:
return self.manage_memberProperties(manage_tabs_message=msg)
InitializeClass(LDAPMemberDataTool)  # apply the security declarations above
class LDAPMemberData(MemberData):
    """ Member Data wrapper for the LDAP-driven Member Data Tool """

    def setMemberProperties(self, mapping):
        """ Sets the properties of the member.

        Keys that belong to the LDAP schema are written to LDAP; any
        remaining keys are handed to the default MemberData machinery.
        """
        acl = self.acl_users
        ldap_schemakeys = [x[0] for x in acl.getLDAPSchema()]
        if isinstance(mapping, HTTPRequest):
            mapping = mapping.form
        # back conversion of mapped attributes
        # (note: `has_key` replaced by `in` throughout)
        mapped_attrs = acl.getMappedUserAttrs()
        for mapped_attr in mapped_attrs:
            if (mapped_attr[0] not in mapping
                    and mapped_attr[1] in mapping):
                mapping[mapped_attr[0]] = mapping[mapped_attr[1]]
        # Special-case a couple keys which are pretty much "hard-coded"
        # in CMF
        if 'email' in mapping and 'mail' not in mapping:
            mapping['mail'] = mapping['email']
        # True when at least one key belongs to the LDAP schema
        # (clearer than the old filter(None, map(lambda ...)) construct)
        change_vals = any(key in ldap_schemakeys for key in mapping.keys())
        try:
            if change_vals:
                user_obj = self.getUser()
                rdn_attr = acl.getProperty('_rdnattr')
                if rdn_attr not in mapping:
                    mapping[rdn_attr] = user_obj.getUserName()
                acl.manage_editUser(user_obj.getUserDN(), kwargs=mapping)
        except Exception:
            # deliberate best-effort: LDAP write failures are swallowed
            pass
        # Before we hand this over to the default MemberData implementation,
        # purge out all keys we have already set via LDAP so that we never
        # shadow a LDAP value on the member data wrapper.
        # We want to hand over the "default" stuff like listed status or
        # the skin selection.
        consumed_attributes = [x[0] for x in mapped_attrs]
        consumed_attributes.extend(ldap_schemakeys)
        for key in consumed_attributes:
            if key in mapping:
                del mapping[key]
        MemberData.setMemberProperties(self, mapping)

    def setSecurityProfile(self, password=None, roles=None, domains=None):
        """ Set the user's basic security profile """
        acl = self.acl_users
        u = self.getUser()
        user_dn = u.getUserDN()
        if password is not None:
            acl.manage_editUserPassword(user_dn, password)
            u.__ = password
        if roles is not None:
            # translate role names into (name, dn) pairs known to the folder
            all_roles = acl.getGroups()
            role_dns = []
            my_new_roles = []
            for role_name, role_dn in all_roles:
                if role_name in roles:
                    my_new_roles.append(role_name)
                    role_dns.append(role_dn)
            u.roles = my_new_roles
            acl.manage_editUserRoles(user_dn, role_dns)
        if domains is not None:
            u.domains = domains

    def getPassword(self):
        """ Retrieve the user's password if there is a valid record in the
        user folder cache, otherwise create a new one and set it on the user
        object and in LDAP
        """
        user_obj = self.getUser()
        pwd = user_obj._getPassword()
        if pwd == 'undef':  # This user object did not result from a login
            reg_tool = getToolByName(self, 'portal_registration')
            pwd = reg_tool.generatePassword()
            self.setSecurityProfile(password=pwd)
            # drop the stale cache entry so the new password is picked up
            self.acl_users._expireUser(user_obj)
        return pwd
InitializeClass(LDAPMemberData)  # apply Zope security machinery to the class
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from kornia.losses import SSIM
from kornia.filters import SpatialGradient
from torch.nn import functional as F
#def inverse_huber_loss(target, output):
# absdiff = torch.abs(output-target)
# C = 0.2*torch.max(absdiff).item()
# return torch.mean(torch.where(absdiff < C, absdiff,(absdiff*absdiff+C*C)/(2*C) ))
def DiceLoss(inputs, targets, smooth=1e-6):
    """Soft Dice loss: 1 - (2*|X∩Y| + smooth) / (|X| + |Y| + smooth).

    Args:
        inputs: predicted mask, expected in [0, 1] (apply sigmoid upstream
            if the model outputs raw logits).
        targets: ground-truth mask, same number of elements as ``inputs``.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor in [0, 1]; 0 for a perfect overlap.
    """
    # reshape (not view) so non-contiguous tensors are accepted too
    inputs = inputs.reshape(-1)
    targets = targets.reshape(-1)
    # intersection is equivalent to the true-positive mass
    intersection = (inputs * targets).sum()
    dice = (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
    return 1 - dice
def IoULoss(inputs, targets, smooth=1e-6):
    """Soft Jaccard (IoU) loss: 1 - (|X∩Y| + smooth) / (|X∪Y| + smooth).

    Args:
        inputs: predicted mask, expected in [0, 1] (apply sigmoid upstream
            if the model outputs raw logits).
        targets: ground-truth mask, same number of elements as ``inputs``.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor in [0, 1]; 0 for a perfect overlap.
    """
    # reshape (not view) so non-contiguous tensors are accepted too
    inputs = inputs.reshape(-1)
    targets = targets.reshape(-1)
    # intersection == true-positive mass; union is the inclusive area
    intersection = (inputs * targets).sum()
    union = (inputs + targets).sum() - intersection
    IoU = (intersection + smooth) / (union + smooth)
    return 1 - IoU
def IoU_BCELoss(targets, inputs, smooth=1e-6):
    """Combined loss: binary cross-entropy (with logits) + soft IoU.

    NOTE(review): ``inputs`` feeds both the IoU term (which reads it as
    probabilities) and ``binary_cross_entropy_with_logits`` (which reads
    it as raw logits) — confirm which representation callers pass.

    Args:
        targets: ground-truth mask, values in [0, 1].
        inputs: predicted mask (raw network output).
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: BCE + (1 - IoU).
    """
    # reshape (not view) so non-contiguous tensors are accepted too
    inputs = inputs.reshape(-1)
    targets = targets.reshape(-1)
    intersection = (inputs * targets).sum()
    union = (inputs + targets).sum() - intersection
    IoU = (intersection + smooth) / (union + smooth)
    iou_loss = 1 - IoU
    BCE = F.binary_cross_entropy_with_logits(inputs, targets)
    return BCE + iou_loss
def Dice_BCELoss(targets, inputs, smooth=1e-6):
    """Combined loss: binary cross-entropy (with logits) + soft Dice.

    NOTE(review): ``inputs`` feeds both the Dice term (which reads it as
    probabilities) and ``binary_cross_entropy_with_logits`` (which reads
    it as raw logits) — confirm which representation callers pass.

    Args:
        targets: ground-truth mask, values in [0, 1].
        inputs: predicted mask (raw network output).
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: BCE + (1 - Dice).
    """
    # reshape (not view) so non-contiguous tensors are accepted too
    inputs = inputs.reshape(-1)
    targets = targets.reshape(-1)
    intersection = (inputs * targets).sum()
    dice = (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
    dice_loss = 1 - dice
    BCE = F.binary_cross_entropy_with_logits(inputs, targets)
    return BCE + dice_loss
def edge_loss(target, output):
    """Mean absolute difference of the spatial image gradients (dy and dx)."""
    grad = SpatialGradient()
    g_pred = grad(output)
    # gradient tensor is (B, C, 2, H, W); index 0 along dim 2 is dy, 1 is dx
    assert g_pred.ndim == 5
    assert g_pred.shape[2] == 2
    g_true = grad(target)
    diff_y = torch.abs(g_pred[:, :, 0, :, :] - g_true[:, :, 0, :, :])
    diff_x = torch.abs(g_pred[:, :, 1, :, :] - g_true[:, :, 1, :, :])
    return torch.mean(diff_y + diff_x)
def loss_mask_function(target, output):
    """Mask loss: BCE-with-logits + IoU (see IoU_BCELoss).

    Args:
        target: ground-truth mask.
        output: predicted mask (raw network output).

    Returns:
        (loss, s): scalar loss tensor and a short formatted summary string.
    """
    l_iou = IoU_BCELoss(target, output)
    loss = l_iou
    # format the python float via .item() — consistent with
    # loss_depth_function instead of relying on Tensor.__format__
    s = f"(d{l_iou.item():0.3f})"
    return loss, s
def loss_depth_function(target, output, w_ssim=1.0, w_edge=1.0, w_depth=1.0):
    """Weighted depth loss: SSIM + gradient (edge) + point-wise L1 terms.

    The SSIM module already returns 1 - ssim_index internally.
    Returns (loss, summary_string).
    """
    ssim_term = SSIM(3, reduction="mean")(output, target)
    edge_term = edge_loss(output, target)
    depth_term = nn.L1Loss()(output, target)
    summary = (
        f"(d{depth_term.item():0.3f},"
        f"s{ssim_term.item():0.3f},"
        f"e{edge_term.item():0.3f})"
    )
    total = (w_ssim * ssim_term) + (w_depth * depth_term) + (w_edge * edge_term)
    return total, summary
import datetime
from django.contrib.auth.models import User as admin
from django.utils import timezone
from django.utils.safestring import mark_safe
from djongo import models
# Create your models here.
class State(models.Model):
    # Top-level geographic unit; referenced by City.
    state_name = models.CharField(max_length=50)

    def __str__(self):
        return self.state_name


class City(models.Model):
    # City within a State; deleted together with its State (CASCADE).
    city_name = models.CharField(max_length=50)
    state = models.ForeignKey(State, on_delete=models.CASCADE)

    def __str__(self):
        return self.city_name
class Designer(models.Model):
    # Design professional managed by an admin account.
    designer_name = models.CharField(max_length=100)
    email = models.EmailField()
    phone = models.CharField(max_length=12)
    # NOTE(review): plaintext password field — should be hashed; confirm.
    password = models.CharField(max_length=100)
    description = models.TextField()
    join_date = models.DateField()
    status = models.BooleanField(default=True)
    city = models.ForeignKey(City, on_delete=models.CASCADE)
    # `admin` is the django.contrib.auth User model (imported as `admin`).
    admin = models.ForeignKey(admin, on_delete=models.CASCADE)

    def __str__(self):
        return self.designer_name


class Customer(models.Model):
    # Customer account (join_date has no default, unlike User below).
    username = models.CharField(max_length=100)
    email = models.EmailField()
    password = models.CharField(max_length=100)
    join_date = models.DateField()
    status = models.BooleanField(default=True)

    def __str__(self):
        return self.username


class User(models.Model):
    # Site-local user account (distinct from django.contrib.auth User).
    username = models.CharField(max_length=100)
    email = models.EmailField()
    password = models.CharField(max_length=100)
    join_date = models.DateField(default=datetime.date.today)
    status = models.BooleanField(default=True)

    def __str__(self):
        return self.username
class Address(models.Model):
    # Delivery address owned by a User.
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=12)
    addr = models.TextField(verbose_name="Address")
    pincode = models.IntegerField()
    city = models.ForeignKey(City, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.addr


class Branch(models.Model):
    # Physical branch location, managed by an admin (auth User).
    branch_name = models.CharField(max_length=50)
    addr = models.TextField(verbose_name="Address")
    created_at = models.DateField(default=datetime.date.today)
    city = models.ForeignKey(City, on_delete=models.CASCADE)
    admin = models.ForeignKey(admin, on_delete=models.CASCADE)

    def __str__(self):
        return self.branch_name


class Category(models.Model):
    # Product category lookup table.
    cat_name = models.CharField(max_length=100, verbose_name="Category Name")

    def __str__(self):
        return self.cat_name
class Product(models.Model):
    # Sellable product belonging to a Category.
    pdt_name = models.CharField(max_length=100, verbose_name="Product Name")
    description = models.TextField()
    image = models.ImageField(upload_to='img')
    price = models.IntegerField()
    status = models.BooleanField(default=True)
    created_at = models.DateField(default=datetime.date.today)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)

    def prodImg(self):
        # Inline thumbnail for the admin list display.
        return mark_safe('<img src="{}" width="100" />'.format(self.image.url))
    prodImg.short_description = "Image"
    prodImg.allow_tags = True

    def __str__(self):
        return self.pdt_name


class Design(models.Model):
    # Who inserted the design: 1 = Admin, 2 = Designer.
    ins_choice = (
        (1, "Admin"),
        (2, "Designer")
    )
    design_name = models.CharField(max_length=100, verbose_name="Design Name")
    description = models.TextField()
    image = models.ImageField(upload_to='img')
    price = models.IntegerField()
    status = models.BooleanField(default=True)
    inserted_by = models.PositiveIntegerField(choices=ins_choice, default=1)
    # NOTE(review): loose integer id of the creator (admin or designer,
    # depending on inserted_by) rather than a ForeignKey — confirm intent.
    creator_id = models.IntegerField(verbose_name="Creator Name")
    created_at = models.DateField(default=datetime.date.today)

    def prodImg(self):
        # Inline thumbnail for the admin list display.
        return mark_safe('<img src="{}" width="100" />'.format(self.image.url))
    prodImg.short_description = "Image"
    prodImg.allow_tags = True

    def __str__(self):
        return self.design_name


class Contact(models.Model):
    # Contact-form submission.
    name = models.CharField(max_length=50)
    email = models.EmailField()
    message = models.TextField()

    def __str__(self):
        return self.name
class DesignElement(models.Model):
    # Placement (position + size) of a Product within a Design.
    pos_X = models.FloatField()
    pos_Y = models.FloatField()
    width = models.FloatField()
    height = models.FloatField()
    pdt_id = models.ForeignKey(Product, on_delete=models.CASCADE)
    design_id = models.ForeignKey(Design, on_delete=models.CASCADE)

    def __str__(self):
        return self.design_id.design_name


class Cart(models.Model):
    # Cart line item for a User.
    qty = models.IntegerField()
    # `datetime` shadows the module name as a class attribute; the default
    # on the right-hand side is evaluated first, so this still works.
    datetime = models.DateField(default=datetime.date.today)
    # NOTE(review): integer discriminator — semantics not visible here.
    type = models.IntegerField()
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    design = models.ForeignKey(Design, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.product.pdt_name


class Order(models.Model):
    # Order lifecycle states.
    ins_choice = (
        (0, "Processing"),
        (1, "Out For Delivery"),
        (2, "Delivered")
    )
    datetime = models.DateField(default=datetime.date.today)
    status = models.PositiveIntegerField(choices=ins_choice, default=0)
    addr = models.ForeignKey(Address, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
class OrderItemPdt(models.Model):
    # Product line item of an Order (price captured at purchase time).
    qty = models.IntegerField()
    price = models.FloatField()
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)


class OrderItemDesign(models.Model):
    # Design line item of an Order (no quantity: one design per line).
    price = models.FloatField()
    design = models.ForeignKey(Design, on_delete=models.CASCADE)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)


class Payment(models.Model):
    # Payment record attached to an Order; status starts at 0 (pending?).
    amount = models.FloatField()
    payment_method = models.CharField(max_length=50)
    datetime = models.DateField(default=datetime.date.today)
    status = models.IntegerField(default=0)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
#
# class Message(models.Model):
# message = models.TextField()
# datetime = models.DateField(default=datetime.date.today)
# sender = models.IntegerField(1)
# type = models.IntegerField(1)
# user = models.ForeignKey(User, on_delete=models.CASCADE)
# designer = models.ForeignKey(Designer, on_delete=models.CASCADE)
class ChatMessage(models.Model):
    # Chat message exchanged between a User and a Designer.
    msg = models.TextField()
    datetime = models.DateTimeField(default=timezone.now)
    # NOTE(review): IntegerField(2) passes 2 positionally as *verbose_name*,
    # not as a default value — `default=2` was probably intended; confirm
    # and fix with a migration.
    sender = models.IntegerField(2)
    type = models.IntegerField(2)
    status = models.IntegerField(2)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    designer = models.ForeignKey(Designer, on_delete=models.CASCADE)
|
from modeltranslation.translator import register, TranslationOptions
from .models import DayTemplateRule, RuleSet
@register(DayTemplateRule)
class SimpleRuleTranslation(TranslationOptions):
    # Only the human-readable name of a DayTemplateRule is translatable.
    fields = ('name',)


@register(RuleSet)
class RuleSetTranslation(TranslationOptions):
    # Only the human-readable name of a RuleSet is translatable.
    fields = ('name',)
|
import numpy as np
from genetic_algorithm.dna import DNA
from scipy.special import softmax
class Population:
    """
    A population of DNA individuals with genes of the type (a, b),
    evolved by a genetic algorithm. Fitness is minimized throughout
    (lower is better).
    """

    def __init__(self, max_pop, low, high, fitness_calculator):
        """Create and immediately evaluate a random initial population."""
        self.generation = 0
        self.best = None
        self.pop_probabilities = None
        self.fitness_calculator = fitness_calculator
        self.low = low
        self.high = high
        self.population = [DNA(low=self.low, high=self.high)
                           for _ in range(max_pop)]
        self.eval_population()

    def eval_population(self):
        """Recompute the fitness of every individual."""
        for individual in self.population:
            individual.calc_fitness(fitness_calculator=self.fitness_calculator)
        return None

    def natural_selection(self):
        """Assign each individual a selection probability.

        Fitness is minimized, so scores are flipped (max - fitness) before
        the softmax so that fitter individuals receive more mass.
        """
        self.pop_probabilities = dict()
        all_fitness = np.asarray([ind.fitness for ind in self.population])
        probabilities = softmax(np.max(all_fitness) - all_fitness)
        for i, individual in enumerate(self.population):
            self.pop_probabilities[individual] = probabilities[i]
        return None

    def generate(self, mutation_rate, mutation_range, tolerance):
        """Breed the next generation, replacing the population in place.

        BUG FIX: parents are drawn from a snapshot of the current
        generation. Previously, children written into self.population
        mid-loop could themselves be picked as parents even though the
        selection probabilities were computed for the old individuals.
        """
        parents = list(self.population)
        probabilities = list(self.pop_probabilities.values())
        indexes = list(range(len(parents)))
        for i in range(len(parents)):
            index_a = np.random.choice(a=indexes, p=probabilities)
            index_b = np.random.choice(a=indexes, p=probabilities)
            child = parents[index_a].reproduce(parents[index_b])
            child.mutate(mutation_rate=mutation_rate, tolerance=tolerance,
                         mutation_range=mutation_range)
            self.population[i] = child
        self.generation += 1
        return None

    def validate(self):
        """Cache the fittest (lowest-fitness) individual in self.best.

        min() replaces the old 1000000 sentinel, which silently picked
        index 0 whenever every fitness exceeded the sentinel.
        """
        self.best = min(self.population, key=lambda ind: ind.fitness)
        return None

    def get_best(self):
        """Return (genes, fitness) of the individual cached by validate()."""
        return tuple(self.best.genes), self.best.fitness

    def get_generation(self):
        """Return the generation counter."""
        return self.generation

    def get_avg_fitness(self):
        """Return the mean fitness of the population."""
        return sum(ind.fitness for ind in self.population) / len(self.population)
|
import cv2
import numpy as np
import json
import imutils as iu
from imutils.perspective import four_point_transform
import time
from utils import helper_visuals as iv
import utils.plugins.CMT.CMT as CMT
from utils import modelio as mio
from keras.preprocessing.image import img_to_array
# import utils.plugins.CMT.util as cmt_utils
# Parse filter arguments
# Parse filter arguments: args[0]/args[1] form the head pair, the rest
# are "key" or "key=value" tokens.
def parse_args(args):
    """Return a dict {args[0]: args[1]} plus key=value pairs from args[2:].

    A bare token (no '=') maps to None; for 'k=v=w' only the first value
    after the key is kept.
    """
    parsed = {args[0]: args[1]}
    for raw in args[2:]:
        key, *rest = raw.split("=")
        parsed[key] = rest[0] if rest else None
    return parsed
# Apply a filter on the image using convolution with a filter K
# Apply a filter on the image using convolution with a filter K
def plugin_filter(proc, args):
    """Manage the filter-plugin chain of *proc*.

    args[1] names an entry of the global filter_bank. Function handlers
    are appended/removed; string handlers are meta commands:
    remove | load <name> | save <name> | list | help.
    """
    global filter_bank
    filtr = parse_args(args)
    hdr = filter_bank[filtr.get("filter")]["handler"]
    hdr_type = type(hdr).__name__
    if hdr_type == 'function':
        if 'none' in filtr.keys():
            proc.remove_plugin(hdr)
        else:
            # TODO: replace args with filtr
            proc.append_plugin(hdr, args)
    elif hdr_type == 'str':
        if hdr == 'remove':
            proc.remove_all_plugins()
        elif hdr == 'load':
            if len(args) > 2:
                fn = "filters/{}.{}".format(args[2], "json")
                try:
                    # with-block guarantees the handle is closed even when
                    # json.load raises (the old code leaked it)
                    with open(fn, 'r') as f:
                        obj = json.load(f)
                    for plugin in obj:
                        if plugin[0] == "filter":
                            plugin_filter(proc, plugin)
                        elif plugin[0] == "crop":
                            proc.refPt = [tuple(plugin[1][0]), tuple(plugin[1][1])]
                            proc.selection = True
                    print("[info] {} plugins loaded".format(len(obj)))
                except FileNotFoundError:
                    return "[error] File not found: '{}'".format(fn)
            else:
                print("[warning] Wrong number of arguments")
        elif hdr == 'save':
            if len(args) > 2:
                obj = []
                for plugin in proc.plugins:
                    obj.append(plugin[1])
                if proc.selection:
                    obj.append(["crop", proc.refPt])
                if len(obj) > 0:
                    fn = "filters/{}.{}".format(args[2], "json")
                    with open(fn, 'w') as f:
                        json.dump(obj, f, indent=4)
                    print(json.dumps(obj, indent=4))
                    print("[info] {} active plugins saved in '{}'".format(len(obj), fn))
                else:
                    print("[warning] No active plugin. Save cancelled!")
            else:
                print("[warning] Wrong number of arguments")
        elif hdr == 'list':
            for plugin in proc.plugins:
                print(plugin[1])
        elif hdr == 'help':
            print("\nDisplaying available filters:\n------------------------------")
            for key in filter_bank:
                name = "{:<15}".format(key)
                print("%s: %s" % (name, filter_bank[key]["desc"]))
# Convolution-kernel handlers: each builds a small kernel and applies it
# with cv2.filter2D (depth -1 keeps the input depth).
def hdr_smallBlur(proc, img, plugin):
    """7x7 normalized box blur."""
    kernel = np.full((7, 7), 1.0 / (7 * 7), dtype="float")
    return cv2.filter2D(img, -1, kernel)


def hdr_largeBlur(proc, img, plugin):
    """21x21 normalized box blur."""
    kernel = np.full((21, 21), 1.0 / (21 * 21), dtype="float")
    return cv2.filter2D(img, -1, kernel)


def hdr_gaussian(proc, img, plugin):
    """5x5 Gaussian blur (plugin args accepted but unused)."""
    args = plugin[1]  # kept for handler-signature symmetry
    return cv2.GaussianBlur(img, (5, 5), 0)


def hdr_sharpen(proc, img, plugin):
    """Sharpen via a center-weighted 3x3 kernel."""
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]], dtype="int")
    return cv2.filter2D(img, -1, kernel)


def hdr_laplacian(proc, img, plugin):
    """Laplacian kernel highlighting edge-like regions."""
    kernel = np.array([[0, 1, 0],
                       [1, -4, 1],
                       [0, 1, 0]], dtype="int")
    return cv2.filter2D(img, -1, kernel)


def hdr_sobelX(proc, img, plugin):
    """Sobel kernel along the x axis."""
    kernel = np.array([[-1, 0, 1],
                       [-2, 0, 2],
                       [-1, 0, 1]], dtype="int")
    return cv2.filter2D(img, -1, kernel)


def hdr_sobelY(proc, img, plugin):
    """Sobel kernel along the y axis."""
    kernel = np.array([[-1, -2, -1],
                       [0, 0, 0],
                       [1, 2, 1]], dtype="int")
    return cv2.filter2D(img, -1, kernel)


def hdr_emboss(proc, img, plugin):
    """Emboss kernel."""
    kernel = np.array([[-2, -1, 0],
                       [-1, 1, 1],
                       [0, 1, 2]], dtype="int")
    return cv2.filter2D(img, -1, kernel)
def hdr_sobel(proc, img, plugin):
    """Sobel gradients on both axes, OR-combined into one edge image."""
    args = plugin[1]
    ksize = int(args[2]) if len(args) > 2 else 1
    grad_x = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=ksize)
    grad_y = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=ksize)
    # uint8 cast (wrap-around semantics preserved) of absolute gradients
    grad_x = np.uint8(np.absolute(grad_x))
    grad_y = np.uint8(np.absolute(grad_y))
    return cv2.bitwise_or(grad_x, grad_y)
def hdr_threshold(proc, img, plugin):
    """Binarize the frame.

    args[2] selects the method: normal | otsu (args[3] = threshold 0-255,
    default 0) or adapt-mean | adapt-gauss (args[3] = odd neighborhood
    size, default 11). Unknown types print usage and remove the filter.
    """
    args = plugin[1]
    warn = False
    # convert to grayscale when needed
    if len(img.shape) > 2:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    threshold_type = 'normal'
    if len(args) > 2:
        threshold_type = args[2]
    if threshold_type == 'normal':
        thresh = 0 if len(args) < 4 else int(args[3])
        out = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY_INV)[1]
    elif threshold_type == 'otsu':
        thresh = 0 if len(args) < 4 else int(args[3])
        out = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    elif threshold_type == 'adapt-mean':
        neib = 11 if len(args) < 4 else int(args[3])
        if neib % 2 == 1:
            out = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, neib, 2)
        else:
            warn = True
    elif threshold_type == 'adapt-gauss':
        neib = 11 if len(args) < 4 else int(args[3])
        if neib % 2 == 1:
            out = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, neib, 2)
        else:
            warn = True
    else:
        # BUG FIX: any unknown type (not only 'help') previously fell
        # through with `out` unbound, raising UnboundLocalError at return
        warn = True
    if warn:
        print("[warning] Valid thresholds: (normal | otsu) [thresh] | (adapt-mean | adapt-gauss) [neighbors]")
        print(" thresh: 0-255 default 0")
        print(" neighbors: 3, 5, 7, 9,.. (odd number)")
        out = img
        plugin_filter(proc, ['filter', 'threshold', 'none'])
    return out
def hdr_canny(proc, img, plugin):
    """Canny edge detector; args[2]/args[3] override the low/high thresholds."""
    args = plugin[1]
    # renamed from min/max to avoid shadowing the builtins
    lo = 100 if len(args) < 3 else int(args[2])
    hi = 200 if len(args) < 4 else int(args[3])
    return cv2.Canny(img, lo, hi)
def hdr_inverse_colors(proc, img, plugin):
    # Invert every channel (255 - value) via bitwise NOT.
    return cv2.bitwise_not(img)
def hdr_resize(proc, img, plugin):
    """Resize the frame to args[3] x args[2] (h x w) if it differs."""
    args = plugin[1]
    imout = img
    if len(args) == 4:
        w = int(args[2])
        h = int(args[3])
        # BUG FIX: the old `[h, w] != img.shape[:2]` compared a list to a
        # tuple, which is always True — forcing a resize on every frame
        if (h, w) != img.shape[:2]:
            imout = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
    return imout
def hdr_rotate(proc, img, plugin):
    """Rotate by args[2] degrees around (args[4], args[3]), default center."""
    h, w = img.shape[:2]
    args = plugin[1]
    angle = int(args[2])
    cy = int(args[3]) if len(args) > 3 else h // 2
    cx = int(args[4]) if len(args) > 4 else w // 2
    matrix = cv2.getRotationMatrix2D((cx, cy), angle, 1)
    return cv2.warpAffine(img, matrix, (w, h))
def hdr_equalizer(proc, img, plugin):
    """Histogram-equalize the frame (converted to grayscale if needed)."""
    # BUG FIX: guard the channel count check — `img.shape[2]` raised
    # IndexError for already-grayscale 2-D input
    if len(img.shape) > 2 and img.shape[2] == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    return cv2.equalizeHist(gray)
def hdr_add_noise(proc, img, plugin):
    """Blend gaussian noise into the frame (args: mean, sigma, alpha)."""
    args = plugin[1]
    m = int(args[2]) if len(args) > 2 else 50
    s = int(args[3]) if len(args) > 3 else 50
    noise = cv2.randn(img.copy(), (m, m, m), (s, s, s))
    alpha = float(args[4]) if len(args) > 4 else 0.5
    return cv2.addWeighted(img, alpha, noise, 1 - alpha, 0.0)
def hdr_contours(proc, img, plugin):
    # Detect roughly rectangular contours, draw/annotate them, and cache
    # the contour list on the plugin record (read by hdr_num_detection
    # via plugin[2][1]).
    args = plugin[1]
    if len(img.shape) > 2:
        # binarize image with canny defaults
        thresh = hdr_canny(proc, img, [None, []])
    else:
        # already binarized
        thresh = img
    # parse "key=value" / bare-flag args into a dict
    cmd = dict(it.split("=") if it.find("=") != -1 else [it, ""] for it in args)
    # filter thresholds: 0 disables the corresponding bound pair
    ratio_low = float(cmd.get("rl", 0))
    ratio_high = float(cmd.get("rh", 0))
    area_low = float(cmd.get("al", 0))
    area_high = float(cmd.get("ah", 0))
    margin = int(cmd.get("margin", 2))
    post_preview = cmd.get("pp", "off") == "on"
    extract_preview = cmd.get("extract", "off") == "on"
    # find external contours on a copy (findContours may modify its input)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2 and 3 return the contour list at different tuple positions
    cnts = cnts[0] if iu.is_cv2() else cnts[1]
    # keep only 4-vertex (quadrilateral) contours passing area/ratio bounds
    cn = []
    for c in cnts:
        epsilon = 0.1 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        if len(approx) == 4:
            # compute the bounding box for the approximated contour
            (x, y, w, h) = cv2.boundingRect(approx)
            # signed aspect measure: positive when wider than tall
            ratio = (w**2 - h**2)/(w*h)
            if (area_low + area_high == 0 or area_low <= w*h <= area_high) and \
               (ratio_low + ratio_high == 0 or ratio_low <= ratio <= ratio_high):
                cn.append(approx)
    cnts = cn
    # keep only the `cnt` largest contours (by area)
    items = int(cmd.get("cnt", 10))
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:items]
    # overlay target: the filtered frame (pp=on) or the raw last frame
    if post_preview:
        output = img.copy()
    else:
        output = proc.last_frame.copy()
    # cache the contours on the plugin record for downstream handlers
    if len(plugin) < 3:
        plugin.append(["contours", cnts])
    else:
        plugin[2][1] = cnts
    # loop over the contours
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        if extract_preview:
            # return a top-down (bird's-eye) view of the first contour
            birds_eye = four_point_transform(output, c.reshape(4, 2))
            return birds_eye
        # draw the prediction box plus "ratio (area)" label
        cv2.rectangle(output, (x - margin, y - margin), (x + w + margin, y + h + margin), (0, 255, 0), 1)
        ratio = round((w**2 - h**2)/(w*h), 2)
        ovinfo = "{} ({})".format(ratio, round(w*h, 2))
        proc.putText(output, ovinfo, cord=(x-2, y-2), color=(0, 100, 255))
    if "monitor" in args:
        # NOTE(review): monitoring window code is disabled; only the
        # window name is computed here
        wn = "plugin_contours"
    return output
def hdr_erosion(proc, img, plugin):
    """Morphological erosion with a square kernel (size args[2], default 5)."""
    args = plugin[1]
    size = int(args[2]) if len(args) > 2 else 5
    return cv2.erode(img, np.ones((size, size), np.uint8), iterations=1)


def hdr_dilation(proc, img, plugin):
    """Morphological dilation with a square kernel (size args[2], default 5)."""
    args = plugin[1]
    size = int(args[2]) if len(args) > 2 else 5
    return cv2.dilate(img, np.ones((size, size), np.uint8), iterations=1)
def plugin_grid(proc, img, plugin):
    """Overlay 1-px black gridlines with spacing dy/dx (default 16)."""
    opts = dict(it.split("=") if it.find("=") != -1 else [it, ""] for it in plugin[1])
    spacing = (int(opts.get("dy", 16)), int(opts.get("dx", 16)))
    return iv.gridlines(img, spacing, 1, np.zeros(3, dtype="uint8"))
def plugin_checker(proc, img, plugin):
    """Overlay a 64-px checkerboard of black and dark-gray squares."""
    dark = np.array([0, 0, 0], dtype="uint8")
    light = np.array([100, 100, 100], dtype="uint8")
    return iv.checkerboard(img, 64, dark, light)
# Face detector global variables (Haar cascades, loaded lazily on first use)
face_cascade = None
eye_cascade = None


def hdr_face_detection(proc, img, plugin):
    # Draw rectangles around detected faces, and around eyes inside each
    # face region, using OpenCV Haar cascade classifiers.
    global face_cascade, eye_cascade
    output = img
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # one-time lazy load of the cascade files
    if face_cascade is None or eye_cascade is None:
        face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
        eye_cascade = cv2.CascadeClassifier('cascades/haarcascade_eye.xml')
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(output, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # restrict the eye search to the face region
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = output[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    return output
# Thermometer detector global variable (Haar cascade, loaded lazily)
thermo_cascade = None


def hdr_thermo_detection(proc, img, plugin):
    """Box the largest thermometer detections found by a Haar cascade."""
    global thermo_cascade
    args = plugin[1]  # BUG FIX: `args` was read below but never defined
    output = img
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if thermo_cascade is None:
        thermo_cascade = cv2.CascadeClassifier('cascades/haarcascade_thermometer.xml')
    thermo = thermo_cascade.detectMultiScale(gray, 1.3, 5)
    # keep only the `items` largest detections by area
    items = 10 if len(args) < 3 else int(args[2])
    thermo = sorted(thermo, key=lambda item: (item[2]*item[3]), reverse=True)[:items]
    for (x, y, w, h) in thermo:
        cv2.rectangle(output, (x, y), (x+w, y+h), (255, 0, 0), 2)
    return output
# Face detector global variables
nums_cascade = None
def hdr_num_detection(proc, img, plugin):
    """Recognize digits in the frame with a pre-trained LeNet model.

    Digit ROIs come from (in priority order): the user selection box,
    the object-tracker box, the active contours filter, or the whole
    frame.  Predictions are drawn on the filtered frame, or on the raw
    frame when the "pp=on" console argument is given.
    """
    global nums_cascade
    # get args
    args = plugin[1]
    cmd = dict(it.split("=") if it.find("=") != -1 else [it, ""] for it in args)
    # set the image to be overlayed to the original frame, bypassing filters
    post_preview = cmd.get("pp", "off") == "on"
    if post_preview:
        output = img.copy()
    else:
        output = proc.last_frame.copy()
    # convert to grayscale
    if len(img.shape) > 2:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    # load the pre-trained network (once)
    if nums_cascade is None:
        model = cmd.get("model", "cascades/lenet_lcdigits_14x28.hdf5")
        if model is None:
            print("[warning] Model not found.")
            plugin_filter(proc, ['filter', 'numbers', 'none'])
            return output
        nums_cascade = mio.get_model(model)
    # get contours filter if active
    contours = proc.get_plugin(hdr_contours)
    # True when the classification batch was built from contour ROIs,
    # i.e. `batchLocs` below is defined.
    used_contours = False
    if proc.selection:
        # grab inner selection mask area
        tx, ty, bx, by = proc.refPt[0][0], proc.refPt[0][1], proc.refPt[1][0], proc.refPt[1][1]
        gray = gray[ty:by, tx:bx].copy()
        gray = cv2.resize(gray, (14, 28), interpolation=cv2.INTER_CUBIC)
        gray = img_to_array(gray)
        gray = np.expand_dims(gray, axis=0)
    elif proc.tracking:
        # grab object tracker mask area
        tx, ty, bx, by = proc.tracker.tl[0], proc.tracker.tl[1], proc.tracker.br[0], proc.tracker.br[1]
        gray = gray[ty:by, tx:bx].copy()
        gray = gray[np.newaxis, :, :, np.newaxis]
    elif contours is not None:
        # BUG FIX: this branch used to come AFTER a catch-all
        # "not tracking and not selection" branch, which made it
        # unreachable and then crashed below on the undefined
        # `batchLocs` whenever the contours filter was active.
        used_contours = True
        batchROIs = None
        batchLocs = []
        for c in contours[2][1]:
            # compute the bounding box for the contour
            (tx, ty, w, h) = cv2.boundingRect(c)
            # expand contours a bit
            margin = 3
            tx -= margin
            ty -= margin
            bx = tx + w + margin
            by = ty + h + margin
            roi = gray[ty:by, tx:bx]
            roi = cv2.resize(roi, (14, 28), interpolation=cv2.INTER_CUBIC)
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            if batchROIs is None:
                batchROIs = roi
            else:
                batchROIs = np.vstack([batchROIs, roi])
            batchLocs.append((tx, ty, bx, by))
        # no contours this frame -> nothing to classify
        gray = batchROIs if batchROIs is not None else None
    else:
        # no selection, tracker or contours: classify the whole frame
        gray = cv2.resize(gray, (14, 28), interpolation=cv2.INTER_CUBIC)
        gray = img_to_array(gray)
        gray = np.expand_dims(gray, axis=0)
    if gray is not None:
        predictions = nums_cascade.predict(gray, batch_size=8)
        preds = predictions.argmax(axis=1)
        if used_contours:
            for i in range(len(preds)):
                ovinfo = "{}".format(preds[i])
                tx, ty, bx, by = batchLocs[i]
                # draw the prediction on the output image
                cv2.rectangle(output, (tx, ty), (bx, by), (0, 255, 0), 1)
                proc.putText(output, ovinfo, cord=(tx, by+15), size=0.6, color=(0, 100, 255))
        else:
            ovinfo = "{}".format(preds[0])
            if not (proc.tracking or proc.selection):
                proc.putText(output, ovinfo, cord=(20, 20), size=0.6, color=(0, 100, 255))
            else:
                proc.putText(output, ovinfo, cord=(tx, by+15), size=0.6, color=(0, 100, 255))
    return output
# CMT object tracking
def hdr_tracker_cmt(proc, img, plugin):
    """Track the user-selected ROI with the CMT keypoint tracker.

    On an active selection, a fresh tracker is initialised from the
    selection rectangle; on subsequent frames the tracker is updated.
    Plugin record layout after initialisation:
    plugin[2] = ["debug", flag], plugin[3] = frame counter.
    """
    output = img.copy()
    gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
    if proc.selection:
        # initialise a new tracker from the selection rectangle
        tx, ty, bx, by = proc.refPt[0][0], proc.refPt[0][1], proc.refPt[1][0], proc.refPt[1][1]
        cmt_tracker = CMT.CMT()
        # cmt_tracker.estimate_scale = args.estimate_scale
        cmt_tracker.estimate_rotation = False
        tl = (tx, ty)
        br = (bx, by)
        cmt_tracker.initialise(gray, tl, br)
        # plugin.append(cmt_tracker)
        plugin.append(["debug", True])  # becomes plugin[2]
        proc.selection = False
        proc.tracking = True
        proc.tracker = cmt_tracker
        frame = 1
        plugin.append(frame)  # becomes plugin[3]
    elif proc.tracking:
        # plugin = proc.get_plugin(hdr_cmt)
        # cmt_tracker = plugin[2]
        cmt_tracker = proc.tracker
        tic = time.time()
        cmt_tracker.process_frame(gray)
        toc = time.time()
        # Display results
        # Draw updated estimate
        # if cmt_tracker.has_result:
        # cv2.line(output, cmt_tracker.tl, cmt_tracker.tr, (0, 255, 0), 2)
        # cv2.line(output, cmt_tracker.tr, cmt_tracker.br, (0, 255, 0), 2)
        # cv2.line(output, cmt_tracker.br, cmt_tracker.bl, (0, 255, 0), 2)
        # cv2.line(output, cmt_tracker.bl, cmt_tracker.tl, (0, 255, 0), 2)
        # cv2.rectangle(output, cmt_tracker.tl, cmt_tracker.br, (0, 255, 0), 1)
        # cmt_utils.draw_keypoints(cmt_tracker.tracked_keypoints, output, (255, 255, 255))
        # this is from simplescale
        # cmt_utils.draw_keypoints(cmt_tracker.votes[:, :2], output) # blue
        # cmt_utils.draw_keypoints(cmt_tracker.outliers[:, :2], output, (0, 0, 255))
        # Advance frame number
        frame = plugin[3] + 1
        plugin[3] = frame
        # debug flag: print per-frame tracker statistics on one console line
        if plugin[2][1]:
            print('\r{5:04d}: center: {0:.2f},{1:.2f} scale: {2:.2f}, active: {3:03d}, {4:04.0f}ms'.format(cmt_tracker.center[0], cmt_tracker.center[1], cmt_tracker.scale_estimate, cmt_tracker.active_keypoints.shape[0], 1000 * (toc - tic), frame), end='')
    return output
# OpenCV KCF object tracking
def hdr_tracker(proc, img, plugin):
    """Track the user-selected ROI with an OpenCV built-in tracker.

    Console arg "name=<tracker>" picks the algorithm (default "kcf");
    "name=cmt" replaces this plugin with the CMT tracker instead.
    Plugin record layout after initialisation:
    plugin[2] = ["debug", flag], plugin[3] = frame counter.
    """
    output = img.copy()
    # NOTE(review): `gray` is currently unused by the OpenCV trackers below.
    gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
    # get args
    args = plugin[1]
    cmd = dict(it.split("=") if it.find("=") != -1 else [it, ""] for it in args)
    if cmd.get("name", "") == "cmt":
        # hand off to the CMT tracker plugin and drop this one
        proc.remove_plugin(hdr_tracker)
        plugin_filter(proc, ["filter", "cmt"])
        return output
    if proc.selection:
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        # OPENCV_OBJECT_TRACKERS = {
        # "csrt": cv2.TrackerCSRT_create,
        # "kcf": cv2.TrackerKCF_create,
        # "boosting": cv2.TrackerBoosting_create,
        # "mil": cv2.TrackerMIL_create,
        # "tld": cv2.TrackerTLD_create,
        # "medianflow": cv2.TrackerMedianFlow_create,
        # "mosse": cv2.TrackerMOSSE_create
        # }
        OPENCV_OBJECT_TRACKERS = {
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create
        }
        # grab the appropriate object tracker using our dictionary of
        # OpenCV object tracker objects
        intracker = OPENCV_OBJECT_TRACKERS[cmd.get("name", "kcf")]()
        # Get the roi from selection
        tx, ty, bx, by = proc.refPt[0][0], proc.refPt[0][1], proc.refPt[1][0], proc.refPt[1][1]
        intracker.init(img, (tx, ty, bx-tx, by-ty))
        # Wrapper object for tracker, mirroring the CMT tracker attributes
        # (has_result / tl / br) that the rest of the app reads.
        tracker = type('mytracker', (object,), {
            "tracker": intracker,
            "has_result": False,
            "tl": (0, 0),
            "br": (0, 0)
        })
        plugin.append(["debug", True])  # becomes plugin[2]
        proc.selection = False
        proc.tracking = True
        proc.tracker = tracker
        frame = 1
        plugin.append(frame)  # becomes plugin[3]
    elif proc.tracking:
        # plugin = proc.get_plugin(hdr_cmt)
        # cmt_tracker = plugin[2]
        tracker = proc.tracker
        intracker = tracker.tracker
        tic = time.time()
        # grab the new bounding box coordinates of the object
        (success, box) = intracker.update(img)
        tracker.has_result = success
        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]
            tracker.tl = (x, y)
            tracker.br = (x + w, y + h)
        toc = time.time()
        frame = plugin[3] + 1
        plugin[3] = frame
        # if plugin[2][1]:
        # print('\rframe: {:04d}, {:04.0f}ms'.format(frame, 1000 * (toc - tic)), end='')
    return output
# ROI selection with OpenCV
def hdr_roi_select(proc, img, plugin):
    """Let the user draw a ROI with OpenCV's built-in blocking selector.

    The processor is paused while the selection UI is open; the chosen
    box is only printed for now.
    """
    proc.pause()
    box = cv2.selectROI(proc.winname, img, fromCenter=False, showCrosshair=True)
    print(type(box), box)
    proc.unpause()
    return img
# Harris corner feature extractor
def hdr_harris_corner(proc, img, plugin):
    """Paint Harris corner responses red on the frame."""
    frame = img
    grayscale = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    response = cv2.cornerHarris(grayscale, 2, 3, 0.04)
    # dilation only makes the corner marks easier to see
    response = cv2.dilate(response, None)
    # threshold relative to the strongest response; may need tuning per image
    frame[response > 0.01 * response.max()] = [0, 0, 255]
    return frame
# Harris corner with subPixel accuracy feature extractor
def hdr_harris_corner_subpixel(proc, img, plugin):
    """Mark Harris corner centroids (red) and their sub-pixel refinements (green)."""
    frame = img
    grayscale = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    response = cv2.cornerHarris(grayscale, 2, 3, 0.04)
    # dilation only makes the corner marks easier to see
    response = cv2.dilate(response, None)
    # binary mask of the strongest responses (threshold may need tuning)
    ret, mask = cv2.threshold(response, 0.01 * response.max(), 255, 0)
    mask = np.uint8(mask)
    # one centroid per connected corner region
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)
    # stopping criteria for the iterative sub-pixel refinement
    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    refined = cv2.cornerSubPix(grayscale, np.float32(centroids), (5, 5), (-1, -1), term)
    # draw both point sets: centroids in red, refined corners in green
    points = np.int0(np.hstack((centroids, refined)))
    frame[points[:, 1], points[:, 0]] = [0, 0, 255]
    frame[points[:, 3], points[:, 2]] = [0, 255, 0]
    return frame
# Harris corner feature extractor
def hdr_good_features_to_track(proc, img, plugin):
    """Mark up to 25 Shi-Tomasi corners as filled circles on the frame."""
    frame = img
    # accept both color and already-grayscale frames
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if len(img.shape) > 2 else img
    found = np.int0(cv2.goodFeaturesToTrack(grayscale, 25, 0.01, 10))
    for corner in found:
        cx, cy = corner.ravel()
        cv2.circle(frame, (cx, cy), 3, 255, -1)
    return frame
# Deskew image using openCV moments
# Experimental - use a binary image with skewed digits to preview results
def hdr_deskew(proc, img, plugin):
    """Deskew the frame using central image moments (experimental).

    Works best on binary images with skewed digits.  When a selection
    is active, the skew is estimated from the selected region only, but
    the whole frame is still warped.
    """
    # NOTE(review): cv2.warpAffine expects dsize as (width, height) while
    # img.shape[:2] is (rows, cols) — confirm the intended output size.
    SZ = img.shape[:2]
    # convert to grayscale
    if len(img.shape) > 2:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    imout = gray
    if proc.selection:
        tx, ty, bx, by = proc.refPt[0][0], proc.refPt[0][1], proc.refPt[1][0], proc.refPt[1][1]
        # BUG FIX: the raw image patch was assigned to `m`, which then
        # crashed below on m['mu02']; compute the moments of the patch.
        m = cv2.moments(imout[ty:by, tx:bx].copy())
    else:
        m = cv2.moments(imout)
    if abs(m['mu02']) < 1e-2:
        # no deskewing needed.
        return imout
    # Calculate skew based on central moments.
    skew = m['mu11']/m['mu02']
    # Calculate affine transform to correct skewness.
    M = np.float32([[1, skew, -0.5*SZ[0]*skew], [0, 1, 0]])
    # Apply affine transform
    imout = cv2.warpAffine(imout, M, SZ, flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return imout
# Image patch overlay into selected area.
# If an image is not given from the console, it copies the selected area patch
# By moving or re-selecting a new area, the patch is pasted into.
def hdr_patch(proc, img, plugin):
    """Paste a patch into the current selection rectangle, resized to fit.

    The patch comes from the "file=<path>" console argument, or — when
    no file is given — from the selection itself on the first call.
    Once chosen, the patch is cached as plugin[2] (the plugin record
    then has length 3) and reused on later calls.
    """
    # get args
    args = plugin[1]
    cmd = dict(it.split("=") if it.find("=") != -1 else [it, ""] for it in args)
    fname = cmd.get("file", "")
    imout = img
    if proc.selection:
        tx, ty, bx, by = proc.refPt[0][0], proc.refPt[0][1], proc.refPt[1][0], proc.refPt[1][1]
        plugin = proc.get_plugin(hdr_patch)
        if len(plugin) == 3:
            # a patch was already cached on a previous call
            patch = plugin[2].copy()
        elif len(fname) > 0:
            patch = cv2.imread(fname)
            plugin.append(patch.copy())
        else:
            # no file given: grab the selected area itself as the patch
            args.append("source")
            patch = imout[ty:by, tx:bx].copy()
            plugin.append(patch.copy())
        # resize the cached patch to the current selection size
        if patch.shape[:2] != (by-ty, bx-tx):
            patch = cv2.resize(patch, (bx-tx, by-ty), interpolation=cv2.INTER_AREA)
        # NOTE(review): "ovelray" looks like a typo for "overlay" in the
        # iv helper library — confirm the actual function name there.
        imout = iv.ovelray_patch(img, patch, [(ty, tx), (by, bx)])
    return imout
# construct the kernel bank, a list of functions applying kernels or filters
# Registry mapping console filter names to their handler functions and
# help text.  Handlers take (proc, img, plugin) and return the filtered
# frame; the string "handlers" (remove/load/save/list/help) are console
# meta-commands dispatched elsewhere.
filter_bank = {
    "blur" : { "handler": hdr_smallBlur, "desc": "Soft blur 7x7 kernel" },
    "blur-more" : { "handler": hdr_largeBlur, "desc": "Hard blur 21x21 kernel" },
    "gaussian" : { "handler": hdr_gaussian, "desc": "Classic gaussian blur" },
    "sharpen" : { "handler": hdr_sharpen, "desc": "Sharpen filter (3x3 kernel)" },
    "laplacian" : { "handler": hdr_laplacian, "desc": "Laplacian filter (3x3 kernel)" },
    "sobel" : { "handler": hdr_sobel, "desc": "{}{:17}{}{:<17}{}".format(
            "<kernel size>: reveal outlines\n",
            " ", "Apply sobel in both directions.\n",
            " ", "<kernel-size> (odd numbers only): [1|3|5|..]")
        },
    "sobel-x" : { "handler": hdr_sobelX, "desc": "Apply sobel in x direction (3x3 kernel)" },
    "sobel-y" : { "handler": hdr_sobelY, "desc": "Apply sobel in y direction (3x3 kernel)" },
    "emboss" : { "handler": hdr_emboss, "desc": "Emboss filter (3x3 kernel)." },
    "threshold" : { "handler": hdr_threshold, "desc": "{}{:<17}{}{:<17}{}".format(
            "(normal|otsu) [thresh] | (adapt-mean|adapt-gauss) [neighbors]\n",
            " ", "thresh: 0-255, default 0\n",
            " ", "neighbors (odd numbers only): 3, 5, 7, 9,.., default 11")
        },
    "canny" : { "handler": hdr_canny, "desc": "Canny" },
    "inverse" : { "handler": hdr_inverse_colors, "desc": "Description" },
    "equalizer" : { "handler": hdr_equalizer, "desc": "Equalizer" },
    "noise" : { "handler": hdr_add_noise, "desc": "Description" },
    "contours" : { "handler": hdr_contours, "desc": "Contours" },
    "erode" : { "handler": hdr_erosion, "desc": "Description" },
    "dilate" : { "handler": hdr_dilation, "desc": "Description" },
    "grid" : { "handler": plugin_grid, "desc": "Description" },
    "resize" : { "handler": hdr_resize, "desc": "Post resize frames" },
    "rotate" : { "handler": hdr_rotate, "desc": "Description" },
    "faces" : { "handler": hdr_face_detection, "desc": "Description" },
    "thermo" : { "handler": hdr_thermo_detection, "desc": "Description" },
    "numbers" : { "handler": hdr_num_detection, "desc": "Description" },
    "cmt" : { "handler": hdr_tracker_cmt, "desc": "Description" },
    "tracker" : { "handler": hdr_tracker, "desc": "Description" },
    "roi" : { "handler": hdr_roi_select, "desc": "Description" },
    "harris" : { "handler": hdr_harris_corner, "desc": "Description" },
    "harris-sp" : { "handler": hdr_harris_corner_subpixel, "desc": "Description" },
    "good-feat" : { "handler": hdr_good_features_to_track, "desc": "Description" },
    "deskew" : { "handler": hdr_deskew, "desc": "Description" },
    "patch" : { "handler": hdr_patch, "desc": "Description" },
    "none" : { "handler": "remove", "desc": "{}{:<17}{}".format(
            "Remove all filters.\n",
            " ", "If none is given after a filter name, it removes this filter only")
        },
    "load" : { "handler": "load", "desc": "Description" },
    "save" : { "handler": "save", "desc": "Description" },
    "list" : { "handler": "list", "desc": "List active filters" },
    "help" : { "handler": "help", "desc": "Description"}
}
|
# String converted to binary sequence
def str2bin(msg):
    """Return *msg* encoded as a string of 8-bit binary codes, one per character.

    >>> str2bin('A')
    '01000001'
    """
    # join over a generator is linear; the old index loop with repeated
    # string concatenation was quadratic in the message length
    return ''.join(format(ord(ch), '08b') for ch in msg)
# Demo: encode a sample message and print its binary representation.
msg = 'This is a test message'
msg_bin = str2bin(msg)
print(msg_bin, end = '\n')
|
def default_special_forms_table():
    """Build the default mapping from surface syntax (operators, keywords
    and container brackets) to anoky special-form instances.

    Every special-form class is loaded through ``fallback_import`` so a
    missing compiled module falls back to an alternate location.
    Returns a dict keyed by the source token (e.g. "+", "if", "[]").
    """
    from anoky.fallback import fallback_import
    # module-level imports (whole modules)
    operators = fallback_import("anoky.special_forms.operators")
    assign = fallback_import("anoky.special_forms.assign")
    containers = fallback_import("anoky.special_forms.containers")
    # single-class imports; module names mirror the Python keywords they implement
    Def = fallback_import("anoky.special_forms.def", "Def")
    Try = fallback_import("anoky.special_forms.try", "Try")
    Raise = fallback_import("anoky.special_forms.raise", "Raise")
    Assert = fallback_import("anoky.special_forms.assert", "Assert")
    Pass = fallback_import("anoky.special_forms.pass", "Pass")
    Return = fallback_import("anoky.special_forms.return", "Return")
    Yield = fallback_import("anoky.special_forms.yield", "Yield")
    Break = fallback_import("anoky.special_forms.break", "Break")
    Continue = fallback_import("anoky.special_forms.continue", "Continue")
    While = fallback_import("anoky.special_forms.while", "While")
    With = fallback_import("anoky.special_forms.with", "With")
    For = fallback_import("anoky.special_forms.for", "For")
    NotInIsolation = fallback_import("anoky.special_forms.not_in_isolation", "NotInIsolation")
    # ExpectPreviousForm = fallback_import("anoky.special_forms.expect_previous_form", "ExpectPreviousForm")
    Import = fallback_import("anoky.special_forms.import", "Import")
    MetaImport = fallback_import("anoky.special_forms.import", "MetaImport")
    Quote = fallback_import("anoky.special_forms.quote", "Quote")
    (RawMacro, RawSpecialForm) = fallback_import("anoky.macros.rawmacro", "RawMacro", "RawSpecialForm")
    Compare = fallback_import("anoky.special_forms.comparison", "Compare")
    If = fallback_import("anoky.special_forms.if", "If")
    Attribute = fallback_import("anoky.special_forms.attribute", "Attribute")
    Class = fallback_import("anoky.special_forms.class", "Class")
    (Global, Nonlocal) = fallback_import("anoky.special_forms.scope_statement", "Global", "Nonlocal")
    return {
        "=": assign.Assign(),
        ".": Attribute(),
        # "+" : Op.UnaryAddOp(),
        # "-" : Op.UnarySubOp(),
        "not": operators.NotOp(),
        "~": operators.InvertOp(),
        "+": operators.AddOp(),
        "-": operators.SubOp(),
        "*": operators.MultOp(),
        "/": operators.DivOp(),
        "//": operators.FloorDivOp(),
        "%": operators.ModOp(),
        "**": operators.PowOp(),
        "<<": operators.LShiftOp(),
        ">>": operators.RShiftOp(),
        "|": operators.BitOrOp(),
        "^": operators.BitXorOp(),
        "&": operators.BitAndOp(),
        "@": operators.MatMultOp(),
        "and": operators.AndOp(),
        "or": operators.OrOp(),
        "+=": assign.AddAssign(),
        "-=": assign.SubtractAssign(),
        "*=": assign.MultiplyAssign(),
        "/=": assign.DivideAssign(),
        "//=": assign.IntDivideAssign(),
        "%=": assign.ModuloAssign(),
        "**=": assign.PowAssign(),
        "<<=": assign.BitLShiftAssign(),
        ">>=": assign.BitRShiftAssign(),
        "|=": assign.BitOrAssign(),
        "^=": assign.BitXorAssign(),
        "&=": assign.BitAndAssign(),
        "@=": assign.MatMultAssign(),
        "compare": Compare(),
        "[]": containers.List(),
        "{}": containers.BraceContainer(),
        # "{}" : Ct.Dict(), #or set?
        "@[]": containers.Subscript(),
        "if": If(),
        "else": NotInIsolation(),
        "elif": NotInIsolation(),
        "while": While(),
        "for": For(),
        "raise": Raise(),
        "try": Try(),
        "assert": Assert(),
        "pass": Pass(),
        "return": Return(),
        "yield": Yield(),
        "break": Break(),
        "continue": Continue(),
        "with": With(),
        "def": Def(),
        "global": Global(),
        "nonlocal": Nonlocal(),
        "class": Class(),
        "import": Import(),
        "#meta_import": MetaImport(),
        "quote": Quote(),
        "rawmacro": RawMacro(),
        "rawspecial": RawSpecialForm()
    }
# container = Cnt.Container()
# default_special_forms_table["{}"] = container
# default_special_forms_table["[]"] = container
|
# Resolva o seguinte sistema de equações não-lineares:
# 2x - y - e**x = 0
# -x + 2y - e**y = 0
|
# Python imports.
# Other imports.
from agents.safe_agent import SafeAgent
from constants import *
from dynamic_programming import value_iteration
class SafeEGreedyAgent(SafeAgent):
    """Epsilon-greedy agent that (optionally) restricts exploration to the
    actions the inherited SafeAgent machinery marks as safe.

    Epsilon is annealed linearly from 1.0 down to ``epsilon`` over
    ``annealing_time`` steps; Q-values are periodically re-solved with
    value iteration on the learned transition model.
    """
    def __init__(self, actions, states, reward_func, initial_safe_states, initial_safe_actions,
                 similarity_function, analagous_state_function,
                 epsilon=0.1, annealing_time=1000,
                 transition_support_function=None,
                 gamma=0.99, vi_horizon=100, name='safe-e-greedy-agent', beta_T=0.5,
                 tau=0.1, update_frequency=100,
                 use_sparse_matrices=False, safe=True):
        # safe=False disables the safe-action masking entirely
        SafeAgent.__init__(self, actions, states, reward_func, initial_safe_states, initial_safe_actions,
                           similarity_function, analagous_state_function,
                           transition_support_function, gamma, vi_horizon, name, beta_T,
                           tau, update_frequency, use_sparse_matrices)
        self.safe = safe
        self.epsilon = epsilon
        self.annealing_time = annealing_time
        # tabular action-value estimates, one row per state
        self.q = np.zeros([self.num_states, self.num_actions], dtype=DTYPE)
    def act(self, state, reward, learning=True):
        """Choose an action for ``state`` (epsilon-greedy over safe actions)."""
        s = self.state_to_id[state]
        if self.s0 is None:
            self.s0 = s
        if self.prev_state is not None:
            ps = self.state_to_id[self.prev_state]
            pa = self.action_to_id[self.prev_action]
            psa = np.ravel_multi_index([ps, pa], [self.num_states, self.num_actions])
            # NOTE(review): T_hat / T_count are computed but never used —
            # possibly leftover debugging code; confirm before removing.
            T_hat = self.transition_table.T_hat[psa, :].toarray().flatten()
            T_count = T_hat[s]
        if learning:
            self.update(self.prev_state, self.prev_action, reward, state)
        if self.safe:
            if reward < 0:
                print('FAILURE: safe agent hit unsafe state')
            safe_actions = self.z_safe[s, :]
            if safe_actions.sum() == 0:
                # fall back to all actions rather than deadlock
                print('UNSAFE: safe policy is not defined for state %s' % state)
                safe_actions = np.ones(self.num_actions)
            # unsafe actions get -inf so argmax never picks them
            qs = np.where(safe_actions, self.q[s, :], -np.inf)
        else:
            safe_actions = np.ones_like(self.actions)
            qs = self.q[s, :]
        # linear annealing: epsilon starts at 1.0 and decays to self.epsilon
        epsilon = self.epsilon + max(0, (self.annealing_time - self.step_number)/self.annealing_time)*(1 - self.epsilon)
        if np.random.rand() < epsilon:
            a = np.random.choice(np.argwhere(safe_actions).flatten())
        else:
            a = np.argmax(qs)
        action = self.actions[a]
        self.prev_state = None if state.is_terminal() else state
        self.prev_action = action
        self.step_number += 1
        return action
    def update(self, state, action, reward, next_state):
        """Record the transition; every ``update_frequency`` steps re-solve Q with VI."""
        # If this is the first state, just return.
        if state is None:
            return
        self.transition_table.insert(state, action, reward, next_state)
        if self.step_number % self.update_frequency == 0:
            transition_matrix, reward_matrix, terminal_states, eps_T = self.transition_table.prepare_for_vi(
                tau=self.tau)
            action_mask = None
            if self.safe:
                self.z_safe = self.calculate_z_safe(transition_matrix, eps_T, terminal_states)
                action_mask = self.z_safe
            else:
                # ensure that the agent learns the safe-optimal policy
                reward_matrix = np.where(reward_matrix < 0, -100, reward_matrix)
            _, self.q = value_iteration.value_iteration(transition_matrix, reward_matrix, terminal_states,
                                                        horizon=self.vi_horizon,
                                                        gamma=self.gamma,
                                                        action_mask=action_mask,
                                                        q_default=-100,
                                                        use_sparse_matrices=True)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 08:20:54 2018
@author: Dorota
"""
# Dependency-ordering puzzle: each input line states that one step must be
# finished before another can begin; print the steps in topological order
# with alphabetical tie-breaking.
data = open("data.txt").read().split("\n")
for i, d in enumerate(data):
    # parse "Step X must be finished before step Y can begin." into [X, Y]
    data[i] = d.split("must be finished before step")
    data[i][0] = data[i][0].replace(" ","")
    data[i][0] = data[i][0].replace("Step","")
    data[i][1] = data[i][1].replace(" can begin.","")
    data[i][1] = data[i][1].replace(" ","")
# all steps that have at least one prerequisite
key = set()
for k in data:
    key.add(k[1])
# my_items maps each step to the list of steps it depends on
my_items = {}
for k in key:
    my_items[k] = []
for val in data:
    my_items[val[1]].append(val[0])
# steps with no prerequisites are candidate starting points
queue = []
for v in data:
    if v[0] not in list(key):
        my_items[v[0]] = []
        queue.append(v[0])
# start from the alphabetically-first prerequisite-free step
queue = list((sorted(queue))[0])
i = 0
while i < 1 +len(my_items.keys()):
    v_queue = []
    # NOTE(review): the loop variable shadows the `key` set built above —
    # intentional here since the set is no longer needed, but fragile.
    for key, val in my_items.items():
        # a step is ready once all its prerequisites are already queued
        result = all(v in queue for v in val)
        if result:
            if key not in queue:
                v_queue.append(key)
    if len( v_queue) != 0:
        # alphabetical tie-break among the ready steps
        new_item = (sorted(v_queue)[0])
        queue.append(new_item)
    i = i+1
#solution to the first part of task:
print("".join(queue))
|
import cv2
import numpy as np
# Overlay two photos of the same object: match SIFT features with Lowe's
# ratio test, estimate a homography with RANSAC, warp and alpha-blend.
MIN_MATCH_COUNT = 10
img1 = cv2.imread('ojota1.jpg')
cv2.imshow('img1', img1)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.imread('ojota2.jpg')
cv2.imshow('img2', img2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# Initialize the detector and descriptor
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(gray1, None)
kp2, des2 = sift.detectAndCompute(gray2, None)
img1=cv2.drawKeypoints(gray1,kp1,img1)
cv2.imwrite('sift_keypoints.jpg',img1)
# brute-force matcher, two nearest neighbours per descriptor
matcher = cv2.BFMatcher (cv2.NORM_L2)
matches = matcher.knnMatch(des1, des2, k=2)
# Keep the good matches using Lowe's ratio test
good = [ ]
for m, n in matches :
    if m.distance < 0.70*n.distance:
        good.append(m)
if(len(good) > MIN_MATCH_COUNT):
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0) # Compute the homography with RANSAC
    wimg2 = cv2.warpPerspective(img2, H, (img1.shape[1], img1.shape[0]))
    # Blend both images
    alpha = 0.5
    blend = np.array(wimg2 * alpha + img1 * (1 - alpha), dtype=np.uint8)
    cv2.imshow('blend', blend)
# block until the user presses 'q', then close all windows
while(1):
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 23:17:17 2018
@author: 王磊
"""
# coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
def drawHeart():
    """Plot a heart-shaped curve (x = cos t, y = sin t + x^(2/3)) in red."""
    plt.figure(figsize=(12, 6))
    t = np.linspace(-np.pi/2, np.pi/2, 1000)
    xs = np.cos(t)
    ys = np.sin(t) + np.power(xs, 2.0/3)
    # draw the right half of the heart, then mirror it across the y-axis
    plt.plot(xs, ys, color='red', linewidth=2, label='h')
    plt.plot(-xs, ys, color='red', linewidth=2, label='h')
    plt.xlabel('横轴:时间', fontproperties='FangSong', fontsize=20, color='r')
    plt.ylabel('纵轴:高度', fontproperties='FangSong', fontsize=20, color='b')
    plt.xlim(-2, 2)
    plt.ylim(-1.5, 2)
    plt.show()
if __name__ == '__main__':
    drawHeart()
|
# -*- coding: UTF-8 -*-
"""
PCI math Lib
大整数请用python内建类型int
大浮点数请用decimal.Decimal或sympy.Float,比如Decimal('3.1415926535')或Float('1e-3', 3)
"""
import PCILib.PCImathLib.analysis as pcianalysis
import PCILib.PCImathLib.discrete as pcidiscrete
import PCILib.PCImathLib.stat as pcistat
# Explicit public-API list for `from ... import *` (currently empty).
__all__=[]
class Cartesian(object):
    """Compute the Cartesian product of any number of data lists.

    Axes can be supplied to the constructor and/or appended later with
    :meth:`add_data`; :meth:`build` materialises the product.

    >>> car = Cartesian([1, 2, 3, 4])
    >>> car.add_data([5, 6, 7, 8],[9, 10, 11, 12])
    >>> print(car.build(return_list=True))
    [(1, 5, 9), (1, 5, 10), (1, 5, 11), (1, 5, 12), (1, 6, 9), (1, 6, 10), (1, 6, 11), (1, 6, 12), (1, 7, 9), (1, 7, 10), (1, 7, 11), (1, 7, 12), (1, 8, 9), (1, 8, 10), (1, 8, 11), (1, 8, 12), (2, 5, 9), (2, 5, 10), (2, 5, 11), (2, 5, 12), (2, 6, 9), (2, 6, 10), (2, 6, 11), (2, 6, 12), (2, 7, 9), (2, 7, 10), (2, 7, 11), (2, 7, 12), (2, 8, 9), (2, 8, 10), (2, 8, 11), (2, 8, 12), (3, 5, 9), (3, 5, 10), (3, 5, 11), (3, 5, 12), (3, 6, 9), (3, 6, 10), (3, 6, 11), (3, 6, 12), (3, 7, 9), (3, 7, 10), (3, 7, 11), (3, 7, 12), (3, 8, 9), (3, 8, 10), (3, 8, 11), (3, 8, 12), (4, 5, 9), (4, 5, 10), (4, 5, 11), (4, 5, 12), (4, 6, 9), (4, 6, 10), (4, 6, 11), (4, 6, 12), (4, 7, 9), (4, 7, 10), (4, 7, 11), (4, 7, 12), (4, 8, 9), (4, 8, 10), (4, 8, 11), (4, 8, 12)]
    """
    def __init__(self, *data):
        # each positional argument is one axis of the product
        self._data_list = list(data)
    def add_data(self, *data):
        """Append one or more data lists (further product axes)."""
        self._data_list.extend(data)
    def build(self, return_list=False):
        """Return the Cartesian product as an ordered list of tuples,
        or as a set when ``return_list`` is False."""
        import itertools
        if return_list:
            # list(...) instead of a redundant [x for x in ...] comprehension
            return list(itertools.product(*self._data_list))
        else:
            return set(itertools.product(*self._data_list))
from .stat import (mean,std)
from .discrete import (factorial,Prime,fact,eular,gcd,lcm,sgn,euclidean_algorithm,euclidean_algorithm_recursion,
mod_m_inverse,solve_congruence,change_base,pow_n_mod_m,solve_congruence_set,primality_test,
primitive_root,discrete_logarithm,legendre_symbol,quick_pow)
from .analysis import (fourier_even,fourier_odd,fourier_series,fourier_transform_inverse,fourier_transform,
generalized_fourier,legrendre,legrendre_list,schmidt,schmidt_list,schmidt_orthogonalization,
schmidt_orthogonalization_list,convolution,convolution1,dot_product2)
if __name__ == '__main__':
    # run the module doctests and report the elapsed wall-clock time
    import time
    import doctest
    start_time = time.time()
    doctest.testmod()
    print("运行时间: {} s".format(time.time() - start_time))
|
# Made from a copy from channelRoomGUI:
import pygame
from pygame.locals import *
import time
import textBox
import textBoxList
import button
import sys
import clientContext
import channelRoomGUI
import waitingRoomWindow
def main(threadName, args):
    """Join-game window: lists the channel's open game rooms and joins one.

    Runs its own pygame event loop.  ``args[1]`` may carry an existing
    ClientContext connection; otherwise a default local connection is
    created.  Control is handed to channelRoomGUI on Cancel and to
    waitingRoomWindow on a successful join.
    """
    if len(args) > 1:
        connection = args[1]
    else:
        connection = clientContext.ClientContext('127.0.0.1', 6789, 'Moe')
    pygame.init()
    screen_width = 1300
    screen_height = 900
    # error-banner display duration and server poll interval (milliseconds)
    USER_PASSWORD_MSG_TIME = 5000
    USER_REFRESH_TIME = 2000
    lastRefreshTime = round(time.time() * 1000) - USER_REFRESH_TIME
    # server-message prefixes used to classify replies below
    REFRESH_MSG = 'number of game rooms:'
    PLAYERS_IN_CHANNEL_HEADER = 'players in channel:'
    BADPASSWORD = 'ERROR: Bad password.'
    ERROR = 'ERROR:'
    listOfGamesInChannel = []
    clock = pygame.time.Clock()
    size = width, height = screen_width, screen_height
    screen = pygame.display.set_mode(size)
    # Variable declaration for cursor
    dot_image_file = 'Image/dot.png'
    dot = pygame.image.load(dot_image_file).convert()
    red_dot_image_file = 'Image/reddot.png'
    reddot = pygame.image.load(red_dot_image_file).convert()
    green_dot_image_file = 'Image/greendot.png'
    greendot = pygame.image.load(green_dot_image_file).convert()
    # End variable declaration for cursor.
    WHITE = (255, 255, 255)
    myfont = pygame.font.SysFont("comicsansms", 30)
    mouseJustPressed = 0
    mouseHeld = 0
    mouseJustRelease = 0
    joinButton = button.Button(900, 600, 300, 50, "Join", (0, 255, 0), (255, 0, 255))
    cancelButton = button.Button(900, 700, 300, 50, "Cancel", (0, 255, 0), (255, 0, 255))
    enterPressed = 0
    roomNameTextBox = textBox.TextBox((21 * screen_width) / 32, (1 * screen_height) / 5, 350, 50, '', (255, 255, 255),
                                      (23, 128, 0), '')
    passwordTextBox = textBox.TextBox((21 * screen_width) / 32, (2 * screen_height) / 5, 350, 50, '', (255, 255, 255),
                                      (23, 128, 0), '')
    errorMessageDisplayStart = 0
    printAskForPassword = 0
    printError = 0
    serverConnectionBoxes = textBoxList.TextBoxList([])
    serverConnectionBoxes.addTextbox(roomNameTextBox)
    serverConnectionBoxes.addTextbox(passwordTextBox)
    tryingToJoin = 0
    tryingToJoinRoom = ''
    while 1 == 1:
        # React to user events:
        mouseJustPressed = 0
        mouseJustRelease = 0
        frameHasKeyboardEvent = 0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN:
                if event.button == 1:
                    mouseHeld = 1
                    mouseJustPressed = 1
            elif event.type == MOUSEBUTTONUP:
                if event.button == 1:
                    mouseHeld = 0
                    mouseJustRelease = 1
            elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
                serverConnectionBoxes.dealWithKeyboard(event)
                frameHasKeyboardEvent = 1
                if serverConnectionBoxes.checkIfEnterPressed(event) == 1:
                    enterPressed = 1
        mx, my = pygame.mouse.get_pos()
        if frameHasKeyboardEvent == 0:
            serverConnectionBoxes.handleKeyboardButtonHeldDown()
        if mouseJustRelease == 1:
            serverConnectionBoxes.checkClickForTextBoxes(mx, my, 1)
        serverConnectionBoxes.drawTextBoxes(screen)
        if enterPressed == 0:
            enterPressed = joinButton.updateButtonAndCheckIfPressed(mx, my, mouseJustPressed, mouseJustRelease)
        if enterPressed == 1:
            # send the join request; the result arrives asynchronously below
            tryingToJoinRoom = roomNameTextBox.getCurrentText()
            connection.sendMessageToServer('/join ' + tryingToJoinRoom + ' ' + passwordTextBox.getCurrentText() + '\n')
            enterPressed = 0
            tryingToJoin = 1
        cancelPressed = cancelButton.updateButtonAndCheckIfPressed(mx, my, mouseJustPressed, mouseJustRelease)
        if cancelPressed == 1:
            # back to the channel room window
            channelRoomGUI.main('', ['from joinGameWindow.py', connection])
            cancelPressed = 0
        # END React to user events:
        # printStuff:
        joinButton.printButton(pygame, screen)
        cancelButton.printButton(pygame, screen)
        # printgamename/password labels:
        myfont = pygame.font.SysFont("comicsansms", 25)
        label = myfont.render("room name:", 1, (255, 255, 255))
        screen.blit(label, ((21 * screen_width) / 32, (1 * screen_height) / 5 - 50))
        label = myfont.render("password:", 1, (255, 255, 255))
        screen.blit(label, ((21 * screen_width) / 32, (2 * screen_height) / 5 - 50))
        # End printgamename/password labels.
        # print ERROR MSG:
        if printAskForPassword == 1:
            if round(time.time() * 1000) < errorMessageDisplayStart + USER_PASSWORD_MSG_TIME:
                myfont = pygame.font.SysFont("comicsansms", 25)
                label = myfont.render("Please Enter Password!", 1, (255, 0, 0))
                screen.blit(label, ((21 * screen_width) / 32, (1 * screen_height) / 5 - 150))
            else:
                printAskForPassword = 0
        elif printError == 1:
            if round(time.time() * 1000) < errorMessageDisplayStart + USER_PASSWORD_MSG_TIME:
                myfont = pygame.font.SysFont("comicsansms", 25)
                label = myfont.render("Error trying to join the game.", 1, (255, 0, 0))
                screen.blit(label, ((21 * screen_width) / 32, (1 * screen_height) / 5 - 150))
            else:
                printError = 0
        # END print ERR MSG
        # print open games in channel (row 0 is the header, rest are rooms):
        for i in range(0, len(listOfGamesInChannel)):
            myfont = pygame.font.SysFont("comicsansms", 25)
            label = myfont.render(listOfGamesInChannel[i], 1, (255, 255, 255))
            screen.blit(label, (100, 30 + 30 * i))
            if i > 0:
                if mouseJustRelease == 1:
                    if mx > 100 and mx < 800 and my > 30 + 30 * i and my < 30 + 30 * i + 30:
                        # clicking a room row copies its name into the text box
                        print('BUTTON pressed: ' + str(i))
                        roomNameTextBox.setCurrentText(listOfGamesInChannel[i].split(' ')[1])
            else:
                if mouseJustRelease == 1:
                    if mx > 100 and mx < 800 and my > 30 + 30 * i and my < 30 + 30 * i + 30:
                        print('Pressed number of games message.')
        # End print stuff.
        # React to server messages:
        temp = connection.getNextServerMessageInQueue()
        if temp != '':
            # strip a single leading/trailing newline before classifying
            if temp.endswith('\n'):
                temp = temp[0:-1]
            if temp.startswith('\n'):
                temp = temp[1:]
            # Receive answer from server: (sent /refresh message)...
            # check if you're in game!
            if tryingToJoin == 1 and temp.startswith('Game joined:'):
                # Move to game room as joiner:
                connection.setJoiner()
                # NOTE(review): roomToGameDict is only bound after the first
                # refresh reply is processed below — a join reply arriving
                # before any refresh would raise NameError here; confirm the
                # server message ordering guarantees this cannot happen.
                connection.setCurrentGameName(roomToGameDict[tryingToJoinRoom])
                waitingRoomWindow.main('', ['from joinGameWindow.py', connection])
            if temp.startswith(BADPASSWORD):
                # focus the password box and show the password banner
                serverConnectionBoxes.shiftSelectedToIndex(1)
                printAskForPassword = 1
                printError = 0
                errorMessageDisplayStart = round(time.time() * 1000)
                tryingToJoin = 0
            elif temp.startswith(ERROR):
                printError = 1
                printAskForPassword = 0
                errorMessageDisplayStart = round(time.time() * 1000)
                tryingToJoin = 0
            elif temp.startswith(REFRESH_MSG):
                # rebuild the room list and the room-name -> game-id mapping
                lines = temp.split('\n')
                foundListOfUsers = 0
                roomToGameDict = {};
                listOfGamesInChannel = []
                for line in lines:
                    if foundListOfUsers == 1 and line.strip(' ') == '':
                        break
                    elif foundListOfUsers == 1 or line.startswith(REFRESH_MSG):
                        if line.startswith(REFRESH_MSG):
                            foundListOfUsers = 1
                        listOfGamesInChannel.append(line)
                        roomToGameDict[line.split(' ')[1]] = line.split(' ')[0][0:-1]
                foundListOfUsers = 0
            else:
                print('DEBUG: ' + temp)
        # End React to server messages
        # Ask server for a periodic update.
        if round(time.time() * 1000) - lastRefreshTime > USER_REFRESH_TIME:
            connection.sendMessageToServer("/refresh" + "\n")
            lastRefreshTime = round(time.time() * 1000)
        # end ask server for a periodic update.
        pygame.display.update()
        screen.fill(0)
        clock.tick(30)
if __name__ == "__main__":
    # allow running this window standalone (connects to a local server)
    main('main', sys.argv)
# start server
# python channelRoomGUI.py
|
# -*- coding: utf-8 -*-
from functools import reduce
from . import BoxItem, CylinderItem, EnvelopeItem
class Package(object):
    """Base class for a shipping package composed of items.

    Subclasses implement the format-specific behaviour: adding items,
    computing dimensions, validating against carrier limits and
    serializing for the carrier API.
    """

    FORMAT_BOX = 1
    FORMAT_CYLINDER = 2
    FORMAT_ENVELOPE = 3

    MIN_WEIGHT = 0.005  # minimum billable weight
    MAX_WEIGHT = 30.0

    def __init__(self, format=None):
        # Default to a box when no format code is supplied.
        self.format = format or self.FORMAT_BOX
        self.items = []

    def get_format(self):
        """Return the numeric format code of this package."""
        return self.format

    def is_format(self, format):
        """Return True when this package has the given format code.

        Uses ``==`` instead of ``is``: identity comparison on ints only
        works by accident of CPython's small-int caching.
        """
        return self.format == format

    def add_item(self):
        """Add an item to the package (format-specific)."""
        raise NotImplementedError

    def has_items(self):
        """Return True when at least one item was added."""
        return len(self.items) > 0

    def get_items(self):
        """Return the list of items in this package."""
        return self.items

    def get_dimensions(self):
        """Return the package dimensions (format-specific)."""
        raise NotImplementedError

    def get_weight(self):
        """Return the total item weight, never below MIN_WEIGHT."""
        weight = reduce(lambda s, i: s + i.weight, self.items, 0)
        return weight if weight >= self.MIN_WEIGHT else self.MIN_WEIGHT

    def is_valid(self):
        """Check the package against carrier constraints (format-specific)."""
        raise NotImplementedError

    def api_format(self):
        """Serialize the package for the carrier API (format-specific)."""
        raise NotImplementedError
class BoxPackage(Package):
    """Box-shaped package: items stacked along the smallest dimension."""

    # Carrier limits; MAX_VOLUME is the maximum *sum* of the three
    # dimensions, not a geometric volume -- see is_valid().
    MIN_HEIGHT = 2.0
    MIN_WIDTH = 11.0
    MIN_DEPTH = 16.0
    MAX_HEIGHT = 105.0
    MAX_WIDTH = 105.0
    MAX_DEPTH = 105.0
    MAX_VOLUME = 200.0

    def __init__(self):
        Package.__init__(self,Package.FORMAT_BOX)

    def add_item(self, height, width, depth, weight):
        # Returns None (list.append); kept for interface compatibility.
        return self.items.append(BoxItem(height,width,depth,weight))

    def get_dimensions(self):
        """Estimate the bounding box (height, width, depth) of all items.

        Each item's dimensions are sorted ascending and re-labelled so
        height <= width <= depth, then items are assumed stacked along
        the overall smallest resulting dimension.
        """
        items = []
        for item in self.items:
            # Normalize so height <= width <= depth for every item.
            dimensions = sorted([item.height,item.width,item.depth])
            items.append(BoxItem(
                height=dimensions[0],
                width=dimensions[1],
                depth=dimensions[2],
                weight=item.weight
            ))
        # Envelope of the normalized items, floored at carrier minimums.
        shadow = {
            'height': max(list(map(lambda i: i.height, items)) + [self.MIN_HEIGHT]),
            'width': max(list(map(lambda i: i.width, items)) + [self.MIN_WIDTH]),
            'depth': max(list(map(lambda i: i.depth, items)) + [self.MIN_DEPTH])
        }
        # Stack items along the smallest envelope dimension: that axis
        # becomes the sum of item sizes when larger than the floor.
        dimension = [k for k,v in shadow.items() if v==min(shadow.values())][0]
        accumulator = reduce(lambda s,i: s + getattr(i,dimension),items,0)
        shadow[dimension] = accumulator if accumulator > shadow[dimension] else shadow[dimension]
        return (shadow['height'],shadow['width'],shadow['depth'])

    def is_valid(self):
        height, width, depth = self.get_dimensions()
        # NOTE(review): "volume" here is the sum of the dimensions (a
        # common carrier constraint), not h*w*d -- confirm intended.
        volume = height + width + depth
        weight = self.get_weight()
        return True if height <= self.MAX_HEIGHT and width <= self.MAX_WIDTH \
            and depth <= self.MAX_DEPTH and volume <= self.MAX_VOLUME \
            and weight <= self.MAX_WEIGHT else False

    def api_format(self):
        """Return the carrier-API dict; raises when constraints fail."""
        if self.is_valid():
            height, width, depth = self.get_dimensions()
            return {
                'nCdFormato': self.get_format(),
                'nVlAltura': height,
                'nVlLargura': width,
                'nVlComprimento': depth,
                'nVlPeso': self.get_weight()
            }
        else:
            raise Exception('The current package is not a valid package due to some validation constraints')
class CylinderPackage(Package):
    """Cylinder/roll package: items stacked end to end."""

    # Carrier limits; the combined-size cap is length + 2 * diameter.
    MIN_LENGTH = 18.0
    MIN_DIAMETER = 5.0
    MAX_LENGTH = 105.0
    MAX_DIAMETER = 91.0
    MAX_VOLUME = 200.0

    def __init__(self):
        Package.__init__(self, Package.FORMAT_CYLINDER)

    def add_item(self, length, diameter, weight):
        # Returns None (list.append); kept for interface compatibility.
        return self.items.append(CylinderItem(length, diameter, weight))

    def get_dimensions(self):
        """Return (length, diameter) floored at the carrier minimums.

        Diameter is the widest item; length is the sum of all item
        lengths (items stacked end to end).
        """
        diameter = max(list(map(lambda i: i.diameter, self.items)) + [self.MIN_DIAMETER])
        length = max([reduce(lambda s, i: s + i.length, self.items, 0), self.MIN_LENGTH])
        return (length, diameter)

    def is_valid(self):
        """Check length, diameter, combined size and weight limits."""
        length, diameter = self.get_dimensions()
        volume = length + (2 * diameter)
        weight = self.get_weight()
        return (length <= self.MAX_LENGTH and diameter <= self.MAX_DIAMETER
                and volume <= self.MAX_VOLUME and weight <= self.MAX_WEIGHT)

    def api_format(self):
        """Return the carrier-API dict; raises when constraints fail."""
        if self.is_valid():
            length, diameter = self.get_dimensions()
            return {
                'nCdFormato': self.get_format(),
                'nVlComprimento': length,
                'nVlDiametro': diameter,
                'nVlPeso': self.get_weight()
            }
        else:
            raise Exception('The current package is not a valid package due to some validation constraints')
class EnvelopePackage(Package):
    """Envelope package: flat item, width x length only."""

    # Carrier limits; envelopes have a tighter weight cap.
    MIN_WIDTH = 11.0
    MIN_LENGTH = 16.0
    MAX_WIDTH = 60.0
    MAX_LENGTH = 60.0
    MAX_WEIGHT = 1.0

    def __init__(self):
        Package.__init__(self, Package.FORMAT_ENVELOPE)

    def add_item(self, width, length, weight):
        # Returns None (list.append); kept for interface compatibility.
        return self.items.append(EnvelopeItem(width, length, weight))

    def get_dimensions(self):
        """Return (width, length): largest item, floored at the minimums."""
        width = max(list(map(lambda i: i.width, self.items)) + [self.MIN_WIDTH])
        length = max(list(map(lambda i: i.length, self.items)) + [self.MIN_LENGTH])
        return (width, length)

    def is_valid(self):
        """Check width, length and the envelope-specific weight limit."""
        width, length = self.get_dimensions()
        weight = self.get_weight()
        return (width <= self.MAX_WIDTH and length <= self.MAX_LENGTH
                and weight <= self.MAX_WEIGHT)

    def api_format(self):
        """Return the carrier-API dict; raises when constraints fail."""
        if self.is_valid():
            width, length = self.get_dimensions()
            return {
                'nCdFormato': self.get_format(),
                'nVlAltura': 0.0,
                'nVlLargura': width,
                'nVlComprimento': length,
                'nVlPeso': self.get_weight()
            }
        else:
            raise Exception('The current package is not a valid package due to some validation constraints')
|
from django.contrib import admin
from models import *
from django.contrib import databrowse
from django.forms import ModelForm
from django import forms
class ItemAdmin(admin.ModelAdmin):
    # Admin for catalog items: slug auto-filled from the name, sales and
    # stock figures shown in the changelist, M2M picker for related items.
    prepopulated_fields = {"slug": ("name",)}
    list_display = ('__unicode__', 'amount_sold', 'amount_on_stock')
    filter_horizontal = ('related_items', )

class SubItemAdmin(ItemAdmin):
    # Sub-items include the variant title in the generated slug.
    prepopulated_fields = {"slug": ("name", "variant_title")}

admin.site.register(MainItem, ItemAdmin)
admin.site.register(SubItem, SubItemAdmin)
admin.site.register(SubItemType)
class CategoryAdmin(admin.ModelAdmin):
    # Categories: searchable by name, filterable by site, auto slug.
    list_display = ('name', 'sites_display')
    list_filter = ('sites',)
    search_fields = ['name']
    prepopulated_fields = {"slug": ("name",)}

admin.site.register(Category, CategoryAdmin)
admin.site.register(Fee)
admin.site.register(OrderFee)
# Inlines shown on the Order change page.
class OrderPaymentInline(admin.TabularInline):
    model = OrderPayment
    extra = 0
    template = 'admin/catalog/inline.html'

class OrderLineInline(admin.TabularInline):
    model = OrderLine
    fields = ('name', 'amount', 'price')

class OrderFailedPaymentInline(admin.TabularInline):
    model = OrderFailedPayment
    extra = 0
    template = 'admin/catalog/inline.html'

class OrderMessageInline(admin.TabularInline):
    model = OrderMessage
    extra = 0
    template = 'admin/catalog/inline.html'
    fields = ('message', 'is_send', 'received_error', 'date')

class OrderAdmin(admin.ModelAdmin):
    # Order changelist with payment/delivery status and all line/payment/
    # message inlines attached.
    list_display = ('__unicode__', 'paid', 'delivered', 'get_full_name', 'get_payments', 'total')
    inlines = [OrderLineInline, OrderMessageInline, OrderPaymentInline, OrderFailedPaymentInline]

admin.site.register(Order, OrderAdmin)
class DeliveryLineModelForm(ModelForm):
    """Inline form limiting selectable order lines to the delivery's order.

    When the order cannot be determined yet -- e.g. on the "add" page
    before the delivery is saved -- the line fields are hidden and a
    hint label is shown instead.
    """

    def __init__(self, *args, **kwargs):
        super(DeliveryLineModelForm, self).__init__(*args, **kwargs)
        try:
            if 'instance' in kwargs:
                # Editing an existing line: take the order from it.
                order = kwargs['instance'].orderline.order
            else:
                # New line: derive the order from the first real choice
                # of the delivery selector (index 0 is the empty choice).
                order = tuple(i[0] for i in self.fields['delivery'].widget.choices)[1]
                order = Delivery.objects.get(pk=order)
                order = order.order
            self.fields['orderline'].queryset = OrderLine.objects.filter(order=order)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt propagate. The "add" page lands here
            # (DoesNotExist / IndexError) before a delivery exists.
            self.fields["amount"] = forms.CharField(
                widget=forms.HiddenInput,
                required=False,
                label='Choose order and save delivery before adding lines')
            self.fields['orderline'] = forms.CharField(
                widget=forms.HiddenInput,
                required=False,
                label='')
class DeliveryLineInline(admin.TabularInline):
    # Uses the custom form so only lines of the delivery's order appear.
    form = DeliveryLineModelForm
    model = DeliveryLine

class DeliveryAdmin(admin.ModelAdmin):
    inlines = [DeliveryLineInline]

admin.site.register(Delivery, DeliveryAdmin)
databrowse.site.register(Order)
databrowse.site.register(OrderPayment)
import torch
import network
from TorchSUL import Model as M
import config
import numpy as np
import util
import cv2
import testutil
# import visutil2
# from pycocotools.cocoeval import COCOeval
# from pycocotools.coco import COCO
def vis_skeleton(img, pts):
    """Draw keypoints with confidence > 0.1 as filled green dots on img."""
    keypoints = pts[:, :3]
    for kp in keypoints:
        x_coord, y_coord, confidence = kp
        if confidence > 0.1:
            cv2.circle(img, (int(x_coord), int(y_coord)), 3, (0, 255, 0), -1)
    return img
def test_img(img, model):
    """Run the pose pipeline on the full image and on 2x/4x crops, then NMS.

    Returns (pts, scores) after merging detections from all scales.
    """
    pts_all = []
    scores_all = []
    # Full-image pass.
    pts, scores, _, _, _ = testutil.run_pipeline(img, model)
    pts_all += pts
    scores_all += scores
    # Crop passes: the ratio-2 and ratio-4 sections were previously
    # duplicated inline; one loop keeps them in sync. Points from each
    # crop are mapped back to full-image coordinates.
    for ratio in (2, 4):
        crops, metas = util.crop_images(img, ratio)
        for crop, meta in zip(crops, metas):
            pts, scores, _, _, _ = testutil.run_pipeline(crop, model)
            pts = util.restore_pts(pts, meta)
            pts_all += pts
            scores_all += scores
    # Merge duplicate detections across scales.
    pts, scores = util.nms(pts_all, scores_all)
    return pts, scores
# initialize
# Build the detection network from config hyper-parameters.
model = network.DensityNet(config.density_num_layers, config.density_channels, config.density_level,\
    config.gcn_layers, config.gcn_channels, config.head_layernum, config.head_chn, config.upsample_layers, config.upsample_chn)
# coco = COCO('person_keypoints_val2017.json')
# ids = list(coco.imgs.keys())
with torch.no_grad():
    # Dummy forward pass to instantiate lazily-built layers before
    # restoring checkpoint weights.
    x = np.float32(np.random.random(size=[1,3,config.inp_size,config.inp_size]))
    x = torch.from_numpy(x)
    model(x)
    M.Saver(model).restore('model_coco/')
    model.eval()
    model.cuda()
    # Single-image smoke test.
    imgname = '000000410650.jpg'
    img = cv2.imread(imgname)
    pts, scores = test_img(img, model)
    # results = {}
    # for i in tqdm(ids):
    #     fname = './val2017/%012d.jpg'%i
    #     img = cv2.imread(fname)
    #     pts, scores = test_img(img, model)
    #     results[i] = [pts, scores]
    # pickle.dump(results, open('coco_results.pkl', 'wb'))
|
#!/usr/bin/env python
#encoding:utf-8
# -*- coding: utf-8 -*-
import logging
import os
import os.path
import sys
import subprocess
import _subprocess
import string
import logging
import time
import shutil
import codecs
import ConfigParser
from RenderBase import RenderBase
class MayaPre(RenderBase):
    # Pre-render step for Maya jobs: reads the render config, mounts
    # network shares and unpacks the scene archive with 7-Zip.
    # NOTE: Python 2 / Windows-only code (`_subprocess`, print statements).

    def __init__(self,**paramDict):
        RenderBase.__init__(self,**paramDict)

    def readRenderCfg(self):
        # Parse render.cfg, trying UTF-16 first, then UTF-8, then the
        # platform default encoding.
        self.G_PROCESS_LOG.info('[Max.readRenderCfg.start.....]'+self.G_RENDER_WORK_TASK_CFG)
        renderCfg=os.path.join(self.G_RENDER_WORK_TASK_CFG,'render.cfg').replace('/','\\')
        self.RENDER_CFG_PARSER = ConfigParser.ConfigParser()
        try:
            self.G_PROCESS_LOG.info('read rendercfg by utf16')
            self.RENDER_CFG_PARSER.readfp(codecs.open(renderCfg, "r", "UTF-16"))
        except Exception, e:
            self.G_PROCESS_LOG.info(e)
            try:
                self.G_PROCESS_LOG.info('read rendercfg by utf8')
                self.RENDER_CFG_PARSER.readfp(codecs.open(renderCfg, "r", "UTF-8"))
            except Exception, e:
                self.G_PROCESS_LOG.info(e)
                self.RENDER_CFG_PARSER.readfp(codecs.open(renderCfg, "r"))
                self.G_PROCESS_LOG.info('read rendercfg by default')
        #self.RENDER_CFG_PARSER.read(renderCfg)
        self.G_PROCESS_LOG.info('[maya.readRenderCfg.end.....]')

    def getPackFile(self):
        # Resolve archive path and the scene file expected inside it.
        # NOTE(review): guards on option 'cgFile' but reads 'render_file'
        # -- confirm both options are always present together.
        if self.RENDER_CFG_PARSER.has_option('common','cgFile'):
            self.packFile=self.RENDER_CFG_PARSER.get('common','render_file').replace("/","\\")
            self.cgFileName=os.path.basename(self.RENDER_CFG_PARSER.get('common','cgFile'))
            self.cgFilePath=os.path.dirname(self.packFile)
            self.cgFile=os.path.join(self.cgFilePath,self.cgFileName)
        self.G_PROCESS_LOG.info('[maya.readRenderCfg.end.....]')

    def mountFrom(self):
        # Map network drives listed in the cfg's mountFrom dict via
        # `net use`, relative to the project root path.
        mountFrom=self.RENDER_CFG_PARSER.get('common','mountFrom')
        # NOTE(review): eval() on config text -- trusted input assumed.
        s=eval(mountFrom)
        projectPath = self.G_PATH_INPUTPROJECT[0:self.G_PATH_INPUTPROJECT.index(self.G_USERID_PARENT)-1]
        for key in s.keys():
            if not os.path.exists(s[key]):
                cmd='try3 net use '+s[key]+' '+projectPath.replace('/','\\')+key.replace('/','\\')
                self.G_PROCESS_LOG.info(cmd)
                self.RBcmd(cmd)

    def RBrender(self):#7
        # Unpack the scene archive unless an identical file already exists.
        self.G_PROCESS_LOG.info('[MayaPre.RBrender.start.....]')
        filePath = ""
        result = 0
        self.exe = "C:\\7-Zip\\7z.exe"
        if os.path.exists(self.cgFile):
            # Compare archive entry vs extracted file (mtime + size).
            # NOTE(review): is_same() returns 1 or None, never 0, so a
            # differing file makes result None and skips the unpack --
            # confirm whether that is intended.
            result = self.is_same(self.packFile,self.cgFile)
        if result==0:
            unpackCmd=self.exe +' e "'+self.packFile+'" -o"'+self.cgFilePath+'" -y'
            self.RBcmd(unpackCmd,False,False)
        else:
            self.G_PROCESS_LOG.info('[fileExist.....]')
        self.G_PROCESS_LOG.info('[MayaPre.RBrender.end.....]')

    def run_command(self,cmd):
        # Generator: run cmd with a hidden console window and yield
        # stdout lines until the process exits.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = _subprocess.SW_HIDE
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, startupinfo=startupinfo)
        while 1:
            #returns None while subprocess is running
            return_code = p.poll()
            if return_code == 0:
                break
            # elif return_code == 1:
            #     raise Exception(cmd + " was terminated for some reason.")
            elif return_code != None:
                print "exit return code is: " + str(return_code)
                break
                # raise Exception(cmd + " was crashed for some reason.")
            line = p.stdout.readline()
            yield line

    def get_zip_info(self, zip_file):
        # Parse `7z l -slt` output into a dict of field -> value.
        # The bare dict literal below is a no-op expression kept as an
        # example of the fields 7-Zip reports.
        {'Attributes': 'A',
         'Block': '0',
         'Blocks': '1',
         'CRC': '836CB95D',
         'Encrypted': '-',
         'Headers Size': '138',
         'Method': 'LZMA2:20',
         'Modified': '2015-03-28 15:59:26',
         'Packed Size': '29191866',
         'Path': 'M02_P04_S046.mb',
         'Physical Size': '29192004',
         'Size': '138382876',
         'Solid': '-',
         'Type': '7z'}
        cmd = "\"%s\" l -slt \"%s\"" % (self.exe, zip_file)
        print cmd
        result = {}
        for line in self.run_command(cmd):
            if "=" in line:
                line_split = [i.strip() for i in line.strip().split("=")]
                result[line_split[0]] = line_split[1]
        return result

    def is_same(self, zip_file, src):
        # Return 1 when the archived entry matches src by mtime and
        # size; otherwise falls through and returns None.
        if os.path.exists(zip_file) and os.path.exists(src):
            zip_info = self.get_zip_info(zip_file)
            z_time = zip_info["Modified"]
            z_size = zip_info["Size"]
            f_time = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(src)))
            f_size = str(os.path.getsize(src))
            if z_time == f_time and z_size == f_size:
                return 1

    def RBexecute(self):#Render
        # Orchestrates the whole pre-render sequence.
        #self.RBBackupPy()
        self.RBinitLog()
        self.G_RENDER_LOG.info('[MayaPre.RBexecute.start.....]')
        self.RBprePy()
        self.RBcopyTempFile()
        self.RBreadCfg()
        #self.RBhanFile()
        self.RBrenderConfig()
        self.readRenderCfg()
        self.delNetUse()
        self.mountFrom()
        self.getPackFile()
        self.RBrender()
        #self.RBresultAction()
        #self.RBpostPy()
        self.G_RENDER_LOG.info('[MayaPre.RBexecute.end.....]')
import csv
from collections import defaultdict, Counter
import operator
# Per-language counters, accumulated across calls to highest_freq().
lang_info={"NA": 0, "Assembly": 0, "Bash/Shell/PowerShell": 0, "C": 0, "C++": 0, "C#": 0, "Clojure": 0, "Dart": 0, "Elixir": 0, "Erlang": 0, "F#": 0, "Go": 0, "HTML/CSS": 0, "Java": 0, "JavaScript": 0, "Kotlin": 0, "Objective-C": 0, "PHP": 0, "Python": 0, "R": 0, "Ruby": 0, "Rust": 0, "Scala": 0, "SQL": 0, "Swift": 0, "TypeScript": 0, "VBA": 0, "WebAssembly": 0, "Other(s):": 0}

def highest_freq(lst):
    """Count each language name in lst and return the current leader.

    BUGFIX: the old nested loop iterated the *characters* of each name,
    incrementing the counter len(name) times per occurrence and skewing
    results toward long names. Each name is now counted exactly once;
    names not present in lang_info are ignored instead of raising
    KeyError.
    """
    for language in lst:
        if language in lang_info:
            lang_info[language] += 1
    # max over (name, count) pairs by count -> leader's name.
    return max(lang_info.items(), key=operator.itemgetter(1))[0]
# Tally every respondent's desired languages; the last return value of
# highest_freq is the overall leader (counters are cumulative).
with open('developer_survey_2019/survey_results_public.csv') as f:
    csv_reader = csv.DictReader(f)
    for lines in csv_reader:
        languages = lines['LanguageDesireNextYear'].split(';')
        # Fixed local-name typo ("langugae"); removed unused total_count.
        most_desired_language = highest_freq(languages)
    # Uncomment below line to see count of each language from the survey
    # print(lang_info)
    print('Most Desired Language of 2020 is:')
    print(f'\t{most_desired_language}')
from ajouterWidget import Ui_AjouterWidget
from PyQt5.QtCore import pyqtSlot
class Ajouter(Ui_AjouterWidget):
    # "Add" screen: wires the generated Qt UI and enables the save
    # button only when every line edit is filled and the date is valid.

    def __init__(self, centredWidget, db):
        super(Ajouter, self).__init__()
        self.setupUi(centredWidget)
        # Hide widgets that are only shown on demand.
        self.widget.close()
        self.labelAttention.close()
        self.pushButtonSauvgarder.setEnabled(False)
        # Re-validate whenever any of these fields changes.
        self.lineEditWidgets = (self.lineEditNom,self.lineEditPrenom,self.lineEditTelephone)
        for widget in self.lineEditWidgets:
            widget.textChanged.connect(self.EnableSauvgarder)
        self.dateTimeEdit.dateTimeChanged.connect(self.dateTimeValide)
        self.db = db
        self.cursor = db.cursor()

    def EnableSauvgarder(self):
        # Enable save only when every line edit is non-empty, then
        # apply the date validation on top.
        for widget in self.lineEditWidgets:
            if widget.text() == "":
                self.pushButtonSauvgarder.setEnabled(False)
                return
        self.pushButtonSauvgarder.setEnabled(True)
        self.dateTimeValide(self.dateTimeEdit.dateTime())

    def dateTimeValide(self, dateTime):
        # Toggle save button / warning label based on the picked date.
        # NOTE(review): requires day > 10 (not >= 1) -- looks like an
        # off-by-intent condition; confirm the minimum allowed date.
        if dateTime.date().day()>10 and dateTime.date().month()>0 and dateTime.date().year()>2016:
            self.pushButtonSauvgarder.setEnabled(True)
            self.labelAttention.close()
        else :
            self.pushButtonSauvgarder.setEnabled(False)
            self.labelAttention.show()
|
import numpy as np
import pandas as pd
from datetime import datetime
from pandas_datareader import DataReader
# Demo: random DataFrame, then fetch one year of AAPL quotes.
rand_array = np.random.rand(3, 3)
df = pd.DataFrame(data=rand_array, columns=['a', 'b', 'c'])
print(df)
# Period to fetch: one year back from today.
end = datetime.now()
start = datetime(end.year - 1, end.month, end.day)
print(end)
# NOTE(review): the 'yahoo' backend of pandas-datareader has been
# unreliable/deprecated -- confirm it works with the installed version.
AAPL = DataReader('AAPL', 'yahoo', start, end)
# print(AAPL)
# AAPL.info()
# print(AAPL['Adj Close'].plot(legend=True, figsize=(10, 4)))
"""Localhost client mocked components."""
import logging
import re
from splitio.storage import ImpressionStorage, EventStorage, TelemetryStorage
# Parsers for the legacy "feature treatment" split-definition file format.
_LEGACY_COMMENT_LINE_RE = re.compile(r'^#.*$')
_LEGACY_DEFINITION_LINE_RE = re.compile(r'^(?<![^#])(?P<feature>[\w_-]+)\s+(?P<treatment>[\w_-]+)$')
_LOGGER = logging.getLogger(__name__)
class LocalhostImpressionsStorage(ImpressionStorage):
    """Impression storage that doesn't cache anything.

    No-op implementation used when the client runs in localhost mode.
    """
    def put(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def pop_many(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def clear(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
class LocalhostEventsStorage(EventStorage):
    """Events storage that doesn't cache anything.

    (Docstring previously said "Impression storage" -- copy-paste fix.)
    """
    def put(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def pop_many(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def clear(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
class LocalhostTelemetryStorage(TelemetryStorage):
    """Telemetry storage that doesn't cache anything.

    (Docstring previously said "Impression storage" -- copy-paste fix.)
    """
    def inc_latency(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def inc_counter(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def put_gauge(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def pop_latencies(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def pop_counters(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def pop_gauges(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
    def clear(self, *_, **__):  # pylint: disable=arguments-differ
        """Accept any arguments and do nothing."""
        pass
|
#!/usr/bin/env python
import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
import argparse
def spcgrp_props(space_group):
    """Map a space-group number to its crystal-system plot properties.

    Returns (marker, color, label, marker_size, zorder, fillstyle) for
    the crystal system containing the given space-group number.
    """
    # Guard clauses ordered by increasing space-group number; each bound
    # is exclusive ("< limit").
    if space_group < 3:
        return '.', '#000000', 'Triclinic', 2, 2, 'full'
    if space_group < 16:
        return '>', '#00aaaa', 'Monoclinic', 3, 8, 'full'
    if space_group < 75:
        return 'o', '#0000ff', 'Orthorhombic', 3, 9, 'none'
    if space_group < 143:
        return '>', '#00aaaa', 'Tetragonal', 4, 8, 'full'
    if space_group < 168:
        return 'v', '#00ff00', 'Trigonal', 6, 8, 'none'
    if space_group < 195:
        return '^', '#ff00ff', 'Hexagonal', 8, 8, 'none'
    return '*', '#ff0000', 'Cubic', 10, 10, 'full'
def formation_energy(energy, x, energy_left, energy_right):
    """Energy relative to the linear mix of the end members at ratio x."""
    reference = (1 - x) * energy_left + x * energy_right
    return energy - reference
def create_convex(bottom, top, energy_left, energy_right, input, output):
    """Plot the convex hull of formation energies and save it to *output*.

    Parameters
    ----------
    bottom, top : float or None
        y-axis limits; auto-scaled when None.
    energy_left, energy_right : float or None
        Per-atom reference energies of the pure end members; when None,
        taken from the lowest-energy ratio-0 / ratio-1 entries.
    input : str
        JSON file with entries holding 'ratio', 'energy_pa', 'spcgrp'
        and 'formula'. (Name shadows the builtin; kept for API
        compatibility.)
    output : str
        Output image path.
    """
    if not os.path.isfile(input):
        # BUGFIX: previously only printed and then crashed inside
        # json.load anyway; fail fast with a clear exit code instead.
        print('File not found %s' % input)
        sys.exit(1)
    # Context manager so the file handle is not leaked.
    with open(input) as handle:
        data = json.load(handle)
    plt.figure(figsize=(11, 8.5))
    # Reference energies: lowest energy_pa among the pure end members,
    # unless explicitly provided by the caller.
    for idata in data:
        if idata['ratio'] == 0:
            if energy_left is None or energy_left > idata['energy_pa']:
                energy_left = idata['energy_pa']
                print('Energy per atom for %2s: %9.3f' % (idata['formula'], energy_left))
        elif idata['ratio'] == 1:
            if energy_right is None or energy_right > idata['energy_pa']:
                energy_right = idata['energy_pa']
                print('Energy per atom for %2s: %9.3f' % (idata['formula'], energy_right))
    if energy_left is None or energy_right is None:
        print('Pure elements not found, formation energy cannot be computed')
        sys.exit(1)
    points = []
    for idata in data:
        x = idata['ratio']
        spcgrp = idata['spcgrp']
        marker, color, lab, m, z, fs = spcgrp_props(spcgrp)
        # Compute the formation energy once (was computed twice before).
        y = formation_energy(idata['energy_pa'], x, energy_left, energy_right)
        plt.plot(x, y, marker=marker, ms=m, color=color, fillstyle=fs, zorder=z)
        points.append([x, y])
    # Pure end members sit at zero formation energy by construction.
    points.append([0.0, 0.0])
    points.append([1.0, 0.0])
    points = np.array(points)
    hull = ConvexHull(points)
    for simplex in hull.simplices:
        # Draw only the lower hull; skip the flat 0-0 top edge between
        # the two end members.
        if points[simplex, 1][0] <= 0.0 and points[simplex, 1][1] <= 0.0:
            if points[simplex, 1][0] == 0.0 and points[simplex, 1][1] == 0.0:
                continue
            plt.plot(points[simplex, 0], points[simplex, 1], 'k-', zorder=1)
    ylims = plt.ylim()
    if bottom is None:
        bottom = ylims[0]
    if top is None:
        top = ylims[1]
    # Off-canvas points exist only to populate the legend with one
    # representative entry per crystal system.
    for spcgrp in [15, 74, 142, 167, 194, 230]:
        marker, color, lab, m, z, fs = spcgrp_props(spcgrp)
        print('Marker for %12s: %s' % (lab, marker))
        plt.plot(-100, -100, marker, ms=m, fillstyle=fs, color=color, label=lab)
    plt.xlim(-0.05, 1.05)
    print('Limits', bottom, top)
    plt.ylim(bottom, top)
    plt.legend(loc=9, prop={'size': 10}, numpoints=1)
    plt.subplots_adjust(left=0.12, bottom=0.13, right=0.98, top=0.96, wspace=None, hspace=None)
    plt.xlabel(r'Composition balance')
    plt.ylabel(r'Formation Energy [eV]')
    plt.savefig(output)
    return plt.gcf()
if __name__ == "__main__":
    description = """Collect structures from several databases for plotting convex hulls"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-t', '--top',
                        default=None, metavar='ymax', type=float,
                        help='Maximum value of formation energy')
    parser.add_argument('-b', '--bottom',
                        default=None, metavar='ymax', type=float,
                        help='Minimum value of formation energy')
    parser.add_argument('-l', '--left_energy_pa',
                        default=None, metavar='energy_pa', type=float,
                        help='Energy per atom of left specie')
    parser.add_argument('-r', '--right_energy_pa',
                        default=None, metavar='energy_pa', type=float,
                        help='Energy per atom right specie')
    parser.add_argument('-i', '--input',
                        default='convex.json', metavar='convex.json', type=str,
                        help='Input file for the Convex Hull (JSON file)')
    parser.add_argument('-o', '--output',
                        default='convex.pdf', metavar='convex.pdf', type=str,
                        help='Output file (default: convex.pdf)')
    args = parser.parse_args()
    # Show usage and bail out when the input file is missing.
    if not os.path.isfile(args.input):
        parser.print_help()
        exit(1)
    print(args)
    create_convex(args.bottom, args.top, args.left_energy_pa, args.right_energy_pa, args.input, args.output)
|
localrules: pureclip

def make_pureclip_input():
    """Target bed paths for the pureclip rule, one per input directory.

    Expects directories under input/ that contain bed files in
    subdirectories named signal and control.
    """
    out_dirs = [d.replace('input/', 'output/pureclip/') for d in get_input_dirs()]
    return [re.sub(r'$', '_pureclip_sites.bed', d) for d in out_dirs]
def make_pureclip_onlysignal_input():
    """Target bed paths for the pureclip_onlysignal rule.

    Expects directories under input/ that contain bed files in
    subdirectories named signal and control.
    """
    out_dirs = [d.replace('input/', 'output/pureclip_onlysignal/') for d in get_input_dirs()]
    return [re.sub(r'$', '_pureclip_sites.bed', d) for d in out_dirs]
def make_pureclip_bam_fmt_input():
    """Target bed paths for the pureclip_bam_fmt rule.

    Expects directories under input/ that contain bed files in
    subdirectories named signal and control.
    """
    out_dirs = [d.replace('input/', 'output/pureclip_bam_fmt/') for d in get_input_dirs()]
    return [re.sub(r'$', '_pureclip_sites.bed', d) for d in out_dirs]
def make_pureclip_onlysignal_bam_fmt_input():
    """Target bed paths for the pureclip_onlysignal_bam_fmt rule.

    Expects directories under input/ that contain bed files in
    subdirectories named signal and control.
    """
    out_dirs = [d.replace('input/', 'output/pureclip_onlysignal_bam_fmt/') for d in get_input_dirs()]
    return [re.sub(r'$', '_pureclip_sites.bed', d) for d in out_dirs]
# Aggregate targets: building any of these requests every per-sample
# *_pureclip_sites.bed produced by the matching *_impl rule below.
rule pureclip:
    input:
        make_pureclip_input()

rule pureclip_onlysignal:
    input:
        make_pureclip_onlysignal_input()

rule pureclip_bam_fmt:
    input:
        make_pureclip_bam_fmt_input()

rule pureclip_onlysignal_bam_fmt:
    input:
        make_pureclip_onlysignal_bam_fmt_input()
rule pureclip_impl:
input:
sig_bam = 'output/pureclip/{id}/signal.bam',
sig_bai = 'output/pureclip/{id}/signal.bam.bai',
ctl_bam = 'output/pureclip/{id}/control.bam',
ctl_bai = 'output/pureclip/{id}/control.bam.bai',
output:
sites = 'output/pureclip/{id}_pureclip_sites.bed',
regions = 'output/pureclip/{id}_pureclip_regions.bed',
log:
'log/pureclip/{id}_pureclip.log',
params:
genome = '~/genomes/{}.fa'.format(config['genome']),
conda:
'../envs/pureclip.yaml',
# use 20, 40, 80, 160 GB of RAM
resources:
vmem = lambda wildcards, attempt: math.ceil(10*(2**(attempt)))
threads: 1
shell:
'pureclip '
'-i {input.sig_bam} -bai {input.sig_bai} '
'-ibam {input.ctl_bam} -ibai {input.ctl_bai} '
'-g {params.genome} '
'-iv "chr1;chr2;chr3;" '
'-nt {threads} -nta {threads} '
'-o {output.sites} '
'-ld '
'-or {output.regions} 2>&1 > {log}; '
rule pureclip_onlysignal_impl:
input:
sig_bam = 'output/pureclip_onlysignal/{id}/signal.bam',
sig_bai = 'output/pureclip_onlysignal/{id}/signal.bam.bai',
output:
sites = 'output/pureclip_onlysignal/{id}_pureclip_sites.bed',
regions = 'output/pureclip_onlysignal/{id}_pureclip_regions.bed',
log:
'log/pureclip_onlysignal_bam_fmt/{id}_pureclip.log',
params:
genome = '~/genomes/{}.fa'.format(config['genome']),
conda:
'../envs/pureclip.yaml',
# use 20, 40, 80, 160 GB of RAM
resources:
vmem = lambda wildcards, attempt: math.ceil(10*(2**(attempt)))
threads: 1
shell:
'pureclip '
'-i {input.sig_bam} -bai {input.sig_bai} '
'-g {params.genome} '
'-iv "chr1;chr2;chr3;" '
'-ld '
'-nt {threads} -nta {threads} '
'-o {output.sites} '
'-or {output.regions} 2>&1 > {log}; '
rule pureclip_bam_fmt_impl:
input:
sig_bam = 'output/pureclip_bam_fmt/{id}/signal.bam',
sig_bai = 'output/pureclip_bam_fmt/{id}/signal.bam.bai',
ctl_bam = 'output/pureclip_bam_fmt/{id}/control.bam',
ctl_bai = 'output/pureclip_bam_fmt/{id}/control.bam.bai',
output:
sites = 'output/pureclip_bam_fmt/{id}_pureclip_sites.bed',
regions = 'output/pureclip_bam_fmt/{id}_pureclip_regions.bed',
log:
'log/pureclip_bam_fmt/{id}_pureclip.log',
params:
genome = '~/genomes/{}.fa'.format(config['genome']),
conda:
'../envs/pureclip.yaml',
# use 20, 40, 80, 160 GB of RAM
resources:
vmem = lambda wildcards, attempt: math.ceil(10*(2**(attempt)))
threads: 1
shell:
'pureclip '
'-i {input.sig_bam} -bai {input.sig_bai} '
'-ibam {input.ctl_bam} -ibai {input.ctl_bai} '
'-g {params.genome} '
'-iv "chr1;chr2;chr3;" '
'-ld '
'-nt {threads} -nta {threads} '
'-o {output.sites} '
'-or {output.regions} 2>&1 > {log}; '
rule pureclip_onlysignal_bam_fmt_impl:
input:
sig_bam = 'output/pureclip_onlysignal_bam_fmt/{id}/signal.bam',
sig_bai = 'output/pureclip_onlysignal_bam_fmt/{id}/signal.bam.bai',
output:
sites = 'output/pureclip_onlysignal_bam_fmt/{id}_pureclip_sites.bed',
regions = 'output/pureclip_onlysignal_bam_fmt/{id}_pureclip_regions.bed',
log:
'log/pureclip_onlysignal_bam_fmt/{id}_pureclip.log',
params:
genome = '~/genomes/{}.fa'.format(config['genome']),
conda:
'../envs/pureclip.yaml',
# use 20, 40, 80, 160 GB of RAM
resources:
vmem = lambda wildcards, attempt: math.ceil(10*(2**(attempt)))
threads: 1
shell:
'pureclip '
'-i {input.sig_bam} -bai {input.sig_bai} '
'-g {params.genome} '
'-iv "chr1;chr2;chr3;" '
'-ld '
'-nt {threads} -nta {threads} '
'-o {output.sites} '
'-or {output.regions} 2>&1 > {log}; '
# Build PureCLIP input BAMs by concatenating per-replicate bed files,
# padding intervals by 10 bp and converting to a sorted BAM.
rule pureclip_combine_bed_to_bam:
    input:
        dir = 'input/{id}/{sigtype}',
        limits = lambda wildcards: "{}.limits".format(config["genome"]),
    output:
        combined_bam = 'output/pureclip/{id}/{sigtype}.bam',
    conda:
        '../envs/bedtobam.yaml'
    shell:
        'cat {input.dir}/*.bed | '
        'sort -k1,1 -k2,2n | '
        'bedtools slop -b 10 -g {input.limits} -i - | '
        'bedtools bedtobam -i - -g {input.limits} | '
        'samtools sort > {output.combined_bam}; '

# NOTE(review): unlike the rule above, this variant has no coordinate
# sort before slop -- confirm whether that is intentional.
rule pureclip_onlysignal_combine_bed_to_bam:
    input:
        dir = 'input/{id}/signal',
        limits = lambda wildcards: "{}.limits".format(config["genome"]),
    output:
        combined_bam = 'output/pureclip_onlysignal/{id}/signal.bam',
    conda:
        '../envs/bedtobam.yaml'
    shell:
        'cat {input.dir}/*.bed | '
        'bedtools slop -b 10 -g {input.limits} -i - | '
        'bedtools bedtobam -i - -g {input.limits} | '
        'samtools sort > {output.combined_bam}; '

# BAM-input variants: merge per-replicate BAMs, then keep only reads
# matching flags 0x42 (-f 66 = proper pair + first in pair).
rule pureclip_onlysignal_combine_bam_filter_fmt:
    input:
        dir = 'input/{id}/signal',
        limits = lambda wildcards: "{}.limits".format(config["genome"]),
    output:
        combined_bam = 'output/pureclip_onlysignal_bam_fmt/{id}/signal.bam',
    params:
        merged_bam = 'output/pureclip_onlysignal_bam_fmt/{id}/merged.bam',
    conda:
        '../envs/bedtobam.yaml'
    shell:
        'samtools merge -f -u {params.merged_bam} {input.dir}/*.bam; '
        'samtools index {params.merged_bam}; '
        'samtools view -hb -f 66 -o {output.combined_bam} {params.merged_bam}; '

rule pureclip_combine_bam_filter_fmt:
    input:
        dir = 'input/{id}/{sourcedir}',
        limits = lambda wildcards: "{}.limits".format(config["genome"]),
    output:
        combined_bam = 'output/pureclip_bam_fmt/{id}/{sourcedir}.bam',
    params:
        merged_bam = 'output/pureclip_bam_fmt/{id}/{sourcedir}_merged.bam',
    conda:
        '../envs/bedtobam.yaml'
    shell:
        'samtools merge -f -u {params.merged_bam} {input.dir}/*.bam; '
        'samtools index {params.merged_bam}; '
        'samtools view -hb -f 66 -o {output.combined_bam} {params.merged_bam}; '
|
import pandas as pd

# Toy profit/loss balancing: negative accounts are topped up to a
# threshold by their parent; surpluses above 200 flow up to the parent.
df = pd.DataFrame({'name':['A','B','C','D','E','F','G','H','I','J','K','L'],
                   'type':[0,0,1,1,1,2,2,2,2,2,2,3],
                   'level':[None, None,1,2,3,None, None, None, None, None, None,None],
                   'pl':[100,150,200,100,300,50,150,-200,100,100,-150,100],
                   'transfer':[0,0,0,0,0,0,0,0,0,0,0,0],
                   'parent':[None, 'C','A','C','D','A','B','D','E','D','E','J']})
total = df['pl'].sum()
half = total / 2
avg = total / 12
minimum, maximum = 0, 200  # typo fix: was "mininum, maxum"
df_transfer = pd.DataFrame(columns=['from', 'to', 'value'])

# Process deepest account types first.
df.sort_values(by='type', ascending=False, inplace=True)
for i, row in df.iterrows():
    arr = [row['name']]
    print(row['name'])
    # Target balance each account is adjusted towards.
    if avg >= maximum:
        threshold = maximum
    else:
        threshold = 50
    print(f'total:{total},threshold:{threshold}')
    # Walk up the parent chain, transferring along the way.
    while len(arr) > 0:
        name = arr.pop(0)
        parent = df.loc[df['name'] == name, 'parent'].values[0]
        pl = df.loc[df['name'] == name, 'pl'].values[0]
        if parent is not None:  # was "!= None"
            if ',' in parent:
                # Two parents: enqueue both, transact with the richer one.
                p1, p2 = parent.split(',')
                arr.append(p1)
                arr.append(p2)
                if df.loc[df['name'] == p1, 'pl'].values[0] >= df.loc[df['name'] == p2, 'pl'].values[0]:
                    parent = p1
                else:
                    parent = p2
            else:
                arr.append(parent)
        if pl > 0 and pl < 200:
            pass  # within band: nothing to do
            # print(f'{name}:pass')
        if pl < 0 and parent is not None:
            # Parent covers the shortfall up to the threshold.
            ask = threshold - pl
            df.loc[df['name'] == name, 'pl'] = ask + pl
            df.loc[df['name'] == parent, 'pl'] = df.loc[df['name'] == parent, 'pl'].values[0] - ask
            new_row = {'from': parent, 'to': name, 'value': ask}
            # DataFrame.append was removed in pandas 2.0; use concat.
            df_transfer = pd.concat([df_transfer, pd.DataFrame([new_row])], ignore_index=True)
        if pl > 200 and parent is not None:
            # Surplus above the threshold flows up to the parent.
            offer = pl - threshold
            df.loc[df['name'] == name, 'pl'] = pl - offer
            df.loc[df['name'] == parent, 'pl'] = df.loc[df['name'] == parent, 'pl'].values[0] + offer
            new_row = {'from': name, 'to': parent, 'value': offer}
            df_transfer = pd.concat([df_transfer, pd.DataFrame([new_row])], ignore_index=True)
df.sort_values(by=['type', 'pl'], ascending=True, inplace=True)
|
import lxml.etree
from .h9msg import H9msg
class Common(H9msg):
    """H9 message with a generic <value>/<dict>/<array> XML payload.

    Converts between the message's XML payload and plain Python
    dicts / lists / strings.
    """

    def _dump(self, node, res):
        """Recursively copy XML children of *node* into *res*.

        *res* is either a dict (children keyed by their 'name'
        attribute) or a list (children appended in order). <value>
        becomes a string, <dict>/<array> recurse.
        """
        if isinstance(res, dict):
            for n in node:
                if n.tag == 'value':
                    if n.text is None:
                        # Empty element -> empty string, not None.
                        res[n.attrib['name']] = ''
                    else:
                        res[n.attrib['name']] = n.text
                elif n.tag == 'dict':
                    tmp = {}
                    self._dump(n, tmp)
                    res[n.attrib['name']] = tmp
                elif n.tag == 'array':
                    tmp = []
                    self._dump(n, tmp)
                    res[n.attrib['name']] = tmp
        elif isinstance(res, list):
            for n in node:
                if n.tag == 'value':
                    res.append(n.text)
                elif n.tag == 'dict':
                    tmp = {}
                    self._dump(n, tmp)
                    res.append(tmp)
                elif n.tag == 'array':
                    tmp = []
                    self._dump(n, tmp)
                    res.append(tmp)
        return res

    def _to_xml(self, value, name=None):
        """Inverse of _dump: build an lxml element for *value*.

        *name*, when given, becomes the element's 'name' attribute;
        scalars are stringified into <value> text.
        """
        res = None
        if isinstance(value, list):
            if name:
                res = lxml.etree.Element("array", name=name)
            else:
                res = lxml.etree.Element("array")
            for v in value:
                res.append(self._to_xml(v))
        elif isinstance(value, dict):
            if name:
                res = lxml.etree.Element("dict", name=name)
            else:
                res = lxml.etree.Element("dict")
            for k, v in value.items():
                res.append(self._to_xml(v, k))
        elif name:
            res = lxml.etree.Element("value", name=name)
            res.text = str(value)
        else:
            res = lxml.etree.Element("value")
            res.text = str(value)
        return res

    @property
    def value(self) -> dict:
        """Payload of the message as a plain dict."""
        res = {}
        self._dump(self._xml[0], res)
        return res

    @value.setter
    def value(self, value: dict):
        # A falsy value clears the payload; otherwise entries are
        # appended (existing children are NOT removed first).
        if not value:
            for child in self._xml[0]:
                self._xml[0].remove(child)
        else:
            for k, v in value.items():
                self._xml[0].append(self._to_xml(v, k))

    def to_dict(self):
        """Return {'value': payload} or {} when the payload is empty."""
        res = dict()
        if self.value:
            res['value'] = self.value
        return res
|
#!/usr/bin/env python
# -*- coding: utf-8
#
# Example of Usage :
# If we want to generate 15 imposters of spanish language , we use the next command
# python src/imposters_generator.py --lang sp --seed training/ --output esimposters --imposters 15
#
# If we want to generate 150 imposters of spanish language , we use the next command
# python src/imposters_generator.py --lang en --seed training/ --output enimposters --imposters 150
#
import os,re, sys, glob, codecs, requests, getopt, justext , shutil, time
import numpy as np
from BeautifulSoup import BeautifulSoup
#
# Variable
# --------
# lang
# Dictionary with the definition of every available language.
#
# Skeleton
# --------
# langsearch (string):
# Equivalent of the language in Google search.
#
# min (int), max(int):
# Range used to qualify the length of the corpus. These values must be set per
# language to get the best approximation.
# If the paragraph length is below `min` it is penalized; lengths between
# `min` and `max` (or above `max`) qualify as good corpus text.
#
# lang (string):
# Name of the language, used to get the stop word list
#
lang = {
'ES': {'imposters': 1500,'langsearch':'es', 'min' : 50, 'max':70, 'lang':'Spanish'},
'EN': {'imposters': 1200,'langsearch':'en', 'min' : 50, 'max':80, 'lang':'English'},
'GR': {'imposters': 1300,'langsearch':'el', 'min' : 50, 'max':90, 'lang':'Greek'},
'NL': {'imposters': 1100,'langsearch':'nl', 'min' : 60, 'max':70, 'lang':'Dutch'},
}
#
# Function
# --------
# getCorpus
# Get the corpus from a HTML text using justext python's library.
#
# Parameters
# ----------
# html (string) :
# HTML text
#
# stopwords (list) :
# List of stop words, used by justext to get a good qualification of the clean text
#
# lmin (int), lmax (int) :
# Range to qualify the lenght of the corpus.
#
# Returns
# -------
# full_text (string):
# The clean corpus of a web page
#
def getCorpus(html, stopwords, lmin, lmax):
    """Extract the clean corpus of a web page.

    Runs justext over *html* using *stopwords* and the length range
    (*lmin*, *lmax*), keeps only paragraphs classified as 'good', and
    returns their text joined with single spaces.
    """
    good_chunks = []
    for paragraph in justext.justext(html, stopwords, lmin, lmax):
        if paragraph.cf_class == 'good':
            good_chunks.append(''.join("%s" % node.encode('utf-8')
                                       for node in paragraph.text_nodes))
    return ' '.join(good_chunks)
#
# Function
# --------
# doSearch
# Function to do a search in Google to get the first ten result of a query.
#    The ten links are processed to get the corpus of the sites, and each is saved as an imposter's document.
# BeautifulSoup is used here ;)
#
# Parameters
# ----------
# query (string) :
# The text to search in Google. Example 'romeo visita México', 'apple green market'
#
# selection (list) :
# List with the selected language (variable lang), all the properties are saved in this variable
#
# stopwords (list) :
# List of stop words, used by justext to get a good qualification of the clean text
#
# path (string) :
# Path where the file is saved
#
def doSearch(query, selection, stopwords, path):
    """Google-search *query*, scrape the results page, and save the corpus of
    each result link as a numbered imposter .txt file under *path*.

    selection: one entry of the module-level `lang` dict (langsearch/min/max).
    stopwords: justext stop-word list for the language.
    """
    print "Generated query : %s " % query
    search = 'https://www.google.com/search?q=%s&lr=lang_%s' % (query, selection['langsearch'])
    try:
        # NOTE(review): verify=False disables TLS certificate checking.
        r = requests.get(search,timeout=5, verify = False )
        bs = BeautifulSoup(r.text)
        # Google result titles are <h3 class="r"> elements.
        for result in bs.findAll('h3','r'):
            a = result.find('a')
            # Result hrefs look like "/url?q=<target>&sa=..."; split out the
            # 'url' marker (href[1]) and the target URL (href[2]).
            href =re.split(r'\/(.*)\?q=(.*)\&sa',a.get('href'))
            try :
                #We verify if the link is an url and it is not a file
                if href[1] == 'url' and any( href[2].upper().endswith(ext) for ext in ('.XLS','.XLSX','.PDF','.DOC')) == False :
                    source = requests.get(href[2],timeout=5)
                    corpus = getCorpus(source.text, stopwords, selection['min'], selection['max'])
                    if corpus :
                        # Next sequential file number based on what's on disk.
                        size = len(glob.glob(path+"/*.txt")) + 1
                        number = "%04d"% size
                        print "Creating imposter : %s - %s" % (number,href[2])
                        imposter = open(path+"/imposter"+number+".txt","w")
                        imposter.write(corpus)
                        imposter.close()
            except:
                # NOTE(review): bare except silently skips any result whose
                # download/parse fails; `isfile` is a throwaway sentinel.
                isfile = 1
    except:
        # Search request failed (timeout, block, ...): back off briefly.
        time.sleep(1)
        print "Error"
        #doSearch(query,selection,stopwords,path)
#
# Function
# --------
# doImposter
# Function that generates the imposter's documents.
# In first place we find all the documents of the language related. It's important use the structure of PAN competition.
# If we select Spanish (SP) , the function will try to find all the .TXT files in the SP directories
#
# Parameters
# ----------
# seed (string) :
# The path of the directory where the .TXT files to process are located
#
# out (string) :
# Name of the directory that will be created
#
# mainlang (string):
# Language to be processed, this option is determinated by the nomenclature used by PAN. SP = Spanish, EN = English, etc.
#
# imposters(int) :
# Number of impostors that has to be created
#
def doImposter(seed,out,mainlang,imposters):
    """Generate imposter documents for *mainlang*.

    Mixes words from random seed .txt files under *seed*, strips stop words,
    and repeatedly issues random 3-word Google queries (via doSearch) until
    *imposters* files exist in the *out* directory.
    """
    # We find all the TXT of the LANG directory
    # /PATH/LANG/*.TXT
    #path = seed+mainlang+"*/*.txt"
    path = seed+"/*.txt"
    files = glob.glob(path)
    # Number of files to be chosen. These files are mixed to get random words.
    file_choice = 3
    # Number of words to be chosen to build the query.
    word_choice = 3
    words = []
    # Raises KeyError for a language code not in the module-level `lang` dict.
    selection = lang[mainlang]
    # Random selection of the files to be mixed
    randomfiles = np.random.choice(files, file_choice)
    for single_file in randomfiles:
        textwords = ''.join( [line.strip() for line in codecs.open(single_file,'r','utf-8')] ).split()
        words = words + textwords
    stopwords = justext.get_stoplist(selection['lang'])
    # After choosing the text, eliminate all the stop words.
    # NOTE(review): set(stopwords) is rebuilt for every word — hoisting it
    # would make this linear instead of quadratic.
    cleanwords = [word for word in words if word not in set(stopwords)]
    # Creation of the output directory.
    # output = os.path.join(out,mainlang)
    output = out
    if not os.path.exists(output):
        os.makedirs(output)
    # ERASE
    #else:
    #    shutil.rmtree(out)
    #    os.makedirs(output)
    created = 0
    print "Max imposters : %s" % imposters
    # Keep querying until enough imposter files exist on disk.
    while created <= int(imposters) :
        query = ' '.join( np.random.choice( cleanwords, word_choice) )
        try:
            doSearch(query, selection, stopwords, output)
        except:
            print "Error"
        created = len(glob.glob(output+"/*.txt"))
def main(argv):
mainlang = ""
seed = ""
out = ""
imp = 0
try:
opts, args = getopt.getopt(argv,"hi:o:",["lang=","seed=","output=","imposters="])
except getopt.GetoptError:
print "Usage : imposter.py --lang [ES|EN|GR|NL]--seed <directory> --output <directory> --imposters <number>"
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print "imposter.py --lang [ES|EN|GR|NL] --seed <directory> --output <directory> --imposters <number>"
sys.exit()
elif opt in("--l","--lang"):
mainlang = arg
elif opt in("--s","--seed"):
seed = arg
elif opt in("--o","--output"):
out = arg
elif opt in("--i","--imposters"):
imp = arg
try:
doImposter(seed,out,mainlang.upper(), imp)
except :
print "Bad parameters"
if __name__ == "__main__":
main(sys.argv[1:])
|
# Train and score an SVM classifier on image data prepared by the local
# `functions` helper module.
# get data
import functions as f
#import cv2
# NOTE(review): KNeighborsClassifier, numpy, datasets and metrics are
# imported but unused in this script.
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn import datasets, svm, metrics

# Load labeled image arrays; split into dev/test — presumably 0.2 is the
# held-out fraction, confirm in functions.split_data.
data = f.get_array_from_images('images_no_copies')
dev, test = f.split_data(data, 0.2)
# Flatten/reshape each split into (samples, features) + label vectors.
training_data, training_labels = f.reshape(dev)
test_data, test_labels = f.reshape(test)
classifier = svm.SVC(gamma=0.001)
classifier.fit(training_data, training_labels)
# Mean accuracy on the held-out split.
score = classifier.score(test_data, test_labels)
print(score)
# pip install pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
import pymongo

# Demo: update a document in MongoDB.
# Connect to the server.
conn = MongoClient("localhost", 27017)
# Select the database named myDB.
db = conn.myDB
# Get the `student` collection from the database.
# NOTE(review): variable name `colletion` is a typo for `collection`.
colletion = db.student
# Update the document: set age to 88 where name equals the literal below.
# NOTE(review): Collection.update() is deprecated in pymongo 3 and removed
# in pymongo 4 — update_one()/update_many() is the modern API.
res = colletion.update({"name":"基本密码"},{"$set":{"age":88}})
print(res)
conn.close()
# How to define (create) a function.
def greet():
    """Print a two-line welcome message."""
    for message in ("Hello , This is my first python function",
                    "Welcome to python world"):
        print(message)

greet()
|
# -*- coding: utf-8 -*-
# "Bulls and Cows" guessing game (messages are in Russian).
# https://goo.gl/Go2mb9
from mastermind_engine import get_number, check_number
from termcolor import cprint, colored

# "A four-digit number has been chosen, try to guess it!"
cprint('Загадано четырехзначное число, попробуй отгадать!', color='green')
counter = 0
while True:
    # "Move N"
    print('Ход', counter + 1)
    # "What number was chosen?"
    user_number = input(colored('Какое число загадано?', color='cyan'))
    # Accept only four-digit numbers (1000..9999); invalid input does not
    # count as a move.
    if user_number.isdigit() and int(user_number) in range(1000, 10000):
        checking_number = check_number(user_number)
        # "Bulls - <n> Cows - <n>"
        print('Быки -', checking_number['bulls'],
              'Коровы -', checking_number['cows'])
        # Four bulls means the whole number was guessed.
        if checking_number['bulls'] == 4:
            # "You won! Number of moves - N" (NB: 'Чило' is a typo in the
            # original message; left untouched as it is runtime output).
            print('Вы победили! Чило ходов -', counter + 1)
            # "Want another round? Y/N"
            user_choose = input(colored('Хотите еще? Y/N ', color='blue'))
            if user_choose == 'Y' or user_choose == 'y':
                # Presumably generates a fresh secret number — confirm in
                # mastermind_engine.
                get_number()
                counter = 0
                continue
            else:
                break
        counter += 1
    else:
        # "Invalid input!"
        cprint('Некорректный ввод!', color='red')
|
import random
import math
class bbplayer:
    """A basketball player: fixed ratings plus per-game and career stat
    counters.

    Per-game counters (``stats_<x>``) accumulate during a simulated game;
    game_reset_pstats() folds them into the career totals
    (``stats_tot_<x>``) and zeroes them.
    """
    #stats_array = [[0 for x in range(2)] 0 for x in range(9)]
    #2manystats

    # Suffixes of the per-game counters mirrored by stats_tot_* totals.
    _GAME_STATS = ('pts', 'fga', 'fgm', '3ga', '3gm', 'ass',
                   'reb', 'stl', 'blk', 'ofa', 'ofm')

    # Per-game counters (class-level defaults; instances shadow on first write).
    stats_pts = stats_fga = stats_fgm = stats_3ga = stats_3gm = 0
    stats_ass = stats_reb = stats_stl = stats_blk = stats_ofa = stats_ofm = 0
    # Games played and career totals.
    stats_gms = 0
    stats_tot_pts = stats_tot_fga = stats_tot_fgm = stats_tot_3ga = stats_tot_3gm = 0
    stats_tot_ass = stats_tot_reb = stats_tot_stl = stats_tot_blk = stats_tot_msm = 0
    stats_tot_ofa = stats_tot_ofm = 0

    def __init__(self, name, pref_pos, height, weight, speed, age, int_s, mid_s, out_s, passing, handling, steal, block, int_d, out_d, rebounding, ins_t, mid_t, out_t, real_fga, atts):
        """Store the player's ratings; ovrshoot is the mean of the three
        shooting ratings."""
        self.name = name
        self.height = height
        self.pref_pos = pref_pos
        self.weight = weight
        self.speed = speed
        self.age = age
        self.int_s = int_s
        self.mid_s = mid_s
        self.out_s = out_s
        self.passing = passing
        self.handling = handling
        self.steal = steal
        self.block = block
        self.int_d = int_d
        self.out_d = out_d
        self.rebounding = rebounding
        self.ovrshoot = (int_s + mid_s + out_s) / 3
        self.atts = atts
        self.ins_t = ins_t
        self.mid_t = mid_t
        self.out_t = out_t
        self.real_fga = real_fga

    def game_reset_pstats(self):
        """Fold this game's counters into the career totals, then zero them.

        Fix: the original accumulated stats_blk twice (the second add was a
        no-op because the counter had already been zeroed); the loop removes
        the copy-paste duplication.  NOTE(review): stats_tot_msm is not
        accumulated here — presumably updated elsewhere; confirm.
        """
        self.stats_gms += 1
        for s in self._GAME_STATS:
            setattr(self, 'stats_tot_' + s,
                    getattr(self, 'stats_tot_' + s) + getattr(self, 'stats_' + s))
            setattr(self, 'stats_' + s, 0)
    '''
    for x in stats_array:
        x[1] += x[0]
        x[0] = 0
    '''

    def set_stats_zero(self):
        """Wipe games-played and all career totals (per-game counters are
        left untouched, matching the original behavior)."""
        self.stats_gms = 0
        for s in self._GAME_STATS:
            setattr(self, 'stats_tot_' + s, 0)
        self.stats_tot_msm = 0

    @property
    def overall(self):
        """Single aggregate rating (weighted by exponents per skill)."""
        return int(self.speed + self.int_s**1.3 + self.mid_s**1.3 + self.out_s**1.3 + self.passing**1.1 + self.handling + self.steal**1.1 + self.block**1.1 + self.int_d**1.2 + self.out_d**1.2 + self.rebounding**1.2)

    # Per-game career averages.  Fix: these previously raised
    # ZeroDivisionError before the first game, while fgp/fp3 already
    # guarded their denominators — all now return 0 when undefined.
    @property
    def ppg(self):
        """Career points per game (0 before any game)."""
        return self.stats_tot_pts / self.stats_gms if self.stats_gms else 0

    @property
    def fgp(self):
        """Career field-goal percentage as a fraction (0 if no attempts)."""
        return self.stats_tot_fgm / self.stats_tot_fga if self.stats_tot_fga else 0

    @property
    def fp3(self):
        """Career three-point percentage as a fraction (0 if no attempts)."""
        return self.stats_tot_3gm / self.stats_tot_3ga if self.stats_tot_3ga else 0

    @property
    def rpg(self):
        """Career rebounds per game (0 before any game)."""
        return self.stats_tot_reb / self.stats_gms if self.stats_gms else 0

    @property
    def apg(self):
        """Career assists per game (0 before any game)."""
        return self.stats_tot_ass / self.stats_gms if self.stats_gms else 0

    @property
    def spg(self):
        """Career steals per game (0 before any game)."""
        return self.stats_tot_stl / self.stats_gms if self.stats_gms else 0

    @property
    def bpg(self):
        """Career blocks per game (0 before any game)."""
        return self.stats_tot_blk / self.stats_gms if self.stats_gms else 0

    @staticmethod
    def _cap99(v):
        """Clamp a rating to 99 for two-column display."""
        return 99 if v > 99 else v

    def print_ratings(self, labels): #labels = 1 if they want headings, 0 if just raw stats
        """Print one ratings line; labels=1 prints the column header first.

        Weight and age are printed uncapped, matching the original output.
        """
        if labels==1:
            print("NAME: | HT|WGT|AG|SP|IN|MD|OT|PS|HD|ST|BL|ID|OD|RB|")
        cap = self._cap99
        print("{name:<13}|".format(name=self.name), cap(self.height), self.weight,
              self.age, cap(self.speed), cap(self.int_s), cap(self.mid_s),
              cap(self.out_s), cap(self.passing), cap(self.handling),
              cap(self.steal), cap(self.block), cap(self.int_d), cap(self.out_d),
              cap(self.rebounding), self.overall, self.pref_pos)

    def print_pergame_boxplayer(self):
        """Print a one-line per-game box score (career averages).

        NOTE(review): fp3 is scaled by 999 where fgp uses 1000 — looks like
        a typo, but left as-is because it changes printed output.
        """
        print("{name:<13}| {ppg:<4} | {fgp:<4} | {fp3:<4} | {reb:<4} | {ass:<4} | {stl:<4}| {blk:<4}| {fga:<2} | {ga3:<2} | {msm:<3} {pos}".format(name=self.name, ppg=int(self.ppg*10)/10, fgp=int(self.fgp*1000)/10, fp3=int(self.fp3*999)/10,
            reb=int(self.rpg*10)/10, ass=int(self.apg*10)/10, stl=int(self.spg*10)/10, blk=int(self.bpg*10)/10, fga=int(self.stats_tot_fga/self.stats_gms), ga3=int(self.stats_tot_3ga/self.stats_gms), msm=int(self.stats_tot_msm/self.stats_gms), pos=self.pref_pos))

    def print_boxplayer(self):
        """Print a one-line box score for the current game's counters."""
        print("{name:<13}| {points:<3} | {fgm:<2}/ {fga:<2} | {gm3:<2}/ {ga3:<2} | {rebounds:<3} | {assists:<3} | {steals:<3} | {blocks:<3}".format(name=self.name, points=self.stats_pts, fgm=self.stats_fgm,
            fga=self.stats_fga, gm3=self.stats_3gm, ga3=self.stats_3ga, rebounds=self.stats_reb, assists=self.stats_ass, steals=self.stats_stl, blocks=self.stats_blk))
import random
from enum import Enum
from minesweeper.core.board import GameBoard
from datetime import datetime
from minesweeper.core.cell import RevealCellResult
class GameState(Enum):
    """Lifecycle states of a Minesweeper game."""
    NEW = 0      # created; becomes STARTED on the first reveal()
    STARTED = 1  # at least one cell has been revealed
    PAUSE = 2    # paused (not set anywhere in this module)
    WIN = 3      # all non-mine cells revealed (notify_win)
    LOST = 4     # a mine was revealed (notify_lost)
class MinesweeperGame:
    """Minesweeper game logic on top of a GameBoard.

    Tracks game state, timing, and the number of revealed empty cells so it
    can detect a win.
    """

    def __init__(self, rows, cols, mines_number, state, started_at=None, ended_at=None,
                 total_cells_revealed=0):
        """Create a board of rows x cols and place mines_number random mines.

        Fix: the old signature used ``started_at=datetime.now()`` as a
        default — that expression is evaluated ONCE at class-definition
        time, so every game created without an explicit timestamp shared
        the interpreter start time.  ``None`` now means "now".
        """
        self.board = GameBoard(rows, cols)
        self._set_random_mines(mines_number)
        self.state = state
        self.started_at = started_at if started_at is not None else datetime.now()
        self.ended_at = ended_at
        self.total_cells_revealed = total_cells_revealed

    def _set_random_mines(self, mines_number):
        """Place mines at random free positions (at most 40% of the board)."""
        if 0 <= mines_number <= self.board.total_cells() * 0.4:
            while mines_number > 0:
                row = random.randint(0, self.board.rows - 1)
                # Fix: the column was drawn from the ROW range, which is
                # wrong on non-square boards (assumes GameBoard exposes
                # .cols like .rows — confirm).
                col = random.randint(0, self.board.cols - 1)
                if self.board.can_add_mine_to(row, col):
                    self.board.add_mine(row, col)
                    mines_number -= 1
        else:
            raise ValueError('Game cannot be created. Invalid params values.')

    def set_mine_at(self, i, j):
        """Place a mine at (i, j) explicitly (used by tests/setups)."""
        self.board.add_mine(i, j)

    def cell(self, i, j):
        """Return the cell object at (i, j)."""
        return self.board.cell(i, j)

    def reveal(self, i, j):
        """Reveal cell (i, j); dispatch on the reveal result.

        Raises ValueError (via notify_invalid_cell) for an invalid cell.
        """
        if self.state == GameState.NEW:
            self.state = GameState.STARTED
        cell = self.cell(i, j)
        reveal_result = cell.reveal()
        # Table dispatch: each RevealCellResult maps to a handler.
        {
            RevealCellResult.LOST: self.notify_lost,
            RevealCellResult.INVALID: self.notify_invalid_cell,
            RevealCellResult.ALIVE: self.empty_cell_revealed
        }[reveal_result](cell)

    def end_game(self):
        """Reveal the whole board and stamp the end time."""
        self.board.reveal_all()
        self.ended_at = datetime.now()

    def notify_lost(self, cell):
        """A mine was revealed: mark the game lost and finish it."""
        self.state = GameState.LOST
        self.end_game()

    def notify_invalid_cell(self, cell):
        raise ValueError("Cell ({},{}) cannot be revealed as it's not valid".format(cell.row, cell.col))

    def notify_win(self):
        """All empty cells revealed: finish the game and mark it won."""
        self.end_game()
        self.state = GameState.WIN

    def empty_cell_revealed(self, cell):
        """Handle a safe reveal: count it, flood-fill borders, check win."""
        self.register_empty_cell_revealed()
        self.reveal_borders(cell)
        self.update_game_state_if_needed()

    def register_empty_cell_revealed(self):
        self.total_cells_revealed += 1

    def were_all_empty_cells_revealed(self):
        return self.total_cells_revealed == self.board.total_cells() - self.board.mines_number

    def update_game_state_if_needed(self):
        if self.were_all_empty_cells_revealed():
            self.notify_win()

    def reveal_borders(self, cell):
        """Recursively reveal neighbors of an eligible (border-free) cell."""
        if cell.is_eligible_to_reveal_borders():
            for row, col in self.board.all_positions_candidated_to_be_revealed_from(cell):
                cell_to_reveal = self.board.cell(row, col)
                if cell_to_reveal.can_be_revealed() and not cell_to_reveal.is_a_mine():
                    cell_to_reveal.reveal()
                    self.register_empty_cell_revealed()
                    self.reveal_borders(cell_to_reveal)

    def board_to_string(self):
        """Printable representation of the board."""
        return str(self.board)
"""
Django settings for Easytest project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lzeg!%=5+l$b!xqm!zmmk$s=z)a$^p@o&f-ndejtycvo=-s-v-'
# SECURITY WARNING: don't run with debug turned on in production!
# Django设置DEBUG为False时,'django.contrib.staticfiles'会关闭,即Django不会自动搜索静态文件
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'easy.apps.EasyConfig',
#'django_apscheduler',
'dwebsocket',
# "drf_yasg",
]
# REST全局配置
REST_FRAMEWORK = {
# JSONRenderer:以JSON的格式返回、BaseRenderer:数据嵌套到HTML中展示
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
# 全局版本控制
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.URLPathVersioning",
"ALLOWED_VERSIONS": ["v1", "v2"], # 允许的版本
"VERSION_PARAM": "version", # 版本默认传参
"DEFAULT_VERSION": "v1", # 默认版本号为V1,当没有传版本号时
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Easytest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'easy/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Easytest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'EasyTest',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '47.98.56.102', # aliyun
'PORT': '3307',
"CONN_MAX_AGE": 9,
# 取消外键检查
'OPTIONS': {
"init_command": "SET foreign_key_checks = 0;",
}
}
}
CACHES = {
#连接可使用get_redis_connection("default")
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://47.98.56.102:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {"max_connections": 100}
},
#"PASSWORD":"密码"
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-Hans'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# 静态文件,static 是在Django 具体APP下建立的static目录,用来存放静态资源
STATIC_URL = '/static/'
# 使用 collectstatic后收集的静态文件的存放绝对路径
STATIC_ROOT = "/Users/yons/PycharmProjects/Easytest/static_file"
# STATICFILES_DIRS一般用来设置通用的静态资源,对应的目录不放在APP下,而是放在Project
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'common_static')]
# 开启该中间件之后,默认会为任何开放的HttpResponse设置X-Frame-Options协议头为DENY,如果你想要设置为SAMEOGIGIN
X_FRAME_OPTIONS = 'SAMEORIGIN'
# 配置日志
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s,%(process)d,%(name)s,%(levelname)s,%(filename)s:%(lineno)d,%(message)s'
},
},
'filters': {
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
# 'filters': ['special'],
},
'default': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(os.path.curdir, 'logs/Error.log'),
'maxBytes': 1024 * 1024 * 50, # 50 MB
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': ['default', 'console'] if DEBUG else ["default"],
'level': 'ERROR',
'propagate': False
},
'': {
'handlers': ['default', 'console'] if DEBUG else ["default"],
'level': 'DEBUG',
'propagate': False
},
}
}
#pip install -i https://pypi.doubanio.com/simple/
|
import morepath


class App(morepath.App):
    """Morepath application; paths and views are registered on it below."""
    pass


@App.path('/welcome')
class Welcome(object):
    """Model published at the /welcome path."""
    pass


@App.view(model=Welcome)
def root_default(self, request):
    """Default view for Welcome: a plain-text greeting."""
    return "Hello World!"


# Commit the configuration and expose the WSGI application instance.
App.commit()
main = App()
|
from django.conf.urls import url
from . import views
# NOTE(review): `include` is imported but not used in this module.
from django.conf.urls import include

urlpatterns = [
    # Detail route first: /student/<pk> -> retrieve/update/delete view.
    url(r'^student/(?P<pk>[0-9]+)$', views.Delete_Update_Student.as_view()),
    # List/create route.  The pattern is not anchored with '$', so it also
    # matches any non-numeric suffix — presumably intentional; confirm
    # (otherwise use r'^student/$').
    url(r'^student/', views.Student.as_view()),
]
|
import unittest
import requests
import json
import random
class TestServer(unittest.TestCase):
    """Integration tests against the live qainterview candidates API.

    NOTE(review): these tests hit the network and create real records; they
    will fail offline or if the service is down.
    """

    # Check that the server gives a list of all candidates.
    def test_gives_list(self):
        d = requests.get('http://qainterview.cogniance.com/candidates')
        code = d.status_code
        obj = json.loads(d.text)
        self.assertEqual(code, 200)
        size = len(obj['candidates'])
        self.assertTrue(size > 0)

    # Check that the server adds a new candidate (201 Created).
    def test_add_new_candidate(self):
        load = {'name': 'Masha', 'position': 'QA intern'}
        json_params = json.dumps(load)
        headers = {'content-type': 'application/json'}
        r = requests.post('http://qainterview.cogniance.com/candidates', data=json_params, headers=headers)
        code = r.status_code
        self.assertEqual(code, 201)

    # Negative check: a payload without the required fields is rejected (400).
    def test_incorrect_add_data(self):
        load = {'dogName': 'Bill', 'catName': 'Sarah', 'parrotName': 'Kluv'}
        json_params = json.dumps(load)
        headers = {'content-type': 'application/json'}
        b = requests.post('http://qainterview.cogniance.com/candidates', data=json_params, headers=headers)
        code = b.status_code
        self.assertEqual(code, 400)

    # Create a candidate, then fetch it by id and verify the fields round-trip.
    def test_get_one_candidate(self):
        load = {'name': 'Masha3', 'position': 'test position'}
        json_params = json.dumps(load)
        headers = {'content-type': 'application/json'}
        rs = requests.post('http://qainterview.cogniance.com/candidates', data=json_params, headers=headers)
        obj = json.loads(rs.text)
        new_candidate = obj['candidate']
        new_candidate_id = new_candidate['id']
        url = ('http://qainterview.cogniance.com/candidates/'+str(new_candidate_id))
        k = requests.get(url)
        self.assertEqual(k.status_code, 200)
        obj = json.loads(k.text)
        self.assertEqual(obj['candidate']['id'], new_candidate_id, 'Returns incorrect id')
        self.assertEqual(obj['candidate']['name'], 'Masha3', 'Returns incorrect name')
        self.assertEqual(obj['candidate']['position'], 'test position', 'Returns incorrect position')

    # A (probably) nonexistent numeric id yields 404.
    def test_get_one_wrong_candidate(self):
        url = ('http://qainterview.cogniance.com/candidates/'+str(random.randint(9999, 99999)))
        k = requests.get(url)
        self.assertEqual(k.status_code, 404)

    # A non-numeric id path segment yields 404.
    def test_get_one_wrong_string_candidate(self):
        url = ('http://qainterview.cogniance.com/candidates/'+'WrongPath')
        k = requests.get(url)
        self.assertEqual(k.status_code, 404)

    # Create a candidate, then delete it by id (200).
    def test_delete_candidate(self):
        load = {'name': 'Masha', 'position': 'QA intern'}
        json_params = json.dumps(load)
        headers = {'content-type': 'application/json'}
        rs = requests.post('http://qainterview.cogniance.com/candidates', data=json_params, headers=headers)
        obj = json.loads(rs.text)
        new_candidate = obj['candidate']
        new_candidate_id = new_candidate['id']
        url = ('http://qainterview.cogniance.com/candidates/'+str(new_candidate_id))
        k = requests.delete(url)
        code = k.status_code
        self.assertEqual(code, 200)
unittest.main() |
#Uses python3
import functools
import sys
def compare(x, y):
    """cmp-style comparator for digit strings.

    Orders x before y (returns -1) when the concatenation x+y is smaller
    than y+x; returns 1 for the opposite, 0 when equal.
    """
    left = "".join([x, y])
    right = "".join([y, x])
    # (a > b) - (a < b) is the classic three-way comparison idiom.
    return (left > right) - (left < right)
def largest_number(A):
    """Return the largest number formed by concatenating all strings in A.

    Sorts A in place so that for any neighbors a, b the string a+b >= b+a,
    then joins the result.
    """
    def _pair_order(a, b):
        ab, ba = a + b, b + a
        return (ab > ba) - (ab < ba)

    A.sort(key=functools.cmp_to_key(_pair_order), reverse=True)
    return "".join(A)
if __name__ == '__main__':
    # Fix: the original bound the raw stdin text to a variable named
    # `input`, shadowing the builtin.
    raw = sys.stdin.read()
    data = raw.split()
    # First token is the count; the remaining tokens are the numbers.
    a = data[1:]
    print(largest_number(a))
|
class Settings():
    """Container for all Alien Invasion game settings."""

    def __init__(self):
        """Initialize the game's static settings."""
        # Screen settings.
        self.screen_width = 1024
        self.screen_height = 768
        self.bg_color = (30, 144, 255)
        # Ship settings.
        self.ship_speed_rate = 1.0
        # Bullet settings.
        self.bullet_speed_rate = 1
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (248, 255, 255)
        self.bullet_allowed = 3
import collections
import datetime
import urllib
import pytz
from pylons import c, g
from pylons.i18n import _, ungettext
from r2.lib import filters
from r2.lib.pages import Reddit, UserTableItem
from r2.lib.menus import NavMenu, NavButton
from r2.lib.template_helpers import add_sr
from r2.lib.memoize import memoize
from r2.lib.wrapped import Templated, Wrapped
from r2.models import Account, Subreddit, Link, NotFound, Listing, UserListing
from r2.lib.strings import strings
from r2.lib.utils import tup, fuzz_activity
from r2.lib.jsontemplates import (
JsonTemplate,
ObjectTemplate,
ThingJsonTemplate,
)
from reddit_liveupdate.activity import ACTIVITY_FUZZING_THRESHOLD
from reddit_liveupdate.utils import pretty_time, pairwise
class LiveUpdateTitle(Templated):
    """Rendered title bar for a live-update page (template-only widget)."""
    pass
class LiveUpdatePage(Reddit):
    """Base page for a live-update event; wires up JS config and toolbars."""
    extension_handling = False
    extra_page_classes = ["live-update"]
    extra_stylesheets = Reddit.extra_stylesheets + ["liveupdate.less"]

    def __init__(self, content, websocket_url=None):
        # Client-side config: event id, tracking-pixel domain and (when
        # available) the websocket endpoint for push updates.
        extra_js_config = {
            "liveupdate_event": c.liveupdate_event._id,
            "liveupdate_pixel_domain": g.liveupdate_pixel_domain,
        }
        if websocket_url:
            extra_js_config["liveupdate_websocket"] = websocket_url
        title = c.liveupdate_event.title
        # Prefix the page title while the event is live.
        if c.liveupdate_event.state == "live":
            title = _("[live]") + " " + title
        Reddit.__init__(self,
                        title=title,
                        show_sidebar=False,
                        content=content,
                        extra_js_config=extra_js_config,
                        )

    def build_toolbars(self):
        """Title bar always; editors/managers also get a tab menu."""
        toolbars = [LiveUpdateTitle()]
        if c.liveupdate_can_edit or c.liveupdate_can_manage:
            tabs = [
                NavButton(
                    _("updates"),
                    "/",
                ),
            ]
            # Settings and reporter management are editor-only.
            if c.liveupdate_can_edit:
                tabs.append(NavButton(
                    _("settings"),
                    "/edit",
                ))
                tabs.append(NavButton(
                    _("reporters"),
                    "/reporters",
                ))
            toolbars.append(NavMenu(
                tabs,
                base_path="/live/" + c.liveupdate_event._id,
                type="tabmenu",
            ))
        return toolbars
class LiveUpdateEmbed(LiveUpdatePage):
    """Variant of the live-update page rendered inside an embed frame."""
    extra_page_classes = LiveUpdatePage.extra_page_classes + ["embed"]
class LiveUpdateEvent(Templated):
    """Template model for one event: its listing, reporters and visitors."""

    def __init__(self, event, listing, show_sidebar):
        self.event = event
        self.listing = listing
        self.visitor_count = self._get_active_visitors()
        if show_sidebar:
            self.discussions = LiveUpdateOtherDiscussions()
        self.show_sidebar = show_sidebar
        # Render reporters sorted by account name.
        reporter_accounts = Account._byID(event.reporter_ids,
                                          data=True, return_dict=False)
        self.reporters = sorted((LiveUpdateAccount(e)
                                 for e in reporter_accounts),
                                key=lambda e: e.name)
        Templated.__init__(self)

    def _get_active_visitors(self):
        """Visitor count; fuzzed (with a "~" prefix) below the threshold for
        everyone except admins, to avoid exposing tiny exact counts."""
        count = self.event.active_visitors
        if count < ACTIVITY_FUZZING_THRESHOLD and not c.user_is_admin:
            return "~%d" % fuzz_activity(count)
        return count
class LiveUpdateEventConfiguration(Templated):
    """Timezone choices for the event-settings form: zones without a region
    go in `ungrouped_timezones`; the rest are grouped by region prefix."""

    def __init__(self):
        self.ungrouped_timezones = []
        self.grouped_timezones = collections.defaultdict(list)
        for tz_name in pytz.common_timezones:
            region, slash, zone = tz_name.partition("/")
            if not slash:
                # No "/" separator: a bare zone like "UTC".
                self.ungrouped_timezones.append(tz_name)
            else:
                self.grouped_timezones[region].append(zone)
        Templated.__init__(self)
class ReporterTableItem(UserTableItem):
    """One row in the reporters table of a live event."""
    type = "liveupdate_reporter"

    def __init__(self, user, event, editable=True):
        self.event = event
        self.render_class = ReporterTableItem
        UserTableItem.__init__(self, user, editable=editable)

    @property
    def cells(self):
        # Editors get action cells; everyone else sees just the user.
        if self.editable:
            return ("user", "sendmessage", "remove")
        else:
            return ("user",)

    @property
    def _id(self):
        return self.user._id

    @classmethod
    def add_props(cls, item, *k):
        # No extra props needed for rendering.
        return item

    @property
    def container_name(self):
        # The "container" for the row is the event itself.
        return self.event._id

    @property
    def remove_action(self):
        # POST target used by the "remove" cell.
        return "live/%s/rm_reporter" % self.event._id
class ReporterListing(UserListing):
    """User listing of an event's reporters, with an add form for editors."""
    type = "liveupdate_reporter"

    def __init__(self, event, builder, editable=True):
        self.event = event
        UserListing.__init__(self, builder, addable=editable, nextprev=False)

    @property
    def destination(self):
        # POST target of the "add reporter" form.
        return "live/%s/add_reporter" % self.event._id

    @property
    def form_title(self):
        return _("add reporter")

    @property
    def title(self):
        return _("current reporters")

    @property
    def container_name(self):
        return self.event._id
class LiveUpdateEventJsonTemplate(JsonTemplate):
    """JSON view of an event: just its rendered listing (or {} if absent)."""
    def render(self, thing=None, *a, **kw):
        return ObjectTemplate(thing.listing.render() if thing else {})
class LiveUpdateJsonTemplate(ThingJsonTemplate):
    """JSON serialization of a single live update."""
    _data_attrs_ = ThingJsonTemplate.data_attrs(
        id="_id",
        body="body",
        body_html="body_html",
    )

    def thing_attr(self, thing, attr):
        if attr == "_id":
            # ids may be non-string (UUIDs); JSON wants strings.
            return str(thing._id)
        elif attr == "body_html":
            # body_html is derived: markdown-render the body, then compress.
            return filters.spaceCompress(filters.safemarkdown(thing.body))
        return ThingJsonTemplate.thing_attr(self, thing, attr)

    def kind(self, wrapped):
        return "LiveUpdate"
class LiveUpdateAccount(Templated):
    """Minimal template model of an account (deletion flag, name, fullname)."""
    def __init__(self, user):
        Templated.__init__(self,
                           deleted=user._deleted,
                           name=user.name,
                           fullname=user._fullname,
                           )
class LiveUpdateOtherDiscussions(Templated):
    """Sidebar widget listing reddit discussions that link to this event."""
    max_links = 5

    def __init__(self):
        links = self.get_links(c.liveupdate_event._id)
        self.more_links = len(links) > self.max_links
        self.links = links[:self.max_links]
        # Prefilled submit link so users can start a new discussion.
        self.submit_url = "/submit?" + urllib.urlencode({
            "url": add_sr("/live/" + c.liveupdate_event._id,
                          sr_path=False, force_hostname=True),
            "title": c.liveupdate_event.title,
        })
        Templated.__init__(self)

    @classmethod
    @memoize("live_update_discussion_ids", time=60)
    def _get_related_link_ids(cls, event_id):
        """Link ids that point at this event's URL (memoized for 60s)."""
        url = add_sr("/live/%s" % event_id, sr_path=False, force_hostname=True)
        try:
            links = tup(Link._by_url(url, sr=None))
        except NotFound:
            links = []
        return [link._id for link in links]

    @classmethod
    def get_links(cls, event_id):
        """Wrapped, comment-count-sorted links; private subreddits skipped."""
        link_ids = cls._get_related_link_ids(event_id)
        links = Link._byID(link_ids, data=True, return_dict=False)
        links.sort(key=lambda L: L.num_comments, reverse=True)
        sr_ids = set(L.sr_id for L in links)
        subreddits = Subreddit._byID(sr_ids, data=True)
        wrapped = []
        for link in links:
            w = Wrapped(link)
            w.subreddit = subreddits[link.sr_id]
            # ideally we'd check if the user can see the subreddit, but by
            # doing this we keep everything user unspecific which makes caching
            # easier.
            if w.subreddit.type == "private":
                continue
            comment_label = ungettext("comment", "comments", link.num_comments)
            w.comments_label = strings.number_label % dict(
                num=link.num_comments, thing=comment_label)
            wrapped.append(w)
        return wrapped
class LiveUpdateSeparator(Templated):
    """Hour marker rendered between updates that fall in different hours."""
    def __init__(self, date):
        # Truncate to the top of the hour so the label reads as a whole hour.
        self.date = date.replace(minute=0, second=0, microsecond=0)
        self.date_str = pretty_time(self.date)
        Templated.__init__(self)
class LiveUpdateListing(Listing):
    """Listing of live updates that can interleave hourly separators."""
    def __init__(self, builder):
        # Capture "now" once so the rendered timestamp is consistent.
        self.current_time = datetime.datetime.now(g.tz)
        self.current_time_str = pretty_time(self.current_time)
        Listing.__init__(self, builder)
    def things_with_separators(self):
        """Return self.things with a LiveUpdateSeparator inserted wherever
        two consecutive updates fall in different wall-clock hours."""
        # Bug fix: the original indexed self.things[0] unconditionally and
        # raised IndexError for an empty listing.
        if not self.things:
            return []
        items = [self.things[0]]
        for prev, update in pairwise(self.things):
            if update._date.hour != prev._date.hour:
                items.append(LiveUpdateSeparator(prev._date))
            items.append(update)
        return items
def liveupdate_add_props(user, wrapped):
    """Attach render-time attributes (author wrapper, timestamp string) to
    wrapped live updates.

    `user` is accepted but unused here — presumably kept to match the
    standard add_props signature; confirm against callers.
    """
    # Batch-fetch all authors in one lookup instead of one query per update.
    account_ids = set(w.author_id for w in wrapped)
    accounts = Account._byID(account_ids, data=True)
    for item in wrapped:
        item.author = LiveUpdateAccount(accounts[item.author_id])
        item.date_str = pretty_time(item._date)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Echo stdin back inside a JSON payload after a simulated processing delay."""
import json
from random import randint
import time
import sys

# Slurp every line of stdin before doing anything else.
lines = sys.stdin.readlines()

# Simulate some slow processing.
time.sleep(5)

payload = {
    "items": ["a", "b", "c", lines],
    "boolean": True,
    "integer": 123456,
    "random": randint(0, 9999),
}
encoded = json.dumps(payload, ensure_ascii=True)

# Side-effect marker so a caller can tell the script ran.
with open('/tmp/test', 'a') as file:
    file.write('hello world!\n')

print("write into stderr", file=sys.stderr)
print(encoded)
|
"""
Trace-header field-name tables.

`four_bytes` is the *set* (not list, despite the original comment) of
header fields stored as four-byte integers; the remaining fields in
`trace_head_names` are two bytes each.  NOTE(review): the names match
segyio's SEG-Y trace-header keys — confirm the intended binding.
"""
four_bytes = {
    "JobId",
    "LineNumber",
    "ReelNumber",
    "order",
    "TRACE_SEQUENCE_LINE",
    "TRACE_SEQUENCE_FILE",
    "FieldRecord",
    "TraceNumber",
    "EnergySourcePoint",
    "CDP",
    "CDP_TRACE",
    "offset",
    "ReceiverGroupElevation",
    "SourceSurfaceElevation",
    "SourceDepth",
    "ReceiverDatumElevation",
    "SourceDatumElevation",
    "SourceWaterDepth",
    "GroupWaterDepth",
    "SourceX",
    "SourceY",
    "GroupX",
    "GroupY",
}
# Every trace-header field name, in header (byte) order.
trace_head_names = [
    "TRACE_SEQUENCE_LINE",
    "TRACE_SEQUENCE_FILE",
    "FieldRecord",
    "TraceNumber",
    "EnergySourcePoint",
    "CDP",
    "CDP_TRACE",
    "TraceIdentificationCode",
    "NSummedTraces",
    "NStackedTraces",
    "DataUse",
    "offset",
    "ReceiverGroupElevation",
    "SourceSurfaceElevation",
    "SourceDepth",
    "ReceiverDatumElevation",
    "SourceDatumElevation",
    "SourceWaterDepth",
    "GroupWaterDepth",
    "ElevationScalar",
    "SourceGroupScalar",
    "SourceX",
    "SourceY",
    "GroupX",
    "GroupY",
    "CoordinateUnits",
    "WeatheringVelocity",
    "SubWeatheringVelocity",
    "SourceUpholeTime",
    "GroupUpholeTime",
    "SourceStaticCorrection",
    "GroupStaticCorrection",
    "TotalStaticApplied",
    "LagTimeA",
    "LagTimeB",
    "DelayRecordingTime",
    "MuteTimeStart",
    "MuteTimeEND",
    "TRACE_SAMPLE_COUNT",
    "TRACE_SAMPLE_INTERVAL",
    "GainType",
    "InstrumentGainConstant",
    "InstrumentInitialGain",
    "Correlated",
    "SweepFrequencyStart",
    "SweepFrequencyEnd",
    "SweepLength",
    "SweepType",
    "SweepTraceTaperLengthStart",
    "SweepTraceTaperLengthEnd",
    "TaperType",
    "AliasFilterFrequency",
    "AliasFilterSlope",
    "NotchFilterFrequency",
    "NotchFilterSlope",
    "LowCutFrequency",
    "HighCutFrequency",
    "LowCutSlope",
    "HighCutSlope",
    "YearDataRecorded",
    "DayOfYear",
    "HourOfDay",
    "MinuteOfHour",
    "SecondOfMinute",
    "TimeBaseCode",
    "TraceWeightingFactor",
    "GeophoneGroupNumberRoll1",
    "GeophoneGroupNumberFirstTraceOrigField",
    "GeophoneGroupNumberLastTraceOrigField",
    "GapSize",
    "OverTravel",
    "spare",
]
|
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
def init_browser():
    """Open and return a splinter-controlled (non-headless) Chrome browser."""
    # @NOTE: Replace the path with your actual path to the chromedriver
    executable_path = {'executable_path': ChromeDriverManager().install()}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape Mars data for the app.py / Mongo pipeline.

    Returns a dict with keys: latest_news, latest_news_content,
    featured_image_url, mars_facts_html, hemisphere_image_urls.
    """
    # Initialize the connection to the chrome browser (used only for the
    # featured-image page, which needs JS rendering).
    browser = init_browser()
    marsData = {}

    # --- Latest news ------------------------------------------------------
    url = 'https://mars.nasa.gov/news/'
    response = requests.get(url)
    soup = bs(response.text, 'html.parser')
    news_title = soup.find('div', class_="content_title").find('a').text
    news_p = soup.find('div', class_="rollover_description_inner").text
    # Typo fix: original printed "titel".
    print("The new title is: " + news_title)
    print("-----------------------------------------------------------------")
    print("The content is: " + news_p)
    print("-----------------------------------------------------------------")
    marsData['latest_news'] = news_title
    marsData['latest_news_content'] = news_p

    # --- JPL featured image ----------------------------------------------
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    base_url = 'https://www.jpl.nasa.gov'
    response = requests.get(url)
    soup = bs(response.text, 'html.parser')
    # The landing page only carries a relative link to the image viewer.
    splinter_url = base_url + soup.find('a', class_="button fancybox")['data-link']
    print("The splinter site url is: " + splinter_url)
    print("-----------------------------------------------------------------")
    browser.visit(splinter_url)
    soup = bs(browser.html, 'html.parser')
    featured_image_url = base_url + soup.find('img', class_="main_image")['src']
    print("The featured image link is: " + featured_image_url)
    print("-----------------------------------------------------------------")
    marsData['featured_image_url'] = featured_image_url
    browser.quit()

    # --- Mars facts table -------------------------------------------------
    url = 'https://space-facts.com/mars/'
    # First table on the page holds the planet facts.
    df = pd.read_html(url)[0]
    df.columns = ['Facts', 'Value']
    df.set_index('Facts', inplace=True)
    # HTML table with a bold border; strip newlines for easy embedding.
    html_table = df.to_html(border=3)
    marsData['mars_facts_html'] = html_table.replace('\n', '')
    print(marsData['mars_facts_html'])

    # --- Mars hemispheres -------------------------------------------------
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    base_url = "https://astrogeology.usgs.gov"
    response = requests.get(url)
    soup = bs(response.text, 'html.parser')
    results = soup.find_all('a', class_="itemLink product-item")
    # Absolute URLs of each hemisphere's detail page.
    full_resolution_image_url = [base_url + result['href'] for result in results]
    print(full_resolution_image_url)
    hemisphere_image_urls = []
    for page_url in full_resolution_image_url:
        response = requests.get(page_url)
        soup = bs(response.text, 'html.parser')
        # First download link is the full-resolution image.
        image_url = soup.find('div', class_="downloads").find('ul').find('li').find('a')['href']
        title = soup.find('h2', class_="title").text
        hemisphere_image_urls.append({"title": title, "img_url": image_url})
        print(title)
        print(image_url)
        print("----------------------------------------------------------------")
    print(hemisphere_image_urls)
    marsData['hemisphere_image_urls'] = hemisphere_image_urls
    print(marsData)
    # return the data to app.py
    return marsData
|
''' Clustering example of iris dataset
Commented codes display all possible graphs
'''
import pandas as pd
import matplotlib.pyplot as plt
import numpy
from sklearn.cluster import KMeans

iris = pd.read_csv('C:/Users/jhunjhun/Downloads/iris.csv')
# Keep only the four numeric measurement columns.
iris = iris.iloc[:, [0, 1, 2, 3]]
#x = iris.iloc[:,0]
#y = iris.iloc[:,1]
# Cluster on sepal length / sepal width only.
X = iris.iloc[:, [0, 1]].values
#print(iris)
#plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('sepal length')
#plt.ylabel('sepal width')
#plt.title('Sepal Length V/S Sepal Width')
#centers = kmeans.cluster_centers_
#X = centers[:,0]
#Y = centers[:,1]
#plt.scatter(X, Y, c='black')
clf = KMeans(n_clusters=3, init='k-means++')
y_kmeans = clf.fit_predict(X)
fig = plt.figure(figsize=(10, 8))
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], color='red', label='Cluster 1', edgecolors='black')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], color='green', label='Cluster 2', edgecolors='black')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], color='blue', label='Cluster 3', edgecolors='black')
# cluster centres
plt.scatter(clf.cluster_centers_[:, 0], clf.cluster_centers_[:, 1], color='magenta', label='Centroid', edgecolors='black')
plt.legend()
plt.title('Clusters using KMeans')
plt.ylabel('Sepal Width')
plt.xlabel('Sepal Length')
# Bug fix: the original created an extra empty figure `f = plt.figure(1)`
# and called f.show(), displaying the *empty* figure instead of the one
# the scatter plots were drawn on.  plt.show() displays the drawn figure.
plt.show()
#g = plt.figure(2)
#x = iris.iloc[:,0]
#y = iris.iloc[:,2]
##print(iris)
##plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('sepal length')
#plt.ylabel('petal length')
#plt.title('Sepal Length V/S Petal Length')
#centers = kmeans.cluster_centers_
#X = centers[:,0]
#Y = centers[:,2]
#plt.scatter(X, Y, c='black')
#g.show()
#
#
#h = plt.figure(3)
#x = iris.iloc[:,0]
#y = iris.iloc[:,3]
##print(iris)
##plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('sepal length')
#plt.ylabel('petal width')
#plt.title('Sepal Length V/S Petal Width')
#centers = kmeans.cluster_centers_
#X = centers[:,0]
#Y = centers[:,3]
#plt.scatter(X, Y, c='black')
#h.show()
#
#
#i = plt.figure(4)
#x = iris.iloc[:,1]
#y = iris.iloc[:,2]
##print(iris)
##plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('sepal width')
#plt.ylabel('petal length')
#plt.title('Sepal Width V/S Petal Length')
#centers = kmeans.cluster_centers_
#X = centers[:,1]
#Y = centers[:,2]
#plt.scatter(X, Y, c='black')
#i.show()
#
#
#j = plt.figure(5)
#x = iris.iloc[:,1]
#y = iris.iloc[:,3]
##print(iris)
##plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('sepal width')
#plt.ylabel('petal width')
#plt.title('Sepal Width V/S Petal Width')
#centers = kmeans.cluster_centers_
#X = centers[:,1]
#Y = centers[:,3]
#plt.scatter(X, Y, c='black')
#j.show()
#
#
#k = plt.figure(6)
#x = iris.iloc[:,2]
#y = iris.iloc[:,3]
##print(iris)
##plt.scatter(x,y)
#kmeans = KMeans(n_clusters=3)
#kmeans.fit(iris)
#y_kmeans = kmeans.predict(iris)
#plt.scatter(x, y, c=y_kmeans, cmap='plasma')
#plt.xlabel('petal length')
#plt.ylabel('petal width')
#plt.title('Petal Length V/S Petal Width')
#centers = kmeans.cluster_centers_
#X = centers[:,2]
#Y = centers[:,3]
#plt.scatter(X, Y, c='black')
#k.show()
|
# Generated by Django 2.1.3 on 2018-12-05 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set French verbose names on Post and redefine edit_date as
    auto-populated on creation."""
    dependencies = [
        ('microlly_blog', '0003_auto_20181202_2355'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={'verbose_name': 'Publication', 'verbose_name_plural': 'Publications'},
        ),
        migrations.AlterField(
            model_name='post',
            name='edit_date',
            # NOTE(review): auto_now_add only stamps the row on INSERT; a
            # field named edit_date usually wants auto_now (stamp on every
            # save) — confirm the intent.
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
from flask import Flask, render_template, url_for,request,jsonify,g,make_response
import datetime
from flask_cors import CORS
import mysql.connector as db
from flask_httpauth import HTTPBasicAuth,MultiAuth,HTTPTokenAuth
import base64
from flask_mail import Mail,Message
import random
import string
# Module-level MySQL connection, lazily opened by get_db_connection().
con=None
app = Flask(__name__)
basic_auth = HTTPBasicAuth()
token_auth = HTTPTokenAuth(scheme='Basic')
CORS(app)
# NOTE(review): mail credentials are placeholder strings committed in
# source; they should be loaded from environment/config instead.
app.config["MAIL_USERNAME"]="email name"
app.config["MAIL_PASSWORD"]="your email pwd"
app.config["MAIL_SERVER"]="smtpout.secureserver.net"
app.config["MAIL_PORT"]=465
app.config["MAIL_USE_SSL"]=True
mail = Mail(app)
@basic_auth.verify_password
def verify_password(username, password):
    """Basic-auth callback: accept only the hard-coded user "hegde".

    NOTE(review): the password argument is never checked — any password
    succeeds for this username.  Confirm whether a real credential check
    (e.g. the commented-out DB lookup) belongs here.
    """
    #con=db.connect(host="localhost",user="<your user>",password="<your pwd>",auth_plugin='mysql_native_password',database="<your DB>")
    return username == "hegde"
@token_auth.error_handler
def auth_error():
    # On token-auth failure return an empty response.
    # NOTE(review): make_response() with no arguments defaults to status
    # 200, not 401 — confirm this is intentional.
    return make_response()
def get_db_connection():
    """Return the shared module-level MySQL connection, (re)connecting as needed.

    Lazily opens the connection on first use and transparently reconnects
    when the cached connection has dropped.
    """
    global con
    # `is None` instead of `== None`, and one connect path for both the
    # first-use and the reconnect case (the original duplicated it).
    if con is None or not con.is_connected():
        con = db.connect(host="localhost", user="<your user>", password="<your pwd>",
                         auth_plugin='mysql_native_password', database="<your DB>")
    return con
@token_auth.verify_token
def verify_token(token):
    """Token-auth callback: True iff `token` maps to a non-expired DB session.

    A lookup failure is treated as an invalid token (False / 401).  The
    original set ret=False and then re-raised the exception, which made
    its failure path unreachable and turned auth errors into 500s.
    """
    try:
        return validate_session((token, 0)) != "expired"
    except Exception:
        return False
def randomString(stringLength):
    """Generate a random string with the combination of lowercase and
    uppercase letters.

    These strings are used as session/activation tokens, so draw them from
    the `secrets` CSPRNG — the plain `random` module is not suitable for
    security-sensitive tokens.
    """
    import secrets  # local import keeps the module's import block untouched
    letters = string.ascii_letters
    return ''.join(secrets.choice(letters) for _ in range(stringLength))
def get_rc_from_db(args):
    """Call the get_result_category_labels stored proc; return rows as dicts."""
    con = get_db_connection()
    results = []
    # Cursor created *outside* try: if con.cursor() raised inside, the
    # original's `finally: cursor.close()` hit an unbound name and masked
    # the real error with a NameError.
    cursor = con.cursor()
    try:
        cursor.callproc('get_result_category_labels', args)
        for r in cursor.stored_results():
            for row in r:
                results.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results
def get_all_amc_from_db():
    """Call the get_all_amcs stored proc; return all AMC rows as dicts."""
    con = get_db_connection()
    results = []
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        cursor.callproc('get_all_amcs', tuple())
        for r in cursor.stored_results():
            for row in r:
                results.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results
def create_session(args):
    """Call the create_session stored proc; return its last OUT parameter
    (the session status/handle)."""
    con = get_db_connection()
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        results = cursor.callproc('create_session', args)
    finally:
        cursor.close()
    return results[-1]
def validate_session(args):
    """Call the validate_session stored proc; return its last OUT parameter
    ("expired" marks a dead session)."""
    con = get_db_connection()
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        results = cursor.callproc('validate_session', args)
    finally:
        cursor.close()
    return results[-1]
@app.route('/category/<int:stock_id>/<string:mode>/<int:no_of_qtr>')
@token_auth.login_required
def get_result_category_labels(stock_id,mode,no_of_qtr):
    """JSON list of result-category labels for a stock.

    Query param `stockCompare` (default 0) is a second stock id to
    compare against.
    """
    stockCompare=request.args.get('stockCompare',0)
    con=get_db_connection()
    category_labels=[]
    try:
        category_labels=get_rc_from_db((stock_id,int(stockCompare),no_of_qtr,mode))
    finally:
        # Connection is shared/module-level, so it is deliberately left open.
        if con.is_connected():
            pass
            #con.close()
    return jsonify(category_labels)
def get_type_from_db(args):
    """Call the get_result_type_label stored proc; return rows as dicts."""
    con = get_db_connection()
    results = []
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        cursor.callproc('get_result_type_label', args)
        for r in cursor.stored_results():
            for row in r:
                results.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results
def get_scheme_nav_from_db(args):
    """Call the get_scheme_nav stored proc; return rows as dicts."""
    con = get_db_connection()
    results = []
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        cursor.callproc('get_scheme_nav', args)
        for r in cursor.stored_results():
            for row in r:
                results.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results
def get_scheme_cat_from_db(args):
    """Call the get_scheme_category stored proc; return rows as dicts."""
    con = get_db_connection()
    results = []
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        cursor.callproc('get_scheme_category', args)
        for r in cursor.stored_results():
            for row in r:
                results.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results
def get_mf_nav_from_db(args):
    """Call the get_mutual_funds_nav stored proc.

    Returns (results1, results2): row-dicts from the procedure's first and
    second result sets respectively.
    """
    con = get_db_connection()
    results1 = []
    results2 = []
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        cursor.callproc('get_mutual_funds_nav', args)
        for i, r in enumerate(cursor.stored_results()):
            target = results1 if i == 0 else results2
            for row in r:
                target.append(dict(zip(r.column_names, row)))
    finally:
        cursor.close()
    return results1, results2
@app.route("/mutualFunds/schemeCat/<int:amc1>/<int:amc2>")
def get_scheme_category(amc1,amc2):
    """JSON list of scheme categories shared by the two AMCs.

    NOTE(review): unlike the equity routes this one has no
    @token_auth.login_required — confirm it is meant to be public.
    """
    scheme_cat=get_scheme_cat_from_db((amc1,amc2))
    return jsonify(scheme_cat)
@app.route("/mutualFunds/schemeNav/<int:amc1>/<int:amc2>/<int:scheme_cat_id>")
def get_scheme_nav(amc1,amc2,scheme_cat_id):
    """JSON list of schemes/NAVs for the two AMCs within a category."""
    scheme_nav=get_scheme_nav_from_db((amc1,amc2,scheme_cat_id))
    return jsonify(scheme_nav)
@app.route("/mutualFunds/performance/<int:code1>/<int:code2>/<int:days>")
def get_mf_nav(code1,code2,days):
    """NAV history for two funds over `days` days.

    get_mf_nav_from_db returns a (results1, results2) tuple, which jsonify
    serializes as a two-element JSON array.
    """
    mf_nav=get_mf_nav_from_db((code1,code2,days))
    return jsonify(mf_nav)
@app.route('/type/<int:stock_id>/<string:mode>/<int:no_of_qtr>/<int:catID>')
@token_auth.login_required
def get_result_type_labels(stock_id,mode,no_of_qtr,catID):
    """JSON list of result-type labels for a stock within a category.

    Query param `stockCompare` (default 0) is a second stock id to
    compare against.
    """
    stockCompare=request.args.get('stockCompare',0)
    con=get_db_connection()
    type_labels=[]
    try:
        type_labels=get_type_from_db((stock_id,int(stockCompare),no_of_qtr,mode,catID))
    finally:
        # Connection is shared/module-level, so it is deliberately left open.
        if con.is_connected():
            pass
            #con.close()
    return jsonify(type_labels)
@app.route('/search/<string:search_string>')
@token_auth.login_required
def get_stock_search_results(search_string):
    """Substring search over stock name / NSE id / BSE id; JSON list of
    {stockId, stockName} dicts."""
    con = get_db_connection()
    out = []  # initialized up front: the original left `out` unbound on failure
    cur = con.cursor(prepared=True)
    try:
        # Parameterized query — the pattern is bound, never interpolated.
        select_stmt = "SELECT stock_id,stock_name FROM stocks WHERE stock_name like %s or nse_id like %s or bse_id like %s"
        pattern = "%" + search_string + "%"
        cur.execute(select_stmt, (pattern, pattern, pattern))
        # Prepared cursors return stock_name as bytes; decode for JSON.
        out = [{'stockId': id, 'stockName': name.decode("utf-8")} for id, name in cur]
    finally:
        cur.close()  # the original leaked the cursor (finally: pass)
    return jsonify(out)
@app.route('/graph/<int:stockID>')
@token_auth.login_required
def get_graph_data(stockID):
    """Graph series for a stock: financial results plus optional price series.

    Query params: mode, categoryId, fromQtr, totalQtr, totalQtrIncome,
    resultTypeId, secondaryStockID, and price1/price2 ("Y"/"N" flags that
    tell the stored procedure which price series to emit).
    """
    mode=request.args.get('mode',None)
    categoryId=request.args.get('categoryId',None)
    fromQtr=request.args.get('fromQtr',None)
    totalQtr=request.args.get('totalQtr',None)
    totalQtrIncome=request.args.get('totalQtrIncome',None)
    stmtTypeID=request.args.get('resultTypeId',None)
    secondaryStockID=request.args.get('secondaryStockID',None)
    price1=request.args.get('price1','N')
    price2=request.args.get('price2','N')
    con=get_db_connection()
    try:
        cur=con.cursor()
        cur.callproc('get_graph_data',(stockID,secondaryStockID,categoryId,stmtTypeID,mode,fromQtr,totalQtr,totalQtrIncome,price1,price2))
        results=[]
        price1=[]
        price2=[]
        # Result-set layout from the procedure: set 0 is always the income
        # data; set 1 is price1 if it was requested, otherwise price2; a
        # third set (when present) is price2.
        for i,r in enumerate(cur.stored_results()):
            if i==0:
                for row in r:
                    results.append(dict(zip(r.column_names,row)))
            elif i==1:
                for row in r:
                    # price1/price2 were rebound to lists above, so re-read
                    # the original request flag to decide where set 1 goes.
                    if request.args.get('price1','N')=="Y":
                        price1.append(dict(zip(r.column_names,row)))
                    else:
                        price2.append(dict(zip(r.column_names,row)))
            else:
                for row in r:
                    price2.append(dict(zip(r.column_names,row)))
        # In comparison modes each point also carries the stock id.
        if mode !="S":
            out=[{'name':r["stock_id"],'year':r["period"],'value':float(r["value"])} for r in results]
        else:
            out=[{'year':r["period"],'value':float(r["value"])} for r in results]
    finally:
        pass
        #con.close()
    return jsonify(income=out,price1=price1,price2=price2)
@app.route('/')
def login():
    """Landing page: render the login form (equity tab active)."""
    #qryr_labels=[('6qtr',1),('8qtr',2),('12qtr',3),('max',4)]
    #return render_template('test.html',qryr_labels=qryr_labels)
    #return render_template('test.html',message="Please login to use the service")
    return render_template('login.html',login=True,mf_class="none",eq_class="active")
@app.route('/mutualFunds',methods=['GET'])
def mutualFunds():
    """Mutual-funds page; requires a valid session token cookie."""
    token=request.cookies.get("token")
    if verify_token(token):
        # Pre-load the AMC list for the comparison dropdowns.
        amcs=get_all_amc_from_db()
        resp=make_response(render_template('mutualFunds.html',amcs=amcs,login=False,mf_class="active",eq_class="none",tokenIn=token))
    else:
        resp=make_response(render_template('messageToUser.html',message="There was an error validating session. Please try again later."))
    return resp
@app.route('/home',methods=['POST','GET'])
def home():
    """Equity home page.

    POST: verify the submitted email, create a DB session and set its
    token as a cookie.  GET: re-validate the existing token cookie.
    """
    if request.method=="POST":
        email=request.form.get('emailInput',None)
        con=get_db_connection()
        try:
            cur=con.cursor()
            # callproc returns its args with OUT params filled in; the last
            # value is "N" when the email is not registered.
            results=cur.callproc('verify_user',(email,0))
            if results[-1] != "N":
                token=randomString(30)
                s=create_session((email,token,0))
        except Exception:
            # NOTE(review): swallowing here leaves `results` (and possibly
            # `s`/`token`) unbound, so the code below raises NameError
            # instead of rendering an error page — confirm intended handling.
            #con.close()
            pass
        finally:
            pass
            #con.close()
        if results[-1]=="N":
            return render_template('messageToUser.html',message="You are not registered. Please enter email and click register and activate your ID to use this tool.")
        qryr_labels=[('6qtr',1),('8qtr',2),('12qtr',3),('max',4)]
        if s=="fail":
            # NOTE(review): this render_template result is not returned, so
            # execution falls through to `return resp` with `resp` unbound.
            render_template('messageToUser.html',message="There was an error creating session. Please try again later.")
        else:
            resp=make_response(render_template('equity.html',qryr_labels=qryr_labels,tokenIn=token,login=False,mf_class="none",eq_class="active"))
            resp.set_cookie('token', token)
        return resp
    else:
        token=request.cookies.get("token")
        if verify_token(token):
            qryr_labels=[('6qtr',1),('8qtr',2),('12qtr',3),('max',4)]
            resp=make_response(render_template('equity.html',qryr_labels=qryr_labels,tokenIn=token,login=False,mf_class="none",eq_class="active"))
        else:
            resp=make_response(render_template('messageToUser.html',message="There was an error validating session. Please try again later."))
        return resp
@app.route('/register',methods=['POST'])
def register():
    """Register an email and send the activation link by mail.

    register_user's OUT params (echoed back by callproc) are read as:
    results[-2] = status ("success" / "AR" already-registered / other),
    results[-1] = activation link.
    """
    results=[]
    email=request.form.get('emailInput',None)
    con=get_db_connection()
    try:
        cur=con.cursor()
        token=randomString(10)
        results=cur.callproc('register_user',(email,'S',token,0,1))
    except Exception:
        # Sentinel so results[-2] below doesn't IndexError on failure.
        results.append("fail")
    finally:
        pass
        #con.close()
    if results[-2]!="success":
        if results[-2]=="AR":
            return render_template('messageToUser.html',message="Already registered. Please activate your ID if you have not done so.")
        else:
            return render_template('messageToUser.html',message="There was an error registering you. Please try again later.")
    msg = Message("Hello",sender="communications@stocksandcharts.in",recipients=[email])
    msg.body = "testing"
    msg.html = """<div style='margin-left: 50px;margin-right: 50px;margin-bottom: 50px'>
<p>Dear {email}</p><br>
<span>Thanks for your interest in the service. You can activate your account by clicking below link.</span><br>
<br>
<a href='{link}' ' style='background: blue;padding: 10px;color: yellow;font-family: Calibri;font-size: 15px;border-radius: 10px '><b>Activate account<b></a>
<br>
<br>
<p>Thanks and Regards, <br> Manjunath
</p>
</div>""".format(email=email,link=results[-1])
    try:
        # Mail failure is deliberately best-effort: registration already
        # succeeded in the DB.
        mail.send(msg)
    except Exception:
        pass
    # NOTE(review): user-facing text contains typos ("servce", "Dont").
    return render_template('messageToUser.html',message="You have been registered. Please login to your email and activate the servce to login. Dont forget to check spam folder if you did not get email!")
def activate_user(args):
    """Call the activate_user stored proc; return its last OUT parameter
    ("success" on activation)."""
    con = get_db_connection()
    # Cursor created outside try so a cursor() failure is not masked by a
    # NameError in the finally block.
    cursor = con.cursor()
    try:
        results = cursor.callproc('activate_user', args)
    finally:
        cursor.close()
    return results[-1]
@app.route('/activate/<int:actid>/<string:token>')
def activate(actid,token):
    """Activate a registered account from the emailed link.

    NOTE(review): `token` is accepted but never validated against the DB,
    so activation succeeds for any token value — confirm whether it should
    be checked.
    """
    message = None
    try:
        results = activate_user((actid, 0))
        print(results)
        if results == "success":
            message = "You have been activated. Please login to use the service"
        else:
            # The stored procedure's status text is shown verbatim.
            message = results
    except Exception as e:
        print(e)
        # Typo fix: original said "Please ty again".
        message = "There was an error activating. Please try again"
    return render_template('messageToUser.html', message=message)
if __name__ == "__main__":
    try:
        app.run(host='0.0.0.0', debug=True)
    finally:
        # `con` stays None until the first DB call; guard against an
        # AttributeError on shutdown when no connection was ever opened.
        if con is not None and con.is_connected():
            con.close()
|
from pypif import pif
from pypif.obj import *
# Minimal PIF (Physical Information File) example: one chemical system
# with a single band-gap property, dumped as indented JSON.
chemical_system = ChemicalSystem()
chemical_system.chemical_formula = 'MgO2'
band_gap = Property()
band_gap.name = 'Band gap'
band_gap.scalars = 7.8
band_gap.units = 'eV'
# NOTE(review): pypif's `properties` field normally holds a *list* of
# Property objects; assigning a bare Property relies on pypif accepting /
# normalizing a scalar — confirm against the pypif docs.
chemical_system.properties = band_gap
print(pif.dumps(chemical_system, indent=4))
|
import glob
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
from tqdm import tqdm
import os
from pathlib import Path
from defisheye import Defisheye
# Defisheye configuration: source-lens and output fields of view, mapping
# type, and frame format.
fov = 180
pfov = 120
dtype = "equalarea"  # alternatives: "linear", "stereographic", "orthographic"
format_ = "fullframe"  # alternative: "circular"
dataset_dir = "images"
images_dir = dataset_dir + "/images3/"  ##enter dataset directory of frames
'''
Data format will be like:
../images3/
01_frame.jpg
02_frame.jpg
..
..
..
'''
# Collect frame basenames and sort lexicographically (idiomatic sorted()
# replaces the original list.sort(img_list); Path(name).name replaces the
# roundabout stem + '.jpg').
img_list = sorted(Path(name).name for name in glob.glob(images_dir + "*.jpg"))
print("Total images: " + str(len(img_list)))

# Derive the writer's frame size from the first frame; every converted
# frame must match it for VideoWriter to accept it.
frame0 = cv2.imread(os.path.join(images_dir, img_list[0]))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
height, width, layers = frame0.shape
output_video = cv2.VideoWriter('defished.avi', fourcc, 20, (width, height))  ##saved video format, name

# Undistort each frame and append it to the output video.
for img_file in tqdm(img_list):
    img = cv2.imread(os.path.join(images_dir, img_file))
    obj = Defisheye(img, dtype=dtype, format=format_, fov=fov, pfov=pfov)
    output_video.write(obj.convert())
output_video.release()
|
import os
import logging
# from flask_migrate import Migrate, MigrateCommand
# from flask_script import Manager
# from webapp import blueprint
from app import create_app, db, migrate
from loguru import logger
import unittest
# http://127.0.0.1:5000/api/v1/documentation
# Absolute path of the directory containing this file.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
class InterceptHandler(logging.Handler):
    """Bridges stdlib `logging` records into the loguru logger."""
    def emit(self, record):
        """sets logger options
        Parameters:
        record-- logging record
        Returns:
        None--
        """
        # depth=1 makes loguru report the original caller, not this handler.
        logger_opt = logger.opt(depth=1, exception=record.exc_info)
        logger_opt.log(record.levelno, record.getMessage())
# app = create_app(os.getenv("FLASK_ENV") or "localdev")
app = create_app()
# more log configuration for rotation, retention, and level
logger.add(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), "logs/events.log"),
    level="DEBUG",
    format="{time} {level} {message}",
    backtrace=True,
    rotation="5 MB",   # roll the file once it reaches 5 MB
    retention=9,       # keep at most 9 rotated files
)
# Route stdlib logging (level 20 == INFO and above) through loguru.
app.logger.addHandler(InterceptHandler())
logging.basicConfig(handlers=[InterceptHandler()], level=20)
# Push an app context so extensions (db, etc.) can be used at import time.
app.app_context().push()
@app.after_request
def after_request(response):
    """allows cookies for CORS
    Parameters:
    response-- flask response
    Returns:
    response-- flask response
    """
    # Required so browsers send credentials (cookies) on cross-origin calls.
    response.headers.add("Access-Control-Allow-Credentials", "true")
    return response
@app.teardown_request
def teardown_request(exception):
    """Roll back the DB session on request failure, then release it."""
    if exception:
        db.session.rollback()
    # Always return the session to the pool at request end.
    db.session.remove()
# # creates app manager
# manager = Manager(app)
# # # sets migration operator
# # migrate = Migrate(app, db, compare_type=True)
# # # adds migration functionality
# # manager.add_command("db", MigrateCommand)
# @manager.command
# def run():
# """Command line argument to run the app
# Returns:
# None--
# """
# app.run()
# @manager.command
# def test():
# """runs the unit tests
# Returns:
# int-- 0 if tests are successful
# """
# tests = unittest.TestLoader().discover("webapp/test", pattern="*test.py")
# result = unittest.TextTestRunner(verbosity=2).run(tests)
# if result.wasSuccessful():
# return 0
# return 1
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
def expand_package_data(src_dirs, strip=""):
    """Recursively collect every file under each directory in `src_dirs`.

    Each path is returned with the leading `strip` prefix removed, turning
    on-disk source paths into package-relative package_data entries.
    """
    ret = []
    for src_dir in src_dirs:
        for path, _dnames, fnames in os.walk(src_dir):
            for fname in fnames:
                full = os.path.join(path, fname)
                # Strip only a *leading* prefix.  The original used
                # str.replace, which also mangled the path if `strip`
                # happened to occur in the middle of it.
                if strip and full.startswith(strip):
                    full = full[len(strip):]
                ret.append(full)
    return ret
# Run setup from this file's directory regardless of invocation cwd.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): the ${app_name} placeholders are substituted by Hue's
# app-scaffolding tool before this setup.py is actually used.
setup(
      name = "${app_name}",
      version = "0.1",
      url = 'gethue.com',
      description = 'A new Hue App',
      author = 'Elephant',
      packages = find_packages('src'),
      package_dir = {'': 'src'},
      install_requires = ['setuptools', 'desktop'],
      entry_points = { 'desktop.sdk.application': '${app_name}=${app_name}' },
      zip_safe = False,
      package_data = {
        # Include static resources.  Package_data doesn't
        # deal well with directory globs, so we enumerate
        # the files manually.
        '${app_name}': expand_package_data(
          ["src/${app_name}/templates", "src/${app_name}/static"],
          "src/${app_name}/")
      }
)
|
"""
Problem 6: an athlete runs `a` km on day 1 and improves the result by 10%
every day.  Read `a` and `b` and print the first day on which the daily
distance is at least `b` km, echoing each day's distance along the way.

Example: a = 2, b = 3 -> day 6 (2, 2.2, 2.42, 2.66, 2.93, 3.22).
"""
start = int(input("Количество килиметров в первый день: "))
target = int(input("Укажите целевое расстояние: "))
# Validate *before* emitting any progress output (the original printed the
# day-1 line even for invalid, non-positive input).
if start <= 0 or target <= 0:
    print('Ошибка, введите положительное число больше ноля')
else:
    day = 1
    print(f"{day}-й день: {start}")
    # Strict `<` (not `<=`): if day 1 already meets the target, the answer
    # is day 1 — the original ran one extra iteration when a == b.
    while start < target:
        start *= 1.1
        day += 1
        print(f"{day}-й день: {round(start,2)}")
    print(f"Hа {day}-й день спортсмен достиг результата — не менее {round(start)} км.")
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from lxml import etree
import sys
from django.core import management
from django.core.management.base import BaseCommand
from django.db import transaction
from desktop.conf import USE_NEW_EDITOR
from desktop.models import Directory, Document, Document2, Document2Permission
from hadoop import cluster
from liboozie.submittion import create_directories
from notebook.models import make_notebook
from useradmin.models import get_default_user_group, install_sample_user
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR, ENABLE_V2
from oozie.models import Workflow, Coordinator, Bundle
from oozie.importlib.workflows import import_workflow_root
from oozie.importlib.coordinators import import_coordinator_root
from oozie.importlib.bundles import import_bundle_root
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
# NOTE(review): getLogger() with no name returns the *root* logger;
# modules conventionally use logging.getLogger(__name__) — confirm intent.
LOG = logging.getLogger()
class Command(BaseCommand):
    """
    Install the Oozie examples.

    Imports the sample workflows/coordinators/bundles shipped with Hue, copies
    the example binaries and data into HDFS, creates the sample editor jobs
    (MapReduce, Java, Spark, PySpark) as Document2 objects, moves them into the
    sample user's examples directory and shares them with the default group.
    """

    def _import_workflows(self, directory, managed=True):
        """Install every workflow.zip found in the sub-directories of `directory`."""
        for example_directory_name in os.listdir(directory):
            example_path = os.path.join(directory, example_directory_name)
            if os.path.isdir(example_path):
                # Zip archives are binary data: 'rb' is required under Python 3.
                with open(os.path.join(example_path, 'workflow.zip'), 'rb') as fp:
                    workflow_xml, metadata = Workflow.decompress(fp)
                workflow_root = etree.fromstring(workflow_xml)
                try:
                    # Skip workflows that are already installed.
                    Workflow.objects.get(name=workflow_root.get('name'), managed=managed)
                except Workflow.DoesNotExist:
                    LOG.info(_("Installing workflow %s") % workflow_root.get('name'))
                    LOG.debug("Workflow definition:\n%s" % workflow_xml)
                    workflow = Workflow.objects.new_workflow(owner=self.user)
                    workflow.is_shared = True
                    workflow.managed = managed
                    workflow.name = workflow_root.get('name')
                    workflow.save()
                    Workflow.objects.initialize(workflow)
                    import_workflow_root(workflow=workflow, workflow_definition_root=workflow_root, metadata=metadata, fs=self.fs)
                    workflow.doc.all().delete()  # Delete doc as it messes up the example sharing

    def _import_coordinators(self, directory):
        """Install every coordinator.zip found in the sub-directories of `directory`."""
        for example_directory_name in os.listdir(directory):
            example_path = os.path.join(directory, example_directory_name)
            if os.path.isdir(example_path):
                # Zip archives are binary data: 'rb' is required under Python 3.
                with open(os.path.join(example_path, 'coordinator.zip'), 'rb') as fp:
                    coordinator_xml, metadata = Coordinator.decompress(fp)
                coordinator_root = etree.fromstring(coordinator_xml)
                try:
                    # Skip coordinators that are already installed.
                    Coordinator.objects.get(name=coordinator_root.get('name'))
                except Coordinator.DoesNotExist:
                    LOG.info(_("Installing coordinator %s") % coordinator_root.get('name'))
                    LOG.debug("Coordinator definition:\n%s" % coordinator_xml)
                    coordinator = Coordinator(owner=self.user, is_shared=True)
                    coordinator.name = coordinator_root.get('name')
                    coordinator.save()
                    import_coordinator_root(coordinator=coordinator, coordinator_definition_root=coordinator_root, metadata=metadata)

    def _import_bundles(self, directory):
        """Install every bundle.zip found in the sub-directories of `directory`."""
        for example_directory_name in os.listdir(directory):
            example_path = os.path.join(directory, example_directory_name)
            if os.path.isdir(example_path):
                # Zip archives are binary data: 'rb' is required under Python 3.
                with open(os.path.join(example_path, 'bundle.zip'), 'rb') as fp:
                    bundle_xml, metadata = Bundle.decompress(fp)
                bundle_root = etree.fromstring(bundle_xml)
                try:
                    # Skip bundles that are already installed.
                    Bundle.objects.get(name=bundle_root.get('name'))
                except Bundle.DoesNotExist:
                    LOG.info(_("Installing bundle %s") % bundle_root.get('name'))
                    LOG.debug("Bundle definition:\n%s" % bundle_xml)
                    bundle = Bundle(owner=self.user, is_shared=True)
                    bundle.name = bundle_root.get('name')
                    bundle.save()
                    import_bundle_root(bundle=bundle, bundle_definition_root=bundle_root, metadata=metadata)

    def _install_editor_example(self, name, doc_type, description, editor_type,
                                snippet_properties, installed_msg, error_msg):
        """
        Create (at most once) a sample editor job as a Document2 and return it.

        :param name: localized document name used for the existence check.
        :param doc_type: Document2 `type` value (e.g. 'query-mapreduce').
        :param description: localized document description.
        :param editor_type: editor type passed to make_notebook().
        :param snippet_properties: per-job snippet properties dict.
        :param installed_msg: log message when the example already exists.
        :param error_msg: '%s'-style log message used on creation failure.
        :return: the Document2 instance, or None if creation failed.
        """
        doc2 = None

        if Document2.objects.filter(owner=self.user, name=name, type=doc_type, is_history=False).exists():
            LOG.info(installed_msg)
            doc2 = Document2.objects.get(owner=self.user, name=name, type=doc_type, is_history=False)
        else:
            notebook = make_notebook(
                name=name,
                description=description,
                editor_type=editor_type,
                statement='',
                status='ready',
                snippet_properties=snippet_properties,
                is_saved=True
            )

            # Remove functions and settings from snippet properties; they are
            # not part of the stored example definition.
            data = notebook.get_data()
            data['snippets'][0]['properties'].pop('functions')
            data['snippets'][0]['properties'].pop('settings')

            try:
                with transaction.atomic():
                    doc2 = Document2.objects.create(
                        owner=self.user,
                        name=data['name'],
                        type=doc_type,
                        description=data['description'],
                        data=json.dumps(data)
                    )
            except Exception as e:
                LOG.exception(error_msg % e)
                # Just to be sure we delete the Doc2 object in case of exception.
                # Possible when there are mixed InnoDB and MyISAM tables.
                if doc2 and Document2.objects.filter(id=doc2.id).exists():
                    doc2.delete()
                # Reset the reference so callers do not resurrect the deleted
                # row by saving the stale instance again.
                doc2 = None

        return doc2

    def _install_mapreduce_example(self):
        """Install the sample MapReduce sleep job; returns its Document2 or None."""
        snippet_properties = {
            'app_jar': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
            'hadoopProperties': ['mapred.mapper.class=org.apache.hadoop.examples.SleepJob',
                                 'mapred.reducer.class=org.apache.hadoop.examples.SleepJob',
                                 'mapred.mapoutput.key.class=org.apache.hadoop.io.IntWritable',
                                 'mapred.mapoutput.value.class=org.apache.hadoop.io.NullWritable',
                                 'mapred.output.format.class=org.apache.hadoop.mapred.lib.NullOutputFormat',
                                 'mapred.input.format.class=org.apache.hadoop.examples.SleepJob$SleepInputFormat',
                                 'mapred.partitioner.class=org.apache.hadoop.examples.SleepJob',
                                 'sleep.job.map.sleep.time=5', 'sleep.job.reduce.sleep.time=10'],
            'archives': [],
            'jars': []
        }
        return self._install_editor_example(
            name=_('MapReduce Sleep Job'),
            doc_type='query-mapreduce',
            description=_('Sleep: Example MapReduce job'),
            editor_type='mapreduce',
            snippet_properties=snippet_properties,
            installed_msg="Sample mapreduce editor job already installed.",
            error_msg="Failed to create sample mapreduce job document: %s"
        )

    def _install_java_example(self):
        """Install the sample Java TeraGen job; returns its Document2 or None."""
        snippet_properties = {
            'app_jar': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
            'class': 'org.apache.hadoop.examples.terasort.TeraGen',
            'java_opts': '',
            'hadoopProperties': [],
            'archives': [],
            'files': [],
            'arguments': ['10000', 'output_dir/teragen'],
            'capture_output': False
        }
        return self._install_editor_example(
            name=_('Java TeraGen Job'),
            doc_type='query-java',
            description=_('TeraGen: Generates N rows of random data to a directory.'),
            editor_type='java',
            snippet_properties=snippet_properties,
            installed_msg="Sample Java editor job already installed.",
            error_msg="Failed to create sample Java job document: %s"
        )

    def _install_spark_example(self):
        """Install the sample Spark file-copy job; returns its Document2 or None."""
        snippet_properties = {
            'jars': ['/user/hue/oozie/workspaces/workflows/spark-scala/lib/oozie-examples.jar'],
            'class': 'org.apache.oozie.example.SparkFileCopy',
            'app_name': '',
            'spark_opts': [],
            'spark_arguments': [
                "/user/hue/oozie/workspaces/data/sonnets.txt",
                "sonnets"
            ],
            'files': []
        }
        return self._install_editor_example(
            name=_('Spark File Copy Job'),
            doc_type='query-spark2',
            description=_('File Copy: Example Spark job'),
            editor_type='spark2',
            snippet_properties=snippet_properties,
            installed_msg="Sample Spark editor job already installed.",
            error_msg="Failed to create sample Spark job document: %s"
        )

    def _install_pyspark_example(self):
        """Install the sample PySpark Pi-estimator job; returns its Document2 or None."""
        snippet_properties = {
            'jars': ['/user/hue/oozie/workspaces/lib/pi.py'],
            'class': '',
            'app_name': '',
            'spark_opts': [],
            'spark_arguments': [],
            'files': []
        }
        return self._install_editor_example(
            name=_('PySpark Pi Estimator Job'),
            doc_type='query-spark2',
            description=_('Pi Estimator: Example PySpark job'),
            editor_type='spark2',
            snippet_properties=snippet_properties,
            installed_msg="Sample pyspark editor job already installed.",
            error_msg="Failed to create sample PySpark job document: %s"
        )

    def install_examples(self):
        """Import the unmanaged sample workflows from the local sample directory."""
        data_dir = LOCAL_SAMPLE_DIR.get()
        unmanaged_dir = os.path.join(data_dir, 'unmanaged')
        self._import_workflows(unmanaged_dir, managed=False)

    def handle(self, *args, **options):
        """
        Entry point of the management command.

        Creates the sample user and its HDFS workspace, copies the example
        binaries and data into HDFS, then loads/creates the editor examples
        and shares them with the default group.
        """
        self.user = install_sample_user()
        self.fs = cluster.get_hdfs()

        LOG.info(_("Creating sample directory '%s' in HDFS") % REMOTE_SAMPLE_DIR.get())
        create_directories(self.fs, [REMOTE_SAMPLE_DIR.get()])
        remote_dir = REMOTE_SAMPLE_DIR.get()

        # Copy examples binaries
        for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
            local_dir = self.fs.join(LOCAL_SAMPLE_DIR.get(), name)
            remote_data_dir = self.fs.join(remote_dir, name)
            LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
                'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
            self.fs.do_as_user(self.user.username, self.fs.copyFromLocal, local_dir, remote_data_dir)

        # Copy sample data
        local_dir = LOCAL_SAMPLE_DATA_DIR.get()
        remote_data_dir = self.fs.join(remote_dir, 'data')
        LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
            'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
        self.fs.do_as_user(self.user.username, self.fs.copyFromLocal, local_dir, remote_data_dir)

        # Get or create sample user directories
        home_dir = Directory.objects.get_home_directory(self.user)
        examples_dir, created = Directory.objects.get_or_create(
            parent_directory=home_dir,
            owner=self.user,
            name=Document2.EXAMPLES_DIR
        )

        # Load jobs
        LOG.info(_("Installing examples..."))

        if ENABLE_V2.get():
            with transaction.atomic():
                # The `commit` keyword was removed from loaddata in newer Django
                # versions, hence the Python-version split mirroring the import block.
                if sys.version_info[0] > 2:
                    management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
                else:
                    management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2, commit=False)

        # Install editor oozie examples without doc1 link
        LOG.info("Using Hue 4, will install oozie editor samples.")

        example_jobs = []
        example_jobs.append(self._install_mapreduce_example())
        example_jobs.append(self._install_java_example())
        example_jobs.append(self._install_spark_example())
        example_jobs.append(self._install_pyspark_example())

        # If documents exist but have been trashed, recover from Trash
        for doc in example_jobs:
            if doc is not None and doc.parent_directory != examples_dir:
                doc.parent_directory = examples_dir
                doc.save()

        # Share oozie examples with default group
        oozie_examples = Document2.objects.filter(
            type__in=['oozie-workflow2', 'oozie-coordinator2', 'oozie-bundle2'],
            owner=self.user,
            parent_directory=None
        )
        oozie_examples.update(parent_directory=examples_dir)
        examples_dir.share(self.user, Document2Permission.READ_PERM, groups=[get_default_user_group()])
|
"""
Definition of TreeNode:
"""
class TreeNode:
    """A binary-tree node: a value plus left/right child links (initially None)."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class ReturnType:
    """
    Aggregate carried up the recursion:
    cur_sum  - sum of the current subtree,
    min_sum  - smallest subtree sum seen so far,
    subtree  - root node of that minimal subtree.
    """

    def __init__(self, cur_sum, min_sum, subtree):
        self.cur_sum, self.min_sum, self.subtree = cur_sum, min_sum, subtree
class Solution:
    """
    @param root: the root of binary tree
    @return: the value of the root of the minimum-sum subtree, or None for an
             empty tree
    """

    def findSubtree(self, root):
        # Guard the empty tree: the helper returns subtree=None for it, and
        # dereferencing `.val` on None would raise AttributeError.
        if root is None:
            return None
        result = self.findSubtreeHelper(root)
        return result.subtree.val

    def findSubtreeHelper(self, root):
        """
        Post-order helper returning a ReturnType with the subtree sum at
        `root`, the minimal subtree sum seen anywhere below, and its node.
        """
        if root is None:
            # Neutral element: contributes 0 to sums and never wins the minimum.
            return ReturnType(0, float('inf'), None)

        left_re = self.findSubtreeHelper(root.left)
        right_re = self.findSubtreeHelper(root.right)

        # Sum of the whole subtree rooted here; it is also the minimum
        # candidate before considering the children's results.
        total = left_re.cur_sum + right_re.cur_sum + root.val
        result = ReturnType(total, total, root)

        # `<=` deliberately prefers a deeper subtree on ties, matching the
        # original behavior.
        if left_re.min_sum <= result.min_sum:
            result.min_sum = left_re.min_sum
            result.subtree = left_re.subtree
        if right_re.min_sum <= result.min_sum:
            result.min_sum = right_re.min_sum
            result.subtree = right_re.subtree
        return result
if __name__ == '__main__':
    # Build the sample tree:
    #         1
    #       /   \
    #     -5     2
    #     / \   /
    #    0   2 -4
    #     \
    #      -5
    n1, n2, n3, n4, n5, n6, n7 = [TreeNode(v) for v in (1, -5, 2, 0, 2, -4, -5)]
    n1.left, n1.right = n2, n3
    n2.left, n2.right = n4, n5
    n3.left = n6
    n4.right = n7
    print(Solution().findSubtree(n1))
from django.contrib import admin
from .models import Room, Reservation, ReservedRoom, BigText, Schedule, DeletedReservation
# Register every reservation-app model with the default admin site,
# in the same order as before.
for model in (Reservation, DeletedReservation, Room, ReservedRoom, Schedule, BigText):
    admin.site.register(model)
from train_and_eval_utils import CNNDailyMailDataset
from pytorch_transformers import XLNetTokenizer
# Each entry is an (article_hash, data_dir) pair: the hash identifies one
# CNN/DailyMail story inside the dev-data shard given by the path.
# Presumably "best"/"worst" refer to model scores from two evaluation runs —
# TODO confirm against the evaluation notebook.
best_list = [("1a0cb94420e0e1ce99d2e67607175aa067d2adae", "../data/dev_data0-9/dev_data_5"),
("76f3f120cf62c5fc7a0cd0c95d3245775e41834a", "../data/dev_data0-9/dev_data_6"),
("3bb5c44400b711cb49c9e0ad025ae674a81ea43f", "../data/dev_data0-9/dev_data_6")]
worst_list = [("eaeaab1f32b0bd58ccbadc269331da95a227b77a", "../data/dev_data0-9/dev_data_3"),
("121fd3cf9ce903f515797dc1a9e7ffe03daf5820", "../data/dev_data0-9/dev_data_7"),
("0d593c3807c979dfd4b0a0a380ce911e9e07e2f4", "../data/dev_data0-9/dev_data_7")]
best_list2 = [("3bb5c44400b711cb49c9e0ad025ae674a81ea43f","../data/dev_data0-9/dev_data_6"),
("692781308ebf134e5e2aaac2ced718d0bba6bada","../data/dev_data0-9/dev_data_7"),
("64c48d1888ed32f5adc887275a87a488f5a5fcc8", "../data/dev_data0-9/dev_data_1")]
worst_list2 = [("093b975f7dffb6040ffee0729233f473de0f82db", "../data/dev_data0-9/dev_data_4"),
("4358634a59d0aad941da3d9997650d40665f6d79", "../data/dev_data0-9/dev_data_8"),
("8fbee4db6fc56a137a2b87991337d8fa066817dc", "../data/dev_data0-9/dev_data_5")]
# The tokenizer is loop-invariant and expensive to build, so load it once
# up front instead of once per article.
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

# Print the (truncated) story and its reference summary for each selected article.
for article, origin in worst_list2:
    dataset = CNNDailyMailDataset(tokenizer=tokenizer, data_dir=origin)
    # Locate the story file whose path contains the article hash.
    storyname = next(st for st in dataset.stories_path if article in st)
    index = dataset.stories_path.index(storyname)
    _, story, summary = dataset[index]
    # Keep only the first 67 tokens — presumably the truncation length used
    # elsewhere in the experiment; TODO confirm.
    story_encoded = tokenizer.encode(" ".join(story))[:67]
    print(tokenizer.decode(story_encoded))
    print(" ".join(summary))
|
from logging import getLogger
from unittest import TestCase
import requests
logger = getLogger(__name__)
BASE_URL = "http://calendar:8123"
def get_calendar(calendar, **params):
    """GET the dates of calendar number `calendar`; returns the raw HTTP response."""
    # Build the per-calendar URL, then forward any query parameters.
    endpoint = "{}/calendars/{}/dates/".format(BASE_URL, calendar)
    return requests.get(endpoint, params=params)
def add_date(calendar, **data):
    """POST a new date/event entry to calendar number `calendar`; returns the response."""
    endpoint = "{}/calendars/{}/".format(BASE_URL, calendar)
    return requests.post(endpoint, data=data)
class Base(TestCase):
    """Shared fixture: every test starts from a freshly reset calendar service."""

    def setUp(self):
        super().setUp()
        # Ask the service to restore its initial seed data so tests are independent.
        requests.get("{}/reset_calendars".format(BASE_URL))
class TestGetCalendar(Base):
    """Read-side tests for GET /calendars/<id>/dates/."""
    # NOTE: the previous setUp override only called super() and added nothing,
    # so it has been removed.

    def test_calendar_not_found(self):
        """
        Validates that a non-existing calendar returns a 404 with an empty body.
        """
        response = get_calendar(1000)
        # Checking the status code
        self.assertEqual(response.status_code, 404)
        # Checking the response itself
        self.assertEqual(response.json(), {})

    def test_get_empty_calendar(self):
        """
        Validates that an existing but empty calendar returns an empty dict.
        """
        response = get_calendar(0)
        # Checking the status code
        self.assertEqual(response.status_code, 200)
        # Checking the response itself
        self.assertEqual(response.json(), {})

    def test_get_calendar_with_data(self):
        """
        Validates that calendar #1 returns exactly its seeded date->event mapping.
        """
        response = get_calendar(1)
        data = {
            "2018-01-01": "Go fishing",
            "2018-01-11": "Go surfing",
            "2018-02-12": "Go skying",
            "2018-03-09": "Go swimming",
        }
        # Checking the status code
        self.assertEqual(response.status_code, 200)
        # Checking the response itself
        self.assertEqual(response.json(), data)
class TestAddEntry(Base):
    """Write-side tests for POST /calendars/<id>/."""

    def test_add_event_to_available_date(self):
        """
        In this test, we'll check that adding a date correctly returns a 201 status code
        and that the date is added
        """
        response = add_date(calendar=1, date="2019-10-10", event="Go hiking")
        # Checking the status code
        self.assertEqual(response.status_code, 201)
        # Confirmation that the entry has been added to the db
        all_dates = get_calendar(1).json()
        self.assertIn("2019-10-10", all_dates)

    def test_error_409_adding_event_when_date_is_taken(self):
        """
        In this test, we'll check that when we try to add an event to a date that
        already contain an event, we get an error with status 409, and the new event
        is not added.
        """
        # Calendar #1 is seeded with "Go surfing" on 2018-01-11.
        response = add_date(calendar=1, date="2018-01-11", event="Go kayaking")
        self.assertEqual(response.status_code, 409)
        # The pre-existing event must not have been overwritten.
        self.assertEqual(get_calendar(1).json().get("2018-01-11"), "Go surfing")

    def test_error_404_adding_date_to_non_existing_calendar(self):
        """
        In this test, we'll check that if we try to add an event to a calendar that does
        not exist, we get an error with status 404, and the new event is not added.
        """
        response = add_date(calendar=32, date="2018-01-11", event="Nomnomnom")
        self.assertEqual(response.status_code, 404)
|
from ..Base.Patch import Patch
class LymphNode(Patch):
    """A Patch subclass representing a lymph node."""

    def __init__(self, patch_id, species, loads, position):
        # Delegate all state initialisation to the Patch base class.
        Patch.__init__(self, patch_id, species, loads, position)

    def __str__(self):
        # self.id is presumably set by Patch from patch_id — TODO confirm in Base.Patch
        return "LN {0}".format(self.id)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
# Extracted items -> temporary containers -> then to the csv
import scrapy
class Scrapy2Item(scrapy.Item):
    """One scraped record; per the module header, items end up in a CSV."""

    theindex = scrapy.Field()           # presumably the item's position in the crawl — TODO confirm
    theurl = scrapy.Field()             # source page URL
    sku = scrapy.Field()
    title = scrapy.Field()
    artist = scrapy.Field()
    image_urls = scrapy.Field()         # conventionally consumed by scrapy's ImagesPipeline — verify settings
    tracklist = scrapy.Field()
    description = scrapy.Field()
    image_db_filepath = scrapy.Field()  # local path the downloaded image is stored under — TODO confirm
    allowonweb = scrapy.Field()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.