text stringlengths 38 1.54M |
|---|
#!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
# Filename: primeNum.py
# author by : Lexi
# Prime-number check: a natural number greater than 1 is prime when it has no
# divisors other than 1 and itself (2, 3, 5, 7, ...).
# Fixes over the original: the encoding declaration was misspelled
# ("conding"), and trial division ran all the way to num - it only needs to
# go up to sqrt(num), because any factor above sqrt(num) pairs with one below.
while True:
    try:
        num = int(input('输入一个整数:'))
    except ValueError:
        print("输入的不是整数!")
        continue
    # Primes are greater than 1.
    if num > 1:
        # Look for a factor up to and including int(sqrt(num)).
        for i in range(2, int(num ** 0.5) + 1):
            if (num % i) == 0:
                print(num, "不是质数")
                print(i, "乘以", num // i, "是", num)
                break
        else:
            # for-else: no factor found, so num is prime.
            print(num, "是质数")
    else:
        # Numbers <= 1 are not prime by definition.
        print(num, "不是质数")
    break
|
#!/usr/bin/env python
# Setuptools packaging script for drongo, a nano web-framework.
from setuptools import find_packages, setup

setup(
    name='drongo',
    version='1.2.0',
    description='A nano web-framework for python.',
    author='Sattvik Chakravarthy, Sagar Chakravarthy',
    author_email='sattvik@gmail.com',
    # Trove classifiers describing maturity, environment and platforms.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    # Pick up every package found in the source tree.
    packages=find_packages(),
    url='https://github.com/drongo-framework/drongo',
    include_package_data=True,
    # Runtime dependency: six (py2/py3 compatibility shims).
    install_requires=[
        'six',
    ],
    # Do not install as a zipped egg.
    zip_safe=False,
)
|
#!/usr/bin/env python
#
# glvector_funcs.py - Functions used by glrgbvector_funcs and
# gllinevector_funcs.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains logic for managing vertex and fragment shader programs
used for rendering :class:`.GLRGBVector` and :class:`.GLLineVector` instances.
These functions are used by the :mod:`.gl14.glrgbvector_funcs` and
:mod:`.gl14.gllinevector_funcs` modules.
"""
import numpy as np
import fsleyes.gl.shaders as shaders
import fsl.utils.transform as transform
def destroy(self):
    """Destroys the vertex/fragment shader programs created in :func:`init`.
    """
    # Detach the program from self before releasing its GL resources.
    shader, self.shader = self.shader, None
    shader.destroy()
def compileShaders(self, vertShader):
    """Compiles the vertex/fragment shader programs (by creating a
    :class:`.ARBPShader` instance).

    If the :attr:`.VectorOpts.colourImage` property is set, the ``glvolume``
    fragment shader is used.  Otherwise, the ``glvector`` fragment shader
    is used.
    """
    # Throw away any previously compiled program.
    if self.shader is not None:
        self.shader.destroy()

    useVolumeFragShader = self.opts.colourImage is not None

    if useVolumeFragShader:
        fragShader = 'glvolume'
        # Texture unit bindings expected by the glvolume fragment program.
        textures = {
            'clipTexture'      : 1,
            'imageTexture'     : 2,
            'colourTexture'    : 3,
            'negColourTexture' : 3,
        }
    else:
        fragShader = 'glvector'
        # Texture unit bindings expected by the glvector fragment program.
        textures = {
            'modulateTexture' : 0,
            'clipTexture'     : 1,
            'vectorTexture'   : 4,
        }

    vertSrc = shaders.getVertexShader(vertShader)
    fragSrc = shaders.getFragmentShader(fragShader)

    self.shader = shaders.ARBPShader(vertSrc,
                                     fragSrc,
                                     shaders.getShaderDir(),
                                     textures)
def updateShaderState(self):
    """Updates the state of the vector vertex and fragment shaders - the
    fragment shader may be either the ``glvolume`` or the ``glvector``
    shader (see :func:`compileShaders`).
    """
    opts = self.opts
    useVolumeFragShader = opts.colourImage is not None

    modLow,  modHigh  = self.getModulateRange()
    clipLow, clipHigh = self.getClippingRange()

    # Packed as a 4-vector for the fragment program; the trailing -1 values
    # appear to be padding/flags - TODO confirm against the shader source.
    clipping = [clipLow, clipHigh, -1, -1]

    # Degenerate modulation range: disable modulation rather than dividing
    # by (near-)zero. Third element is the modulation scale factor.
    if np.isclose(modHigh, modLow):
        mod = [0, 0, 0, -1]
    else:
        mod = [modLow, modHigh, 1.0 / (modHigh - modLow), -1]

    # Inputs which are required by both the
    # glvolume and glvetor fragment shaders
    self.shader.setFragParam('clipping', clipping)

    # Transforms from display to auxiliary-texture coordinates.
    clipCoordXform   = self.getAuxTextureXform('clip')
    colourCoordXform = self.getAuxTextureXform('colour')
    modCoordXform    = self.getAuxTextureXform('modulate')
    self.shader.setVertParam('clipCoordXform', clipCoordXform)
    self.shader.setVertParam('colourCoordXform', colourCoordXform)
    self.shader.setVertParam('modCoordXform', modCoordXform)

    if useVolumeFragShader:
        # Combine voxel-value and colour-map transforms; only the scale and
        # offset components ([0,0] and [0,3]) are passed to the shader.
        voxValXform = self.colourTexture.voxValXform
        cmapXform   = self.cmapTexture.getCoordinateTransform()
        voxValXform = transform.concat(cmapXform, voxValXform)
        voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
        self.shader.setFragParam('voxValXform', voxValXform)
    else:
        # glvector path: per-axis colours plus modulation parameters.
        colours, colourXform = self.getVectorColours()
        voxValXform = self.imageTexture.voxValXform
        voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
        self.shader.setFragParam('voxValXform', voxValXform)
        self.shader.setFragParam('mod', mod)
        self.shader.setFragParam('xColour', colours[0])
        self.shader.setFragParam('yColour', colours[1])
        self.shader.setFragParam('zColour', colours[2])
        self.shader.setFragParam('colourXform', [colourXform[0, 0],
                                                 colourXform[0, 3], 0, 0])
    return True
|
"""
Compute A+B, where A and B are integers with -10**17 < A, B < 10**17.

Input format:
The first input line contains the two numbers A and B (-10**17 < A, B < 10**17).

Output format:
A line containing the sum of the two numbers.
"""
A, B = map(int, input().split())
# Renamed from 'sum', which shadowed the builtin of the same name.
total = A + B
print(A, '+', B, '=', total)
|
# 4. Write a Python program to get a single string from two given strings, separated
# by a space and swap the first two characters of each string.
# Sample String : 'abc', 'xyz'
# Expected Result : 'xyc abz'
word1 = input("Enter the first string : ")
word2 = input("Enter the second string : ")
# BUG FIX: str.replace() substituted EVERY occurrence of the two-character
# prefix anywhere in the string (and misbehaved when the prefixes overlapped
# or repeated). Slicing swaps exactly the first two characters.
first = word2[:2] + word1[2:]
second = word1[:2] + word2[2:]
print(first + ' ' + second)
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 17:20:30 2018
@author: likkhian
"""
import numpy as np
import tensorflow as tf
import os
from random import shuffle
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features,labels,mode):
    '''model function for cnn

    tf.estimator model_fn: a two-conv-layer CNN over 84x168 single-channel
    inputs, classifying into 12 classes, with extensive TensorBoard
    summaries (filters, activations, confusion matrix, misclassified images).

    :param features: dict with key 'x' holding the flat input batch
    :param labels: sparse integer class labels
    :param mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT)
    :return: a tf.estimator.EstimatorSpec appropriate for `mode`
    '''
    #input layer: reshape flat features to NHWC (batch, 84, 168, 1)
    input_layer=tf.reshape(features['x'],[-1,84,168,1])
    #convolutional layer 1, including namescopes. Examine conv2d vs Conv2d!
    #Conv2d is a class. conv2d is a function that uses the Conv2d class.
    #to get more info, look up programmers_guide/low_level_intro and layer Function shortcuts
    #Use the class to dig into layer detail.
    with tf.name_scope('lik_conv1'):
        conv1=tf.layers.conv2d(
            inputs=input_layer,
            filters=32,
            kernel_size=[5,5],
            padding='same',
            activation=tf.nn.relu,
            name='conv1')
        # Fetch conv1's kernel/bias variables for TensorBoard visualisation.
        conv1_kernel=tf.get_collection(tf.GraphKeys.VARIABLES, 'conv1/kernel')[0]
        # [h, w, in, out] -> [out, h, w, in] so each filter renders as an image.
        conv1_kernel_transposed=tf.transpose(conv1_kernel,[3,0,1,2])
        conv1_bias = tf.get_collection(tf.GraphKeys.VARIABLES, 'conv1/bias')[0]
        # Rearrange activations so each of the 32 channels becomes an image.
        convimg1=tf.reshape(conv1,[-1,84,168,32,1])
        convimg2=tf.transpose(convimg1,[0,3,1,2,4])
    #pooling layer 1: 84x168 -> 28x28 (pool 3x6, stride 3x6)
    pool1=tf.layers.max_pooling2d(inputs=conv1,pool_size=[3,6],strides=[3,6])
    #convolutional layer 2, and pooling layer 2
    with tf.name_scope('lik_conv2'):
        conv2=tf.layers.conv2d(
            inputs=pool1,
            filters=64,
            kernel_size=[5,5],
            padding='same',
            activation=tf.nn.relu)
    # 28x28 -> 7x7
    pool2=tf.layers.max_pooling2d(inputs=conv2,pool_size=[4,4],strides=4)
    #dense layer over the flattened 7*7*64 feature maps
    pool2_flat=tf.reshape(pool2,[-1,7*7*64])
    dense=tf.layers.dense(inputs=pool2_flat,units=1024,activation=tf.nn.relu)
    #extract weights and bias for tensorboard histogram
    weights = tf.get_default_graph().get_tensor_by_name(os.path.split(dense.name)[0] + '/kernel:0')
    bias = tf.get_default_graph().get_tensor_by_name(os.path.split(dense.name)[0] + '/bias:0')
    # Dropout is only active while training.
    dropout=tf.layers.dropout(inputs=dense,rate=0.4,
        training=mode==tf.estimator.ModeKeys.TRAIN)
    #logits layer: 12 output classes
    logits=tf.layers.dense(inputs=dropout,units=12)
    predictions={
        #generate predictions (for PREDICT and EVAL mode)
        'classes':tf.argmax(input=logits,axis=1),
        #Add softmax_tensor to the graph. used for predict and logging hook
        'probabilities':tf.nn.softmax(logits,name='softmax_tensor')}
    if(mode==tf.estimator.ModeKeys.PREDICT):
        return tf.estimator.EstimatorSpec(mode=mode,predictions=predictions)
    #calculate loss
    loss=tf.losses.sparse_softmax_cross_entropy(labels=labels,logits=logits)
    #save loss as a scalar
    tf.summary.scalar('lik_loss',loss)
    tf.summary.image('lik_input',input_layer,4)
    tf.summary.image('conv1_filter',conv1_kernel_transposed,32)
    tf.summary.histogram('conv1_bias',conv1_bias)
    tf.summary.histogram('lik_denasa_wts',weights)
    tf.summary.histogram('lik_dense_bias',bias)
    # Channel images for the first example of the batch only.
    tf.summary.image('lik_convimg',convimg2[0,:,:,:],32)
    #add evaluation metrics. Moved it here so accuracy will be in training too
    eval_metric_ops={
        'accuracy':tf.metrics.accuracy(
            labels=labels, predictions=predictions['classes'])}
    tf.summary.scalar('lik_acc',eval_metric_ops['accuracy'][1])
    #print confusion matrix images
    confused=tf.confusion_matrix(labels=labels,predictions=predictions['classes'],dtype=tf.float16)
    confused1=tf.reshape(confused,[1,12,12,1])
    tf.summary.image('confusion_mat',confused1) #cols are predictions, rows are labels
    #print misclassified images
    mislabeled=tf.not_equal(tf.cast(predictions['classes'],tf.int32),labels)
    wrong_input=tf.boolean_mask(predictions['classes'],mislabeled)
    actual_label=tf.boolean_mask(labels,mislabeled)
    # tf.Print logs the misclassified predictions/labels as a side effect.
    mislabeled2=tf.Print(mislabeled,[wrong_input,actual_label],'printing mislabeled')
    mislabeled_images = tf.boolean_mask(input_layer, mislabeled2)
    tf.summary.image('mislabled',mislabeled_images,4)
    #configure training op
    if(mode==tf.estimator.ModeKeys.TRAIN):
        optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op=optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode,loss=loss,train_op=train_op)
    # EVAL mode.
    return tf.estimator.EstimatorSpec(mode=mode,loss=loss,eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Load the pre-saved TRMM arrays, train the CNN estimator, then evaluate."""
    #load training and eval data
    trmm_data = np.load('./trmm_data.npy').astype(np.float32)
    trmm_labels = np.load('./trmm_label.npy').astype(np.int32)
    # Shuffle data and labels together so the pairs stay aligned.
    c = list(zip(trmm_data,trmm_labels))
    shuffle(c)
    trmm_data_t,trmm_labels_t=zip(*c)
    trmm_data=np.array(trmm_data_t)
    trmm_labels=np.array(trmm_labels_t)
    # 80/20 train/test split.
    train_data = trmm_data[:int(0.8*len(trmm_labels))]
    test_data = trmm_data[int(0.8*len(trmm_labels)):]
    # validate_data = trmm_data[int(0.8*len(trmm_labels)):]
    train_labels = trmm_labels[:int(0.8*len(trmm_labels))]
    test_labels = trmm_labels[int(0.8*len(trmm_labels)):]
    # validate_labels = trmm_labels[int(0.8*len(trmm_labels)):]
    # c = list(zip(train_data,train_labels))
    # shuffle(c)
    # train_data_t,train_labels_t=zip(*c)
    # train_data=np.array(train_data_t)
    # train_labels=np.array(train_labels_t)
    #
    # c2 = list(zip(test_data,test_labels))
    # shuffle(c2)
    # test_data_t,test_labels_t=zip(*c)
    # test_data=np.array(test_data_t)
    # test_labels=np.array(test_labels_t)
    assert train_data.shape[0] == train_labels.shape[0]
    # NOTE(review): this Dataset is built and printed but never used below -
    # the numpy_input_fn pipelines feed the estimator instead.
    dataset = tf.contrib.data.Dataset.from_tensors((train_data, train_labels))
    print(dataset)
    #create the estimator (checkpoints/summaries go to model_dir)
    mnist_classifier=tf.estimator.Estimator(model_fn=cnn_model_fn,
        model_dir='./trmm_convnet4')
    #set up logging: dump softmax probabilities every 100 steps
    tensors_to_log={'probabilities':'softmax_tensor'}
    logging_hook=tf.train.LoggingTensorHook(tensors=tensors_to_log,
        every_n_iter=100)
    # training time
    train_input_fn=tf.estimator.inputs.numpy_input_fn(
        x={'x':train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn,steps=20000,hooks=[logging_hook])
    # Single-pass evaluation on the held-out 20%.
    eval_input_fn=tf.estimator.inputs.numpy_input_fn(
        x={'x':test_data},
        y=test_labels,
        num_epochs=1,
        shuffle=False)
    eval_results=mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
if __name__=='__main__':
    # tf.app.run parses command-line flags and invokes main().
    tf.app.run()
from getAngles_cffi import ffi,lib
def getAngles(angleX, angleY, hauteur):
    """Compute the three motor angles for the given orientation/height.

    Thin wrapper around the C implementation exposed through cffi.

    :param angleX: tilt around X (units as expected by the C library -
        TODO confirm degrees vs radians)
    :param angleY: tilt around Y
    :param hauteur: height ("hauteur" is French)
    :return: a cffi ``double[3]`` array of motor angles
    """
    # forcing variable to be doubles
    anglex = ffi.cast("double",float(angleX))
    angley = ffi.cast("double",float(angleY))
    hauteur = ffi.cast("double",float(hauteur))
    # Output buffer filled in by the C function.
    anglesMoteurs = ffi.new("double[3]")
    # computing
    lib.getAngles(anglex, angley, hauteur, anglesMoteurs)
    # Mirror each angle around 312 - presumably converts from the C library's
    # convention to the servo hardware's orientation; TODO confirm the offset.
    for i, angle in enumerate(anglesMoteurs):
        anglesMoteurs[i] = 312 - angle
    # returning array of 3 angles
    return anglesMoteurs
|
from myPackage import tools as tl
from myPackage import preprocess
from myPackage import minutiaeExtraction as minExtract
from enhancementFP import image_enhance as img_e
from os.path import basename, splitext, exists
import time
from numpy import mean, std
import os
from imutils import paths
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import _pickle as cPickle
import sklearn.ensemble
from sklearn import metrics
from sklearn.metrics import accuracy_score
from keras.preprocessing.image import img_to_array
from keras.utils import to_categorical
from pyimagesearch.lenet import LeNet
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import cv2 as cv2
import os
if __name__ == '__main__':
    # Fingerprint classification pipeline: preprocess each image, extract
    # minutiae features, and collect (features, label) pairs.
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--path", required=True,
    #                 help="-p Source path where the images are stored.")
    # ap.add_argument("-r", "--results", required=False,
    #                 help="-r Destiny path where the results will be stored.")
    # args = vars(ap.parse_args())

    # Configuration
    EPOCHS = 100
    INIT_LR = 1e-3
    BS = 32
    image_ext = '.tif'
    plot = False
    path = None
    dir_gam = "./Image"
    # BUG FIX: raw strings, so backslashes in the Windows paths are not
    # interpreted as escape sequences.
    dir_res = r"E:\IBE\GitHub\MinutiaeFingerprint\Result"
    path = dir_res
    # Renamed from 'dir', which shadowed the builtin; currently unused.
    si_images_dir = r"E:\Aji Sapta Pramulen\Python\Kode Program\images\SI"

    # Fingerprint pattern class name -> numeric label.
    LABEL_CODES = {
        "Accidental": 1,
        "Central pocket loop": 2,
        "Leteral pocket loop": 3,
        "Plain Arch": 4,
        "Plain whorl": 5,
        "Radial loop": 6,
        "Tented arch": 7,
        "Twinted loop": 8,
        "Ulnair loop": 9,
    }

    # grab the image paths (natural-sorted)
    imagePaths = sorted(list(paths.list_images(dir_gam)))
    all_images = tl.natSort(imagePaths)
    data = []
    labels = []
    print("\nAll_images size: {}\n".format(len(all_images)))
    all_times = []
    for image in all_images:
        start = time.time()
        name = splitext(basename(image))[0]
        print("\nProcessing image '{}'".format(name))
        # Preprocess: blur -> enhance -> clean -> thin to a skeleton.
        cleaned_img = preprocess.blurrImage(image, name, plot)
        enhanced_img = img_e.image_enhance(cleaned_img, name, plot)
        cleaned_img = preprocess.cleanImage(enhanced_img, name, plot)
        # skeleton = preprocess.zhangSuen(cleaned_img, name, plot)
        skeleton = preprocess.thinImage(cleaned_img, name, plot)
        # The class name is the image's parent directory.
        label = image.split(os.path.sep)[-2]
        temp = minExtract.process(skeleton, name, label)
        # Map the class name to its numeric code. Unknown classes keep the
        # raw string, matching the original if/elif chain's fall-through.
        label = LABEL_CODES.get(label, label)
        labels.append(label)
        data.append(temp)
        all_times.append(time.time() - start)
    print("[INFO] loading . ...")
    # (Dead experimentation code removed: numpy conversion, train/test split,
    #  RandomForest fit/predict, and per-image timing statistics.)
# -*- coding: UTF-8 -*-
# author:yuliang
import time
from threading import RLock,Thread
import serial
#龙岗IO
class LongGangIO(Thread):
    """Driver thread for the LongGang serial IO board.

    Continuously polls the board for its input-status byte over a serial
    port, exposing the eight input bits via ``io[n]`` and driving outputs
    via ``io[n] = value``.
    """

    def __init__(self, COM):
        Thread.__init__(self)
        self.com = COM
        # Latest known state of the 8 input bits, keyed 0-7.
        self.IN_STATUS = {}
        self.isRunning = False
        # Serialises port access between the polling thread and callers
        # of __setitem__.
        self.serialPortLock = RLock()
        self.initIO()

    def initIO(self):
        """Open the serial port and start the polling thread."""
        try:
            self.BoardSerial = serial.Serial(port=self.com, baudrate=9600, timeout=2)
            self.BoardSerial.setRTS(False)
            self.isRunning = True
            self.start()
        except Exception:
            # Port may be missing or busy: report it and leave isRunning
            # False so run() exits immediately.
            import traceback
            traceback.print_exc()

    def run(self):
        # Poll for as long as the port stays usable.
        while self.isRunning:
            self.getSTATUS()

    def getSTATUS(self):
        """Request the 9-byte status frame and update :attr:`IN_STATUS`."""
        res = b''  # BUG FIX: was unbound if the serial write/read raised
        self.serialPortLock.acquire()
        try:
            # Fixed query frame; last byte is the additive checksum.
            self.BoardSerial.write(bytearray([0x00, 0x5A, 0x56, 0x00, 0x07, 0x00, 0x00, 0x00, 0xB7]))
            res = self.BoardSerial.read(9)
        except Exception as e:
            print(e)
        finally:
            self.serialPortLock.release()
        # BUG FIX: read() returns bytes (so the old res=='' test never
        # matched), and a timeout can return a short read - require a full
        # 9-byte frame before indexing into it.
        if len(res) < 9:
            return
        if res[-1] != self.checkSum(res[:-1]):
            print('CHECKSUM ERROR')
            return
        # Byte 7 carries the 8 input bits; unpack LSB-first.
        stat = res[7]
        print(stat, type(stat), type(res), res)
        mask = 0b00000001
        for i in range(8):
            self.IN_STATUS[i] = bool(stat & mask)
            mask = mask << 1
        print(self.IN_STATUS)

    def checkSum(self, fram):
        """Return the 8-bit additive checksum of *fram* (ints, bytes or str)."""
        total = 0  # renamed from 'sum', which shadowed the builtin
        for i in fram:
            # str frames carry characters; bytes/lists yield ints.
            total += ord(i) if isinstance(i, str) else i
        return total % 256

    def __getitem__(self, item):
        # Unknown / not-yet-polled inputs read as False.
        return self.IN_STATUS.get(item, False)

    def __setitem__(self, key, value):
        """Switch output *key* (0-based) on (truthy value) or off."""
        CMD = [0x00, 0x5A, 0x56, 0x00, None, None, 0x00, 0x00, None]
        CMD[4] = 0x01 if value else 0x02   # 0x01 = on, 0x02 = off
        CMD[5] = key + 1                   # board outputs are 1-based
        CMD[8] = self.checkSum(CMD[:-1])
        self.serialPortLock.acquire()
        try:
            self.BoardSerial.write(bytearray(CMD))
            self.BoardSerial.read(9)       # consume the acknowledgement frame
        except Exception as e:
            print(e)
        finally:
            self.serialPortLock.release()
if __name__ == '__main__':
    # Smoke test: toggle output 4 on/off every two seconds for as long as
    # the serial port stays open.
    ser=LongGangIO('com3')
    time.sleep(1)
    while ser.isRunning:
        ser[4]=1
        time.sleep(2)
        ser[4]=0
        time.sleep(2)
|
from django.http import JsonResponse
# Create your views here.
def home(request):
    """Root endpoint: return the API welcome message as JSON."""
    payload = {'message': 'Welcome to the E-commerce API'}
    return JsonResponse(payload)
# Demonstrate inserting an element at a given position; slice assignment
# at [1:1] is equivalent to list.insert(1, ...).
list1 = ['Google', 'Runoob', 'Taobao']
list1[1:1] = ['Baidu']
print ('列表插入元素后为 : ', list1)
import os
if __name__ == '__main__':
    # Demonstrate os.fork(): the child sees pid == 0, the parent receives
    # the child's pid.
    print('当前进程(%s)启动...'%(os.getpid()))
    pid = os.fork()
    if pid < 0 :
        print('fork 出现错误')
    elif pid == 0:
        print('我是子进程(%s),父进程是(%s)'%(os.getpid(),os.getppid()))
    else:
        # BUG FIX: os.getpid was missing its call parentheses, so the
        # function object's repr was printed instead of the pid.
        print('我(%s)创建了一个子进程(%s)'%(os.getpid(),pid))
|
#global default = "hey"
class color:
    """ANSI terminal colour escape codes.

    Part one: use the attributes directly::

        from pylinux-colors import *
        print(color.red + "your-text")

    Part two: switch the terminal colour by name::

        from pylinux-colors import *
        color().setc("green")
    """
    default = '\033[39m'
    black = '\033[30m'
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[33m'
    blue = '\033[34m'
    magenta = '\033[35m'
    cyan = '\033[36m'
    # NOTE: 00 is the "reset all" code rather than white (37); value kept
    # from the original.
    white = '\033[00m'

    def setc(self, colo):
        """Print the escape code named *colo*; unknown names fall back to red.

        Fixes over the original: Python-2-only ``print x`` statements
        replaced with the (2/3-compatible) function call form, and the long
        if/elif chain replaced by a dict lookup.
        """
        codes = {
            'default': self.default,
            'black':   self.black,
            'red':     self.red,
            'green':   self.green,
            'yellow':  self.yellow,
            'blue':    self.blue,
            'magenta': self.magenta,
            'cyan':    self.cyan,
            'white':   self.white,
        }
        print(codes.get(colo, self.red))
|
import struct, socket
# Simple usage Example: WakeUp("AA:AA:AA:AA:AA:AA")
def WakeUp(mac_address):
addr_byte = mac_address.split(':')
hw_addr = struct.pack('BBBBBB', int(addr_byte[0], 16), int(addr_byte[1], 16), int(addr_byte[2], 16), int(addr_byte[3], 16), int(addr_byte[4], 16), int(addr_byte[5], 16))
payload = '\xff' * 6 + hw_addr * 16
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(mac_address, ('<broadcast>', 9))
s.close()
|
# This Python file uses the following encoding: utf-8
# Copyright 2015 Tin Arm Engineering AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import parser, tools
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
class CreateDistanceCallback(object):
    """Create callback to calculate distances and travel times between points."""

    def __init__(self, locations, num):
        """Precompute the pairwise distance matrix for the first *num* locations."""
        self.num = num
        self.matrix = {
            src: {
                dst: tools.distance(locations[src][0], locations[src][1],
                                    locations[dst][0], locations[dst][1])
                for dst in range(num)
            }
            for src in range(num)
        }

    def Distance(self, from_node, to_node):
        # The routing solver expects integer arc costs.
        return int(self.matrix[from_node][to_node])
class CreateDemandCallback(object):
    """Create callback to get demands at location node."""

    def __init__(self, demands):
        # Demand is indexed by origin node only.
        self.matrix = demands

    def Demand(self, from_node, to_node):
        # to_node is ignored: demand depends solely on the origin.
        return self.matrix[from_node]
class CreateTravelTimeCallback(object):
    """Create callback to get travel times between locations."""

    def __init__(self, dist_callback, speed):
        self.dist_callback = dist_callback
        self.speed = speed

    def TravelTime(self, from_node, to_node):
        # distance [miles] / speed [mph] * 3600 -> whole seconds.
        distance = self.dist_callback(from_node, to_node)
        return int(distance * 3600 / self.speed)
class Stop:
    """The smallest data point in the RoutingCalculator: one pickup or
    dropoff at an address, with its time window and the vehicle load on
    arrival.
    """

    def __init__(self, id, addr, pickup, time_window, curr_load):
        """
        :param id: The given ID of the Stop
        :param addr: The address that the stop will be at
        :param pickup: A boolean dictating whether the Stop is a pickup or dropoff
        :param time_window: A tuple of times in seconds that the driver can stop at this location
        :param curr_load: The current load of passengers of the vehicle when it reaches the stop
        """
        self.id = id
        self.addr = addr
        self.pickup = pickup
        self.time_window = time_window
        self.curr_load = curr_load

    def __str__(self):
        kind = 'Pickup' if self.pickup else 'Dropoff'
        tmin = tools.seconds_to_time(self.time_window[0])
        tmax = tools.seconds_to_time(self.time_window[1])
        return " {pickup_dropoff} at {addr}, Load({load}) Time({tmin}, {tmax})".format(
            pickup_dropoff=kind, addr=self.addr, load=self.curr_load,
            tmin=tmin, tmax=tmax)

    def __eq__(self, other):
        # Two stops are equal when every attribute matches.
        return vars(self) == vars(other)

    def to_dict(self):
        # Key order matters for serialised output; keep it stable.
        result = {}
        result["Address"] = self.addr
        result["Type"] = "Pickup" if self.pickup else "Dropoff"
        result["Load"] = self.curr_load
        result["Earliest"] = tools.seconds_to_time(self.time_window[0])
        result["Latest"] = tools.seconds_to_time(self.time_window[1])
        return result
class Route:
    """An ordered sequence of Stops served by a single vehicle."""

    def __init__(self):
        self.stops = []

    def add_stop(self, stop):
        """
        Adds a stop to this Route
        :param stop: A Stop
        :return: void -- changes the list to include the stop
        """
        self.stops.append(stop)

    def __str__(self):
        inner = " -> ".join(str(s) for s in self.stops)
        return "Depot -> -> {0} -> Depot".format(inner)

    def valid(self):
        """
        Returns True if all corresponding dropoffs and pickups exist in the Route
        (each pickup id must be matched by a dropoff with id = pickup id + 1).
        """
        pickup_ids = {s.id for s in self.stops if s.pickup}
        dropoff_ids = {s.id for s in self.stops if not s.pickup}
        return {pid + 1 for pid in pickup_ids} == dropoff_ids

    def __eq__(self, other):
        return self.stops == other.stops
class RoutingCalculator:
    """Container for the full solution: one Route per used vehicle."""

    def __init__(self):
        self.routes = []

    def add_route(self, route):
        """
        Adds a route to the calculator
        :param route: Route object
        """
        self.routes.append(route)

    def __str__(self):
        lines = []
        for idx, route in enumerate(self.routes):
            lines.append('Route {0}: {1}'.format(idx + 1, str(route)))
        return "\n\n".join(lines)

    def valid(self):
        """
        Returns True if all Routes are valid
        """
        return all(route.valid() for route in self.routes)

    def __eq__(self, other):
        return self.routes == other.routes

    def to_json_format(self):
        # Nested lists of per-stop dicts, one inner list per route.
        return [[stop.to_dict() for stop in route.stops]
                for route in self.routes]
def main(in_dict, geo_file, failure_file, num_trips):
    """
    The main Routing Calculator function, which calculates the routes given correct input
    :param in_dict: A record-like list of dictionaries which indicates all Trip objects that will be parsed
    :param geo_file: The path to a JSON file which maps all known addresses to geocodes
    :param failure_file: The path to a JSON file which lists all addresses that can't be geocoded
    :param num_trips: The number of trips to be included in the calculator
    :return: A RoutingCalculator object that contains all Routes and Stop values
    """
    # Create the data.
    trip_data = parser.AllTrips(in_dict, geo_file, failure_file)
    print("Running...\n")
    data = [trip_data.locations, trip_data.demands, trip_data.starttimes, trip_data.endtimes]
    locations = data[0]
    demands = data[1]
    start_times = data[2]
    end_times = data[3]
    # Node 0 is the depot; each trip contributes one pickup and one dropoff
    # node, hence num_trips * 2 + 1.
    num_locations = min(num_trips * 2 + 1, len(locations))
    depot = 0
    num_vehicles = max(10, int(num_locations * 0.3))
    # Create routing model.
    if num_locations > 0:
        routing = pywrapcp.RoutingModel(num_locations, num_vehicles, depot)
        search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
        # Odd node i-1 is a pickup whose paired dropoff is the even node i.
        for i in range(2, num_locations, 2):
            routing.AddPickupAndDelivery(i - 1, i)
        # Setting first solution heuristic: the
        # method for finding a first solution to the problem.
        search_parameters.first_solution_strategy = (
            routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
        # Callbacks to the distance function and travel time functions here.
        dist_between_locations = CreateDistanceCallback(locations, num_locations)
        dist_callback = dist_between_locations.Distance
        routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
        demands_at_locations = CreateDemandCallback(demands)
        demands_callback = demands_at_locations.Demand
        # Add a dimension for demand.
        slack_max = 0
        vehicle_capacity = 8  # what is the max of the capacity? 8?
        fix_start_cumul_to_zero = True
        demand = "Demand"
        routing.AddDimension(demands_callback, slack_max, vehicle_capacity,
                             fix_start_cumul_to_zero, demand)
        # Adding capacity dimension constraints.
        # NOTE(review): this registers the same demand callback a second time
        # under a different dimension name - possibly redundant; confirm.
        VehicleCapacity = 8
        NullCapacitySlack = 0
        fix_start_cumul_to_zero = True
        capacity = "Capacity"
        routing.AddDimension(demands_callback, NullCapacitySlack, VehicleCapacity,
                             fix_start_cumul_to_zero, capacity)
        # Add time dimension (horizon = one day, in seconds).
        horizon = 24 * 3600
        time = "Time"
        speed = 25
        travel_times = CreateTravelTimeCallback(dist_callback, speed)
        travel_time_callback = travel_times.TravelTime
        routing.AddDimension(travel_time_callback,  # total time function callback
                             horizon,
                             horizon,
                             fix_start_cumul_to_zero,
                             time)
        # Add time window constraints (skipping the depot at node 0).
        time_dimension = routing.GetDimensionOrDie(time)
        for location in range(1, num_locations):
            start = start_times[location]
            end = end_times[location]
            time_dimension.CumulVar(location).SetRange(start, end)
        # Solve displays a solution if any.
        assignment = routing.SolveWithParameters(search_parameters)
        if assignment:
            # Solution cost.
            # Inspect solution: rebuild Route/Stop objects from the
            # assignment, one Route per vehicle actually used.
            routes = RoutingCalculator()
            capacity_dimension = routing.GetDimensionOrDie(capacity)
            time_dimension = routing.GetDimensionOrDie(time)
            for vehicle_nbr in range(num_vehicles):
                if not routing.IsVehicleUsed(assignment, vehicle_nbr):
                    continue
                route = Route()
                # Start from the node after the vehicle's start depot.
                index = assignment.Value(routing.NextVar(routing.Start(vehicle_nbr)))
                while not routing.IsEnd(index):
                    node_index = routing.IndexToNode(index)
                    load_var = capacity_dimension.CumulVar(index)
                    time_var = time_dimension.CumulVar(index)
                    # Odd node index == pickup (matches AddPickupAndDelivery).
                    route.add_stop(Stop(node_index, trip_data.addresses[node_index], node_index % 2 == 1,
                                        (assignment.Min(time_var), assignment.Max(time_var)),
                                        assignment.Value(load_var)))
                    index = assignment.Value(routing.NextVar(index))
                routes.add_route(route)
            if routes.valid():
                print(routes)
                return routes, trip_data.uber
            else:
                print("Invalid Routes")
                raise Exception("Invalid Routes")
        else:
            print('No solution found.')
            raise Exception("No solution could be found")
    else:
        print('Specify an instance greater than 0.')
        raise Exception("Specify an instance greater than 0.")
import ADC0832
import space
def init():
    """Initialise the ADC0832 analogue-to-digital converter."""
    ADC0832.setup()

def loop():
    """Read the ADC forever, clamping (reading - 80) into [0, 100] and
    printing it at 5 Hz."""
    # Local import: the original module used time.sleep without ever
    # importing time (a NameError at runtime).
    import time
    while True:
        res = ADC0832.getResult() - 80
        if res < 0:
            res = 0
        if res > 100:
            res = 100
        # Python-2-only 'print x' statements converted to the function form.
        print('res = %d' % res)
        time.sleep(0.2)

if __name__ == '__main__':
    init()
    try:
        loop()
    except KeyboardInterrupt:
        ADC0832.destroy()
        # BUG FIX: the original read prin'The End' - a syntax error.
        print('The End')
|
# Pick the "b" value from every record whose "a" value is "2".
# Fixes over the original: the loop variables 'list' and 'dict' shadowed
# the builtins of the same names.
dict1 = {"a": "1", "b": "b", "c": "c"}
dict2 = {"a": "2", "b": "b", "c": "c"}
dict3 = {"a": "3", "b": "b", "c": "c"}
records = [dict1, dict2, dict3]
dada = [entry["b"] for entry in records if entry["a"] == "2"]
print(dada)
|
import time
from odoo import models, fields, api, _
from datetime import date,datetime,timedelta
from odoo.exceptions import except_orm, Warning, RedirectWarning
import math
class Fees_Line(models.Model):
    """
    Fees Lines
    A single line of a fee structure: links a fee product to a registration
    or student, carrying amount, discount and payment-type details.
    """
    _name = 'fees.line'
    _description = 'Fees Line'
    # The fee product this line charges for.
    name = fields.Many2one('product.product','Name',required=True)
    sequence = fields.Integer('Priority')
    amount = fields.Float('Amount',required=True)
    reg_id = fields.Many2one('registration', string='Registrations', invisible=True)
    reg_form_id = fields.Many2one('registration', string='Registrations Form', invisible=True)
    # Whether the fee is mandatory for the registration.
    type=fields.Selection([('required','Required'),('optional','optional')])
    fees_id=fields.Many2one('fees.structure','Fees')
    fee_pay_type = fields.Many2one('fee.payment.type',string="Fee Payment Type")
    stud_id = fields.Many2one('res.partner','Student id')
    next_term = fields.Many2one('acd.term',string="Next Term")
    # New amount to apply via the Update Amount flow (see update_fee_amount).
    update_amount = fields.Float(string='Update Amount')
    # Discount as a percentage (0-100) and as an absolute amount; the two
    # are kept in sync by write().
    discount = fields.Float(string="Discount(%)")
    discount_amount = fields.Float(string='Discount Amount')
    amount_from_percentage = fields.Float(string='Update Amount In Percentage(%)')
    @api.onchange('name')
    def on_addimition_fee(self):
        # When the selected fee product is flagged as an admission fee,
        # force the payment type to the one-off ('one') type, if defined.
        if self.name.is_admission_fee == True:
            one_type = self.env['fee.payment.type'].search([('name','=','one')])
            if one_type.id:
                self.fee_pay_type = one_type.id
@api.onchange('update_amount')
def get_update_percentage(self):
if self.update_amount > 0 and self.amount > 0 :
self.amount_from_percentage = (self.update_amount/self.amount)*100
print 'hghfkkkkkkk'
# @api.onchange('amount_from_percentage')
# def get_update_amount(self):
# # if self.amount_from_percentage >= 0 and self.amount_from_percentage <= 100:
# if self.amount_from_percentage > 0:
# self.update_amount = (self.amount*self.amount_from_percentage)/100
@api.onchange('amount_from_percentage')
def get_update_amount(self):
# if self.amount_from_percentage >= 0 and self.amount_from_percentage <= 100:
if self.amount_from_percentage > 0:
pass
# self.update_amount = round((self.amount*(self.amount_from_percentage))/100)
print "vhhgjghhhhhhh"
# else:
# raise except_orm(_('Warning!'),_("please enter valid Update Percentage(%)."))
    @api.onchange('discount')
    def onchange_discount(self):
        # Reject out-of-range discounts immediately in the UI, resetting the
        # field to 0 before raising so the form never keeps a bad value.
        if self.discount > 100.0 or self.discount < 0.0:
            self.discount = 0.00
            raise except_orm(_('Warning!'),_("please enter valid discount(%)."))
    @api.model
    def create(self,vals):
        """Validate that any supplied discount is within 0-100 before creating."""
        if 'discount' in vals:
            if vals['discount'] > 100.0 or vals['discount'] < 0.0:
                raise except_orm(_('Warning!'),_("please enter valid discount(%)."))
        return super(Fees_Line, self).create(vals)
@api.multi
def write(self, vals):
if 'update_amount' not in vals and 'amount' in vals:
vals['amount'] = self.amount
raise except_orm(_('Warning!'),
_("You can not update amount directly from hear !, please use Update Amount and Update button"))
# discount change then discount amount calculation
if 'discount' in vals:
if vals['discount'] > 100.0 or vals['discount'] < 0.0:
raise except_orm(_('Warning!'),_("please enter valid discount(%)."))
else:
if self.amount > 0.0 and 'discount_amount' not in vals:
vals['discount_amount'] = (self.amount * vals['discount'])/100
# discount amount change then discount calculation
if 'discount_amount' in vals and vals['discount_amount'] >= 0.0:
if self.amount > 0.0 and 'discount' not in vals:
vals['discount'] = (vals['discount_amount']*100)/self.amount
return super(Fees_Line, self).write(vals)
    @api.model
    def default_get(self, fields):
        """
        this method is use for default set value when create new record.
        :param fields: list of field names to provide defaults for
        :return: dict of default values, with stud_id seeded from the
            'student_id' context key when it is present and truthy
        """
        res = super(Fees_Line, self).default_get(fields)
        # record update only when student_id get from Context
        if 'student_id' in self._context and self._context['student_id']:
            res.update({'stud_id': self._context['student_id']})
        return res
@api.multi
def unlink(self):
"""
This Method is call, when we delete record.
------------------------------------------
:return:
"""
# add validation for record delation time
for each in self:
if each.type=='required' and each.reg_form_id:
raise except_orm(_('Warning!'),
_("You cant delete the required fees lines!"))
res=super(Fees_Line,self).unlink()
return res
@api.model
def get_month_difference(self,start_date, date_today):
"""
"""
years_diff = date_today.year - start_date.year
months_diff = 0
if date_today.month >= start_date.month:
months_diff = date_today.month - start_date.month
else:
years_diff -= 1
months_diff = 12 + (date_today.month - start_date.month)
days_diff = 0
if date_today.day >= start_date.day:
days_diff = date_today.day - start_date.day
else:
months_diff -= 1
days_diff = 31 + (date_today.day - start_date.day)
if months_diff < 0:
months_diff = 11
years_diff -= 1
age = years_diff
month_dict = {
'years' : years_diff,
'months' : months_diff,
'days' : days_diff
}
return month_dict
@api.model
def months_between(self,start_date,end_date):
months = []
month_year = []
cursor = start_date
while cursor <= end_date:
if cursor.month not in months:
months.append(cursor.month)
month_year.append((int(cursor.month),int(cursor.year)))
cursor += timedelta(weeks=1)
return month_year
    @api.multi
    def update_fee_amount(self):
        """Propagate a new fee amount (``update_amount``) to this fee line.

        Steps:
        * validate that an update amount is set and actually differs,
        * log the change in the fee structure's history,
        * recompute each enrolled student's fee line, pro-rated for
          students who joined after the academic year started,
        * refresh the students' payable fee details,
        * store the new amount on this master line and reset
          ``update_amount``.

        :return: True on success; raises ``except_orm`` on invalid input.
        """
        student_obj = self.env['res.partner']
        if self.update_amount == 0.00:
            raise except_orm(_('Warning!'),
                    _("please mention update amount value : (%s)") % (self.update_amount))
        else:
            if self.amount == self.update_amount:
                raise except_orm(_('No Update!'),
                    _("Fee amount %s and Update fee amount %s are same.") % (self.amount,self.update_amount))
        # genarate fees history
        sequence = self.fees_id.fee_history_line.search_count([('fee_structure_id','=',self.fees_id.id)])
        fee_history_line = {
            'sequence': sequence + 1,
            'name' : self.name.id,
            'old_amount': self.amount,
            'new_amount': self.update_amount,
            'date': date.today(),
            'fee_structure_id' : self.fees_id.id,
        }
        self.fees_id.fee_history_line = [(0, 0, fee_history_line)]
        # update student fee structure
        unpaid_diff = {}
        for stud_rec in student_obj.search([('is_parent', '=', False), ('course_id', '=', self.fees_id.course_id.id),
                                            ('batch_id', '=', self.fees_id.academic_year_id.id)]):
            if stud_rec.admission_date:
                joining_date = datetime.strptime(stud_rec.admission_date,"%Y-%m-%d").date()
                start_date = datetime.strptime(self.fees_id.academic_year_id.start_date,"%Y-%m-%d").date()
                # end_date= datetime.strptime(self.fees_id.academic_year_id.end_date,"%Y-%m-%d").date()
                # total_diff = self.get_month_difference(start_date,end_date)
                # Billable months = academic-year months not flagged as leave.
                total_month = self.fees_id.academic_year_id.month_ids.search_count([('batch_id','=',self.fees_id.academic_year_id.id),
                                                                                    ('leave_month','=',False)])
                leave_month = []
                for l_month in self.fees_id.academic_year_id.month_ids.search([('batch_id','=',self.fees_id.academic_year_id.id),
                                                                               ('leave_month','=',True)]):
                    leave_month.append((int(l_month.name),int(l_month.year)))
                month_in_stj = self.months_between(start_date,joining_date)
                unpaid_diff = self.get_month_difference(start_date,joining_date)
                for fee_structure_rec in stud_rec.student_fee_line.search([('stud_id','=',stud_rec.id),
                                                                           ('name','=',self.name.id)],limit=1):
                    new_amount = self.update_amount
                    # One-time fees are never pro-rated.
                    if fee_structure_rec.fee_pay_type.name not in ['one']:
                        if unpaid_diff and (unpaid_diff.get('months') > 0 or unpaid_diff.get('days') > 0):
                            unpaid_month = float(unpaid_diff.get('months'))
                            # if unpaid_diff.get('days') > 0:
                            #     unpaid_month += 1
                            # Leave months falling before the join date are
                            # not billable anyway, so do not deduct them.
                            if len(month_in_stj) > 0 and len(leave_month) > 0:
                                for leave_month_year in leave_month:
                                    if leave_month_year in month_in_stj:
                                        unpaid_month -= 1
                            if unpaid_month > 0.00 and total_month > 0.00:
                                unpaid_amount = (new_amount * unpaid_month) / total_month
                                new_amount -= unpaid_amount
                    if fee_structure_rec.id:
                        fee_structure_rec.write({'update_amount' : 0.00,'amount' : round(new_amount,2)})
                    for fee_detail in stud_rec.payble_fee_ids.search([('name','=',self.name.id),
                                                                      ('student_id','=',stud_rec.id)]):
                        if fee_detail.id:
                            fee_detail.total_amount = round(new_amount,2)
                stud_rec.update_fee_structure()
        # update on Fee master line
        val = {
            'update_amount' : 0.00,
            'amount_from_percentage':0.0,
            'amount' : self.update_amount,
        }
        self.write(val)
        return True
# @api.model
# def create(self, vals):
# if 'fees_id' in vals:
# fees_id=vals['fees_id']
# fees_obj=self.env['fees.structure'].browse(fees_id)
# start_date=fees_obj.academic_year_id.start_date
# end_date=fees_obj.academic_year_id.end_date
#
# if vals['fee_pay_type']:
#
# type=self.env['fee.payment.type'].browse(vals['fee_pay_type'])
#
# if type.name=='term':
# if ((type.start_date>=start_date) and (type.start_date<=end_date) and (type.end_date>=start_date) and (type.end_date<=end_date)):
# else:
# raise except_orm(_('Warning!'),
# _("Term Start Date and End Date should be inbetween of Acadamic Year Start Date and End Date!"))
#
# return super(Fees_Line, self).create(vals)
class Fees_Structure(models.Model):
    """A named fee structure, unique per (type, class, academic year)."""
    _name = 'fees.structure'
    _description = 'Fees Structure'

    name = fields.Char('Name', required=True)
    code = fields.Char('Code', required=True)
    course_id = fields.Many2one('course', string='Class', required=True)
    academic_year_id = fields.Many2one('batch', 'Academic Year')
    fee_line_ids = fields.One2many('fees.line', 'fees_id', 'Fees Lines')
    type = fields.Selection([('reg', 'Registration'), ('academic', 'Academic')])
    fee_history_line = fields.One2many('fee.history.line', 'fee_structure_id', string="Fee History")

    _sql_constraints = [
        ('code_uniq', 'unique(code)', 'The code of the Fees Structure must be unique !')
    ]

    @api.onchange('type', 'course_id', 'academic_year_id')
    def name_code_generate(self):
        """Derive name and code as ``TYPE/CLASS/YEAR`` once all three
        components are set; otherwise blank them out."""
        name = ''
        if self.course_id and self.academic_year_id and self.type:
            name = self.type.upper() + '/' + \
                self.course_id.name + '/' + \
                self.academic_year_id.name
        self.name = name
        self.code = name

    @api.model
    def create(self, vals):
        """Create a structure, refusing duplicate (type, class, year).

        ``vals.get()`` is used so a missing key cannot raise KeyError
        before the duplicate check runs.
        """
        duplicates = self.search([('type', '=', vals.get('type')),
                                  ('course_id', '=', vals.get('course_id')),
                                  ('academic_year_id', '=', vals.get('academic_year_id'))])
        if duplicates:
            raise except_orm(_('Warning!'),
                             _("Fee structure already exist"))
        return super(Fees_Structure, self).create(vals)

    @api.multi
    def write(self, vals):
        """Update a structure, refusing duplicate (type, class, year)."""
        if ('type' in vals) or ('course_id' in vals) or ('academic_year_id' in vals):
            type = vals.get('type', self.type)
            course_id = vals.get('course_id', self.course_id.id)
            academic_year_id = vals.get('academic_year_id', self.academic_year_id.id)
            # Exclude the records being written, otherwise re-saving a
            # record with its own current values is reported as duplicate.
            duplicates = self.search([('type', '=', type),
                                      ('course_id', '=', course_id),
                                      ('academic_year_id', '=', academic_year_id),
                                      ('id', 'not in', self.ids)])
            if duplicates:
                raise except_orm(_('Warning!'),
                                 _("Fee structure already exist"))
        return super(Fees_Structure, self).write(vals)
class fee_payment_type(models.Model):
    """Fee payment frequency (monthly, quarterly, one time, ...)."""
    _name = 'fee.payment.type'
    _description = 'Fee Payment Type'

    name = fields.Selection([
        ('month', 'Monthly'),
        ('alt_month', 'Alternate Month'),
        ('quater', 'Quarterly'),
        ('year', 'Yearly'),
        ('one', 'One Time'),
        ('half_year','Half Year'),
        ('term','Term'),
        ],string="Name")
    code = fields.Char('Code')

    @api.multi
    def name_get(self):
        """Display the human-readable label instead of the selection key."""
        labels = {
            'month': 'Monthly',
            'alt_month': 'Alternate Month',
            'quater': 'Quarterly',
            'year': 'Yearly',
            'one': 'One Time',
            'half_year': 'Half Year',
            'term': 'Term',
        }
        res = []
        for record in self:
            # Fall back to the raw value so an unset/unknown name cannot
            # raise KeyError while rendering the record display name.
            res.append((record.id, labels.get(record.name, record.name)))
        return res

    @api.model
    def create(self, vals):
        """Create a payment type, keeping ``code`` mirroring ``name`` and
        refusing duplicate names."""
        # .get() avoids a KeyError when 'name' is absent from vals.
        vals['code'] = vals.get('name')
        if vals.get('name'):
            if self.search([('name', '=', vals['name'])]):
                raise except_orm(_('Warning!'),
                                 _("Payment Type %s is already exist") % (vals['name']))
        return super(fee_payment_type, self).create(vals)

    @api.multi
    def write(self, vals):
        """Rename a payment type, refusing duplicate names."""
        if 'name' in vals:
            # Exclude the records being written so saving a record with
            # its own current name is not flagged as a duplicate.
            duplicates = self.search([('name', '=', vals['name']),
                                      ('id', 'not in', self.ids)])
            if duplicates:
                raise except_orm(_('Warning!'),
                                 _("Payment Type %s is already exist") % (vals['name']))
        return super(fee_payment_type, self).write(vals)
|
# -*- coding: UTF-8 -*-
#! /usr/bin/python
# Bulk-loads a CSV file into MySQL via LOAD DATA LOCAL INFILE (Python 2).
import csv
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# NOTE(review): these module names look like a broken search/replace -
# "MySQLdb.connector" is presumably "mysql.connector" and "MySQLdbdb" is
# presumably "MySQLdb". Confirm before running; fixing them requires
# touching every call site below as well.
import MySQLdb.connector
from MySQLdb.connector.constants import ClientFlag
from MySQLdb.connector.constants import SQLMode
import MySQLdbdb
# Connection settings for the connector-based loader. LOCAL_FILES must be
# enabled for LOAD DATA LOCAL INFILE to be accepted by the server.
# NOTE(review): credentials are hard-coded - move to config/environment.
MySQLdb_config = {
    'host':'localhost',
    'user':'sddivid',
    'password':'22476103',
    'port':3306,
    'database':'python',
    'charset':'utf8',
    'client_flags':[ClientFlag.LOCAL_FILES],
}
def isset(v):
    """Return True when the expression string *v* evaluates without error.

    Note: the expression is evaluated in THIS function's scope, so it can
    only see builtins and module-level names - never a caller's locals.
    """
    try:
        eval(v)
    except BaseException:
        return False
    return True
def LoadFile():
    """Load the CSV into table ``test`` using mysql-connector, committing
    on success and logging any connector error."""
    try:
        cnn = MySQLdb.connector.connect(**MySQLdb_config)
        sql = "LOAD DATA LOCAL INFILE '/etc/workspace/NTBUtest.csv' REPLACE INTO TABLE test FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
        cursor = cnn.cursor()
        cursor.execute(sql)
        cnn.commit()
    except MySQLdb.connector.Error as e:
        print('LoadFile sql fails {}'.format(e))
    finally:
        # NOTE(review): isset() evaluates in its own scope, so it can never
        # see the local names `cursor`/`cnn` here - these checks always
        # return False and the handles are never closed explicitly.
        if isset("cursor"):
            cursor.close()
        if isset("cnn"):
            cnn.close()
def LoadFile2():
    """Same CSV load as LoadFile(), using the MySQLdb-style API instead.

    NOTE(review): connection/cursor are never closed, and errors are not
    handled - the exception propagates to the caller.
    """
    cnn = MySQLdbdb.connect(host="localhost", user="sddivid", passwd="22476103", db="python", charset="utf8")
    cursor = cnn.cursor()
    sql = "LOAD DATA LOCAL INFILE '/etc/workspace/NTBUtest.csv' REPLACE INTO TABLE test FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
    cursor.execute(sql)
    cnn.commit()
if __name__ == "__main__":
    # Only the MySQLdb-style loader is active; the connector variant is
    # kept for reference.
    # LoadFile()
    LoadFile2()
|
from conftest import driver
from selenium import webdriver
from pages.vacancies_page import VacanciesPage
class TestVacation:
    """UI tests for the vacancies page: region filter and company search."""

    def test_choose_region(self, driver):
        """Filtering by region shows only vacancies for that region."""
        needed_region = 'Кубинка'
        vacancies_main_page = VacanciesPage(driver)
        vacancies_main_page.go_to_vacancies()
        vacancies_main_page.enter_region_for_keys(needed_region)
        vacancies_main_page.click_on_needed_region(needed_region)
        vacancies_main_page.click_on_search_button()
        result = vacancies_main_page.find_all_regions()
        for region_text in result:
            # str.find() returns 0 (falsy) for a match at position 0 and
            # -1 (truthy) for "not found", so the previous
            # `find(...) or ==` assertion was inverted. A plain substring
            # membership test is the correct check.
            assert needed_region in region_text
        vacancies_main_page.click_on_show_contacts()
        assert vacancies_main_page.find_phone()

    def test_search(self, driver):
        """Searching by company name shows only matching companies."""
        word = 'Центральный'
        vacancies_main_page = VacanciesPage(driver)
        vacancies_main_page.go_to_vacancies()
        vacancies_main_page.enter_word_in_company_names_input(word)
        vacancies_main_page.click_on_search_button()
        result = vacancies_main_page.find_all_company_names()
        for company_name in result:
            # Same str.find() falsy/truthy inversion fixed here.
            assert word in company_name
|
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
from .abstract_product_endpoint import AbstractProductEndpoint
from ..models.extract import Extract
class Extracts(AbstractProductEndpoint):
    """This represents the ``Extracts`` Endpoint.

    https://developers.cannabisreports.com/docs/extracts
    """
    # Model class used to deserialize API responses for this endpoint.
    __object__ = Extract
    # URI path segment appended to the API base URL.
    __endpoint__ = 'extracts'
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=import-error
helps['aks use-dev-spaces'] = """
type: command
short-summary: (PREVIEW) Use Azure Dev Spaces with a managed Kubernetes cluster.
parameters:
- name: --name -n
type: string
short-summary: Name of the managed cluster.
- name: --resource-group -g
type: string
short-summary: Name of resource group. You can configure the default group using 'az configure --defaults group=<name>'.
- name: --space -s
type: string
short-summary: Name of the dev space to use.
- name: --parent-space -p
type: string
short-summary: Name of a parent dev space to inherit from when creating a new dev space. By default, if there is already a single dev space with no parent, the new space inherits from this one.
"""
helps['aks remove-dev-spaces'] = """
type: command
short-summary: (PREVIEW) Remove Azure Dev Spaces from a managed Kubernetes cluster.
parameters:
- name: --name -n
type: string
short-summary: Name of the managed cluster.
- name: --resource-group -g
type: string
short-summary: Name of resource group. You can configure the default group using 'az configure --defaults group=<name>'.
- name: --yes -y
type: bool
short-summary: Do not prompt for confirmation.
"""
|
# -*- coding: utf-8 -*-
{
'name': "GeProMi Documentacion Digital",
'summary': """
La Documentacion Digital será soporte para el
Expediente Digital.
""",
'description': """
Este archivo tiene la funcion de brindar soporte para el expediente digital.
Se destacan las siguientes funciones:
* Soporte para documento adjunto, el cual debe mostrarse de alguna manera en el cuerpo del expediente.
* Soporte para añadir archivo al documento adjunto. Para lo cual se deberan poder unir archivos PDF, sean firmados digitalmente o no.
""",
'author': "Gis Minero Nacional",
'website': "http://www.gismineronacional.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'gepromi', 'expediente', 'hr', 'tarea_flujo_exp'],
#, 'tarea_flujo_exp'
# always loaded
'data': [
'views/views_over.xml',
'views/views.xml',
'security/doc_digital.xml',
'security/ir.model.access.csv',
],
# only loaded in demonstration mode
'demo': [
#'demo/demo.xml',
],
'application': True,
'auto_install': False,
}
|
# Generated by Django 2.0.5 on 2018-09-04 07:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the graphs app (Diagram, GraphArrow, GraphNode,
    Icon, Link, Sector, Topic).

    Auto-generated by Django 2.0.5 - do not hand-edit once applied.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Diagram',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='Illustration', max_length=50)),
                ('height', models.IntegerField()),
                ('width', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='GraphArrow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('X0', models.FloatField(blank=True, null=True)),
                ('Y0', models.FloatField(blank=True, null=True)),
                ('X1', models.FloatField(blank=True, null=True)),
                ('Y1', models.FloatField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='GraphNode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('radius', models.FloatField()),
                ('X', models.FloatField()),
                ('Y', models.FloatField()),
                ('diagram', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='graphs.Diagram')),
            ],
        ),
        migrations.CreateModel(
            name='Icon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=100)),
                ('source', models.CharField(blank=True, max_length=200)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='graphs/icons/')),
                ('color', models.CharField(default='grey', max_length=50)),
                ('order', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('relative_share', models.FloatField(blank=True, default=None, null=True)),
                ('cause_description', models.CharField(blank=True, max_length=400)),
                ('consequence_description', models.CharField(blank=True, max_length=400)),
            ],
        ),
        migrations.CreateModel(
            name='Sector',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('short_description', models.CharField(blank=True, max_length=400)),
                ('color', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('short_description', models.CharField(blank=True, max_length=400)),
                ('date_created', models.DateField(auto_now_add=True)),
                ('icon_picture', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='graphs.Icon')),
                ('sector', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='graphs.Sector')),
            ],
        ),
        # FK fields added after model creation to break circular references.
        migrations.AddField(
            model_name='link',
            name='cause',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consequences', to='graphs.Topic'),
        ),
        migrations.AddField(
            model_name='link',
            name='consequence',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='causes', to='graphs.Topic'),
        ),
        migrations.AddField(
            model_name='graphnode',
            name='liens',
            field=models.ManyToManyField(blank=True, to='graphs.Link'),
        ),
        migrations.AddField(
            model_name='graphnode',
            name='sector',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='graphs.Sector'),
        ),
        migrations.AddField(
            model_name='graphnode',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='graphs.Topic'),
        ),
        migrations.AddField(
            model_name='grapharrow',
            name='cause',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='graphs.GraphNode'),
        ),
        migrations.AddField(
            model_name='grapharrow',
            name='consequence',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='graphs.GraphNode'),
        ),
        migrations.AddField(
            model_name='grapharrow',
            name='diagram',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='arrows', to='graphs.Diagram'),
        ),
    ]
|
from django import forms
from . import models
class createform(forms.ModelForm):
    """ModelForm for creating a STATE entry with its basic fields.

    NOTE(review): class name is not PEP 8 (``CreateForm``); renaming would
    break importers, so it is left unchanged.
    """
    class Meta:
        model = models.STATE
        fields = ('title', 'slug', 'body', 'image')
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.operators import spark_submit_operator
from airflow.utils.trigger_rule import TriggerRule
import xml.etree.ElementTree as ET
from definitions import TPL_PATH
from mappers.action_mapper import ActionMapper
from utils import xml_utils, el_utils
import jinja2
class SparkMapper(ActionMapper):
    """Maps an Oozie ``<spark>`` action node onto an Airflow
    SparkSubmitOperator.

    The constructor parses the action XML (prepare, job-xml,
    configuration, spark-opts, jar/class/name, args) into instance
    attributes that can be rendered as DAG source text
    (:meth:`convert_to_text`) or instantiated directly
    (:meth:`convert_to_airflow_op`).
    """

    def __init__(self, oozie_node, task_id,
                 trigger_rule=TriggerRule.ALL_SUCCESS, params=None,
                 template='spark.tpl'):
        """Build the mapper; parsing happens eagerly at construction.

        :param params: optional dict of EL variable substitutions. Defaults
            to ``None`` instead of ``{}`` - a mutable default dict would be
            shared across every SparkMapper instance.
        """
        ActionMapper.__init__(self, oozie_node, task_id, trigger_rule)
        self.template = template
        self.params = {} if params is None else params
        self.task_id = task_id
        self.trigger_rule = trigger_rule
        self._parse_oozie_node(oozie_node)

    def _parse_oozie_node(self, oozie_node):
        """
        Populate operator attributes from the oozie XML node.

        Property values specified in the configuration element override
        values specified in the job-xml file.
        """
        # Defaults mirror SparkSubmitOperator's keyword arguments.
        self.application = ''
        self.conf = {}
        self.conn_id = 'spark_default'
        self.files = None
        self.py_files = None
        self.driver_classpath = None
        self.jars = None
        self.java_class = None
        self.packages = None
        self.exclude_packages = None
        self.repositories = None
        self.total_executor_cores = None
        self.executor_cores = None
        self.executor_memory = None
        self.driver_memory = None
        self.keytab = None
        self.principal = None
        self.name = 'airflow-spark'
        self.num_executors = None
        self.application_args = []
        self.env_vars = None
        self.verbose = False
        # Prepare nodes
        self.delete_paths = []
        self.mkdir_paths = []
        prepare_nodes = xml_utils.find_nodes_by_tag(oozie_node, 'prepare')
        if prepare_nodes:
            # If there exists a prepare node, there will only be one, according
            # to oozie xml schema
            self.delete_paths, self.mkdir_paths = self._parse_prepare_node(
                prepare_nodes[0])
        # master url, deploy mode,
        self.application = self._test_and_set(oozie_node, 'jar', '\'\'',
                                              params=self.params, quote=True)
        self.name = self._test_and_set(oozie_node, 'name', '\'airflow-spark\'',
                                       params=self.params, quote=True)
        self.java_class = self._test_and_set(oozie_node, 'class', None,
                                             params=self.params, quote=True)
        config_node = xml_utils.find_nodes_by_tag(oozie_node, 'configuration')
        job_xml = xml_utils.find_nodes_by_tag(oozie_node, 'job-xml')
        # job-xml files first, then the inline configuration node, so the
        # inline values win on key collisions.
        for xml_file in job_xml:
            tree = ET.parse(xml_file.text)
            self.conf = {**self.conf,
                         **self._parse_spark_config(tree.getroot())}
        if config_node:
            self.conf = {**self.conf,
                         **self._parse_spark_config(config_node[0])}
        spark_opts = xml_utils.find_nodes_by_tag(oozie_node, 'spark-opts')
        if spark_opts:
            self._update_class_spark_opts(spark_opts[0])
        app_args = xml_utils.find_nodes_by_tag(oozie_node, 'arg')
        for arg in app_args:
            self.application_args.append(el_utils.replace_el_with_var(arg.text,
                                                                      self.params,
                                                                      quote=False))

    @staticmethod
    def _test_and_set(root, tag, default=None, params=None, quote=False):
        """
        If a node exists in the oozie_node with the tag specified in tag, it
        will attempt to replace the EL (if it exists) with the corresponding
        variable. If no EL var is found, it just returns text. However, if the
        tag is not found under oozie_node, then return default. If there are
        more than one with the specified tag, it uses the first one found.
        """
        # `params=None` avoids a shared mutable default dict.
        params = {} if params is None else params
        var = xml_utils.find_nodes_by_tag(root, tag)
        if var:
            # Only check the first one
            return el_utils.replace_el_with_var(var[0].text,
                                                params=params,
                                                quote=quote)
        else:
            return default

    @staticmethod
    def _parse_spark_config(config_node):
        """Return a dict of name/value pairs from a <configuration> node."""
        conf_dict = {}
        for prop in config_node:
            name = prop.find('name').text
            value = prop.find('value').text
            conf_dict[name] = value
        return conf_dict

    def _update_class_spark_opts(self, spark_opts_node):
        """
        Apply a <spark-opts> string to this mapper's attributes.

        Some examples of the spark-opts element:
        '--conf key=value'
        '--conf key1=value1 value2'
        '--conf key1="value1 value2"'
        '--conf key1=value1 key2="value2 value3"'
        '--conf key=value --verbose --properties-file user.properties'
        """
        spark_opts = spark_opts_node.text.split("--")[1:]
        clean_opts = [opt.strip() for opt in spark_opts]
        clean_opts_split = [opt.split(maxsplit=1) for opt in clean_opts]

        if ['verbose'] in clean_opts_split:
            self.verbose = True
            clean_opts_split.remove(['verbose'])

        for spark_opt in clean_opts_split:
            # Can have multiple "--conf" in spark_opts
            if 'conf' == spark_opt[0]:
                # Splits key1=value1 into [key1, value1]
                conf_val = spark_opt[1].split("=", maxsplit=1)
                self.conf[conf_val[0]] = conf_val[1]
            else:
                # Other options become attributes named after the flag.
                self.__dict__[spark_opt[0]] = '\'' + ' '.join(
                    spark_opt[1:]) + '\''

    @staticmethod
    def _parse_prepare_node(prepare_node):
        """
        Collect delete/mkdir paths from a <prepare> node:

        <prepare>
            <delete path="[PATH]"/>
            ...
            <mkdir path="[PATH]"/>
            ...
        </prepare>
        """
        delete_paths = []
        mkdir_paths = []
        for node in prepare_node:
            node_path = el_utils.convert_el_to_jinja(node.attrib['path'],
                                                     quote=False)
            if node.tag == 'delete':
                delete_paths.append(node_path)
            else:
                mkdir_paths.append(node_path)
        return delete_paths, mkdir_paths

    def convert_to_text(self):
        """Render this mapper as DAG source text via jinja templates."""
        template_loader = jinja2.FileSystemLoader(searchpath=TPL_PATH)
        template_env = jinja2.Environment(loader=template_loader)

        spark_template = template_env.get_template(self.template)
        prepare_template = template_env.get_template('prepare.tpl')
        # If we have found a prepare node, we must reorder nodes.
        if self.delete_paths or self.mkdir_paths:
            prep_text = prepare_template.render(task_id=self.task_id,
                                                trigger_rule=self.trigger_rule,
                                                delete_paths=self.delete_paths,
                                                mkdir_paths=self.mkdir_paths)
            # Don't want to change class variable
            op_dict = self.__dict__.copy()
            op_dict['task_id'] = self.task_id + '_reorder'
            op_text = spark_template.render(**op_dict)
            return op_text + prep_text
        else:
            return spark_template.render(**self.__dict__)

    def convert_to_airflow_op(self):
        """
        Converts the class into a SparkSubmitOperator, this requires
        correct setup of the Airflow connection.
        """
        return spark_submit_operator.SparkSubmitOperator(
            task_id=self.task_id,
            trigger_rule=self.trigger_rule,
            params=self.params,
            # Spark specific
            conn_id='spark_default',
            name=self.name,
            application=self.application,
            conf=self.conf,
            files=self.files,
            py_files=self.py_files,
            jars=self.jars,
            java_class=self.java_class,
            packages=self.packages,
            exclude_packages=self.exclude_packages,
            repositories=self.repositories,
            total_executor_cores=self.total_executor_cores,
            executor_cores=self.executor_cores,
            executor_memory=self.executor_memory,
            driver_memory=self.driver_memory,
            keytab=self.keytab,
            principal=self.principal,
            num_executors=self.num_executors,
            application_args=self.application_args,
            verbose=self.verbose,
            env_vars=self.env_vars,
            driver_classpath=self.driver_classpath
        )

    @staticmethod
    def required_imports():
        """Imports that the generated DAG file must contain."""
        # Dummy and Bash are for the potential prepare statement
        return ['from airflow.contrib.operators import spark_submit_operator',
                'from airflow.operators import bash_operator',
                'from airflow.operators import dummy_operator']

    def get_task_id(self):
        """Task id Airflow should point at as this action's entry node."""
        # If the prepare node has been parsed then we reconfigure the execution
        # path of Airflow by adding delete/mkdir bash nodes before the actual
        # spark node executes.
        if self.has_prepare():
            return self.task_id + '_reorder'
        else:
            return self.task_id

    def has_prepare(self):
        """True when the action carried a <prepare> section."""
        return self.delete_paths or self.mkdir_paths
|
#!/usr/bin/env python3
"""Shuffle data in two matrices in the same way"""
import numpy as np
def shuffle_data(X, Y):
    """Reorder the rows of X and Y with one shared random permutation.

    Corresponding samples and labels stay aligned because both arrays are
    indexed by the same permutation of row indices.
    """
    order = np.random.permutation(X.shape[0])
    return X[order], Y[order]
|
from neuron import *
from neuron import h as nrn
from numpy import *
from pylab import *
# Two Hodgkin-Huxley compartments: soma is driven by a NetStim-triggered
# synapse; soma1 receives an inhibitory NetCon triggered when soma spikes.
# NOTE: this is a Python 2 script (see the final `print` statement).
soma = h.Section()
soma.L = 25
soma.insert('hh')
soma.nseg = 10
soma1 = h.Section()
soma1.L = 25
soma1.insert('hh')
soma1.nseg = 10
# Single stochastic stimulus event starting at t=5 ms.
stimNc = h.NetStim()
stimNc.noise = 1
stimNc.start = 5
stimNc.number = 1
stimNc.interval = 20
syn = h.ExpSyn (0.5, sec = soma)
nc = h.NetCon(stimNc, syn)
nc.weight[0] = 1
nc.record()
# Test NC: spike in soma (threshold 10 mV) drives an inhibitory synapse
# on soma1 after a 10 ms delay.
syn1 = h.ExpSyn (0.5, sec = soma1)
soma.push()
nc1 = h.NetCon( soma(0.5)._ref_v, syn1 )
nc1.delay = 10
nc1.weight[0] = -0.01
nc1.threshold = 10
# record membrane voltages and time
# NOTE(review): the dict keys deliberately contain trailing spaces
# ('v_1 ', 'v_2 ', 't ') and must be used consistently below.
vec = {}
for var in 'v_1 ', 'v_2 ', 't ':
    vec[var] = h.Vector()
vec['v_1 '].record(soma(0.5)._ref_v)
vec['v_2 '].record(soma1(0.5)._ref_v)
vec ['t '].record(h._ref_t)
# run the simulation for 50 ms
h.load_file("stdrun.hoc")
h.init()
h.tstop = 50.0
h.run()
# plot the results
figure()
plot(vec['t '],vec['v_1 '], vec['t '], vec['v_2 '])
show()
h.topology()
# Offset from -65 mV rest: depth of the hyperpolarisation in soma1.
a = min(vec['v_2 '])
print a+65
|
from staff.models import Staff, Employee, Driver
from utils.models import TypeDocument
from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.utils import IntegrityError
import xlrd
import datetime
import os.path
import re
@login_required
def index(request):
    """Render the paginated staff list, ten rows per page."""
    paginator = Paginator(Staff.objects.all(), 10)
    page_number = request.GET.get('page')
    try:
        page_obj = paginator.page(page_number)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= parameter: show the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # ?page= beyond the end: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render_to_response('staff.html', {'staff_list': page_obj},
                              context_instance=RequestContext(request))
@login_required
def uploadDriver(request):
    """Import drivers from the 'custodios' sheet of autos.xls.

    Each 5-column row is converted (Excel dates to ``date``) and handed to
    createDrivers(); finally a done page is rendered.
    """
    reader = xlrd.open_workbook(settings.MEDIA_ROOT + '/' + settings.UPLOAD_PATH + '/files/autos.xls',
                                encoding_override="utf_8")
    sh = reader.sheet_by_name(u'custodios')
    for rownum in range(sh.nrows):
        #row = sh.row_values(rownum)
        datalist = []
        for i, cell in enumerate(sh.row(rownum)):
            value = cell.value
            # ctype 3 is xlrd's XL_CELL_DATE: convert the Excel serial
            # number into a Python date.
            if cell.ctype == 3:
                value = datetime.datetime(*xlrd.xldate_as_tuple(value, reader.datemode))
                datalist.append(value.date())
            else:
                datalist.append(value)
        if len(datalist) == 5:
            createDrivers(datalist)
    return render(request, 'done.html')
def createDrivers(datalist):
    """Create a Driver from one spreadsheet row (Python 2).

    Row layout: internal number, employee item, license number, category,
    expiration date.

    NOTE(review): if ``int(datalist[1])`` raises ValueError, ``item`` is
    never bound and the print below (and later uses) raise NameError.
    Likewise ``employee`` stays unbound when the lookup fails, and
    ``internal_number`` is assigned but never used.
    """
    try:
        item = int(datalist[1])
        internal_number = int(datalist[0])
    except ValueError:
        internal_number = datalist[0]
    print 'Item = ' + str(item)
    try:
        driver_license = int(datalist[2])
    except ValueError:
        driver_license= datalist[2]
    try:
        employee = Employee.objects.get(item=item)
    except Exception, err:
        print item
        print err
    try:
        # A driver is active only when an expiration date exists and has
        # not passed yet.
        is_active = True
        if datalist[4]:
            expiration_date=datalist[4]
            if datetime.date.today() > datalist[4]:
                is_active = False
        else:
            is_active = False
            expiration_date=None
        #if len(driver_license) == 0:
        #    driver_license = None
        try:
            if driver_license == '':
                driver_license = None
            driver = Driver(employee=employee, driver_license=driver_license, driver_category=datalist[3],
                            expiration_date=expiration_date, is_active=is_active)
            driver.save()
        except IntegrityError:
            # Duplicate driver rows are silently skipped.
            pass
    except Exception, err:
        print '----------------'
        print datalist
        print 'Error con item = ' + str(item)
        print 'driver_license: ' + str(driver_license)
        print 'category:' + str(datalist[3])
        print 'expiration_date: ' + str(expiration_date)
@login_required
def uploadStaff(request):
    """Import staff members from the 'todos' sheet of lista_1.xls.

    Each 6-column row is converted (Excel dates to ``date``) and handed to
    createStaff(); finally a done page is rendered.
    """
    reader = xlrd.open_workbook(settings.MEDIA_ROOT + '/' + settings.UPLOAD_PATH + '/' + 'files/lista_1.xls',
                                encoding_override="utf_8")
    sh = reader.sheet_by_name(u'todos')
    for rownum in range(sh.nrows):
        #row = sh.row_values(rownum)
        datalist = []
        for i, cell in enumerate(sh.row(rownum)):
            value = cell.value
            # ctype 3 is xlrd's XL_CELL_DATE: convert the Excel serial
            # number into a Python date.
            if cell.ctype == 3:
                value = datetime.datetime(*xlrd.xldate_as_tuple(value, reader.datemode))
                datalist.append(value.date())
            else:
                datalist.append(value)
        if len(datalist) == 6:
            createStaff(datalist)
    return render(request, 'done.html')
def createStaff(datalist):
    """Parse one spreadsheet row and persist it as Staff + Employee."""
    # Row layout: item, full name, birth date, CI, position, work unit.
    parsed = readAllDataUser(*datalist[:6])
    loadStaff(parsed['staff'], parsed['employee'])
def readAllDataUser(item, name, birth_date, ci, cargo, unidad):
    """Split a raw spreadsheet row into staff/employee dicts.

    :return: ``{"staff": {...}, "employee": {...}}``

    NOTE(review): assumes ``ci`` contains at least two whitespace-separated
    tokens (number + issuing locale) and ``name`` at least two words; rows
    that violate this will raise IndexError.
    """
    item = int(item)
    myList = name.split()
    size_name = len(myList)
    ci = ci.split()
    num_ci = ci[0]
    locale_ci = ci[1]
    # Name heuristic: first two words are the last name, the rest is the
    # first name; two-word names fall through to the except branch.
    try:
        last_name = myList[0] + " " + myList[1]
        if size_name > 3:
            first_name = myList[2] + " " + myList[3]
        else:
            first_name = myList[2]
    except IndexError:
        last_name = myList[0]
        first_name = myList[1]
    employee = {"item": item, "cargo": cargo, "unidad": unidad}
    # Use the item-numbered photo if it exists, else the default avatar.
    path_avatar = settings.PROJECT_ROOT + settings.MEDIA_URL + settings.STAFF_AVATAR_PATH + "/" + str(item) + ".jpg"
    if os.path.exists(path_avatar):
        staff_user = {"first_name": first_name, "last_name": last_name, "ci": num_ci,
                      "locale_issue": str(locale_ci).lower(), "birth_date": birth_date,
                      "avatar": settings.STAFF_AVATAR_PATH + "/" + str(item) + ".jpg",
                      "about": 'Trabajador de Comteco'}
    else:
        staff_user = {"first_name": first_name, "last_name": last_name, "ci": num_ci,
                      "locale_issue": str(locale_ci).lower(), "birth_date": birth_date,
                      "avatar": 'images/default_avatar.png', "about": 'Trabajador de Comteco'}
    return {"staff": staff_user, "employee": employee}
def loadStaff(staff, employee):
    """Persist the parsed staff dict and its employee record (Python 2).

    NOTE(review): if the Staff save fails with IntegrityError, ``s`` stays
    unbound and the Employee creation below raises NameError (which is not
    caught by the IntegrityError handler).
    """
    # cargo = createPosition(staff['cargo'])
    # unit = createWorkUnit(staff['unidad'])
    type_document = TypeDocument.objects.get(key='ci')
    try:
        s = Staff(photo=staff['avatar'], first_name=staff['first_name'], last_name=staff['last_name'],
                  birth_date=staff['birth_date'], type_document=type_document, val_document=staff['ci'],
                  locale_issue=staff['locale_issue'], date_joined=datetime.datetime.now(), is_active=True)
        s.save()
    except IntegrityError:
        print 'ERROR GARRAFAL EN USUARIO CON CI: ' + str(staff['ci']) + ' E ITEM: ' + str(employee['item'])
    try:
        e = Employee(staff=s, item=employee['item'])
        e.save()
    except IntegrityError:
        print 'ERROR AL CARGAR EL ITEM: ' + employee['item']
"""
def createPosition(nameCargo):
try:
cargo = Position(name=nameCargo)
cargo.save()
except IntegrityError:
cargo = Position.objects.get(name=nameCargo)
return cargo
def createWorkUnit(nameUnit):
try:
unit = WorkUnit(name=nameUnit)
unit.save()
except IntegrityError:
unit = WorkUnit.objects.get(name=nameUnit)
return unit
"""
|
import main
import unittest
class ComposeTest(unittest.TestCase):
    """Behaviour checks for main.compose on 4x4 character grids."""

    def testing(self):
        cases = [
            (("byGt\nhTts\nRTFF\nCnnI", "jIRl\nViBu\nrWOb\nNkTB"),
             "bNkTB\nhTrWO\nRTFVi\nCnnIj"),
            (("HXxA\nTGBf\nIPhg\nuUMD", "Hcbj\nqteH\nGbMJ\ngYPW"),
             "HgYPW\nTGGbM\nIPhqt\nuUMDH"),
        ]
        for args, expected in cases:
            self.assertEqual(main.compose(*args), expected)


if __name__ == '__main__':
    unittest.main()
|
class MyClass:
    """Demo of class vs. instance attributes."""
    # Class attribute: shared by all instances until shadowed.
    variable = "blah"
    def foo(self):
        # Plain instance method.
        print('Hello from MyClass')
myObjX = MyClass()
myObjY = MyClass()
# This creates an *instance* attribute on myObjY only; the class
# attribute (and myObjX's view of it) is unchanged.
myObjY.variable = "Giggity goo"
print(myObjX.variable)  # -> "blah" (class attribute)
print(myObjY.variable)  # -> "Giggity goo" (instance attribute)
|
import pytest
import logging
import yaml
from asgi_lifespan import LifespanManager
from httpx import AsyncClient
from app.main import app
from app import messages
from tests import test_constants
# Shared test configuration.
# NOTE(review): the file handle from open() is never closed -- harmless in
# tests, but worth tidying.
config = yaml.safe_load(open("config.yml"))
logger = logging.getLogger(__name__)
def pytest_generate_tests(metafunc):
    """Parametrize every test requesting `client` with two app clients --
    one whose lifespan was run and one without -- each paired with the
    cache-status log message it is expected to produce."""
    if "client" in metafunc.fixturenames:
        metafunc.parametrize(
            "client, cache_log_msg",
            [
                (
                    pytest.lazy_fixture("httpxclient_with_cache"),
                    messages.CACHE_AVAILABLE,
                ),
                (
                    pytest.lazy_fixture("httpxclient_without_cache"),
                    messages.CACHE_UNAVAILABLE,
                ),
            ],
        )
@pytest.fixture()
async def httpxclient_with_cache(caplog):
    """Async HTTP client with the app's lifespan events run via
    LifespanManager (so startup initialization happens)."""
    async with LifespanManager(app):
        caplog.set_level(logging.DEBUG)
        async with AsyncClient(app=app, base_url="http://test") as ac:
            yield ac
@pytest.fixture()
async def httpxclient_without_cache(caplog, cache_log_msg):
    """Async HTTP client WITHOUT running the app lifespan -- presumably
    leaving the cache uninitialized (it is paired with CACHE_UNAVAILABLE
    in pytest_generate_tests).  NOTE(review): `cache_log_msg` is unused
    here -- confirm it is only requested for parametrization symmetry."""
    caplog.set_level(logging.DEBUG)
    async with AsyncClient(app=app, base_url="http://test") as ac:
        yield ac
@pytest.mark.asyncio
async def test_all_restaurants(client, cache_log_msg, caplog):
    """Listing endpoint: 200, non-empty JSON, marker string present, and
    the parametrized cache-status message appears in the logs."""
    response = await client.get(app.url_path_for("get_all_restaurants"))
    assert response.status_code == 200
    assert response.json()
    assert test_constants.VALID_MANY_RESTAURANTS_STRING in response.text
    assert cache_log_msg in caplog.text
@pytest.mark.asyncio
async def test_single_valid_restaurant(client, cache_log_msg, caplog):
    """Detail endpoint with a known id: 200, JSON body with the expected
    title, plus cache-status message in the logs."""
    response = await client.get(
        app.url_path_for(
            "get_single_restaurant", restaurant=test_constants.VALID_RESTAURANT_ID
        )
    )
    assert response.status_code == 200
    assert response.json()
    assert test_constants.VALID_RESTAURANT_TITLE in response.text
    assert cache_log_msg in caplog.text
@pytest.mark.asyncio
async def test_single_invalid_restaurant(client, cache_log_msg, caplog):
    """Detail endpoint with an unknown id must 404 (cache status still
    logged)."""
    response = await client.get(
        app.url_path_for(
            "get_single_restaurant", restaurant=test_constants.INVALID_RESTAURANT_ID
        )
    )
    assert response.status_code == 404
    assert cache_log_msg in caplog.text
@pytest.mark.asyncio
async def test_relay_anything_all_restaurants(client, cache_log_msg, caplog):
    """Generic relay route must proxy the restaurants listing identically
    to the dedicated endpoint."""
    response = await client.get(
        app.url_path_for("relay_anything", query=config["RESTAURANTS_PATH"])
    )
    assert response.status_code == 200
    assert response.json()
    assert test_constants.VALID_MANY_RESTAURANTS_STRING in response.text
    assert cache_log_msg in caplog.text
@pytest.mark.asyncio
async def test_relay_anything_single_restaurants(client, cache_log_msg, caplog):
    """Generic relay route must proxy a single-restaurant lookup
    identically to the dedicated endpoint."""
    response = await client.get(
        app.url_path_for(
            "relay_anything",
            query=f"{config['RESTAURANTS_PATH']}/{test_constants.VALID_RESTAURANT_ID}",
        )
    )
    assert response.status_code == 200
    assert response.json()
    assert test_constants.VALID_RESTAURANT_TITLE in response.text
    assert cache_log_msg in caplog.text
|
# Demo of `pass` as a placeholder statement.  NOTE: the loop below never
# terminates on its own, so the definitions after it are unreachable
# unless the loop is interrupted.
while True:
    pass # Busy-wait for keyboard interrupt (Ctrl+C)
class MyEmptyClass:
    """Placeholder class with no behaviour yet."""
    pass
def initlog(*args):
    """Stub logger initialiser -- intentionally does nothing yet."""
    pass # Remember to implement this!
|
__author__ = 'k22li'
# BUG FIX: removed a stray ")" after __author__ and added the missing
# closing parenthesis on the final print -- both were SyntaxErrors.
import re
import os
#print callable(getattr(str, 'split'))
# test functions of filter & map
pat = re.compile('st$')
availableChoice = ['a', 'b', 'c', 'test']
# Keep only the entries ending in "st".
print(filter(pat.search, availableChoice))
# Strip the extension from each name.
testFunc = lambda x: os.path.splitext(x)[0]
print(map(testFunc, ['a', 'a.b', 'a.b.c', 'test']))
/* Turbo-C / DOS demo: the preprocessor renames `rafik` to `main`, so the
 * program's entry point is still called main after macro expansion. */
# include <stdio.h>
# include <conio.h> /* non-standard DOS header: clrscr(), getch() */
# define rafik main /* every `rafik` token below becomes `main` */
void rafik()
{
    clrscr();                                 /* clear the console */
    printf("Main function is not used here"); /* tongue-in-cheek: rafik IS main */
    getch();                                  /* wait for a key press */
}
|
import colors as col
import random
# Source for algorithms: Kooi B. 'Yet another mastermind strategy', https://www.rug.nl/research/portal/files/9871441/icgamaster.pdf used on and before 19-02-2020
def evaluateColors(guessedColors: list, secretCode) -> dict:
    """Score a Mastermind guess against the secret.

    Returns {'black': exact color+position matches,
             'white': right color in the wrong position}.
    Duplicates are handled the standard way: each secret peg is consumed
    by at most one guess peg.  (Rewrite of the original index-juggling
    version; also drops a redundant `tempColors[i] == guessedColor`
    check that list.index() already guarantees.)
    """
    pins = {'black': 0, 'white': 0}
    remaining_secret = []
    remaining_guess = []
    # First pass: count exact matches; keep the rest for the white pass.
    for secret_color, guess_color in zip(secretCode, guessedColors):
        if secret_color == guess_color:
            pins['black'] += 1
        else:
            remaining_secret.append(secret_color)
            remaining_guess.append(guess_color)
    # Second pass: each leftover guess peg consumes at most one matching
    # secret peg, so duplicate colors are not over-counted.
    for guess_color in remaining_guess:
        try:
            remaining_secret.remove(guess_color)
            pins['white'] += 1
        except ValueError:
            pass
    return pins
def generateAllCombinations():
    """Return the full 4-peg search space: every ordered choice of four
    colors from col.all_colors (|colors|**4 lists), in the same order the
    original quadruple-nested loop produced."""
    from itertools import product  # local import keeps the module deps unchanged
    arr = [list(combo) for combo in product(col.all_colors, repeat=4)]
    print('populate for all')
    return arr
def repopulateAllCombinations():
    """Reset the mutable working list to a fresh copy of the full
    search space (called between games)."""
    global allPossibleCombinations
    global mutableAllPossibleCombinations
    mutableAllPossibleCombinations = [*allPossibleCombinations]
# Module state: the immutable full search space plus the working copy
# that the solvers prune after every guess.
allPossibleCombinations = generateAllCombinations()
mutableAllPossibleCombinations = []
repopulateAllCombinations()
def generateSecret(amount=4):
    """Fill col.SECRET with *amount* colors drawn uniformly (with
    replacement) from col.all_colors.

    Uses random.choice instead of manual randint indexing -- same
    distribution, clearer intent.
    """
    col.SECRET = [random.choice(col.all_colors) for _ in range(amount)]
"""
This algorithm is made as described in 'Yet another mastermind strategy'.
It also function as described, with expected results around 4.6 tries on average.
"""
def simpleAlgorithm(turn,secret):
if(turn == 0):
currentGuesse = ['Red', 'Red', 'Green', 'Blue']
else:
# currentGuesse = gePosibilityFromIndex(0)
currentGuesse = gePosibilityFromIndex(int(len(mutableAllPossibleCombinations) /2))
masterPins = evaluateColors(currentGuesse,secret)
filterPosibilityList(currentGuesse,masterPins)
return masterPins
"""
This heuristic algorithm is made by me in a small effort to try and improve on the simple algorithm as described in 'Yet another mastermind strategy'.
The strategy was to broaden the removed amount of items by chosing a guess from oppositeparts of the list each time.
My changes have however little to no effect.
"""
def heuristicAlgorithm(turn,secret):
if(turn == 0):
currentGuess = ['Red', 'Green', 'Blue', 'Yellow']
elif turn % 2 == 0:
currentGuess = gePosibilityFromIndex(int(len(mutableAllPossibleCombinations) /4))
else:
currentGuess = gePosibilityFromIndex(int((len(mutableAllPossibleCombinations) /4)*3))
masterPins = evaluateColors(currentGuess,secret)
filterPosibilityList(currentGuess,masterPins)
return masterPins
def gePosibilityFromIndex(index):
    """Remove and return the candidate at *index* from the working list.

    Uses list.pop(index) instead of the original remove(item):
    remove() re-scans the whole list for an equal element (O(n)) and
    would delete the wrong occurrence if duplicates ever appeared.
    """
    return mutableAllPossibleCombinations.pop(index)
def filterPosibilityList(currentGuesse, masterPins):
    """Keep only candidates consistent with the observed answer.

    A candidate can still be the secret only if guessing *currentGuesse*
    against it would yield exactly *masterPins*.  Rebuilding the list in
    one comprehension replaces the original copy-then-remove() loop,
    which re-scanned the copy on every removal (O(n^2)).
    """
    global mutableAllPossibleCombinations
    mutableAllPossibleCombinations = [
        possibility
        for possibility in mutableAllPossibleCombinations
        if evaluateColors(currentGuesse, possibility) == masterPins
    ]
"""
This funciton gathers all the combinations and assignes the amount that combinations has hit to each option in the list.
This is done so we can asses the best guess in every worstcase situation.
As you can imagne this will take a rather long time and is not much better than the simple sort.
"""
def partitionSize():
# This function makes me want to cry
partitions = {}
for possibility in mutableAllPossibleCombinations:
key = ','.join(possibility) # the key is the combination it self.
partitions[key] = {'4,0': 0, '3,0': 0, '2,0': 0, '1,0': 0, '2,2': 0, '1,2': 0, '0,2': 0, '2,1': 0, '1,1': 0, '0,1': 0, '0,0': 0, '1,3': 0, '0,3': 0, '0,4': 0} # with value all the hits on combinations
for _possibility in mutableAllPossibleCombinations:
# Compare all the combinations to one another and update the partitions dict with the hits.
_pins = evaluateColors(possibility,_possibility)
pinToStr = f"{_pins['black']},{_pins['white']}"
partitions[key][pinToStr] +=1
highestPerPartition = {}
for partitionKey in partitions:
# Get the most likely scenario per possibility.
highestKey = max(partitions[partitionKey], key=partitions[partitionKey].get)
# Add all the posibilities to a dict with the key as the combination, the amount from the most likely outcome is its value.
highestPerPartition[partitionKey] = partitions[partitionKey][highestKey]
# From all the worst case scenario's get the best one.
best = min(highestPerPartition, key=highestPerPartition.get)
return best.split(',') # Make the guesse by decoding the key
"""
This algorithm uses the partition sizing that is mentiond in 'Yet another mastermind strategy'.
For game state i calculate all the possible worstcase scenario's and get the best one for my next guess
"""
def consistentWorstCaseAlgorithm(turn:int,secret):
global mutableAllPossibleCombinations
if(turn == 0):
currentGuesse = ['Red', 'Red', 'Green', 'Green']
else:
currentGuesse = partitionSize()
masterPins = evaluateColors(currentGuesse,secret)
filterPosibilityList(currentGuesse,masterPins)
return masterPins |
import csv
import requests
import pandas as pd
from zipfile import ZipFile
from io import StringIO
URL = 'https://www.quandl.com/api/v3/databases/%(dataset)s/codes'


def dataset_url(dataset):
    """Return the Quandl codes-download URL for *dataset* (e.g. 'WIKI')."""
    return URL % dict(dataset=dataset)
def download_file(url):
    """GET *url*; return the body wrapped in a StringIO, or None for any
    non-200 response (callers must check)."""
    response = requests.get(url)
    if response.status_code != 200:
        return None
    return StringIO(response.text)
def unzip(file_):
    """Return the contents of the first member of the zip archive *file_*."""
    contents = unzip_files(file_)
    first_name = next(iter(contents))
    return contents[first_name]


def unzip_files(file_):
    """Map each member filename of the zip archive *file_* to its payload.

    Note the payload is `str(bytes)` -- i.e. a "b'...'" repr string --
    exactly as the original produced.
    """
    with ZipFile(file_, 'r') as archive:
        return {member: str(archive.read(member)) for member in archive.namelist()}
def csv_rows(text):
    """Yield each row of the CSV string *text* as a list of fields.

    (Parameter renamed from `str`, which shadowed the builtin; known
    callers pass it positionally.)
    """
    for row in csv.reader(StringIO(text)):
        yield row
def csv_dicts(text, fieldnames=None):
    """Yield each row of the CSV string *text* as a dict.

    *fieldnames* is forwarded to csv.DictReader; when None, the first
    row is used as the header.  (Parameter renamed from `str`, which
    shadowed the builtin.)
    """
    for d in csv.DictReader(StringIO(text), fieldnames=fieldnames):
        yield d
def get_symbols_list(dataset):
    """Download the code list for *dataset* and return an iterator of
    bare symbol names (dataset prefix stripped).  Performs a network
    request."""
    csv_ = unzip(download_file(dataset_url(dataset)))
    return map(lambda x: x[0].replace(dataset + '/', ''), csv_rows(csv_))
def get_symbols_dict(dataset):
    """Download the code list for *dataset* and return a dict mapping the
    full code (first CSV column) to the company name.  Performs a
    network request."""
    csv_ = unzip(download_file(dataset_url(dataset)))
    return dict(csv_rows(csv_))
def get_symbols_df(dataset):
    """Download the code list for *dataset* and return a cleaned DataFrame
    with columns ['symbol', 'company'].  Performs a network request."""
    csv_ = unzip(download_file(dataset_url(dataset)))
    df = pd.read_csv(StringIO(csv_), header=None, names=['symbol', 'company'])
    # BUG FIX: was `df.symbols` -- no such column, raised AttributeError.
    df.symbol = df.symbol.map(lambda x: x.replace(dataset + '/', ''))
    df.company = df.company.map(lambda x: x.replace('Prices, Dividends, Splits and Trading Volume', ''))
    return df
|
# -*- coding: UTF-8 -*-
# Copyright 2013-2016 by Luc Saffre.
# License: BSD, see LICENSE for more details.
"""A library for `fabric <http://docs.fabfile.org>`__ with tasks I use
to manage my Python projects.
NOTE: This module is deprecated. Use :mod:`atelier.invlib` instead.
.. contents::
:local:
.. _fab_commands:
``fab`` commands
================
Documenting
-----------
.. command:: fab blog
Edit today's blog entry, create an empty file if it doesn't yet exist.
.. command:: fab cd
Output a reStructuredText formatted list of all commits in all
projects today.
.. command:: fab bd
Converted to :cmd:`inv bd`.
.. command:: fab pd
Converted to :cmd:`inv pd`.
.. command:: fab clean
Converted to :cmd:`inv clean`.
.. command:: fab readme
Converted to :cmd:`inv readme`.
.. command:: fab api
No longer exists because we now use autosummary instead of
sphinx-apidoc.
Generate `.rst` files below `docs/api` by running `sphinx-apidoc
<http://sphinx-doc.org/invocation.html#invocation-of-sphinx-apidoc>`_.
This is no longer used by most of my projects, at least those
which I converted to `sphinx.ext.autosummary`.
.. command:: fab docs
Has been replaced by :cmd:`inv bd`.
.. command:: fab pub
Has been replaced by :cmd:`inv pd`.
Internationalization
--------------------
.. command:: fab mm
Converted to :cmd:`inv mm`.
Deploy
------
.. command:: fab release
Converted to :cmd:`inv release`.
.. command:: fab sdist
Converted to :cmd:`inv sdist`.
.. command:: fab ci
Converted to :cmd:`inv ci`.
.. command:: fab reg
Converted to :cmd:`inv reg`.
Testing
-------
.. command:: fab initdb
Converted to :cmd:`inv initdb`.
.. command:: fab test
Converted to :cmd:`inv test`.
.. command:: fab test_sdist
Converted to :cmd:`inv test_sdist`.
Miscellaneous
-------------
.. command:: fab summary
Converted to :cmd:`inv ls`.
Installation
============
To be used by creating a :file:`fabfile.py` in your project's root
directory with at least the following two lines::
from atelier.fablib import *
setup_from_fabfile(globals())
See :func:`setup_from_fabfile` for more information.
Configuration files
===================
.. xfile:: fabfile.py
In your :xfile:`fabfile.py` file you can specify project-specific
configuration settings. Example content::
from atelier.fablib import *
setup_from_fabfile(globals(), "foobar")
env.languages = "de fr et nl".split()
env.tolerate_sphinx_warnings = True
add_demo_project('foobar.demo')
.. xfile:: .fabricrc
To specify certain default preferences for all your projects, you can
create a file named :xfile:`.fabricrc` in your home directory with
content like this::
user = luc
blogger_project = lino
docs_rsync_dest = luc@example.org:~/public_html/%s
docs_rsync_dest = luc@example.org:~/public_html/{prj}_{docs}
sdist_dir = /home/luc/projects/lino/docs/dl
temp_dir = /home/luc/tmp
Project settings
================
`fabric <http://docs.fabfile.org>`__ works with a global "environment"
object named ``env``.
The following section documents the possible attributes of this object
as used by :mod:`atelier.fablib`.
You usually define these in your :xfile:`fabfile.py`. For some of
them (those who are simple strings) you can define user-specific
default values in a :xfile:`.fabricrc` file.
.. class:: env
.. attribute:: locale_dir
The name of the directory where `fab mm` et al should write their
catalog files.
.. attribute:: sdist_dir
.. attribute:: editor_command
A string with the command name of your text editor. Example::
editor_command = "emacsclient -n {0}"
The ``{0}`` will be replaced by the filename.
Note that this must be a *non waiting* command, i.e. which
launches the editor on the specified file in a new window and then
returns control to the command line without waiting for that new
window to terminate.
.. attribute:: docs_rsync_dest
A Python template string which defines the rsync destination for
publishing your projects documentation.
Used by :cmd:`fab pub`.
Example::
env.docs_rsync_dest = 'luc@example.org:~/public_html/{prj}_{docs}'
The ``{prj}`` in this template will be replaced by the internal
name of this project, and ``{{docs}}`` by the name of the doctree
(taken from :attr:`doc_trees`).
For backward compatibility the following (deprecated) template is
also still allowed::
env.docs_rsync_dest = 'luc@example.org:~/public_html/%s'
The ``%s`` in this template will be replaced by a name `xxx_yyy`,
where `xxx` is the internal name of this project and `yyy` the
name of the doctree (taken from :attr:`doc_trees`).
.. attribute:: doc_trees
A list of directory names (relative to your project directory)
containing Sphinx document trees.
Default value is ``['docs']``
If this project has a main package, then `env.doc_trees` will be
replaced by `doc_trees` attribute of that module.
.. attribute:: cleanable_files
A list of wildcards to be cleaned by :cmd:`fab clean`.
.. attribute:: use_dirhtml
Whether `sphinx-build
<http://sphinx-doc.org/invocation.html#invocation-of-sphinx-build>`__
should use ``dirhtml`` instead of the default ``html`` builder.
.. attribute:: tolerate_sphinx_warnings
Whether `sphinx-build` should tolerate warnings.
.. attribute:: languages
A list of language codes for which userdocs are being maintained.
.. attribute:: apidoc_exclude_pathnames
No longer used because we now use autosummary instead of
sphinx-apidoc.
a list of filenames (or directory names) to be excluded when you
run :cmd:`fab api`.
.. attribute:: revision_control_system
The revision control system used by your project.
Allowed values are `'git'`, `'hg'` or `None`.
Used by :cmd:`fab ci`.
.. attribute:: use_mercurial
**No longer used.** Use :attr:`env.revision_control_system` instead.)
.. attribute:: demo_projects
The list of *Django demo projects* included in this project.
Django demo projects are used by the test suite and the Sphinx
documentation. Before running :command:`fab test` or
:command:`fab bd`, they must have been initialized. To initialize
them, run :command:`fab initdb`.
It is not launched automatically by :command:`fab test` or
:command:`fab bd` because it can take some time and is not always
necessary.
History
=======
- 20141020 moved `doc_trees` project to :class:`atelier.Project`.
- 20141001 added support for multiple doc trees per project
(:attr:`env.doc_trees`).
- 20140116 : added support for managing namespace packages
TODO
====
- replace `env.blogger_project` by an attribute of the main module
(like `intersphinx_urls`)
(The rest of this page is automatically generated stuff.)
"""
import importlib
import os
import datetime
import glob
import sphinx
from babel.dates import format_date
from unipath import Path
from atelier.utils import i2d
from atelier import rstgen
try:
    from fabric.api import env, local, task
    from fabric.utils import abort, puts
    from fabric.contrib.console import confirm
    from fabric.api import lcd
except ImportError:
    # Fallback no-op @task decorator (accepts the same keyword usage) so
    # the module stays importable without fabric.
    def task(**kwargs):
        def d(f):
            return f
        return d
    # ignore it here so that Sphinx autodoc can import it even
    # if fabric is not installed.
def get_current_date(today=None):
    """Return *today* as a :class:`datetime.date`.

    When *today* is None, return the current local date; otherwise
    convert the given value (e.g. an int like 20141020) via
    :func:`atelier.utils.i2d`.  (The original docstring was empty.)
    """
    if today is None:
        return datetime.date.today()
    return i2d(today)
class RstFile(object):
    """A reStructuredText source file together with the public URL of the
    HTML page it will be rendered to.

    *local_root* is a unipath-style Path (must support ``.child``),
    *url_root* the published base URL, *parts* the path components below
    both roots (without extension).
    """
    def __init__(self, local_root, url_root, parts):
        self.path = local_root.child(*parts) + '.rst'
        self.url = url_root + "/" + "/".join(parts) + '.html'
        # if parts[0] == 'docs':
        #     self.url = url_root + "/" + "/".join(parts[1:]) + '.html'
        # else:
        #     raise Exception("20131125")
        #     self.url = url_root + "/" + "/".join(parts) + '.html'
def add_demo_project(p):
    """Register the specified settings module as being a Django demo project.

    See also :attr:`env.demo_projects`.  Registering the same module a
    second time is a silent no-op.
    """
    registered = env.demo_projects
    if p not in registered:
        registered.append(p)
def setup_from_fabfile(
        globals_dict, main_package=None, settings_module_name=None):
    """To be called from within your project's :xfile:`fabfile.py`.

    Minimal example::

        from atelier.fablib import *
        setup_from_fabfile(globals())

    If this doctree is the main doctree of a Python project, then the
    minimal example should be::

        from atelier.fablib import *
        setup_from_fabfile(globals(), "foobar")

    Where "foobar" is the Python name of your project's main package.

    Populates the fabric ``env`` with project defaults derived from the
    fabfile's location; when *settings_module_name* is given, also sets
    ``DJANGO_SETTINGS_MODULE`` and reads the language list from the
    Django site.
    """
    if not '__file__' in globals_dict:
        raise Exception(
            "No '__file__' in %r. "
            "First parameter to must be `globals()`" % globals_dict)
    fabfile = Path(globals_dict['__file__'])
    if not fabfile.exists():
        raise Exception("No such file: %s" % fabfile)
    # The project root is wherever the fabfile lives.
    env.root_dir = fabfile.parent.absolute()
    # print("20141027 %s %s " % (main_package, env.root_dir))
    env.project_name = env.root_dir.name
    env.setdefault('build_dir_name', '.build')  # but ablog needs '_build'
    env.setdefault('long_date_format', "%Y%m%d (%A, %d %B %Y)")
    # env.work_root = Path(env.work_root)
    env.setdefault('use_dirhtml', False)
    env.setdefault('blog_root', env.root_dir.child('docs'))
    env.setdefault('sdist_dir', None)
    env.setdefault('editor_command', None)
    if env.sdist_dir is not None:
        env.sdist_dir = Path(env.sdist_dir)
    env.main_package = main_package
    env.locale_dir = None
    env.tolerate_sphinx_warnings = False
    env.demo_projects = []
    env.revision_control_system = None
    env.apidoc_exclude_pathnames = []
    # env.blogger_url = "http://blog.example.com/"
    env.setdefault('languages', None)
    env.setdefault('blogger_project', None)
    env.setdefault('blogger_url', None)
    env.setdefault('cleanable_files', [])
    # Allow "de fr et" shorthand from .fabricrc (py2: basestring).
    if isinstance(env.languages, basestring):
        env.languages = env.languages.split()
    # if env.main_package:
    #     env.SETUP_INFO = get_setup_info(Path(env.root_dir))
    # else:
    #     env.SETUP_INFO = None
    if settings_module_name is not None:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings_module_name
        from django.conf import settings
        # why was this? settings.SITE.startup()
        env.languages = [lng.name for lng in settings.SITE.languages]
        # env.demo_databases.append(settings_module_name)
        #~ env.userdocs_base_language = settings.SITE.languages[0].name
    # The following import will populate the projects
    from atelier.projects import get_project_info
    env.current_project = get_project_info(env.root_dir)
    env.doc_trees = env.current_project.doc_trees
    # env.SETUP_INFO = env.current_project.SETUP_INFO
# Backwards-compatible alias for the old public name.
setup_from_project = setup_from_fabfile  # backwards compat
#~ def confirm(msg,default='y',others='n',**override_callbacks):
#~ text = "%s [%s%s]" % (msg,default.upper(),others)
#~ def y(): return True
# ~ # def n(): abort("Missing user confirmation for:\n%s" % msg)
#~ def n(): abort("Missing user confirmation")
#~ callbacks = dict(y=y,n=n)
#~ callbacks.update(override_callbacks)
#~ while True:
#~ answer = prompt(text)
# ~ # answer = raw_input(prompt)
#~ if not answer:
#~ answer = default
#~ answer = answer.lower()
#~ if answer:
#~ return callbacks.get(answer)()
def must_confirm(*args, **kw):
    """Prompt via fabric's confirm(); abort the whole task on refusal."""
    answer = confirm(*args, **kw)
    if not answer:
        abort("Dann eben nicht...")
def must_exist(p):
    """Abort the task unless path *p* exists."""
    if p.exists():
        return
    abort("No such file: %s" % p.absolute())
def rmtree_after_confirm(p):
    """Recursively delete *p*, but only if it exists and the user agrees."""
    if not p.exists():
        return
    ok = confirm("OK to remove %s and everything under it?" % p.absolute())
    if ok:
        p.rmtree()
def unused_get_locale_dir():
    """Old way to locate the locale directory: <main_package>/locale.

    Superseded by :attr:`env.locale_dir`; kept for reference only.
    Returns None when unset or when the directory does not exist.
    """
    # replaced by env.locale_dir
    if not env.main_package:
        return None  # abort("No main_package")
    args = env.main_package.split('.')
    args.append('locale')
    p = env.root_dir.child(*args)
    if not p.isdir():
        return None  # abort("Directory %s does not exist." % p)
    return p
def cleanup_pyc(p):
    """Walk *p* and delete every .pyc file whose .py source is gone,
    asking for confirmation on each file (aborts the task on refusal).

    Thanks to oddthinking on http://stackoverflow.com/questions/2528283
    """
    for root, dirs, files in os.walk(p):
        pyc_files = [filename for filename in files if filename.endswith(".pyc")]
        py_files = set([filename for filename in files if filename.endswith(".py")])
        # A .pyc is "excess" when stripping the trailing "c" finds no .py.
        excess_pyc_files = [pyc_filename for pyc_filename in pyc_files if pyc_filename[:-1] not in py_files]
        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            must_confirm("Remove excess file %s:" % full_path)
            os.remove(full_path)
@task(alias='unused_mm')
def make_messages():
    "Extract messages, then initialize and update all catalogs."
    extract_messages()
    init_catalog_code()
    update_catalog_code()
    if False:
        # Dead branch kept for reference: userdocs catalogs are not
        # maintained by this task any more.
        extract_messages_userdocs()
        setup_babel_userdocs('init_catalog')
        setup_babel_userdocs('update_catalog')
def extract_messages():
    """Extract translatable messages from source files into the
    `django.pot` file below :attr:`env.locale_dir` (no-op when unset)."""
    # locale_dir = get_locale_dir()
    locale_dir = env.locale_dir
    if locale_dir is None:
        return
    args = ["python", "setup.py"]
    args += ["extract_messages"]
    args += ["-o", Path(locale_dir).child("django.pot")]
    cmd = ' '.join(args)
    #~ must_confirm(cmd)
    local(cmd)
def extract_messages_userdocs():
    """
    Run the Sphinx gettext builder on userdocs.

    No-op when the project has no `userdocs` directory; output goes to
    userdocs/translations.
    """
    userdocs = env.root_dir.child('userdocs')
    if not userdocs.isdir():
        return  # abort("Directory %s does not exist." % userdocs)
    args = ['sphinx-build', '-b', 'gettext']
    #~ args += cmdline_args
    # ~ args += ['-a'] # all files, not only outdated
    # ~ args += ['-P'] # no postmortem
    # ~ args += ['-Q'] # no output
    #~ if not env.tolerate_sphinx_warnings:
    # ~ args += ['-W'] # consider warnings as errors
    #~ args += ['-w',env.DOCSDIR.child('warnings.txt')]
    args += [userdocs]
    args += [userdocs.child("translations")]
    cmd = ' '.join(args)
    local(cmd)
@task(alias='rename')
def rename_data_url_friendly():
    """Scaffolding for a docs/data rename task -- currently it only
    prints the directory walk and renames nothing."""
    data_dir = env.root_dir.child('docs', 'data')
    #~ print list(data_dir.listdir(names_only=True))
    print(list(data_dir.walk()))
def setup_babel_userdocs(babelcmd):
    """Run the given `setup.py` babel command (*babelcmd*) for every
    userdocs translation domain and every non-primary language.

    `init_catalog` is skipped for languages whose .po file already
    exists; `compile_catalog` reads the .po, the others read the .pot.
    """
    userdocs = env.root_dir.child('userdocs')
    if not userdocs.isdir():
        return
    locale_dir = userdocs.child('translations')
    for domain in locale_dir.listdir('*.pot', names_only=True):
        domain = domain[:-4]
        for loc in env.languages:
            if loc != env.languages[0]:
                po_file = Path(locale_dir, loc, 'LC_MESSAGES', '%s.po' %
                               domain)
                # NOTE(review): mo_file is currently unused -- only the
                # disabled `elif` below referenced it.
                mo_file = Path(locale_dir, loc, 'LC_MESSAGES', '%s.mo' %
                               domain)
                pot_file = Path(locale_dir, '%s.pot' % domain)
                if babelcmd == 'init_catalog' and po_file.exists():
                    print("Skip %s because file exists." % po_file)
                #~ elif babelcmd == 'compile_catalog' and not mo_file.needs_update(po_file):
                #~ print "Skip %s because newer than .po" % mo_file
                else:
                    args = ["python", "setup.py"]
                    args += [babelcmd]
                    args += ["-l", loc]
                    args += ["--domain", domain]
                    args += ["-d", locale_dir]
                    #~ args += [ "-o" , po_file ]
                    #~ if babelcmd == 'init_catalog':
                    if babelcmd == 'compile_catalog':
                        args += ["-i", po_file]
                    else:
                        args += ["-i", pot_file]
                    cmd = ' '.join(args)
                    #~ must_confirm(cmd)
                    local(cmd)
@task(alias='cmu')
def compile_catalog_userdocs():
    """Compile the userdocs .po catalogs to binary .mo files."""
    setup_babel_userdocs('compile_catalog')
def init_catalog_code():
    """Create code .po files if necessary.

    For every non-English language, run `setup.py init_catalog` from the
    shared django.pot -- unless the language's .po file already exists.
    No-op when :attr:`env.locale_dir` is unset.
    """
    from lino.core.site import to_locale
    locale_dir = env.locale_dir
    # locale_dir = get_locale_dir()
    if locale_dir is None:
        return
    locale_dir = Path(locale_dir)
    for loc in env.languages:
        if loc != 'en':
            f = locale_dir.child(loc, 'LC_MESSAGES', 'django.po')
            if f.exists():
                print("Skip %s because file exists." % f)
            else:
                args = ["python", "setup.py"]
                args += ["init_catalog"]
                args += ["--domain django"]
                args += ["-l", to_locale(loc)]
                args += ["-d", locale_dir]
                #~ args += [ "-o" , f ]
                args += ["-i", locale_dir.child('django.pot')]
                cmd = ' '.join(args)
                must_confirm(cmd)
                local(cmd)
def update_catalog_code():
    """Update .po files from .pot file.

    Runs `setup.py update_catalog` for every non-primary language.
    No-op when :attr:`env.locale_dir` is unset.
    """
    from lino.core.site import to_locale
    locale_dir = env.locale_dir
    # locale_dir = get_locale_dir()
    if locale_dir is None:
        return
    locale_dir = Path(locale_dir)
    for loc in env.languages:
        if loc != env.languages[0]:
            args = ["python", "setup.py"]
            args += ["update_catalog"]
            args += ["--domain django"]
            #~ args += [ "-d" , locale_dir ]
            args += ["-o", locale_dir.child(loc, 'LC_MESSAGES', 'django.po')]
            args += ["-i", locale_dir.child("django.pot")]
            args += ["-l", to_locale(loc)]
            cmd = ' '.join(args)
            #~ must_confirm(cmd)
            local(cmd)
@task(alias='cm')
def compile_catalog():
    """Compile .po files to .mo files.

    Runs `setup.py compile_catalog` for every non-primary language.
    No-op when :attr:`env.locale_dir` is unset.
    """
    from lino.core.site import to_locale
    locale_dir = env.locale_dir
    # locale_dir = get_locale_dir()
    if locale_dir is None:
        return
    for loc in env.languages:
        if loc != env.languages[0]:
            args = ["python", "setup.py"]
            args += ["compile_catalog"]
            args += ["-i", locale_dir.child(loc, 'LC_MESSAGES', 'django.po')]
            args += ["-o", locale_dir.child(loc, 'LC_MESSAGES', 'django.mo')]
            args += ["--domain django"]
            #~ args += [ "-d" , locale_dir ]
            args += ["-l", to_locale(loc)]
            cmd = ' '.join(args)
            #~ must_confirm(cmd)
            local(cmd)
@task(alias='mss')
def makescreenshots():
    """Generate screenshot .jpg files to gen/screenshots by running the
    `makescreenshots` admin command in every demo project."""
    run_in_demo_projects('makescreenshots', '--traceback')
@task(alias='sss')
def syncscreenshots():
    """Synchronize gen/screenshots to userdocs/gen/screenshots via the
    `syncscreenshots` admin command in every demo project."""
    run_in_demo_projects('syncscreenshots', '--traceback',
                         'gen/screenshots', 'userdocs/gen/screenshots')
def sphinx_build(builder, docs_dir,
                 cmdline_args=(), language=None, build_dir_cmd=None):
    """Run sphinx-build for one doc tree.

    builder : sphinx builder name ('html', 'latex', 'linkcheck', ...).
    docs_dir : directory holding conf.py; the command runs with cwd here.
    cmdline_args : extra sphinx-build arguments.  Default changed from a
        mutable ``[]`` to an immutable ``()``; ``args += cmdline_args``
        accepts either, so callers are unaffected.
    language : optional language code; non-primary languages build into
        a per-language subdirectory and get -D/-A language overrides.
    build_dir_cmd : optional shell command run inside the build dir
        afterwards (e.g. 'make all-pdf').
    """
    args = ['sphinx-build', '-b', builder]
    args += cmdline_args
    # ~ args += ['-a'] # all files, not only outdated
    # ~ args += ['-P'] # no postmortem
    # ~ args += ['-Q'] # no output
    # build_dir = docs_dir.child(env.build_dir_name)
    build_dir = Path(env.build_dir_name)
    if language is not None:
        args += ['-D', 'language=' + language]
        # needed in select_lang.html template
        args += ['-A', 'language=' + language]
        if language != env.languages[0]:
            build_dir = build_dir.child(language)
    #~ print 20130726, build_dir
    if env.tolerate_sphinx_warnings:
        args += ['-w', 'warnings_%s.txt' % builder]
    else:
        args += ['-W']  # consider warnings as errors
    # args += ['-vvv'] # increase verbosity
    #~ args += ['-w'+Path(env.root_dir,'sphinx_doctest_warnings.txt')]
    args += ['.', build_dir]
    cmd = ' '.join(args)
    with lcd(docs_dir):
        local(cmd)
    if build_dir_cmd is not None:
        with lcd(build_dir):
            local(build_dir_cmd)
def sync_docs_data(docs_dir):
    """Copy a doc tree's downloadable directories ('dl' and 'data') into
    its build directory so they are published alongside the HTML.

    NOTE(review): both 'dl' and 'data' are copied into the same 'dl'
    target directory -- confirm that is intentional.
    """
    build_dir = docs_dir.child(env.build_dir_name)
    for data in ('dl', 'data'):
        src = docs_dir.child(data).absolute()
        if src.isdir():
            target = build_dir.child('dl')
            target.mkdir()
            cmd = 'cp -ur %s %s' % (src, target.parent)
            local(cmd)
    if False:
        # Dead branch: favicon publishing is disabled.
        # according to http://mathiasbynens.be/notes/rel-shortcut-icon
        for n in ['favicon.ico']:
            src = docs_dir.child(n).absolute()
            if src.exists():
                target = build_dir.child(n)
                cmd = 'cp %s %s' % (src, target.parent)
                local(cmd)
@task(alias='userdocs')
def build_userdocs(*cmdline_args):
    """
    Deprecated. sphinx-build the userdocs tree in all languages.

    No-op when no languages are configured or the userdocs directory
    does not exist.
    """
    if env.languages is None:
        return
    docs_dir = env.root_dir.child('userdocs')
    if not docs_dir.exists():
        return
    for lng in env.languages:
        sphinx_build('html', docs_dir, cmdline_args, lng)
    sync_docs_data(docs_dir)
@task(alias='pdf')
def build_userdocs_pdf(*cmdline_args):
    """Build the userdocs as PDF (latex builder + `make all-pdf`) for
    every configured language; no-op when languages or the userdocs
    directory are missing."""
    if env.languages is None:
        return
    docs_dir = env.root_dir.child('userdocs')
    if not docs_dir.exists():
        return
    for lng in env.languages:
        sphinx_build('latex', docs_dir, cmdline_args,
                     lng, build_dir_cmd='make all-pdf')
    sync_docs_data(docs_dir)
@task(alias='linkcheck')
def sphinx_build_linkcheck(*cmdline_args):
    """sphinxbuild -b linkcheck docs.

    Runs on `docs` and, if present, on `userdocs` in the primary
    language only.
    """
    docs_dir = env.root_dir.child('docs')
    if docs_dir.exists():
        sphinx_build('linkcheck', docs_dir, cmdline_args)
    docs_dir = env.root_dir.child('userdocs')
    if docs_dir.exists():
        lng = env.languages[0]
        #~ lng = env.userdocs_base_language
        sphinx_build('linkcheck', docs_dir, cmdline_args, lng)
def get_doc_trees():
    """Yield the absolute directory of every configured doc tree,
    raising immediately on a `doc_trees` entry that does not exist."""
    for rel_doc_tree in env.doc_trees:
        docs_dir = env.root_dir.child(rel_doc_tree)
        if docs_dir.exists():
            yield docs_dir
        else:
            raise Exception(
                "Directory %s does not exist." % docs_dir
                + "\nCheck your project's `doc_trees` setting.")
def run_in_demo_projects(admin_cmd, *more):
    """Run the given django-admin command in each demo project (see
    :attr:`env.demo_projects`), passing *more* as extra arguments.
    """
    for mod in env.demo_projects:
        puts("-" * 80)
        puts("In demo project {0}:".format(mod))
        from importlib import import_module
        m = import_module(mod)
        # p = Path(m.__file__).parent.absolute()
        # Commands run in the project's cache dir when it has one.
        p = m.SITE.cache_dir or m.SITE.project_dir
        with lcd(p):
            args = ["django-admin.py"]
            args += [admin_cmd]
            args += more
            #~ args += ["--noinput"]
            args += ["--settings=" + mod]
            #~ args += [" --pythonpath=%s" % p.absolute()]
            cmd = " ".join(args)
            local(cmd)
@task(alias='ddt')
def double_dump_test():
    """
    Perform a "double dump test" on every demo database.
    TODO: convert this to a Lino management command.
    """
    # Disabled: everything below the raise is unreachable until this
    # task is converted to the post-20150129 project layout.
    raise Exception("Not yet converted after 20150129")
    if len(env.demo_databases) == 0:
        return
    a = Path(env.temp_dir, 'a')
    b = Path(env.temp_dir, 'b')
    rmtree_after_confirm(a)
    rmtree_after_confirm(b)
    #~ if not confirm("This will possibly break the demo databases. Are you sure?"):
    #~ return
    #~ a.mkdir()
    with lcd(env.temp_dir):
        for db in env.demo_databases:
            if a.exists():
                a.rmtree()
            if b.exists():
                b.rmtree()
            # Dump, restore from the dump, dump again: both dumps must
            # be identical (`diff a b`).
            local("django-admin.py dump2py --settings=%s --traceback a" % db)
            local(
                "django-admin.py run --settings=%s --traceback a/restore.py" %
                db)
            local("django-admin.py dump2py --settings=%s --traceback b" % db)
            local("diff a b")
|
# Exercise 6)
# Program that lets the user enter 2 names with 2 matching birthdays,
# then look up a name and print the matching birthday.
# (Comments translated from Norwegian; the user-facing prompt strings
# are left untouched.)
navn = ""
# `navn` starts as an empty string
index = 0
# `index` starts as 0 (later reused to hold the search query string)
liste = {}
# dict mapping name -> birthday
for i in range(2):
    # Loop twice: one name/birthday pair per iteration.
    navn = input("Skriv inn navn: ")
    # Prompt for a name.
    bursdag = input("Skriv inn bursdag: ")
    # Prompt for a birthday.
    liste[navn] = bursdag
    # Store the birthday keyed by the name.
for i in liste:
    # Print every stored name/birthday pair.
    print(str(i) + " har bursdag " + str(liste[i]))
index = input("Søk på navn og finn bursdag ")
# Ask which name to look up.
if index in liste:
    # Known name: print its birthday.
    print (index + " har bursdag " + liste[index])
else:
    # Unknown name: ask the user for a valid one.
    print("Skriv in et gyldig navn")
|
def valid(s):
    """Return True if *s* is a palindrome, ignoring case and every
    non-alphanumeric character (spaces, punctuation, ...)."""
    s = s.lower()
    # Keep only alphanumeric *characters*. The previous version split the
    # string into words and filtered whole words with isalnum(), which
    # silently dropped any word containing punctuation (e.g. ":race" became
    # "" and was vacuously reported as a palindrome).
    s = "".join(char for char in s if char.isalnum())
    return s == s[::-1]
print(valid("Race car"))
print(valid("fat"))
print(valid(":racecar"))
|
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
# functions to prep dataset
import sklearn.datasets as skdata
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import SimpleImputer, IterativeImputer
import miceforest as mf
# import tensorflow_datasets
import os
# define path locations relative to this file
dir_path = os.path.dirname(os.path.realpath(__file__))  # directory containing this module
thoracic_path = os.path.join(dir_path, "ThoracicSurgery.arff")  # ARFF file, read via scipy's loadarff
abalone_path = os.path.join(dir_path, "abalone.data")  # headerless CSV
bank_path = os.path.join(dir_path, "bank-additional/bank-additional.csv")  # ';'-separated CSV
anneal_path_train = os.path.join(dir_path, "anneal.data")  # '?' marks missing values
anneal_path_test = os.path.join(dir_path, "anneal.test")  # companion test split, same format
# convenience imputation functions
def simple(train, valid, test, dtypes=None):
    """Univariate imputation fitted on ``train`` and applied to each
    provided split.

    With ``dtypes=None`` every column is mean-imputed. Otherwise
    ``dtypes`` flags each column as continuous (0, mean) or categorical
    (1, most frequent value). ``valid`` and ``test`` may be None.
    Returns the (train, valid, test) triple.
    """
    if dtypes is None:
        # One mean imputer over all columns.
        mean_imp = SimpleImputer(missing_values=np.nan, strategy='mean')
        mean_imp.fit(train)
        train = mean_imp.transform(train)
        if valid is not None:
            valid = mean_imp.transform(valid)
        if test is not None:
            test = mean_imp.transform(test)
    else:
        # Separate imputers: mean for continuous, mode for categorical.
        cont_mask = np.array(dtypes) == 0
        cat_mask = np.array(dtypes) == 1
        cont_imp = SimpleImputer(missing_values=np.nan, strategy='mean')
        cat_imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
        cont_imp.fit(train[:, cont_mask])
        cat_imp.fit(train[:, cat_mask])
        # Fill the column groups in place for every split that exists.
        for split_arr in (train, valid, test):
            if split_arr is not None:
                split_arr[:, cont_mask] = cont_imp.transform(split_arr[:, cont_mask])
                split_arr[:, cat_mask] = cat_imp.transform(split_arr[:, cat_mask])
    return train, valid, test
def iterative(train, rng_key, dtypes=None, valid=None, test=None):
    """Multivariate imputation via sklearn's IterativeImputer, fitted on
    ``train`` and applied to any split that is not None.

    ``dtypes`` is accepted for interface parity with the other imputers
    but is not used. Returns the (train, valid, test) triple.
    """
    imputer = IterativeImputer(max_iter=10, random_state=rng_key)
    imputer.fit(train)
    # Transform each split with the imputer fitted on train only.
    train, valid, test = (
        imputer.transform(part) if part is not None else None
        for part in (train, valid, test)
    )
    return train, valid, test
def miceforest(train, rng_key, dtypes=None, valid=None, test=None):
    """Impute with the miceforest package (MICE with random forests).

    Fits a multiple-imputation kernel on ``train`` (20 datasets, 3 MICE
    iterations) and completes ``valid``/``test`` with the same kernel
    when they are given. ``dtypes`` is accepted for interface parity but
    unused. Returns the (train, valid, test) triple as numpy arrays.

    Bug fix: ``rng_key`` was previously ignored — ``random_state`` was
    hard-coded to 10, so every caller-supplied seed produced identical
    imputations. The kernel is now seeded with ``rng_key``.
    """
    colnames = [str(i) for i in range(train.shape[1])]
    df = pd.DataFrame(train, columns=colnames)
    kernel = mf.MultipleImputedKernel(
        df,
        datasets=20,
        save_all_iterations=True,
        random_state=rng_key,  # was hard-coded 10; honour the caller's seed
        mean_match_candidates=0
    )
    kernel.mice(3)
    # Dataset 0 of the multiply-imputed kernel is used as the completion.
    train = kernel.complete_data(0).values
    if valid is not None:
        valid_imp = kernel.impute_new_data(
            new_data=pd.DataFrame(valid, columns=colnames))
        valid = valid_imp.complete_data(0).values
    if test is not None:
        test_imp = kernel.impute_new_data(
            new_data=pd.DataFrame(test, columns=colnames))
        test = test_imp.complete_data(0).values
    return train, valid, test
# dataset generating functions
def spiral(
    N,
    missing=None,
    imputation=None, # one of none, simple, iterative, miceforest
    train_complete=False,
    test_complete=True,
    split=0.33,
    rng_key=0,
    p=0.5,
    cols_miss=1
):
    """Generate a noisy two-spiral binary classification dataset with
    optional synthetic missingness and imputation.

    Features: x1, x2 (spiral coordinates plus Gaussian noise), x3 (pure
    noise), x4 (the label plus uniform noise — highly informative).
    ``missing`` selects the mechanism: "MCAR"/"MAR"/"MNAR", a float
    (MCAR with that probability), or a ``(p, "MNAR")`` tuple.
    ``train_complete``/``test_complete`` control which splits receive
    missing values; ``imputation`` selects the post-hoc imputer.

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test,
    (x_a, x_b), 2)`` where ``(x_a, x_b)`` are the raw per-class points
    and 2 is the number of classes.

    NOTE(review): the ``split`` parameter is ignored here — every
    train_test_split below hard-codes ``test_size=0.33`` (the sibling
    dataset functions use ``split``). Confirm whether intended.
    """
    rng = np.random.default_rng(rng_key)
    # sqrt of a uniform sample makes the angular density radially uniform
    theta = np.sqrt(rng.uniform(0,1,N))*2*np.pi # np.linspace(0,2*pi,100)
    r_a = 2*theta + np.pi
    data_a = np.array([np.cos(theta)*r_a, np.sin(theta)*r_a]).T
    x_a = data_a + rng.standard_normal((N,2))  # class-0 spiral + noise
    r_b = -2*theta - np.pi
    data_b = np.array([np.cos(theta)*r_b, np.sin(theta)*r_b]).T
    x_b = data_b + rng.standard_normal((N,2))  # class-1 spiral + noise
    res_a = np.append(x_a, np.zeros((N,1)), axis=1)  # append label 0
    res_b = np.append(x_b, np.ones((N,1)), axis=1)   # append label 1
    res = np.append(res_a, res_b, axis=0)
    rng.shuffle(res)
    X_ = res[:, :2]
    y = res[:, 2]
    # create a noise column x3 and x4 transformation using x1, x2
    x3 = rng.standard_normal((N*2,1)) * 5
    x4 = (y).reshape((-1,1)) + rng.uniform(0,1,(N*2, 1)) # y with noise - should be highly informative...
    X_ = np.hstack([X_, x3, x4])
    key = rng.integers(9999)
    if missing is None:
        # no mechanism requested: both splits are kept complete
        train_complete = True
        test_complete = True
    # Below, ``X`` denotes the portion that will receive missing values.
    if train_complete and test_complete:
        X = X_
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=key)
    elif train_complete and not test_complete: # TRAIN COMPLETE IS TRUE AND TEST COMPLETE IS FALSE
        X_train, X, y_train, y_test = train_test_split(X_, y, test_size=0.33, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=key)
    elif not train_complete and test_complete:
        X, X_test, y_train, y_test = train_test_split(X_, y, test_size=0.33, random_state=key)
    elif not train_complete and not test_complete:
        X = X_
    # create missingness mask
    cols = X.shape[1]
    if missing == "MAR":
        cols_miss = np.minimum(cols - 1, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols-1,))
        corrections = []
        for col in range(cols-1):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): concatenating the per-column 1-D masks and then
        # reshaping to (-1, cols-1) interleaves entries across rows; to
        # keep each mask aligned with its own column this presumably
        # needs np.stack(corrections, axis=1) — TODO confirm (same
        # pattern appears in thoracic/abalone below).
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols - 1))
        print(corrections.shape, X.shape)  # NOTE(review): leftover debug print
        rand_arr = rng.uniform(0,1,(X.shape[0], cols - 1)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is shifted to the left, therefore MAR
    if missing == "MNAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols,))
        corrections = []
        for col in range(cols):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): same concatenate/reshape concern as the MAR branch.
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols))
        rand_arr = rng.uniform(0,1,(X.shape[0], cols)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is not shifted to the left, therefore MNAR
    if type(missing) == float or missing == "MCAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        # a float ``missing`` doubles as the MCAR probability
        if type(missing) == float: p = missing
        rand_arr = rng.uniform(0,1,(X.shape[0], cols_miss))
        nan_arr = np.where(rand_arr < p, np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr
    if type(missing) == tuple and missing[1] == "MNAR":
        # mixed mechanism: mask x4 depending on x4 itself (MNAR) and on
        # the observed x1/x2 (MAR); missing[0] is the missingness rate
        correction1 = X[:,-1:] < np.quantile(X[:,-1:], 0.2, keepdims=True) # dependency on x4 MNAR
        correction2 = X[:,:1] < np.quantile(X[:,:1], 0.2, keepdims=True) # dependency on x1 MAR
        correction3 = X[:,1:2] < np.quantile(X[:,1:2], 0.5, keepdims=True) # dependency on x2 MAR
        correction = (correction1 | correction2) | correction3
        correction = np.where(correction, 0.0, 1.0).reshape((-1,1)) # dependency on x4
        rand_arr = rng.uniform(0,1,(X.shape[0], 1)) * correction
        # missingness is dependent on unobserved missing values
        nan_arr = np.where(rand_arr > (1 - missing[0]), np.nan, 1.0)
        X[:, -1:] *= nan_arr
    # generate train, validate, test datasets and impute training
    key = rng.integers(9999)
    if train_complete and test_complete:
        pass
    elif train_complete and not test_complete:
        X_test = X
    elif not train_complete and test_complete:
        X_train = X
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=key)
    elif not train_complete and not test_complete:
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.33, random_state=key)
    # missingness diagnostics
    # diagnostics = {"X_train":{}, "X_valid":{}, "X_test":{}}
    # diagnostics["X_train"]["cols"] = np.isnan(X_train).sum(0) / X_train.shape[0]
    # diagnostics["X_train"]["rows"] = np.any(np.isnan(X_train), axis=1).sum() / X_train.shape[0]
    # diagnostics["X_valid"]["cols"] = np.isnan(X_valid).sum(0) / X_valid.shape[0]
    # diagnostics["X_valid"]["rows"] = np.any(np.isnan(X_valid), axis=1).sum() / X_valid.shape[0]
    # diagnostics["X_test"]["cols"] = np.isnan(X_test).sum(0) / X_test.shape[0]
    # diagnostics["X_test"]["rows"] = np.any(np.isnan(X_test), axis=1).sum() / X_test.shape[0]
    # print(diagnostics)
    # perform desired imputation strategy
    if imputation == "simple" and missing is not None:
        X_train, X_valid, X_test = simple(
            X_train,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative" and missing is not None:
        X_train, X_valid, X_test = iterative(
            X_train,
            key,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest" and missing is not None:
        # a complete test set must not be imputed — pass None instead
        if test_complete:
            test_input = None
        else:
            test_input = X_test
        X_train, X_valid, test_input = miceforest(
            X_train,
            int(key),
            dtypes=None,
            valid=X_valid,
            test=test_input)
        if test_complete:
            X_test = X_test
        else:
            X_test = test_input
    return X_train, X_valid, X_test, y_train, y_valid, y_test, (x_a, x_b), 2
def thoracic(
    missing="MAR",
    imputation=None, # one of none, simple, iterative, miceforest
    train_complete=False,
    test_complete=True,
    split=0.33,
    rng_key=0,
    p=0.5,
    cols_miss=1
):
    """Load the Thoracic Surgery ARFF dataset, integer-encode nominal
    columns, inject synthetic missingness (MCAR/MAR/MNAR) and optionally
    impute it.

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test, 2)``
    (binary classification).

    NOTE(review): the per-column dtype list built below is overwritten
    by ``cols = X.shape[1]`` and the imputers are always called with
    ``dtypes=None``, so the nominal/continuous distinction never reaches
    imputation — confirm whether intended.
    """
    # import data
    rng = np.random.default_rng(rng_key)
    data, meta = loadarff(thoracic_path)
    d = pd.DataFrame(data)
    # convert categorical variables to integer encoding
    cols = []
    for name in meta.names():
        m = meta[name]
        if m[0] == 'nominal':
            cols.append(1)
            l = list(m[1])
            # map each byte-string category to its index in the ARFF domain
            d[name] = [l.index(x.decode('UTF-8')) for x in d[name].values]
        else:
            cols.append(0)
    cols = cols[:-1]  # drop the target column's type flag
    X_ = d.values[:, :-1]
    y = d.values[:, -1]
    if missing is None:
        # no mechanism requested: both splits are kept complete
        train_complete = True
        test_complete = True
    # Below, ``X`` denotes the portion that will receive missing values.
    if train_complete and test_complete:
        X = X_
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif train_complete and not test_complete: # TRAIN COMPLETE IS TRUE AND TEST COMPLETE IS FALSE
        key = rng.integers(9999)
        X_train, X, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and test_complete:
        key = rng.integers(9999)
        X, X_test, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        X = X_
    cols = X.shape[1]  # NOTE: rebinds ``cols`` from dtype list to column count
    if missing == "MCAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        rand_arr = rng.uniform(0, 1, (X.shape[0], cols_miss))
        nan_arr = np.where(rand_arr < p, np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr
    if missing == "MAR":
        cols_miss = np.minimum(cols - 1, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols-1,))
        corrections = []
        for col in range(cols-1):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): concatenating per-column 1-D masks then reshaping
        # to (-1, cols-1) interleaves entries across rows; presumably this
        # needs np.stack(corrections, axis=1) — TODO confirm.
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols - 1))
        print(corrections.shape, X.shape)  # NOTE(review): leftover debug print
        rand_arr = rng.uniform(0,1,(X.shape[0], cols - 1)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is shifted to the left, therefore MAR
    if missing == "MNAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols,))
        corrections = []
        for col in range(cols):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): same concatenate/reshape concern as the MAR branch.
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols))
        rand_arr = rng.uniform(0,1,(X.shape[0], cols)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is not shifted to the left, therefore MNAR
    # generate train, validate, test datasets and impute training
    if train_complete and test_complete:
        pass
    elif train_complete and not test_complete:
        X_test = X
    elif not train_complete and test_complete:
        X_train = X
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    # missingness diagnostics
    # diagnostics = {"X_train":{}, "X_valid":{}, "X_test":{}}
    # diagnostics["X_train"]["cols"] = np.isnan(X_train).sum(0) / X_train.shape[0]
    # diagnostics["X_train"]["rows"] = np.any(np.isnan(X_train), axis=1).sum() / X_train.shape[0]
    # diagnostics["X_valid"]["cols"] = np.isnan(X_valid).sum(0) / X_valid.shape[0]
    # diagnostics["X_valid"]["rows"] = np.any(np.isnan(X_valid), axis=1).sum() / X_valid.shape[0]
    # diagnostics["X_test"]["cols"] = np.isnan(X_test).sum(0) / X_test.shape[0]
    # diagnostics["X_test"]["rows"] = np.any(np.isnan(X_test), axis=1).sum() / X_test.shape[0]
    # print(diagnostics)
    # perform desired imputation strategy
    if imputation == "simple" and missing is not None:
        X_train, X_valid, X_test = simple(
            X_train,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative" and missing is not None:
        X_train, X_valid, X_test = iterative(
            X_train,
            key,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest" and missing is not None:
        # a complete test set must not be imputed — pass None instead
        if test_complete:
            test_input = None
        else:
            test_input = X_test
        X_train, X_valid, test_input = miceforest(
            X_train,
            int(key),
            dtypes=None,
            valid=X_valid,
            test=test_input)
        if test_complete:
            X_test = X_test
        else:
            X_test = test_input
    return X_train, X_valid, X_test, y_train, y_valid, y_test, 2
def abalone(
    missing="MAR",
    imputation=None, # one of none, simple, iterative, miceforest
    train_complete=False,
    test_complete=True,
    split=0.33,
    rng_key=0,
    p=0.5,
    cols_miss=1
):
    """Load the abalone dataset (first column = sex, integer-encoded;
    target = last column, encoded as class indices), inject synthetic
    missingness (MCAR/MAR/MNAR) and optionally impute it.

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test, 1)``.
    NOTE(review): the trailing 1 differs from the 2 returned by the
    sibling classification loaders — confirm what callers expect here.

    NOTE(review): ``coltypes`` below is computed but never used (the
    imputers receive ``dtypes=None``) — confirm whether intended.
    """
    rng = np.random.default_rng(rng_key)
    data = pd.read_csv(abalone_path, header=None)
    # integer-encode the categorical sex column (column 0)
    cat = list(data[0].unique())
    data[0] = [cat.index(i) for i in data[0].values]
    X_ = data.values[:, :-1]
    y = data.values[:, -1]
    # encode the target values as contiguous class indices
    unique = list(np.unique(y))
    y = np.array([unique.index(v) for v in y])
    coltypes = [1] + [0 for i in range(X_.shape[1] - 1)]
    if missing is None:
        # no mechanism requested: both splits are kept complete
        train_complete = True
        test_complete = True
    # Below, ``X`` denotes the portion that will receive missing values.
    if train_complete and test_complete:
        X = X_
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif train_complete and not test_complete: # TRAIN COMPLETE IS TRUE AND TEST COMPLETE IS FALSE
        key = rng.integers(9999)
        X_train, X, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and test_complete:
        key = rng.integers(9999)
        X, X_test, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        X = X_
    cols = X.shape[1]
    if missing == "MCAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        rand_arr = rng.uniform(0, 1, (X.shape[0], cols_miss))
        nan_arr = np.where(rand_arr < p, np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr
    if missing == "MAR":
        cols_miss = np.minimum(cols - 1, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols-1,))
        corrections = []
        for col in range(cols-1):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): concatenating per-column 1-D masks then reshaping
        # to (-1, cols-1) interleaves entries across rows; presumably this
        # needs np.stack(corrections, axis=1) — TODO confirm.
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols - 1))
        print(corrections.shape, X.shape)  # NOTE(review): leftover debug print
        rand_arr = rng.uniform(0,1,(X.shape[0], cols - 1)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is shifted to the left, therefore MAR
    if missing == "MNAR":
        cols_miss = np.minimum(cols, cols_miss) # clip cols missing
        q = rng.uniform(0.3,0.7,(cols,))
        corrections = []
        for col in range(cols):
            correction = X[:,col] > np.quantile(X[:,col], q[col], keepdims=True) # dependency on each x
            corrections.append(correction)
        # NOTE(review): same concatenate/reshape concern as the MAR branch.
        corrections = np.concatenate(corrections)
        corrections = np.where(corrections, 0.0, 1.0).reshape((-1,cols))
        rand_arr = rng.uniform(0,1,(X.shape[0], cols)) * corrections
        nan_arr = np.where(rand_arr > (1-p), np.nan, 1.0)
        X[:, -cols_miss:] *= nan_arr[:, -cols_miss:] # dependency is not shifted to the left, therefore MNAR
    # generate train, validate, test datasets and impute training
    if train_complete and test_complete:
        pass
    elif train_complete and not test_complete:
        X_test = X
    elif not train_complete and test_complete:
        X_train = X
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    # missingness diagnostics
    # diagnostics = {"X_train":{}, "X_valid":{}, "X_test":{}}
    # diagnostics["X_train"]["cols"] = np.isnan(X_train).sum(0) / X_train.shape[0]
    # diagnostics["X_train"]["rows"] = np.any(np.isnan(X_train), axis=1).sum() / X_train.shape[0]
    # diagnostics["X_valid"]["cols"] = np.isnan(X_valid).sum(0) / X_valid.shape[0]
    # diagnostics["X_valid"]["rows"] = np.any(np.isnan(X_valid), axis=1).sum() / X_valid.shape[0]
    # diagnostics["X_test"]["cols"] = np.isnan(X_test).sum(0) / X_test.shape[0]
    # diagnostics["X_test"]["rows"] = np.any(np.isnan(X_test), axis=1).sum() / X_test.shape[0]
    # print(diagnostics)
    # perform desired imputation strategy
    if imputation == "simple" and missing is not None:
        X_train, X_valid, X_test = simple(
            X_train,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative" and missing is not None:
        X_train, X_valid, X_test = iterative(
            X_train,
            key,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest" and missing is not None:
        # a complete test set must not be imputed — pass None instead
        if test_complete:
            test_input = None
        else:
            test_input = X_test
        X_train, X_valid, test_input = miceforest(
            X_train,
            int(key),
            dtypes=None,
            valid=X_valid,
            test=test_input)
        if test_complete:
            X_test = X_test
        else:
            X_test = test_input
    return X_train, X_valid, X_test, y_train, y_valid, y_test, 1
def banking(imputation=None, split=0.33, rng_key=0):
    """Load and preprocess the bank-marketing dataset.

    Categorical columns are integer-encoded with "unknown" mapped to
    NaN (natural missingness). The data is split into train/valid/test
    and the training/test sets are optionally imputed.

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test, 2)``.

    Bug fix: ``key`` was previously assigned only inside the "simple"
    branch, so ``imputation="iterative"`` or ``"miceforest"`` raised
    NameError; ``key`` is now drawn before the branches.

    NOTE(review): X_valid is returned un-imputed (every imputer is
    called with ``valid=None``) — confirm whether intentional.
    """
    rng = np.random.default_rng(rng_key)
    data = pd.read_csv(bank_path, sep=";")
    # column groups: continuous vs categorical (target 'y' is last)
    cont = ['age', 'duration', 'campaign', 'pdays', 'previous', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed']
    cat = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome', 'y']
    def lab_2_num(array):
        # integer-encode labels; "unknown" becomes NaN (missing)
        unique_list = [l for l in list(np.unique(array)) if l != "unknown"]
        return np.array([unique_list.index(l) if l != "unknown" else np.nan for l in array])
    for c in cat:
        data[c] = lab_2_num(data[c].values)
    data = data[cont + cat]
    coltype = [1 if i in cat else 0 for i in cont+cat]
    coltype = coltype[:-1]  # drop the target column's type flag
    X = data.values[:, :-1]
    y = data.values[:, -1]
    # split data
    key = rng.integers(9999)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
    key = rng.integers(9999)
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    # perform desired imputation strategy
    rng = np.random.default_rng(rng_key)
    # Draw the imputation seed up-front so the "iterative"/"miceforest"
    # branches do not depend on the "simple" branch having run first.
    key = rng.integers(9999)
    if imputation == "simple":
        X_train, _, X_test = simple(
            X_train,
            dtypes=coltype,
            valid=None,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative":
        X_train, _, X_test = iterative(
            X_train,
            int(key),
            valid=None,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest":
        X_train, _, X_test = miceforest(
            X_train,
            int(key),
            valid=None,
            test=X_test)
    return X_train, X_valid, X_test, y_train, y_valid, y_test, 2
def anneal(imputation=None, split=0.33, rng_key=0):
    """Load the annealing train/test data, integer-encode categorical
    columns, drop columns that are entirely NaN in the training data,
    optionally impute, and split off a validation set.

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test, 6)``.

    Bug fixes:
    - The continuous-column branch previously re-assigned
      ``train[i]``/``test[i]`` from ``dcoded``/``tcoded`` — the codes of
      the *previous categorical* column — clobbering the numeric data.
    - ``key`` was only assigned inside the "simple" branch, so
      ``imputation="iterative"``/"miceforest" raised NameError.
    """
    cont = [3, 4, 8, 32, 33, 34]  # indices of continuous columns
    def prep_data(train, test):
        # Encode every column; keep only columns that are not all-NaN
        # in the training data (``cols`` accumulates the kept indices).
        cols = []
        for i in range(39):
            if i not in cont:
                # categorical: shared integer encoding over train+test;
                # '?' was read as NaN and stays NaN
                d = train.values[:, i].astype(str)
                t = test.values[:, i].astype(str)
                vals = np.unique(np.concatenate([d[d != 'nan'], t[t != 'nan']]))
                vals = list(vals)
                dcoded = [np.nan if j == 'nan' else vals.index(j) for j in d]
                tcoded = [np.nan if j == 'nan' else vals.index(j) for j in t]
                if not np.all(np.isnan(dcoded)):
                    cols.append(i)
                    train[i] = dcoded
                    test[i] = tcoded
            else:
                # continuous: plain float conversion
                d = train.values[:, i].astype(np.float64)
                t = test.values[:, i].astype(np.float64)
                train[i] = d
                test[i] = t
                if not np.all(np.isnan(d)):
                    # keep the numeric column as stored above (the old code
                    # overwrote it here with the previous categorical codes)
                    cols.append(i)
        return train[cols].values, test[cols].values
    training = pd.read_csv(anneal_path_train, header=None, na_values=["?"])
    testing = pd.read_csv(anneal_path_test, header=None, na_values=["?"])
    training, testing = prep_data(training, testing)
    X_train, y_train = training[:, :-1], training[:, -1]
    X_test, y_test = testing[:, :-1], testing[:, -1]
    # perform desired imputation strategy
    rng = np.random.default_rng(rng_key)
    # Draw the imputation seed up-front so the later branches do not
    # depend on the "simple" branch having run first.
    key = rng.integers(9999)
    if imputation == "simple":
        X_train, _, X_test = simple(
            X_train,
            dtypes=[0 if i in cont else 1 for i in range(X_train.shape[1])],
            valid=None,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative":
        X_train, _, X_test = iterative(
            X_train,
            int(key),
            valid=None,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest":
        X_train, _, X_test = miceforest(
            X_train,
            int(key),
            valid=None,
            test=X_test)
    # can't presplit before imputation as data is too sparse
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=rng_key+1)
    return X_train, X_valid, X_test, y_train, y_valid, y_test, 6
def mnist(
    missing="MCAR",
    imputation=None, # one of none, simple, iterative, miceforest
    train_complete=False,
    test_complete=True,
    split=0.33,
    rng_key=0,
    p=0.5,
):
    """Load a digits dataset, inject missingness and optionally impute.

    NOTE: despite the name this loads sklearn's 8x8 ``load_digits``
    (the 28x28 ``fetch_openml('mnist_784')`` call is commented out).

    ``missing="MCAR"`` masks each pixel independently with probability
    ``p``; ``"MAR"`` deletes a fixed 4x4 square (deterministic
    location-based deletion, not strictly MAR — see comment below).

    Returns ``(X_train, X_valid, X_test, y_train, y_valid, y_test, 10)``
    with integer labels.
    """
    rng = np.random.default_rng(rng_key)
    # X_, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
    X_, y = skdata.load_digits(return_X_y=True)
    if missing is None:
        # no mechanism requested: both splits are kept complete
        train_complete = True
        test_complete = True
    # Below, ``X`` denotes the portion that will receive missing values.
    if train_complete and test_complete:
        X = X_
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif train_complete and not test_complete: # TRAIN COMPLETE IS TRUE AND TEST COMPLETE IS FALSE
        key = rng.integers(9999)
        X_train, X, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and test_complete:
        key = rng.integers(9999)
        X, X_test, y_train, y_test = train_test_split(X_, y, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        X = X_
    if missing == "MCAR":
        # multiply by NaN where the uniform draw falls below p
        rand_arr = rng.uniform(0, 1, X.shape)
        nan_arr = np.where(rand_arr < p, np.nan, 1.0)
        X *= nan_arr
    elif missing == "MAR":
        # delete a square based on location. Not 'technically' MAR but less 'random' than MCAR implementation
        square = np.ones((1, 8, 8))
        for xi in range(8):
            for yi in range(8):
                if (0 < xi <= 4) and (0 < yi <= 4):
                    square[:, xi, yi] = np.nan
        # multiplying by the NaN-holed mask blanks the same 4x4 block in
        # every image
        X *= square.reshape((1, 64))
    elif missing is not None:
        print("not implemented")
    # generate train, validate, test datasets and impute training
    if train_complete and test_complete:
        pass
    elif train_complete and not test_complete:
        X_test = X
    elif not train_complete and test_complete:
        X_train = X
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    elif not train_complete and not test_complete:
        key = rng.integers(9999)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split, random_state=key)
        key = rng.integers(9999)
        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=split, random_state=key)
    # missingness diagnostics
    # diagnostics = {"X_train":{}, "X_valid":{}, "X_test":{}}
    # diagnostics["X_train"]["cols"] = np.isnan(X_train).sum(0) / X_train.shape[0]
    # diagnostics["X_train"]["rows"] = np.any(np.isnan(X_train), axis=1).sum() / X_train.shape[0]
    # diagnostics["X_valid"]["cols"] = np.isnan(X_valid).sum(0) / X_valid.shape[0]
    # diagnostics["X_valid"]["rows"] = np.any(np.isnan(X_valid), axis=1).sum() / X_valid.shape[0]
    # diagnostics["X_test"]["cols"] = np.isnan(X_test).sum(0) / X_test.shape[0]
    # diagnostics["X_test"]["rows"] = np.any(np.isnan(X_test), axis=1).sum() / X_test.shape[0]
    # print(diagnostics)
    # perform desired imputation strategy
    if imputation == "simple" and missing is not None:
        X_train, X_valid, X_test = simple(
            X_train,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "iterative" and missing is not None:
        X_train, X_valid, X_test = iterative(
            X_train,
            key,
            dtypes=None,
            valid=X_valid,
            test=X_test)
        key = rng.integers(9999)
    if imputation == "miceforest" and missing is not None:
        # a complete test set must not be imputed — pass None instead
        if test_complete:
            test_input = None
        else:
            test_input = X_test
        X_train, X_valid, test_input = miceforest(
            X_train,
            int(key),
            dtypes=None,
            valid=X_valid,
            test=test_input)
        if test_complete:
            X_test = X_test
        else:
            X_test = test_input
    return X_train, X_valid, X_test, y_train.astype(int), y_valid.astype(int), y_test.astype(int), 10
|
import random

# Number-guessing game: keep prompting until the user matches the
# randomly drawn value.
p = random.randint(1, 6)
print(p)  # para saber o n correto antes
r = int(input('N: '))
while r != p:
    # Inside the loop the guess is always wrong (the loop condition
    # guarantees r != p), so print the failure message unconditionally.
    # The old ternary could never print 'correto' here, and the success
    # message was never shown at all.
    print('incorreto')
    r = int(input('N: '))
print('correto')
|
class Hashtable:
    """Hash table that buckets integers into groups of width ``c``
    (default 10000).

    The bucket key for ``x`` is the multiple of ``c`` obtained by
    truncating ``x / c`` toward zero, e.g. 12314241 -> 12310000,
    -10001 -> -10000, and both 1 and -1 -> 0.
    """

    def __init__(self, lst=None):
        # Bug fix: previously ``self._hashtable`` was only assigned when
        # ``lst`` was truthy, so ``Hashtable().hashtable`` raised
        # AttributeError. A missing/empty list now yields an empty table.
        self._hashtable = self.hashing(lst) if lst else {}

    @classmethod
    def hashing(cls, lst):
        """Bucket every value of ``lst`` under its group key.

        Example: ``hashing([1, 20000, -1])`` ->
        ``{0: [1, -1], 20000: [20000]}`` — the key is the bucket, the
        list holds the original values in input order.
        """
        hashtable = {cls.group(x): [] for x in lst}
        for x in lst:
            hashtable[cls.group(x)].append(x)
        return hashtable

    @property
    def hashtable(self):
        """The bucket-key -> list-of-values mapping."""
        return self._hashtable

    @classmethod
    def group(cls, x, c=10000):
        """Hash function: return ``x`` truncated toward zero to a
        multiple of ``c`` (so -1 -> 0, -10001 -> -10000, 20000 -> 20000).
        """
        r = lambda v: v % c                 # remainder (Python: same sign as c)
        k = lambda v: int((v - r(v)) / c)   # floor coefficient
        # For negatives the floor bucket is shifted up by one so the
        # result truncates toward zero rather than toward -infinity.
        return k(x) * c if x >= 0 else (k(x) + 1) * c
if __name__ == "__main__":
    # Quick demo: bucket a few positive and negative values and show the
    # resulting table.
    sample = [1, 20000, 12314241, -1, -10001, -15000]
    table = Hashtable(sample)
    print(table.hashtable)
|
#!/usr/bin/env python
#_*_coding:utf-8_*_
#作者:Paul哥
import urllib2,cookielib,random,urllib,json,time,re,datetime
import LoginAccount,TrainNumQuery,BookingSeat
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import ssl,sys
# Disable TLS certificate verification process-wide (12306 served an
# untrusted certificate). NOTE(review): this weakens security for every
# HTTPS request made by this process.
ssl._create_default_https_context = ssl._create_unverified_context
# Python 2 hack: force UTF-8 as the interpreter's default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
# Module-level helper singletons for login, train querying and seat booking.
LoginRun=LoginAccount.Login()
TrainQuery=TrainNumQuery.QueryTrain()
SeatBooking=BookingSeat.Booking()
from bs4 import BeautifulSoup
class ShopTicket:
    def __init__(self):
        """Set up the 12306 URLs, HTTP headers, captcha coordinate maps
        and the cookie-aware urllib2 opener used by every request."""
        self.Imagesuffix=random.random() # random cache-busting suffix for the captcha request URL
        self.ImageUrl='https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&'+str(self.Imagesuffix) # login captcha image URL
        self.checkcodeurl='https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn' # 12306 endpoint that verifies the captcha answer
        self.checkloginurl='https://kyfw.12306.cn/otn/login/loginAysnSuggest'# 12306 endpoint that verifies username/password/captcha
        self.loginurl='https://kyfw.12306.cn/otn/index/initMy12306' # post-login landing page URL
        self.mainurl='https://kyfw.12306.cn/otn/'# base URL prefix for 12306 requests
        self.loginout='https://kyfw.12306.cn/otn/login/loginOut'  # logout endpoint
        self.accessHeaders={
            'Host': 'kyfw.12306.cn',
            'Connection': 'keep-alive',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
            'Referer': 'https://kyfw.12306.cn/otn/init',
            #'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        } # common 12306 HTTP request headers
        self.getpassengerHeaders={
            'Host': 'kyfw.12306.cn',
            'Connection': 'keep-alive',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
            'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
            #'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        }  # headers used when fetching passenger data
        self.checkRandCodeAnsyntheaders={
            'Host': 'kyfw.12306.cn',
            'Connection': 'keep-alive',
            #'Content-Length': '446',
            'Accept':'*/*',
            'Origin': 'https://kyfw.12306.cn',
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
            #'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        }  # headers for the captcha-verification POST
        # seat type name -> 12306 seat code (keys are user-facing Chinese labels)
        self.FullSeatTypeDict={'商务座':'9','二等座':'O','一等座':'M','特等座':'P','高级软卧':'6','软卧':'4','硬卧':'3','软座':'2','硬座':'1'}
        self.sixcodedict={'1':'40,75','2':'110,75','3':'180,75','4':'250,75','5':'40,150','6':'110,150','7':'180,150','8':'250,150'}
        self.eighteencodedict={'a':'25,30','b':'70,30','c':'120,30','d':'170,30','e':'220,30','f':'270,30','g':'25,80','h':'70,80','i':'120,80','j':'170,80','k':'220,80','l':'270,80','m':'25,130','n':'70,130','o':'120,130','p':'170,130','q':'220,130','r':'270,130'}
        # the two dicts above map a key the user types to the pixel
        # coordinates of the corresponding captcha cell
        self.cookie=cookielib.CookieJar() # cookie jar for the session
        self.cookieHandler=urllib2.HTTPCookieProcessor(self.cookie)
        self.opener=urllib2.build_opener(self.cookieHandler)
        urllib2.install_opener(self.opener)  # route all urllib2 requests through the cookie-aware opener
        self.Imagesuffix=random.random()  # fresh suffix for subsequent captcha fetches
        self.ImageUrl='https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&'+str(self.Imagesuffix)
        self.BookingImageurl='https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&'+str(self.Imagesuffix)  # booking-step captcha URL
    def LoginOut(self):
        """Log the current session out of 12306.

        Best-effort: prints a success or failure message instead of
        raising.
        """
        request=urllib2.Request(self.loginout,headers=self.accessHeaders)
        try:
            response=urllib2.urlopen(request,timeout=2)
            print '退出登录成功'
        except:
            # NOTE(review): bare except swallows every error (including
            # KeyboardInterrupt); consider narrowing to urllib2.URLError.
            print "退出失败"
    def inputuserandpasswd(self):
        """Prompt for 12306 credentials on stdin.

        Returns a dict ``{'user': username, 'passwd': password}``.
        """
        username=raw_input('请输入您的用户名:')
        password=raw_input('请输入您的密码:')
        userpassdict={'user':username,'passwd':password}
        return userpassdict
    def login(self): ## Prompt the user for credentials and log in to 12306.
        """Interactive login loop.

        Downloads the login captcha to ``code.jpg``, overlays the digits
        1-8 on the eight answer cells, asks the user which cells are
        correct, verifies the captcha server-side, then posts the
        username/password.  Repeats until both the captcha and the
        credentials are accepted.

        :return: ``'LoginSuccess'`` once the post-login page greets the user
            (otherwise falls through returning ``None``).
        """
        userpass=self.inputuserandpasswd()
        while True:
            # Up to three attempts to download the captcha image.
            for i in range(3):
                getimagestatus=LoginRun.GetCodeImage(self.accessHeaders,self.ImageUrl)
                if getimagestatus=='GetImageSuccess':
                    break
                else:
                    pass
            # Overlay labels 1-8 on the eight captcha cells; the user answers
            # with cell numbers, mapped to pixel coordinates via sixcodedict.
            img=Image.open('code.jpg')
            font=ImageFont.truetype('arial.ttf',size=30)
            draw = ImageDraw.Draw(img)
            draw.text((55, 55),"1",(255,0,0),font=font)
            draw.text((125, 55),"2",(255,0,0),font=font)
            draw.text((200, 55),"3",(255,0,0),font=font)
            draw.text((270, 55),"4",(255,0,0),font=font)
            draw.text((55, 120),"5",(255,0,0),font=font)
            draw.text((125, 120),"6",(255,0,0),font=font)
            draw.text((200, 120),"7",(255,0,0),font=font)
            draw.text((270, 120),"8",(255,0,0),font=font)
            img.show()
            img.close()
            codestr=''
            Input=raw_input('请输入验证码数字:')
            # print "验证码类型:1.八码 2.十八码"
            # sixoreitht=raw_input('您输入的验证码类型是:')
            # if sixoreitht=='1':
            # codedict=self.sixcodedict
            # else:
            # codedict=self.eighteencodedict
            # font=ImageFont.truetype('arial.ttf',size=20)
            # img=Image.open('code.jpg')
            # draw = ImageDraw.Draw(img)
            # draw.text((45, 44),"a",(255,0,0),font=font)
            # draw.text((95, 44),"b",(255,0,0),font=font)
            # draw.text((140, 44),"c",(255,0,0),font=font)
            # draw.text((185, 44),"d",(255,0,0),font=font)
            # draw.text((235, 44),"e",(255,0,0),font=font)
            # draw.text((280, 44),"f",(255,0,0),font=font)
            # draw.text((44, 95),"g",(255,0,0),font=font)
            # draw.text((95, 95),"h",(255,0,0),font=font)
            # draw.text((140, 95),"i",(255,0,0),font=font)
            # draw.text((185, 95),"j",(255,0,0),font=font)
            # draw.text((235, 95),"k",(255,0,0),font=font)
            # draw.text((280, 95),"l",(255,0,0),font=font)
            # draw.text((44, 140),"m",(255,0,0),font=font)
            # draw.text((95, 140),"n",(255,0,0),font=font)
            # draw.text((140, 140),"o",(255,0,0),font=font)
            # draw.text((185, 140),"p",(255,0,0),font=font)
            # draw.text((235, 140),"q",(255,0,0),font=font)
            # draw.text((280, 140),"r",(255,0,0),font=font)
            # img.show()
            # img.close()
            # Translate each chosen cell number into its pixel coordinates.
            for i in Input:
                codestr=codestr + self.sixcodedict[i]+','
            coderesult=codestr[:-1]
            data={"randCode":str(coderesult),"rand":'sjrand'}
            data=urllib.urlencode(data)
            codestrcookies=LoginRun.PostLoginInfo(self.checkRandCodeAnsyntheaders,data)
            if codestrcookies!='checkcodeFalse':
                print '验证码输入正确'
            else:
                print "不好意思,验证码错误,请重试"
                continue
            data={"loginUserDTO.user_name":userpass['user'],"userDTO.password":userpass['passwd'],"randCode":coderesult}
            postdata=urllib.urlencode(data)
            request=urllib2.Request(self.checkloginurl,headers=self.accessHeaders,data=postdata)
            try:
                checkresponse=urllib2.urlopen(request)
            except:
                # Single blind retry on network error; a second failure
                # propagates out of the method.
                checkresponse=urllib2.urlopen(request)
            logincheck=json.loads(checkresponse.read())
            if logincheck["data"].has_key("loginCheck") and logincheck["data"]["loginCheck"]=='Y':
                print '用户名及密码正确'
                break
            else:
                print "不好意思,您用户名密码输入有误,请重新输入,如果输错次数超过4次,用户将被锁定"
                userpass=self.inputuserandpasswd()
                continue
        # Load the post-login page and confirm the welcome banner.
        request2=urllib2.Request(self.loginurl,headers=self.accessHeaders)
        loginresponse=urllib2.urlopen(request2)
        html=loginresponse.read()
        # file=open('login.html','wb')
        # file.write(html)
        # file.close()
        soup=BeautifulSoup(html,'lxml')
        loginstatus=str(soup.p)
        # The first <p> starts with the welcome text when logged in.
        if loginstatus[3:18]=='欢迎您登录':
            return 'LoginSuccess'
    def Query(self,FromST,ToST,Startdate):
        """Query trains running between two stations on a given date.

        Retries the left-ticket query up to three times; on success returns
        the parsed per-train info, otherwise prints an error and falls
        through (returning ``None``).
        """
        checkqueryurl=TrainQuery.CreatecheckqueryUrl(FromST,ToST,Startdate)
        queryurl=TrainQuery.CreateUrl(FromST,ToST,Startdate)
        chechqueryresult=TrainQuery.GetTrainNumDate(checkqueryurl)
        #print chechqueryresult
        for i in range(3):
            trainnum=TrainQuery.GetTrainNumDate(queryurl)
            if trainnum!='GetError':
                trainnuminfo=TrainQuery.GetNumDict(trainnum)# parse the raw reply into the full per-train info
                return trainnuminfo
            else:
                print "网络慢,请稍等。。。"
        else:
            # for/else: reached only when all three attempts failed.
            print "无法获取到车次信息"
    def PrintNumInfo(self,traininfo): # Print the train table; return per-booking-number data.
        """Print one formatted row per train and return a lookup table.

        :param traininfo: iterable of train entries from :meth:`Query`.
        :return: dict mapping the printed booking number (1-based) to the
            raw train entry, used later to submit the order.
        """
        print "预订号"+"|"+" 车次号 "+"|"+" 出发站 "+"|"+" 到达站 "+"|"+"出发时间"+"|"+"到达时间"+"|"+"商务座"+"|"+"特等座"+"|"+"一等座"+"|"+"二等座"+"|"+"高级软卧"+"|"+"软卧"+"|"+"硬卧"+"|"+"软座"+"|"+"硬座"+"|"+"无座"+"|"+"预定状态"
        count=0
        TrainInfodict={}
        for Num in traininfo:
            count+=1
            NumInfo=Num["queryLeftNewDTO"]
            secretStr=(Num["secretStr"])
            TraNum=(NumInfo["station_train_code"])
            From=(NumInfo["from_station_name"])
            To=(NumInfo["to_station_name"])
            starttime=(NumInfo["start_time"])
            arrivetime=(NumInfo["arrive_time"])
            swz=(NumInfo["swz_num"])
            tz=(NumInfo["tz_num"])
            zy=(NumInfo["zy_num"])
            ze=(NumInfo["ze_num"])
            gr=(NumInfo["gr_num"])
            rw=(NumInfo["rw_num"])
            yw=(NumInfo["yw_num"])
            rz=(NumInfo["rz_num"])
            yz=(NumInfo["yz_num"])
            wz=(NumInfo["wz_num"])
            startdate=str(NumInfo['start_train_date'])
            # 'YYYYMMDD' -> 'YYYY-MM-DD'
            traindate=startdate[0:4]+'-'+startdate[4:6]+'-'+startdate[6:8]
            dateformat='%Y-%m-%d'
            backdate=time.strftime(dateformat, time.localtime())
            #postdict={"secretStr":str(secretStr),"train_date":str(traindate),"back_train_date":str(backdate),"tour_flag":"dc","purpose_codes":"ADULT","query_from_station_name":From,"query_to_station_name":To,"undefined":""}
            #postdata=json.dumps(postdict)
            TrainInfodict[count]=Num
            #print (str(count)).ljust(5),'|',TraNum.ljust(6),'|',From.center(6),'|',To.center(6),'|',starttime.center(6),'|',arrivetime.center(6),'|',swz.center(4),'|',tz.center(4),'|',zy.center(4),'|',ze.center(4),'|',gr.center(6),'|',rw.center(2)
            print '-------------------------------------------------------------------------------------------'
            print "%-6d|%-8s|%-6s|%-7s|%-8s|%-8s|%-6s|%-6s|%-6s|%-6s|%-8s|%-4s|%-4s|%-4s|%-4s|%-4s|%-8s" % (count,TraNum,From,To,starttime,arrivetime,swz,tz,zy,ze,gr,rw,yw,rz,yz,wz,Num["buttonTextInfo"])
        return TrainInfodict
    def GetSubmitorderdata(self,Num):
        """Build the form body for submitOrderRequest.

        :param Num: one train entry from the left-ticket query (contains
            ``queryLeftNewDTO`` and the booking ``secretStr``).
        :return: manually concatenated ``key=value&...`` string; note the
            trailing bare ``undefined`` key, mimicking the website's own
            request format.
        """
        NumInfo=Num["queryLeftNewDTO"]
        secretStr=(Num["secretStr"])
        From=(NumInfo["from_station_name"])
        To=(NumInfo["to_station_name"])
        startdate=str(NumInfo['start_train_date'])
        # 'YYYYMMDD' -> 'YYYY-MM-DD'
        traindate=startdate[0:4]+'-'+startdate[4:6]+'-'+startdate[6:8]
        dateformat='%Y-%m-%d'
        # back_train_date defaults to today for a one-way ("dc") booking.
        backdate=time.strftime(dateformat, time.localtime())
        #postdict={"secretStr":str(secretStr),"train_date":str(traindate),"back_train_date":str(backdate),"tour_flag":"dc","purpose_codes":"ADULT","query_from_station_name":From,"query_to_station_name":To,"undefined":""}
        postdata="secretStr"+'='+str(secretStr)+'&'+"train_date"+'='+str(traindate)+'&'+"back_train_date"+'='+str(backdate)+"&"+"tour_flag"+"="+"dc"+"&"+"purpose_codes"+"="+"ADULT"+"&"+"query_from_station_name"+"="+str(From)+"&"+"query_to_station_name"+"="+str(To)+"&"+"undefined"
        #postdata=json.dumps(postdict)
        return postdata
def GetRequToken(self,checkresult):
token= re.search(r'var globalRepeatSubmitToken = \'(.*?)\'\;',checkresult,re.DOTALL)
return token.group(1)
    def GetSeatType(self,checkresult):
        """Parse the seat classes offered on the confirm page.

        Scrapes the inline ``init_seatTypes`` JavaScript array, prints a
        numbered menu of seat-class names and returns
        ``{menu number (str): seat-class name}``.
        """
        seat_type=re.search(r'var init_seatTypes\=\[\{(.*?)\}\]\;',checkresult,re.DOTALL)
        #print checkresult
        try:
            SeatTypeStr=seat_type.group(1)
        except:
            print '获取坐席失败'
            # NOTE(review): execution falls through after LoginOut() and the
            # next line raises NameError because SeatTypeStr is unbound --
            # a return/raise is probably intended here.
            self.LoginOut()
        SeatTypelist=SeatTypeStr.split('},{')
        SeatTypeDict={}
        count=0
        for seat in SeatTypelist:
            count+=1
            seatjsonstr='{'+seat+'}'
            # Inline JS object -> JSON: swap quote style and quote bare null.
            JsonStr = seatjsonstr.replace("'","\"")
            jsonstr1=JsonStr.replace("null","\"null\"")
            jsonstr=json.loads(jsonstr1)
            SeatTypeDict[str(count)]=jsonstr['value']
        for k,v in SeatTypeDict.items():
            print k,'.',v
        return SeatTypeDict
    def BookingCheckCode(self,token):
        """Solve the order-confirmation captcha interactively.

        Downloads the booking captcha to ``code.jpg``, lets the user choose
        between the 8-cell and 18-cell layouts, overlays the matching labels
        on the image, converts the user's answer into pixel coordinates and
        verifies it with the server.  Loops until the server accepts it.

        :param token: REPEAT_SUBMIT_TOKEN of the current order session.
        :return: comma-separated pixel-coordinate answer string.
        """
        while True:
            # Up to three attempts to download the captcha image.
            for i in range(3):
                getimagestatus=LoginRun.GetCodeImage(self.getpassengerHeaders,self.BookingImageurl)
                if getimagestatus=='GetImageSuccess':
                    break
                else:
                    pass
            # Show the raw captcha first so the user can judge its layout.
            img=Image.open('code.jpg')
            img.show()
            img.close()
            print "验证码类型:1.八码 2.十八码"
            sixoreitht=raw_input('您输入的验证码类型是:')
            if sixoreitht=='1':
                # 8-cell layout: label cells 1-8.
                codedict=self.sixcodedict
                img=Image.open('code.jpg')
                font=ImageFont.truetype('arial.ttf',size=30)
                draw = ImageDraw.Draw(img)
                draw.text((55, 55),"1",(255,0,0),font=font)
                draw.text((125, 55),"2",(255,0,0),font=font)
                draw.text((200, 55),"3",(255,0,0),font=font)
                draw.text((270, 55),"4",(255,0,0),font=font)
                draw.text((55, 120),"5",(255,0,0),font=font)
                draw.text((125, 120),"6",(255,0,0),font=font)
                draw.text((200, 120),"7",(255,0,0),font=font)
                draw.text((270, 120),"8",(255,0,0),font=font)
                img.show()
                img.close()
            else:
                # 18-cell layout: label cells a-r.
                codedict=self.eighteencodedict
                font=ImageFont.truetype('arial.ttf',size=20)
                img=Image.open('code.jpg')
                draw = ImageDraw.Draw(img)
                draw.text((45, 44),"a",(255,0,0),font=font)
                draw.text((95, 44),"b",(255,0,0),font=font)
                draw.text((140, 44),"c",(255,0,0),font=font)
                draw.text((185, 44),"d",(255,0,0),font=font)
                draw.text((235, 44),"e",(255,0,0),font=font)
                draw.text((280, 44),"f",(255,0,0),font=font)
                draw.text((44, 95),"g",(255,0,0),font=font)
                draw.text((95, 95),"h",(255,0,0),font=font)
                draw.text((140, 95),"i",(255,0,0),font=font)
                draw.text((185, 95),"j",(255,0,0),font=font)
                draw.text((235, 95),"k",(255,0,0),font=font)
                draw.text((280, 95),"l",(255,0,0),font=font)
                draw.text((44, 140),"m",(255,0,0),font=font)
                draw.text((95, 140),"n",(255,0,0),font=font)
                draw.text((140, 140),"o",(255,0,0),font=font)
                draw.text((185, 140),"p",(255,0,0),font=font)
                draw.text((235, 140),"q",(255,0,0),font=font)
                draw.text((280, 140),"r",(255,0,0),font=font)
                img.show()
                img.close()
            codestr=''
            Input=raw_input('请输入验证码:')
            # Map each chosen cell label to its pixel coordinates.
            for i in Input:
                codestr=codestr + codedict[i]+','
            coderesult=codestr[:-1]
            data={"randCode":str(coderesult),"rand":'randp',"REPEAT_SUBMIT_TOKEN":str(token),"_json_att":""}
            data=urllib.urlencode(data)
            codestrcookies=LoginRun.PostLoginInfo(self.getpassengerHeaders,data)
            if codestrcookies!='checkcodeFalse':
                print '验证码输入正确'
                print '接下来为您出票,可能需要一段时间,请稍等。。。。。'
                return coderesult
            else:
                print "不好意思,验证码错误,请重试"
                continue
    def GetCheckOrderINfo(self,PassengerData,bookincodestr,seatstr,token):
        """Build the payloads for the checkOrderInfo request.

        :param PassengerData: one passenger dict from the passenger list
            API (name, id type code, id number, optional mobile number).
        :param bookincodestr: pixel-coordinate string of the solved
            booking captcha.
        :param seatstr: single-letter seat-type code (see FullSeatTypeDict).
        :param token: REPEAT_SUBMIT_TOKEN scraped from the confirm page.
        :return: two-element list [json payload, urlencoded payload]; the
            urlencoded form is POSTed, the json form is re-parsed later to
            build the confirm-queue payload.
        """
        PassengerName=PassengerData['passenger_name']
        PassengerIdCode=PassengerData['passenger_id_type_code']
        PassengerIdNum=PassengerData['passenger_id_no']
        # Mobile number is optional in the profile (Python 2 has_key).
        if PassengerData.has_key('mobile_no')==False:
            PassengerMobile=''
        else:
            PassengerMobile=PassengerData['mobile_no']
        # Field layout mirrors the website's own request:
        # seat,0,1,name,idtype,idno,mobile,N -- presumably 0=ticket type,
        # 1=adult ticket; TODO confirm against the site's JS.
        passengerTicketStr=seatstr+','+'0,1,'+PassengerName+','+PassengerIdCode+','+PassengerIdNum+','+PassengerMobile+',N'
        oldPassengerStr=PassengerName+','+PassengerIdCode+','+PassengerIdNum+',1_'
        cancel_flag='2'
        bed_level_order_num='000000000000000000000000000000'
        tour_flag='dc'
        data={"cancel_flag":cancel_flag,"bed_level_order_num":bed_level_order_num,"passengerTicketStr":passengerTicketStr,"oldPassengerStr":oldPassengerStr,"tour_flag":tour_flag,"randCode":bookincodestr,"REPEAT_SUBMIT_TOKEN":token,"_json_att":""}
        usedata=json.dumps(data)
        postdata=urllib.urlencode(data)
        postdatalist=[]
        postdatalist.append(usedata)
        postdatalist.append(postdata)
        return postdatalist
def GetCheckQueueCountInfo(self,startdate,checkresult,token,seatstr):
Weekdict={'1':"Mon",'2':"Tue","3":"Wed","4":"Thu","5":"Fri","6":"Sat","7":"Sun"}
Monthdict={"01":"Jan","02":"Feb","03":"Mar","04":"Apr","05":"May","06":"Jun","07":"Jul","08":"Aug","09":"Sep","10":"Oct","11":"Nov","12":"Dec"}
year=startdate[0:4]
month=startdate[4:6]
day=startdate[6:8]
anyday=datetime.datetime(int(year),int(month),int(day)).strftime("%w")
weekday=Weekdict[str(anyday)]
monthstr=Monthdict[month]
train_date=weekday+" "+monthstr+" "+day+" "+year+" "+"00:00:00 GMT+0800 (中国标准时间)"
orderRequestDTO = re.search(r'var orderRequestDTO\=(.*?)\;',checkresult,re.DOTALL)
orderRequeststr=orderRequestDTO.group(1)
orderRequeststr = orderRequeststr.replace("'","\"")
orderRequeststrjson=orderRequeststr.replace("null","\"null\"")
orderRequestDTOdict=json.loads(orderRequeststrjson)
train_num=orderRequestDTOdict['train_no']
stationTrainCode=orderRequestDTOdict['station_train_code']
fromStationTelecode=orderRequestDTOdict['from_station_telecode']
tostationtelecode=orderRequestDTOdict['to_station_telecode']
InfoForPassenger= re.search(r'var ticketInfoForPassengerForm\=(.*?)\;',checkresult,re.DOTALL)
InfoForPassengerstr=InfoForPassenger.group(1)
InfoForPassengerstr = InfoForPassengerstr.replace("'","\"")
InfoForPassengerjson=InfoForPassengerstr.replace("null","\"null\"")
InfoForPassengerdict=json.loads(InfoForPassengerjson)
leftTicket=InfoForPassengerdict['leftTicketStr']
purpose_codes=InfoForPassengerdict['purpose_codes']
data={"train_date":train_date,"train_no":train_num,"stationTrainCode":stationTrainCode,"seatType":seatstr,"fromStationTelecode":fromStationTelecode,"toStationTelecode":tostationtelecode,"leftTicket":leftTicket,"purpose_codes":purpose_codes,"REPEAT_SUBMIT_TOKEN":token,"_json_att":""}
#postdata=json.dumps(data)
postdata=urllib.urlencode(data)
return postdata
    def GetconfirmSingleinfo(self,CheckOrderINfo,checkresult,token):
        """Build the urlencoded payload for confirmSingleForQueue.

        :param CheckOrderINfo: list from :meth:`GetCheckOrderINfo`;
            element 0 (the JSON form) is re-parsed here for its
            passenger strings and captcha answer.
        :param checkresult: confirm-page HTML containing the inline
            ``ticketInfoForPassengerForm`` JS object.
        :param token: REPEAT_SUBMIT_TOKEN.
        :return: urlencoded POST body.
        """
        # Inline JS object -> JSON: swap quote style and quote bare nulls.
        InfoForPassenger= re.search(r'var ticketInfoForPassengerForm\=(.*?)\;',checkresult,re.DOTALL)
        InfoForPassengerstr=InfoForPassenger.group(1)
        InfoForPassengerstr = InfoForPassengerstr.replace("'","\"")
        InfoForPassengerjson=InfoForPassengerstr.replace("null","\"null\"")
        InfoForPassengerdict=json.loads(InfoForPassengerjson)
        leftTicketstr=InfoForPassengerdict['leftTicketStr']
        key_check_isChange=InfoForPassengerdict['key_check_isChange']
        train_location=InfoForPassengerdict['train_location']
        purpose_codes=InfoForPassengerdict['purpose_codes']
        CheckOrderINfodict=json.loads(CheckOrderINfo[0])
        passengerTicketStr=CheckOrderINfodict['passengerTicketStr']
        oldPassengerStr=CheckOrderINfodict['oldPassengerStr']
        randCode=CheckOrderINfodict['randCode']
        roomType='00'
        dwAll='N'
        data={"passengerTicketStr":passengerTicketStr,"oldPassengerStr":oldPassengerStr,"randCode":randCode,"key_check_isChange":key_check_isChange,"leftTicketStr":leftTicketstr,"train_location":train_location,"roomType":roomType,"purpose_codes":purpose_codes,"REPEAT_SUBMIT_TOKEN":token,"dwAll":dwAll,"_json_att":""}
        #postdata=json.dumps(data)
        postdata=urllib.urlencode(data)
        return postdata
    def PrintOrderResult(self,resp):
        """Parse the paid-order page and print the booking summary.

        Extracts the ``parOrderDTOJson`` blob -- a JSON string embedded in
        JavaScript, hence the double ``json.loads`` -- and prints the
        pickup number, price, stations, seat and payment deadline.
        """
        PayOrderInfostr= re.search(r'var parOrderDTOJson \= (.*?)\}\'\;',resp,re.DOTALL)
        PayOrderstr=PayOrderInfostr.group(1)
        # Re-append the closing brace/quote the non-greedy regex consumed.
        PayOrderstr=PayOrderstr+"}'"
        PayOrderstr = PayOrderstr.replace("'","\"")
        PayOrderjson=str(PayOrderstr.replace("null","\"null\""))
        infojson=json.loads(PayOrderjson)
        # The blob is a JSON string containing JSON, so decode twice.
        PayOrderInfo=json.loads(infojson)
        Payorderlist=PayOrderInfo['orders'][0]
        getticketnum=Payorderlist['sequence_no'] # ticket pickup number
        totalprice=Payorderlist['ticket_total_price_page'] # total ticket price
        fromstation=(Payorderlist['tickets'][0])['stationTrainDTO']['from_station_name'] # departure station
        tostation=(Payorderlist['tickets'][0])['stationTrainDTO']['to_station_name'] # arrival station
        trainnum=(Payorderlist['tickets'][0])['stationTrainDTO']['station_train_code'] # train number
        passengername=(Payorderlist['tickets'][0])['passengerDTO']['passenger_name'] # passenger name
        traincoachnum=(Payorderlist['tickets'][0])['coach_name'] # coach number
        seatnum=(Payorderlist['tickets'][0])['seat_name'] # seat number
        Seattype=(Payorderlist['tickets'][0])['seat_type_name'] # seat class
        lostpaytime=(Payorderlist['tickets'][0])['lose_time'] # payment deadline
        print '您好,恭喜您已成功为%s预订%s至%s的%s次列车,您的坐席为%s,坐席位置在%s号车厢%s座位,票价为%s元' % (passengername,fromstation,tostation,trainnum,Seattype,traincoachnum,seatnum,totalprice)
        print '请您在%s之前完成支付,过期作废,完成支付后您的取票号为%s,请牢记!' % (lostpaytime,getticketnum)
    def ShopRun(self):
        """Top-level interactive booking flow.

        Menu -> login -> query trains -> pick train/passenger/seat ->
        solve booking captcha -> submit and confirm the order -> poll for
        the order id -> print the result and log out.

        NOTE(review): Python 2 ``input()`` evaluates the typed text; also
        several failure branches only print a message and fall through,
        so later statements can still run with bad data.
        """
        print "欢迎您来到Paul哥的火车票服务中心,请问您需要什么服务?"
        print "1.火车票服务 2.跟老刘去大保健 3.跟油哥学洞玄子三十六散手"
        ServiceNum=input('请输入服务数字:')
        if ServiceNum==1:
            Loginstatus=self.login()
            if Loginstatus=='LoginSuccess':
                print '恭喜您登陆成功,请选择您需要的服务:'
                print "1.车票预订 2.车次查询 3.余票查询 4.退票 5.改签 6.东莞直通车"
                service=raw_input('请输入服务数字:')
                if service=='1':
                    FromST=raw_input('请输入您的出发站:')
                    ToST=raw_input('请输入您的到达站:')
                    Startdate=raw_input('请输入您的出发日期(8位数字):')
                    queryinfo=self.Query(FromST,ToST,Startdate)
                    bookingdata=self.PrintNumInfo(queryinfo)
                    bookingnum=int(raw_input('请选择您要预订的车次:'))
                    checkdatastr=bookingdata[bookingnum]
                    checkdata=self.GetSubmitorderdata(checkdatastr)
                    # Submit the order request; retry up to three times.
                    for i in range(3):
                        checkresult=SeatBooking.BookingCheck(checkdata)
                        if checkresult!='BookingCheckError':
                            break
                        else:
                            continue
                    else:
                        print '连续三次都获取失败,不是我程序问题,是网络问题哦!'
                    token=self.GetRequToken(checkresult)
                    # Fetch the passenger list bound to this account.
                    for i in range(3):
                        Passinfo=SeatBooking.GetPassengerjson(token)
                        if Passinfo!='GetPassengerError':
                            break
                        else:
                            continue
                    else:
                        print '连续三次都获取失败,不是我程序问题,是网络问题哦!'
                    passinfodict=SeatBooking.ChoicePassenger(Passinfo)
                    choicePassenger=raw_input('请选择乘客:')
                    PassengerData=passinfodict[choicePassenger] # selected passenger's record
                    SeatTypedict=self.GetSeatType(checkresult)
                    choiceSeatType=raw_input('请选择席别:')
                    SeatType=SeatTypedict[str(choiceSeatType)]
                    SeatTypeStr=self.FullSeatTypeDict[str(SeatType)] # single-letter seat code
                    bookincodestr=self.BookingCheckCode(token) # solved booking captcha string
                    # Build and fire the three confirmation requests in the
                    # order the website issues them.
                    CheckOrderINfo=self.GetCheckOrderINfo(PassengerData,bookincodestr,SeatTypeStr,token)
                    getQueueCountInfo=self.GetCheckQueueCountInfo(Startdate,checkresult,token,SeatTypeStr)
                    confirmSingleInfo=self.GetconfirmSingleinfo(CheckOrderINfo,checkresult,token)
                    checkorderresult=SeatBooking.CheckOrderInfo(CheckOrderINfo[1])
                    #print checkorderresult
                    getQueueCountresult=SeatBooking.GetQueueCount(getQueueCountInfo)
                    #print getQueueCountresult
                    confirmSingleresult=SeatBooking.confirmSingleForQueue(confirmSingleInfo)
                    #print confirmSingleresult
                    # Poll up to ten times (1s apart) for the order id.
                    for i in range(10):
                        queryorderid=SeatBooking.GetqueryOrderWaitTime(token)
                        if queryorderid==None or queryorderid=='orderIdnull' or queryorderid=='GetOrderIdError':
                            time.sleep(1)
                            continue
                        else:
                            break
                    else:
                        print '十次都没获取到orderid,我也没办法了,下次吧,拜拜!'
                        self.LoginOut()
                    resultdata={"orderSequence_no":queryorderid,"REPEAT_SUBMIT_TOKEN":token,"_json_att":""}
                    resultorderdata=urllib.urlencode(resultdata)
                    GetResultOrderStatus=SeatBooking.ResultOrder(resultorderdata)
                    #print GetResultOrderStatus
                    Payorderdata={"REPEAT_SUBMIT_TOKEN":token,"_json_att":""}
                    if GetResultOrderStatus=='OrderRusultOK':
                        PayOrderInfo=SeatBooking.PayOrder(Payorderdata)
                        if PayOrderInfo!='GetPayOrderFailure':
                            self.PrintOrderResult(PayOrderInfo)
                    self.LoginOut()
if __name__=="__main__":
    # Entry point: build the booking helper and start the interactive menu.
    Service=ShopTicket()
    Service.ShopRun()
|
#!/usr/bin/env python
"""
wrapper for JSLint using Spidermonkey engine
TODO:
* support for JSLint options
"""
import os
from subprocess import Popen, PIPE
try:
from json import loads as json
except ImportError:
from simplejson import loads as json
try:
from pkg_resources import resource_filename
except ImportError:
from jslint.util import resource_filename
# Absolute paths of the JavaScript files the "js" shell must load before
# linting: JSLint itself, a JSON shim, and the wrapper that prints the
# error report as JSON.
DEPENDENCIES = [resource_filename("jslint", filename) for filename in
        ["fulljslint.js", "json2.js", "lintwrapper.js"]]
# XXX: JSON support built in from Spidermonkey 1.8
def lint(filepath):
    """
    check given file using JSLint (via Spidermonkey)

    Runs the ``js`` shell with the bundled JSLint dependencies loaded via
    ``-f``, feeds the target file on stdin, and decodes the JSON error
    report that the lint wrapper prints to stdout.

    :param filepath: path of the JavaScript file to check
    :return: deserialized JSLint ``errors`` object
    """
    options = {} # TODO: read from argument
    command = ["js"]
    for filename in DEPENDENCIES:
        command.extend(["-f", filename])
    # "with" guarantees the source file is closed even if Popen raises
    # (the original leaked the handle on error).
    with open(filepath) as source:
        errors = Popen(command, stdin=source, stdout=PIPE).communicate()[0]
    # XXX: errors incomplete (e.g. not reporting missing var)!?
    return json(errors)
def format(errors, filepath):
    """
    convert JSLint errors object into report using standard error format
    <filepath>:<line>:<column>:<message>

    Falsy entries (JSLint pads the list with null) are skipped; line and
    column are converted from 0-based to 1-based.
    """
    reports = []
    for error in errors:
        if not error:
            continue
        location = [
            filepath,
            str(error["line"] + 1),
            str(error["character"] + 1),
            error["reason"]
        ]
        reports.append(":".join(location))
    # XXX: ignoring members id, evidence, raw, a, b, c, d
    return "\n".join(reports)
|
from django.urls import path
from . import views
# URL routes for the products app: the listing view at the app root plus
# the product- and category-creation form views.
urlpatterns = [
    path('', views.products_view, name="products"),
    path('create_product/', views.product_create_view, name='create_product'),
    path('create_categorie/', views.categorie_create_view, name='create_categorie'),
]
import time
from selenium import webdriver
# End-to-end exercise of the rahulshettyacademy practice page:
# checkboxes, radio buttons, JS alert/confirm dialogs, and element
# visibility toggling.
validateText = "Haroon"
# NOTE(review): hard-coded local chromedriver path and the deprecated
# find_element(s)_by_* API tie this script to an old Selenium release.
driver = webdriver.Chrome(executable_path="C:\\Users\\Haroon\\Downloads\\chromedriver.exe")
driver.get("https://rahulshettyacademy.com/AutomationPractice/")
driver.maximize_window()
# Tick option1 and option2 and verify each ends up selected.
checkboxes = driver.find_elements_by_xpath("//input[@type='checkbox']")
print(len(checkboxes))
for checkbox in checkboxes:
    if checkbox.get_attribute("value") == "option2":
        checkbox.click()
        assert checkbox.is_selected()
    if checkbox.get_attribute("value") == "option1":
        checkbox.click()
        assert checkbox.is_selected()
# Select the second radio button and verify.
radiobuttons = driver.find_elements_by_name("radioButton")
radiobuttons[1].click()
assert radiobuttons[1].is_selected()
time.sleep(3)
# Trigger a JS alert containing the typed name and verify its text.
driver.find_element_by_xpath("//input[@id='name']").send_keys(validateText)
driver.find_element_by_id("alertbtn").click()
alert = driver.switch_to.alert
alertText = alert.text
assert validateText in alertText
# print(alert.text)
alert.accept()
time.sleep(2)
# Confirm dialog: read the text, then dismiss (press Cancel).
driver.find_element_by_id("confirmbtn").click()
alert = driver.switch_to.alert
alertText = alert.text
alert.dismiss()
time.sleep(2)
# Hide/show element: visible before, hidden after clicking "Hide".
assert driver.find_element_by_id("displayed-text").is_displayed()
driver.find_element_by_id("hide-textbox").click()
time.sleep(2)
print(driver.find_element_by_id("displayed-text").is_displayed()) # return False
time.sleep(3)
print(driver.find_element_by_id("show-textbox").is_selected()) # return False
time.sleep(3)
driver.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Bob'
import os
import sys
import platform  # retained for backward compatibility; no longer needed for path logic

# Project root == parent of the directory containing this script.
# os.path handles both Windows and POSIX separators, so the old
# platform-specific string splitting is unnecessary.  NOTE(review): the
# old POSIX branch used realpath() (resolves symlinks); abspath() is used
# here to match the Windows behaviour -- switch back to realpath() if
# symlinked installs must resolve to the real checkout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)

from core import HouseStark

if __name__ == '__main__':
    # Delegate all command-line handling to the core package.
    HouseStark.ArgvHandle(sys.argv)
class UserModel(Tower):
    """LeNet-style two-conv CNN tower (DIGITS / TensorFlow 1.x API).

    NOTE(review): the 4*4*50 flatten size implies a 28x28 single-channel
    input (MNIST-like) after two VALID conv+pool stages -- confirm
    against the dataset before reuse.
    """
    @model_property
    def inference(self):
        """Build the forward graph and return the final 2-class output tensor."""
        # Create some wrappers for simplicity
        def conv2d(x, W, b, s, name, padding='SAME'):
            # Conv2D wrapper, with bias and relu activation
            x = tf.nn.conv2d(x, W, strides=[1, s, s, 1], padding=padding, name=name + '_conv2d')
            x = tf.nn.bias_add(x, b, name=name + 'bias_add')
            return tf.nn.relu(x)
        def maxpool2d(x, k, s, name, padding='VALID'):
            # MaxPool2D wrapper
            return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding=padding, name=name + '_maxpool2d')
        # Create model
        def conv_net(x, weights, biases):
            # scale (divide by MNIST std)
            x = x * 0.0125
            # Convolution Layer
            conv1 = conv2d(x, weights['wc1'], biases['bc1'], s=1, name='CONV1', padding='VALID')
            # Max Pooling (down-sampling)
            conv1 = maxpool2d(conv1, k=2, s=2, name='CONV1', padding='VALID')
            # Convolution Layer
            conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], s=1, name='CONV2', padding='VALID')
            # Max Pooling (down-sampling)
            conv2 = maxpool2d(conv2, k=2, s=2, name='CONV2', padding='VALID')
            # Fully connected layer
            # Reshape conv2 output to fit fully connected layer input
            fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]], name="FC1_reshape")
            fc1 = tf.add(tf.matmul(fc1, weights['wd1'], name='FC1_multi'), biases['bd1'], name='FC1_add')
            fc1 = tf.nn.relu(fc1, name='FC1_relu')
            # Apply Dropout (training graph only)
            if self.is_training:
                fc1 = tf.nn.dropout(fc1, 0.5, name='FC1_drop')
            # Output, class prediction
            out = tf.add(tf.matmul(fc1, weights['out'], name='OUT_multi'), biases['out'], name='OUT_add')
            # Extra linear layer mapping the 10-way output down to 2 classes.
            true_out = tf.add(tf.matmul(out, weights['true_out'], name='OUT_multi'), biases['true_out'], name='TRUE_OUT_add')
            return true_out
        # Store layers weight & bias
        weights = {
            # 5x5 conv, 1 input, 20 outputs
            'wc1': tf.get_variable('wc1', [5, 5, self.input_shape[2], 20], initializer=tf.contrib.layers.xavier_initializer()),
            # 5x5 conv, 20 inputs, 50 outputs
            'wc2': tf.get_variable('wc2', [5, 5, 20, 50], initializer=tf.contrib.layers.xavier_initializer()),
            # fully connected, 4*4*50=800 inputs, 500 outputs
            'wd1': tf.get_variable('wd1', [4*4*50, 500], initializer=tf.contrib.layers.xavier_initializer()),
            # 500 inputs, 10 outputs (class prediction)
            'out': tf.get_variable('wout_not_in_use', [500, 10], initializer=tf.contrib.layers.xavier_initializer()),
            # adjust from 10 classes to 2 output
            'true_out': tf.get_variable('twout', [10, 2], initializer=tf.contrib.layers.xavier_initializer())
        }
        self.weights = weights
        # Leave the initial biases zero
        biases = {
            'bc1': tf.get_variable('bc1', [20], initializer=tf.constant_initializer(0.0)),
            'bc2': tf.get_variable('bc2', [50], initializer=tf.constant_initializer(0.0)),
            'bd1': tf.get_variable('bd1', [500], initializer=tf.constant_initializer(0.0)),
            'out': tf.get_variable('bout_not_in_use', [10], initializer=tf.constant_initializer(0.0)),
            'true_out': tf.get_variable('tbout', [2], initializer=tf.constant_initializer(0.0))
        }
        self.biases = biases
        model = conv_net(self.x, weights, biases)
        return model
    @model_property
    def loss(self):
        """Cross-entropy classification loss; also records an accuracy summary."""
        loss = digits.classification_loss(self.inference, self.y)
        accuracy = digits.classification_accuracy(self.inference, self.y)
        self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
        return loss
|
from dazer_methods import Dazer
from collections import OrderedDict
from numpy import empty, random, median, percentile, array, linspace, zeros, std
from scipy import stats
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values, std_devs
from lib.CodeTools.sigfig import round_sig
from lib.Math_Libraries.bces_script import bces, bcesboot
from pandas import DataFrame
from lmfit import Model, Parameters
from scipy.optimize import curve_fit
from kapteyn import kmpfit
import statsmodels.formula.api as smf
def linear_model(x, m, n):
    """Straight line: slope ``m`` times ``x`` plus intercept ``n``."""
    return n + x * m
def residuals_lin(p, c):
    """Residuals y - model(x) for the single-predictor line fit (kmpfit).

    :param p: parameter pair (slope, intercept)
    :param c: data pair (x, y)
    """
    x_data, y_data = c
    slope, intercept = p
    return y_data - linear_model(x_data, slope, intercept)
def linear_model2(x, mA, mB, n):
    """Linear model with two predictors: mA*x[0] + mB*x[1] + n."""
    first, second = x[0], x[1]
    return mA * first + mB * second + n
def linear_model3(x, mA, mB, mC, n):
    """Linear model with three predictors: mA*x[0] + mB*x[1] + mC*x[2] + n."""
    a, b, c = x[0], x[1], x[2]
    return mA * a + mB * b + mC * c + n
def residuals_lin2(p, c):
    """Residuals y - model(x) for the two-predictor linear fit (kmpfit).

    :param p: parameters (mA, mB, n)
    :param c: data pair (x, y) with x indexable by predictor
    """
    x_data, y_data = c
    mA, mB, n = p
    predicted = linear_model2(x_data, mA, mB, n)
    return y_data - predicted
def residuals_lin3(p, c):
    """Residuals y - model(x) for the three-predictor linear fit (kmpfit).

    :param p: parameters (mA, mB, mC, n)
    :param c: data pair (x, y) with x indexable by predictor
    :return: y - (mA*x[0] + mB*x[1] + mC*x[2] + n)
    """
    x, y = c
    mA, mB, mC, n = p
    # NOTE: the original had a second, unreachable `return` statement after
    # this one (copy-paste leftover); it has been removed.
    return (y - linear_model3(x, mA, mB, mC, n))
def latex_float(f):
    """Format a number for LaTeX axis labels.

    Numbers whose 2-significant-figure representation is plain come back
    unchanged; numbers that format in scientific notation are rendered as
    a power of ten (``10^{exp}`` -- the mantissa is dropped, which is
    harmless here because the factors used are exact powers of ten).
    """
    text = "{0:.2g}".format(f)
    if "e" not in text:
        return text
    exponent = text.split("e")[1]
    return r"10^{{{}}}".format(int(exponent))
# Create the dazer helper object used for plotting and catalogue access.
dz = Dazer()
script_code = dz.get_script_code()
# Define plot frame and colors
size_dict = {'figure.figsize':(18, 8), 'axes.labelsize':38, 'legend.fontsize':28, 'font.family':'Times New Roman', 'mathtext.default':'regular', 'xtick.labelsize':34, 'ytick.labelsize':34}
dz.FigConf(plotSize = size_dict)
# Load catalogue dataframe
catalogue_dict = dz.import_catalogue()
catalogue_df = dz.load_excel_DF('/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx')
dz.quick_indexing(catalogue_df)
# Regression properties: Monte Carlo iterations and the Planck/WMAP
# primordial helium reference point (x=0, Y=0.24709 +/- 0.00025).
MC_iterations = 10000
WMAP_coordinates = array([ufloat(0.0, 0.0), ufloat(0.24709, 0.00025)])
# Column names, factors and plot labels for the three Y-vs-metal regressions.
Regresions_dict = OrderedDict()
Regresions_dict['Regressions'] = ['Oxygen object abundance', 'Nitrogen object abundance', 'Sulfur object abundance']
Regresions_dict['metal x axis'] = ['OI_HI_emis2nd', 'NI_HI_emis2nd', 'SI_HI_emis2nd']
Regresions_dict['helium y axis'] = ['Ymass_O_emis2nd', 'Ymass_O_emis2nd', 'Ymass_S_emis2nd']
Regresions_dict['element'] = ['O', 'N', 'S']
Regresions_dict['factor'] = [1e5, 1e6, 1e6]
Regresions_dict['title'] = ['Helium mass fraction versus oxygen abundance', 'Helium mass fraction versus nitrogen abundance', 'Helium mass fraction versus sulfur abundance']
Regresions_dict['x label'] = [r'$\frac{{O}}{{H}}$ $({})$'.format(latex_float(Regresions_dict['factor'][0])), r'$\frac{{N}}{{H}}$ $({})$'.format(latex_float(Regresions_dict['factor'][1])), r'$\frac{{S}}{{H}}$ $({})$'.format(latex_float(Regresions_dict['factor'][2]))]
Regresions_dict['y label'] = [r'$Y_{\frac{O}{H}}$', r'$Y_{\frac{O}{H}}$', r'$Y_{\frac{S}{H}}$']
Regresions_dict['Colors'] = [dz.colorVector['green'], dz.colorVector['dark blue'], dz.colorVector['orangish']]
Regresions_list = [['O', 'N'], ['O', 'S'], ['N', 'S'], ['O', 'N', 'S']]
# Additional regression label (BCES result indices -> label).
# NOTE(review): this regr_dict is shadowed by the results table defined
# just below and is therefore never used under this name.
regr_dict = {'0': ['y|x'], '3': ['Orthogonal']}
lmod = Model(linear_model)
# Dict with regression methods
method_dict = {'lm2': linear_model2, 'lm3': linear_model3, 'rlm2': residuals_lin2, 'rlm3': residuals_lin3}
# Initial parameter guesses for the 1- and 2-predictor fits.
p0 = array([0.005, 0.25])
p0_c = array([0.005, 0.005, 0.25])
# Folder to save the plots
output_folder = '/home/vital/Dropbox/Astrophysics/Papers/Yp_AlternativeMethods/images/'
# Dictionary to store the data from the table (we are going to use kapteyn)
# Primordial-helium regression results, filled by the kapteyn fits below:
# [Yp value, uncertainty, number of objects] per element combination.
# NOTE: the original assigned the '$Y_{P,\,O-N-S}$' row twice; the
# duplicate key merely overwrote itself, so the repeat has been removed.
regr_dict = OrderedDict()
regr_dict['$Y_{P,\,O}$'] = [0,0,0]
regr_dict['$Y_{P,\,N}$'] = [0,0,0]
regr_dict['$Y_{P,\,S}$'] = [0,0,0]
regr_dict['$Y_{P,\,O-N}$'] = [0,0,0]
regr_dict['$Y_{P,\,O-S}$'] = [0,0,0]
regr_dict['$Y_{P,\,N-S}$'] = [0,0,0]
regr_dict['$Y_{P,\,O-N-S}$'] = [0,0,0]
# Published reference Yp determinations for comparison:
# [value, uncertainty, number of objects].
exter_regr_dict = OrderedDict()
exter_regr_dict['$Y_{P,\,O}^{1}$'] = [0.2446,0.0029,'5'] # Peimbert
exter_regr_dict['$Y_{P,\,O}^{2}$'] = [0.2449,0.0040,'15'] # Aver
exter_regr_dict['$Y_{P,\,O}^{3}$'] = [0.2551,0.0022,'28'] # Izotov
exter_regr_dict['$Y_{P,\,Planck BBN}^{4}$'] = [0.24467,0.00020,'-']
# Shared keyword arguments for matplotlib annotate() calls.
arguments_coords = {'xycoords':'data', 'textcoords':'data', 'arrowprops':dict(arrowstyle='->'), 'horizontalalignment':'right', 'verticalalignment':'top'}
# Perform linear regressions with plots
for i in range(len(Regresions_dict['Regressions'])):
#Get right regression values properties
element = Regresions_dict['element'][i]
regression = Regresions_dict['Regressions'][i]
metal_x = Regresions_dict['metal x axis'][i]
helium_y = Regresions_dict['helium y axis'][i]
reject_idx = catalogue_df[element + '_valid'].isnull()
reject_objs = catalogue_df[element + '_valid'][~reject_idx].index.values
#Get objects which meet regression conditions
idces_metal = (catalogue_df[metal_x].notnull()) & (catalogue_df[helium_y].notnull()) & (~catalogue_df.index.isin(reject_objs))
objects = catalogue_df.loc[idces_metal].index.values
x = catalogue_df.loc[idces_metal, metal_x].values * Regresions_dict['factor'][i]
y = catalogue_df.loc[idces_metal, helium_y].values
quick_ref = catalogue_df.loc[idces_metal].quick_index
print 'Doing element', element
for idx in range(len(x)):
print objects[idx], x[idx].nominal_value, x[idx].std_dev, y[idx].nominal_value, y[idx].std_dev
#Get the data for the excees N/O objects
NO_excess_idcs = ((catalogue_df[element + '_valid'] == 'NO_excess') | (catalogue_df[element + '_valid'] == 'ignored')) & (catalogue_df[metal_x].notnull()) & (catalogue_df[helium_y].notnull())
x_NO = catalogue_df.loc[NO_excess_idcs, metal_x].values * Regresions_dict['factor'][i]
y_NO = catalogue_df.loc[NO_excess_idcs, helium_y].values
quickref_NO = catalogue_df.loc[NO_excess_idcs].quick_index
print '--Doing regression', Regresions_dict['element'][i]
print '--- Using these objs {}:'.format(len(objects)), ', '.join(list(objects))
print '--- Estos no me gustan {}:'.format(len(reject_objs)), ', '.join(list(reject_objs))
#Create containers
metal_matrix = empty((len(objects), MC_iterations))
Y_matrix = empty((len(objects), MC_iterations))
m_vector, n_vector = empty(MC_iterations), empty(MC_iterations)
m_vectorlmfit = empty(MC_iterations)
n_vectorlmfit = empty(MC_iterations)
lmfit_matrix = empty([2, MC_iterations])
lmfit_error = empty([2, MC_iterations])
curvefit_matrix = empty([2, MC_iterations])
kapteyn_matrix = empty([2, MC_iterations])
#Generate the distributions
for j in range(len(objects)):
metal_matrix[j,:] = random.normal(x[j].nominal_value, x[j].std_dev, size = MC_iterations)
Y_matrix[j,:] = random.normal(y[j].nominal_value, y[j].std_dev, size = MC_iterations)
#Run the fits
for k in range(MC_iterations):
x_i = metal_matrix[:,k]
y_i = Y_matrix[:,k]
m, n, r_value, p_value, std_err = stats.linregress(x_i, y_i)
m_vector[k], n_vector[k] = m, n
#Lmfit
result_lmfit = lmod.fit(y_i, x=x_i, m=0.005, n=0.24709)
lmfit_matrix[:,k] = array(result_lmfit.params.valuesdict().values())
lmfit_error[:,k] = array([result_lmfit.params['m'].stderr, result_lmfit.params['n'].stderr])
#Curvefit
best_vals, covar = curve_fit(linear_model, x_i, y_i, p0=p0)
curvefit_matrix[:,k] = best_vals
#kapteyn
fitobj = kmpfit.Fitter(residuals=residuals_lin, data= (x_i, y_i))
fitobj.fit(params0 = p0)
kapteyn_matrix[:,k] = fitobj.params
#Get fit mean values
n_Median, n_16th, n_84th = median(n_vector), percentile(n_vector,16), percentile(n_vector,84)
m_Median, m_16th, m_84th = median(m_vector), percentile(m_vector,16), percentile(m_vector,84)
m_Median_lm, m_16th_lm, m_84th_lm = median(lmfit_matrix[0,:]), percentile(lmfit_matrix[0,:],16), percentile(lmfit_matrix[0,:],84)
n_Median_lm, n_16th_lm, n_84th_lm = median(lmfit_matrix[1,:]), percentile(lmfit_matrix[1,:],16), percentile(lmfit_matrix[1,:],84)
m_Median_lm_error, n_Median_lm_error = median(lmfit_error[0,:]), median(lmfit_error[1,:])
m_Median_cf, m_16th_cf, m_84th_cf = median(curvefit_matrix[0,:]), percentile(curvefit_matrix[0,:],16), percentile(curvefit_matrix[0,:],84)
n_Median_cf, n_16th_cf, n_84th_cf = median(curvefit_matrix[1,:]), percentile(curvefit_matrix[1,:],16), percentile(curvefit_matrix[1,:],84)
m_Median_kp, m_16th_kp, m_84th_kp = median(kapteyn_matrix[0,:]), percentile(kapteyn_matrix[0,:],16), percentile(kapteyn_matrix[0,:],84)
n_Median_kp, n_16th_kp, n_84th_kp = median(kapteyn_matrix[1,:]), percentile(kapteyn_matrix[1,:],16), percentile(kapteyn_matrix[1,:],84)
#Bootstrap BCES
m, n, m_err, n_err, cov = bcesboot(nominal_values(x), std_devs(x), nominal_values(y), std_devs(y), cerr=zeros(len(x)), nsim=10000)
print 'BCES y dependent'
print 'n',n[0], n_err[0]
#print 'm',m[0], m_err[0]
print 'BCES Orthogonal'
print 'n',n[3], n_err[3]
#print 'm',m[3], m_err[3]
print 'Stats lingress'
print 'n', n_Median, n_Median-n_16th, n_84th-n_Median
#print 'm', m_Median, m_Median-m_16th, m_84th-m_Median
print 'Lmfit'
print 'n', n_Median_lm, n_Median_lm-n_16th_lm, n_84th_lm-n_Median_lm
#print 'm', m_Median_lm, m_Median_lm-m_16th_lm, m_84th_lm-m_Median_lm
print 'curvefit'
print 'n', n_Median_cf, n_Median_cf-n_16th_cf, n_84th_cf-n_Median_cf
#print 'm', m_Median_cf, m_Median_cf-m_16th_cf, m_84th_cf-m_Median_cf
print 'kapteyn'
print 'n', n_Median_kp, n_Median_kp-n_16th_kp, n_84th_kp-n_Median_kp, '\n'
#print 'm', m_Median_kp, m_Median_kp-m_16th_kp, m_84th_kp-m_Median_kp
#Saving the data
entry_key = r'$Y_{{P,\,{elem}}}$'.format(elem=element)
regr_dict[entry_key][0] = median(kapteyn_matrix[1,:])
regr_dict[entry_key][1] = std(kapteyn_matrix[1,:])
regr_dict[entry_key][2] = len(objects)
#Linear data
x_regression_range = linspace(0.0, max(nominal_values(x)) * 1.10, 20)
y_regression_range = m_Median_cf * x_regression_range + n_Median_cf
label_regr = 'Linear fit'
#label_regr = 'SCIPY bootstrap: $Y_{{P}} = {n}_{{-{lowerlimit}}}^{{+{upperlimit}}}$'.format(title = Regresions_dict['title'][i], n = round_sig(n_Median,4, scien_notation=False), lowerlimit = round_sig(n_Median-n_16th,2, scien_notation=False), upperlimit = round_sig(n_84th-n_Median,2, scien_notation=False))
#Plotting the data,
label_regression = r'Plank prediction: $Y = 0.24709\pm0.00025$'
dz.data_plot(nominal_values(x), nominal_values(y), color = Regresions_dict['Colors'][i], label='HII galaxies included', markerstyle='o', x_error=std_devs(x), y_error=std_devs(y))
dz.data_plot(x_regression_range, y_regression_range, label = label_regr, color = Regresions_dict['Colors'][i], linestyle = '--')
#dz.plot_text(nominal_values(x), nominal_values(y), quick_ref)
#Plotting NO objects
dz.data_plot(nominal_values(x_NO), nominal_values(y_NO), color = Regresions_dict['Colors'][i], label='HII galaxies excluded', markerstyle='x', x_error=std_devs(x_NO), y_error=std_devs(y_NO), e_style=':')
#dz.plot_text(nominal_values(x_NO), nominal_values(y_NO), quickref_NO, y_pad=1.05)
# for ii in range(len(y_NO)):
# x_coord, y_coord = nominal_values(x_NO)[ii], nominal_values(y_NO)[ii]
# dz.Axis.annotate(s=quickref_NO[ii],xy=(x_coord, y_coord), xytext=(x_coord, y_coord*1.20), **arguments_coords)
if element != 'S':
for ii in range(len(y_NO)):
x_coord, y_coord = nominal_values(x_NO)[ii], nominal_values(y_NO)[ii]
dz.Axis.text(x_coord, y_coord, quickref_NO[ii], {'ha': 'left', 'va': 'bottom'}, rotation=65, fontsize=18)
else:
counter = 0
coords_sulfur = [3, 3.5, 3.25]
arrows_sulfur = [3.1, 3.60, 3.35]
for ii in range(len(y_NO)):
if quickref_NO[ii] in ['SHOC592','SHOC220', 'SHOC036']:
x_coord, y_coord = nominal_values(x_NO)[ii], nominal_values(y_NO)[ii]
dz.Axis.text(x_coord, y_coord, quickref_NO[ii], {'ha': 'left', 'va': 'bottom'}, rotation=65, fontsize=18)
elif quickref_NO[ii] in ['SHOC588', 'SHOC575', 'SHOC579']:
x_coord, y_coord = nominal_values(x_NO)[ii], nominal_values(y_NO)[ii]
# dz.Axis.annotate(quickref_NO[ii], xy=(x_coord, y_coord), xycoords='data',
# xytext=(coords_sulfur[counter], 0.35), textcoords='data',
# arrowprops=dict(arrowstyle="->", lw=1.5), rotation=65, fontsize=18)
dz.Axis.annotate('', xy=(x_coord, y_coord), xycoords='data',
xytext=(arrows_sulfur[counter], 0.308), textcoords='data',
arrowprops=dict(arrowstyle="->", lw=1.5), fontsize=18)
dz.Axis.annotate(quickref_NO[ii], xy=(x_coord, y_coord), xycoords='data',
xytext=(coords_sulfur[counter], 0.35), textcoords='data',
rotation=65, fontsize=18)
counter += 1
#Plot WMAP prediction
dz.data_plot(WMAP_coordinates[0].nominal_value, WMAP_coordinates[1].nominal_value, color = dz.colorVector['pink'], label='Planck prediction', markerstyle='o', x_error=WMAP_coordinates[0].std_dev, y_error=WMAP_coordinates[1].std_dev)
#plotTitle = r'{title}: $Y_{{P}} = {n}_{{-{lowerlimit}}}^{{+{upperlimit}}}$'.format(title = Regresions_dict['title'][i], n = round_sig(n_Median,4, scien_notation=False), lowerlimit = round_sig(n_Median-n_16th,2, scien_notation=False), upperlimit = round_sig(n_84th-n_Median,2, scien_notation=False))
dz.Axis.set_ylim(0.1,0.4)
dz.FigWording(Regresions_dict['x label'][i], Regresions_dict['y label'][i], '', loc='lower center', ncols_leg=2)
output_pickle = '{objFolder}{element}_regression_2nd'.format(objFolder=output_folder, element = element)
dz.save_manager(output_pickle, save_pickle = False)
# Combined regressions
# Fit Y against every metal tracer in the group simultaneously: one combined
# regression per entry of Regresions_list, propagating observational errors
# with Monte Carlo resampling of each measurement.
for i in range(len(Regresions_list)):
    regr_group = Regresions_list[i]
    dim_group = len(regr_group)
    # ext_method selects the n-dimensional model/residual pair in method_dict.
    ext_method = str(dim_group)
    p0 = array([0.005] * dim_group + [0.25])
    # Define lmfit model
    params = Parameters()
    for idx in range(dim_group):
        params.add('m' + str(idx), value=0.005)
    params.add('n', value=0.25)
    # Loop through the elements valid objects
    idcs = (catalogue_df['Ymass_O_emis2nd'].notnull())
    for element in regr_group:
        abunCode = '{}I_HI_emis2nd'.format(element)
        valCode = '{}_valid'.format(element)
        idcs = idcs & (catalogue_df[abunCode].notnull()) & (catalogue_df[valCode].isnull())
    # Get data
    data_dict = OrderedDict()
    objects = catalogue_df.loc[idcs].index.values
    data_dict['Y'] = catalogue_df.loc[idcs, 'Ymass_O_emis2nd'].values
    for element in regr_group:
        abunCode = '{}I_HI_emis2nd'.format(element)
        data_dict[element] = catalogue_df.loc[idcs, abunCode].values
    # Generate containers for the data
    metal_matrix = empty((dim_group, len(objects), MC_iterations))
    Y_matrix = empty((len(objects), MC_iterations))
    # NOTE(review): lmfit_matrix, lmfit_error and stats_matrix are allocated
    # here but never filled inside this loop — see the median() caveat below.
    lmfit_matrix = empty([dim_group + 1, MC_iterations])
    lmfit_error = empty([dim_group + 1, MC_iterations])
    curvefit_matrix = empty([dim_group + 1, MC_iterations])
    kapteyn_matrix = empty([dim_group + 1, MC_iterations])
    stats_matrix = empty([dim_group + 1, MC_iterations])
    # Generate the distributions
    for j in range(len(objects)):
        Y_matrix[j, :] = random.normal(data_dict['Y'][j].nominal_value, data_dict['Y'][j].std_dev, size=MC_iterations)
        for i_dim in range(dim_group):
            element = regr_group[i_dim]
            metal_matrix[i_dim, j, :] = random.normal(data_dict[element][j].nominal_value, data_dict[element][j].std_dev,
                                                      size=MC_iterations)
    # Run the curvefit and kapteyn fit
    formula_label = 'Y ~ ' + ' + '.join(regr_group)
    for k in range(MC_iterations):
        # Dictionary to store the current iteration
        x_ith = metal_matrix[:, :, k]
        y_ith = Y_matrix[:, k]
        # Curvefit
        best_vals, covar = curve_fit(method_dict['lm' + ext_method], x_ith, y_ith, p0=p0)
        curvefit_matrix[:, k] = best_vals
        # kapteyn
        fitobj = kmpfit.Fitter(residuals=method_dict['rlm' + ext_method], data=(x_ith, y_ith))
        fitobj.fit(params0=p0)
        kapteyn_matrix[:, k] = fitobj.params
    # Get fit mean values
    # The intercept n sits in the last row of every parameter matrix.
    idx_n = dim_group
    # NOTE(review): lmfit_matrix was never populated above, so this median
    # reads uninitialised memory from empty(); confirm the value is unused.
    n_median_lmerror = median(lmfit_matrix[idx_n, :])
    n_Median_cf, n_16th_cf, n_84th_cf = median(curvefit_matrix[idx_n, :]), percentile(curvefit_matrix[idx_n, :],16), percentile(curvefit_matrix[idx_n, :], 84)
    n_Median_kp, n_16th_kp, n_84th_kp = median(kapteyn_matrix[idx_n, :]), percentile(kapteyn_matrix[idx_n, :],16), percentile(kapteyn_matrix[idx_n, :], 84)
    #Saving the data
    if dim_group == 2:
        entry_key = r'$Y_{{P,\,{elemA}-{elemB}}}$'.format(elemA=regr_group[0], elemB=regr_group[1])
        regr_dict[entry_key][0] = median(kapteyn_matrix[idx_n, :])
        regr_dict[entry_key][1] = std(kapteyn_matrix[idx_n, :])
        regr_dict[entry_key][2] = int(len(objects))
    elif dim_group == 3:
        entry_key = r'$Y_{{P,\,{elemA}-{elemB}-{elemC}}}$'.format(elemA=regr_group[0], elemB=regr_group[1], elemC=regr_group[2])
        regr_dict[entry_key][0] = median(kapteyn_matrix[idx_n, :])
        regr_dict[entry_key][1] = std(kapteyn_matrix[idx_n, :])
        regr_dict[entry_key][2] = int(len(objects))
    # Display results
    print '\n---Regression', Regresions_list[i], 'using {} objects'.format(len(objects))
    print 'curvefit'
    print n_Median_cf, n_84th_cf - n_Median_cf, n_Median_cf - n_16th_cf
    print 'kapteyn'
    print n_Median_kp, n_84th_kp - n_Median_kp, n_Median_kp - n_16th_kp
#Make the table
# Render the Y_P determinations (own fits + external literature values) into
# a single LaTeX/PDF table via the dz plotting/table helper.
pdf_address = '/home/vital/Dropbox/Astrophysics/Papers/Yp_AlternativeMethods/tables/yp_determinations'
headers = ['Elemental regression', 'Magnitude', 'Number of objects']
dz.pdf_insert_table(headers)
# BUGFIX: the original computed ``last_row = False if last_key != last_key
# else True`` — a key compared against *itself* — so every single row was
# flagged as the last one.  Compare the current key against the final key of
# the dictionary actually being iterated.  list() keeps this valid on both
# Python 2 and 3.
last_key = list(regr_dict.keys())[-1]
for key in regr_dict:
    magnitude_entry = r'${}\pm{}$'.format(round_sig(regr_dict[key][0], 3, scien_notation=False),
                                          round_sig(regr_dict[key][1], 1, scien_notation=False))
    row = [key, magnitude_entry, str(int(regr_dict[key][2]))]
    dz.addTableRow(row, last_row=(key == last_key))
dz.table.add_hline()
# External (literature) determinations: their own last key, not regr_dict's.
last_external_key = list(exter_regr_dict.keys())[-1]
for key in exter_regr_dict:
    magnitude_entry = r'${}\pm{}$'.format(exter_regr_dict[key][0], exter_regr_dict[key][1])
    row = [key, magnitude_entry, exter_regr_dict[key][2]]
    dz.addTableRow(row, last_row=(key == last_external_key))
dz.generate_pdf(output_address=pdf_address)
# dz.generate_pdf()
# dz.generate_pdf()
|
#!/usr/bin/env python
import pprint
import sys
import yaml
def get_yaml(file_name=None):
    """Return the Python data structures parsed from a YAML file.

    Parameters: File name of YAML file

    Returns:
        list: One element per YAML document found in the file.

    Raises:
        ValueError: If no file name is given.
    """
    if file_name is None:
        raise ValueError("No YAML file passed to function!")
    try:
        # Open inside a context manager so the handle is always closed (the
        # original leaked it), and materialise the documents before the file
        # goes away — load_all returns a lazy generator.  safe_load_all
        # refuses to construct arbitrary Python objects from untrusted input.
        with open(file_name) as yaml_file:
            return list(yaml.safe_load_all(yaml_file))
    except IOError:
        print('Oops! Check the name of the YAML file passed as an argument.')
        print('The file passed during execution does not appear to exist.')
        sys.exit()
def main():
    """Main method to iterate over data structures imported from YAML files.

    Parameters: None
    """
    # Guard clause: exactly one argument (the YAML file name) is required.
    if len(sys.argv) != 2:
        print('Oops! Might want to check your syntax!')
        print('You should run this example like this:')
        print('python {f} <NAME OF YAML FILE>'.format(f=__file__))
        sys.exit()
    yaml_file_name = sys.argv[1]
    for document in get_yaml(yaml_file_name):
        print('Here is the Python data type:')
        print(type(document))
        print('Here is the data:')
        pprint.pprint(document)
if __name__ == '__main__':
    main()
|
from unittest import mock
import pytest
from antareslauncher.data_repo.idata_repo import IDataRepo
from antareslauncher.remote_environnement.remote_environment_with_slurm import (
RemoteEnvironmentWithSlurm,
)
from antareslauncher.remote_environnement.slurm_script_features import (
SlurmScriptFeatures,
)
from antareslauncher.use_cases.check_remote_queue.check_queue_controller import (
CheckQueueController,
)
from antareslauncher.use_cases.check_remote_queue.slurm_queue_show import SlurmQueueShow
from antareslauncher.use_cases.retrieve.state_updater import StateUpdater
class TestIntegrationCheckQueueController:
    """Integration-style tests wiring a real CheckQueueController, Slurm
    environment, queue-show and state-updater together; only the SSH
    connection and the display/repo collaborators are mocked."""
    def setup_method(self):
        # Fake SSH connection: every remote command "succeeds" with empty
        # stdout/stderr.
        self.connection_mock = mock.Mock()
        self.connection_mock.username = "username"
        self.connection_mock.execute_command = mock.Mock(return_value=("", ""))
        slurm_script_features = SlurmScriptFeatures()
        env_mock = RemoteEnvironmentWithSlurm(
            _connection=self.connection_mock,
            slurm_script_features=slurm_script_features,
        )
        display_mock = mock.Mock()
        slurm_queue_show = SlurmQueueShow(env_mock, display_mock)
        state_updater = StateUpdater(env_mock, display_mock)
        repo = mock.MagicMock(spec=IDataRepo)
        self.check_queue_controller = CheckQueueController(
            slurm_queue_show, state_updater, repo
        )
    @pytest.mark.integration_test
    def test_check_queue_controller_check_queue_calls_connection_execute_command(self):
        """check_queue() must issue exactly one command over the connection."""
        # when
        self.check_queue_controller.check_queue()
        # then
        self.connection_mock.execute_command.assert_called_once()
|
from components import time
from tests import base
import unittest
class TestTime(base.TestBase):
    """Tests for the Time component built from the TIME config section."""
    def setUp(self):
        # Build the component under test from the shared fixture config.
        super().setUp()
        self.time = time.Time(self.cfg['TIME'])
    def test_config(self):
        """The fixture config should carry the expected strftime format."""
        self.assertEqual(self.cfg['TIME']['format'], '%a %d/%m %R')
    def test_time(self):
        """Component text should equal 'now' rendered with the same format.

        NOTE(review): comparing two separate datetime.now() renderings can
        flake if the minute rolls over between the component's render and
        this call, and it requires the uk_UA locale to be installed.
        """
        from datetime import datetime
        from locale import setlocale, LC_TIME
        setlocale(LC_TIME, 'uk_UA')
        self.assertEqual(datetime.now().strftime('%a %d/%m %R'), self.time.text)
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 16 17:37:45 2017
@author: Anuj
"""
import numpy as np
def gauss_seidel(phi, phi_bdry, source, epsit, max_iters,
                 omega, coeff, coeff_p, resid):
    """Relaxed (SOR) Gauss-Seidel sweeps for a 5-point stencil; phi updated
    in place.

    Iterates until the largest point update falls below epsit * max|phi| or
    max_iters sweeps have run.  Boundary rows/columns of phi are never
    written (loops run over interior indices only).

    Args:
        phi: 2-D solution array of shape (ny, nx).
        phi_bdry: Floor value — interior points below it are clamped up.
        source: Right-hand-side array, same shape as phi.
        epsit: Relative convergence tolerance.
        max_iters: Sweep limit.
        omega: Relaxation factor (omega == 1 gives plain Gauss-Seidel).
        coeff: Off-diagonal stencil coefficient (same for all 4 neighbours).
        coeff_p: Centre-point (diagonal) coefficient.
        resid: Preallocated array receiving the final interior residual.

    Returns:
        (phi, phi_old, iter_count, iter_update, resid, dphi_max, conv_err)
    """
    nx = phi.shape[1]
    ny = phi.shape[0]
    iter_count = 0
    # Seed the loop condition so at least one sweep always runs.
    iter_update = 10 * epsit
    conv_err = epsit
    while iter_count < max_iters and iter_update > conv_err:
        iter_count += 1
        phi_max = 0
        dphi_max = 0
        #resid_max = 0
        phi_old = np.copy(phi)
        for i in range(1, nx-1):
            for j in range(1, ny-1):
                # SOR update: blend the fresh Gauss-Seidel value (using
                # already-updated west/north neighbours) with the old value.
                phi[j, i] = ((omega * (source[j, i] + coeff*phi[j, i-1]
                            + coeff*phi[j-1, i] + coeff*phi[j+1, i]
                            + coeff*phi[j, i+1])/coeff_p)
                            + (1-omega)*phi[j, i])
                phi_max = max(phi_max, abs(phi[j,i]))
                dphi = abs(phi[j, i] - phi_old[j, i])
                dphi_max = max(dphi_max, dphi)
        # Clamp: never let the solution drop below the boundary value.
        phi[phi < phi_bdry] = phi_bdry
        # for i in range(1, nx-1):
        #     for j in range(1, ny-1):
        #         resid[j, i] = (source[j, i] + coeff*phi[j, i-1]
        #                        + coeff*phi[j-1, i] + coeff*phi[j+1, i]
        #                        + coeff*phi[j, i+1] - coeff_p*phi[j, i])
        # Vectorised residual over the interior (same as the loop above).
        resid[1:ny-1, 1:nx-1] = (source[1:ny-1, 1:nx-1]
                                 + coeff*phi[1:ny-1, 0:nx-2]
                                 + coeff*phi[0:ny-2, 1:nx-1]
                                 + coeff*phi[2:ny, 1:nx-1]
                                 + coeff*phi[1:ny-1, 2:nx]
                                 - coeff_p*phi[1:ny-1, 1:nx-1])
        iter_update = dphi_max
        conv_err = epsit * phi_max
    return phi, phi_old, iter_count, iter_update, resid, dphi_max, conv_err
import math
import logging
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
def generate_linear_series_from_model(time_series_len, model):
    """Evaluate a polynomial model at the points 0 .. time_series_len - 1.

    *model* is a coefficient sequence accepted by numpy.poly1d (highest
    order first); the result is returned as a plain list of sample values.
    """
    predict = np.poly1d(model)
    return [predict(step) for step in range(time_series_len)]
def generate_arma_series_from_model(time_series_len, model):
    """Forecast the tail of a timeline from a fitted ARMA-style model.

    The first 60% of the timeline is treated as the training window and the
    model is asked to predict from there up to ``time_series_len``.

    Args:
        time_series_len: Total length of the timeline.
        model: Fitted model object exposing ``predict(start, end)``.

    Returns:
        list[int]: The predicted values rounded to integers.

    Note: the original signature named this parameter ``list`` (shadowing
    the builtin, which the body then tried to call) while the body used an
    undefined name ``model`` — the function could never run.  The parameter
    is renamed to match what the body expects; positional callers are
    unaffected.
    """
    train_size = int(time_series_len * 0.6)
    prediction = model.predict(train_size, time_series_len)
    logging.debug(prediction)
    return [round(val) for val in prediction]
def generate_trend_from_decompose(series, period = 7):
    """Return the trend component of an additive seasonal decomposition.

    *series* is a sequence of dicts carrying a 'count' key; *period* is the
    seasonality length in samples (default 7, i.e. weekly for daily data).
    """
    counts = [entry['count'] for entry in series]
    decomposition = seasonal_decompose(counts, model='additive', period=period)
    return decomposition.trend
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMerchantWeikeBilltaxModifyModel(object):
    """Value object for the merchant weike bill-tax modify Alipay request.

    Every field is exposed through a plain property; the object converts
    to and from the wire-format dict used by the SDK via to_alipay_dict /
    from_alipay_dict.
    """

    # Wire-level field names, in the order they are serialised.
    _FIELDS = (
        'actual_tax',
        'alipay_trans_serial_no',
        'bill_month',
        'bill_no',
        'bill_version',
        'expect_tax',
        'gmt_modified',
        'out_biz_no',
        'tax_rebate',
        'tax_rebate_gmt_pay',
        'tax_rebate_serial_no',
        'weike_user_id',
    )

    def __init__(self):
        # Every backing attribute starts unset.
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    @property
    def actual_tax(self):
        return self._actual_tax

    @actual_tax.setter
    def actual_tax(self, value):
        self._actual_tax = value

    @property
    def alipay_trans_serial_no(self):
        return self._alipay_trans_serial_no

    @alipay_trans_serial_no.setter
    def alipay_trans_serial_no(self, value):
        self._alipay_trans_serial_no = value

    @property
    def bill_month(self):
        return self._bill_month

    @bill_month.setter
    def bill_month(self, value):
        self._bill_month = value

    @property
    def bill_no(self):
        return self._bill_no

    @bill_no.setter
    def bill_no(self, value):
        self._bill_no = value

    @property
    def bill_version(self):
        return self._bill_version

    @bill_version.setter
    def bill_version(self, value):
        self._bill_version = value

    @property
    def expect_tax(self):
        return self._expect_tax

    @expect_tax.setter
    def expect_tax(self, value):
        self._expect_tax = value

    @property
    def gmt_modified(self):
        return self._gmt_modified

    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def tax_rebate(self):
        return self._tax_rebate

    @tax_rebate.setter
    def tax_rebate(self, value):
        self._tax_rebate = value

    @property
    def tax_rebate_gmt_pay(self):
        return self._tax_rebate_gmt_pay

    @tax_rebate_gmt_pay.setter
    def tax_rebate_gmt_pay(self, value):
        self._tax_rebate_gmt_pay = value

    @property
    def tax_rebate_serial_no(self):
        return self._tax_rebate_serial_no

    @tax_rebate_serial_no.setter
    def tax_rebate_serial_no(self, value):
        self._tax_rebate_serial_no = value

    @property
    def weike_user_id(self):
        return self._weike_user_id

    @weike_user_id.setter
    def weike_user_id(self, value):
        self._weike_user_id = value

    def to_alipay_dict(self):
        """Serialise every truthy field into the Alipay request dict,
        delegating to a nested object's to_alipay_dict when present."""
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, '_' + field_name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayMerchantWeikeBilltaxModifyModel()
        for field_name in AlipayMerchantWeikeBilltaxModifyModel._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
|
import time
import json
import random
import requests
# Static fixture describing the tournament payload; update_tournament()
# below refreshes the identifier and start date before each run.
tournament_dict = {
    "identifier": "4nidzmunvpvxk1ir9b6m8mpay",
    "tournamentName": "Ukrainian Football League",
    "location": "Ukraine",
    "stadium": "Dynamo",
    "league": "Ukrainian Football",
    "startDate": "2016-07-11 19:00:00 GMT-0000",
    "sportName": "football",
    "providerName": "perform"
}
def update_tournament():
    """Update fields in tournament dictionary.

    :return:dict
    """
    # Refresh the module-level fixture in place and hand it back.
    tournament_dict.update(
        identifier='r56f0430409b488e99880c4e1fd22cd5',
        startDate=time.strftime("%Y-%m-%d %H:%M:%S GMT-0000"),
    )
    return tournament_dict
def load_events():
    """Load json file with events and update events field.

    Reads ./event_details.json and overwrites the identifying fields with
    fixture values: a fixed event id, hard-coded teams, tournament linkage
    taken from update_tournament(), and 'now' for all date fields.
    :return:dict
    """
    tournament = update_tournament()
    # event_id = int(time.time() * 1000)
    # NOTE(review): fixed id (instead of the commented time-based one) —
    # presumably so reruns address the same event on the service; confirm.
    event_id = 1502303120430
    with open('./event_details.json') as events_file:
        data = json.load(events_file)
        data['id'] = event_id
        data['matchId'] = event_id
        data['rbid'] = event_id
        data['homeTeam'] = 'Dnipro'
        data['homeTeamAbbr'] = 'DNP'
        data['awayTeam'] = 'Shachtar'
        data['awayTeamAbbr'] = 'SHC'
        data['name'] = data['homeTeam'] + ' vs ' + data['awayTeam']
        data['tournamentID'] = tournament['identifier']
        data['leagueUuid'] = tournament['identifier']
        data['stadium'] = tournament['stadium']
        data['league'] = tournament['league']
        data['country'] = 'Ukraine'
        data['uuid'] = '4poiu4cdfh0v0679mndd9vyb6'
        data['date'] = time.strftime("%Y-%m-%d %H:%M:%S")
        data['startTime'] = time.strftime("%Y-%m-%d %H:%M:%S")
        data['startDate'] = time.strftime("%a %b %d %Y %H:%M:%S GMT+0000 (UTC)")
    return data
def load_incidents():
    """Load json file with incidents and update incident fields.

    Reads ./incident_events.json and stamps each incident (and its payload)
    with the id/uuid of the event built by load_events().
    :return:dict
    """
    events = load_events()
    with open('./incident_events.json') as incident_file:
        data_incident = json.load(incident_file)
        for incident in data_incident['incidents']:
            incident['eventId'] = events['id']
            # NOTE(review): this inner loop rewrites the same three payload
            # keys once per existing payload key — a single pass would
            # suffice; confirm 'field' was not meant to be used.
            for field in incident['payload']:
                incident['payload']['eventId'] = events['id']
                incident['payload']['ID'] = events['id']
                incident['payload']['UUID'] = events['uuid']
    return data_incident['incidents']
# --- Script body (Python 2): push the fixture tournament, event, mapping and
# --- incidents to the VIS RTC test service over HTTPS.
data_tournament = update_tournament()
data_events = load_events()
body_tournament = json.dumps(data_tournament, indent=2)
body_events = json.dumps(data_events, indent=2)
vis_rtc_url = 'https://vis-tst2-coral.symphony-solutions.eu'
sport = 'football'
provider = 'perform'
content_type_headers = {"Content-Type": "application/json"}
# verify=False disables TLS certificate checks — acceptable only because this
# targets an internal test environment.
post_tournament = requests.post(url=vis_rtc_url + '/storeTournament/%s/%s' % (sport, provider),
                                headers=content_type_headers, data=body_tournament, verify=False)
post_events = requests.post(url=vis_rtc_url + '/matchDetails/%s/%s' % (sport, provider),
                            headers=content_type_headers, data=body_events, verify=False)
# Map the fixture event to a random OpenBet id.
openBetID = random.randint(1, 9999999)
data_mapping = {
    "openbetId": openBetID,
    "id": data_events['id'],
    "type": 'auto'
}
body_mapping = json.dumps(data_mapping)
post_mapping = requests.post(url=vis_rtc_url + '/addMapping/%s/%s' % (sport, provider),
                             headers=content_type_headers, data=body_mapping, verify=False)
data_incident = load_incidents()
# for incident in data_incident['incidents']:
for incident in data_incident[:]:
    body_incident = json.dumps(incident['payload'], indent=2)
    # print body_incident
    post_incident = requests.post(url=vis_rtc_url + '/storeIncident/%s/%s' % (sport, provider),
                                  headers=content_type_headers, data=body_incident, verify=False)
    print '*** Post incidents details: Response code is %s, content %s' % (post_incident.status_code, post_incident.content)
print '*** Post tournament details: Response code is %s, content %s' % (post_tournament.status_code, post_tournament.content)
print '*** Post event details: Response code is %s, content %s' % (post_events.status_code, post_events.content)
print '*** Post mapping details: Response code is %s, content %s' % (post_mapping.status_code, post_mapping.content)
|
# Generated by Django 2.0 on 2018-12-03 15:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: alter the 'spentby' and 'spentfor' foreign
    keys on splittransactions, pointing them at the configured user model
    with explicit related_name values ('split_spentby' / 'split_spentfor')."""

    dependencies = [
        ('home', '0005_remove_expenses_test'),
    ]

    operations = [
        migrations.AlterField(
            model_name='splittransactions',
            name='spentby',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='split_spentby', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='splittransactions',
            name='spentfor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='split_spentfor', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
US=[4,1,5,3,6,9,2,8]
def heap_sort(lst):
    """In-place max-heap sort (Python 2: integer '/' and print statements)."""
    # Heapify: sift down every internal node, starting at the last parent.
    for start in range((len(lst)-2)/2,-1,-1):
        sift_down(lst,start,len(lst)-1)
        print lst
    # Repeatedly swap the max (root) to the end and restore the heap on the
    # shrinking prefix.
    for end in range(len(lst)-1,0,-1):
        lst[0],lst[end] = lst[end],lst[0]
        sift_down(lst,0,end-1)
        print lst
def sift_down(lst, start, end):
    """Restore the max-heap property for the subtree rooted at *start*.

    Children of node i live at 2*i+1 and 2*i+2; *end* is the last index
    (inclusive) that still belongs to the heap.  Mutates lst in place.
    """
    parent = start
    child = 2 * parent + 1
    while child <= end:
        # Pick the larger of the two children when the right one exists.
        right = child + 1
        if right <= end and lst[child] < lst[right]:
            child = right
        if lst[parent] >= lst[child]:
            break  # heap property already holds below this point
        lst[parent], lst[child] = lst[child], lst[parent]
        parent = child
        child = 2 * parent + 1
# Demo: sort the module-level list in place, printing after every sift.
if __name__ == "__main__":
    heap_sort(US)
|
#!usr/bin/env python3.6
"""Write a file of trees without extremely low branch lengths.
The idea is that these are the trees that orthofinder estimated incorrectly.
"""
import re
import sys
import my_module as mod
def get_args():
    """Get user arguments.

    Returns:
        list: The three positional arguments (treedir, nodes_file, outfile).

    Exits with a usage message when the argument count is wrong.  The
    original fell through and implicitly returned None on that path, which
    made main() crash with a confusing unpacking TypeError instead.
    """
    if len(sys.argv) == 4:
        return sys.argv[1:]
    print("\nUSAGE python exclude_shite_trees.py treedir nodes_file outfile\n")
    sys.exit()
def extract_branch_lengths(tree):
    """Get all the branch lengths on a tree.

    Returns only a list of the lengths - not ordered or anything fancy."""
    # In newick strings each branch length follows a colon; the character
    # class also admits scientific notation such as 2e-3.
    return [float(match[1:]) for match in re.findall(r':[0-9\.e\-]+', tree)]
def main():
    """Do the above.

    Walks tree files full_0.rooted .. full_577.rooted that are listed in the
    nodes file and appends to *outfile* the names of trees whose branch
    lengths are all >= 0.01 (i.e. trees deemed correctly estimated).
    """
    treedir, node_file, outfile = get_args()
    nodelines = mod.get_file_data(node_file)
    i = 0
    # NOTE(review): the tree count 578 is hard-coded — confirm it matches
    # the dataset size.
    while i < 578:
        # flag == 1 means "keep this tree" until a short branch is found.
        flag = 1
        if "full_" + str(i) + ".rooted" in nodelines:
            try:
                tree_string = mod.get_file_data(treedir + "/full_"
                                                + str(i) + ".rooted")[0]
            except:
                # Missing or unreadable tree file: skip it silently.
                i += 1
                continue
            branch_lengths = extract_branch_lengths(tree_string)
            for bl in branch_lengths:
                if bl < 0.01:
                    flag = 0
                    break
            if flag == 1:
                out = open(outfile, "a")
                out.write("full_" + str(i) + ".rooted" + "\n")
                out.close()
        i += 1
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from neodroid.environments.droid_environment import SingleUnityEnvironment
from neodroid.utilities.exceptions.exceptions import SensorNotAvailableException
from neodroid.utilities.snapshot_extraction.camera_extraction import (
extract_camera_observation,
extract_from_cameras,
)
__author__ = "Christian Heider Nielsen"
class CameraObservationWrapper(SingleUnityEnvironment):
    """Unity environment wrapper that yields camera observations directly.

    Iterating the wrapper (``__next__``) reacts with no payload and returns
    the frames extracted from the reply; with ``auto_reset`` the environment
    is reset whenever a snapshot reports termination.
    """
    def __init__(self, auto_reset=True, **kwargs):
        # auto_reset: reset the underlying environment when an episode ends.
        super().__init__(**kwargs)
        self._auto_reset = auto_reset
        self.reset()

    def __next__(self):
        # Yield nothing once the server connection is gone.
        if not self._is_connected_to_server:
            return
        return self.fetch_new_frame(None)

    def sensor(self, key: str):
        """Return camera observation *key* from the first environment's
        latest snapshot; raises SensorNotAvailableException when there is
        no snapshot yet."""
        if self._last_snapshots:
            state_env_0 = list(self._last_snapshots.values())[0]
            return extract_camera_observation(state_env_0, key)
        raise SensorNotAvailableException

    def update(self):
        # React with defaults; returns the raw message, not extracted frames.
        return super().react()

    def fetch_new_frame(self, *args, **kwargs):
        """React and return the camera images extracted from the reply.

        NOTE(review): on termination with auto_reset, the recursive call
        already returns *extracted* frames, which are then fed through
        extract_from_cameras again below — confirm this is intended.
        """
        message = super().react(*args, **kwargs)
        if message.terminated and self._auto_reset:
            super().reset()
            message = self.fetch_new_frame()
        if message:
            return extract_from_cameras(message)
        return None
|
#!/usr/bin/env python
import numpy as np
from scipy.linalg import expm
from lab3_header import *
"""
Use 'expm' for matrix exponential.
Angles are in radian, distance are in meters.
"""
def Get_MS():
    """Return (M, S): the home configuration and spatial screw axes of the
    6-joint arm, with all axis points shifted by the base offset (meters)."""
    # =================== Your code starts here ====================#
    # Rotation direction of each joint axis.
    w = np.array([
        [0, 0, 1],
        [0, 1, 0],
        [0, 1, 0],
        [0, 1, 0],
        [1, 0, 0],
        [0, 1, 0],
    ], dtype=float)
    # A point on each joint axis, before applying the base offset.
    q = np.array([
        [0, 0, 0],
        [0, .120, .152],
        [.244, .120, .152],
        [.244 + .213, .120 - .093, .152],
        [.244 + .213, .120 - .093 + .083, .152],
        [.540, .120 - .093 + .083, .152],
    ], dtype=float)
    # Shift every axis point by the base offset.
    q += np.array([-.150, .150, 0.01])
    # Spatial screw axes, one per row: S_i = [w_i, -w_i x q_i].
    S = np.hstack([w, np.cross(-w, q)])
    # print(S)
    # Home configuration of the end effector.
    M = np.array([
        [0, -1, 0, .540 - .15],
        [0, 0, -1, .120 - .093 + .083 + .082 + .059 + 0.15],
        [1, 0, 0, .152 + .0535 + 0.01],
        [0, 0, 0, 1],
    ], dtype=float)
    # print(M)
    # ==============================================================#
    return M, S
"""
Function that calculates encoder numbers for each motor
"""
def lab_fk(theta1, theta2, theta3, theta4, theta5, theta6):
    """Forward kinematics: print the end-effector pose for the six joint
    angles (radians) and return the joint command values.

    Uses the product-of-exponentials formula with the screw axes from
    Get_MS(); PI comes from lab3_header.
    """
    def skew_sym(screw):
        # Build the 4x4 matrix form [S] of a 6-vector screw axis, suitable
        # for the matrix exponential expm.
        m = np.zeros((4,4))
        [w1, w2, w3, v1, v2, v3] = [element for element in screw]
        m[0] = [0, -w3, w2, v1]
        m[1] = [w3, 0, -w1, v2]
        m[2] = [-w2, w1, 0, v3]
        m[3] = [0, 0, 0, 0]
        return m
    # Initialize the return_value
    return_value = [None, None, None, None, None, None]
    # =========== Implement joint angle to encoder expressions here ===========
    print("Foward kinematics calculated:\n")
    # =================== Your code starts here ====================#
    theta = np.array([theta1,theta2,theta3,theta4,theta5,theta6])
    T = np.eye(4)
    M, S = Get_MS()
    # Product of exponentials: T = e^[S1]t1 * ... * e^[S6]t6 * M
    for i in range(len(S)):
        T = np.dot(T, expm(skew_sym(S[i])*theta[i]))
    T = np.dot(T, M)
    # ==============================================================#
    print(str(T) + "\n")
    # NOTE(review): the +PI and -PI/2 offsets presumably align the model's
    # zero pose with the robot's physical zero — confirm against lab3_header.
    return_value[0] = theta1 + PI
    return_value[1] = theta2
    return_value[2] = theta3
    return_value[3] = theta4 - (0.5*PI)
    return_value[4] = theta5
    return_value[5] = theta6
    return return_value
Get_MS()  # module-level smoke call: builds (and discards) the kinematic model on import
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
# @st.cache
class plot:
    """Streamlit/seaborn helpers rendering Gapminder-style dataset summaries."""
    # @st.cache(suppress_st_warning=False)
    def Count_Record(self,data):
        """Show the head of the dataset and a per-region record count."""
        sns.set(font_scale=1.5)
        st.header("Overview of dataset.")
        st.write(data.head(10))
        fig, ax = plt.subplots(figsize=(15,10))
        region_count = sns.countplot(data=data, x="region")
        region_count.set_xticklabels(region_count.get_xticklabels(), rotation=20)
        st.pyplot(fig)
    # @st.cache(suppress_st_warning=False)
    def General_Overview(self,data):
        """Bar-plot the yearly mean of each indicator across the whole data."""
        sns.set(font_scale=1.0)
        fig, ax = plt.subplots(5, 1, figsize=(30, 40), )
        col = ['fertility', 'life', 'population', 'child_mortality', 'gdp']
        for index, col1 in enumerate(col):
            print(index, col1)
            mean1 = data.groupby("Year")[col1].mean()
            mean_plt = sns.barplot(x=mean1.index, y=mean1.values, ax=ax[int(index)])
            mean_plt.set(xlabel="Year")
            mean_plt.set_ylabel(col1, fontsize=50)
            mean_plt.set_yticklabels(mean_plt.get_ylabel(),size = 10)
            plt.setp(mean_plt.get_xticklabels(), rotation=45)
            fig.tight_layout(pad=2.0)
            print("")
        st.pyplot(fig)
    # @st.cache(suppress_st_warning=False)
    def Region_vs_All(self,data,Name=None):
        """Mean of each indicator per region, or per year for region *Name*."""
        sns.set(font_scale=1)
        if Name==None:
            fig, ax = plt.subplots(5, 1, figsize=(25, 35))
            col = ['fertility', 'life', 'population', 'child_mortality', 'gdp']
            for index, col1 in enumerate(col):
                print(index, col1)
                mean1 = data.groupby("region")[col1].mean()
                mean_plt = sns.barplot(x=mean1.index, y=mean1.values, ax=ax[int(index)])
                mean_plt.set_ylabel(col1, fontsize=50)
                plt.setp(mean_plt.get_xticklabels(), rotation=10)
                fig.tight_layout(pad=2.0)
            st.pyplot(fig)
        else:
            sub_data = data.loc[data.region == Name]
            fig, ax = plt.subplots(5, 1, figsize=(30, 40))
            col = ['fertility', 'life', 'population', 'child_mortality', 'gdp']
            for index, col1 in enumerate(col):
                print(index, col1)
                mean1 = sub_data.groupby("Year")[col1].mean()
                mean_plt = sns.barplot(x=mean1.index, y=mean1.values, ax=ax[int(index)])
                mean_plt.set_ylabel(col1, fontsize=50)
                plt.setp(mean_plt.get_xticklabels(), rotation=45)
                fig.tight_layout(pad=2.0)
            st.pyplot(fig)
    def Region_vs_Box(self,data):
        """Boxplot each indicator by region.

        NOTE(review): the figure is built but never passed to st.pyplot, so
        nothing is rendered in Streamlit — confirm whether that is intended.
        """
        print("Provide some indication of the data's symmetry and skewness." +
        "Unlike many other methods of data display.\nBoxplots show outliers")
        col = ['fertility', 'life', 'population', 'child_mortality', 'gdp']
        fig, ax = plt.subplots(5, 1, figsize=(15, 25))
        for index, col1 in enumerate(col):
            sns.boxplot(data=data, x="region", y=col1, orient="v", ax=ax[index])
    # @st.cache(suppress_st_warning=False)
    def County_wise_Analysis(self,data, Name):
        """Line-plot the indicators over time for the country *Name*."""
        # sns.set(font=30)
        sns.set(font_scale=3)
        sub_data = data.loc[data.Country == Name]
        if sub_data.shape[0] == 0:
            return st.write("Please check the spelling of country..")
        fig, ax = plt.subplots(3, 2, figsize=(30, 35))
        sns.lineplot(data=sub_data, x="Year", y="fertility", ax=ax[0][0])
        sns.lineplot(data=sub_data, x="Year", y="life", ax=ax[0][1])
        sns.lineplot(data=sub_data, x="Year", y="population", ax=ax[1][0])
        sns.lineplot(data=sub_data, x="Year", y="child_mortality", ax=ax[1][1])
        sns.lineplot(data=sub_data, x="Year", y="gdp", ax=ax[2][0])
        print("In Country...")
        st.pyplot(fig)
    # sns.set(font_scale=None)
# @st.cache(suppress_st_warning=False)
class main_class:
    """Dispatch a menu selection to the matching ``plot`` helper."""

    def main(self,data,input1=None,input2=None):
        """Route ``input1`` to the corresponding plotting method.

        ``input2`` carries the secondary selection (region or country name)
        where relevant.  Any exception is caught and shown in the Streamlit
        UI instead of propagating.
        """
        try:
            obj=plot()
            if input1=="Select":
                return obj.Count_Record(data)
            if input1=="General Overview":
                return obj.General_Overview(data)
            elif input1 == "region wise":
                return obj.Region_vs_All(data,input2)
            elif input1 == "Based On Country":
                return obj.County_wise_Analysis(data,input2)
        except Exception as e:
            st.write("Exception",e)
|
import matplotlib.pyplot as plt
from math import sqrt
from typing import Tuple, List
import numpy
def load_data(file_path: str = 'files/data2.txt') -> Tuple[List[int], List[int], List[float]]:
    """Load the housing data file and split it into its three columns.

    Each line of the file is ``house_length,rooms_number,price``.

    Args:
        file_path: Path of the data file.  Defaults to the original
            hard-coded location, so existing callers keep working; the
            parameter makes the loader reusable and testable.

    Returns:
        Tuple ``(house_length, rooms_number, houses_price)``.
    """
    house_length: List[int] = []
    rooms_number: List[int] = []
    houses_price: List[float] = []
    with open(file_path) as file:
        # Iterate the file lazily instead of materializing readlines().
        for line in file:
            h_l, r_n, h_p = line.split(',')
            house_length.append(int(h_l))
            rooms_number.append(int(r_n))
            houses_price.append(float(h_p))
    return house_length, rooms_number, houses_price
def standard_deviation(samples: List[int], mean: float) -> float:
    """Return the population standard deviation of ``samples``.

    Args:
        samples: The observed values.
        mean: Their arithmetic mean, precomputed by the caller.

    Returns:
        The population (not sample) standard deviation.
    """
    squared_deviations = [(value - mean) ** 2 for value in samples]
    return sqrt(sum(squared_deviations) / len(samples))
def sub_task_2_1(house_length: List[int], rooms_number: List[int]) -> Tuple[List[float], List[float]]:
    """Standardize both features to zero mean and unit standard deviation.

    Args:
        house_length: House sizes.
        rooms_number: Room counts.

    Returns:
        The two feature lists normalized as (x - mean) / std and rounded
        to two decimal places.
    """
    feature_count = len(house_length)
    # Arithmetic means of each feature.
    h_l_mean = sum(house_length) / feature_count
    r_n_mean = sum(rooms_number) / feature_count
    # Population standard deviations via the shared helper.
    h_l_std = standard_deviation(house_length, h_l_mean)
    r_n_std = standard_deviation(rooms_number, r_n_mean)
    # Standardize each sample, rounding for presentation.
    normalized_lengths = [round((value - h_l_mean) / h_l_std, 2) for value in house_length]
    normalized_rooms = [round((value - r_n_mean) / r_n_std, 2) for value in rooms_number]
    return normalized_lengths, normalized_rooms
def h_theta(theta_0: float, theta_1: float, theta_2: float, feat_x_0: float, feat_x_1: float) -> float:
    """Evaluate the plane hypothesis h(x) = theta_0 + theta_1*x0 + theta_2*x1.

    Args:
        theta_0: Intercept term.
        theta_1: Weight of the first feature.
        theta_2: Weight of the second feature.
        feat_x_0: Value of the first feature.
        feat_x_1: Value of the second feature.

    Returns:
        The estimated y on the plane.
    """
    first_contribution = theta_1 * feat_x_0
    second_contribution = theta_2 * feat_x_1
    return theta_0 + first_contribution + second_contribution
def cost_function(theta_0: float, theta_1: float, theta_2: float, x_0_sample: List[float], x_1_sample: List[float],
                  y_sample: List[int], sample_size: int) -> float:
    """Mean squared error cost J(theta) for the plane hypothesis.

    Args:
        theta_0: Intercept.
        theta_1: Weight of the first feature.
        theta_2: Weight of the second feature.
        x_0_sample: First-feature values.
        x_1_sample: Second-feature values.
        y_sample: Target values.
        sample_size: Number of samples to use.

    Returns:
        J = sum((h(x_i) - y_i)^2) / (2 * sample_size).
    """
    # The hypothesis theta_0 + theta_1*x0 + theta_2*x1 is inlined here.
    residuals = (
        theta_0 + theta_1 * x_0_sample[i] + theta_2 * x_1_sample[i] - y_sample[i]
        for i in range(sample_size)
    )
    return sum(r * r for r in residuals) / (2 * sample_size)
def derivate_cost_function_theta_0(theta_0: float, theta_1: float, theta_2: float, x_0_sample: List[float],
                                   x_1_sample: List[float], y_sample: List[float], sample_size: int) -> float:
    """Partial derivative of the cost J with respect to theta_0.

    Args:
        theta_0: Intercept.
        theta_1: Weight of the first feature.
        theta_2: Weight of the second feature.
        x_0_sample: First-feature values.
        x_1_sample: Second-feature values.
        y_sample: Target values.
        sample_size: Number of samples to use.

    Returns:
        Mean residual: sum(h(x_i) - y_i) / sample_size.
    """
    residual_total = sum(
        theta_0 + theta_1 * x_0_sample[i] + theta_2 * x_1_sample[i] - y_sample[i]
        for i in range(sample_size)
    )
    return residual_total / sample_size
def derivate_cost_function_theta_1(theta_0: float, theta_1: float, theta_2: float, x_0_sample: List[float],
                                   x_1_sample: List[float], y_sample: List[float], sample_size: int) -> float:
    """Partial derivative of the cost J with respect to theta_1.

    Args:
        theta_0: Intercept.
        theta_1: Weight of the first feature.
        theta_2: Weight of the second feature.
        x_0_sample: First-feature values.
        x_1_sample: Second-feature values.
        y_sample: Target values.
        sample_size: Number of samples to use.

    Returns:
        sum((h(x_i) - y_i) * x0_i) / sample_size.
    """
    weighted_total = sum(
        (theta_0 + theta_1 * x_0_sample[i] + theta_2 * x_1_sample[i] - y_sample[i]) * x_0_sample[i]
        for i in range(sample_size)
    )
    return weighted_total / sample_size
def derivate_cost_function_theta_2(theta_0: float, theta_1: float, theta_2: float, x_0_sample: List[float],
                                   x_1_sample: List[float], y_sample: List[float], sample_size: int) -> float:
    """Partial derivative of the cost J with respect to theta_2.

    (The original docstring said theta_1 -- a copy-paste slip; this function
    weights the residual by the *second* feature.)

    Args:
        theta_0: Intercept.
        theta_1: Weight of the first feature.
        theta_2: Weight of the second feature.
        x_0_sample: First-feature values.
        x_1_sample: Second-feature values.
        y_sample: Target values.
        sample_size: Number of samples to use.

    Returns:
        sum((h(x_i) - y_i) * x1_i) / sample_size.
    """
    weighted_total = sum(
        (theta_0 + theta_1 * x_0_sample[i] + theta_2 * x_1_sample[i] - y_sample[i]) * x_1_sample[i]
        for i in range(sample_size)
    )
    return weighted_total / sample_size
# Batch gradient descent driver.
def sub_task_2_2(epoch: int, alpha: float = 0.01) -> Tuple[List[float], float, float, float]:
    """Fit the linear regression by batch gradient descent.

    Args:
        epoch: Number of gradient-descent iterations.
        alpha: Learning rate.

    Returns:
        The per-epoch cost history plus the final theta_0, theta_1, theta_2.
    """
    # All three coefficients start at 1, as in the original experiment.
    theta_0: float = 1
    theta_1: float = 1
    theta_2: float = 1
    x_0_sample, x_1_sample, y_sample = load_data()
    x_0_sample, x_1_sample = sub_task_2_1(x_0_sample, x_1_sample)
    sample_size = len(x_0_sample)
    cost_values: List[float] = []
    for _ in range(epoch):
        # Evaluate all three gradients with the *current* thetas, then update
        # simultaneously (tuple assignment preserves batch semantics).
        gradient_0 = derivate_cost_function_theta_0(theta_0, theta_1, theta_2, x_0_sample, x_1_sample,
                                                    y_sample, sample_size)
        gradient_1 = derivate_cost_function_theta_1(theta_0, theta_1, theta_2, x_0_sample, x_1_sample,
                                                    y_sample, sample_size)
        gradient_2 = derivate_cost_function_theta_2(theta_0, theta_1, theta_2, x_0_sample, x_1_sample,
                                                    y_sample, sample_size)
        theta_0, theta_1, theta_2 = (theta_0 - alpha * gradient_0,
                                     theta_1 - alpha * gradient_1,
                                     theta_2 - alpha * gradient_2)
        cost_values.append(cost_function(theta_0, theta_1, theta_2, x_0_sample, x_1_sample, y_sample, sample_size))
    # Return the history together with the final fit.
    return cost_values, theta_0, theta_1, theta_2
def task_3():
    """Solve the regression in closed form with the normal equation.

    Builds the design matrix ``X`` (a leading column of ones for the
    intercept, then the two raw -- unnormalized -- features) and computes
    theta = (X^T X)^-1 X^T y.

    Returns: theta as a 3x1 matrix (intercept and the two feature weights).
    (The original annotation said None, but the function returns theta.)
    """
    # Load the raw columns to assemble the matrices from.
    x_1_sample, x_2_sample, y_sample = load_data()
    # Build the rows of the design matrix: [1, x1, x2].
    X = []
    for index, _ in enumerate(x_1_sample):
        X.append([1, x_1_sample[index], x_2_sample[index]])
    # Assemble X as a matrix.
    # NOTE(review): numpy.asmatrix is discouraged in modern NumPy; consider
    # numpy.asarray (callers index theta[i][0], which works for both).
    X = numpy.asmatrix(X)
    y_sample = numpy.array(y_sample).reshape(len(y_sample), 1)
    # theta = (X^T X)^-1 X^T y
    first_factor = numpy.linalg.inv(numpy.matmul(X.T, X))
    second_factor = numpy.matmul(first_factor, X.T)
    theta = numpy.matmul(second_factor, y_sample)
    return theta
# Closed-form solution via the normal equation.
thetas = task_3()
# Gradient-descent solution and its per-epoch cost history.
cost_values, theta_0, theta_1, theta_2 = sub_task_2_2(1000)
print(theta_0, theta_1, theta_2)
# NOTE(review): the two theta sets are not directly comparable -- gradient
# descent runs on normalized features while task_3 uses the raw ones.
print(thetas[0][0], thetas[1][0], thetas[2][0])
# epochs = [i for i in range(1, 1000+1)]
# plt.plot(epochs, cost_values)
# plt.title('Função custo em relação a quantidade de épocas. Alpha = 0.1')
# plt.show()
|
#MenuTitle: Delete guidelines
# -*- coding: utf-8 -*-
__doc__="""
Delete all local guidelines in selected glyphs.
"""
import GlyphsApp
selectedLayers = Glyphs.font.selectedLayers
def process( thisLayer ):
    # Remove every local guideline from the given layer.
    thisLayer.guideLines = []
for thisLayer in selectedLayers:
    thisGlyph = thisLayer.parent
    # print() with one pre-formatted string produces identical output on
    # Python 2 (Glyphs 2) and Python 3 (Glyphs 3); the original
    # `print "...", x` statement is a SyntaxError under Python 3.
    print("Deleting guidelines in: %s" % thisGlyph.name)
    # Group the change into a single undo step per glyph.
    thisGlyph.beginUndo()
    process( thisLayer )
    thisGlyph.endUndo()
|
from random import *
import eval
def generate_quiz():
    """Build one quiz item: two operands, an operator, and a shown result.

    The shown result equals the true result plus a random offset drawn from
    [-1, 0, 0, 1], so half the time it is deliberately off by one.

    Returns: [x, y, op, result]
    """
    x = randint(1, 10)
    y = randint(1, 10)
    op = choice(["+", "-", "*", "/"])
    true_result = eval.cal(x, y, op)
    offset = choice([-1, 0, 0, 1])
    return [x, y, op, true_result + offset]
def check_answer(x, y, op, result, user_choice):
    """Return True when ``user_choice`` correctly judges the shown result.

    ``user_choice`` is the player's claim that ``result`` is the true value
    of ``x op y``.  A non-boolean ``user_choice`` falls through both
    branches and keeps the default of True, mirroring the original code.
    """
    shown_result_is_true = (result == eval.cal(x, y, op))
    boo = True
    if user_choice == True:
        boo = shown_result_is_true
    elif user_choice == False:
        boo = not shown_result_is_true
    return boo
# sound
|
import os
from django.contrib.gis.utils import LayerMapping
from . models import CadastralCommunity, cadastralcommunity_mapping
# Absolute path to the cadastral-community shapefile shipped with the app
# (app_dir/data/iad/AT.shp).
cc_shps = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__), 'data', 'iad', 'AT.shp')
)
def import_shapes(verbose=True):
    """Import the shapefile into the CadastralCommunity model.

    Uses GeoDjango's LayerMapping with the field mapping declared next to
    the model; geometries are transformed to the model's SRID on the way in.
    """
    lm = LayerMapping(
        CadastralCommunity, cc_shps, cadastralcommunity_mapping,
        transform=True, encoding='utf-8',
    )
    lm.save(strict=True, verbose=verbose)
|
from django.contrib import admin
from commanderie.models import Bureau, Chevalier, Commanderie
# Register your models here.
# Expose the three commanderie models in the Django admin with the
# default ModelAdmin options.
admin.site.register(Commanderie)
admin.site.register(Chevalier)
admin.site.register(Bureau)
|
#!/usr/bin/python3
# tests evaluation of expressions in ranges
x = 5
# Both loop bounds are expressions: from just above half of x up to
# (but not including) twice x.
lower_bound = (x // 2) + 1
upper_bound = x * 2
for value in range(lower_bound, upper_bound):
    print(value)
|
# Read integers until the user answers anything other than 'S' (yes),
# then report how many were read, their mean, and the extremes.
perg = 'S'
cont = soma = media = 0
while perg == 'S':
    num = int(input('Digite um número: '))
    perg = str(input('Deseja continuar? [S/N]: ')).upper().strip()
    soma += num
    cont += 1
    if cont == 1:
        # The first number seeds both extremes.
        maior = menor = num
    else:
        if num > maior:
            maior = num
        if num < menor:
            menor = num
    # Running mean; only its final value is reported below.
    media = soma / cont
print('Foram digitados {} números e a média é {:.2f}'.format(cont, media))
print('O maior valor digitado foi {} e o menor é {}'.format(maior, menor))
|
import os
from urllib.parse import urlparse
import pytest
import factory
from demo_app import factories, models
from tests.utils import init_postgresql_database, drop_postgresql_database
class AppConfigTest(factories.AppConfig):
    """App configuration used by the test suite (DB URI from ``DB_URI``).

    NOTE(review): SQLAlchemy 1.4+ rejects the 'postgres://' scheme in favor
    of 'postgresql://' -- confirm against the pinned SQLAlchemy version.
    """
    DEBUG = False
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DB_URI', 'postgres://flask:flask@localhost/flask_test')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
@pytest.fixture(scope='session')
def app(request):
    """Session-scoped Flask app backed by a freshly created test database.

    Parses the connection parameters out of the configured database URI,
    creates the database and schema, and registers a finalizer that drops
    the database again once the test session ends.
    """
    app_ = factories.create_app(AppConfigTest)
    db_uri = urlparse(app_.config['SQLALCHEMY_DATABASE_URI'])
    pg_host = db_uri.hostname
    pg_port = db_uri.port
    pg_user = db_uri.username
    pg_password = db_uri.password
    pg_db = db_uri.path[1:]  # strip the leading '/'
    init_postgresql_database(pg_user, pg_password, pg_host, pg_port, pg_db)
    factories.configure_db(app_)
    models.db.create_all()
    @request.addfinalizer
    def drop_database():
        drop_postgresql_database(pg_user, pg_password, pg_host, pg_port, pg_db)
    return app_
@pytest.fixture(scope='session')
def _db(app):
    """
    Provide the transactional fixtures with access to the database via a Flask-SQLAlchemy
    database connection.

    NOTE(review): the ``_db`` name looks like the hook expected by
    pytest-flask-sqlalchemy -- confirm that plugin is in use.
    """
    return models.db
@pytest.fixture(scope='function')
def base_factory(db_session):
    """Abstract SQLAlchemy factory bound to the per-test session."""
    class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
        """Base model factory."""
        class Meta:
            abstract = True
            sqlalchemy_session = db_session
            # Flush (not commit) so the transactional fixture can roll back.
            sqlalchemy_session_persistence = 'flush'
    return BaseFactory
@pytest.fixture(scope='function')
def category_factory(base_factory):
    """Factory for Category rows with sequential unique names."""
    class CategoryFactory(base_factory):
        class Meta:
            model = models.Category
        name = factory.Sequence(lambda n: u'Category %d' % n)
    return CategoryFactory
@pytest.fixture(scope='function')
def post_factory(base_factory, category_factory):
    """Factory for Post rows; each post gets its own Category subfactory."""
    class PostFactory(base_factory):
        class Meta:
            model = models.Post
        title = factory.Sequence(lambda n: u'Post Title %d' % n)
        body = factory.Sequence(lambda n: u'Post Body %d' % n)
        category = factory.SubFactory(category_factory)
    return PostFactory
|
#Going to use Flask, going to upload through heroku
# Text is done through HTML,
# Embedding is done through Trinket.io
# Bootstrap 4 should make it look nice
#To activate virtual env, type virtual/Scripts/activate
#
# https://virtual-tic-tac-toe.herokuapp.com/ | https://git.heroku.com/virtual-tic-tac-toe.git
#
# To deactivate virtual environment, simply type deactivate in the console
from flask import Flask, render_template
from flask import Flask  # NOTE(review): duplicate import; the line above already imports Flask.
app = Flask(__name__)
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/about")
def about():
    """Render the about page."""
    return render_template("about.html")
@app.route("/game")
def game():
    """Render the tic-tac-toe game page (embedded via Trinket, per header)."""
    return render_template("game.html")
#Each one of these routes is to a separate part of the page
if __name__ == "__main__":
    # Debug mode: auto-reload and in-browser tracebacks (development only).
    app.run(debug=True)
from functools import partial
import logging
import os
from kaon.parsers import KaonLECParser
from pion.parsing.lec import PionLECParser
from su2.models import PionLECSU2, KaonLECSU2
log = logging.getLogger(__name__)
def parse_pion_lecs_from_folder(folder):
    """Parse every pion-LEC file in ``folder`` and bulk-insert the records
    as PionLECSU2 documents."""
    all_data = PionLECParser().get_from_folder(folder)
    bulk_list = [PionLECSU2(**d) for d in all_data]
    PionLECSU2.objects.insert(bulk_list)
# Preconfigured loader for the 24c SU(2) pion LECs.
get_24c_pion_lecs_su2 = partial(parse_pion_lecs_from_folder,
                                os.path.join('data', '24c', 'lec',
                                             'pion_lecs', 'su2'))
def parse_kaon_lecs_from_folder(folder, m_s):
    """Parse kaon-LEC files in ``folder``, stamp each record with the
    strange-quark mass ``m_s``, and bulk-insert them as KaonLECSU2."""
    all_data = KaonLECParser().get_from_folder(folder)
    for d in all_data:
        d['m_s'] = m_s
    bulk_list = [KaonLECSU2(**d) for d in all_data]
    KaonLECSU2.objects.insert(bulk_list)
# Preconfigured loaders for the two strange-quark masses.
get_24c_kaon_lecs_su2_02 = partial(parse_kaon_lecs_from_folder,
                                   os.path.join('data', '24c', 'lec',
                                                'kaon_lecs', 'su2', '0.02'),
                                   0.02)
get_24c_kaon_lecs_su2_03 = partial(parse_kaon_lecs_from_folder,
                                   os.path.join('data', '24c', 'lec',
                                                'kaon_lecs', 'su2', '0.03'),
                                   0.03)
# Registry consumed by the import driver: each entry names a parser and the
# callable that runs it.
# NOTE(review): only the first entry sets 'enabled'; confirm whether a
# missing 'enabled' key is treated as True by the consumer.
parsers = [
    {
        'name': '24c SU2 Pion LECS',
        'parser': get_24c_pion_lecs_su2,
        'enabled': False,
    },
    {
        'name': '24c SU2 Kaon LECS 0.02',
        'parser': get_24c_kaon_lecs_su2_02,
    },
    {
        'name': '24c SU2 Kaon LECS 0.03',
        'parser': get_24c_kaon_lecs_su2_03,
    },
]
#!/usr/bin/env python3
import collections
import json
import os
from urllib import parse
from flask import (Flask,
                   render_template,
                   request)
import redis
# Redis connection: REDISCLOUD_URL when deployed, local redis otherwise.
url = parse.urlparse(os.environ.get('REDISCLOUD_URL', "redis://localhost:6379/0"))
db = redis.Redis(host=url.hostname, port=url.port, password=url.password)
app = Flask(__name__)
DEBUG = True
HOST = '0.0.0.0'
PORT = 5000
# Keep no more than 1000 records per device, for now:
CAPPED_SIZE = 1000
# Key layout: a set of known device ids plus one list of CSV rows per device.
devices_key = "devices"
metrics_template = "metrics:%s"
# TODO Add a decorator that 403s non-http (X-Forwarded-Proto != https)
# requests to tell the user to retry the request on HTTPS.
@app.route("/")
def hello():
    """Render the dashboard index page."""
    context = {}
    return render_template("index.html", **context)
@app.route("/metrics", methods=["GET"])
def metrics(deviceid="device1"):
    """Return the first 21 stored readings for a device as JSON lists.

    NOTE(review): the route declares no <deviceid> segment, so the default
    'device1' is always used -- confirm whether per-device routing was
    intended.  lrange(0, 20) reads from the head (oldest end) of the list.
    """
    data = {
        "moisture": [],
        "temperature": [],
        "light": [],
    }
    metrics_key = metrics_template % (deviceid,)
    metrics = db.lrange(metrics_key, 0, 20)
    for row in metrics:
        # Rows are stored as "moisture,temperature,light" CSV strings.
        m, t, l = row.decode("utf-8").split(",")
        data['moisture'].append(int(m))
        data['temperature'].append(float(t))
        data['light'].append(int(l))
    return app.response_class(
        json.dumps(data), status=200,
        mimetype='application/json'
    )
@app.route("/metrics", methods=["POST"])
def new_metric(deviceid="device1"):
    """Store one reading (query params m, t, l) for a device.

    Missing parameters fall back to sentinel defaults (-1 / -100.0).
    """
    moisture = request.args.get('m', -1)
    if moisture:
        moisture = int(moisture)
    temperature = request.args.get('t', -100.0)
    if temperature:
        temperature = float(temperature)
    light = request.args.get('l', -1)
    if light:
        light = int(light)
    metrics_key = metrics_template % (deviceid,)
    db.sadd(devices_key, deviceid)
    db.rpush(metrics_key, "%i,%f,%i"%(moisture, temperature, light))
    # NOTE(review): rpush appends at the tail, but ltrim keeps indices
    # 0..CAPPED_SIZE (the *oldest* entries) -- once full, new readings are
    # discarded.  Confirm whether ltrim(-CAPPED_SIZE, -1) was intended.
    db.ltrim(metrics_key, 0, CAPPED_SIZE)
    print(f"received metric: moisture {moisture}, temperature {temperature}, light {light}")
    return "OK", 200
if __name__=="__main__":
    app.run(HOST, PORT, debug=DEBUG)
|
from flask import Flask, render_template, request, redirect, url_for, flash
from config import app
from model import Contacts, db
@app.route('/')
@app.route('/index.html')
def index():
    """Render the landing page (reachable at both / and /index.html)."""
    return render_template("index.html")
@app.route('/about_me.html')
def about_me():
    """Render the about page."""
    return render_template("about_me.html")
@app.route('/message_send.html')
def done():
    """Render the message-sent confirmation page."""
    return render_template("message_send.html")
@app.route('/contact_me.html', methods=['POST', 'GET'])
def contact_me():
    """Show the contact form and persist submitted messages.

    On POST, requires name, surname and message; flashes an error when any
    is missing, otherwise stores a Contacts row and shows the confirmation
    page.  GET (or a failed POST) re-renders the form.
    """
    if request.method == 'POST':
        if not request.form['name'] or not request.form['surname'] or not request.form['message']:
            flash("Please enter all required data", 'error')
        else:
            new_message = Contacts(
                request.form['name'], request.form['surname'], request.form['message'])
            db.session.add(new_message)
            db.session.commit()
            flash("Message successfully send.")
            return render_template("/message_send.html")
    return render_template("/contact_me.html")
if __name__ == "__main__":
    # Create any missing tables before serving (idempotent).
    db.create_all()
    app.run(debug=True)
|
#!/usr/bin/env python
from sys import argv
from parsers import parse_fasta, LongFastaID
from writers import write_fasta
if __name__ == "__main__":
    # Usage: script.py <input.fasta> <output.fasta>
    fasta = argv[1]
    out_file = argv[2]
    fasta_dict = parse_fasta(fasta)
    prot_id_to_seq = {}
    # Re-key every record by its protein id, asserting ids are unique.
    for fasta_id, sequence in fasta_dict.items():
        fasta_id = LongFastaID(fasta_id)
        prot_id = fasta_id.protein_id
        assert prot_id not in prot_id_to_seq
        prot_id_to_seq[prot_id] = sequence
    write_fasta(prot_id_to_seq, out_file)
|
import json
import Queue
import settings
import time
import threading
import yaml
import commands
import serial
import sys
import spidev
import RPi.GPIO as GPIO
#from thirtybirds.Logs.main import Exception_Collector
from thirtybirds.Network.manager import init as network_init
def network_status_handler(msg):
    # Log network status updates (Python 2 print statement).
    print "network_status_handler", msg
def network_message_handler(msg):
    # Unpack an incoming pub/sub message: [topic, yaml payload].
    # The payload decodes to (host, sensor, data); errors are logged, not raised.
    try:
        #print "network_message_handler", msg
        topic = msg[0]
        host, sensor, data = yaml.safe_load(msg[1])
        # print "Exception Received:", ex
    except Exception as e:
        print "exception in network_message_handler", e
# Module-level handle to the thirtybirds network client (set by init()).
network = None
def init(HOSTNAME):
    """Create the network client and subscribe to the topics this node uses."""
    global network
    network = network_init(
        hostname=HOSTNAME,
        role="client",
        discovery_multicastGroup=settings.discovery_multicastGroup,
        discovery_multicastPort=settings.discovery_multicastPort,
        discovery_responsePort=settings.discovery_responsePort,
        pubsub_pubPort=settings.pubsub_pubPort,
        message_callback=network_message_handler,
        status_callback=network_status_handler
    )
    network.subscribe_to_topic("system") # subscribe to all system messages
    network.subscribe_to_topic("exceptions")
    network.subscribe_to_topic("Eyeballs")
######## MOTOR CONTROL ##########
class Controller():
    """Serial driver for a motor controller on /dev/ttyUSB<deviceId>."""
    def __init__(self, deviceId=0):
        self.deviceId = deviceId
        self.open = False  # True once the serial port is usable
        self.devicePath = "/dev/ttyUSB" + str(deviceId)
        try:
            self.serial = serial.Serial(
                port=self.devicePath,
                baudrate=115200,
                bytesize=serial.EIGHTBITS,
                #startbits=serial.STARTBITS_ONE,
                stopbits=serial.STOPBITS_ONE,
                parity=serial.PARITY_NONE,
            )
            #self.serial.open()
            self.open = True
            print "Serial connected at ", self.devicePath
        except:
            # The controller stays constructed but inert when the port is
            # missing; setSpeed() then becomes a no-op.
            self.open = False
            print("could not open device at ", self.devicePath)
    def setSpeed(self, channel, rpm):
        # Send a '!G <channel> <rpm>' command; no-op when not connected.
        if self.open:
            cmd = '!G ' + str(channel) + ' '+str(rpm)+'\r'
            print cmd
            self.serial.write(cmd)
        else:
            print 'Serial not connected'
            pass
controller = Controller()
######## ABSOLUTE ENCODER ###########
class AMT203():
    """SPI driver for an AMT203 absolute encoder; one GPIO pin is held high
    as its enable/select line."""
    def __init__(self, bus=0, deviceId=0, pin=3):
        self.deviceId = deviceId
        self.bus = bus
        self.pin = pin  # GPIO pin driven high while the encoder is active
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, GPIO.HIGH)
        try:
            self.spi = spidev.SpiDev()
            self.spi.open(self.bus,self.deviceId)
            self.open = True
            print "SPI connected. Device id: ", self.deviceId
        except:
            self.open = False
            print "Could not connect to SPI device"
    def clean_buffer(self):
        # Clock out bytes until the device answers 0xA5 (165), which this
        # code treats as the idle/empty-buffer marker.
        first_result = self.spi.xfer([0x00],0,20)
        while first_result[0] != 165:
            first_result = self.spi.xfer([0x00],0,20)
        print "Buffer empty"
    def get_position(self):
        # Send the 0x10 command, poll until it is echoed back, then read
        # the two position bytes (MSB first) and combine them.
        # NOTE(review): the combined position is printed but not returned --
        # confirm whether callers expect a return value.
        first_result = self.spi.xfer([0x10],0,20)
        while first_result[0] != 16:
            first_result = self.spi.xfer([0x00],0,20)
        msb_result = self.spi.xfer([0x00],0,20)
        lsb_result = self.spi.xfer([0x00],0,20)
        print "MSB: %s | LSB: %s " % (msb_result, lsb_result)
        # msb_bin = bin(msb_result[0]<<8)[2:]
        # lsb_bin = bin(lsb_result[0])[2:]
        final_result = (msb_result[0]<<8 | lsb_result[0])
        print "Final: ", final_result
        self.clean_buffer()
    def set_zero(self):
        # Send the 0x70 (set zero) command and wait for the 0x80 ack, then
        # pulse the enable pin low/high so the new offset takes effect.
        first_result = self.spi.xfer([0x70],0,20)
        while first_result[0] != 128:
            first_result = self.spi.xfer([0x00],0,20)
        print "Zero set was successful and the new position offset is stored in EEPROM"
        self.clean_buffer()
        GPIO.output(self.pin, GPIO.LOW)
        time.sleep(0.1)
        GPIO.output(self.pin, GPIO.HIGH)
amt = AMT203()
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from collections import Counter
from prettytable import PrettyTable
def create_sample(n, a, b):
    """Draw ``n`` x-values uniformly from [a, b] and evaluate y_function.

    Returns two lists (X, Y), each value rounded to 3 decimal places.
    """
    uniform_draws = np.random.uniform(0, 1, n)
    # Scale the unit draws onto [a, b], then map through the module-level
    # y_function; rounding matches the original presentation.
    X = [round(u * (b - a) + a, 3) for u in uniform_draws]
    Y = [round(y_function(value), 3) for value in X]
    return X, Y
def empirical_func(sample):
    """Plot the empirical CDF of ``sample`` (a Counter of observed values).

    Bug fix: the loop previously read the module-level Counter ``c`` instead
    of the ``sample`` parameter, so the function only worked when called
    exactly as ``empirical_func(c)``.  It now uses its argument throughout.
    """
    total = sum(sample.values())
    keys = sorted(sample.keys())
    p = 0
    # Left tail: F(x) = 0 up to the smallest observed value.
    plt.plot([0, keys[0]], [0, 0], marker='.')
    # One horizontal step per distinct value, accumulating probability mass.
    for i in range(1, len(keys)):
        p += sample.get(keys[i - 1]) / total
        plt.plot([keys[i - 1], keys[i]], [p, p], marker='.')
    plt.grid(True)
def theoretical_func(a, b):
    """Plot the theoretical CDF y = (ln(x) + 5) / 12 over [a, b) in red."""
    xs = np.arange(a, b, 0.1)
    # Vectorized elementwise evaluation; identical values to the original
    # per-element loop.
    ys = (np.log(xs) + 5) / 12
    plt.plot(xs, ys, color='r')
    plt.grid(True)
n = int(input('Input n: '))
# Build y = exp(x) as a fast numpy-callable via sympy.
x = sp.symbols('x')
expression = sp.exp(x)
y_function = sp.lambdify(x, expression, 'numpy')
# Sample x uniformly on [-5, 7]; exp maps that range onto [e^-5, e^7].
a = -5
b = 7
a_x = np.exp(-5)
b_x = np.exp(7)
X, Y = create_sample(n, a, b)
# Print the (x, y) sample as a table.
table = PrettyTable()
table.field_names = ['x', 'y']
for i in range(len(X)):
    table.add_row([X[i], Y[i]])
print(table)
c = Counter(Y)
# "Вариационный ряд" = the variational series (the ordered sample).
print('Вариационный ряд:')
print(sorted(Y))
# Empirical CDF alone, then the theoretical CDF, then both overlaid.
empirical_func(c)
plt.show()
theoretical_func(a_x, b_x)
plt.show()
empirical_func(c)
theoretical_func(a_x, b_x)
plt.show()
|
import pymongo
def mongo_connect():
    """Return a MongoClient for the default localhost server.

    Python 2 code (old ``except Exc, e`` syntax and print statements).
    Returns None implicitly when the connection fails.
    """
    try:
        connection = pymongo.MongoClient()
        print "MongoDB is connected!"
        return connection
    except pymongo.errors.ConnectionFailure, e:
        print "Could not connect to MongoDB: %s" % e
connection = mongo_connect()
db = connection['twitter_stream']
coll = db.my_collection
# Insert one sample document, then read a document back to confirm the
# round trip.
# NOTE(review): collection.insert() is the legacy API; insert_one() is the
# modern equivalent -- confirm the pinned pymongo version.
doc = {"name": "David", "surname": "Gunner", "twitter": "@gunnerjnr84"}
coll.insert(doc)
result = coll.find_one()
print result # {u'twitter': u'@gunnerjnr84', u'_id': ObjectId('5629264db1bae125ac446ba5'), u'surname': u'Gunner', u'name': u'Dave'}
|
#!/usr/bin/python3
"""Lockboxes.
Given a box full of boxes full of keys to open other boxes, determine if all
boxes can be opened.
"""
def canUnlockAll(boxes):
    """canUnlockAll.

    Determine if all boxes inside boxes can be opened.  Box 0 starts
    unlocked; each opened box may contain keys (indices) to other boxes.

    Fixes over the original version:
      * no longer raises IndexError on an empty ``boxes`` list (the old
        code read ``boxes[0]`` before checking the length);
      * no longer mutates the caller's lists (the old code popped every
        key out of each visited box);
      * uses O(1) stack pops instead of O(n) ``pop(0)`` calls.

    Arguments:
    -- boxes: A list of boxes, each a list of keys.

    Return:
    True if all boxes can be opened; False otherwise.
    """
    if not boxes:
        # Vacuously true: there is no box that cannot be opened.
        return True
    opened = {0}
    to_visit = [0]
    while to_visit:
        # Depth-first walk over boxes reachable from box 0.
        for key in boxes[to_visit.pop()]:
            if key not in opened and key < len(boxes):
                opened.add(key)
                to_visit.append(key)
    return len(opened) == len(boxes)
|
# Write a class to hold player information, e.g. what room they are in
# currently.
class Player:
    """A player with a name, a current location, and an inventory.

    Fix: ``items`` previously defaulted to a shared mutable list (the
    classic mutable-default-argument bug), so every Player created without
    an explicit inventory shared one list.  Each player now gets a fresh
    list by default; passing an explicit list behaves exactly as before.
    """

    def __init__(self, name, location, items=None):
        self.name = name
        self.location = location
        # Fresh list per instance unless the caller supplies one.
        self.items = [] if items is None else items

    def change_location(self, direction):
        """Move via WASD keys: w=north, s=south, a=west, d=east."""
        if direction == 'w':
            self.location = self.location.n_to
        elif direction == 's':
            self.location = self.location.s_to
        elif direction == 'a':
            self.location = self.location.w_to
        elif direction == 'd':
            self.location = self.location.e_to

    def action(self, action, item):
        """Take or drop ``item`` in the current room, updating inventory."""
        self.location.take_or_drop_items(action, item)
        if action == 'take':
            if item not in self.items:
                self.items.append(item)
                item.pick_up = True
                item.is_taken(item)
            else:
                print(f'You have picked up {item.name} already')
        elif action == 'drop':
            if item not in self.items:
                print(f'You don\'t have that item')
            else:
                item.pick_up = False
                item.is_taken(item)
                self.items.remove(item)

    def show_items(self):
        """Print each carried item, or a message when empty-handed."""
        for i in range(len(self.items)):
            print(f'You have {self.items[i].name}')
        if len(self.items) == 0:
            print('You don\'t have anything')
|
"""Run an example script to quickly test any SimpliSafe system."""
# pylint: disable=protected-access
import asyncio
from aiohttp import ClientSession
from pikrellcam_python import PiKrellCam
from aiohttp.client_exceptions import ClientError, ClientResponseError
async def exercise_client(
    host: str, port: str, user: str, password: str, websession: ClientSession) -> None:
    """Log in to a PiKrellCam instance and print a few status values.

    Connection and HTTP errors are caught and reported rather than raised.
    """
    print('User:{0} @ {1}'.format(user, host))
    print('========================')
    try:
        camera = await PiKrellCam.login(host,port,user, password, websession)
        await camera.update()
        value = await camera.is_motion_enabled()
        print('Motion Enable is:{0}'.format(value))
        value = await camera.is_recording()
        print('Recording State is:%s' % value)
    except (ClientError, ClientResponseError) as ex:
        print('Unable to connect to PiKrellCam:{0}'.format(str(ex)))
async def main() -> None:
    """Create the aiohttp session and run the example."""
    async with ClientSession() as websession:
        print()
        await exercise_client('10.0.1.4', '8080', 'pi', 'darius', websession)
# NOTE(review): asyncio.get_event_loop() is deprecated for this use since
# Python 3.10; asyncio.run(main()) is the modern equivalent.
asyncio.get_event_loop().run_until_complete(main())
|
import base64
import json
import os
from datetime import datetime
import babel
import numpy as np
import pandas as pd
import pytz
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError, UserError
from odoo.tools import config
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARMA
class KsSalesForecast(models.Model):
    _name = 'ks.sales.forecast'
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _rec_name = 'ks_name'
    _order = 'id desc'
    _description = 'This model is to predict sales on the bases of historical data with trending and seasonal factor'
    # Sequence-assigned record name (see create()).
    ks_name = fields.Char('Name', tracking=True, default=lambda self: _('New'), readonly=True)
    # When True, historical data comes from an uploaded file instead of sale orders.
    ks_is_file = fields.Boolean(default=False, tracking=True)
    ks_file_type = fields.Selection([('csv', 'CSV'), ('xlx', 'Excel')], string=_('File Type'), default='csv',
                                    tracking=True)
    ks_import_file = fields.Binary(string=_('File'), tracking=True)
    ks_file_name = fields.Char(string=_('File Name'), tracking=True)
    # When True, ks_forecast_method overrides the system-wide default method.
    ks_is_method_change = fields.Boolean(default=False, tracking=True)
    ks_forecast_method = fields.Selection([
        ('ar', 'Autoregression (AR)'),
        ('ma', 'Moving Average (MA)'),
        ('arma', 'Autoregressive Moving Average (ARMA)'),
        ('arima', 'Autoregressive Integrated Moving Average (ARIMA)'),
    ], tracking=True)
    # Effective method actually used (record override or system default).
    ks_default_forecast_method = fields.Char(compute='ks_default_forecast_method_value', store=True)
    ks_model = fields.Many2one('ir.model', 'Model', default=lambda self: self.env.ref("sale.model_sale_order"),
                               readonly=True, invisible=True, tracking=True)
    # Historical training window.
    ks_start_date = fields.Datetime(string=_('Start Date'), required=True, tracking=True)
    ks_end_date = fields.Datetime(string=_('End Date'), required=True, tracking=True)
    ks_forecast_base = fields.Selection([('all', 'All Products'), ('product', 'Specific Products')],
                                        string=_('Forecast Base'), default='product', tracking=True)
    ks_product_ids = fields.Many2many('product.product', invisible=True, tracking=True)
    # ARIMA-style hyper-parameters.
    ks_p = fields.Integer(string=_('P Coefficient (Auto Regressive)'))
    ks_d = fields.Integer(string=_('D Coefficient (Integrated)'))
    ks_q = fields.Integer(string=_('Q Coefficient (Moving Average)'))
    # Forecast horizon: ks_forecast_unit steps of ks_forecast_period.
    ks_forecast_unit = fields.Integer(string=_('Forecast Unit'), tracking=True, required=True, default=1)
    ks_forecast_period = fields.Selection([('day', 'Day'), ('month', 'Month'), ('year', 'Year')],
                                          string=_('Forecast Period'), default='month', tracking=True, required=True)
    ks_is_predicted = fields.Boolean()
    ks_chart_data = fields.Text(default=0)
    ks_graph_view = fields.Integer(default=1)
@api.depends('ks_forecast_unit')
@api.onchange('ks_forecast_unit')
def ks_forecast_unit_method(self):
    # Reject zero/negative horizons as soon as the user edits the field.
    # NOTE(review): @api.depends on a non-compute method looks like a no-op
    # in Odoo -- confirm whether the decorator can be dropped.
    if self.ks_forecast_unit < 1:
        raise ValidationError(_('Please Enter a positive non-zero number.'))
@api.model
def create(self, values):
    """Assign a sequence name and pin the effective forecast method."""
    if 'ks_name' not in values or values['ks_name'] == _('New'):
        values['ks_name'] = self.env['ir.sequence'].next_by_code('ks.sales.forecast') or _('New')
    if not values.get('ks_is_method_change'):
        # No per-record override: fall back to the system-wide setting.
        values.update(
            {'ks_default_forecast_method': self.env['ir.config_parameter'].sudo().get_param('ks_forecast_method')})
    elif values.get('ks_forecast_method'):
        values.update({'ks_default_forecast_method': values.get('ks_forecast_method')})
    return super(KsSalesForecast, self).create(values)
def write(self, values):
    """Keep ks_default_forecast_method in sync when the method changes."""
    if values.get('ks_forecast_method'):
        values.update({'ks_default_forecast_method': values.get('ks_forecast_method')})
    return super(KsSalesForecast, self).write(values)
@api.onchange('ks_start_date', 'ks_end_date')
def ks_onchange_dates(self):
    """Validate that the training window is non-empty (start strictly
    before end).

    Fix: corrected the user-facing message typo 'less then' -> 'less than'.
    """
    if self.ks_start_date and self.ks_end_date:
        if not self.ks_start_date < self.ks_end_date:
            raise ValidationError('Start Date should be less than End Date')
@api.onchange('ks_forecast_method', 'ks_is_method_change')
def ks_default_forecast_method_value(self):
    """Compute the effective method: record override or system default."""
    for rec in self:
        if not rec.ks_is_method_change:
            rec.ks_default_forecast_method = self.env['ir.config_parameter'].sudo().get_param('ks_forecast_method')
        elif rec.ks_forecast_method:
            rec.ks_default_forecast_method = rec.ks_forecast_method
def ks_predict_sales(self):
    """Generate historical and forecast data points for this record.

    History comes either from an uploaded CSV (``ks_is_file``) or from the
    ``sale_order_line`` table. For each product the configured forecast
    method ``ks_<name>_method`` is resolved by name and invoked; history
    plus predictions are persisted as ``ks.sales.forecast.result`` rows and
    (database branch) chart JSON is stored on ``ks_chart_data``.
    """
    vals = []  # dicts destined for ks.sales.forecast.result.create()
    if self.ks_is_file:
        # ---- CSV branch: decode the base64 upload into a temp file ----
        temp_path = os.path.join(config.get('data_dir'), "temp")
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
        file_name = self.ks_file_name
        file_path = temp_path + '/' + file_name
        temp_file = open(file_path, 'wb')
        temp_file.write(base64.b64decode(self.ks_import_file))
        temp_file.close()
        previous_data = pd.read_csv(temp_file.name, index_col=['Date', 'Sales'])
        product_groups = previous_data.groupby(previous_data.Product).groups
        products = product_groups.keys()
        for product in products:
            sales_list = []
            # NOTE(review): product matched by exact name, first hit only.
            product_id = self.env['product.product'].search([('name', '=', product)], limit=1)
            file_datas = product_groups[product].values
            for file_data in file_datas:
                sales_list.append(float(file_data[1]))
                sale_data = {
                    'ks_forecast_id': self.id,
                    'ks_date': datetime.strptime(file_data[0], tools.DEFAULT_SERVER_DATE_FORMAT),
                    'ks_value': float(file_data[1]),
                    'ks_product_id': product_id.id
                }
                vals.append(sale_data)
            # NOTE(review): re-reads the CSV with a different index; the result
            # only feeds the commented-out print below.
            sales_data = pd.read_csv(temp_file.name, index_col='Date', usecols=['Sales', 'Date'])
            forecast_method = self.env['ir.config_parameter'].sudo().get_param('ks_forecast_method')
            if self.ks_is_method_change:
                forecast_method = self.ks_forecast_method
            data_frame = pd.DataFrame(sales_list)
            if forecast_method:
                # Resolve e.g. 'ar' -> self.ks_ar_method.
                forecast_method_name = 'ks_%s_method' % forecast_method
                if hasattr(self, forecast_method_name):
                    method = getattr(self, forecast_method_name)
                    # NOTE(review): called with one positional argument here,
                    # while the DB branch passes (data_frame, p, q, d) — confirm.
                    results = method(product_groups[product])
                    # print(results)
                    for value, month in zip(results, results.index):
                        ks_date = datetime.strftime(month, tools.DEFAULT_SERVER_DATE_FORMAT)
                        forecast_data = {
                            'ks_forecast_id': self.id,
                            'ks_date': datetime.strptime(ks_date, tools.DEFAULT_SERVER_DATE_FORMAT),
                            'ks_value': value,
                            'ks_product_id': product_id.id
                        }
                        vals.append(forecast_data)
        self.env['ks.sales.forecast.result'].create(vals)
        # print(sales_data)
    else:
        # ---- Database branch: aggregate sale order lines per period ----
        end_date = self.ks_end_date
        query = """
            select
            date_trunc(%(unit)s, so.date_order) as date,
            sum(sol.price_subtotal),
            sol.product_id
            from sale_order_line as sol
            inner join sale_order as so
            on sol.order_id = so.id
            where
            date_order >= %(start_date)s and date_order <= %(end_date)s and sol.product_id in %(product_condition)s
            group by date, sol.product_id
            order by date
        """
        product_condition = tuple(self.env['product.product'].search([]).ids)
        if self.ks_forecast_base == 'product':
            product_condition = tuple(self.ks_product_ids.ids)
        # Snap the end date to the nearer month boundary.
        if self.ks_forecast_period == 'month':
            if end_date.day > 15:
                end_date = end_date + relativedelta(day=31)
            else:
                end_date = end_date + relativedelta(day=1)
        self.env.cr.execute(query, {
            'unit': self.ks_forecast_period,
            'start_date': self.ks_start_date,
            'end_date': end_date,
            'product_condition': product_condition
        })
        result = self.env.cr.fetchall()
        # print(result)
        if len(result) == 0:
            raise UserError(_("Sales data is not available for these products"))
        data_dict = {}
        # Build per-product date/sales series plus the flat history in vals.
        for data in result:
            keys = data_dict.keys()
            sale_data = {
                'ks_forecast_id': self.id,
                'ks_date': data[0],
                'ks_value': float(data[1]),
                'ks_product_id': data[2]
            }
            vals.append(sale_data)
            if data[2] in keys:
                data_dict[data[2]]['date'].append(data[0])
                data_dict[data[2]]['sales'].append(data[1])
                data_dict[data[2]]['forecast_sales'].append(0.0)
            else:
                data_dict[data[2]] = {'date': [], 'sales': [], 'forecast_sales': []}
                data_dict[data[2]]['date'].append(data[0])
                data_dict[data[2]]['sales'].append(data[1])
                data_dict[data[2]]['forecast_sales'].append(0.0)
        product_keys = data_dict.keys()
        for product in product_keys:
            product_id = self.env['product.product'].browse(product)
            product_sales_data = data_dict[product]
            sales_list = product_sales_data.get('sales')
            forecast_method = self.env['ir.config_parameter'].sudo().get_param('ks_forecast_method')
            if self.ks_is_method_change:
                forecast_method = self.ks_forecast_method
            data_frame = np.array(sales_list)
            # The time-series models need at least 9 observations to fit.
            if forecast_method and len(data_frame) > 8:
                results = 0
                try:
                    forecast_method_name = 'ks_%s_method' % forecast_method
                    if hasattr(self, forecast_method_name):
                        p = self.ks_p
                        q = self.ks_q
                        d = self.ks_d
                        method = getattr(self, forecast_method_name)
                        # NOTE(review): positional order here is (p, q, d) but
                        # the ks_*_method signatures declare (p, d, q) — the
                        # middle two swap; confirm which order is intended.
                        results = method(data_frame, p, q, d)
                except Exception as e:
                    return self.env['ks.message.wizard'].ks_pop_up_message(names='Error', message=e)
                # Project each forecast point onto a date after end_date.
                for (i, value) in zip(range(0, len(results)), results):
                    i = i + 1
                    if self.ks_forecast_period == 'day':
                        ks_date = end_date + relativedelta(days=i)
                    elif self.ks_forecast_period == 'month':
                        ks_date = end_date + relativedelta(months=i)
                    else:
                        ks_date = end_date + relativedelta(years=i)
                    forecast_data = {
                        'ks_forecast_id': self.id,
                        'ks_date': ks_date,
                        'ks_value': value,
                        'ks_product_id': product_id.id
                    }
                    data_dict[product_id.id]['date'].append(ks_date)
                    data_dict[product_id.id]['sales'].append(0.0)
                    data_dict[product_id.id]['forecast_sales'].append(value)
                    vals.append(forecast_data)
            elif not len(data_frame) > 8:
                raise UserError(
                    _('You do not have sufficient data for "%s" product. We need minimum 9 "%ss" data') % (
                        product_id.name, self.ks_forecast_period))
            else:
                raise UserError(_('Please select a forecast method'))
        keys = data_dict.keys()
        final_dict = {}
        dict_data = {}
        if keys:
            # Union of all dates across products = shared chart x axis.
            dates = []
            for product in keys:
                dates.extend(data_dict[product]['date'])
            dates = list(set(dates))
            dates.sort()
            labels = [self.format_label(values) for values in dates]
            final_dict.update({
                'labels': labels,
                'datasets': []
            })
            product_keys = data_dict.keys()
            # Align each product's series onto the shared dates; gaps -> 0.0.
            for product in product_keys:
                dict_data[product] = {
                    'sales': {},
                    'forecast_sales': {},
                }
                for final_date in dates:
                    if final_date in data_dict[product]['date']:
                        data_index = data_dict[product]['date'].index(final_date)
                        dict_data[product]['sales'][final_date] = data_dict[product]['sales'][data_index]
                        dict_data[product]['forecast_sales'][final_date] = data_dict[product]['forecast_sales'][
                            data_index]
                    else:
                        dict_data[product]['sales'][final_date] = 0.0
                        dict_data[product]['forecast_sales'][final_date] = 0.0
            if dict_data:
                product_keys = data_dict.keys()
                # Two chart datasets per product: actuals and forecast.
                for product in product_keys:
                    product_id = self.env['product.product'].browse(product)
                    product_name = product_id.code + ' ' + product_id.name if product_id.code else product_id.name
                    final_dict['datasets'] = final_dict['datasets'] + [{
                        'data': list(dict_data[product]['sales'].values()),
                        'label': product_name + '/Previous',
                    }, {
                        'data': list(dict_data[product]['forecast_sales'].values()),
                        'label': product_name + '/Forecast'
                    }]
        self.ks_chart_data = json.dumps(final_dict)
        # Replace any previous results for this forecast before saving.
        forecast_result = self.env['ks.sales.forecast.result']
        forecast_records = forecast_result.search([('ks_forecast_id', '=', self.id)])
        if forecast_records.ids:
            for forecast_record in forecast_records:
                forecast_record.unlink()
            forecast_result.create(vals)
        else:
            forecast_result.create(vals)
    self.ks_is_predicted = True
@api.model
def format_label(self, value, ftype='datetime', display_format='MMMM yyyy'):
    """Render *value* as a localized chart label for the forecast period.

    The babel format pattern depends on ``ks_forecast_period`` ('day' and
    'year' override the default month pattern). When the context carries a
    timezone, the naive value is localized with pytz first.
    """
    period_formats = {'day': 'dd MMMM yyyy', 'year': 'yyyy'}
    display_format = period_formats.get(self.ks_forecast_period, display_format)
    tz_name = self._context.get('tz')
    locale = self._context.get('lang') or 'en_US'
    tzinfo = None
    if tz_name:
        value = pytz.timezone(self._context['tz']).localize(value)
        tzinfo = value.tzinfo
    if ftype == 'datetime':
        return babel.dates.format_datetime(value, format=display_format, tzinfo=tzinfo, locale=locale)
    return babel.dates.format_date(value, format=display_format, locale=locale)
def ks_ar_method(self, data_frame, p=False, d=False, q=False):
    """Autoregressive (AR, lag 1) forecast over ``ks_forecast_unit`` periods.

    The p/d/q arguments are accepted for a uniform signature but unused here.
    NOTE(review): the caller passes (p, q, d) positionally into (p, d, q) —
    harmless for this method, but confirm the intended order file-wide.
    """
    fitted = AutoReg(data_frame, lags=1).fit()
    horizon = self.ks_forecast_unit - 1
    start = len(data_frame)
    return fitted.predict(start, start + horizon)
def ks_ma_method(self, data_frame, p=False, d=False, q=False):
    """Moving-average (MA(q)) forecast over ``ks_forecast_unit`` periods.

    NOTE(review): the caller passes (p, q, d) positionally into (p, d, q),
    so this ``q`` actually receives the record's d value — confirm intended.
    """
    fitted = ARMA(data_frame, order=(0, q)).fit(disp=False)
    horizon = self.ks_forecast_unit - 1
    start = len(data_frame)
    return fitted.predict(start, start + horizon)
def ks_arma_method(self, data_frame, p=False, d=False, q=False):
    """ARMA(p, q) forecast over ``ks_forecast_unit`` periods.

    NOTE(review): the caller passes (p, q, d) positionally into (p, d, q),
    so this ``q`` actually receives the record's d value — confirm intended.
    """
    fitted = ARMA(data_frame, order=(p, q)).fit(disp=False)
    horizon = self.ks_forecast_unit - 1
    start = len(data_frame)
    return fitted.predict(start, start + horizon)
def ks_arima_method(self, data_frame, p=False, d=False, q=False):
    """ARIMA(p, d, q) forecast over ``ks_forecast_unit`` periods.

    NOTE(review): the caller passes (p, q, d) positionally into (p, d, q),
    so d and q arrive swapped relative to the record fields — confirm.
    """
    fitted = ARIMA(data_frame, order=(p, d, q)).fit(disp=False)
    horizon = self.ks_forecast_unit - 1
    start = len(data_frame)
    return fitted.predict(start, start + horizon)
def ks_auto_arima_method(self, data_frame, p=False, d=False, q=False):
    """"Auto" forecast with a fixed ARMA(2, 1); p/d/q are ignored."""
    fitted = ARMA(data_frame, order=(2, 1)).fit(disp=False)
    horizon = self.ks_forecast_unit - 1
    start = len(data_frame)
    return fitted.predict(start, start + horizon)
|
import boto3
import hashlib
import moto
import os.path
import qarnot
from pathlib import Path
from qarnot.bucket import Bucket
from unittest import TestCase
from unittest.mock import patch, Mock
def mock_connection_base(mock_s3buckets=None):
    """Build a Mock standing in for a qarnot Connection.

    The s3 client/resource are plain Mocks; ``s3resource.Bucket(...)`` always
    returns *mock_s3buckets*. Path sanitizing and bucket warnings are enabled
    as on a real connection.
    """
    conn = Mock({'other.side_effect': KeyError})
    conn.s3client = Mock()
    conn.s3resource = Mock()
    conn.s3resource.Bucket.return_value = mock_s3buckets
    conn._sanitize_bucket_paths = True
    conn._show_bucket_warnings = True
    return conn
class TestBucketPublicMethods(TestCase):
    """Unit tests for Bucket's public API against mocked connections."""

    def test_init_bucket(self):
        """Constructing with create=True must call s3client.create_bucket once."""
        conn = mock_connection_base()
        Bucket(conn, "name", True)
        conn.s3client.create_bucket.assert_called_once()

    @patch("qarnot.bucket.Bucket.add_file")
    def test_bucket_add_string(self, add_file):
        """add_string must delegate to add_file with a UTF-8 encoded stream."""
        bucket = qarnot.bucket.Bucket(mock_connection_base(), "bucket_name", False)
        payload = "Test string to be send"
        remote_path = "path/to/go"
        bucket.add_string(payload, remote_path)
        add_file.assert_called_once()
        positional = add_file.call_args[0]
        assert positional[0].read() == payload.encode('utf-8')
        assert positional[1] == remote_path
# ================================== Utils functions ==================================
def write_in(path, text):
    """Write *text* to *path*, creating any missing parent directories.

    Accepts either a str or a pathlib.Path (the original required an object
    with a ``.parent`` attribute); existing content is overwritten.
    """
    path = Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(text)
def compute_etag(path):
    """Return the MD5 hex digest of the file at *path*.

    Matches the ETag S3 reports for single-part uploads; the file is read
    in 4 KiB chunks so large files don't need to fit in memory.
    """
    digest = hashlib.md5()
    with open(path, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
# Returns a set containing the couple (filename, etag) for every file in a given folder
def list_local_files(path):
    """Return the set of (relative posix path, md5 etag) couples for every
    file found under *path* (recursively)."""
    found = set()
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            rel_path = Path(full_path).relative_to(path).as_posix()
            found.add((rel_path, compute_etag(full_path)))
    return found
# This is just to keep track of how many times the copy_file method was called
# This is just to keep track of how many times the copy_file method was called
class BucketWithCopyCounter(Bucket):
    """Bucket subclass instrumenting copy_file with a call counter
    (read by the moto tests as ``_nbr_of_copies``)."""
    def __init__(self, connection, name, create=True):
        super().__init__(connection, name, create)
        # Number of copy_file invocations since construction.
        self._nbr_of_copies = 0
    def copy_file(self, source, dest):
        # Count, then delegate to the real implementation.
        self._nbr_of_copies += 1
        super().copy_file(source, dest)
# ================================== Tests using Moto ==================================
class TestBucketPublicMethodsMoto:
    """Bucket tests backed by moto's in-memory S3 (no hand-rolled mocks)."""
    @moto.mock_s3
    def test_sync_files_avoid_unnecessary_copies(self, tmp_path):
        # cf QNET-5274
        # tmp_path is the pytest-provided per-test temporary directory fixture.
        bucket_name = "dolly"
        # Mock S3 client and resource: real boto3 objects, intercepted by moto.
        q_conn = mock_connection_base()
        q_conn.s3client = boto3.client("s3")
        q_conn.s3resource = boto3.resource('s3')
        # Add 2 identical files in the bucket by our own way
        bucket = BucketWithCopyCounter(q_conn, bucket_name, True)
        bucket.add_string("Tu ne copieras point sur ton voisin", "remote1")
        bucket.add_string("Tu ne copieras point sur ton voisin", "remote2")
        # Write some files with identical content in a temporary folder
        write_in(tmp_path / "local1", 'Tu ne copieras point sur ton voisin')
        write_in(tmp_path / "local2", 'Tu ne copieras point sur ton voisin')
        # Synchronize the content of this folder with the bucket
        bucket.sync_directory(tmp_path.as_posix())
        # Check that it's indeed synchronized (compare (key, md5-etag) couples)
        local_files = list_local_files(tmp_path)
        bucket_files = set()
        for file in bucket.list_files():
            bucket_files.add((file.key, file.e_tag.strip('"')))
        assert local_files == bucket_files, "Bucket and local folder have different content whereas they should be " \
                                            "identical "
        # Check that there were no unnecessary copies performed: identical
        # content should be server-side copied once per destination, not re-uploaded.
        assert bucket._nbr_of_copies == 2, "The copy method should have been called only twice\
            ({} calls here)".format(bucket._nbr_of_copies)
|
import operator
from collections import Counter

# Raw (train name, load) records; names repeat and must be aggregated.
ttL = [('토마스', 5), ('헨리', 8), ('에드워드', 9), ('토마스', 12), ('에드워드', 1)]

# Total load per train. Counter sums repeated keys for us, replacing the
# manual if-in-dict accumulation (and the unused tL/tR/cR variables).
tD = Counter()
for tName, tWeight in ttL:
    tD[tName] += tWeight
print(list(tD.items()))

# Rank trains by total load, heaviest first (sort on the value, index 1).
tL = sorted(tD.items(), key=operator.itemgetter(1), reverse=True)
print(tL)
print("-----------------")
print("기차", "\t", "총수송량", "\t", "순위")
print("-----------------")
for rank, train in enumerate(tL, start=1):
    print(train[0], "\t", train[1], "\t", rank)
from rest_framework.routers import DefaultRouter
from . import views
# DefaultRouter auto-generates the standard list/detail URL patterns
# (plus the API root view) for each registered ViewSet.
router = DefaultRouter()
# basename is given explicitly so reverse() names do not depend on the
# ViewSets defining a queryset attribute.
router.register(r'tags', views.SkillTagsViewSet, basename='tags')
router.register(r'skills', views.SkillViewSet, basename='skills')
router.register(r'projects', views.ProjectViewSet, basename='projects')
router.register(r'workplaces', views.WorkViewSet, basename='workplaces')
# Expose the generated patterns as this module's urlconf.
urlpatterns = router.urls
|
"""
Inorder Traversal of a tree
"""
class Node:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val):
        self.data = val    # payload stored at this node
        self.left = None   # left child (Node or None)
        self.right = None  # right child (Node or None)
def printInorder(root):
    """Print the tree's values in inorder (left, root, right), space-separated."""
    if not root:
        return
    printInorder(root.left)
    print(root.data, end=" ")
    printInorder(root.right)
# Build a sample tree:
#         4
#        / \
#       5   6
#      /   / \
#     8   2   7
#        / \
#       1   12
root = Node(4)
root.left = Node(5)
root.right = Node(6)
root.left.left = Node(8)
root.right.left = Node(2)
root.right.left.left = Node(1)
root.right.left.right = Node(12)
root.right.right = Node(7)
printInorder(root)  # prints: 8 5 4 1 2 12 6 7
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def search_a_2d_matrix_ii(matrix, target):
    """
    Search *target* in an m*n matrix with these properties:
    1. integers in each row increase from left to right
    2. integers in each column increase from top to bottom
    Start at the bottom-left corner: moving right increases the value,
    moving up decreases it, so each step eliminates a row or a column.
    :param matrix: List[List[int]]
    :param target: int
    :return: bool
    """
    if not matrix:
        return False
    rows, cols = len(matrix), len(matrix[0])
    r, c = rows - 1, 0
    while r >= 0 and c < cols:
        current = matrix[r][c]
        if current == target:
            return True
        if current < target:
            c += 1
        else:
            r -= 1
    return False
# def binary_search(nums, target):
# start, end = 0, len(nums)-1
# while start + 1 < end:
# mid = int((start + end) / 2)
# if nums[mid] < target:
# start = mid
# elif nums[mid] > target:
# end = mid
# else:
# return True
# if nums[start] == target:
# return True
# if nums[end] == target:
# return True
# return False
#
# for nums in matrix:
# if binary_search(nums, target):
# return True
# return False
if __name__ == "__main__":
    # Demo: 20 is absent from this sorted matrix, so the search prints False.
    sample = [
        [1, 4, 7, 11, 15],
        [2, 5, 8, 12, 19],
        [3, 6, 9, 16, 22],
        [10, 13, 14, 17, 24],
        [18, 21, 23, 26, 30],
    ]
    print(search_a_2d_matrix_ii(sample, 20))
from distutils.core import setup
from distutils.extension import Extension
import sys
import numpy
# Determine whether to use Cython: passing --cythonize on the command line
# regenerates the C source from the .pyx; otherwise the checked-in C is built.
if '--cythonize' in sys.argv:
    cythonize_switch = True
    del sys.argv[sys.argv.index('--cythonize')]
else:
    cythonize_switch = False
# Find all includes (NumPy headers needed by the extension).
numpy_include = numpy.get_include()
# Set up the ext_modules for Cython or not, depending.
# Both variants compile and link with OpenMP enabled.
if cythonize_switch:
    from Cython.Distutils import build_ext
    from Cython.Build import cythonize
    ext_modules = cythonize([Extension("parallel.parallel", ["parallel/parallel.pyx"],include_dirs = [numpy_include],
                                       extra_compile_args=['-fopenmp'], extra_link_args=['-fopenmp'])])
else:
    ext_modules = [Extension("parallel.parallel", ["parallel/parallel.c"],include_dirs = [numpy_include],
                             extra_compile_args=['-fopenmp'], extra_link_args=['-fopenmp'])]
# Create a dictionary of arguments for setup.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools — confirm the targeted Python versions first.
setup_args = {'name':'parallel-test',
              'version':'0.1.0',
              'author':'Jason Rudy',
              'author_email':'jcrudy@gmail.com',
              'packages':['parallel',],
              'license':'LICENSE.txt',
              'description':'Let\'s try some parallel programming in Cython',
              'long_description':open('README.md','r').read(),
              'py_modules' : [],
              'ext_modules' : ext_modules,
              'classifiers' : ['Development Status :: 3 - Alpha'],
              'requires':[]}
# Add the build_ext command only if cythonizing
if cythonize_switch:
    setup_args['cmdclass'] = {'build_ext': build_ext}
# Finally
setup(**setup_args)
|
'''
Convert image to grayscale.

Loads an RGB image, averages the three color channels per pixel using a
TensorFlow 1.x graph (placeholder + Session), and displays the result.
'''
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
filename = "../lesson3/MarshOrchid.jpg"
raw_image_data = mpimg.imread(filename)
# Placeholder for an RGB image of any height/width (TF1 graph-mode API).
image = tf.placeholder(tf.int32, [None, None, 3])
# Reduce axis 2 by mean (= color)
# i.e. image = [[[r,g,b], ...]]
# out = [[[ grayvalue ], ... ]] where grayvalue = mean(r, g, b)
# NOTE(review): keep_dims was renamed keepdims in later TF releases — confirm
# the installed TensorFlow still accepts the old keyword.
out = tf.reduce_mean(image, 2, keep_dims=True)
# Associate r,g,b to the same mean value = concat mean on axis 2.
# out = [[[ grayvalue, grayvalue, grayvalue], ...]]
out = tf.concat([out, out, out], 2)
# Cast back to uint8 so matplotlib interprets it as image data.
out = tf.cast(out, tf.uint8)
with tf.Session() as session:
    result = session.run(out, feed_dict={image: raw_image_data})
    print(result.shape)
plt.imshow(result)
plt.show()
|
from typing import Generic, TypeVar, Callable
T = TypeVar("T")
class JSONStorageItem(Generic[T]):
    """Typed accessor stub for a single JSON-backed storage entry.

    Interface only (bodies are ellipses): constructed with a storage key and
    a factory producing the default value when the key is absent.
    """
    def __init__(self, _key: str, _default: Callable[[], T], /): ...
    def get(self) -> T:
        """Return the stored value (or the default-factory result)."""
        ...
    def set(self, value: T) -> None:
        """Persist *value* under this item's key."""
        ...
    def invalidate_cache(self) -> None:
        """Drop any cached value so the next get() re-reads storage."""
        ...
|
import torch
import numpy as np
class MyQuantize(torch.autograd.Function):
    """Quantizer with a straight-through gradient estimator (STE).

    forward clamps inputs to [-enc_value_limit, enc_value_limit] and rounds
    them onto ``args.quantize`` uniformly spaced levels (pure sign for the
    binary case). backward passes the incoming gradient through, optionally
    zeroing it outside [-1, 1] of the inputs and/or clamping its magnitude,
    per ``args.enc_clipping``.
    """

    @staticmethod
    def forward(ctx, inputs, args):
        ctx.args = args
        # BUG FIX: backward reads ctx.saved_tensors, but the original forward
        # never saved anything, so enc_clipping in {'inputs','both'} crashed.
        ctx.save_for_backward(inputs)
        x_lim_abs = args.enc_value_limit
        x_lim_range = 2.0 * x_lim_abs
        x_input_norm = torch.clamp(inputs, -x_lim_abs, x_lim_abs)
        if args.quantize == 2:
            # Binary quantization: just the sign.
            outputs_int = torch.sign(x_input_norm)
        else:
            # Uniform quantization onto (quantize) levels across the range.
            outputs_int = torch.round((x_input_norm + x_lim_abs) * ((args.quantize - 1.0) / x_lim_range)) * x_lim_range / (args.quantize - 1.0) - x_lim_abs
        return outputs_int

    @staticmethod
    def backward(ctx, grad_output):
        # STE implementations
        if ctx.args.enc_clipping in ['inputs', 'both']:
            input, = ctx.saved_tensors
            # Kill gradients where the input saturated beyond [-1, 1].
            grad_output[input > 1.0] = 0
            grad_output[input < -1.0] = 0
        if ctx.args.enc_clipping in ['gradient', 'both']:
            grad_output = torch.clamp(grad_output, -ctx.args.enc_grad_limit, ctx.args.enc_grad_limit)
        grad_input = grad_output.clone()
        # Second return is None: no gradient flows to the args object.
        return grad_input, None
def quantizer(imgs, args):
    """Apply the straight-through quantizer to *imgs* and return the result."""
    return MyQuantize.apply(imgs, args)
def add_qr(imgs):
    """Stamp QR-code-style finder patterns into three corners of *imgs*.

    Writes a 7x7 dark (-1) square with a +1 inner ring and a +1 separator
    border into the top-left, bottom-left and top-right corners.
    Assumes a (..., H, W) tensor scaled to [-1, 1] — TODO confirm layout.
    NOTE(review): mutates *imgs* in place — ``new_imgs`` is an alias, not a
    copy, so the caller's tensor is modified; confirm that is intended.
    """
    # add some things
    # add item for up left.
    new_imgs = imgs
    new_imgs[:, :, :7, :7] = -1.0
    new_imgs[:, :, 1, 1:6] = +1.0
    new_imgs[:, :, 5, 1:6] = +1.0
    new_imgs[:, :, 1:6, 1] = +1.0
    new_imgs[:, :, 1:6, 5] = +1.0
    new_imgs[:, :, 7, :8] = +1.0
    new_imgs[:, :, :8, 7] = +1.0
    # add item for left down.
    new_imgs[:, :, -7:, :7] = -1.0
    new_imgs[:, :, -2, 1:6] = +1.0
    new_imgs[:, :, -6, 1:6] = +1.0
    new_imgs[:, :, -6:-2, 1] = +1.0
    new_imgs[:, :, -6:-2, 5] = +1.0
    new_imgs[:, :, -8, :8] = +1.0
    new_imgs[:, :, -8:, 7] = +1.0
    # add item for right up
    new_imgs[:, :, :7, -7:] = -1.0
    new_imgs[:, :, 1, -6:-2] = +1.0
    new_imgs[:, :, 5, -6:-2] = +1.0
    new_imgs[:, :, 1:6, -2] = +1.0
    new_imgs[:, :, 1:6, -6] = +1.0
    new_imgs[:, :, 7, -7:] = +1.0
    new_imgs[:, :, :8, -8] = +1.0
    return new_imgs
def bsc(imgs, bsc_p, device):
    """Simulate a binary symmetric channel on *imgs*.

    Each element is independently sign-flipped (multiplied by -1) with
    probability *bsc_p*, otherwise left unchanged. Returns a new tensor on
    *device*; the input is not modified.
    """
    flips = np.random.choice([-1.0, 1.0], imgs.shape, p=[bsc_p, 1 - bsc_p])
    noise = torch.from_numpy(flips).type(torch.FloatTensor).to(device)
    return imgs * noise
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import render, redirect, get_object_or_404
from feed.models import Post
from .forms import UserRegisterForm, ProfileUpdateForm
from .models import Profile, Relationship
# Create your views here.
User = get_user_model()
def register(request):
    """Sign up a new user: validate the form on POST, else show a blank one."""
    if request.method != 'POST':
        return render(request, 'account/register.html', {'form': UserRegisterForm()})
    form = UserRegisterForm(request.POST)
    if form.is_valid():
        form.save()
        messages.success(request, 'Registration successful Please Login')
        return redirect('home')
    # Invalid submission: re-render with the bound form so errors display.
    return render(request, 'account/register.html', {'form': form})
@login_required(login_url='/account/login/')
def my_profile(request):
    """Show the logged-in user's profile along with their posts, newest first."""
    profile = Profile.objects.select_related("user").prefetch_related(
        'friends').get(user=request.user)
    posts = Post.objects.select_related('user_name').filter(
        user_name=request.user).order_by('-date_posted')
    return render(request, 'account/my_profile.html',
                  {'profile': profile, 'user_post': posts})
@login_required(login_url='/account/login/')
def update_profile(request):
    """Edit the current user's profile; redirect back to it on a valid save."""
    if request.method == 'POST':
        form = ProfileUpdateForm(request.POST, request.FILES,
                                 instance=request.user.profile)
        if form.is_valid():
            form.save()
            return redirect('my_profile')
    else:
        form = ProfileUpdateForm(instance=request.user.profile)
    # GET, or invalid POST (form carries the errors).
    return render(request, 'account/edit_profile.html', {'form': form})
@login_required(login_url='/account/login/')
def friend_list(request):
    """Render the current user's friends (their profiles fetched in one query)."""
    context = {
        'friends': request.user.profile.friends.select_related('profile'),
    }
    return render(request, "account/friend_list.html", context)
@login_required(login_url='/account/login/')
def invites_received(request):
    """List profiles with a still-pending ('send') friend request to this user."""
    me = Profile.objects.select_related("user").get(user=request.user)
    pending = Relationship.objects.filter(receiver=me,
                                          status='send').select_related(
        'sender__user', 'receiver__user')
    senders = [rel.sender for rel in pending]
    context = {
        'qs': senders,
        'is_empty': len(senders) == 0,
    }
    return render(request, 'account/my_invites.html', context)
@login_required(login_url='/account/login/')
def accept_invitation(request):
    """Mark a pending ('send') friend request to the current user as accepted."""
    if request.method == "POST":
        sender = Profile.objects.get(pk=request.POST.get('profile_pk'))
        receiver = Profile.objects.get(user=request.user)
        rel = get_object_or_404(Relationship, sender=sender, receiver=receiver)
        if rel.status == 'send':
            rel.status = "accepted"
            rel.save()
        return redirect('my_invites_view')
@login_required(login_url='/account/login/')
def reject_invitation(request):
    """Delete a friend request sent to the current user."""
    if request.method == "POST":
        sender = Profile.objects.get(pk=request.POST.get('profile_pk'))
        receiver = Profile.objects.get(user=request.user)
        rel = get_object_or_404(Relationship, sender=sender, receiver=receiver)
        rel.delete()
        return redirect('my_invites_view')
@login_required(login_url='/account/login/')
def profile_list(request):
    """List every other user's profile plus the users this one already has a
    relationship with (either direction), so the template can pick the right
    invite/remove button per card.
    """
    all_profile = Profile.objects.select_related('user').prefetch_related(
        'friends').exclude(user=request.user)
    profile = Profile.objects.select_related("user").get(user=request.user)
    all_relations = Relationship.objects.filter(
        Q(sender=profile) | Q(receiver=profile)).select_related(
        'sender__user', 'receiver__user')
    receivers = []
    senders = []
    for item in all_relations:
        senders.append(item.sender.user)
        receivers.append(item.receiver.user)
    context = {
        'all_profile': all_profile,
        'rel_receiver': receivers,
        'rel_sender': senders,
        # BUG FIX: was re-assigned False in the empty branch, so the
        # template's "no profiles" message could never show.
        'is_empty': not all_profile,
    }
    return render(request, 'account/profile_list.html', context)
@login_required(login_url='/account/login/')
def add_friend(request):
    """Create a pending friend request from the current user to another profile."""
    if request.method == "POST":
        target = Profile.objects.get(pk=request.POST.get('profile_pk'))
        Relationship.objects.create(sender=request.user.profile,
                                    receiver=target, status='send')
        return redirect(request.META.get('HTTP_REFERER'))
    return redirect('my_profile')
@login_required(login_url='/account/login/')
def remove_friend(request):
    """Delete the relationship between the current user and another profile,
    regardless of who originally sent the request."""
    if request.method == "POST":
        other = Profile.objects.get(pk=request.POST.get('profile_pk'))
        mine = request.user.profile
        rel = Relationship.objects.get(
            (Q(sender=mine) & Q(receiver=other)) | (
                Q(sender=other) & Q(receiver=mine)))
        rel.delete()
        return redirect(request.META.get('HTTP_REFERER'))
    return redirect('my_profile')
@login_required(login_url='/account/login/')
def search_users(request):
    """Search other users by username substring, tagging existing relationships.

    Fixes: ``is_empty`` was always False (the empty-result branch re-assigned
    False instead of True), and a missing ``q`` parameter crashed the
    icontains lookup with None.
    """
    query = request.GET.get('q') or ''
    object_list = Profile.objects.select_related('user').prefetch_related(
        'friends').exclude(user=request.user).filter(
        user__username__icontains=query)
    profile = Profile.objects.select_related("user").get(user=request.user)
    all_relations = Relationship.objects.filter(
        Q(sender=profile) | Q(receiver=profile)).select_related(
        'sender__user', 'receiver__user')
    receivers = []
    senders = []
    for item in all_relations:
        senders.append(item.sender.user)
        receivers.append(item.receiver.user)
    context = {
        'all_profile': object_list,
        'rel_receiver': receivers,
        'rel_sender': senders,
        # BUG FIX: flag now reflects an actually empty result set.
        'is_empty': not object_list,
    }
    return render(request, "account/search_users.html", context)
@login_required(login_url='/account/login/')
def user_profile(request):
    """Show another user's profile (POSTed pk) together with their posts.

    Non-POST requests bounce back to the referring page. The leftover debug
    ``print(pk)`` has been removed.
    """
    if request.method == "POST":
        pk = request.POST.get('profile_pk')
        profile = Profile.objects.select_related("user").prefetch_related(
            'friends').get(pk=pk)
        user_post = Post.objects.select_related('user_name').filter(
            user_name=profile.user).order_by('-date_posted')
        context = {'profile': profile, 'user_post': user_post}
        return render(request, 'account/user_profile.html', context)
    return redirect(request.META.get('HTTP_REFERER'))
|
print('{} DESAFIO 9 {}'.format('='*10, '='*10))
n = int(input('Digite um número: '))
print('\n\033[31mTABUADA DO {}\033[m'.format(n))
# One loop instead of eleven hand-written print lines; output is identical
# (multipliers 0 through 10, same field widths).
for m in range(11):
    print('{} X {:2} = {:2}'.format(n, m, n * m))
|
__author__ = 'mike'
import numpy as np
import matplotlib.pyplot as plt
def scatter_xy(ax, m_obj, x_dtype, y_dtype, x_component, y_component, **plt_opt):
    # TODO: unimplemented placeholder — presumably meant to scatter-plot one
    # measurement component against another on *ax*; confirm the intended API
    # before wiring callers to it.
    pass
def generate_plots(n=3, xsize=5., ysize=5., tight_layout=False):
    """
    Create a figure with *n* subplots laid out on a near-square landscape grid.
    Parameter
    ---------
    n: int
        number of subplots to generate
    xsize: float
        width allotted to each subplot
    ysize: float
        height allotted to each subplot
    tight_layout: bool
        whether the figure uses tight layout
    Returns
    -------
    fig matplotlib figure instance
    """
    # rows = floor(sqrt(n)); cols = enough columns to fit all n plots.
    rows = np.floor(n ** 0.5).astype(int)
    cols = np.ceil(1. * n / rows).astype(int)
    fig = plt.figure(figsize=(xsize * cols, ysize * rows))
    for idx in range(1, n + 1):
        fig.add_subplot(rows, cols, idx)
        fig.set_tight_layout(tight_layout)
    return fig
def get_subplot(fig, i):
    """
    Return axis *i* (0-based) of a figure laid out by generate_plots,
    recomputing the same rows x cols grid from the axis count.
    :param fig:
    :param i:
    :return:
    """
    total = len(fig.axes)
    rows = np.floor(total ** 0.5).astype(int)
    cols = np.ceil(1. * total / rows).astype(int)
    return fig.add_subplot(rows, cols, i + 1)
def get_min_max_all_ax(fig):
    """
    Return ([xmin, xmax], [ymin, ymax]) taken across every axes of *fig*.
    """
    xlims = np.array([ax.get_xlim() for ax in fig.axes])
    ylims = np.array([ax.get_ylim() for ax in fig.axes])
    return [np.min(xlims), np.max(xlims)], [np.min(ylims), np.max(ylims)]
def set_lim_all_ax(fig, xlim=None, ylim=None):
    """
    Apply the same x and/or y limits to every axes of the given figure.
    Parameters
    ----------
    fig : matplotlib figure instance
    xlim, ylim : tuple or None
        limits to apply; None leaves that axis untouched
    """
    for ax in fig.axes:
        # Explicit None checks so falsy-but-valid limits (e.g. arrays,
        # (0, 0)) are not silently skipped by truthiness.
        if xlim is not None:
            ax.set_xlim(xlim)
        if ylim is not None:
            ax.set_ylim(ylim)
def create_heat_color_map(value_list, reverse=False):
    """Map the entries of *value_list* onto '#rr00bb' hex colors, blue -> red.

    Only the length of *value_list* matters: position i gets a color whose
    red channel ramps 0->255 while the blue channel ramps 255->0.

    :param value_list: sequence whose length sets the number of colors
    :param reverse: if True, return the list red -> blue instead
    :return: list of '#rr00bb' hex color strings
    """
    levels = np.linspace(0, 255, len(value_list)).astype('int')
    # BUG FIX: the old code used hex(i)[2:-1], which chops the LAST hex digit
    # under Python 3 (it assumed Python 2 longs' trailing 'L'); use a
    # zero-padded format spec instead.
    reds = ['{:02x}'.format(int(i)) for i in levels]
    blues = reds[::-1]
    colors = ['#%s00%s' % (reds[i], blues[i]) for i in range(len(levels))]
    if reverse:
        colors = colors[::-1]
    return colors
|
from lib.message.base_message import base_message
from lib.util.error import DiscordError
from lib.util.parameter import parameter
class list_tags(base_message):
    """Bot command that lists every tag configured for the current server.

    Tags live in the shared config as sections named ``<tag>:<server id>``.
    """
    # This command accepts no arguments (min and max count are both zero).
    length = {
        'min': 0,
        'max': 0
    }
    def run(self, client):
        # Coroutine (``yield from``) invoked by the message dispatcher.
        self.assert_length()
        p = parameter.getInstance()
        cfg = p.__config__
        # Ensure the server has its own top-level config section.
        if not self.message.server.id in cfg.sections():
            cfg.add_section(self.message.server.id)
        tags = []
        # Sections named '<tag>:<server id>' belong to this server.
        for section in cfg.sections():
            if section.endswith(':' + self.message.server.id):
                tags.append(section.split(':')[0])
        if len(tags) == 0:
            raise DiscordError('No servers configured!')
        yield from client.send_message(self.message.channel, 'Currently the following tags are configured:' + self.format_list(tags))
    def format_list(self,parts):
        # Render as a bullet (\u2022) list, one item per line.
        return '\n\u2022 ' + ('\n\u2022 ').join(parts)
# NOTE(review): auto-generated .NET API stub (IronPython/Revit style); method
# bodies are intentionally `pass` — the real implementation lives in the
# managed assembly. Do not hand-edit signatures.
class Workset(WorksetPreview,IDisposable):
    """ Represents a workset in the document. """
    @staticmethod
    def Create(document,name):
        """
        Create(document: Document,name: str) -> Workset
        Creates a new workset.
        document: The document in which the new instance is created.
        name: The workset name.
        Returns: Returns the newly created workset.
        """
        pass
    def Dispose(self):
        """ Dispose(self: WorksetPreview,A_0: bool) """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: WorksetPreview,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    IsEditable=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Whether the workset is editable.
    Get: IsEditable(self: Workset) -> bool
    """
    IsOpen=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Whether the workset is open (rather than closed).
    Get: IsOpen(self: Workset) -> bool
    """
    IsVisibleByDefault=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Whether the workset is visible by default.
    Get: IsVisibleByDefault(self: Workset) -> bool
    """
    Kind=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Kind of the workset.
    Get: Kind(self: Workset) -> WorksetKind
    """
|
from django.shortcuts import render
from .models import SiteLoc, User
from math import ceil
from django.core.mail import send_mail
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Home page: group site locations by country, each with enough 4-item
    carousel slides to show all of that country's places."""
    allPlaces = []
    catplaces = SiteLoc.objects.values('country', 'id')
    countries = {entry['country'] for entry in catplaces}
    for country in countries:
        places = SiteLoc.objects.filter(country=country)
        count = len(places)
        # Slides of 4: integer part plus one extra slide for any remainder.
        slides = count // 4 + ceil((count / 4) - (count // 4))
        allPlaces.append([places, range(1, slides), slides])
    return render(request, 'blog/index.html', {'allPlaces': allPlaces})
# Static pages: each just renders its template, no context needed.
def about(request):
    return render(request, 'blog/about.html')
def home(request):
    return render(request, 'blog/home.html')
def contact(request):
    return render(request, 'blog/contact.html')
def siteView(request, myid):
    """Detail page for one site location, looked up by primary key."""
    # Fetch the product using the id (first match of the filter, as before).
    matches = SiteLoc.objects.filter(id=myid)
    return render(request, 'blog/siteView.html', {'site': matches[0]})
def addContact(request):
    """Subscribe a visitor: send a welcome mail and persist the contact.

    Renders the about page in every case. BUG FIX: a non-POST request
    previously fell off the end and returned None, which Django rejects
    with a server error.
    """
    if request.method == "POST":
        name = request.POST.get('name')
        email = request.POST.get('email')
        send_mail('Welcome to JourneyWithDeva',
                  'Thanks for being a part of this exciting journey. We will notify you with recent updates.',
                  'akshayraikar94@gmail.com',
                  [email],
                  fail_silently=False)
        usr = User(user_name=name, user_email=email)
        usr.save()
    return render(request, 'blog/about.html')
|
import dna
import unittest
class Test(unittest.TestCase):
    """Exercises dna.count_nucleotides with the Rosalind sample strand."""

    def test_dna(self):
        strand = 'AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC'
        # Expected counts for A, C, G, T respectively.
        self.assertEqual([20, 12, 17, 21], dna.count_nucleotides(strand))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
from util import *
from copy import deepcopy
class Instance:
    """A datum paired with its projected 2-D coordinate.

    The coordinate object must expose ``.x`` and ``.y``; the datum's last
    element is treated as its class label.
    """

    def __init__(self, coord, datum):
        self.coord = coord      # DataCoordinate-like point with .x / .y
        self.datum = datum      # raw data row; last field is the class label
        self.unknown = False    # flag for instances whose label is hidden

    def Coord(self):
        """Return [x, y, class label], handy for plotting/serialization."""
        return [self.coord.x, self.coord.y, self.klass()]

    def klass(self):
        """Return the class label (last field of the datum)."""
        return self.datum[-1]
class InstanceCollection:
    """Projects raw datums onto a 2-D plane (FastMap-style).

    Two extreme datums ("poles", east/west) define an axis; every datum's
    (x, y) is derived from its distances to the poles via the cosine rule.
    Helpers ``distance``/``isnumeric``/``sort_by_class`` and the ``math``/
    ``random`` modules come from ``util`` (star import) — confirm.
    """

    def __init__(self, data_collection):
        self.instances = []
        self.max_x = 0   # largest projected x seen (used by normalize)
        self.max_y = 0   # largest projected y seen (used by normalize)
        self.datums = data_collection.datums
        east, west = data_collection.exhaustive_find_poles()
        #east, west = data_collection.find_poles()
        d = distance(east, west)
        #for i in range(30):
        #    n_east, n_west = data_collection.find_poles()
        #    if distance(n_east, n_west) > d:
        #        east = n_east
        #        west = n_west
        #        d = distance(east, west)
        #
        self.east = east
        self.west = west
        count = 0
        # Re-project until no datum lies farther out than the current poles.
        while not self.finish_instances():
            count += 1
            #print "COUNT,", count

    def finish_instances(self):
        """One projection pass over the datums.

        Returns True when every datum fit between the poles. If a datum is
        more extreme than a pole, it is swapped in as the new pole, the
        partial projection is discarded, and False is returned so the
        caller retries.
        """
        completed = True
        base_d = distance(self.east, self.west)
        for datum in self.datums:
            a = distance(self.west, datum)
            b = distance(self.east, datum)
            # Datum outside the pole axis: promote it to a pole and restart.
            if distance(self.east, datum) > distance(self.east, self.west) or distance(self.west,datum) > distance(self.east, self.west):
                if distance(self.east,datum) > distance(self.west, datum):
                    self.datums.append(self.west)
                    self.west = datum
                    self.datums.remove(datum)
                else:
                    self.datums.append(self.east)
                    self.east = datum
                    self.datums.remove(datum)
                completed = False
                self.instances = []
                break
            # Cosine rule: x is the projection of the datum onto the axis.
            x = (b**2 - base_d**2 - a**2) / (-2 * base_d)
            if x > self.max_x:
                self.max_x = x
            try:
                # y is the perpendicular offset; clamp to 0 when rounding
                # makes the radicand negative.
                y = math.sqrt(a**2 - x**2)
            except ValueError:
                y = 0
            if y > self.max_y:
                self.max_y = y
            self.instances.append(Instance(DataCoordinate(x,y), datum))
        return completed

    def normalize_coordinates(self):
        # Scale both axes into [0, 1] using the maxima seen during projection.
        for instance in self.instances:
            instance.coord.x = instance.coord.x / self.max_x
            instance.coord.y = instance.coord.y / self.max_y

    def log_x_coordinates(self):
        # Log-compress x; the epsilon guards against log(0).
        for instance in self.instances:
            instance.coord.x = math.log(instance.coord.x + 0.0001)

    def log_y_coordinates(self):
        # Log-compress y; the epsilon guards against log(0).
        for instance in self.instances:
            instance.coord.y = math.log(instance.coord.y + 0.0001)

    def klasses(self):
        # NOTE(review): returns the bound klass methods, not the labels —
        # callers likely want inst.klass() with parentheses; confirm.
        return [ inst.klass for inst in self.instances ]

    def coords(self):
        return [ inst.coord for inst in self.instances ]

    def datums(self):
        # NOTE(review): shadowed by the self.datums attribute set in
        # __init__, so this method is unreachable on instances; confirm.
        return [ inst.datum for inst in self.instances ]

    def k_fold_stratified_cross_val(self, k=10):
        """Shuffle and deal the instances round-robin into *k* bins."""
        bins = []
        bin_count = []
        # NOTE(review): the two-argument random.shuffle form was removed in
        # Python 3.11 — confirm the target interpreter.
        random.shuffle(self.instances,random.random)
        if not isnumeric(self.instances[0].klass()):
            # NOTE(review): result unused — the sort was presumably meant to
            # stratify the deal; confirm.
            data = sort_by_class(self.instances)
        for i in range(k):
            bins.append([])
            bin_count.append(0)
        for instance in self.instances:
            try:
                index = bin_count.index(0)
                bins[index].append(instance)
                bin_count[index] = 1
            except:
                # Every bin got one instance this round: reset the markers
                # and start the next round.
                for i in range(k):
                    bin_count[i]=0
                index = bin_count.index(0)
                bins[index].append(instance)
                bin_count[index] = 1
        return bins

    def stratified_cross_val(self, option):
        """Split into train/test by an (n_train, n_test) repeating pattern."""
        random.shuffle(self.instances, random.random)
        if not isnumeric(self.instances[0].klass()):
            # NOTE(review): result unused, as in k_fold above.
            data = sort_by_class(self.instances)
        train_count = 0
        test_count = 0
        train = []
        test = []
        for instance in self.instances:
            if train_count < option[0]:
                train_count = train_count + 1
                train.append(instance)
            elif test_count < option[1]:
                test_count = test_count + 1
                test.append(instance)
            # Pattern complete: restart the counters for the next cycle.
            if train_count == option[0] and test_count == option[1]:
                train_count = 0
                test_count = 0
        return train, test

    def two_bins(self):
        """Shuffle and split roughly in half.

        NOTE(review): Python 2 semantics — len()/2 is a float under Python 3
        and slicing raises TypeError; these bounds also drop the middle and
        final elements. Confirm before reusing.
        """
        random.shuffle(self.instances, random.random)
        g1 = self.instances[0:len(self.instances)/2]
        g2 = self.instances[(len(self.instances)/2)+1:-1]
        return g1, g2

    def shuffle(self):
        # In-place shuffle of the projected instances.
        random.shuffle(self.instances, random.random)
class DataCoordinate:
    """Simple mutable (x, y) pair holding a projected instance position."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class DataCollection:
    """Holds the raw datums and locates the two most distant ones (poles)."""

    def __init__(self, datums):
        self.datums = datums
        # Keep a pristine copy: pole-finding mutates self.datums in place.
        self.backup = deepcopy(datums)

    def add_datum(self, datum):
        """Append a datum to the collection.

        BUG FIX: the original referenced the bare name ``datums`` (NameError);
        it must append to the instance attribute.
        """
        self.datums.append(datum)

    def find_poles(self):
        """Heuristic pole search: pick a random datum, take the datum
        farthest from it as east, then the datum farthest from east as west.

        Elements are temporarily removed so a datum is never compared with
        itself; the list is restored before returning.

        BUG FIX: the original appended ``west`` even though it was never
        removed, leaving a duplicate in self.datums.
        """
        this = random_element(self.datums)
        self.datums.remove(this)
        east = farthest_from(this, self.datums)
        self.datums.remove(east)
        self.datums.append(this)
        west = farthest_from(east, self.datums)
        self.datums.append(east)
        return east, west

    def exhaustive_find_poles(self):
        """Exhaustively find the pair of datums with maximal distance.

        O(n^2) over all ordered pairs; the distance for each pair is now
        computed once instead of twice.
        """
        best = -1
        pair = None
        for datum in self.datums:
            for other in self.datums:
                d = distance(datum, other)
                if d > best:
                    best = d
                    pair = (datum, other)
        return pair[0], pair[1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.