text stringlengths 38 1.54M |
|---|
import tensorflow as tf
from tensorflow.python.framework import graph_util
from preprocessing import *
import pickle
import json
def predict(rnn_graph,texts,vocab_to_int):
    """Run the frozen TextRNN graph over `texts` and return class-1 scores.

    Parameters:
        rnn_graph: tf.Graph returned by load_pb(); tensors live under "prefix".
        texts: list of raw text strings to classify.
        vocab_to_int: dict mapping tokens to integer ids (from vocab_to_int.pkl).

    Returns:
        (probs, levels): probability of class 1 per input rounded to two
        decimals, and the same probabilities bucketed into integer levels 0-9.
    """
    # Look up input/output tensors by name in the imported graph.
    x = rnn_graph.get_tensor_by_name('prefix/Inputs/batch_ph:0')
    seq_length= rnn_graph.get_tensor_by_name('prefix/Inputs/seq_len_ph:0')
    keep_prob = rnn_graph.get_tensor_by_name('prefix/Inputs/keep_prob_ph:0')
    logits = rnn_graph.get_tensor_by_name('prefix/Fully_connected_layer/y_hat:0')
    # Tokenise each text (cut() comes from preprocessing) and map to ids,
    # padded/clipped to a fixed length of 30.
    sentences=[cut(text) for text in texts]
    sentences_to_int = get_sentence2int(sentences, vocab_to_int, 30)
    # Effective length of each sequence = index of the first 0 pad id
    # (30 when the sentence fills the whole window).
    sequences_len=[]
    for sentence_to_int in sentences_to_int:
        if 0 in sentence_to_int:
            sequences_len.append(list(sentence_to_int).index(0))
        else:
            sequences_len.append(30)
    feed = {
        x: sentences_to_int,
        seq_length:sequences_len,
        keep_prob: 1.0  # no dropout at inference time
    }
    with tf.Session(graph=rnn_graph) as sess:
        log = sess.run([logits], feed)
        # NOTE(review): tf.nn.softmax here adds a new op to the graph on
        # every call — acceptable for a one-shot script, not for a server.
        prob=sess.run(tf.nn.softmax(log[0]))
    if len(prob.shape)>1:
        prob = prob[:, 1]  # batch case: take P(class 1) per row
    else:
        prob =[prob[1]]  # single-example case: softmax returned a 1-D vector
    return [round(p, 2) for p in prob], [int(10 * p) for p in prob]
# Load a frozen (serialized) model
def load_pb(frozen_graph_filename):
    """Load a frozen GraphDef (.pb) file and return it as a new tf.Graph.

    All ops are imported under the "prefix" name scope, matching the tensor
    names predict() looks up (e.g. 'prefix/Inputs/batch_ph:0').
    """
    output_graph_def = tf.GraphDef()
    with open(frozen_graph_filename, "rb") as f:
        output_graph_def.ParseFromString(f.read())
    # Import once, into a dedicated graph, under the "prefix" scope.
    # (The original imported the GraphDef a second, redundant time into a
    # throwaway default graph with name="" — dead work, removed.)
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(output_graph_def, name="prefix")
    return graph
if __name__=='__main__':
    # Load the token -> id vocabulary produced at training time.
    with open('vocab_to_int.pkl', 'rb') as pickle_file:
        vocab_to_int = pickle.load(pickle_file)
    # Load the frozen TextRNN graph and score a few sample sentences.
    rnn_graph = load_pb('freeze_model/textrnn.pb')
    prob2=predict(rnn_graph,['后入河北衡水熟女'],vocab_to_int)
    prob3= predict(rnn_graph,['娇小的宝贝口交坚挺阴茎酷坏坏汇聚全球经典潮吹成人视频在线诱惑成人视频大全最新在线色宅色情视频排行榜免费在线点播高清视频视频'],vocab_to_int)
    prob4= predict(rnn_graph,['令人印象深刻的怪物乳房亚洲跳舞'],vocab_to_int)
    prob5= predict(rnn_graph,['全部'],vocab_to_int)
    print(prob2)
    print(prob3)
    print(prob4)
    print(prob5)
|
# DataCamp-style matplotlib exercises (scatter plots of world-development data).
# NOTE(review): assumes plt (matplotlib.pyplot), gdp_cap, life_exp, pop and col
# are provided by the surrounding course environment — confirm before running.

# Exercise 1
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()

# Exercise 2: same scatter plot with human-readable x ticks
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000,10000,100000]
tick_lab = ['1k','10k','100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()

# Exercise 3: scale marker size by population
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop * 2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Display the plot
plt.show()

# Exercise 4: add per-point colour and transparency
# Specify c and alpha inside plt.scatter()
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()

# Exercise 5: annotate two countries and add a grid
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Additional customizations
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid() call
plt.grid(True)
# Show the plot
plt.show()
# -*- coding: utf-8 -*-
'''
Created on 2012/04/11
@author: 0000131307
'''
"""
Intro to numpy (pylab bundles the numpy module, so numpy's reference applies):
-numpy resources:
http://www.geocities.jp/showa_yojyo/note/python-numpy.html#array (Japanese)
http://www.ike-dyn.ritsumei.ac.jp/~uchida/scipy-lecture-notes/intro/numpy/numpy.html
http://www.scipy.org/Tentative_NumPy_Tutorial (English)
"""
# Goal of this session: how to use pylab arrays.
# NOTE(review): Python 2 syntax (print statements) throughout.
import pylab
# Zero-filled arrays of rank 1, 2 and 3.
zeros1D = pylab.zeros([5])
zeros2D = pylab.zeros([5,4])
zeros3D = pylab.zeros([5,4,2])
print "showing zeros1D"
print zeros1D
print "showing zeros2D"
print zeros2D
print "showing zeros3D"
print zeros3D
print "***********************"
print "creating a numbered list"
rangeList = range(10) # see http://www.python.jp/doc/release/library/functions.html#range
print "this is a normal list:", rangeList
print "pylab arrayに変換"
rangeArr = pylab.array(rangeList)
print "this is a pylab array:", rangeArr
print "***********************"
print "creating a 2D array"
# Three copies of the same row list, then converted to a 3x10 array.
rangeList2D = [rangeList]*3
print "2D rangeList", rangeList2D
print "converting to rangeList2D to a 2D array"
rangeArr2D = pylab.array(rangeList2D)
print rangeArr2D
print "***********************"
print "some basic array math in 1D"
array1 = pylab.array([1,2])
array2 = pylab.array([2,3])
print "array1 is:", array1
print "array2 is:", array2
print "array1+array2:", array1+array2
print "array1*2:", array1*2
print "array1/2:", array1/2
print "array1/2.0:", array1/2.0
print "array1 + 2:", array2 + 2
print "pylab.dot(array1, array2):", pylab.dot(array1, array2)
print "***********************"
print "some basic array math and matrix math in 2D"
array1 = pylab.array([[2,3],[4,5]])
array2 = pylab.array([[5,6],
[3,7]]) # a multi-line literal like this is also fine
vector = pylab.array([1,2])
print "array1 is:"
print array1
print "array2 is:"
print array2
print "vector is:", vector
print "array1 + array2:"
print array1 + array2
print "array1 * array2:"
print array1 * array2
print "array1 + vector:"
print array1 + vector
print "array1 / vector:"
print array1 / vector
print "pylab.dot(array1, vector)"
print pylab.dot(array1, vector)
array1Inv = pylab.inv(array1)
print "pylab.inv(array1)"
print array1Inv
print "pylab.dot(array1, array1Inv)"
# result is the identity matrix:
# [[1,0],
#  [0,1]]
print pylab.dot(array1, array1Inv)
# pylab.cross also exists
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HbGoodsTradeRule(object):
    """Trade-rule limits for an HB goods item (Alipay OpenAPI model).

    All fields default to None (= "not set"); only set fields are included
    in the serialized dict.
    """

    # The field names shared by the serializer and deserializer below.
    _FIELDS = ('max_buy_frequency', 'max_buy_quantity', 'min_buy_quantity',
               'one_max_quantity', 'service_fee_ratio')

    def __init__(self):
        self._max_buy_frequency = None
        self._max_buy_quantity = None
        self._min_buy_quantity = None
        self._one_max_quantity = None
        self._service_fee_ratio = None

    @property
    def max_buy_frequency(self):
        return self._max_buy_frequency

    @max_buy_frequency.setter
    def max_buy_frequency(self, value):
        self._max_buy_frequency = value

    @property
    def max_buy_quantity(self):
        return self._max_buy_quantity

    @max_buy_quantity.setter
    def max_buy_quantity(self, value):
        self._max_buy_quantity = value

    @property
    def min_buy_quantity(self):
        return self._min_buy_quantity

    @min_buy_quantity.setter
    def min_buy_quantity(self, value):
        self._min_buy_quantity = value

    @property
    def one_max_quantity(self):
        return self._one_max_quantity

    @one_max_quantity.setter
    def one_max_quantity(self, value):
        self._one_max_quantity = value

    @property
    def service_fee_ratio(self):
        return self._service_fee_ratio

    @service_fee_ratio.setter
    def service_fee_ratio(self, value):
        self._service_fee_ratio = value

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict for the gateway.

        Fields left at None are omitted; values exposing to_alipay_dict()
        are serialized recursively. Fix: the original tested truthiness,
        which silently dropped legitimate falsy values such as a quantity
        of 0 — we now test `is not None`.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value is None:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = HbGoodsTradeRule()
        for name in HbGoodsTradeRule._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
#!/usr/bin/python3.5
# ^^ note the python directive on the first line
# COMP 9414 agent initiation file
# requires the host is running before the agent
# designed for python 3.6
# typical initiation would be (file in working directory, port = 31415)
# python3 agent.py -p 31415
# created by Leo Hoare
# with slight modifications by Alan Blair
# Modified by Oscar Downing (z5114817) and Tim Thacker (z5115699)
# === DESIGN ===
# The data structure used to keep track of all the known parts of the map is a 160 x 160 2D array.
# This size is to accommodate an 80 x 80 map which starts the agent in the corner.
# In our data structure the agent starts in the middle so whichever way the map grows it will
# never go past the edge of the array.
# Each time a new map segment is received, the map data structure is updated with the new information.
# A Breadth first search is then performed on all of the objects
# we have different versions of the bfs avoiding certain objects and allowing some to be connections
# please see the functions for further detail
# We chose breadth first search because it always produces an optimal solution.
# Within the BFS we used a dictionary / hash table for the graph (list of connections)
# If a path to the treasure/key/axe/stone/tree/door cannot be found, the agent is
# instructed to move around and discover more of the map.
# This is repeated until a path can be found to the treasure and paths can be found to all the tools required to get to the treasure.
# The path to the treasure is analysed to determine which tools are required.
# This algorithm was chosen because a solution can be generated if a possible path to the treasure is found
# and and to all the required tools
import sys
import socket
import collections
import random
import time
import copy
from collections import deque

# tools the agent has picked up so far (map symbols, may contain duplicates)
tools = []

# Map-symbol legend:
#   Obstacles            Tools
#   T tree               a axe
#   - door               k key
#   ~ water              o stepping stone
#   * wall               $ treasure
# obstacles
wall, clear, covered, edge = "*", " ", "O", "."
tree, door, water = "T", "-", "~"
# tools
key, axe, stone, treasure = "k", "a", "o", "$"

# net row/col displacement produced by the last planned action string
shift_x = 0
shift_y = 0
# position of the player; starts at the centre of the 160x160 map so the
# 80x80 world can grow in any direction without falling off the array
sx = 79
sy = 79
# current facing of the player ('^', 'v', '<' or '>')
pos = "^"
# declaring visible 5x5 grid sent to the agent each turn
view = [['' for _ in range(5)] for _ in range(5)]
# the map we build up from the 5x5 views; '?' marks unexplored cells
my_map = [['?' for _ in range(160)] for _ in range(160)]
# convert a 2d maze into a graph rep with exploring allowed
def special_maze2graph(maze, goal):
    """Build an adjacency graph of the 160x160 maze for BFS, allowing
    paths through unexplored ('?') cells.

    Doors become passable when we hold the key and goal == door; trees
    become passable when we hold the axe and goal == tree.

    Returns a dict mapping (row, col) -> list of (direction, (row, col))
    edges, direction being one of "U"/"D"/"L"/"R".
    """
    global tools
    height = 160
    width = 160
    # Wall cells are never graph nodes.
    graph = {(i, j): [] for j in range(width) for i in range(height) if not maze[i][j] == wall}
    # Cells that may not be entered, depending on which tools we hold.
    # (The original repeated the whole edge-building loop three times with
    # slightly different conditions; factored into one blocked-set.)
    blocked = {wall, edge}
    if key in tools and goal == door:
        blocked.add(tree)            # doors passable, trees still block
    elif axe in tools and goal == tree:
        blocked.add(door)            # trees passable, doors still block
    else:
        blocked.update((tree, door))  # neither passable
    for row, col in graph.keys():
        if row < height - 1 and maze[row + 1][col] not in blocked:
            graph[(row, col)].append(("D", (row + 1, col)))
            graph[(row + 1, col)].append(("U", (row, col)))
        if col < width - 1 and maze[row][col + 1] not in blocked:
            graph[(row, col)].append(("R", (row, col + 1)))
            graph[(row, col + 1)].append(("L", (row, col)))
    return graph
# convert a 2d maze into a graph rep with exploring not allowed (don't search unknown areas)
def maze2graph(maze, goal):
    """Build an adjacency graph of the 160x160 maze for BFS, treating
    unexplored ('?') cells as impassable.

    Same tool rules as special_maze2graph(): doors open with the key when
    goal == door, trees clear with the axe when goal == tree.

    Returns a dict mapping (row, col) -> list of (direction, (row, col))
    edges, direction being one of "U"/"D"/"L"/"R".
    """
    global tools
    height = 160
    width = 160
    # Wall cells are never graph nodes.
    graph = {(i, j): [] for j in range(width) for i in range(height) if not maze[i][j] == wall}
    # Cells that may not be entered; unknown cells always block here.
    # (Deduplicated from three near-identical loops in the original.)
    blocked = {wall, edge, "?"}
    if key in tools and goal == door:
        blocked.add(tree)            # doors passable, trees still block
    elif axe in tools and goal == tree:
        blocked.add(door)            # trees passable, doors still block
    else:
        blocked.update((tree, door))  # neither passable
    for row, col in graph.keys():
        if row < height - 1 and maze[row + 1][col] not in blocked:
            graph[(row, col)].append(("D", (row + 1, col)))
            graph[(row + 1, col)].append(("U", (row, col)))
        if col < width - 1 and maze[row][col + 1] not in blocked:
            graph[(row, col)].append(("R", (row, col + 1)))
            graph[(row, col + 1)].append(("L", (row, col)))
    return graph
# standard bfs - avoids water unless we have a boat
def bfs(maze, goal, x, y):
    """Breadth-first search from (x, y) to the nearest `goal` cell.

    Water cells are not expanded unless a tree (raft) is held. Returns the
    move string ("U"/"D"/"L"/"R"..., terminated by "g"), or None when the
    goal is unreachable.
    """
    global tools
    graph = maze2graph(maze, goal)
    frontier = deque([("", (x, y))])
    seen = set()
    expanded = 0
    while frontier:
        path, cell = frontier.popleft()
        # The start cell itself never counts as the goal (expanded == 0).
        if maze[cell[0]][cell[1]] == goal and expanded > 0:
            return path + "g"
        if cell in seen or (maze[cell[0]][cell[1]] == water and tree not in tools):
            continue
        seen.add(cell)
        for move, neighbour in graph[cell]:
            frontier.append((path + move, neighbour))
        expanded += 1
    return None
# exploring bfs - goes to unknown areas including crossing water
def exploring_bfs(maze, goal, x, y):
    """BFS that may route through unexplored cells, water included.

    Uses special_maze2graph(), so '?' cells are traversable. The final move
    onto the goal is dropped and replaced by "g". Returns None when no
    route exists.
    """
    global tools
    graph = special_maze2graph(maze, goal)
    frontier = deque([("", (x, y))])
    seen = set()
    expanded = 0
    while frontier:
        path, cell = frontier.popleft()
        if maze[cell[0]][cell[1]] == goal and expanded > 0:
            # Stop one step short of the goal cell.
            return path[:-1] + "g"
        if cell in seen or (maze[cell[0]][cell[1]] == water and tree not in tools):
            continue
        seen.add(cell)
        for move, neighbour in graph[cell]:
            frontier.append((path + move, neighbour))
        expanded += 1
    return None
# careful exploring bfs - goes to unknown areas not including crossing water
def careful_exploring_bfs(maze, goal, x, y):
    """BFS through unexplored cells that never expands water, even with a raft.

    Like exploring_bfs() but strictly land-bound; the final move onto the
    goal is dropped and replaced by "g". Returns None when no route exists.
    """
    global tools
    graph = special_maze2graph(maze, goal)
    frontier = deque([("", (x, y))])
    seen = set()
    expanded = 0
    while frontier:
        path, cell = frontier.popleft()
        if maze[cell[0]][cell[1]] == goal and expanded > 0:
            # Stop one step short of the goal cell.
            return path[:-1] + "g"
        if cell in seen or maze[cell[0]][cell[1]] == water:
            continue
        seen.add(cell)
        for move, neighbour in graph[cell]:
            frontier.append((path + move, neighbour))
        expanded += 1
    return None
# updates our map with the given 5x5 segment
def update_map(view):
    """Merge the agent's current 5x5 view into the global my_map.

    The agent sits at (sx, sy) and the view is centred on it, so the
    segment covers rows sx-2..sx+2 and cols sy-2..sy+2. Only unknown '?'
    cells are overwritten; the agent's own cell is marked clear (unless it
    is water, e.g. while rafting).
    """
    global my_map, sx, sy, shift_y, shift_x, start, start_pos, prev_pos, tools
    x = sx
    y = sy
    x -= 2
    y -= 2
    for i in range(5):
        for j in range(5):
            # Centre cell is the agent itself: keep water, otherwise clear.
            if i == 2 and j == 2 and my_map[i+x][j+y] != water:
                my_map[i + x][j + y] = ' '
                continue
            # Only fill in cells we have not seen before.
            if my_map[i + x][j + y] == "?" :
                my_map[i + x][j + y] = view[i][j]
                # Normalise covered 'O' cells to clear ground.
                # NOTE(review): nesting of this check under the '?' branch is
                # reconstructed from mangled indentation — confirm.
                if view[i][j] == "O":
                    my_map[i + x][j + y] = ' '
    # Remember the start cell; 's' is the home target once we hold '$'.
    my_map[79][79] = "s"
# helper function for the different bfs's
def solve_view(maze, startX, startY, goal, mode):
    """Dispatch to one of the BFS variants.

    mode 0 -> bfs (known cells only), 1 -> exploring_bfs (may cross water),
    2 -> careful_exploring_bfs (explores but avoids water).
    Returns the move string or None (also None for an unknown mode).
    """
    if mode == 0:
        return bfs(maze, goal, startX, startY)
    if mode == 1:
        return exploring_bfs(maze, goal, startX, startY)
    if mode == 2:
        return careful_exploring_bfs(maze, goal, startX, startY)
# decides the action for the AI to make
def get_action(view):
    """Plan the agent's next command string from the latest 5x5 view.

    Merges the view into the global map, picks the highest-priority
    reachable objective (home with treasure > key > door > axe > tree >
    stone > treasure > cautious exploration > any exploration), converts
    the BFS path into F/L/R plus U (unlock) / C (chop) commands, and
    updates the global position/tool state as if every command succeeds.
    """
    # update map
    update_map(view)
    global pos
    # start cords
    init_x = 2
    init_y = 2
    # which direction the player is facing (view centre shows the agent glyph)
    pos = view[init_x][init_y]
    global my_map, sx, sy
    # special end move for objects such as trees (chop) and doors (unlock)
    end_move = ""
    global shift_x, shift_y, tools
    # determine which goal is reachable, in priority order
    if "$" in tools and solve_view(my_map,sx,sy,"s",0) != None:
        # already carrying the treasure: head back to the start cell
        path = solve_view(my_map,sx,sy,"s",0)
    elif "k" not in tools and solve_view(my_map,sx,sy,"k",0) != None:
        path = solve_view(my_map,sx,sy,"k",0)
    elif "k" in tools and solve_view(my_map,sx,sy,"-",0) != None :
        end_move += "UF"   # unlock the door, then step through
        path = solve_view(my_map,sx,sy,"-",0)
    elif "a" not in tools and solve_view(my_map,sx,sy,"a",0) != None :
        path = solve_view(my_map,sx,sy,"a",0)
    elif "a" in tools and solve_view(my_map,sx,sy,"T",0) != None :
        end_move += "CF"   # chop the tree, then step through
        path = solve_view(my_map,sx,sy,"T",0)
    elif solve_view(my_map,sx,sy,"o",0) != None:
        path = solve_view(my_map,sx,sy,"o",0)
    elif solve_view(my_map,sx,sy,"$",0) != None:
        path = solve_view(my_map,sx,sy,"$",0)
    elif solve_view(my_map,sx,sy,"?",2) != None:
        # cautious exploration: never plan across water
        path = solve_view(my_map,sx,sy,"?",2)
    else:
        # last resort: explore even across water
        path = solve_view(my_map,sx,sy,"?",1)
    global start_pos
    ret = ""
    i,j = sx,sy
    prev_obj = my_map[i][j]
    curr_obj = my_map[i][j]
    # Replay the path over the map, translating U/D/L/R into turn+forward
    # commands while simulating tool pickup and consumption.
    for p in path:
        # standing on a tool (or a choppable tree): pick it up
        if my_map[i][j] == key or my_map[i][j] == axe or my_map[i][j] == stone or my_map[i][j] == treasure or (my_map[i][j] == tree and axe in tools):
            tools.append(my_map[i][j])
            my_map[i][j] = " "
        # stepping off water onto land consumes a stone first, else the raft
        if prev_obj == water and curr_obj == " " and stone in tools:
            my_map[prev_x][prev_y] = " "
            tools.remove(stone)
        elif prev_obj == water and curr_obj == " " and tree in tools and stone not in tools:
            tools.remove(tree)
        # about to walk into water without a raft: back out and stop here
        if prev_obj == " " and curr_obj == water and tree not in tools:
            i = prev_x
            j = prev_y
            ret = ret[:-1]
            # re-face north before ending the command string
            if pos == ">":
                ret += "L"
            elif pos == "<":
                ret += "R"
            elif pos == "v":
                ret += "RR"
            pos = "^"
            break
        if p == "g" :
            # goal reached: apply the special end move, then re-face north
            ret += end_move
            if pos == ">":
                ret += "L"
            elif pos == "<":
                ret += "R"
            elif pos == "v":
                ret += "RR"
            pos = "^"
            break
        # translate one grid step into turn(s) + forward, tracking facing
        if p == "U":
            prev_x = i
            prev_y = j
            if pos == "^":
                ret += "F"
            elif pos == ">":
                ret += "LF"
            elif pos == "<":
                ret += "RF"
            elif pos == "v":
                ret += "RRF"
            pos = "^"
            i-=1
        elif p == "D":
            prev_x = i
            prev_y = j
            if pos == "^":
                ret += "RRF"
            elif pos == ">":
                ret += "RF"
            elif pos == "<":
                ret += "LF"
            elif pos == "v":
                ret += "F"
            pos = "v"
            i+=1
        elif p == "L":
            prev_x = i
            prev_y = j
            if pos == "^":
                ret += "LF"
            elif pos == ">":
                ret += "RRF"
            elif pos == "<":
                ret += "F"
            elif pos == "v":
                ret += "RF"
            pos = "<"
            j-=1
        elif p == "R":
            prev_x = i
            prev_y = j
            if pos == "^":
                ret += "RF"
            elif pos == ">":
                ret += "F"
            elif pos == "<":
                ret += "LLF"
            elif pos == "v":
                ret += "LF"
            pos = ">"
            j+=1
        prev_obj = curr_obj
        curr_obj = my_map[i][j]
    # commit the simulated movement to the global position
    shift_x = i - sx
    shift_y = j - sy
    # NOTE(review): prev_pos is assigned without a global declaration, so
    # this creates a dead local — likely a latent bug; left as-is.
    prev_pos = ret
    sx += shift_x
    sy += shift_y
    return ret
# helper function to print the grid
def print_grid(view):
    """Pretty-print a 5x5 view: five |abcde| rows, then a +-----+ rule.

    NOTE(review): the closing rule is assumed to print once after the loop —
    indentation was reconstructed from a mangled source; confirm.
    """
    for ln in view:
        print("|"+str(ln[0])+str(ln[1])+str(ln[2])+str(ln[3])+str(ln[4])+"|")
    print('+-----+')
if __name__ == "__main__":
    # checks for correct amount of arguments
    if len(sys.argv) != 3:
        print("Usage Python3 "+sys.argv[0]+" -p port \n")
        sys.exit(1)
    port = int(sys.argv[2])
    # checking for valid port number
    if not 1025 <= port <= 65535:
        print('Incorrect port number')
        sys.exit()
    # creates TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # tries to connect to host; requires the host is running before the agent
        sock.connect(('localhost',port))
    except (ConnectionRefusedError):
        print('Connection refused, check host is running')
        sys.exit()
    # navigates through grid with input stream of data
    i=0
    j=0
    while 1:
        data=sock.recv(100)
        if not data:
            exit()
        # The server streams 24 bytes per view (the agent's own cell is
        # omitted); rebuild the 5x5 grid byte by byte.
        for ch in data:
            if (i==2 and j==2):
                # centre cell is the agent: insert '^' and shift this byte right
                view[i][j] = '^'
                view[i][j+1] = chr(ch)
                j+=1
            else:
                view[i][j] = chr(ch)
                j+=1
            if j>4:
                j=0
                i=(i+1)%5
            if j==0 and i==0:
                # a full 5x5 view has been received: plan and send commands
                action = get_action(view) # gets new actions
                sock.send(action.encode('utf-8'))
                time.sleep(0.1)
    sock.close()
|
#!/bin/python
# Do fitting for M samples of K columns from correlation data files (???.txt)
# For residues in RESIDS, and calculate appropriate R1,R2,R2/R1 for each sample
# using fit3.sh script, and based on samples in COLUMNS file (see below)
#
# If GENERATE_COL is 1, also generates a COLUMNS file that holds the columns to sample from
##### IMPORTS #####
import numpy.random as nprnd
import glob
import subprocess
import multiprocessing
###### PARAMS ######
M=50 # number of samples
K=4 # number of columns to sample
RESIDS=range(1,20,1) # resids to sample from (1..19 inclusive)
GENERATE_COL=1 # if zero, reuse an existing COLUMNS file instead of sampling
##### METHODS #####
def call_fit(resid,columns):
    # call fit3 for resid.txt file, with columns specified as a list of integers
    # Builds e.g. "bash ./fit7_intervals.sh F5.txt 2 4 7 9".
    # NOTE(review): hard-codes exactly four columns — assumes K == 4; confirm.
    cmd="bash ./fit7_intervals.sh F%d.txt %d %d %d %d" % (resid, columns[0], columns[1],columns[2], columns[3])
    print cmd
    subprocess.call(cmd,shell=True);
##### Main: #####
if __name__=='__main__':
    # Make F files - averaged over all six repeats in each time interval.
    # The shell loop pastes the six repeat files side by side and awk
    # averages columns 2..10 across them into F$k.txt.
    cmd='for k in `seq 1 19`; do paste $((251+$k)).txt $((270+$k)).txt $((289+$k)).txt $((308+$k)).txt $((327+$k)).txt $((346+$k)).txt | awk \'{printf("%.2f ",$1); for(i=2;i<=10;i++){printf "%.4f ",(($(i)+$(i+10)+$(i+20)+$(i+30)+$(i+40)+$(i+50))/6.0)} print ""}\' > F$k.txt; done'
    subprocess.call(cmd,shell=True)
    # Save list of random columns:
    if GENERATE_COL:
        # M samples of K distinct columns drawn from the range 2..10
        C=[(nprnd.choice(9,K,replace=False)+2) for r in xrange(M)];
        F=open('COLUMNS','w');
        for columns in C:
            for c in columns:
                print >>F, "%3d" % c,
            print >>F
        F.close()
    # Process columns for each specified residue
    # NOTE(review): processes=1 makes the pool effectively serial — confirm
    # whether this was intentional (e.g. to limit load).
    pool=multiprocessing.Pool(processes=1)
    F=open('COLUMNS','r');
    for line in F:
        columns=[int(x) for x in line.split()] # convert to int to verify proper input
        for resid in RESIDS:
            pool.apply_async(call_fit, [resid,columns])
    pool.close()
    pool.join()
    F.close()
|
import jinja2
import os
from .compiler import generate
import glob
import re
# Cache of the Jinja2 JS runtime sources, keyed by "minified"/"normal".
_lib_js = {}


def lib(minified=False):
    """Return the bundled Jinja2 JS runtime source, cached per variant.

    minified: when True, serve lib/jinja2.runtime.min.js instead of the
    full lib/jinja2.runtime.js.
    """
    global _lib_js
    key = "minified" if minified else "normal"
    if key not in _lib_js:
        runtime = os.path.join(os.path.dirname(__file__),
                               "lib/jinja2.runtime.min.js" if minified else "lib/jinja2.runtime.js")
        # Use a context manager so the handle is closed promptly
        # (the original leaked the file object returned by open()).
        with open(runtime, 'r') as f:
            _lib_js[key] = f.read()
    return _lib_js[key]
class JsJinja (object):
    """Compile Jinja2 templates into JavaScript via the bundled compiler."""

    # Name of the JS-side environment object the emitted code binds to.
    js_environment = 'Jinja2'

    def __init__(self,environment=None):
        # Use the supplied Jinja2 environment, or a fresh default one.
        self.environment = environment or jinja2.Environment()

    def generate_node(self,node,name):
        """Generate JS for an already-parsed template AST node."""
        return generate(node,self.environment,name,name,env=self.js_environment)

    def generate(self,filename):
        """Load *filename* through the environment's loader and compile it."""
        source, fn, _ = self.environment.loader.get_source(self.environment,filename)
        return self._generate(source,filename)

    def _generate(self,source,name):
        # Parse with the environment's (private) parser, then emit JS.
        node = self.environment._parse(source,name,name)
        return self.generate_node(node,name)

    def generate_all(self):
        """Compile every template the environment's loader can list,
        joined into one ;-separated JS blob."""
        if not self.environment.loader:
            raise Exception("The Jinja2 environment doesn't have a template loader associated.\nYou must specify it for using the generate_all method.")
        templates = self.environment.list_templates()
        return ';'+';\n'.join(map(self.generate,templates))+';'

    def generate_source(self,source,name=None):
        """Compile template *source* given directly as a string."""
        return self._generate(source,name)

    # Expose the module-level lib() helper on the class as well.
    lib = staticmethod(lib)
def generate_template():
from optparse import OptionParser
j = JsJinja()
usage = "usage: %prog [options] files"
parser = OptionParser(usage)
parser.add_option("-o", "--output", dest="output",default=None,
help="write output to FILE", metavar="FILE")
parser.add_option("-b", "--base", dest="base",default=None,
help="Set tempalte dir for dropping it in template name", metavar="FILE")
parser.add_option("-l", "--lib", dest="lib",default=False,
help="Include Jinja2 runtime lib", action="store_true")
(options, args) = parser.parse_args()
if not args:
raise Exception('You must specify input files')
files = []
for a in args:
files += glob.glob(a)
generated = [lib(True)] if options.lib else []
for f in files:
source = open(f).read()
if options.base:
f = re.sub('^'+options.base+'/?', '', f)
gen = j.generate_source(source, f)
generated.append(gen)
generated = ';'+';\n'.join(generated)+';'
output = options.output
if output:
with open(output,'w') as f:
f.write(generated)
else:
print generated
# j = jsjinja()
# print j.generate('template.tmpl')
|
# Generated by Django 2.1.5 on 2019-02-19 08:41
from django.db import migrations, models
import modules.core.models
class Migration(migrations.Migration):
    """Auto-generated by Django 2.1.5: tighten file/image field definitions
    (upload paths, size validators, verbose names) on core models.

    NOTE(review): generated migration — do not hand-edit field definitions;
    create a follow-up migration instead.
    """

    dependencies = [
        ('core', '0002_auto_20190217_1233'),
    ]

    operations = [
        migrations.AlterField(
            model_name='commoninfo',
            name='contract_offer',
            field=models.FileField(default=None, help_text='Максимальный размер файла 5 MB', upload_to=modules.core.models.CommonInfo.get_file_path, validators=[modules.core.models.CommonInfo.file_size], verbose_name='Договор оферты'),
        ),
        migrations.AlterField(
            model_name='commoninfo',
            name='privacy_policy',
            field=models.FileField(default=None, help_text='Максимальный размер файла 5 MB', upload_to=modules.core.models.CommonInfo.get_file_path, validators=[modules.core.models.CommonInfo.file_size], verbose_name='Политика конфиденциальности'),
        ),
        migrations.AlterField(
            model_name='course',
            name='reviews_visible',
            field=models.BooleanField(default=False, verbose_name='Показать отзывы?'),
        ),
        migrations.AlterField(
            model_name='course',
            name='stub',
            field=models.ImageField(default=None, help_text='Максимальный размер файла 1 MB; изображение формата 16:9', upload_to=modules.core.models.Course.get_image_path, validators=[modules.core.models.Course.file_size], verbose_name='Фоновое изображение'),
        ),
        migrations.AlterField(
            model_name='courseskills',
            name='picture',
            field=models.ImageField(default=None, help_text='Максимальный размер файла 1 MB; изображение формата 16:9', upload_to=modules.core.models.CourseSkills.get_image_path, validators=[modules.core.models.CourseSkills.file_size], verbose_name='Фоновое изображение'),
        ),
    ]
|
import os
import re

# Longest basename we will emit for generated file/directory names.
MAX_LEN_NAME = 50


def cut_scheme(url):
    """Strip any http:// or https:// scheme occurrences from *url*."""
    return re.sub('http[s]?://', '', url)


def make_kebab_case(name):
    """Replace every non-word character (and '_') with '-', capped at MAX_LEN_NAME."""
    kebab = re.sub('\\W|_', '-', name)
    return kebab[:MAX_LEN_NAME] if len(kebab) > MAX_LEN_NAME else kebab


def make_file_name(url):
    """Turn a URL into a flat kebab-case file name, defaulting to a .html extension."""
    path, extension = os.path.splitext(cut_scheme(url))
    return make_kebab_case(path) + (extension or '.html')


def make_dir_name(url):
    """Turn a URL into the companion assets directory name (suffix '_files')."""
    return make_kebab_case(cut_scheme(url)) + '_files'
|
import pymongo
from pymongo import MongoClient
from operator import attrgetter  # NOTE(review): unused in this file — confirm before removing

# Connect to the local MongoDB and work on the social_net.tweets collection.
client = MongoClient()
db = client.social_net
# NOTE(review): this cursor is never consumed below — confirm it is needed.
answer = db.tweets.find()
def GetAllUniqueUsers():
    """Print how many distinct users appear in the tweets collection."""
    unique_users = db.tweets.distinct("user")
    print(len(unique_users))
def GetMostLinkingUsers():
    """Print the 10 users whose tweets mention (@user) others most often."""
    pipeline = [
        # Only tweets containing at least one @mention.
        {'$match':{'text':{'$regex':"@\w+"}}},
        # One point per mentioning tweet, grouped by author.
        # (Removed a dead '$addFields: {mentions: 1}' stage: $group only
        # emits _id plus its own accumulators, so that field was discarded.)
        {'$group':{"_id":"$user", "mentions":{'$sum':1}}},
        {'$sort':{"mentions":-1}},
        {'$limit':10}]
    cursorlist = db.tweets.aggregate(pipeline)
    for cursor in cursorlist:
        print(cursor)
def GetMostLinkedUsers():
    """Print the 5 most-mentioned @handles across all tweets."""
    stages = [
        {'$addFields': {'words': {'$split': ['$text', ' ']}}},  # split text into words
        {'$unwind': "$words"},                                  # one document per word
        {'$match': {'words': {'$regex': "@\w+", '$options': 'm'}}},  # keep only @mentions
        {'$group': {'_id': "$words", 'total': {'$sum': 1}}},
        {'$sort': {'total': -1}},                               # most mentioned first
        {'$limit': 5},
    ]
    for mention in db.tweets.aggregate(stages):
        print(mention)
def GetMostActiveUser():
    """Print the 10 users with the most tweets."""
    stages = [
        {'$group': {'_id': "$user", 'total': {'$sum': 1}}},
        {'$sort': {'total': -1}},
        {'$limit': 10},
    ]
    for author in db.tweets.aggregate(stages):
        print(author)
def GetMostNegativeTweets():
    """Print 5 users with the lowest average polarity among negative-keyword tweets."""
    stages = [
        # Keep tweets containing any negative keyword.
        {'$match':{'text': {'$regex':'pissed|mad|angry|sad|furious|outraged','$options':'g'}}},
        # Average polarity and count of such tweets, per user.
        {'$group':{'_id':"$user", 'emotion': {'$avg':"$polarity"}, 'total_negative_tweets': {'$sum': 1}}},
        # Saddest first; break ties by volume of negative tweets.
        {'$sort':{ 'emotion': 1, 'total_negative_tweets':-1}},
        {'$limit':5}
    ]
    for gloomy_user in db.tweets.aggregate(stages):
        print(gloomy_user)
def GetMostPositiveTweets():
    """Print 5 users with the highest average polarity among positive-keyword tweets."""
    stages = [
        # Keep tweets containing any positive keyword.
        {'$match':{'text': {'$regex':'happy|excited|great|amazing|love|enticed','$options':'g'}}},
        # Average polarity and count of such tweets, per user.
        {'$group':{'_id':"$user", 'emotion': {'$avg':"$polarity"},'total_positive_tweets': {'$sum': 1}}},
        # Happiest first; break ties by volume of positive tweets.
        {'$sort':{ 'emotion': -1, 'total_positive_tweets':-1}},
        {'$limit':5}
    ]
    for cheerful_user in db.tweets.aggregate(stages):
        print(cheerful_user)
# Run each report in turn against the tweets collection.
print("Get Total Number Of Unique Users")
GetAllUniqueUsers()
print("Get Most Linking Users:")
GetMostLinkingUsers()
print("Get Most Linked Users:")
GetMostLinkedUsers()
print("Get Most Active Users:")
GetMostActiveUser()
print("Get The Happiest Users:")
GetMostPositiveTweets()
print("Get The Saddest Users:")
GetMostNegativeTweets()
|
import requests,re,ast,json
class quizlet_words(object):
    """Scrape the flashcard terms of a Quizlet set.

    self.wd holds a list of {"word": ..., "def": ...} dicts; it stays empty
    when the id is invalid or the page could not be fetched/parsed.
    """

    def __init__(self,id):
        self.id = id
        self.link = "https://quizlet.com/%s/flashcards" % id
        self.wd = []
        if self.isValid():
            self.wd = self.__reqData()

    @classmethod
    def initFromLink(cls,link):
        """Build from a full quizlet.com URL; falls back to the invalid id "-1"."""
        try:
            id = re.findall(r"com/.*?/", link)[0][4:-1:]
            return cls(id)
        except Exception:
            # No set id found in the link (or link is not a string).
            # Fix: narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt).
            return cls("-1")

    def __reqData(self):
        """Fetch the flashcards page and extract the embedded terms JSON."""
        wd = None
        try:
            raw = requests.get(self.link)
            raw.encoding = "utf-8"
            for i in re.finditer(r"<script>.*?</script>", raw.text):
                if "Quizlet.cardsModeData" in i.group():
                    # Slice out the JSON object assigned to Quizlet.cardsModeData.
                    wd = i.group()
                    wd = wd[wd.find("=") + 2:wd.find("};") + 1:]
                    wd = json.loads(wd)
                    break
            wd = [{"word": i["word"], "def": i["definition"]} for i in wd["terms"]]
        except Exception:
            # Network failure or page-format change: treat as "no data".
            # Fix: narrowed from a bare `except:`.
            pass
        if wd is None:
            return []
        return wd

    def isNone(self):
        """True when no words were loaded."""
        return not bool(self.wd)

    def isValid(self):
        """True unless the sentinel invalid id "-1" was set."""
        if self.id =="-1":
            return False
        return True

    def getData(self,ps=30):
        """Split self.wd into pages of at most *ps* entries each."""
        parsedlist = [[] for i in range((len(self.wd) - 1) // ps + 1)]
        for i in range(len(self.wd)):
            parsedlist[i // ps].append(self.wd[i])
        return parsedlist
|
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if s reads the same forwards and backwards when only
        ASCII letters and digits are considered (case-insensitive)."""
        if len(s) < 2:
            return True
        allowed = set('abcdefghijklmnopqrstuvwxyz0123456789')
        cleaned = [ch for ch in s.lower() if ch in allowed]
        return cleaned == cleaned[::-1]
if __name__ == '__main__':
    # quick manual check against the classic example
    result = Solution().isPalindrome("A man, a plan, a canal: Panama")
    print(result)
|
#!/usr/bin/python
#
# Determine if this tool can run a test based on a test spec.
#
import datetime
import sys
import pscheduler
logger = pscheduler.Log(prefix='tool-bwctliperf3', quiet=True)
# The test spec arrives as JSON on stdin; exit immediately on bad input.
json = pscheduler.json_load(exit_on_error=True)
logger.debug("can-run for %s" % json)
try:
    if json['type'] != 'throughput':
        pscheduler.succeed_json({
            "can-run": False,
            "reasons": [ "Unsupported test type" ]
        })
except KeyError:
    pscheduler.succeed_json({
        "can-run": False,
        "reasons": [ "Missing test type" ]
    })
if not json.get('spec'):
    pscheduler.succeed_json({
        "can-run": False,
        "reasons": [ "Missing test spec" ]
    })
try:
    spec = json["spec"]
    pscheduler.json_check_schema(spec, 1)
except KeyError:
    pscheduler.succeed_json({
        "can-run": False,
        "reasons": ["Missing test specification"]
    })
except ValueError as ex:
    pscheduler.succeed_json({
        "can-run": False,
        "reasons": [str(ex)]
    })
errors = []
try:
    source = spec['source']
except KeyError:
    source = None
try:
    destination = spec['dest']
except KeyError:
    errors.append("Missing dest argument in spec")
unsupported_options = [ 'congestion', 'zero-copy', 'flow-label', 'client-cpu-affinity', 'server-cpu-affinity', 'reverse' ]
for unsupported_option in unsupported_options:
    # `dict.has_key()` was removed in Python 3; `in` works in both 2 and 3
    if unsupported_option in spec:
        errors.append("BWCTL does not support %s" % unsupported_option)
# This digs up everything we can on the hosts involved in a
# time-predictable way.
has = pscheduler.api_has_services([source, destination], timeout=3)
src_has_psc = has[source]["pscheduler"]
dst_has_psc = has[destination]["pscheduler"]
if src_has_psc and dst_has_psc:
    errors.append("Both sides have pscheduler, no need for BWCTL")
if not has[destination]["bwctl"]:
    logger.debug("No BWCTL at destination")
    errors.append("No BWCTL on %s" % destination)
logger.debug("can-run succeeded")
result = {
    "can-run": len(errors) == 0
}
if len(errors) > 0:
    result["reasons"] = errors
pscheduler.succeed_json(result)
|
def longestValidParentheses(self, s):
    """DP variant: dp[i] holds the length of the longest valid parentheses
    substring that ends exactly at s[i-1]."""
    n = len(s)
    dp = [0] * (n + 1)
    best = 0
    for i in range(2, n + 1):
        if s[i - 1] != ')':
            continue
        open_pos = i - dp[i - 1] - 2
        if open_pos >= 0 and s[open_pos] == '(':
            # close the pair around the run ending at i-2, then glue on any
            # valid run that ends just before the matching '('
            dp[i] = dp[i - 1] + 2 + dp[open_pos]
        best = max(best, dp[i])
    return best
###
Let dp[i] be the length of the longest valid parentheses substring ending at position i - 1 of s; then we have the following relationship: dp[i + 1] = dp[p] + i - p + 1, where p is the position of the '(' that matches the current ')' on the stack.
def longestValidParentheses(self, s):
    """Stack variant: for each matched pair, extend the valid run that ended
    just before the matching '(' (dp[i+1] = run ending at s[i])."""
    n = len(s)
    dp = [0] * (n + 1)
    opens = []
    for i, ch in enumerate(s):
        if ch == '(':
            opens.append(i)
        elif opens:
            p = opens.pop()
            dp[i + 1] = dp[p] + (i - p + 1)
    return max(dp)
#####
class Solution(object):
    def longestValidParentheses(self, s):
        """
        :type s: str
        :rtype: int

        1D DP where dp[i] records the longest valid parentheses substring
        ending EXACTLY at s[i]. (`xrange` replaced by `range`: xrange was
        removed in Python 3, and the sibling solutions here use range.)
        """
        dp = [0 for x in range(len(s))]
        max_to_now = 0
        for i in range(1, len(s)):
            if s[i] == ')':
                # case 1: ...() — add nearest pair onto the run before it
                if s[i-1] == '(':
                    dp[i] = dp[i-2] + 2
                # case 2: (...) — i-dp[i-1]-1 is the index of the last "("
                # not yet paired before this ")"
                elif i-dp[i-1]-1 >= 0 and s[i-dp[i-1]-1] == '(':
                    if dp[i-1] > 0:  # content within current matching pair is valid
                        # pair + inner run + any run before the matching "("
                        dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]
                    else:
                        # otherwise the run is broken here
                        dp[i] = 0
                max_to_now = max(max_to_now, dp[i])
        return max_to_now
#####
class Solution(object):
    def longestValidParentheses(self, s):
        """
        :type s: str
        :rtype: int

        Stack of unmatched '(' indices seeded with -1; dic[k] accumulates the
        valid length attributable to the run above index k. (`xrange`
        replaced by `range` — xrange was removed in Python 3.)
        """
        dic = {}
        dic[-1] = 0
        stack = []
        stack.append(-1)
        result = 0
        for i in range(0, len(s)):
            if s[i] == '(':
                dic[i] = 0
                stack.append(i)
            else:
                if len(stack) < 2:
                    # unmatched ')': flush and reset the run at the sentinel
                    result = max(result, dic[stack[-1]])
                    dic[stack[-1]] = 0
                    continue
                # close the pair: credit its span to the element below it
                dic[stack[-2]] += i - stack[-1] + 1
                result = max(result, dic[stack[-2]])
                stack.pop()
        return result
###
def longestValidParentheses(self, s):
    """Stack-of-lengths variant: each stack slot holds the valid length
    accumulated at that nesting depth; an unmatched ')' resets the stack.
    (`xrange` replaced by `range` — xrange was removed in Python 3.)
    """
    stack, longest, index = [0], 0, 0
    for index in range(0, len(s)):
        if s[index] == ")" and len(stack) != 1:
            # close one level: inner length + enclosing pair + prior run
            length, last = stack.pop(), stack.pop()
            total_length = last + length + 2
            stack.append(total_length)
            longest = max(longest, total_length)
        elif s[index] == "(":
            stack.append(0)
        else:
            # unmatched ')': no valid run can cross this point
            stack = [0]
    return longest
#####
class Solution:
    # @param {string} s
    # @return {integer}
    def longestValidParentheses(self, s):
        """Track, per index, the valid run ending there; unmatched '('
        positions are kept on a stack and paired off as ')' arrive."""
        if not s:
            return 0
        ending = [0] * len(s)
        open_stack = []
        for idx, ch in enumerate(s):
            if ch == "(":
                open_stack.append(idx)
            elif open_stack:
                start = open_stack.pop()
                if idx > 0 and idx - start == ending[idx - 1] + 1:
                    # the pair wraps the run ending at idx-1 exactly
                    ending[idx] = ending[idx - 1] + 2
                    if start > 0:
                        ending[idx] += ending[start - 1]
        return max(ending)
###
def longestValidParentheses(self, s):
    """ as the ")" will not effect the final result, which acts as a dummy element to
    make the all the original elements of s equivalently,
    otherwise the first element needs to be dealt with separately.
    (`xrange` replaced by `range` — xrange was removed in Python 3.)
    """
    s = ")" + s
    stack, ans = [], 0
    for index in range(len(s)):
        element = s[index]
        if element == ")" and stack and stack[-1][1] == "(":
            # matched pair: run length is measured from the element below
            stack.pop()
            ans = max(ans, index - stack[-1][0])
        else:
            stack.append((index, element))
    return ans
|
def fabonacci(nth):
    """Return the nth Fibonacci number, 1-indexed: F(1)=0, F(2)=1, F(3)=1, ...

    Prints a message and returns None for nth < 1. (Previously nth == 0 fell
    through to the recursive case and crashed via fabonacci(-1) + fabonacci(-2);
    the naive double recursion was also exponential — this iterative form is
    O(nth) with identical return values.)
    """
    if nth < 1:
        print("Enter a positive number.")
        return None
    a, b = 0, 1
    for _ in range(nth - 1):
        a, b = b, a + b
    return a
print(fabonacci(10))
|
# 1.7 Flip it!
def flip_vertical_axis(matrix):
    """Mirror `matrix` across its vertical axis, in place: each row is reversed.

    The previous implementation swapped the two halves of each row instead of
    reversing it, so it only produced a true mirror for rows of width <= 3
    (e.g. [a, b, c, d] became [c, d, a, b] rather than [d, c, b, a]).
    """
    for i, row in enumerate(matrix):
        # replace each row with a reversed copy (same in-place contract:
        # rows are rebound, the outer list object is mutated)
        matrix[i] = row[::-1]
|
import tensorflow as tf
from utility.tf_utils import assert_rank
from core.module import Ensemble
from nn.func import rnn
from algo.ppo.nn import Encoder, Actor, Value
class PPO(Ensemble):
    """PPO actor-critic ensemble: encoder (+ optional RNN) feeding an actor
    head and a value head. Sub-modules are built by create_components."""
    def __init__(self, config, env, **kwargs):
        super().__init__(
            model_fn=create_components,
            config=config,
            env=env,
            **kwargs)
    @tf.function
    def action(self, x, state, mask, evaluation=False,
                prev_action=None, prev_reward=None, **kwargs):
        # Sample an action for observation x; in training mode also return
        # the value estimate and log-probability needed for the PPO loss.
        # NOTE(review): the even-rank assertion presumably distinguishes
        # "no time axis yet" inputs (rank 2 features / rank 4 images) from
        # time-major ones — confirm against the caller.
        assert x.shape.ndims % 2 == 0, x.shape
        x, state = self.encode(
            x, state, mask, prev_action, prev_reward)
        act_dist = self.actor(x, evaluation=evaluation)
        action = self.actor.action(act_dist, evaluation)
        if evaluation:
            # evaluation: deterministic-ish action only, no training terms
            return action, state
        else:
            value = self.value(x)
            logpi = act_dist.log_prob(action)
            terms = {'logpi': logpi, 'value': value}
            # intend to keep the batch dimension for later use
            out = (action, terms)
            return out, state
    @tf.function(experimental_relax_shapes=True)
    def compute_value(self, x, state, mask,
            prev_action=None, prev_reward=None, return_state=False):
        # Value-only forward pass (e.g. for bootstrapping returns); shares
        # the encoder/RNN path with action().
        x, state = self.encode(
            x, state, mask, prev_action, prev_reward)
        value = self.value(x)
        if return_state:
            return value, state
        else:
            return value
    def encode(self, x, state, mask, prev_action=None, prev_reward=None):
        # Run the encoder and (if present) the RNN. Inputs without a time
        # axis get a singleton one added and squeezed back off at the end.
        if x.shape.ndims % 2 == 0:
            x = tf.expand_dims(x, 1)
        if mask.shape.ndims < 2:
            mask = tf.reshape(mask, (-1, 1))
        assert_rank(mask, 2)
        x = self.encoder(x)
        if hasattr(self, 'rnn'):
            additional_rnn_input = self._process_additional_input(
                x, prev_action, prev_reward)
            x, state = self.rnn(x, state, mask,
                additional_input=additional_rnn_input)
        else:
            # no RNN configured: state is pass-through None
            state = None
        if x.shape[1] == 1:
            x = tf.squeeze(x, 1)
        return x, state
    def _process_additional_input(self, x, prev_action, prev_reward):
        # Normalize prev_action / prev_reward to rank-3 (batch, time, feat)
        # tensors so they can be concatenated as extra RNN input.
        results = []
        if prev_action is not None:
            if self.actor.is_action_discrete:
                # discrete actions become one-hot features
                if prev_action.shape.ndims < 2:
                    prev_action = tf.reshape(prev_action, (-1, 1))
                prev_action = tf.one_hot(prev_action, self.actor.action_dim, dtype=x.dtype)
            else:
                if prev_action.shape.ndims < 3:
                    prev_action = tf.reshape(prev_action, (-1, 1, self.actor.action_dim))
                assert_rank(prev_action, 3)
            results.append(prev_action)
        if prev_reward is not None:
            # scalar rewards become a trailing feature dimension of size 1
            if prev_reward.shape.ndims < 2:
                prev_reward = tf.reshape(prev_reward, (-1, 1, 1))
            elif prev_reward.shape.ndims == 2:
                prev_reward = tf.expand_dims(prev_reward, -1)
            assert_rank(prev_reward, 3)
            results.append(prev_reward)
        assert_rank(results, 3)
        return results
    def reset_states(self, states=None):
        # No-op when no RNN is configured.
        if hasattr(self, 'rnn'):
            self.rnn.reset_states(states)
    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
        # Returns None for feed-forward (RNN-less) configurations.
        return self.rnn.get_initial_state(
            inputs, batch_size=batch_size, dtype=dtype) \
                if hasattr(self, 'rnn') else None
def create_components(config, env):
    """Instantiate the PPO sub-modules (encoder, rnn, actor, value) from config."""
    action_dim = env.action_dim
    is_action_discrete = env.is_action_discrete
    encoder_config = config['encoder']
    if 'cnn_name' in encoder_config:
        # mark CNN encoders as time-distributed (mutates config, as before)
        encoder_config['time_distributed'] = True
    return dict(
        encoder=Encoder(encoder_config),
        rnn=rnn(config['rnn']),
        actor=Actor(config['actor'], action_dim, is_action_discrete),
        value=Value(config['value']),
    )
def create_model(config, env, **kwargs):
    """Factory returning a PPO ensemble built from config and env."""
    model = PPO(config, env, **kwargs)
    return model
|
from DataManipulation import UseFirebase, XMLManipulate
def check_article_connection(): #Make sure there are articles, and can be connected
    """ function to ensure DB connection and format """
    assert len(UseFirebase.get_articles()) > 0 #There are articles in the DB that I manually seeded to check the connection
def check_feed_connection(): #Make sure there are feeds, and can be connected
    """ function to ensure DB connection and format """
    # NOTE(review): despite the name, this calls get_articles(), identical to
    # check_article_connection above — likely a copy-paste; confirm whether
    # UseFirebase exposes a get_feeds() that should be used here instead.
    assert len(UseFirebase.get_articles()) > 0 #There are articles in the DB that I manually seeded to check the connection
def check_raw_data_get(): #Make sure dict contains proper data
    """ function to make sure data gets properly formatted. """
    # Pulls a live Google Alerts Atom feed and checks the raw parse shape.
    output = XMLManipulate.convert_raw_data("https://www.google.com/alerts/feeds/00698824683610429194/1098921106065488311")
    assert isinstance(output, dict)
    assert len(output["entry"]) > 0 #Google Alerts feeds prepopulate, and given that this is a COVID feed, it should have 20 articles
    assert output["entry"][0]["title"]["#text"] #Make sure title exists
    print(output["entry"][0]["title"]["#text"]) #Make sure title is properly formatted
def check_data_formatter():
    """ function to make sure data gets properly formatted. """
    #Run function and save output
    # Full pipeline: raw feed XML -> dict -> normalized "entries" schema.
    output = XMLManipulate.raw_data_to_dict(XMLManipulate.convert_raw_data("https://www.google.com/alerts/feeds/00698824683610429194/1098921106065488311"))
    assert isinstance(output, dict)
    assert len(output["entries"]) > 0 #Google Alerts feeds prepopulate, and given that this is a COVID feed, it should have 20 articles
    assert output["entries"][0]["title"] #Make sure title exists under proper name
    print(output["entries"][0]["title"]) #Make sure title is properly formatted
    print(output["entries"][0]["published"]) #Make sure title is a real date
def run_tests(): #GET THEM TESTS BOI
    """ function to run all tests """
    checks = (
        check_feed_connection,
        check_article_connection,
        check_raw_data_get,
        check_data_formatter,
    )
    for check in checks:
        check()
from entities import RateCategoryEntity, RoundEntity, TrackEntity, RateEntity, PairEntity
from models import RateCategory, Round, Track, Rate, Pair
def rate_category_orm_to_entity(rate_category_orm: RateCategory) -> RateCategoryEntity:
    """Map a RateCategory ORM row onto its transport entity."""
    fields = {
        "id": rate_category_orm.id,
        "name": rate_category_orm.name,
    }
    return RateCategoryEntity(**fields)
def round_orm_to_entity(round_category_orm: Round) -> RoundEntity:
    """Map a Round ORM row onto its entity; the `type` enum is flattened to str."""
    round_type = str(round_category_orm.type.value)
    return RoundEntity(
        id=round_category_orm.id,
        number=round_category_orm.number,
        theme=round_category_orm.theme,
        style=round_category_orm.style,
        type=round_type,
        last_day=round_category_orm.last_day,
    )
def track_orm_to_entity(track_orm: Track, user_username: str, user_color: str) -> TrackEntity:
    """Map a Track ORM row onto its entity, enriched with the owner's display data."""
    fields = {
        "id": track_orm.id,
        "name": track_orm.name,
        "user_id": track_orm.user_id,
        "user_username": user_username,
        "user_color": user_color,
    }
    return TrackEntity(**fields)
def rate_orm_to_entity(rate_orm: Rate, user_username: str, user_color: str) -> RateEntity:
    """Map a Rate ORM row onto its entity, enriched with the rater's display data."""
    fields = {
        "category_id": rate_orm.category_id,
        "user_id": rate_orm.user_id,
        "track_id": rate_orm.track_id,
        "user_username": user_username,
        "user_color": user_color,
    }
    return RateEntity(**fields)
def pair_orm_to_entity(pair_orm: Pair) -> PairEntity:
    """Map a Pair ORM row (up to three users in a round) onto its entity."""
    fields = {
        "id": pair_orm.id,
        "user_one_id": pair_orm.user_one_id,
        "user_two_id": pair_orm.user_two_id,
        "user_three_id": pair_orm.user_three_id,
        "round_id": pair_orm.round_id,
    }
    return PairEntity(**fields)
import argparse
import crossref_commons.retrieval
import json
import os
import requests
from bs4 import BeautifulSoup
from datetime import datetime
def current_timestamp():
    '''
    Returns current timestamp for logging purposes
    '''
    now = datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
def repec_page(base, journal):
    '''
    Returns total page for a certain journal in RePEc
    '''
    # Retries until the archive page answers 200 or 404; the page count is
    # the number of distinct pagination links found.
    repec = f'{base}/{journal}.html'
    print(f'{current_timestamp()}: {repec}')
    status_code = []  # placeholder so the first `not in [200, 404]` test passes
    while status_code not in [200, 404]:
        try:
            res = requests.get(repec, timeout=5)
            status_code = res.status_code
            content = BeautifulSoup(res.content, features='html.parser')
            pagination = content.find('ul', {'class': 'pagination flex-wrap'})
            # NOTE(review): if the page has no pagination list, `pagination`
            # is None and the next line raises AttributeError, which the
            # outer except swallows — on a 200 response that retries forever.
            href = [x for x in pagination.find_all('a')]
            urls = []
            for h in href:
                try:
                    urls.append(h['href'])
                except KeyError:
                    # anchor without an href attribute — skip it
                    pass
            try:
                urls = set(urls)
                return len(urls)
            except UnboundLocalError:
                pass
        except Exception as e:
            # deliberate catch-all: log and retry the request
            print(f'{current_timestamp()}: {e}')
            pass
def repec_paper(base, page, journal):
    '''
    Returns a list of paper URLs from RePEc.
    Examples:
    - https://ideas.repec.org/a/oup/restud/v87y2020i6p2511-2541..html
    - https://ideas.repec.org/a/oup/restud/v87y2020i6p2473-2510..html
    '''
    # Page 1 has no numeric suffix; later pages are journal2.html, journal3.html, ...
    url = f'{base}/{journal}.html'
    if page > 1:
        url = f'{base}/{journal}{page}.html'
    print(f'{current_timestamp()}: {url}')
    status_code = []  # placeholder so the first `not in [200, 404]` test passes
    while status_code not in [200, 404]:
        try:
            res = requests.get(url, timeout=5)
            status_code = res.status_code
            content = BeautifulSoup(res.content, features='html.parser')
            content = content.find('div', {'id': 'content'})
            href = content.find_all('a')
            urls = []
            for h in href:
                try:
                    urls.append(h['href'])
                except KeyError:
                    # anchor without an href attribute — skip it
                    pass
        except Exception as e:
            # deliberate catch-all: log and retry the request
            print(f'{current_timestamp()}: {e}')
            pass
    # NOTE(review): if every attempt fails before `urls` is assigned (e.g. a
    # 404 with no content div), this return raises NameError — confirm intent.
    return urls
def download(base, paper):
    '''
    Returns download URL from RePEc.
    Examples:
    - http://hdl.handle.net/10.1093/restud/rdaa026
    - http://hdl.handle.net/10.1093/restud/rdz066
    '''
    url = f'{base}/{paper}'
    print(f'{current_timestamp()}: {url}')
    status_code = []  # placeholder so the first `not in [200, 404]` test passes
    while status_code not in [200, 404]:
        try:
            res = requests.get(url, timeout=5)
            status_code = res.status_code
            content = BeautifulSoup(res.content, features='html.parser')
            # the paper page's download section contains the handle.net link
            download = content.find('div', {'id': 'download'})
        except Exception as e:
            # deliberate catch-all: log and retry the request
            print(f'{current_timestamp()}: {e}')
            pass
    # NOTE(review): `download` is unbound if every attempt raised before the
    # find() call succeeded — NameError here in that case; confirm intent.
    return download
def crossref(download, path):
    '''
    Download metadata from Crossref into a JSON file

    `download` is the BeautifulSoup "download" div from download(); the radio
    input's value carries a handle.net URL which resolves to a DOI.
    '''
    try:
        paper = download.find('input', {'type': 'radio'})['value']
        inputval = {
            'https://hdl.handle.net/': 'https://doi.org/',
            'http://hdl.handle.net/': 'https://doi.org/'
        }
        for key, value in inputval.items():
            # use the mapped value (it was previously hard-coded, leaving
            # `value` unused — same result, but now the table is authoritative)
            paper = paper.replace(key, value)
        print(f'{current_timestamp()}: {paper}')
        try:
            try:
                paper = crossref_commons.retrieval.get_publication_as_json(paper)
                # no need to re-download existing file
                file_name = paper['DOI'].replace('/', '-')
                if not os.path.exists(f'{path}/{file_name}.json'):
                    with open(f'{path}/{file_name}.json', 'w') as f:
                        json.dump(paper, f, indent=4)
            except ConnectionError:
                # transient network failure: skip this paper
                pass
        except ValueError:
            print(f'DOI {paper} does not exist')
    except Exception as e:
        # best-effort: log and move on to the next paper
        print(f'{current_timestamp()}: {e}')
        pass
if __name__ == '__main__':
    journals = [
        # These are not an exhaustive list
        's/aea/aecrev', # American Economic Review
        's/aea/jeclit', # Journal of Economic Literature
        's/aea/jecper', # Journal of Economic Perspectives
        's/bla/jfinan', # Journal of Finance
        's/eee/jfinec', # Journal of Financial Economics
        's/kap/jecgro', # Journal of Economic Growth
        's/oup/qjecon', # The Quarterly Journal of Economics
        's/oup/restud', # Review of Economic Studies
        's/oup/rfinst', # Review of Financial Studies
        's/ucp/jpolec', # Journal of Political Economy
        's/wly/emetrp' # Econometrica
    ]
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('-j', '--journal', type=str, choices=journals, help=f'Journal name: {journals}', metavar='')
    ARGS = PARSER.parse_args()
    JOURNAL = ARGS.journal
    PATH = f"data/crossref/{JOURNAL.replace('/', '-')}"
    # makedirs creates missing parents too (os.mkdir crashed when data/crossref
    # did not exist yet) and tolerates an already-existing directory
    os.makedirs(PATH, exist_ok=True)
    BASE = 'https://ideas.repec.org'
    page = repec_page(BASE, JOURNAL)
    paper_pages = [repec_paper(BASE, x, JOURNAL) for x in range(1, page + 1)]
    # flatten the per-page URL lists
    papers = [url for page_urls in paper_pages for url in page_urls]
    # fresh names: the original rebound `download` and `crossref` over the
    # functions of the same name, silently breaking any later call
    downloads = [download(BASE, x) for x in papers]
    results = [crossref(x, PATH) for x in downloads]
|
from pandas import DataFrame
import blocks
import load_data
import numpy as np
from numpy.testing import assert_array_equal
def test_purity():
    """blocks.purity should report 0 misses for a pure block and 1 for block 4,
    whose majority label (-1) is contradicted by one +1 row."""
    ids = range(10)
    labels = [1, 1, 1, -1, -1, -1, 1, 1, -1, -1]
    blocks_ = [1, 1, 1, 2, 2, 3, 3, 4, 4, 4]
    df = DataFrame({'ids': ids, 'label': labels, 'block': blocks_})
    res = blocks.purity(df).set_index('block')
    assert res.loc[1].miss == 0
    assert res.loc[4].miss == 1
    # the statement form `print res.mean()` was Python-2-only syntax;
    # the call form below behaves identically on Python 2 and 3
    print(res.mean())
def test_recognizer():
    """blocks.recognizer should reproduce the hand-labelled block boundaries
    on the first 50 rows of the sample dataset."""
    sample_path = 'data/sample1000/'
    df = load_data.data_df(sample_path).head(50)
    expected = np.zeros(len(df))
    expected[2:45] = 1
    expected[45:] = 2
    df_true_blocks = DataFrame({'session_id': df.session_id,
                                'block': expected})
    df_rec_blocks = blocks.recognizer(df)
    assert_array_equal(df_true_blocks.block.values,
                       df_rec_blocks.block.values)
def test_postprocess():
    """blocks.postprocess should snap probabilities in large blocks to 0/1
    by their position relative to the thresholds, and leave small blocks
    (size < min_size) untouched."""
    ids = np.arange(20)
    blocks_ = np.empty(20)
    blocks_[0:2] = 0
    blocks_[2:5] = 1
    blocks_[5:10] = 2
    blocks_[10:20] = 3
    # np.float was removed in NumPy 1.24; it was an alias of the builtin
    # float (i.e. float64), so this is behaviorally identical
    proba = np.empty(20, dtype=float)
    proba[0:2] = np.random.uniform(low=.8, high=.9, size=2)
    proba[2:5] = np.random.uniform(low=.01, high=.99, size=3)
    proba[5:10] = np.random.uniform(low=.8, high=.9, size=5)
    proba[10:20] = np.random.uniform(low=.1, high=.3, size=10)
    exp_proba = np.empty(20, dtype=float)
    exp_proba[0:2] = proba[0:2]   # block too small: unchanged
    exp_proba[2:5] = proba[2:5]   # block too small: unchanged
    exp_proba[5:10] = 1           # all above upper_proba -> 1
    exp_proba[10:20] = 0          # all below lower_proba -> 0
    df = DataFrame({'session_id': ids,
                    'block': blocks_,
                    'proba': proba})
    df_processed = blocks.postprocess(df, min_size=3, lower_proba=.4,
                                      upper_proba=.7)
    assert_array_equal(df_processed.proba.values, exp_proba)
    #print df
    #print df_processed
if __name__ == "__main__":
    # run the suite in the original order
    for test in (test_postprocess, test_purity, test_recognizer):
        test()
|
# -*- coding: utf-8 -*-
# Verbatim text of the Apache Software License, version 1.1, embedded as a
# data string (e.g. as a license-detection fixture). Do not edit the text.
data = u"""Apache Software License
Version 1.1
Copyright (c) 2000 The Apache Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment:
"This product includes software developed by the Apache Software Foundation (http://www.apache.org/)."
Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear.
4. The names "Apache" and "Apache Software Foundation" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact apache@apache.org.
5. Products derived from this software may not be called "Apache", nor may "Apache" appear in their name, without prior written permission of the Apache Software Foundation.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This software consists of voluntary contributions made by many individuals on behalf of the Apache Software Foundation. For more information on the Apache Software Foundation, please see <http://www.apache.org/>.
Portions of this software are based upon public domain software originally written at the National Center for Supercomputing Applications, University of Illinois, Urbana-Champaign."""
|
import re
from .ncbi_ranks import _TAXONOMIC_RANKS
from htstk.utils import log
# canonical levels kept by default when pruning the taxonomy tree
_taxa_levels = [
    'root',
    'kingdom',
    'phylum',
    'class',
    'order',
    'family',
    'genus',
    'species'
]
class NCBITaxonomyInMem():
    """In-memory NCBI taxonomy built from the nodes.dmp / names.dmp dumps.

    self.nodes maps tax_id -> (scientific name, parent tax_id, rank level);
    self.names maps normalized name -> [tax_ids]; self.ranks maps rank level
    name -> numeric rank (lower = closer to root).
    """
    def __init__(self, path_nodes, path_names):
        self.ranks = _TAXONOMIC_RANKS
        self.path_nodes = path_nodes
        self.path_names = path_names
        self.from_dump()
    def from_dump(self):
        """Parse the dump files into self.nodes and self.names."""
        log('loading NCBI taxonomy dump')
        # names.dmp: keep only 'scientific name' rows; normalize names to
        # lowercase alphanumerics separated by single spaces
        names = {}
        with open(self.path_names, 'r') as fh:
            for l in fh:
                l = l.rstrip()
                l = re.sub("\t\|$", "", l)
                l = l.split('\t|\t')
                if l[-1] != 'scientific name':
                    continue
                l[1] = re.sub('[^A-Za-z0-9]', ' ', l[1].lower())
                l[1] = re.sub(' +', ' ', l[1])
                l[1] = l[1].rstrip()
                names[l[0]] = l[1]
        # nodes.dmp: tax_id -> (name, parent tax_id, rank); the NCBI root
        # (tax_id 1, 'no rank') is relabelled 'root'
        nodes = {}
        with open(self.path_nodes, 'r') as fh:
            for l in fh:
                l = l.rstrip()
                l = re.sub("\t\|$", "", l)
                l = l.split('\t|\t')
                if l[0] == "1" and l[2] == 'no rank':
                    l[2] = 'root'
                nodes[l[0]] = (names[l[0]], l[1], l[2])
        self.names = {}
        for _id, name in names.items():
            self.names.setdefault(name, []).append(_id)
        self.nodes = nodes
    def prune_branches(self, taxa_levels=_taxa_levels):
        '''
        Prune the branches of the taxonomy tree, so it takes fewer steps to
        walk to the desired tax level
        '''
        if 'root' not in taxa_levels:
            # BUGFIX: this previously assigned to a misspelled local
            # (`tax_levels`), so 'root' was never actually added
            taxa_levels = ['root'] + taxa_levels
        nodes = {}
        log('start pruning the taxonomic tree')
        # self.nodes is keyed by tax_id; the values are (name, parent, level)
        for tax_id, node in self.nodes.items():
            tax_name, parent_id, level = node
            if level == 'no rank':
                continue
            if self.ranks[level] < self.ranks['genus'] \
                    and level not in taxa_levels:
                continue
            # walk up until the parent is a kept level (or at/below genus)
            parent_node = self.get_parent_taxa(parent_id)
            while (parent_node[2] < self.ranks['genus']
                    and parent_node[3] not in taxa_levels) \
                    or parent_node[3] == 'no rank':
                parent_node = self.get_parent_taxa(parent_node[0])
            node = (node[0], parent_node[0], node[2])
            nodes[tax_id] = node
        names = {}
        for tax_id, node in nodes.items():
            tax_name = node[0]
            names.setdefault(tax_name, []).append(tax_id)
        self.names = names
        self.nodes = nodes
    def get_tax_id(self, tax_name):
        """Return the list of tax_ids carrying this (normalized) name."""
        return self.names[tax_name]
    def has_tax(self, tax_name):
        """True when the (normalized) name exists in the taxonomy."""
        return tax_name in self.names
    def get_tax_rank(self, tax_id):
        """Return (numeric rank, level name) for a tax_id."""
        level = self.nodes[tax_id][2]
        return self.ranks[level], level
    def get_parent_taxa(self, tax_id):
        """Return (parent_id, parent_name, parent_rank, parent_level)."""
        parent_id = self.nodes[tax_id][1]
        parent_node = self.nodes[parent_id]
        parent_rank = self.ranks[parent_node[2]]
        return parent_id, parent_node[0], parent_rank, parent_node[2]
    def get_common_ancestor(self, tax_id1, tax_id2):
        """Walk both lineages up to their lowest common ancestor.

        Returns (tax_id, name, rank, level) of the ancestor; first equalizes
        ranks, then climbs both sides in lockstep.
        """
        node1 = self.nodes[tax_id1]
        tax_name1 = node1[0]
        level1 = node1[2]
        rank1 = self.ranks[level1]
        node2 = self.nodes[tax_id2]
        tax_name2 = node2[0]
        level2 = node2[2]
        rank2 = self.ranks[level2]
        while tax_id1 != tax_id2:
            if rank1 != rank2:
                # bring the deeper node up to the shallower node's rank
                while rank1 != rank2:
                    if rank1 > rank2:
                        res = self.get_parent_taxa(tax_id1)
                        tax_id1, tax_name1, rank1, level1 = res
                    else:
                        res = self.get_parent_taxa(tax_id2)
                        tax_id2, tax_name2, rank2, level2 = res
            else:
                res = self.get_parent_taxa(tax_id1)
                tax_id1, tax_name1, rank1, level1 = res
                res = self.get_parent_taxa(tax_id2)
                tax_id2, tax_name2, rank2, level2 = res
        return tax_id1, tax_name1, rank1, level1
if __name__ == '__main__':
    # smoke test: load the taxonomy from a local NCBI taxdump checkout
    db = NCBITaxonomyInMem('../taxdump/nodes.dmp', '../taxdump/names.dmp')
import torch
import numpy as np
def test_tensors():
    """Walk through the PyTorch tensor tutorial: creation, attributes,
    operations, and the NumPy bridge. Each section prints a Done marker."""
    # Initializing a Tensor
    def initialize_a_tensor():
        data = [[1, 2],[3, 4]]
        # Directly from data
        x_data = torch.tensor(data)
        # From a NumPy array
        np_array = np.array(data)
        x_np = torch.from_numpy(np_array)
        # From another tensor
        x_ones = torch.ones_like(x_data)
        x_rand = torch.rand_like(x_data, dtype=torch.float)
        # With random or constant values:
        shape = (2,3)
        rand_tensor = torch.rand(shape)
        ones_tensor = torch.ones(shape)
        zeros_tensor = torch.zeros(shape)
        print('Done - initialize_a_tensor\n')
    initialize_a_tensor()
    # Attributes of a Tensor
    def attributes_of_a_tensor():
        tensor = torch.rand([3,4])
        print(f"Shape of tensor: {tensor.shape}")
        print(f"Datatype of tensor: {tensor.dtype}")
        print(f"Device of tensor is stored on: {tensor.device}")
        print('Done - attributes_of_a_tensor\n')
    attributes_of_a_tensor()
    # Operations on Tensors
    def operations_on_tensors():
        # 1. Standard numpy-like indexing and slicing:
        tensor = torch.rand(4, 4)
        # BUGFIX: the CUDA move previously ran BEFORE `tensor` was created,
        # raising UnboundLocalError on any machine with CUDA available
        if torch.cuda.is_available():
            tensor = tensor.to('cuda')
        print('First row: ',tensor[0])
        print('Last row:', tensor[-1, ...])
        print('First column: ', tensor[:, 0])
        print('Last column:', tensor[..., -1])
        tensor[:,1] = 0
        print(tensor)
        # 2. Joining tensors
        t1 = torch.cat([tensor, tensor, tensor], dim=1)
        print(t1)
        # 3. Arithmetic operations
        # This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value
        y1 = tensor @ tensor.T
        y2 = tensor.matmul(tensor.T)
        y3 = torch.rand_like(tensor)
        torch.matmul(tensor, tensor.T, out=y3)
        # This computes the element-wise product. z1, z2, z3 will have the same value
        z1 = tensor * tensor
        z2 = tensor.mul(tensor)
        z3 = torch.rand_like(tensor)
        torch.mul(tensor, tensor, out=z3)
        # 4. Single-element tensors
        agg = tensor.sum()
        agg_item = agg.item()
        print(agg_item, type(agg_item))
        # In-place operations
        print(tensor, "\n")
        tensor.add_(5)
        print(tensor)
        print('Done - operations_on_tensors\n')
    operations_on_tensors()
    # Bridge with NumPy
    def bridge_with_numpy():
        # Tensor to NumPy array
        # A change in the tensor reflects in the NumPy array
        t = torch.ones(5)
        print(f"t: {t}")
        n = t.numpy()
        print(f"n: {n}")
        t.add_(1)
        print(f"t: {t}")
        print(f"n: {n}")
        # NumPy array to Tensor
        # Changes in the NumPy array reflects in the tensor
        n = np.ones(5)
        t = torch.from_numpy(n)
        np.add(n, 1, out=n)
        print(f"t: {t}")
        print(f"n: {n}")
        print('Done - bridge_with_numpy')
    bridge_with_numpy()
if __name__ == '__main__':
    # run the full tensor tutorial walkthrough
    test_tensors()
#!C:\Python38\Python
import tkinter as tk
import random
import keyboard
import time
from PIL import Image, ImageTk
def goLeft():
    """Step the snake head 50px left when 'left' is pressed, or keep
    drifting left when that is already the current heading."""
    global x_snake, y_snake, direction
    pressed = keyboard.is_pressed('left')
    if pressed:
        print("left pressed")
    if pressed or direction == 'left':
        x_snake -= 50
        direction = 'left'
def goRight():
    """Step the snake head 50px right when 'right' is pressed, or keep
    drifting right when that is already the current heading."""
    global x_snake, y_snake, direction
    pressed = keyboard.is_pressed('right')
    if pressed:
        print("right pressed")
    if pressed or direction == 'right':
        x_snake += 50
        direction = 'right'
def goUp():
    """Step the snake head 50px up when 'up' is pressed, or keep drifting
    up when that is already the current heading."""
    global x_snake, y_snake, direction
    pressed = keyboard.is_pressed('up')
    if pressed:
        print("up pressed")
    if pressed or direction == 'up':
        y_snake -= 50
        direction = 'up'
def goDown():
    """Step the snake head 50px down when 'down' is pressed, or keep
    drifting down when that is already the current heading."""
    global x_snake, y_snake, direction
    pressed = keyboard.is_pressed('down')
    if pressed:
        print("down pressed")
    if pressed or direction == 'down':
        y_snake += 50
        direction = 'down'
def structure():
    """One game tick: handle ESC (show end screen, quit), then advance the
    snake. For each heading only the two perpendicular keys plus the current
    direction are polled, so the snake cannot reverse onto itself."""
    global direction
    if keyboard.is_pressed('esc'):
        # show the game-over image for 2 seconds, then close the window
        img = ImageTk.PhotoImage(Image.open("snakeGame.png").resize((1536,864)))
        canvas.create_image(752, 400, image=img)
        canvas.update()
        time.sleep(2)
        root.destroy()
    if direction=='up':
        goLeft()
        goRight()
        goUp()
    elif direction=='down':
        goLeft()
        goRight()
        goDown()
    elif direction=='left':
        goUp()
        goDown()
        goLeft()
    elif direction=='right':
        goUp()
        goDown()
        goRight()
    # NOTE(review): screenEnd() is not defined anywhere in this file, so this
    # call raises NameError on the first tick — confirm where it should come from.
    screenEnd()
root = tk.Tk()
root.geometry("1536x864")
# create a canvas
canvas = tk.Canvas(root,width=1536,height=864)
canvas.pack()
# create a mouse
# (the food square; spawn ranges keep the 50px square inside the canvas)
x_mouse = random.randrange(18,1469)
y_mouse = random.randrange(7,808)
mouse=canvas.create_rectangle(x_mouse,y_mouse,x_mouse+50,y_mouse+50,fill='red')
# create a snake
x_snake = random.randrange(18,1369)
y_snake = random.randrange(7,808)
snake_body = []
# three initial 50px segments, laid out left-to-right from the spawn point
for i in range(3):
    x_snake+=50
    snake_body.append(canvas.create_rectangle(x_snake,y_snake,x_snake+50,y_snake+50,fill='black'))
direction = 'up'
# main loop: one tick every 50 ms; structure() destroys the window on ESC
while(True):
    structure()
    canvas.update()
    time.sleep(0.05)
root.mainloop()
|
from .._distutils.command import install_lib as orig
class install_lib(orig.install_lib):
    # Type stub for setuptools' install_lib command, which subclasses the
    # vendored distutils command; bodies are elided (`...`) by design.
    def run(self) -> None: ...
    def get_exclusions(self): ...
    def copy_tree(
        self, infile, outfile, preserve_mode: int = 1, preserve_times: int = 1, preserve_symlinks: int = 0, level: int = 1
    ): ...
    def get_outputs(self): ...
|
import numpy as np
# Fill the last row of a 3x3 ramp matrix with the var_* values.
b = np.arange(9).reshape(3, 3)
b = b * 1.0  # promote to float so 99. fits without truncation
var_1 = var_2 = var_3 = 99.
# iterate the variables directly instead of looking them up by name via
# locals() — same assignments, no string-based indirection
for i, value in enumerate((var_1, var_2, var_3)):
    b[2, i] = value
c = np.zeros(shape=(100, 100), dtype=np.float32)
print(c)
print(c[:, 0])
# b after the assignment loop (note: the print statements above show c, not b):
#[[ 0.  1.  2.]
# [ 3.  4.  5.]
# [99. 99. 99.]]
# This file is part of AltCanvas.
#
# http://code.google.com/p/altcanvas
#
# AltCanvas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AltCanvas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AltCanvas. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import cairo
from math import sqrt
from math import pi as PI
import random
class Log:
    """Minimal stdout logger gated by a per-instance DEBUG flag (off by default)."""
    def __init__(self):
        self.DEBUG = False
    def write(self,str):
        if not self.DEBUG:
            return
        sys.stdout.write(str)
    def writeln(self,str):
        if not self.DEBUG:
            return
        sys.stdout.write(str+'\n')
        sys.stdout.flush()
# module-wide shared logger instance
log = Log()
class RGBA:
    """Simple RGBA color holder; channels default to black, alpha to opaque."""
    r = None
    g = None
    b = None
    a = 1.0
    def __init__(self,r=0.0,g=0.0,b=0.0,a=1.0):
        self.r = r
        self.g = g
        self.b = b
        self.a = a
def html2rgb(hr,hg,hb):
    """Convert 8-bit (0-255) color channels to floats in [0.0, 1.0]."""
    channels = (hr, hg, hb)
    return tuple(1.0 * c / 0xFF for c in channels)
# layout strategies accepted by get_image_locations
(LAYOUT_STEP,LAYOUT_UNIFORM_SPREAD,LAYOUT_UNIFORM_OVERLAP) = range(3)
def get_uniform_fit(count,max_x,max_y,
        OVERLAP_FACTOR = 0.9,max_limit=0):
    """Return a square (side, side) image size so `count` images tile an
    area of max_x * max_y, shrunk by OVERLAP_FACTOR.

    max_limit caps the side length; 0 (the default) means "no cap".
    (Previously min(side, 0) forced the result to (0, 0) whenever the cap
    was left at its default.)
    """
    if count == 0:
        return (0,0)
    total_area = max_x*max_y
    image_area = total_area/count
    image_area = OVERLAP_FACTOR * image_area
    img_side = int(sqrt(image_area))
    if max_limit:
        img_side = min(img_side,max_limit)
    return (img_side,img_side)
def get_image_locations(count,layout=LAYOUT_UNIFORM_SPREAD,
        oheight=0,owidth=0,randomize=True):
    '''
    Generator of (x, y) positions for `count` images on an 800x480 canvas.

    @param count: Number of locations to generate
    @param layout: One of the predefined layouts
    @param oheight: Hint in terms of Object height that will be placed
        at the returned position. (Optional param)
    @param owidth: Hint in terms of Object width that will be placed
        at the returned position. (Optional param)

    Index math uses floor division (`//`) throughout — the original relied
    on Python 2's integer `/`; `randomize_arr(range(count))` also needed a
    list in Python 3.
    '''
    if count == 0:
        raise Exception('Need non-zero number of images')
    x_margin = 10
    y_margin = 10
    max_x = 800
    max_y = 480
    def randomize_arr(arr):
        # light in-place shuffle: repeatedly swap a random slot with slot 0
        l = len(arr)
        if l == 1:  # `is 1` relied on CPython small-int identity
            return arr
        for i in range(l):
            pos = random.randint(1,l-1)
            arr[pos], arr[0] = arr[0], arr[pos]
        return arr
    if layout == LAYOUT_STEP:
        # diagonal staircase from the top-left corner
        x = 20
        y = 20
        for i in range(count):
            x = x+75
            y = y+40
            yield (x,y)
    elif layout == LAYOUT_UNIFORM_OVERLAP:
        if not oheight:
            oheight = 100
        if not owidth:
            owidth = 100
        '''
        The spread of images should be in proportion to the lengths of the
        side of the total area
        eqn 1: x_count * max_y - y_count * max_x = 0
        eqn 2: x_count * y_count = count
        Therefore,
        x_count = (count / x_count) * max_x / max_y
        x_count = sqrt(count * max_x/max_y)
        aspect_ratio = max_x/max_y
        x_count = sqrt(count * aspect_ratio)
        y_count = count / x_count
        '''
        # grid of image centers, inset by margins and half the object size
        cx0 = x_margin + owidth//2
        cy0 = y_margin + oheight//2
        cx1 = max_x - (x_margin+owidth//2)
        cy1 = max_y - (y_margin+oheight//2)
        aspect_ratio = max_x*1.0/max_y
        x_count = int(sqrt(count * aspect_ratio))
        y_count = count//x_count + 1
        x_num_gaps = max(x_count - 1,1)
        y_num_gaps = max(y_count - 1,1)
        if randomize:
            positions = randomize_arr(list(range(count)))
        else:
            positions = list(range(count))
        for i in positions:
            xc = i % x_count
            yc = i // x_count
            x = cx0 + xc*((cx1-cx0)//x_num_gaps) - owidth//2
            y = cy0 + yc*((cy1-cy0)//y_num_gaps) - oheight//2
            # jitter each position by up to 20% of the object height
            RANDOM_POS_FACTOR = int(0.2*oheight)
            dx = random.randint(-RANDOM_POS_FACTOR,+RANDOM_POS_FACTOR)
            dy = random.randint(-RANDOM_POS_FACTOR,+RANDOM_POS_FACTOR)
            x = x+dx
            y = y+dy
            yield(x,y)
    elif layout == LAYOUT_UNIFORM_SPREAD:
        if not oheight:
            oheight = 100
        if not owidth:
            owidth = 100
        # cell grid sized so each image gets roughly equal space; images are
        # centered inside their cells
        avg_space = sqrt(max_x*max_y/count)
        max_x_count = int(max_x/avg_space)
        max_y_count = count//max_x_count + 1
        oh = max_y//max_y_count
        ow = max_x//max_x_count
        for i in range(count):
            xc = i % max_x_count
            yc = i // max_x_count
            x = xc*ow + (ow-owidth)//2
            y = yc*oh + (oh-oheight)//2
            yield(x,y)
# Gradient-border styles accepted by draw_grad_rect().
(RECT_GRAD_EXPLOSION,RECT_GRAD_SHADOW,RECT_GRAD_TWISTED_SHADOW) = range(3)
def draw_grad_rect(inner=None,outer=None,border=None,
        type=RECT_GRAD_EXPLOSION,color=RGBA(0,0,0,None)):
    '''
    Render a rectangular gradient frame (glow/shadow) and return it on a
    freshly created ARGB32 cairo surface.

    @param inner: (x,y,w,h) for inner rectangle
    @param outer: (x,y,w,h) for outer rectangle
    @param border: width of the border between inner and outer
    @param type: RECT_GRAD_EXPLOSION / RECT_GRAD_SHADOW /
        RECT_GRAD_TWISTED_SHADOW -- selects the direction each corner is
        pushed outward
    @param color: RGBA used for the gradient; alpha ramps from 1 at the
        inner edge to 0 at the outer edge

    NOTE(review): only `inner` + `border` are used below; handling an
    explicit `outer` rectangle is still a @todo.
    '''
    if not inner and not outer:
        raise Exception('Invalid params')
    if (not inner or not outer) and not border:
        raise Exception('Invalid params')
    '''
    ix,iy - (x,y) coordinates of four vertices of rectangle in clockwise
    direction starting from top left
    '''
    # @todo: when outer is given
    ix = [inner[0],inner[0]+inner[2],inner[0]+inner[2],inner[0]]
    iy = [inner[1],inner[1],inner[1]+inner[3],inner[1]+inner[3]]
    # Per-vertex sign of the border offset, one (bx, by) pair per style.
    bx,by = (
        # EXPLOSION
        ([-1,+1,+1,-1],[-1,-1,+1,+1]),
        # SHADOW
        ([+1,+1,+1,+1],[+1,+1,+1,+1]),
        # TWISTED SHADOW
        ([-1,-1,+1,+1],[-1,-1,+1,+1])
    )[type]
    # Outer vertices: inner vertices pushed outward by `border` per the signs.
    ox = [ix[i]+border*bx[i] for i in range(len(ix))]
    oy = [iy[i]+border*by[i] for i in range(len(iy))]
    # Surface sized to the inner rectangle's extent plus one border width.
    ow = max(ix)+border
    oh = max(iy)+border
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,ow,oh)
    ctx = cairo.Context(surface)
    def draw_polygon(ctx,vertices):
        # Trace a closed path through `vertices`; caller fills it.
        ctx.move_to(vertices[0][0],vertices[0][1])
        for i in range(1,len(vertices)):
            ctx.line_to(vertices[i][0],vertices[i][1])
        ctx.line_to(vertices[0][0],vertices[0][1])
    def make_linear_grad(x0,y0,x1,y1):
        # Gradient from fully opaque `color` at (x0,y0) to transparent at (x1,y1).
        lingrad = cairo.LinearGradient(x0,y0,x1,y1)
        lingrad.add_color_stop_rgba(0,color.r,color.g,color.b,1)
        #lingrad.add_color_stop_rgba(0.1,0,0,0,0.7)
        lingrad.add_color_stop_rgba(1,color.r,color.g,color.b,0)
        return lingrad
    # Draw gradients: one trapezoid per side of the frame.
    # top
    lingrad = make_linear_grad(ix[0],iy[0],ix[0],ox[0])
    draw_polygon(ctx,((ox[0],oy[0]),
                  (ix[0],iy[0]),
                  (ix[1],iy[1]),
                  (ox[1],oy[1])))
    ctx.set_source(lingrad)
    ctx.fill()
    # right
    lingrad = make_linear_grad(ix[1],iy[1],ox[1],iy[1])
    draw_polygon(ctx,
            ((ox[1],oy[1]),
            (ox[2],oy[2]),
            (ix[2],iy[2]),
            (ix[1],iy[1])
            ))
    ctx.set_source(lingrad)
    ctx.fill()
    # bottom
    lingrad = make_linear_grad(ix[2],iy[2],ix[2],oy[2])
    draw_polygon(ctx,
            ((ox[2],oy[2]),
            (ox[3],oy[3]),
            (ix[3],iy[3]),
            (ix[2],iy[2])
            ))
    ctx.set_source(lingrad)
    ctx.fill()
    # left
    lingrad = make_linear_grad(ix[3],iy[3],ox[3],iy[3])
    draw_polygon(ctx,
            ((ox[3],oy[3]),
            (ox[0],oy[0]),
            (ix[0],iy[0]),
            (ix[3],iy[3])
            ))
    ctx.set_source(lingrad)
    ctx.fill()
    return surface
def detect_platform():
    """Return 'Nokia' when the hostname contains 'nokia', else 'Desktop'.

    The original shelled out via os.popen4('uname -n'), which is Python 2
    only and spawns a process; platform.node() returns the same network
    name portably and without a subprocess.
    """
    import platform
    if 'nokia' in platform.node().lower():
        return 'Nokia'
    return 'Desktop'
def show_multiline(w,hi,ctx,text,y_margin):
    # @summary: word by word drawing, center justified
    '''
    Draw `text` into cairo context `ctx`, wrapping word by word to fit
    width `w` and horizontally centering each wrapped line.

    @param w: available width in pixels
    @param hi: vertical advance per wrapped line
    @param ctx: cairo context with the font already configured
    @param text: string to render; split on single spaces
    @param y_margin: baseline offset of the first line
    '''
    x_offset = 5
    used = 0          # horizontal space consumed on the current line
    line = 0          # index of the line currently being filled
    line_text = ''    # words accumulated for the pending line
    # Advance of a single space, added between words when measuring.
    _,_,_,_,space_x_adv,_ = ctx.text_extents(str(' '))
    for word in text.split(' '):
        x_bearing,y_bearing,width,height,x_adv,y_adv = ctx.text_extents(word)
        if( used > 0 and used+width >= w):
            # Next word would overflow: flush the pending line, centered
            # within the leftover space.
            x_b,y_b,wdt,hgt,x_a,y_a = ctx.text_extents(line_text)
            ctx.move_to(x_offset+x_b+int((w-used)/2),line*hi+y_margin-y_b)
            ctx.show_text(line_text)
            line_text = ''
            used = x_offset
            line += 1
        line_text += word+' '
        used += x_adv + space_x_adv
    # Deal with remaining text
    if line_text != '':
        x_bearing,y_bearing,width,height,x_adv,y_adv = ctx.text_extents(line_text)
        ctx.move_to(x_offset+x_bearing+int((w-used)/2),line*hi+y_margin-y_bearing)
        ctx.show_text(line_text)
def recalculate_clouds(widgetQ):
    '''
    Recompute the "cloud" rectangles on every widget in `widgetQ`.

    A cloud is the rectangular intersection between a widget and any widget
    placed *later* in the queue (i.e. stacked on top of it), expressed in
    the covered widget's own coordinate system as (x0, y0, x1, y1).
    '''
    # Discard stale clouds before recomputing from scratch.
    for entry in widgetQ:
        entry.widget.clouds = []
    for upper_idx, upper in enumerate(widgetQ):
        ux0, uy0 = upper.x, upper.y
        ux1 = ux0 + upper.widget.w
        uy1 = uy0 + upper.widget.h
        # Compare against every widget below this one in the stack.
        for lower in widgetQ[:upper_idx]:
            lx0, ly0 = lower.x, lower.y
            lx1 = lx0 + lower.widget.w
            ly1 = ly0 + lower.widget.h
            separated = (lx0 < ux0 and lx1 < ux0) or (lx0 > ux1 and lx1 > ux1) or \
                        (ly0 < uy0 and ly1 < uy0) or (ly0 > uy1 and ly1 > uy1)
            if separated:
                # Extents do not intersect on at least one axis.
                continue
            # Intersection rectangle, translated into the lower widget's
            # local coordinates.
            lower.widget.clouds.append((max(lx0, ux0) - lx0,
                                        max(ly0, uy0) - ly0,
                                        min(lx1, ux1) - lx0,
                                        min(ly1, uy1) - ly0))
def draw_rounded_rect(ctx,x,y,w,h,vr=None):
    '''
    Fill a w x h rounded rectangle on cairo context `ctx`.

    @param vr: corner radius; defaults to one tenth of the shorter side.

    NOTE(review): the x and y parameters are immediately overwritten with 0
    below, so the rectangle is always drawn at the context origin --
    presumably each widget paints onto its own surface; confirm before
    relying on x/y.
    '''
    x = 0
    y = 0
    x1 = x+w
    y1 = y+h
    if not vr:
        vr = int(min(w,h)/10)
    # Trace the outline clockwise: straight edges joined by quarter-circle arcs.
    ctx.move_to(x+vr,y)
    ctx.line_to(x1-vr,y)
    ctx.arc(x1-vr,y+vr,vr,3*PI/2,0)
    ctx.line_to(x1,y1-vr)
    ctx.arc(x1-vr,y1-vr,vr,0,PI/2)
    ctx.line_to(x+vr,y1)
    ctx.arc(x+vr,y1-vr,vr,PI/2,PI)
    ctx.line_to(x,y+vr)
    ctx.arc(x+vr,y+vr,vr,PI,3*PI/2)
    ctx.fill()
def open_browser(url=None):
    """Open `url` in the platform browser.

    On Nokia/Maemo devices this goes through the osso RPC browser service;
    elsewhere it launches firefox.
    """
    if detect_platform() == 'Nokia':
        import libpub
        import osso
        ctx = osso.Context('publishr',libpub.VERSION,False)
        osso_rpc = osso.Rpc(ctx)
        osso_rpc.rpc_run("com.nokia.osso_browser","/com/nokia/osso_browser/request",
                 'com.nokia.osso_browser','load_url',rpc_args=(str(url),))
    else:
        # SECURITY FIX: pass argv as a list (no shell) instead of the previous
        # os.system("firefox '%s'"), where a crafted URL containing quotes
        # could inject arbitrary shell commands.
        import subprocess
        subprocess.call(['firefox', str(url)])
if __name__ == '__main__':
print html2rgb(0xFF,0x33,0x33) |
from google.cloud import datastore
import cirq
from flask import Flask
import time
import sys
import datetime
import os
from networkx import Graph
from itertools import combinations
from cirq.contrib.routing import route_circuit
def get_error_qubits(project_id, processor_id, threshold):
    """Return the set of qubits whose latest calibration metric exceeds `threshold`.

    Queries the processor's current calibration; every qubit appearing in a
    metric key (single qubit or pair) whose first metric value is above the
    threshold is collected.
    """
    # Query for the latest calibration snapshot of this processor.
    engine = cirq.google.Engine(project_id=project_id)
    processor = engine.get_processor(processor_id=processor_id)
    calibration = processor.get_current_calibration()
    bad_qubits = set()
    for metric_name in calibration:
        metric = calibration[metric_name]
        for qubit_key in metric:
            # Metric values are sequences; index 0 holds the reading compared
            # against the threshold.
            if metric[qubit_key][0] > threshold:
                # The key is itself an iterable of one or more qubits.
                bad_qubits.update(qubit_key)
    return bad_qubits
def naive_connectivity(gridqubits):
    """Build an adjacency Graph over `gridqubits`.

    Workaround: connectivity isn't read from the device object directly, so
    every adjacent pair of grid qubits becomes an edge.
    """
    edges = ((a, b) for a, b in combinations(gridqubits, 2) if a.is_adjacent(b))
    return Graph(edges)
def place_circuit(circuit, device, exclude_always):
    """Compile `circuit` for `device`, avoiding the qubits in `exclude_always`.

    First tries cirq's sycamore optimizer directly; if that raises
    ValueError, falls back to routing the circuit onto the device's
    connectivity graph (minus the excluded qubits) and re-optimizing.
    """
    if exclude_always is None:
        exclude_always = set()
    else:
        exclude_always = set(exclude_always)
    try:
        return cirq.google.optimized_for_sycamore(circuit=circuit, new_device=device, optimizer_type='sycamore')
    except ValueError as e:
        # Deliberate fallthrough to the routing-based path below.
        pass
    # Workaround to work with route_circuit, which unnecessarily doesn't support multi-qubit measures
    def split_measure(measure_gate:'cirq.GateOperation') -> 'cirq.GateOperation':
        # Non-measurement operations pass through unchanged.
        if not cirq.protocols.is_measurement(measure_gate):
            yield measure_gate
            return
        # Replace an n-qubit measurement with n single-qubit measurements
        # whose keys append the qubit name to the original key.
        key = cirq.protocols.measurement_key(measure_gate)
        yield cirq.Moment([cirq.measure(qubit, key=key+'.'+str(qubit)) for qubit in measure_gate.qubits])
    circuit = cirq.Circuit(*map(split_measure, circuit.all_operations()))
    available_qubits = device.qubit_set() - exclude_always
    graph = naive_connectivity(available_qubits)
    circuit = route_circuit(circuit=circuit, device_graph=graph, algo_name='greedy').circuit
    circuit = cirq.google.optimized_for_sycamore(circuit=circuit, new_device=device, optimizer_type='sycamore')
    # Workaround because SerializableDevice is not json-able
    circuit = cirq.Circuit() + circuit
    device.validate_circuit(circuit)
    return circuit
def prepare_job(entity: 'datastore.Entity', device, err_qubits) -> 'tuple':
    """ Prepare one unfinished job entity for execution.

    Parses the entity's circuit JSON, maps it onto `device` while avoiding
    `err_qubits`, and stores the mapped circuit back on the entity.

    Args:
        entity: unfinished job key-able entity
        device: target cirq device
        err_qubits: qubits excluded from placement (poor calibration)
    Returns:
        (entity, circuit, repetitions) when the circuit is runnable; on any
        parse/mapping failure the entity is marked done with an error
        message and (entity, None, None) is returned.
    """
    # ensure only running unfinished jobs
    assert not entity['done'] and not entity['sent']
    # parse circuit
    # This could be done in the verifier, and we could pickle load the circuit here
    # but that may be limited by datastore's ability to save binary data
    try:
        circuit = cirq.read_json(json_text=entity['circuit'])
    except Exception as e:
        entity['message'] = 'Exception observed while converting JSON to circuit:\n' + str(type(e)) + str(e) + '\n' + \
                            'With JSON:\n' + str(entity['circuit'])
        entity['done'] = True
        return entity, None, None
    # conditionally map circuit
    try:
        #circuit, _ = next(multiplex_onto_sycamore([circuit], device, exclude_always=err_qubits))
        circuit = place_circuit(circuit, device, err_qubits)
    except Exception as e:
        entity['message'] = 'Exception observed while mapping circuit:\n' + str(type(e)) + str(e)
        entity['done'] = True
        return entity, None, None
    # Keep the potentially large mapped-circuit JSON out of datastore indexes.
    entity.exclude_from_indexes.add('mapped_circuit')
    entity['mapped_circuit'] = cirq.to_json(circuit)
    return entity, circuit, entity['repetitions']
def run_jobs(handler, circuits, repetitions):
    """Submit `circuits` as one batch and yield a result key per circuit.

    The whole batch runs at max(repetitions) repetitions; each yielded key
    is [program_id, job_id, index-within-batch].
    """
    batch = list(circuits)
    enginejob = handler._engine.run_batch(
        batch,
        repetitions=max(repetitions),
        processor_ids=handler._processor_ids,
        gate_set=handler._gate_set,
    )
    for index in range(len(batch)):
        yield [enginejob.program_id, enginejob.job_id, index]
def finalize_job(entity, result_key):
    """Mark `entity` as submitted and stamp it with its result key.

    NOTE(review): 'results' is excluded from indexes but the value is stored
    under 'result_key' -- looks like a leftover; confirm which field the
    readers expect.
    """
    entity.exclude_from_indexes.add('results')
    # Written via item assignment so any Entity-level __setitem__ hooks fire.
    for field, value in (
        ('result_key', result_key),
        ('message', 'Success'),
        ('processed_timestamp', datetime.datetime.utcnow()),
        ('processed_version', os.environ.get('GAE_VERSION')),
        ('sent', True),
    ):
        entity[field] = value
    return entity
#def run(client: datastore.Client) -> str:
def run(project_id, processor_id) -> str:
    """ Pull unfinished, verified jobs, run them, and mark them processed.

    Fetches up to 20 jobs per datastore transaction, prepares each circuit
    (skipping poorly calibrated qubits), submits the runnable ones as one
    batch, and commits the updated entities.

    Returns:
        string message with number of jobs run
    """
    # Connect to datastore
    client = datastore.Client(project_id)
    # Initialize google cloud quantum engine
    engine = cirq.google.Engine(project_id=client.project)
    # get handler and device
    handler = engine.sampler(processor_id=processor_id, gate_set=cirq.google.SYC_GATESET)
    device = engine.get_processor(processor_id=processor_id).get_device([cirq.google.SYC_GATESET])
    # get current error qubits from recent calibration
    # NOTE(review): threshold 35 -- the unit of the calibration metric
    # (percent? raw count?) is not visible here; confirm.
    err_qubits = get_error_qubits(client.project, processor_id, 35)
    # pull unfinished, verified job keys
    query = client.query(kind="job")
    query.add_filter("done", "=", False)
    query.add_filter("sent", "=", False)
    query.add_filter("verified", "=", True)
    jobs_run = 0
    while True:
        # get each job by key and run it in a transaction
        transaction = client.transaction()
        transaction.begin(timeout=600)
        prepared = [prepare_job(entity, device, err_qubits) for entity in query.fetch(limit=20)]
        if not prepared:
            # BUG FIX: close the transaction opened for the empty batch
            # instead of leaving it dangling.
            transaction.rollback()
            break
        # Split into runnable jobs and entities that already failed preparation.
        to_run, complete = [], []
        for entity, circuit, repetitions in prepared:
            if circuit and repetitions:
                to_run.append((entity, circuit, repetitions))
            else:
                complete.append(entity)
        assert len(to_run) + len(complete) == len(prepared)
        if to_run:
            entities, circuits, repetitions = zip(*to_run)
            result_keys = run_jobs(handler, circuits, list(repetitions))
            complete.extend(map(finalize_job, entities, result_keys))
        #client.put_multi(complete)
        for entity in complete:
            transaction.put(entity)
        transaction.commit()
        # BUG FIX: the original returned len(prepared) AFTER the loop, i.e.
        # the length of the final empty batch -- always "Jobs run: 0".
        jobs_run += len(prepared)
    # return number of jobs run
    return 'Jobs run: ' + str(jobs_run)
if __name__ == '__main__':
    # CLI usage: python <script> <project_id> <processor_id>
    # processor id from argument
    PROJECT_ID = str(sys.argv[1])
    PROCESSOR_ID = str(sys.argv[2])
    print(run(PROJECT_ID, PROCESSOR_ID))
|
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.functions import min
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark.sql.functions import lit
'''
Script functionality:
To further filter and reduce the size of the datasets, we drop the
columns that are of no use to us, namely: author_flair_css_class,
author_flair_text, distinguished, edited, retrieved_on, stickied and
subreddit_id.
'''
# Drop the unused columns from the 2016 sample and write a reduced parquet.
spark = SparkSession.builder.getOrCreate()
spark.conf.set('spark.sql.session.timeZone', 'UTC')
# FIX: removed the dead SparkContext/SQLContext pair -- SQLContext is
# deprecated and neither object was used; the DataFrame API below only
# needs the SparkSession.
df_2016 = spark.read.parquet("sample2016.parquet")
# Columns to discard (metadata with no analytical value for us).
to_drop = ['author_flair_css_class','author_flair_text','distinguished','edited','retrieved_on','stickied','subreddit_id']
df_2016_reduced = df_2016.drop(*to_drop)
df_2016_reduced.write.mode("overwrite").parquet("sampled_reduced_col_2016.parquet")
|
# http://rosalind.info/problems/fib/
def fib(n, k):
    """Rosalind FIB: rabbit pairs after n months when each mature pair
    produces k new pairs per month.

    Recurrence: F(n) = F(n-1) + k*F(n-2), with F(0) = 0, F(1) = 1.

    Rewritten iteratively: the naive double recursion is O(2^n) and hits
    Python's recursion limit for modest n; this runs in O(n) time, O(1) space.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, curr + k * prev
    return curr
# Read n and k from the Rosalind dataset file and print F(n).
with open('datasets/rosalind_fib.txt') as f:
    n, k = f.readline().split()
res = fib(int(n), int(k))
print(res)
|
import functions
class Node:
    """One cell of the maze grid, classified by its source character.

    'x', '|', '-' are walls; 'H' is the start; 'T' is the goal; 't' is a
    torch pickup; ' ' and 'h' are walkable empty cells.
    """
    def __init__(self, char, pos):
        self.char = char
        self.i = pos[0]
        self.j = pos[1]
        self.isWall = char in ("x", "|", "-")
        self.isStart = char == "H"
        self.isGoal = char == "T"
        self.isTorch = char == "t"
        self.isEmpty = char in (" ", "h")
        # Best (smallest) step count any search path has reached this node with.
        self.crMnStp = None  # chng nm!1!!!1!!!1!
        # Best (largest) remaining torch any search path has reached this node with.
        self.maxTorchLeft = None
        self.connections = []

    def __repr__(self) -> str:
        return self.char

    def addConnection(self, node):
        self.connections.append(node)

    def addOpenConnection(self, node):
        # Only link to traversable neighbours.
        if node.isWall:
            return
        self.addConnection(node)

    def setTorchLeft(self, torchLeft):
        # None-aware max: keeps whichever of the two values is defined/larger.
        self.maxTorchLeft = functions.maxWithNone(self.maxTorchLeft, torchLeft)

    def outConnections(self) -> str:
        """Render this node's outgoing edges as 'A->B|' runs, newline-terminated."""
        rendered = "".join(f"{self}->{target}|" for target in self.connections)
        return rendered + "\n" if self.connections else rendered
def outAllConnections(nodeMatrix) -> str:
    """Concatenate every node's connection listing, row by row, with a
    '------' separator line after each row."""
    pieces = []
    for nodeRow in nodeMatrix:
        for node in nodeRow:
            pieces.append(node.outConnections())
        pieces.append("------\n")
    return "".join(pieces)
|
# -*- coding: utf-8 -*-
# Bibliotecas para manuseio dos dados
import pandas as pd
import numpy as np
# datasheet
# Load the raw 2020 measurements (comma-delimited, UTF-8).
dataset = pd.read_csv("2020_before_iqa.csv", encoding='utf8',
                      delimiter=',', engine='python')
print(dataset.info())
# IQA = weighted geometric product of the quality parameters.
# (column series, exponent) in the exact order of the original expression --
# float multiplication is order-sensitive, so the order is preserved.
_factors = [
    (dataset['Oxigenio dissolvido'].astype('float32'), 0.17),
    (dataset['Coliformes totais'].astype('float32'), 0.15),
    (dataset['pH in loco'].astype('float32'), 0.12),
    (dataset['Demanda Bioquimica de Oxigenio'].astype('float32'), 0.1),
    (dataset['Nitrogenio amoniacal total'].astype('float32')
     + dataset['Nitrogenio organico'].astype('float32'), 0.1),
    (dataset['Fosforo total'].astype('float32'), 0.1),
    (dataset['Temperatura da agua'].astype('float32'), 0.1),
    (dataset['Turbidez'].astype('float32'), 0.08),
    (dataset['Solidos totais'].astype('float32'), 0.08),
]
_iqa = _factors[0][0] ** _factors[0][1]
for _series, _weight in _factors[1:]:
    _iqa = _iqa * (_series ** _weight)
dataset['IQA'] = _iqa
# Drop rows where the IQA could not be computed (NaN inputs).
dataset = dataset[dataset['IQA'].notna()]
newDataset = pd.DataFrame({
    'IQA': dataset['IQA'],
    'Estacao': dataset['estacao']
})
# newDataset = newDataset.sort_values(by='data')
# mean_col = newDataset.groupby('data')['IQA'].mean() # don't reset the index!
newDataset.to_csv('2020_iqa.csv', sep=',', encoding='utf-8')
|
"""
Необходимо написать скрипт, обрабатывающий лог файл Nginx и выводящий список IP адресов, с которых производились запросы. Адреса из общей подсети \24 необходимо группировать при выводе (например, 10.40.0.4 и 10.40.0.231 относятся к одной подсети).
Версия Python: 3.5
"""
import ipaddress
MASK = 24
f = open("access.log", "r")
total = 0
unique = 0
networks = dict()
ips = dict()
for line in f.readlines():
# IP address string
ip = line.split(" - - ")[0]
# Containing network
net = ipaddress.ip_network(ip + '/' + str(MASK), strict=False)
# Remember this network and this particular IP address
networks.setdefault(net, set())
ips.setdefault(ip, 0)
# Adjust counters
unique += ip not in networks[net]
total += 1
networks[net].add(ip)
ips[ip] += 1
print("Total: ", total)
print("Unique: ", unique)
print()
for k in networks:
if len(networks[k]) == 1:
ip = networks[k].pop()
print(ip, " (" + str(ips[ip]) + ")")
else:
print(k)
for ip in networks[k]:
print("\t==> " + ip + " (" + str(ips[ip]) + ")") |
# Generated by Django 3.1.7 on 2021-03-22 02:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: points Cartoon.final uploads at
    'media/final/' and introduces the Draft model."""
    dependencies = [
        ('cartoons', '0001_initial'),
    ]
    operations = [
        # Change the upload path of Cartoon.final.
        migrations.AlterField(
            model_name='cartoon',
            name='final',
            field=models.ImageField(upload_to='media/final/'),
        ),
        # Draft: an iteration counter tied to its parent Cartoon (cascade delete).
        migrations.CreateModel(
            name='Draft',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('iteration', models.IntegerField()),
                ('cartoon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cartoons.cartoon')),
            ],
        ),
    ]
|
'''
Created on May 26, 2017
@author: deckyal
'''
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
#Impor MNIST data
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with one-hot labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
# Training parameters
learning_rate = 0.001
training_iters = 100000   # stop once step*batch_size exceeds this
batch_size = 128
display_step = 10         # log loss/accuracy every N steps
# Network parameters: each 28x28 image is fed as 28 time steps of 28 pixels.
n_input = 28
n_steps = 28
n_hidden = 128            # LSTM hidden units per direction
n_classes = 10            # MNIST digit classes
# TF graph inputs
x = tf.placeholder("float", [None, n_steps,n_input])
y = tf.placeholder("float", [None, n_classes])
# Output-layer weights: 2*n_hidden because forward and backward LSTM
# outputs are concatenated by the bidirectional RNN.
weights = {
    'out' : tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out' : tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    """Bidirectional LSTM over the input sequence, projected to class logits.

    Args:
        x: (batch, n_steps, n_input) tensor.
        weights/biases: dicts with an 'out' projection of the concatenated
            forward+backward final outputs onto n_classes logits.
    Returns:
        (batch, n_classes) logits tensor.
    """
    # Unstack into a length-n_steps list of (batch, n_input) tensors.
    x = tf.unstack(x, n_steps, 1)
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # old tensorflow only returns outputs, not states
        # BUG FIX: the fallback previously called the RNN without the inputs
        # `x` at all, which could never succeed.
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
    # Classify on the last time step's concatenated fw/bw output.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
# Build the model, loss, and optimizer.
pred = BiRNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# Evaluation: fraction of samples whose argmax matches the one-hot label.
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Train for training_iters samples, logging every display_step batches,
    # then evaluate on the first 128 test images.
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape flat images into 28 sequences of 28 elements each.
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            # Converted from Python 2 print statements to the function form.
            print("Iter " + str(step * batch_size) + ", Minibatch loss = " +
                  "{:.6f}".format(loss) + ", Training accuracy = " + "{:.5f}".format(acc))
        step += 1
    print("Optimization finished")
    test_len = 128
    # BUG FIX: 'rehshape' -> 'reshape' (the original raised AttributeError).
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Acc : ", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
|
import sys
from genStubs import *
stub = Stubs( "flashSectionStm32", sys.argv[1], sys.argv[2] )
### Include Headers
stub.include( "flashSectionStm32.h" )
### Used Namespaces
stub.newline()
stub.externC()
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionInit", "flashSectionType_t", "SFlashSection_t *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionGetUsableSize_WORDS", "const SFlashSection_t *", "size_t *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionGetStartAddress", "const SFlashSection_t *", "const uint32_t **" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionVerify", "const SFlashSection_t *", "bool *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionIsWritable", "const SFlashSection_t *", "bool *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionErase", "const SFlashSection_t *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashSectionFinalize", "const SFlashSection_t *" )
stub.stubFunction( ("flashSectionErr_t", "FLASH_SECTION_ERR_NONE"), "flashWriteWords", "const uint32_t *", "const uint32_t *", "size_t" )
|
from ctypes import *
_libraries = {}
_libraries['librtlsdr.so'] = CDLL('librtlsdr.so')
STRING = c_char_p
int8_t = c_int8
int16_t = c_int16
int32_t = c_int32
int64_t = c_int64
uint8_t = c_uint8
uint16_t = c_uint16
uint32_t = c_uint32
uint64_t = c_uint64
int_least8_t = c_byte
int_least16_t = c_short
int_least32_t = c_int
int_least64_t = c_long
uint_least8_t = c_ubyte
uint_least16_t = c_ushort
uint_least32_t = c_uint
uint_least64_t = c_ulong
int_fast8_t = c_byte
int_fast16_t = c_long
int_fast32_t = c_long
int_fast64_t = c_long
uint_fast8_t = c_ubyte
uint_fast16_t = c_ulong
uint_fast32_t = c_ulong
uint_fast64_t = c_ulong
intptr_t = c_long
uintptr_t = c_ulong
intmax_t = c_long
uintmax_t = c_ulong
class rtlsdr_dev(Structure):
    # Opaque device handle: librtlsdr never exposes its layout, so the
    # struct intentionally declares no fields and is only used via pointers.
    pass
rtlsdr_dev_t = rtlsdr_dev
rtlsdr_dev._fields_ = [
]
rtlsdr_get_device_count = _libraries['librtlsdr.so'].rtlsdr_get_device_count
rtlsdr_get_device_count.restype = uint32_t
rtlsdr_get_device_count.argtypes = []
rtlsdr_get_device_name = _libraries['librtlsdr.so'].rtlsdr_get_device_name
rtlsdr_get_device_name.restype = STRING
rtlsdr_get_device_name.argtypes = [uint32_t]
rtlsdr_open = _libraries['librtlsdr.so'].rtlsdr_open
rtlsdr_open.restype = c_int
rtlsdr_open.argtypes = [POINTER(POINTER(rtlsdr_dev_t)), uint32_t]
rtlsdr_close = _libraries['librtlsdr.so'].rtlsdr_close
rtlsdr_close.restype = c_int
rtlsdr_close.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_set_center_freq = _libraries['librtlsdr.so'].rtlsdr_set_center_freq
rtlsdr_set_center_freq.restype = c_int
rtlsdr_set_center_freq.argtypes = [POINTER(rtlsdr_dev_t), uint32_t]
rtlsdr_get_center_freq = _libraries['librtlsdr.so'].rtlsdr_get_center_freq
rtlsdr_get_center_freq.restype = c_int
rtlsdr_get_center_freq.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_set_freq_correction = _libraries['librtlsdr.so'].rtlsdr_set_freq_correction
rtlsdr_set_freq_correction.restype = c_int
rtlsdr_set_freq_correction.argtypes = [POINTER(rtlsdr_dev_t), c_int]
rtlsdr_get_freq_correction = _libraries['librtlsdr.so'].rtlsdr_get_freq_correction
rtlsdr_get_freq_correction.restype = c_int
rtlsdr_get_freq_correction.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_set_tuner_gain = _libraries['librtlsdr.so'].rtlsdr_set_tuner_gain
rtlsdr_set_tuner_gain.restype = c_int
rtlsdr_set_tuner_gain.argtypes = [POINTER(rtlsdr_dev_t), c_int]
rtlsdr_get_tuner_gain = _libraries['librtlsdr.so'].rtlsdr_get_tuner_gain
rtlsdr_get_tuner_gain.restype = c_int
rtlsdr_get_tuner_gain.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_set_sample_rate = _libraries['librtlsdr.so'].rtlsdr_set_sample_rate
rtlsdr_set_sample_rate.restype = c_int
rtlsdr_set_sample_rate.argtypes = [POINTER(rtlsdr_dev_t), uint32_t]
rtlsdr_get_sample_rate = _libraries['librtlsdr.so'].rtlsdr_get_sample_rate
rtlsdr_get_sample_rate.restype = c_int
rtlsdr_get_sample_rate.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_reset_buffer = _libraries['librtlsdr.so'].rtlsdr_reset_buffer
rtlsdr_reset_buffer.restype = c_int
rtlsdr_reset_buffer.argtypes = [POINTER(rtlsdr_dev_t)]
rtlsdr_read_sync = _libraries['librtlsdr.so'].rtlsdr_read_sync
rtlsdr_read_sync.restype = c_int
rtlsdr_read_sync.argtypes = [POINTER(rtlsdr_dev_t), c_void_p, c_int, POINTER(c_int)]
rtlsdr_async_read_cb_t = CFUNCTYPE(None, STRING, uint32_t, c_void_p)
rtlsdr_wait_async = _libraries['librtlsdr.so'].rtlsdr_wait_async
rtlsdr_wait_async.restype = c_int
rtlsdr_wait_async.argtypes = [POINTER(rtlsdr_dev_t), rtlsdr_async_read_cb_t, c_void_p]
rtlsdr_cancel_async = _libraries['librtlsdr.so'].rtlsdr_cancel_async
rtlsdr_cancel_async.restype = c_int
rtlsdr_cancel_async.argtypes = [POINTER(rtlsdr_dev_t)]
__all__ = ['rtlsdr_read_sync', 'rtlsdr_set_center_freq',
'rtlsdr_get_freq_correction', 'int32_t', 'uint_least64_t',
'uintptr_t', 'uintmax_t', 'int_fast32_t', 'int16_t',
'int64_t', 'int_fast16_t', 'rtlsdr_set_sample_rate',
'rtlsdr_set_tuner_gain', 'int_fast64_t',
'rtlsdr_async_read_cb_t', 'uint8_t', 'int_least8_t',
'rtlsdr_get_device_count', 'rtlsdr_open', 'uint_least16_t',
'rtlsdr_reset_buffer', 'uint_least32_t', 'int_least64_t',
'int_least16_t', 'int_fast8_t', 'uint_least8_t',
'rtlsdr_set_freq_correction', 'intptr_t', 'int_least32_t',
'int8_t', 'rtlsdr_wait_async', 'rtlsdr_dev',
'rtlsdr_dev_t', 'rtlsdr_get_tuner_gain',
'rtlsdr_get_sample_rate', 'rtlsdr_cancel_async',
'uint_fast32_t', 'uint_fast64_t', 'intmax_t',
'rtlsdr_close', 'rtlsdr_get_device_name', 'uint_fast16_t',
'rtlsdr_get_center_freq', 'uint32_t', 'uint64_t',
'uint16_t', 'uint_fast8_t']
|
from config import nyanpasu_id
from funcs.rep import user_make
def chat_stats(app, chat_id, chat, service, sleep, msg_id, nyanpasu_stats):
    """Post a summary of the chat's toggles/counters, then delete it after 10 s.

    Args:
        app: bot client with send_message/delete_messages
        chat_id: target chat
        chat: chat state object (cond/ttsm/nsfw/greetc/lang/mood/... fields)
        service: localized label strings keyed by field name
        sleep: sleep callable (presumably time.sleep -- confirm)
        msg_id: id of the command message to delete when the bot is admin
        nyanpasu_stats: the bot's member status in this chat
    """
    cc = '✅' if int(chat.cond) else '❌'
    ct = '✅' if int(chat.ttsm) else '❌'
    cn = '✅' if int(chat.nsfw) else '❌'
    cg = '✅' if int(chat.greetc) else '❌'
    if str(chat.lang) == 'ru':
        cl = '🇷🇺'
    elif str(chat.lang) == 'en':
        cl = '🇺🇸'
    else:
        # BUG FIX: an unknown language previously left `cl` unbound and
        # crashed with NameError; fall back to the raw language code.
        cl = str(chat.lang)
    txt = (service['count'] + '\n\n' +
           service['cond'] + cc + '\n' +
           service['nsfw'] + cn + '\n' +
           service['ttsm'] + ct + '\n' +
           service['greet'] + cg + '\n' +
           service['lang'] + cl + '\n' +
           service['mood'] + str(chat.mood) + '\n\n' +
           service['nyanc'] + str(len(chat.nyanc)) + '\n' +
           service['lewdc'] + str(len(chat.lewdc)) + '\n' +
           service['angrc'] + str(len(chat.angrc)) + '\n' +
           service['scarc'] + str(len(chat.scarc)) + '\n\n' +
           service['rep_nps'] + str(chat.users[nyanpasu_id].karma))
    # BUG FIX: the original tested the undefined name `nyanpasu_stat`
    # (the parameter is `nyanpasu_stats`), a guaranteed NameError.
    if nyanpasu_stats == 'administrator':
        app.delete_messages(chat_id, msg_id)
    msg = app.send_message(chat_id, txt)
    sleep(10)
    app.delete_messages(chat_id, msg.message_id)
def my_stats(chat, service, mmbr_id):
    """Build the per-user stats text for member `mmbr_id` of `chat`."""
    member = chat.users[mmbr_id]
    marks = {True: '✅', False: '❌'}
    cond_mark = marks[bool(int(member.cond))]
    ship_mark = marks[bool(int(member.ship))]
    lines = [
        service['use_stats'] + '\n',
        service['use_cond'] + cond_mark,
        service['use_ship'] + ship_mark,
        service['karma_use'] + str(member.karma),
    ]
    return '\n'.join(lines)
def stat(service, message, chat):
    """Build the karma text for the user a message replies to.

    Without a reply target, returns the localized error text. If the target's
    karma is falsy, the user record is (re)created via user_make first.
    """
    if not message.reply_to_message:
        return service['karma_err']
    target = message.reply_to_message.from_user
    display_name = str(target.username) if target.username else str(target.first_name)
    if not chat.users[target.id].karma:
        # Falsy karma: make sure the user record exists before reading it.
        user_make(message, chat, service)
    karma = str(chat.users[target.id].karma)
    return service['karma_for'] + ' @' + display_name + ': ' + karma
import numpy as np
import math
import random
class ExperienceReplay():
    """Fixed-capacity ring buffer of transitions for DQN-style training."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []       # grows until capacity, then entries are overwritten
        self.position = 0      # next slot to write
    def push(self, state,
            action, new_state,
            reward, done):
        """Store one transition, overwriting the oldest once full."""
        entry = (state, action, new_state, reward, done)
        if self.position < len(self.memory):
            self.memory[self.position] = entry
        else:
            self.memory.append(entry)
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        """Uniformly sample transitions, returned column-wise (zipped)."""
        return zip(*random.sample(self.memory, batch_size))
    def __len__(self):
        return len(self.memory)
|
import numpy as np
import forward_propagation as fp
def test_accuracy(X_test, Y_test, m_test, w, b):
    """Fraction of the m_test examples classified correctly at threshold 0.5.

    Args:
        X_test: test inputs fed to forward propagation.
        Y_test: 1 x m_test array of 0/1 labels.
        m_test: number of test examples.
        w, b: model weights and bias.
    Returns:
        accuracy in [0, 1].

    NOTE: an activation of exactly 0.5 matches neither branch and is always
    counted as wrong, as in the original.
    """
    A = fp.forward_propagation(w, b, X_test)
    # FIX: renamed from `sum`, which shadowed the builtin of the same name.
    correct = 0
    for i in range(m_test):
        if (A[0][i] > 0.5 and Y_test[0][i] == 1) or (A[0][i] < 0.5 and Y_test[0][i] == 0):
            correct += 1
    accuracy = correct / m_test
    return accuracy
|
import web
import random
# web.py template renderer; registered in its own globals so templates can
# invoke other templates via `render`.
render = web.template.render('templates/', cache=False, globals={})
render._keywords['globals']['render'] = render
def data(n):
    """Return a list of n random floats drawn uniformly from [2, 50].

    FIX: replaced Python 2-only xrange with range (identical loop behavior
    on both versions) and used a comprehension instead of append-in-loop.
    """
    return [random.uniform(2, 50) for _ in range(n)]
|
def vowels():
    """Print the song's refrain line."""
    print("Ei Ei Oh")
def oldman():
    """Print the song's opening line."""
    print("Old McDonald Had a Fahm")
def hehad(s):
    """Print the 'he had a' line for animal name s."""
    print("And on that farm he had a:", s)
def animal(f):
    """Print the animal-sound line, repeating sound f twice."""
    print(" There's a", f, f, "here")
def verse(s, f):
    """Print one full verse for animal s with sound f.

    FIX: converted Python 2-only print statements to the print() function
    so the song runs on Python 3 (Python 2 is end-of-life).
    """
    oldman(), vowels(), hehad(s), vowels(), animal(f), vowels()
# Sing the opening verse line by line, then full verses for each animal.
oldman()
vowels()
hehad("dog")
vowels()
animal("woof")
vowels()
verse("Chick","peep")
verse("Pig", "Oink")
verse("ManBearPig","ENWUH?")
verse("Bear", "Woof..")
|
f= open("./ExData/maxK", 'r')
n, k = map(int, f.readline().split())
lst = list(map(int, f.readline().split()))
print(n)
print(k)
print(lst)
lst.sort(reverse=True)
print(lst)
result =[]
for i in range(0,n):
for j in range(1,n):
for x in range(2,n):
result.append(lst[i] + lst[j] + lst[x])
print("lst[i] : {}, lst[j] : {}, lst[x] : {}".format(lst[i], lst[j], lst[x]))
print(result)
result.sort(reverse=True)
print(result)
result = set(result)
print(result)
result = sorted(list(result), reverse=True)
print(result)
#####################################################
# f = open("./ExData/maxK", 'r')
# n, k = map(int, f.readline().split())
# arr = list(map(int, f.readline().split()))
#
# print(arr)
# sumList = set()
# for i in range(n):
# for j in range(i+1, n):
# for l in range(j+1,n):
# sumList.add(arr[i] + arr[j] + arr[j]) # 추가되면서 중복이 제거됨.
#
# print(sumList)
# sumList = list(sumList)
# sumList.sort(reverse=True)
# print(sumList)
# print("세번째 큰 수는 : {}".format(sumList[k-1]))
|
import unittest
from oslo.config import cfg
from dnh import consumer as dnh_consumer
from dnh.handler import nsd4
class DnhTestCase(unittest.TestCase):
    """Base test case: configures the nsd4 handler and provides fixture
    notifications for domain create/delete events."""
    def setUp(self):
        """Point the consumer at the nsd4 handler with two slave servers."""
        cfg.CONF.set_override('handlers', ['nsd4'],
                              group=dnh_consumer.CFG_GRP)
        cfg.CONF.set_override('servers', ['host1:4242', 'host2'],
                              group=nsd4.CFG_GRP)
        cfg.CONF.set_override('pattern', 'slave',
                              group=nsd4.CFG_GRP)
    def get_create_notification(self):
        """Fixture: a 'dns.domain.create' notification for example.com."""
        return {
            '_context_roles': [],
            '_context_request_id': 'req-1328a110-c878-437b-8455-1b94eefc66b6',
            '_context_original_tenant_id': None,
            'event_type': 'dns.domain.create',
            'timestamp': u'2013-10-25 19:27:51.420937',
            '_context_auth_token': None,
            '_context_show_deleted': False,
            '_context_tenant': None,
            'message_id': u'2c60ea8c-60c7-40d2-8413-be7c12080477',
            '_unique_id': u'3e0bf71968624a96831e17def3c6d6a4',
            '_context_is_admin': True,
            '_context_read_only': False,
            '_context_tenant_id': None,
            '_context_user': None,
            '_context_user_id': None,
            'publisher_id': u'central.wr0k',
            'payload': {
                'status': u'ACTIVE',
                'retry': 600,
                'name': 'example.com.',
                'deleted': '0',
                'tenant_id': None,
                'created_at': u'2013-10-25T19:27:51.373368',
                'version': 1,
                'updated_at': None,
                'refresh': 3600,
                'id': '56248a86-ba74-4a53-aab4-abc6a472d32a',
                'minimum': 3600,
                'parent_domain_id': None,
                'expire': 86400,
                'ttl': 3600,
                'serial': 1382729271,
                'deleted_at': None,
                'email':
                'admin@example.com',
                'description': None,
            },
            'priority': 'INFO'
        }
    def get_delete_notification(self):
        """Fixture: the matching 'dns.domain.delete' notification (same
        domain id as the create fixture)."""
        return {
            '_context_roles': [],
            '_context_request_id': 'req-eab56feb-0354-4ef7-900a-0b7cf8a21967',
            '_context_original_tenant_id': None,
            'event_type': 'dns.domain.delete',
            'timestamp': '2013-10-25 19:40:13.310722',
            '_context_auth_token': None,
            '_context_show_deleted': False,
            '_context_tenant': None,
            'message_id': 'b7ab44c5-4444-405e-a2f6-16d192c66c8e',
            '_unique_id': '15c791dd5c0e4538a8b26dd2a519bfa3',
            '_context_is_admin': True,
            '_context_read_only': False,
            '_context_tenant_id': None,
            '_context_user': None,
            '_context_user_id': None,
            'publisher_id': 'central.wr0k',
            'payload': {
                'status': 'ACTIVE',
                'retry': 600,
                'name': 'example.com.',
                'deleted': '0',
                'tenant_id': None,
                'created_at': '2013-10-25T19:27:51.000000',
                'version': 1,
                'updated_at': None,
                'refresh': 3600,
                'id': '56248a86-ba74-4a53-aab4-abc6a472d32a',
                'minimum': 3600,
                'parent_domain_id': None,
                'expire': 86400,
                'ttl': 3600,
                'serial': 1382729271,
                'deleted_at': None,
                'email': 'admin@example.com',
                'description': None,
            },
            'priority': 'INFO',
        }
|
#!/usr/bin/python
# Capture one photo with the Raspberry Pi camera and tweet it.
import tweepy
import os
import picamera
import time

# Twitter API credentials ('X' placeholders — fill in real keys/tokens).
auth = tweepy.OAuthHandler('X', 'X')
auth.set_access_token('X', 'X')
twitter_api = tweepy.API(auth)

# Capture directly to the path that is tweeted below. The original captured
# to a relative 'test1.jpg' (cwd-dependent) but uploaded '/home/pi/test1.jpg',
# so the tweeted file could be stale or missing.
photo = '/home/pi/test1.jpg'
camera = picamera.PiCamera()
camera.capture(photo)
time.sleep(1)

status = 'This is a test using the Raspberry Pi Camera module via Tweepy!'
twitter_api.update_with_media(photo, status=status)
|
from django.urls import path
from Bigflow.ServiceManagement import views
from django.conf.urls import url, include
# URL routes for the ServiceManagement app.
# NOTE(review): the route name "Get_All_Table_Metadata" is reused for three
# different paths below (lines for Get_All_Table_Metadata, Get_Employee and
# SMS_Followup_File_Upload); Django keeps only the last registration for
# reverse()/url-tag lookups — confirm nothing reverses the shadowed names
# before giving each a unique name.
urlpatterns = [
    url(r'^ServiceManagement/(?P<template_name>[\w-]+)/$', views.Service_Management_Template, name='Service_Management_Template'),
    # path('AMC_Maker_Summary/' ,views.amc_Maker_Summary, name='servicesummary'),
    # path('AMC_Create/' ,views.amc_Create, name='amc_create'),
    path('Get_Service_Management/',views.get_Service_Management,name='Get_Service_Details'),
    path('Set_Service_Management/',views.set_Service_Management,name='Set_Service_Managament'),
    path('Get_Category/',views.category_data,name="Get_Category_Data"),
    path('Set_AMC_Details/',views.set_AMC_Details,name="Set_Amc_Details"),
    path('Get_AMC_Details/', views.get_AMC_Details, name="Get_Amc_Details"),
    path('Get_All_Table_Metadata/',views.Get_All_Table_Metadata,name="Get_All_Table_Metadata"),
    path('Get_Employee/',views.Get_Employee_Data,name="Get_All_Table_Metadata"),
    path('SMS_Followup_File_Upload/',views.sms_File_Upload,name="Get_All_Table_Metadata"),
    path('Session_Set_SMS_Data/', views.Session_Set_SMS_Data, name='Session_Set_Expense_Data'),
    path('Session_Get_SMS_Data/', views.Session_Get_SMS_Data, name='Session_Get_Expnese_Data'),
]
import torch
import numpy as np
class RNN(torch.nn.Module):
    """A single vanilla RNN cell without nonlinearity:
    out = x @ weights_x + h @ weights_h + biases."""

    def __init__(self, input_vector, hidden_vector):
        super().__init__()
        # Randomly initialised learnable parameters (same init calls, same
        # order, so the RNG stream matches the previous implementation).
        self.weights_x = torch.nn.Parameter(torch.randn((input_vector, hidden_vector)))
        self.weights_h = torch.nn.Parameter(torch.randn((hidden_vector, hidden_vector)))
        self.biases = torch.nn.Parameter(torch.randn((hidden_vector,)))

    def forward(self, input_x, input_h):
        """Combine the projected input and previous hidden state."""
        projected_x = torch.mm(input_x, self.weights_x)
        projected_h = torch.mm(input_h, self.weights_h)
        return torch.add(torch.add(projected_x, projected_h), self.biases)
class TimeRNN(torch.nn.Module):
    """Unrolls `times` independent RNN cells over the time axis of the input.

    xs is indexed as xs[:, t, :], so it is assumed to be (batch, time, input)
    shaped — TODO confirm against callers.
    """

    def __init__(self, input_vector, hidden_vector, times):
        super().__init__()
        # ModuleList (instead of a plain list) so each cell's parameters are
        # registered with this module; it still iterates/indexes like a list.
        self.layers = torch.nn.ModuleList(
            RNN(input_vector, hidden_vector) for _ in range(times)
        )
        # Fixed: the original had a bare `self.hs =` (SyntaxError).
        self.hs = []

    def forward(self, xs, h):
        """Feed each time step through its cell and return the final hidden
        state (fixed: the original returned nothing)."""
        for index, layer in enumerate(self.layers):
            h = layer.forward(xs[:, index, :], h)
        return h
|
import re
import csv
import sys
import random
from datetime import datetime, timedelta
class HourConverter():
    """Converts between 'HH:MM - DD <month> YYYY' strings (French month
    abbreviations) and datetime objects, and shifts timestamps by 7 hours."""

    # Abbreviation <-> number tables. These are this file's own conventions
    # (e.g. 'avri.', 'octb.'), not standard French abbreviations.
    _MONTH_TO_NUM = {
        'janv.': 1, 'févr.': 2, 'mars': 3, 'avri.': 4,
        'mai': 5, 'juin': 6, 'juil.': 7, 'août': 8,
        'sept.': 9, 'octb.': 10, 'nove.': 11, 'dece.': 12,
    }
    _NUM_TO_MONTH = {num: name for name, num in _MONTH_TO_NUM.items()}

    def dateConverter(self, date):
        """Parse 'HH:MM - DD <month> YYYY' into a datetime."""
        hour = date.split(":")[0]
        minute = date.split(":")[1].split(" ")[0]
        parts = date.split(" ")
        day = parts[2]
        month = self.monthConverter(parts[3])
        year = parts[4]
        return datetime(int(year), int(month), int(day), int(hour), int(minute))

    def monthConverter(self, month):
        """Return 1-12 for a known abbreviation, 0 otherwise (as before).
        Table lookup replaces the original twelve-branch if-chain."""
        return self._MONTH_TO_NUM.get(month, 0)

    def reverseMonthConverter(self, month):
        """Return the abbreviation for 1-12, None otherwise (as before)."""
        return self._NUM_TO_MONTH.get(month)

    def add7h(self, date):
        """Return the datetime shifted forward by 7 hours."""
        return date + timedelta(hours=7)

    def unConvert(self, date):
        """Format a datetime back into 'HH:MM - DD <month> YYYY'."""
        strdate = str(date)
        print(strdate)
        datepart = strdate.split(" ")[0]
        timepart = strdate.split(" ")[1]
        year, month_num, day = datepart.split("-")
        month = self.reverseMonthConverter(int(month_num))
        hour = timepart.split(":")[0]
        minute = timepart.split(":")[1]
        return hour + ":" + minute + " - " + day + " " + month + " " + year

    def convert7(self, data):
        """Shift every CSV row's timestamp (column 1) by 7 hours and write
        the result to resUpdate7.csv.

        Fixed: the output file is now closed via a context manager (the
        original handle was never closed).
        """
        with open('resUpdate7.csv', 'w', encoding="utf-8") as out:
            for row in data:
                user = row.split(",")[0]
                date = row.split(",")[1]
                texte = row.split(",")[2]
                shifted = self.unConvert(self.add7h(self.dateConverter(date)))
                out.write("{},{},{},\n".format(user, shifted, texte))
        return 0
# Script entry: shift every timestamp in resCarambarApres.csv by 7 hours.
fichier = "resCarambarApres.csv"
c = HourConverter()
# NOTE(review): thisdate is never used afterwards — presumably a leftover.
thisdate = datetime(int(2013), int(3), int(31), int(22), int(30))
# Fixed: the input file is now closed via a context manager (the original
# handle was never closed).
with open(fichier, 'rt', encoding='utf-8') as f:
    c.convert7(f)
|
#!/usr/bin/python
import os, sys, random
def roll_dice():
    """Return five dice rolls (digits 1-6) concatenated into a string."""
    # Same five random.choice(range(1, 7)) draws as before, so the RNG
    # stream is unchanged.
    return "".join(str(random.choice(range(1, 7))) for _ in range(5))
def loading_diceware_list():
    """Load 'dicepass.lst' (lines of '<dice-roll> <word>') from the script's
    directory into a dict mapping roll string -> word.

    Exits the process with status 1 if the list cannot be read.
    """
    dwl = {}
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        with open(dir_path + os.sep + "dicepass.lst") as f:
            for line in f:  # stream the file; no need for readlines()
                tok = line.strip().split()
                # Fixed: skip blank/malformed lines instead of raising
                # IndexError on tok[1].
                if len(tok) >= 2:
                    dwl[tok[0]] = tok[1]
    except IOError:
        # print() call syntax is valid on both Python 2 and 3
        # (the original Python-2 print statement breaks under Python 3).
        print("Error reading the Diceware list!")
        exit(1)
    return dwl
def generate_passphrase(dwl, nwords):
    """Build a passphrase of `nwords` distinct words, each picked by rolling
    five dice and looking the roll up in the diceware dict `dwl`."""
    chosen = []
    while len(chosen) < nwords:
        candidate = dwl[roll_dice()]
        if candidate not in chosen:
            chosen.append(candidate)
    return ' '.join(chosen)
def get_int(n):
    """Parse n as an int, defaulting to 6 on failure, and clamp the result
    to a minimum of 6 (the sensible lower bound for a passphrase length)."""
    try:
        number = int(n)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only parse failures fall back to 6.
        number = 6
    return 6 if number < 6 else number
def main():
    """Entry point: read the word count from argv[1] (default 6), load the
    diceware list and print a passphrase."""
    nwords = 6
    if len(sys.argv) >= 2:
        nwords = get_int(sys.argv[1])
    dwl = loading_diceware_list()
    # print() call syntax works on both Python 2 and 3 (the original
    # Python-2 print statement breaks under Python 3).
    print(generate_passphrase(dwl, nwords))


if __name__ == '__main__':
    main()
|
import os , json
import sqlite3
__author__ = 'aleksandrsl'
def update_address_mapping():
    """Migrate configs/address_mapping.json in place: give every entry an
    incrementing "id" (if missing) and a "record_history" flag."""
    app_root_path = os.getcwd()
    addr_path = os.path.join(app_root_path, "configs", "address_mapping.json")
    # open() replaces the Python-2-only file() builtin, and the handle is
    # closed deterministically via the context manager.
    with open(addr_path) as f:
        jobj = json.load(f)
    counter = 1
    for item in jobj:
        if not ("id" in item):
            item["id"] = counter
            counter += 1
            print("updated")
        if not ("record_history" in item):
            item["record_history"] = False
    with open(addr_path, "w") as f:
        f.write(json.dumps(jobj, indent=True))
    print("saved")
def get_address_mapping_id():
    """Print the highest "id" currently present in address_mapping.json."""
    app_root_path = os.getcwd()
    addr_path = os.path.join(app_root_path, "configs", "address_mapping.json")
    # open() replaces the Python-2-only file() builtin.
    with open(addr_path) as f:
        jobj = json.load(f)
    # max() instead of sorting the whole list just to take the last element.
    r = max(item["id"] for item in jobj)
    print(r)
def update_global_config():
    """Migrate configs/global.json in place, adding sections introduced by
    newer releases (db, app_store, smartly, influxdb, ...)."""
    print("updating global.json")
    app_root_path = os.getcwd()
    addr_path = os.path.join(app_root_path, "configs", "global.json")
    # open() replaces the Python-2-only file() builtin; handles are closed
    # via context managers.
    with open(addr_path) as f:
        jobj = json.load(f)
    if not "db" in jobj:
        jobj["db"] = {"timeseries_enabled": True, "db_path": "/tmp/timeseries.db"}
        print("global.json updated")
    else:
        # jobj["db"]["db_path"] = "/tmp/timeseries.db"
        print("global.json is already up to date")
    if not ("use_default_class_lookup" in jobj):
        jobj["use_default_class_lookup"] = True
    if "system" in jobj:
        jobj["system"]["version"] = "1.5.5"
        # NOTE(review): assumes an "mqtt" section exists whenever "system"
        # does — raises KeyError otherwise; confirm against real configs.
        jobj["mqtt"]["enable_sys"] = False
        jobj["system"]["http_server_port"] = 5000
        jobj["system"]["distro_server_uri"] = "http://lego.fiicha.net/blackfly"
        jobj["system"]["ui_security_disabled"] = False
        jobj["system"]["platform"] = "sg"
        jobj["system"]["sid"] = ""
    else:
        print("******************* YOUR EXISTING BLACKFLY INSTALLATION IS TOO OLD.PLEASE DELETE EXISTING INSTALLATION AND RUN INSTALLATION AGAIN ******************")
    if not ("app_store" in jobj):
        jobj["app_store"] = {"api_url": "http://lego.fiicha.net/bfhub/api",
                             "username": "alivinco",
                             "password": ""
                             }
    if not ("smartly" in jobj):
        jobj["smartly"] = {"sdc_uri": "https://prov-stg.service.smartly.no"}
    if not ("influxdb" in jobj):
        jobj["influxdb"] = {"enabled": False,
                            "host": "localhost",
                            "db_name": "blackfly",
                            "username": "root",
                            "password": "root",
                            "port": 8086
                            }
    with open(addr_path, "w") as f:
        f.write(json.dumps(jobj, indent=True))
def update_cmd_class_mapping():
    """Migrate configs/msg_class_mapping.json in place: flag the config.set
    command's properties as key/value pairs."""
    print("updating msg_class_mapping.json")
    app_root_path = os.getcwd()
    addr_path = os.path.join(app_root_path, "configs", "msg_class_mapping.json")
    # open() replaces the Python-2-only file() builtin.
    with open(addr_path) as f:
        jobj = json.load(f)
    for item in jobj:
        if item["msg_class"] == "config.set":
            item["ui_mapping"]["properties_are_key_value"] = True
            print("config.set command definition updated")
    with open(addr_path, "w") as f:
        f.write(json.dumps(jobj, indent=True))
def update_db():
    """Rebuild the timeseries table without its legacy "id" column.

    The probe query succeeds only while the legacy column still exists; in
    that case the table is copied, dropped and recreated without the column.
    """
    app_root_path = os.getcwd()
    addr_path = os.path.join(app_root_path, "configs", "global.json")
    # open() replaces the Python-2-only file() builtin.
    with open(addr_path) as f:
        jobj = json.load(f)
    db_path = jobj["db"]["db_path"]
    conn = sqlite3.connect(db_path)
    # check if script needs to run the update
    cur = conn.cursor()
    need_for_update = False
    try:
        cur.execute("select id from timeseries limit 1")
        need_for_update = True
        print("DB table will be updated ")
    except sqlite3.Error:
        # Narrowed from a bare except: only SQLite errors mean "no id column".
        print("DB is up to date")
    cur.close()
    if need_for_update:
        update_timeseries_sql = "CREATE TEMPORARY TABLE timeseries_backup(timestamp integer , dev_id integer , value real );" \
                                "INSERT INTO timeseries_backup SELECT timestamp,dev_id,value FROM timeseries;" \
                                "DROP TABLE timeseries;" \
                                "create table timeseries (timestamp integer , dev_id integer , value real );" \
                                "INSERT INTO timeseries SELECT timestamp,dev_id,value FROM timeseries_backup;" \
                                "DROP TABLE timeseries_backup;"
        conn.executescript(update_timeseries_sql)
        print("timeseries table updated")
    # Fixed: the connection is now closed on every path (the original only
    # closed it when the migration ran).
    conn.close()
# Run all migrations in order when the script is executed.
update_address_mapping()
update_global_config()
update_cmd_class_mapping()
update_db()
#get_address_mapping_id()
'''
To add
{
"ui_mapping": {
"ui_element": "msg_class_ui",
"num_type": "int",
"properties_are_key_value":true,
"override_properties": false
},
"msg_type": "command",
"msg_class": "config.set"
}
''' |
from multiprocessing import Process,Pipe
from random import randint
#YOLO
def random_int():
    """Return a list of four random ints drawn uniformly from [0, 20]."""
    # Four randint(0, 20) draws in the same order as before, so the RNG
    # stream is unchanged.
    return [randint(0, 20) for _ in range(4)]
# NOTE(review): this loop spins forever, re-defining f on every iteration;
# f is never called and no Process/Pipe is ever created, so the script never
# terminates and never does any work — presumably unfinished code.
while True:
    def f(child_conn):
        # Send four random ints over the pipe, then close our end.
        msg = random_int()
        child_conn.send(msg)
        child_conn.close()
import Pyro4
class ChatBox(object):
    """Chat room server object: tracks registered users and relays messages
    to each user's client callback."""

    def __init__(self):
        # registered users as (nick, client callback) pairs
        self.users = []
        # every nick currently registered on this server
        self.nicks = []

    def getNicks(self):
        """Return the list of registered nicks."""
        return self.nicks

    def join(self, nick, callback):
        """Register `nick` with its client `callback` and announce the join.

        Raises ValueError for an empty or already-used nickname.
        Returns the nicks of all users currently in the room.
        """
        if not nick:
            raise ValueError('Invalid nickname')
        if nick in self.nicks:
            raise ValueError('This nickname is already in use')
        self.users.append((nick, callback))
        self.nicks.append(nick)
        # mark 'jobdone' one-way so clients hand control back without blocking
        callback._pyroOneway.add('jobdone')
        print("%s JOINED" % nick)
        self.publish('SERVER', '** '+nick+' joined **')
        return [name for (name, _cb) in self.users]

    def leave(self, nick):
        """Unregister `nick` and announce the departure."""
        for entry in self.users:
            if entry[0] == nick:
                self.users.remove(entry)
                break
        self.publish('SERVER', '** '+nick+' left **')
        self.nicks.remove(nick)
        print("%s LEFT" % nick)

    def publish(self, nick, msg):
        """Deliver `msg` from `nick` to every registered callback, pruning
        listeners whose connection has dropped."""
        # iterate a snapshot: the list may be mutated while we deliver
        for entry in list(self.users):
            name, cb = entry
            try:
                cb.message(nick, msg)
            except Pyro4.errors.ConnectionClosedError:
                # connection dropped; another thread may already have removed
                # the listener, so check existence before removing
                if entry in self.users:
                    self.users.remove(entry)
                print('Remove dead listener %s %s' % (name, cb))
# Register a ChatBox instance with the Pyro4 name server under "chatbox"
# and serve remote calls until the daemon is shut down.
with Pyro4.core.Daemon() as daemon:
    with Pyro4.naming.locateNS() as ns:
        uri = daemon.register(ChatBox())
        ns.register("chatbox", uri)
        # enter the service loop
        print('Chatbox open.')
        daemon.requestLoop()
#!flask/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, jsonify
import requests
from lxml import html
import json
import http.client, urllib.parse
import datetime, time
from flask import send_file
from flask_cors import CORS
from multiprocessing import Queue
from flask import request
from flask_swagger import swagger
app = Flask(__name__)
# allow cross-origin requests from any origin
cors = CORS(app, resources={r"/*": {"origins": "*"}})
i = 0
Q = {}            # queue name -> multiprocessing.Queue
respone = []      # module-level scratch list used by /get_q_list
q_settings = {}   # exposed via /get_settings (never populated here)
statistic = {}    # queue name -> {"count_mes": pending-message count}
# <multiprocessing.queues.Queue object at 0x1081c12b0>
# Pre-create four default queues with zeroed message counters.
Q["queue1"] = Queue()
Q["queue2"] = Queue()
Q["queue3"] = Queue()
Q["queue4"] = Queue()
statistic["queue1"] = {}
statistic["queue2"] = {}
statistic["queue3"] = {}
statistic["queue4"] = {}
statistic["queue1"]["count_mes"] = 0
statistic["queue2"]["count_mes"] = 0
statistic["queue3"]["count_mes"] = 0
statistic["queue4"]["count_mes"] = 0
# Blocking / endless waiting...  (translated from Russian)
"""
http://127.0.0.1:5000/Create_queue?Q=new_queue
http://127.0.0.1:5000/get_q_list
http://127.0.0.1:5000/add_mes?Q=new_queue&M=%22get%20some%20work%22
http://127.0.0.1:5000/getmes?Q=new_queue
"""
@app.route('/getmes', methods=['GET'])
def getmes():
    """Pop one message (non-blocking) from the queue named by query param Q."""
    from queue import Empty  # exception type raised by multiprocessing.Queue
    Queue_name = request.args.get('Q')
    try:
        # get_nowait(): the original used a blocking get(), which hung the
        # request forever on an empty queue instead of ever reaching the
        # "failed or empty" reply.
        msg = Q[Queue_name].get_nowait()
    except (KeyError, Empty):
        return jsonify("failed or empty")
    # Decrement only after a message was actually retrieved; the original
    # decremented first, corrupting the counter on a failed get.
    statistic[Queue_name]["count_mes"] -= 1
    return jsonify("Success", str(msg))
@app.route('/Create_queue', methods=['GET'])
def Create_queue():
    """Create a new named queue (query param Q) with a zeroed counter."""
    Queue_name = request.args.get('Q')
    try:
        Q[Queue_name] = Queue()
        statistic[Queue_name] = {}
        statistic[Queue_name]["count_mes"] = 0
        # Fixed the "Succes" typo for consistency with the other endpoints.
        return jsonify("Success")
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit).
        return jsonify("failed")
@app.route('/add_mes', methods=['GET'])
def add_mes():
    """Append message M to the queue named by query param Q."""
    Queue_name = request.args.get('Q')
    mes = request.args.get('M')
    try:
        Q[Queue_name].put(mes)
        statistic[Queue_name]["count_mes"] += 1
        return jsonify("Success")
    except KeyError:
        # Narrowed from a bare except: an unknown queue name is the failure
        # mode this handler actually guards against.
        return jsonify("failed")
@app.route('/get_q_list', methods=['GET'])
def get_q():
    """Return the names of all existing queues."""
    try:
        # Build a per-request local list: the original cleared and refilled a
        # shared module-level list, which races under concurrent requests.
        names = list(Q)
        return jsonify("Queues List", names)
    except Exception:
        # Narrowed from a bare except.
        return jsonify("failed")
@app.route('/get_settings', methods=['GET'])
def get_settings():
    """Return the (currently never populated) queue settings dict."""
    try:
        return jsonify(q_settings)
    except Exception:
        # Narrowed from a bare except.
        return jsonify("failed")
@app.route('/get_statistic', methods=['GET'])
def get_stat():
    """Return per-queue message-count statistics."""
    try:
        return jsonify(statistic)
    except Exception:
        # Narrowed from a bare except.
        return jsonify("failed")
if __name__ == '__main__':
    # Flask development server only — not suitable for production.
    app.run(debug=True)
import numpy as np
import pandas as pd
from datetime import datetime
import time
# Poll nowinstock.net forever, regenerate GPUs.html with in-stock cards,
# then sleep 10-15 minutes between passes.
while True:
    # product pages to poll, keyed by GPU model
    url_dict = {
        'GTX1050ti' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1050ti/'
        ,'GTX1060' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1060/'
        ,'GTX1070' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1070/'
        ,'GTX1070ti' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1070ti/'
        ,'GTX1080' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1080/'
        ,'GTX1080ti' : 'http://www.nowinstock.net/computers/videocards/nvidia/gtx1080ti/'
        ,'GTX2060' : 'http://www.nowinstock.net/computers/videocards/nvidia/rtx2060/'
        ,'GTX2070' : 'http://www.nowinstock.net/computers/videocards/nvidia/rtx2070/'
        ,'GTX2080' : 'http://www.nowinstock.net/computers/videocards/nvidia/rtx2080/'
        ,'GTX2080ti' : 'http://www.nowinstock.net/computers/videocards/nvidia/rtx2080ti/'
    }

    def union_df(df, tempdf):
        """UNION ALL: append tempdf to df (tempdf alone when df is empty)."""
        if df.empty:
            return tempdf
        return pd.concat([df, tempdf])

    df = pd.DataFrame()
    for gpu, url in url_dict.items():
        tempdf = pd.read_html(url, header=0)[0]
        tempdf['GPU'] = gpu
        df = union_df(df, tempdf)

    df = df[df.Name != 'Ebay : All Models']  # filter out ebay rows
    df = df[df['Status1'] == 'In Stock']     # select where in stock
    # Strip currency formatting. regex=False makes '$' literal explicitly —
    # the implicit-regex default was removed in pandas 2.0.
    df['Last Price1'] = df['Last Price1'].str.replace('$', '', regex=False)
    df['Last Price1'] = df['Last Price1'].str.replace(',', '', regex=False).astype(float)
    #df = df[df['Last Price1'] <= 1000.00] #select where in stock
    df['Name'] = df['Name'].str.lower()  # lower-case for the contains checks
    # Hard-code GTX1060 variants as 6g or 3g based on model-name hints.
    # Fixed: np.where replaces pd.np.where (pd.np was removed in pandas 2.0;
    # numpy is already imported as np at the top of the file).
    df['GPUdd'] = np.where(df.Name.str.contains("6g|1060 fe|p10600a-10l"), df['GPU'] + ' 6g',
                  np.where(df.Name.str.contains("3g|p10610a-10l"), df['GPU'] + ' 3g',
                           df['GPU']))
    df['Name'] = df['Name'].str.upper()  # convert back to upper
    df.sort_values(['GPUdd', 'Last Price1'], ascending=[True, True], inplace=True)
    df['Last Price1'] = '$' + df['Last Price1'].astype(str)

    # Add a blank separator row after each GPU group.
    # Fixed: pd.concat replaces DataFrame.append (removed in pandas 2.0).
    gpu_list = df['GPUdd'].unique()
    blankline_df = pd.DataFrame()
    for gpu in gpu_list:
        tempdf = df[df['GPUdd'] == gpu]
        blankline_df = union_df(blankline_df, tempdf)
        blankline_df = pd.concat([blankline_df, pd.DataFrame([{}])], ignore_index=True)
    blankline_df.fillna('-', inplace=True)
    blankline_df.rename(index=str, columns={'GPUdd': 'GPU Type', 'Name' : 'Model','Last Price1': 'Last Price','Last Stock1': 'Last Stock'}, inplace=True)

    # Render the table as a Bootstrap-styled HTML page.
    html_output = pd.DataFrame.to_html(blankline_df[['GPU Type', 'Model', 'Last Price', 'Last Stock']], index=False, classes="table table-sm", border="0")
    html_output = html_output.replace('<tr style="text-align: right;">', '<tr>' )
    html_start = """
<!DOCTYPE html>
<html lang="en">
<head>
<title>GPUs</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css">
</head>
<body>
<div class="container">
<h2>Current costs for in stock GPUs</h2>
"""
    html_end = '</div></body></html>'
    updated_time = '<i>Updated: ' + datetime.now().strftime('%Y-%m-%d') + ' at ' + datetime.now().strftime('%H:%M') + '</i><br>'
    html_all = html_start + updated_time + html_output + html_end
    # Context manager guarantees the file is closed even if write() raises.
    with open("GPUs.html", "w") as html_file:
        html_file.write(html_all)

    # randomized 10-15 minute pause between polls
    time.sleep(np.random.randint(600, 900))
|
from sys import stdin
def parse_program(line):
    """Parse a line of the form 'id <-> c1, c2, ...' into (id, [c1, c2, ...])."""
    head, tail = line.split('<->')
    return int(head), [int(token) for token in tail.split(',')]
def main():
    """Read 'id <-> connections' lines from stdin and print how many
    connected components the program graph contains."""
    programs = dict(parse_program(line) for line in stdin)
    groups = {}        # program id -> the shared set of ids in its component
    pending = []       # DFS work stack
    group_count = 0
    for start in programs:
        if start in groups:
            continue
        # `start` belongs to a component we have not explored yet
        group_count += 1
        component = set()
        pending.append(start)
        while pending:
            current = pending.pop()
            if current in component:
                continue
            component.add(current)
            groups[current] = component
            pending.extend(programs[current])
    print(group_count)


if __name__ == '__main__':
    main()
|
# Read lines from the user until an empty line, save them to text.txt,
# then echo the file back.
# Fixed: both files are now closed via context managers (the original left
# the write handle open on exceptions and closed the read handle twice),
# and the redundant double emptiness check is gone.
with open('text.txt', 'w') as out_file:
    while True:
        text = input('Введите текст: \n')
        if not text:
            break
        out_file.write(text)

with open('text.txt') as in_file:
    for line in in_file:
        print(line)
import urllib
from bs4 import BeautifulSoup
from uritools import urijoin
import re
from sqlite3 import dbapi2 as sqlite
# words to ignore
# common stop words that are never indexed
ignorewords=set(['the','of','to','and','a','in','is','it'])
class crawler:
    """Simple web crawler that indexes pages into a SQLite database and can
    compute PageRank over the collected link graph."""

    # initialize the crawler with the dbname
    def __init__(self, dbname):
        self.con = sqlite.connect(dbname)

    # close the connection on garbage collection
    def __del__(self):
        self.con.close()

    def dbcommit(self):
        """Commit the current transaction."""
        self.con.commit()

    def createindextables(self):
        """Create the crawler's schema (tables and indices)."""
        self.con.execute('create table urllist(url)')
        self.con.execute('create table wordlist(word)')
        self.con.execute('create table wordlocation(urlid,wordid,location)')
        self.con.execute('create table link(fromid integer,toid integer)')
        self.con.execute('create table linkwords(wordid,linkid)')
        self.con.execute('create index wordidx on wordlist(word)')
        self.con.execute('create index urlidx on urllist(url)')
        self.con.execute('create index wordurlidx on wordlocation(wordid)')
        self.con.execute('create index urltoidx on link(toid)')
        self.con.execute('create index urlfromidx on link(fromid)')
        self.dbcommit()

    # getting an entry id or adding it if not present
    def getentryid(self, table, field, value, createnew=True):
        """Return the rowid of `value` in `table`.`field`, inserting it when
        absent (unless createnew is False, in which case return None).

        table/field come from trusted call sites; `value` is passed as a
        bound parameter — the original interpolated it into the SQL string,
        which broke on quotes and was injectable.
        """
        cur = self.con.execute(
            "select rowid from {} where {}=?".format(table, field), (value,))
        res = cur.fetchone()
        if res is None:
            if createnew:
                cur = self.con.execute(
                    "insert into {}({}) values (?)".format(table, field), (value,))
                return cur.lastrowid
            return None
        return res[0]

    # index a page
    def addtoindex(self, url, soup):
        """Record every non-stop-word of the page with its position."""
        if self.isindexed(url):
            return
        print('Indexing {}'.format(url))
        text = self.gettextonly(soup)
        words = self.separatewords(text)
        urlid = self.getentryid('urllist', 'url', url)
        for i, word in enumerate(words):
            if word in ignorewords:
                continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.con.execute(
                'insert into wordlocation(urlid,wordid,location) values (?,?,?)',
                (urlid, wordid, i))

    # extract the page text only
    def gettextonly(self, soup):
        """Recursively collect the visible text of a parsed node."""
        v = soup.string
        if v is None:
            resulttext = ''
            for t in soup.contents:
                # recurse to traverse the DOM
                resulttext += self.gettextonly(t) + '\n'
            return resulttext
        return v.strip()

    # separate words by any non-alphanumeric run
    def separatewords(self, text):
        """Split text into lower-cased words.

        Fixed: the original pattern '\\W*' can match the empty string, and on
        Python 3.7+ re.split then splits between every character; '\\W+'
        splits on runs of non-word characters as intended.
        """
        splitter = re.compile(r'\W+')
        return [s.lower() for s in splitter.split(text) if s != '']

    def isindexed(self, url):
        """True when the url is in urllist AND its words have been recorded.

        Fixed: the original interpolated the url unquoted into the SQL
        (a guaranteed syntax error, and injectable); both queries now use
        bound parameters.
        """
        cur = self.con.execute("select rowid from urllist where url=?", (url,))
        res = cur.fetchone()
        if res:
            # a url can be in urllist (seen as a link) without being crawled
            v = self.con.execute(
                "select * from wordlocation where urlid=?", (res[0],)).fetchone()
            if v:
                return True
        return False

    def addlinkref(self, urlFrom, urlTo, linkText):
        """Record a link between two urls along with its anchor-text words."""
        words = self.separatewords(linkText)
        # Fixed NameError: the original referenced `urlfrom` (wrong case).
        fromid = self.getentryid('urllist', 'url', urlFrom)
        toid = self.getentryid('urllist', 'url', urlTo)
        if fromid == toid:
            return
        cur = self.con.execute(
            "insert into link(fromid,toid) values(?,?)", (fromid, toid))
        linkid = cur.lastrowid
        for word in words:
            if word in ignorewords:
                continue
            wordid = self.getentryid('wordlist', 'word', word)
            self.con.execute(
                "insert into linkwords(linkid,wordid) values(?,?)",
                (linkid, wordid))

    def crawl(self, pages, depth=2):
        """Breadth-first crawl starting from `pages`, `depth` levels deep."""
        # Fixed: the original called urllib2 (Python 2 module) which was
        # never imported; urllib.request is its Python 3 equivalent.
        import urllib.request
        for i in range(depth):
            # use a set to prevent repeats
            newpages = set()
            for page in pages:
                try:
                    c = urllib.request.urlopen(page)
                except Exception:
                    print("Could not open {}".format(page))
                    continue
                # after retrieving the html
                soup = BeautifulSoup(c.read())
                # index the page (add all its words to the words table)
                self.addtoindex(page, soup)
                # iterate through all the links in the page
                for link in soup('a'):
                    if 'href' in dict(link.attrs):
                        url = urijoin(page, link['href'])
                        # skip urls containing quotes
                        if url.find("'") != -1:
                            continue
                        # remove location fragments
                        url = url.split('#')[0]
                        # only follow http(s) urls we have not indexed yet
                        if url[0:4] == 'http' and not self.isindexed(url):
                            newpages.add(url)
                        # create a link between the two pages
                        linkText = self.gettextonly(link)
                        self.addlinkref(page, url, linkText)
                # store the db
                self.dbcommit()
            # recurse into the next level
            pages = newpages

    def calculatepagerank(self, iterations=20):
        """(Re)build the pagerank table with the iterative PageRank formula
        (damping factor 0.85, base score 0.15)."""
        self.con.execute('drop table if exists pagerank')
        self.con.execute('create table pagerank(urlid primary key, score)')
        # every url starts with a score of 1.0
        self.con.execute('insert into pagerank select rowid, 1.0 from urllist')
        self.dbcommit()
        for i in range(iterations):
            print("Iteration {}".format(i))
            for (urlid,) in self.con.execute('select rowid from urllist'):
                pr = 0.15
                # every distinct page linking to this one contributes its own
                # score divided by its outgoing link count
                for (linker,) in self.con.execute(
                        'select distinct fromid from link where toid=?', (urlid,)):
                    linkingpr = self.con.execute(
                        'select score from pagerank where urlid=?',
                        (linker,)).fetchone()[0]
                    linkingcount = self.con.execute(
                        'select count(*) from link where fromid=?',
                        (linker,)).fetchone()[0]
                    pr += 0.85 * (linkingpr / linkingcount)
                self.con.execute(
                    'update pagerank set score=? where urlid=?', (pr, urlid))
            self.dbcommit()
|
# Read two integers from the user and print their sum.
x=int(input("Enter 1st Number="))
y=int(input("Enter 2nd Number="))
z=x+y
print(z)
'''(The String as Input)
ch=input("enter the char")[0:2]
print(ch)
'''
'''(The String as Input)
ch=input("enter the char")
print(ch[5])
'''
'''("Sol of expressions")
result=eval(input("Enter the expression"))
print(result)
'''
''' (User input in cmd)
import sys
x=int(sys.argv[1])
y=int(sys.argv[2])
z=x+y
print(z)
'''
''' (Home Work)
import math
import sys
x=int(sys.argv[1])
y=math.pow(x,3)
print(y)
'''
|
# Link: https://leetcode.com/problems/climbing-stairs/
# It takes 'n' steps to reach the top of a staircase. You can either climb 1 or 2 steps. How many distinct ways can you climb to the top?
class Solution:
    def climbStairs(self, n: int) -> int:
        """Number of distinct ways to climb n stairs taking 1 or 2 steps at
        a time — a Fibonacci-style recurrence built bottom-up."""
        ways = [0, 1, 2]
        if n < 3:
            return ways[n]
        for step in range(3, n + 1):
            ways.append(ways[step - 1] + ways[step - 2])
        return ways[n]
"""
4. Write a script that receives a directory as argument and creates a JSON file with data about all the files in that
directory. For each file, the following information will be displayed: file_name, md5_file, sha256_file, size_file
(in bytes), time when the file was created (human-readable) and the absolute path to the file.
"""
import hashlib
import json
import os
import sys
def run():
    """Walk the directory given as argv[1] and write files_json.json with
    per-file metadata: name, md5, sha256, size, creation time and path."""
    import time  # local import: only needed for the readable timestamp

    if len(sys.argv) < 2:
        print("Run the project as following:")
        print("python.exe ex_4.py <directory-path>")
        exit()
    directory_path = sys.argv[1]
    assert os.path.isdir(directory_path), "path is not dir"

    # collect every readable regular file under the directory
    all_files = []
    for root, directories, files in os.walk(directory_path):
        all_files += [
            os.path.abspath(os.path.join(root, file_name)) for file_name in files if
            os.path.isfile(os.path.join(root, file_name)) and os.access(os.path.join(root, file_name), os.R_OK)
        ]

    json_dict = []
    for file in all_files:
        try:
            md5_file = hashlib.md5()
            sha256_file = hashlib.sha256()
            with open(file, 'rb') as fd:
                # hash in 1 KiB chunks so large files are never fully loaded
                while True:
                    data = fd.read(1024)
                    if not data:
                        break
                    md5_file.update(data)
                    sha256_file.update(data)
        except Exception as e:
            print(e)
            exit()
        json_dict.append({
            # os.path.basename: the original split on '/' and broke on Windows
            "file_name": os.path.basename(file),
            # hexdigest: the original decoded raw digest bytes as latin-1,
            # producing unreadable mojibake in the JSON output
            "md5_file": md5_file.hexdigest(),
            "sha256_file": sha256_file.hexdigest(),
            "size_file": os.path.getsize(file),
            # human-readable, as the task statement requires (was a raw epoch float)
            "time": time.ctime(os.path.getctime(file)),
            "absolute_path": file,
        })
    with open("files_json.json", "w") as fd:
        json.dump(json_dict, fd)


if __name__ == "__main__":
    run()
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from tiered_algorithm import state_to_actions
from variables import Actions
device = torch.device("cpu")
def set_device(_device):
    """Point the module-level `device` at a new torch device (e.g.
    torch.device('cuda')); all tensors built afterwards land on it."""
    global device
    device = _device
class maze_game:
    """Grid-maze RL environment.

    state is a dict with:
      'grid'   -- 2D numpy array of walls (nonzero = wall)
      'agent'  -- [row, col] position of the agent
      'target' -- [row, col] position of the goal
    """

    def __init__(self, maze_list, max_steps=100, max_rounds=100, verbose=False):
        # maze_list: candidate grids; one is picked at random on reset().
        # NOTE(review): max_rounds is accepted but never stored or enforced.
        self.maze_list = maze_list
        self.verbose = verbose
        self.max_steps = max_steps
        self.state = {}

    @staticmethod
    def state_to_channels(state):
        """Encode state as a 3-channel float tensor: walls grid, agent
        one-hot, target one-hot."""
        c1 = torch.from_numpy(state['grid']).to(device)
        c2 = torch.zeros(state['grid'].shape).to(device)
        c3 = torch.zeros(state['grid'].shape).to(device)
        c2[tuple(state['agent'])] = 1.0
        c3[tuple(state['target'])] = 1.0
        envstate = torch.stack([c1, c2, c3]).float()
        return envstate

    def get_state(self):
        """Return the current state encoded as channels."""
        return self.state_to_channels(self.state)

    def reset(self):
        """Pick a random maze and place target and agent on random free,
        distinct cells; clear history and counters."""
        self.map_number = np.random.randint(0,len(self.maze_list))
        self.state['grid'] = self.maze_list[self.map_number]
        # NOTE(review): positions are drawn from [0, shape[-1]) on both axes,
        # which assumes a square grid — confirm.
        self.state['target'] = np.random.randint(0, self.state['grid'].shape[-1], 2)
        while self.state['grid'][tuple(self.state['target'])]:
            self.state['target'] = np.random.randint(0, self.state['grid'].shape[-1], 2)
        self.state['agent'] = np.random.randint(0, self.state['grid'].shape[-1], 2)
        while self.state['grid'][tuple(self.state['agent'])] or np.all(self.state['target']==self.state['agent']):
            self.state['agent'] = np.random.randint(0, self.state['grid'].shape[-1], 2)
        self.hist = [self.state['agent'].tolist()]
        self.steps = 0
        self.rounds = 0
        pass

    def set_position(self, pos):
        # overwrite the agent position (no validity check)
        self.state['agent'] = pos

    def set_target(self, pos):
        # overwrite the target position (no validity check)
        self.state['target'] = pos

    def step(self, actions):
        """Apply a sequence of actions; returns (channels, total reward,
        done, win).

        Reward scheme visible here: -0.04 per move/no-op, -0.25 for
        revisiting a cell, -0.75 for an invalid move, +1.0 for reaching the
        target, and a final -0.2 when the sequence did not end on the goal.
        """
        reward_total = 0
        self.rounds += 1
        reward = -0.04
        for action in actions:
            self.steps += 1
            if action.value == 4:
                # action value 4 = no-op (Actions.NONE below); small step cost
                reward = -0.04
                reward_total += reward
                continue
            # map action value 0..3 onto a unit displacement via sin/cos;
            # presumably 0=N, 1=E, 2=S, 3=W per the Actions enum — confirm.
            dx = np.sin(action.value * np.pi/2).astype(int)
            dy = -np.cos(action.value * np.pi/2).astype(int)
            invalid = False
            # out of bounds vertically?
            if self.state['agent'][0] + dy < 0 or self.state['agent'][0] + dy >= self.state['grid'].shape[0]:
                invalid = True
            # out of bounds horizontally?
            elif self.state['agent'][1] + dx < 0 or self.state['agent'][1] + dx >= self.state['grid'].shape[-1]:
                invalid = True
            # destination is a wall?
            elif self.state['grid'][self.state['agent'][0]+dy, self.state['agent'][1]+dx]:
                invalid = True
            if not invalid:
                self.state['agent'][1] += dx
                self.state['agent'][0] += dy
            if self.state['agent'].tolist() in self.hist:
                reward = -0.25
            if invalid:
                reward = -0.75
            elif np.all(self.state['target']==self.state['agent']):
                reward = 1.0
            self.hist.append(self.state['agent'].tolist())
            reward_total += reward
            if self.steps > self.max_steps or np.all(self.state['target']==self.state['agent']): break
        status = 'not_over'
        if self.steps > self.max_steps:
            status = 'lose'
        elif np.all(self.state['target']==self.state['agent']):
            status = 'win'
        if reward != 1:
            # extra penalty when the last action did not land on the goal
            reward_total -= 0.2
        # state reward done win
        return self.state_to_channels(self.state), reward_total, status!='not_over', status=='win'

    def observe(self):
        """Return the raw state dict."""
        return self.state

    def valid_actions(self):
        """Return the in-bounds, non-wall moves available from the agent's
        current cell."""
        valid = []
        if not self.state['agent'][0] + 1 >= self.state['grid'].shape[0] and self.state['grid'][self.state['agent'][0]+1, self.state['agent'][1]+0] == 0:
            valid.append(Actions.SOUTH)
        if not self.state['agent'][0] - 1 < 0 and self.state['grid'][self.state['agent'][0]-1, self.state['agent'][1]+0] == 0:
            valid.append(Actions.NORTH)
        if not self.state['agent'][1] + 1 >= self.state['grid'].shape[1] and self.state['grid'][self.state['agent'][0]+0, self.state['agent'][1]+1] == 0:
            valid.append(Actions.EAST)
        if not self.state['agent'][1] - 1 < 0 and self.state['grid'][self.state['agent'][0]-0, self.state['agent'][1]-1] == 0:
            valid.append(Actions.WEST)
        return valid

    def best_seq(self, n_actions=4):
        """Ask the tiered algorithm for up to n_actions moves and pad with
        NONE; returned as an (n_actions, 1, 1) tensor of action values."""
        actions = state_to_actions(self.state, limit_n=n_actions)
        actions += [Actions.NONE] * (n_actions-len(actions))
        return torch.tensor([i.value for i in actions]).view(n_actions, 1, 1).to(device)

    def is_complete(self):
        """True when the agent reached the target or ran out of steps."""
        return np.all(self.state['target']==self.state['agent']) or self.steps > self.max_steps

    def show(self):
        """Render the maze with matplotlib: walls dark, visited cells 0.6,
        agent 0.3, target 0.8 on a gray colormap."""
        plt.grid('on')
        nrows, ncols = self.state['grid'].shape
        ax = plt.gca()
        ax.set_xticks(np.arange(0.5, nrows, 1))
        ax.set_yticks(np.arange(0.5, ncols, 1))
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        canvas = 1-self.state['grid'].copy()
        for row,col in self.hist:
            canvas[row,col] = 0.6
        if self.state['agent'] is not None:
            canvas[tuple(self.state['agent'])] = 0.3
        if self.state['target'] is not None:
            canvas[tuple(self.state['target'])] = 0.8
        img = plt.imshow(canvas, interpolation='none', cmap='gray')
        return img
from extra.makeTree import *
import queue
def levelOrder(head):
    """Print the tree's node values in breadth-first (level) order.

    `head` must expose a `root` node; nodes carry `elem` plus `lChild`
    and `rChild` pointers.
    """
    pending = queue.Queue()
    pending.put(head.root)
    while not pending.empty():
        node = pending.get()
        print(node.elem)
        for child in (node.lChild, node.rChild):
            if child is not None:
                pending.put(child)
def isCBT(head):
"""
思路:
一棵树按层遍历
如果碰到某个节点有右孩子但没有左孩子 直接返回False
如果碰到某个节点孩子不全 即除去第一种情况还有两种情况 有左没右以及左右都没有
碰到这种情况该节点之后的节点必须都是叶节点 此时需要加入一个开启判断
一旦满足开启条件 是否开启变为True 然后后续的节点必须都得是叶节点
"""
cur_node = head.root
help_queue = queue.Queue()
help_queue.put(cur_node)
while not help_queue.empty():
cur_node = help_queue.get()
if cur_node.rChild is not None and (cur_node.lChild is None or cur_node.lChild.elem is None):
return False
elif cur_node.rChild is not None and cur_node.lChild is not None:
help_queue.put(cur_node.lChild)
help_queue.put(cur_node.rChild)
else:
# 左边为不为空右边为空或两边都为空
while not help_queue.empty():
cur_node = help_queue.get()
if cur_node.lChild is not None or cur_node.rChild is not None:
return False
return True
return True
if __name__ == '__main__':
    # Build a small tree, print it level by level, then graft an extra node
    # deep on the left side and check completeness.
    tree = Tree()
    tree.add(5)
    tree.add(3)
    tree.add(8)
    tree.add(2)
    tree.add(4)
    tree.add(6)
    tree.add(10)
    # tree.add(None)
    # tree.add(None)
    # tree.add(11)
    levelOrder(tree)
    # attach node 11 as 2's right child, breaking the complete-tree shape
    ele = Node(11)
    tree.root.lChild.lChild.rChild = ele
    res = isCBT(tree)
    print(res)
|
from sqlalchemy import Column, Integer, String
from flask_apispec import use_kwargs, marshal_with
from marshmallow import Schema, fields
from webargs.flaskparser import use_args
from hc_flask.api import FlaskApi
from hc_flask.database import SqlAlchemyBase
class ScreenResolution(SqlAlchemyBase, FlaskApi):
    """Defines the table and API for a ScreenResolution log entry."""

    # One row per client; the client name is the primary key, so repeated
    # PUTs for the same client replace its resolution entry.
    client = Column(String(255), primary_key=True)
    hor_res = Column(Integer)  # horizontal resolution in pixels
    ver_res = Column(Integer)  # vertical resolution in pixels

    class InputSchema(Schema):
        # Marshmallow schema validating each element of the PUT payload.
        client = fields.Str(required=True)
        hor_res = fields.Int(required=True)
        ver_res = fields.Int(required=True)

    @classmethod
    @use_args(InputSchema(many=True, strict=True),
              locations=('json', ))
    def put_endpoint(cls, body):
        # Accepts a JSON list of entries; storage is handled by the base API.
        return super(ScreenResolution, cls).put_endpoint(body)

    def __str__(self):
        # NOTE(review): relies on to_dict() providing a 'site' key not
        # declared on this model — presumably contributed by SqlAlchemyBase;
        # confirm against the base class.
        return "{site}\\{client} ({hor_res}x{ver_res})".format(**self.to_dict())
|
import os
from collections import defaultdict
import re
import pprint as pp
import configparser
def create_ticker_name_dict(limit=20, config_path='src/config.ini'):
    '''
    Create a dictionary that maps a (latin-alphabet) ticker to its Korean name.

    Each data line is expected to hold the Korean name followed by the
    ticker, e.g. "삼성전자 SamsungElec".

    :param limit: maximum number of lines to read (default 20, matching the
        original hard-coded cap — calls with no arguments behave as before).
    :param config_path: INI file whose [PATH] tickernames entry locates the data.
    :return: tickers['ticker'] = name
    '''
    config = configparser.ConfigParser()
    config.read(config_path, encoding='utf-8')
    path = config['PATH']['tickernames']
    tickers = {}  # plain dict: the original defaultdict never had a factory
    cnt = 0
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            cnt += 1
            match = re.search('[a-zA-Z].*', line)
            if match is not None:
                # FIX: skip malformed lines instead of crashing with
                # IndexError on re.findall(...)[0].
                ticker = match.group(0)
                tickers[ticker] = line.replace(ticker, '').strip()
            if cnt == limit:
                break
    return tickers
|
##########################################################
from math import atan2,cos,sin,pi
#面向对象中的设计模式-实现复数和分数的运算
#首先定义一个数的抽象类
class Number():
    """Abstract numeric base: maps + and * onto subclass add()/mul() hooks.

    NOTE(review): this first version is immediately shadowed by the second
    ``Number`` definition below (the tag-dispatching one); it is dead code
    kept only for the tutorial's step-by-step narrative.
    """
    def __add__(self, another):
        return self.add(another)

    def __mul__(self, another):
        return self.mul(another)
#############
#这是后期实现
#############
def add_rational_and_complex(r, c):
    """Mixed-type addition: rational r plus complex c, as a rectangular complex."""
    rational_as_real = r.b / r.a
    return ComplexRI(rational_as_real + c.real, c.imag)
def add_complex_and_rational(c, r):
    """Mixed-type addition with swapped operands; addition is commutative."""
    return add_rational_and_complex(r, c)
class Number():
    """Numeric base with tag-based cross-type dispatch.

    Same-tag operands use the subclass add()/mul() hooks; mixed-tag
    additions are routed through the handler table below.
    """
    adders_relation = {('com', 'rat'): add_complex_and_rational,
                       ('rat', 'com'): add_rational_and_complex}

    def __add__(self, another):
        if self.tag == another.tag:
            return self.add(another)
        # Look the handler up on self so subclasses could override the table.
        handler = self.adders_relation.get((self.tag, another.tag))
        if handler is not None:
            return handler(self, another)

    def __mul__(self, another):
        # Only same-tag multiplication is supported (mirrors __add__'s first case).
        if self.tag == another.tag:
            return self.mul(another)
#实现和add类似
#然后定义一个复数的抽象类,继承自Number
class Complex(Number):
    """Abstract complex number; subclasses expose real/imag and dis/ang views."""

    def add(self, another):
        # Addition is natural in rectangular coordinates.
        total_real = self.real + another.real
        total_imag = self.imag + another.imag
        return ComplexRI(total_real, total_imag)

    def mul(self, another):
        # Multiplication is natural in polar coordinates.
        return ComplexMA(self.dis * another.dis, self.ang + another.ang)
#分别定义两种类型
class ComplexRI(Complex):
    """Complex number stored in rectangular (real/imaginary) form."""

    def __init__(self, x, y):
        self.real = x
        self.imag = y

    @property
    def dis(self):
        # Magnitude derived on demand from the rectangular components.
        return (self.real**2 + self.imag**2)**0.5

    @property
    def ang(self):
        # Angle in radians, quadrant-correct via atan2.
        return atan2(self.imag, self.real)

    def __str__(self):
        # BUG FIX: the original printed 'ComplexMA(...)' here — a copy-paste
        # from the polar class — misreporting the object's type.
        return 'ComplexRI({0:g},{1:g})'.format(self.real, self.imag)
class ComplexMA(Complex):
    """Complex number stored in polar (magnitude/angle) form."""

    def __init__(self, dis, ang):
        self.dis = dis
        self.ang = ang

    @property
    def real(self):
        # Projection of the polar form onto the real axis.
        return self.dis * cos(self.ang)

    @property
    def imag(self):
        # Projection of the polar form onto the imaginary axis.
        return self.dis * sin(self.ang)

    def __str__(self):
        return 'ComplexMA({0:g},{1:g}*pi)'.format(self.dis, self.ang / pi)
# OOP exercise: a rational-number type.
# FIX: fractions.gcd was removed in Python 3.9; math.gcd is its home since 3.5.
from math import gcd


class Ration(Number):
    """Rational number b/a kept in lowest terms (a = denominator, b = numerator)."""

    def __init__(self, a, b):
        # Reduce by the greatest common divisor on construction.
        g = gcd(a, b)
        self.a = a // g
        self.b = b // g

    def add(self, another):
        # b1/a1 + b2/a2 = (b1*a2 + b2*a1) / (a1*a2)
        a1, b1 = self.a, self.b
        a2, b2 = another.a, another.b
        return Ration(a1 * a2, b1 * a2 + b2 * a1)

    def mul(self, another):
        return Ration(self.a * another.a, self.b * another.b)

    def __str__(self):
        # Printed as Ration(numerator, denominator).
        return 'Ration({0:g},{1:g})'.format(self.b, self.a)
# Type checks could be done with isinstance().
# Tags let the mixed-type adders dispatch between rational and complex values;
# the handlers themselves live near the top of the file ("later implementation").
Ration.tag = 'rat'
Complex.tag = 'com'
#### quick demo ####
ma = ComplexMA(2, pi / 2)
print(ma)
ri = ComplexRI(1, 2)
print(ri)
print(ri + ma)
tar = ri * ma
print(tar)
print(tar.real)
print(tar.imag)
zhang = Ration(1, 2)
# Mixed-type addition: routed through adders_relation[('com', 'rat')].
tar = ri + zhang
print(tar)
import csv
import cv2
import dataset_creator as DC
import feature_extractor as FE
import glob
import numpy as np
import os
import read_files as RF
import time
import platform
from scipy import stats
from commonfunctions import *
# Wall-clock start used by the runtime report at the bottom of the script.
start_time = time.time()
###################################################################
#WIP
def ReadAndSegment(ii):
    """Segment scanned page images and pair extracted letters with ground-truth text.

    For each of the first Number_Of_Files PNGs under the scanned-pages folder,
    reads the matching .txt transcription, extracts individual letters from
    the image and hands (letters, labels) to dataset_creator, which appends
    to image_label_pair.csv.

    :param ii: running counter used only for the progress print below.
    """
    # Start from a clean CSV so reruns do not append duplicate pairs.
    if os.path.exists("image_label_pair.csv"):
        os.remove("image_label_pair.csv")
    Path = './Pattern Data Set/scanned/'
    textPath = './Pattern Data Set/text/'
    Number_Of_Files = 4000
    #Number_Of_Files = 1
    gen = glob.iglob(Path+ "*.png")
    for i in range(Number_Of_Files):
        # NOTE(review): raises StopIteration if fewer PNGs exist than Number_Of_Files.
        py = next(gen)
        input_image = cv2.imread(py)
        # Derive the transcription file name from the image path; separator
        # and path depth differ between Windows and Unix layouts.
        splitted = None
        if platform.system() == "Windows":
            splitted = py.split("\\")
            splitted = splitted[1].split(".")
        else:
            splitted = py.split("/")
            splitted = splitted[3].split(".")
        splitted = splitted[0]
        splitted += ".txt"
        list_of_letters = RF.read_text_file(textPath,splitted)
        all_words = FE.extractSeparateLettersWholeImage(input_image)
        DC.createDataSet(all_words,list_of_letters)
        ii += 1
        print("Ended Images Index: ",str(ii))
# Run the segmentation once, then report total wall-clock runtime.
j = 0
ReadAndSegment(j)
print("Running Time In Seconds: {0:.3f}".format(time.time() - start_time))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# CGI处理模块
import cgi,cgitb
# 创建FieldStorage的实例化
form = cgi.FieldStorage()
if form.getvalue('google'):
google_flag = "是"
else:
google_flag = "否"
if form.getvalue("baidu"):
baidu_flag = "是"
else:
baidu_flag = "否"
print "Content-type:text/html"
print
print "<html>"
print "<head>"
print '<meta charset:"utf-8">'
print "</head>"
print "<body>"
print "<h2>是否选择了谷歌: %s</h2>" % google_flag
print "<h2>是否选择了百度: %s</h2>" % baidu_flag
print "</body>"
print "</html>"
|
# Generated by Django 3.2 on 2021-04-24 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the Seats.seq_code field with a nullable CharField 'name'."""

    dependencies = [
        ('bookings', '0011_rename_at_theater_seats_present_in_theater'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='seats',
            name='seq_code',
        ),
        # null=True lets existing rows migrate without a default value.
        migrations.AddField(
            model_name='seats',
            name='name',
            field=models.CharField(max_length=24, null=True),
        ),
    ]
|
# Small demo of list concatenation, sorting, repetition and membership tests.
a = [2, 6, 7, 9, 12]
b = [99, 56, 3, 5, 87]
c = sorted(a + b)  # merged copy, ascending order
print(c)
print()
print(a * 2)  # repetition: the elements of `a` written out twice
print()
print(2 in a)  # membership: is the value present in the list?
print()
print(15 in a)
def splitter(data, n):
    """Greedily distribute routes over n buckets, balancing total hours.

    Iterates (Hr_Length, Cluster) pairs and always drops the next route into
    the currently least-loaded bucket (lowest index on ties), returning the
    hour lists and the matching cluster-id lists per bucket.
    """
    pairs = zip(data.Hr_Length.tolist(), data.Cluster.tolist())
    hour_buckets = [[] for _ in range(n)]
    cluster_buckets = [[] for _ in range(n)]
    loads = [0] * n
    for hours, cluster_id in pairs:
        # min() returns the first minimal index, matching the original
        # first-match scan over equally loaded buckets.
        target = min(range(n), key=loads.__getitem__)
        hour_buckets[target].append(hours)
        cluster_buckets[target].append(cluster_id)
        loads[target] += hours
    return hour_buckets, cluster_buckets
def driverOptimizer(Spec_count1, Drivers, Min_driver_work, Max_driver_work, First_driver_hr, Last_driver_hr, First_del_hr, Last_del_hr):
    """Assign route clusters to drivers: full-day routes first, then a greedy
    chunking of the leftovers that maximizes workday utilization.

    :param Spec_count1: DataFrame with Cluster and Hr_Length columns; a
        Driver column is added in place. NOTE(review): assumed to be indexed
        by cluster id, matching the original label-based assignment.
    :param Drivers: number of available drivers.
    :param Min_driver_work / Max_driver_work: acceptable workday bounds (hours).
    :param First_driver_hr .. Last_del_hr: currently unused; kept for interface
        compatibility with callers.
    :return: (pairings, numChunks, maxutil, tooLong)
    """
    import pandas
    import numpy
    driverSched = pandas.DataFrame(columns=['Driver_ID', 'Route'])
    driverSched['Driver_ID'] = range(1, Drivers + 1)
    # Add column for Driver ID assignment to each cluster
    Spec_count1['Driver'] = numpy.nan
    temp_Spec_count = pandas.DataFrame()
    D_count = 1
    tooLong = (Spec_count1.Hr_Length > Max_driver_work).sum()
    # Hand each driver at most one "full day" route (longer than the minimum workday).
    for index, driver in driverSched.iterrows():
        # Temp frame with all clusters that are not assigned yet.
        temp_Spec_count = Spec_count1[Spec_count1['Driver'].isnull()]
        count = 0
        for index, cluster in temp_Spec_count.iterrows():
            if cluster.Hr_Length > Min_driver_work and count == 0:
                # FIX: .loc label assignment instead of chained indexing
                # (Spec_count1.Driver[...] = ...), which is unreliable and
                # warns/fails under modern pandas (SettingWithCopy).
                Spec_count1.loc[cluster.Cluster, 'Driver'] = driver['Driver_ID']
                count = count + 1
                D_count = D_count + 1
    # Collect the remaining unassigned routes.
    temp_Spec_count = Spec_count1[Spec_count1['Driver'].isnull()]
    chunking_results = pandas.DataFrame(columns=['numChunks', 'chunkidx', 'utilization'])
    chunking_results['numChunks'] = range(1, 1 + len(temp_Spec_count))
    chunking_results['chunkidx'] = chunking_results['chunkidx'].astype(object)
    chunking_results['utilization'] = chunking_results['utilization'].astype(float)
    # Greedy search: try splitting the leftovers into 1..N chunks and score each split.
    for i in range(1, 1 + len(temp_Spec_count)):
        chunks, chunkIdx = splitter(temp_Spec_count, i)
        # FIX: DataFrame.set_value was removed in pandas 1.0 -> .at
        chunking_results.at[i - 1, 'chunkidx'] = chunkIdx
        # Count the chunks whose total hours fit within workday limits.
        counter = 0
        for j in chunks:
            temp = sum(j)
            if Min_driver_work <= temp <= Max_driver_work:
                counter = counter + 1
        # Utilization = acceptable chunks / total chunks for this split size.
        chunk_utilization = float(counter) / i
        chunking_results.loc[chunking_results['numChunks'] == i, 'utilization'] = chunk_utilization
    # FIX: .ix was removed in pandas 1.0; idxmax returns a row label -> .loc
    best = chunking_results['utilization'].idxmax()
    pairings = chunking_results['chunkidx'].loc[best]
    numChunks = chunking_results['numChunks'].loc[best]
    maxutil = chunking_results['utilization'].loc[best]
    # Assign one driver id per chunk of the winning split.
    for item in pairings:
        Spec_count1.loc[Spec_count1['Cluster'].isin(item), 'Driver'] = D_count
        D_count = D_count + 1
    return pairings, numChunks, maxutil, tooLong
|
from flask import Flask, request,jsonify, render_template
import json
import sqlite3
app = Flask(__name__)
#an in memory students storage(using a list)
#students = []
#instead of a list,we need to create a connection to database where we store students
def db_connection():
    """Open (or create) the students.sqlite database file.

    :return: a sqlite3 connection, or None when the connection failed.
    """
    conn = None
    try:
        conn = sqlite3.connect('students.sqlite')
    except sqlite3.Error as e:
        # BUG FIX: the exception class is sqlite3.Error (capital E);
        # `sqlite3.error` does not exist, so the original handler itself
        # raised AttributeError the moment a connection failure occurred.
        print(e)
    return conn
@app.route("/students" , methods=["GET","POST"])
def students():
    """Collection endpoint: GET lists all students, POST inserts one.

    GET  -> JSON array of student dicts (empty array when the table is empty).
    POST -> confirmation string containing the new row id (form-encoded body).
    """
    #access the db connection
    conn = db_connection()
    #access the cursor object
    cursor = conn.cursor()
    #createing our GET request for all students
    if request.method == "GET":
        cursor = conn.execute("SELECT * FROM students")
        # Map positional columns to named keys for the JSON response.
        students = [
            dict(id = row[0], firstname = row[1], lastname = row[2], gender = row[3] , age = row[4])
            for row in cursor.fetchall()
        ]
        # NOTE(review): a list is never None, so this check always passes.
        if students is not None:
            return jsonify(students)
    #createing our POST request for a student
    if request.method == "POST":
        firstname = request.form["firstname"]
        lastname = request.form["lastname"]
        gender = request.form["gender"]
        age = request.form["age"]
        #SQL query to INSERT a student INTO our database
        # Parameterized placeholders keep the insert safe from SQL injection.
        sql = """INSERT INTO students (firstname, lastname, gender, age)
                VALUES (?, ?, ?, ?) """
        cursor = cursor.execute(sql, (firstname, lastname, gender, age))
        conn.commit()
        return f"Student with id: {cursor.lastrowid} created successfully"
#a route with all the neccesary request methods for a single student
@app.route('/student/<int:id>',methods=[ "GET", "PUT", "DELETE" ])
def student(id):
    """Single-student endpoint: GET fetches, PUT replaces, DELETE removes.

    :param id: primary key of the student row, taken from the URL.
    """
    conn = db_connection()
    cursor = conn.cursor()
    student = None
    #createing our GET request for a student
    if request.method == "GET":
        cursor.execute("SELECT * FROM students WHERE id=?",(id,) )
        rows = cursor.fetchall()
        # At most one row matches the primary key; keep the last seen.
        for row in rows:
            student = row
        if student is not None:
            return jsonify(student), 200
        else:
            return "Something went wrong", 404
    #createing our PUT request for a student
    if request.method == "PUT":
        # Full replace: every field must be present in the form body.
        sql = """ UPDATE students SET firstname = ?,lastname = ?, gender = ? , age = ?
                WHERE id = ? """
        firstname = request.form["firstname"]
        lastname = request.form["lastname"]
        gender = request.form["gender"]
        age = request.form["age"]
        # Echo back the updated representation rather than re-querying.
        updated_student = {
            "id": id,
            "firstname": firstname,
            "lastname" : lastname,
            "gender" : gender,
            "age" : age
        }
        conn.execute(sql,(firstname, lastname, gender, age, id))
        conn.commit()
        return jsonify(updated_student)
    #createing our DELETE request for a student
    if request.method == "DELETE":
        # NOTE(review): deleting a non-existent id still reports success.
        sql= """ DELETE FROM students WHERE id=? """
        conn.execute(sql, (id,))
        conn.commit()
        return "The Student with id: {} has been deleted.".format(id),200
if __name__ == '__main__':
    # Development server only; disable debug mode for production deployments.
    app.run(debug=True)
def calculator_base_message(message):
    """Parse a "<x><op><y>=" expression out of a wrapped message and evaluate it.

    The first and last two characters of the raw message are stripped
    (presumably a command wrapper added by the caller — confirm upstream),
    then the remainder must contain no spaces, end with '=', and hold one
    of + - * /. Operands are parsed as int, falling back to float.

    :return: the numeric result, or a Russian error string describing the
        first validation failure.

    Rewritten with guard clauses so each error string is unambiguously tied
    to its check; the "missing operator" branch is now reliably reached via
    loop fall-through instead of depending on fragile else-nesting.
    """
    operators = ['+', '-', '*', '/']
    message = message[2:-2]
    if len(message) == 0:
        return 'Ошибка! Вы ввели пустую строку! '
    if ' ' in message:
        return 'Ошибка! В строке не должно быть пробелов. '
    if message[-1] != '=':
        return 'Ошибка! Отсутсвует знак равно. '
    for operator in operators:
        if operator not in message:
            continue
        values = message.split(operator)
        value_1 = values[0]
        value_2 = values[1].replace('=', '')
        try:
            value_1 = int(value_1)
            value_2 = int(value_2)
        except ValueError:
            # Mixed or fractional operands: retry both as floats; a second
            # ValueError (garbage input) propagates, as before.
            value_1 = float(value_1)
            value_2 = float(value_2)
        return calculator(value_1, value_2, operator)
    return 'Ошибка! Отсутствует знак оператора.'
def calculator(value_1, value_2, operator):
    """Apply the arithmetic operator to the two operands.

    Division by zero yields a Russian error string instead of raising;
    an unrecognized operator yields None (as before).
    """
    if operator == '/':
        try:
            return value_1 / value_2
        except ZeroDivisionError:
            return 'Ошибка! Деление на ноль.'
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
    }
    operation = operations.get(operator)
    return operation(value_1, value_2) if operation else None
if __name__ == '__main__':
    # Import-only module: the calculator functions are driven from elsewhere.
    pass
|
# -*- coding: utf-8 -*-
import Spider
if __name__ == "__main__":
    # Crawl all city data, then flush and release the DB resources the
    # spider opened internally.
    obj = Spider.SpiderMain()
    obj.getAllCityInfo()
    obj.con_cursor.close()
    # NOTE(review): the commit happens after the cursor is closed; sqlite
    # tolerates this, but committing before closing would be safer.
    obj.connection.commit()
    obj.connection.close()
from random import random
from time import perf_counter
# Monte Carlo estimate of pi: throw DARTS uniform points into the unit square
# and count how many fall inside the quarter circle of radius 1.
# NOTE(review): 10^8 iterations of pure Python — expect minutes of runtime.
DARTS = 10000*10000
hits = 0.0
start = perf_counter()
for i in range(DARTS):
    x, y = random(), random()
    dist = pow(x**2+y**2, 0.5)
    if dist < 1.0:
        hits += 1
# Quarter-circle area over square area is pi/4, hence the factor of 4.
pi = 4*hits/DARTS
print('圆周率是:{:.6f}'.format(pi))
print('运行时间:{:.3f}'.format(perf_counter()-start))
|
# def init(shared_arr_):
# global shared_arr
# shared_arr = shared_arr_ # must be inhereted, not passed as an argument
from multiprocessing import sharedctypes
from multiprocess import Pool
import ctypes
# Module-level scratch space shared with worker processes (relies on fork
# inheriting the parent's globals).
global_vars = {}


class A(object):
    """Python 2 experiment: sharing a large array with Pool workers via module globals."""

    def __init__(self, size):
        self.size = size
        # self.x = np.random.random((size,))
        global global_vars
        # NOTE(review): `np` is never imported in this file — this line
        # raises NameError as written; confirm the intended numpy import.
        global_vars['volume_f'] = np.random.random((size,))

    def func2(self, t):
        # NOTE(review): depends on self.x, which is commented out in
        # __init__ above — calling this raises AttributeError.
        print t
        return len(self.x)

    def func(self, t):
        # Worker body: reads the fork-inherited global instead of instance state.
        print t
        return len(global_vars['volume_f'])

    def func_parallel(self, processes=4):
        # (Earlier sharedctypes experiment kept below for reference.)
        # global shared_arr
        # shared_arr = sharedctypes.RawArray(ctypes.c_double, self.size)
        # arr = np.frombuffer(shared_arr)
        # arr[:] = self.x
        # arr_orig = arr.copy()
        # p = Pool(processes=4, initializer=init, initargs=(shared_arr,))
        p = Pool(processes=processes)
        res = p.map(self.func, range(processes))
        p.close()
        p.join()
        print res
|
""" Wrapper for pyevolove library to support multiprocessing
and more convenient usage.
"""
import copy_reg
import types
import time
from multiprocessing import Pool
from gaengine import GAInstance
from funclib import *
def _pickle_method(method):
    """Reduce a bound method to (name, instance, class) so Pool can pickle it (Python 2)."""
    func_name = method.im_func.__name__
    obj = method.im_self
    cls = method.im_class
    return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
    """Inverse of _pickle_method: re-bind the named function to obj.

    Walks the MRO so an override is found even when the method was pickled
    through a base-class reference.
    """
    for cls in cls.mro():
        try:
            func = cls.__dict__[func_name]
        except KeyError:
            pass
        else:
            break
    return func.__get__(obj, cls)
# Register the bound-method reducer globally so multiprocessing can ship methods to workers.
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
class Population(object):
    """Configuration holder for one GA run: function set, rates, bounds and
    output file, plus start() which hands the object to the GA engine.

    Mostly plain accessors so the object can be fully configured before
    being shipped to a worker process by GAWrapper.

    NOTE(review): reportRate, maxAlgSize, maxSpecies and argsReq are only
    created by their setters — their getters raise AttributeError if called
    before the corresponding setter.
    """

    def __init__(self):
        self.funclist = []              # candidate functions the GA may combine
        self.funccnt = 0                # cached len(funclist)
        self.score_func = None          # fitness function
        self.maxGen = -1                # -1 = engine default / unlimited
        self.lb, self.hb = 0, 1         # data range bounds
        self.mutateRate = 0.5
        self.crossoverRate = 0.1
        self.stopAfterGen = 1000        # early-stop window
        self.filename = 'default.dat'   # where results are persisted

    def setDataRange(self, low_bound, high_bound):
        self.lb = low_bound
        self.hb = high_bound

    def getDataRange(self):
        return [self.lb, self.hb]

    def setStopAfter(self, n):
        self.stopAfterGen = n

    def getStopAfter(self):
        return self.stopAfterGen

    def setFileName(self, filename):
        self.filename = filename

    def getFileName(self):
        return self.filename

    def setFuncList(self, funclist):
        # Keep the cached count in sync with the list.
        self.funclist = funclist
        self.funccnt = len(self.funclist)

    def getFuncList(self):
        return self.funclist

    def getFuncCnt(self):
        return self.funccnt

    def setCrossoverRate(self, n):
        self.crossoverRate = n

    def getCrossoverRate(self):
        return self.crossoverRate

    def setMutateRate(self, n):
        self.mutateRate = n

    def getMutateRate(self):
        return self.mutateRate

    def setReportRate(self, n):
        self.reportRate = n

    def getReportRate(self):
        return self.reportRate

    def setMaxAlgSize(self, n):
        self.maxAlgSize = n

    def getMaxAlgSize(self):
        return self.maxAlgSize

    def setMaxSpecies(self, n):
        self.maxSpecies = n

    def getMaxSpecies(self):
        return self.maxSpecies

    def setArgsReq(self, n):
        self.argsReq = n

    def getArgsReq(self):
        return self.argsReq

    def setMaxGen(self, n):
        self.maxGen = n

    def getMaxGen(self):
        return self.maxGen

    def setScoreF(self, f):
        self.score_func = f

    def getScoreF(self):
        return self.score_func

    def start(self):
        """Run the GA synchronously; used as the Pool task in GAWrapper.startAll()."""
        ga = GAInstance(self)
        ga.evolve(self.getMaxGen())
class GAWrapper(object):
    """Runs several Population GA searches concurrently in a process pool."""

    def __init__(self, max_proc = 2):
        self.populations = []
        self.maxProc = max_proc
        # maxtasksperchild=1 recycles each worker after one population run,
        # releasing any memory the GA engine accumulated.
        self.pool = Pool(processes = self.maxProc, maxtasksperchild = 1)

    def addPopulation(self, pop):
        # Register a configured Population to be run by startAll().
        self.populations.append(pop)

    def startAll(self):
        """Launch every registered population asynchronously, then block until all finish."""
        results = []
        for pop in self.populations:
            results.append(self.pool.apply_async(pop.start))
        for res in results:
            res.wait()
|
import ConfigParser
import os
from scipy import misc
import matplotlib.pyplot as plt
import skimage.transform as sk_transform
from abhishek import *
# Load shared settings (dataset locations, flags) from the project config file.
# NOTE(review): absolute path — breaks on any other machine; confirm deployment.
config = ConfigParser.RawConfigParser(allow_no_value=True)
with open('/media/adv/Shared/PROJECTS/CSE515_MWDB/Code/variables.cfg') as config_file:
    config.readfp(config_file)
HAND_IMAGE_DATASET_DIR = config.get('PATH', 'hand_image_dataset_dir')
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements, raw_input, .next());
    # input() evaluates the typed text, so k and model must be numbers.
    k = input("Enter value of k: ")
    model = input("Emter the model - 1. HOG \t 2. SIFT\t")
    file_name = raw_input("Select one of the images \n" + str(os.listdir(HAND_IMAGE_DATASET_DIR)) + "\n:\t")
    sorted_similarity_values = {}
    # Dispatch to the chosen feature model; anything else aborts.
    if model == 1:
        sorted_similarity_values = extract_hog_features_for_all_images(file_name)
    elif model == 2:
        sorted_similarity_values = extract_sift_features_for_all_images(file_name)
    else:
        exit(1)
    if config.get('config_key', 'visualize') == "True":
        # NOTE(review): src_image is computed but never shown; the raw
        # imread result is displayed instead.
        src_image = sk_transform.rescale(misc.imread(HAND_IMAGE_DATASET_DIR + os.sep + file_name), 0.5,
                                         anti_aliasing=True)
        plt.imshow(misc.imread(HAND_IMAGE_DATASET_DIR + os.sep + file_name))
        plt.title("Source Image: " + file_name)
        plt.show()
    # Walk the k most similar images in their stored (pre-sorted) order.
    similarity_values_iter = iter(sorted_similarity_values)
    for i in range(k):
        image_file = similarity_values_iter.next()
        similarity_value = sorted_similarity_values[image_file]
        similar_image = sk_transform.rescale(misc.imread(HAND_IMAGE_DATASET_DIR + os.sep + image_file), 0.5,
                                             anti_aliasing=True)
        print similarity_value, image_file
        if config.get('config_key', 'visualize') == "True":
            plt.imshow(similar_image)
            plt.title(
                "Similar Image " + str(i + 1) + ": " + image_file + " Similarity Score = " + str(similarity_value))
            plt.show()
|
import os
import networkx as nx # https://networkx.org/
import re
def create_graph(raw_bag: str) -> nx.MultiDiGraph:
    """Build a multigraph of bag-containment rules (Advent of Code day 7 style).

    Each line looks like
    "light red bags contain 1 bright white bag, 2 muted yellow bags.";
    one parallel edge is added from container to contained color per bag,
    so edge multiplicity encodes the count.
    """
    graph = nx.MultiDiGraph()
    lines = raw_bag.splitlines()
    for line in lines:
        # Leaf bags contribute no edges.
        if "contain no other bag" in line:
            continue
        definition_bag, raw_containing_bags = line.split(" bags contain ")
        graph.add_node(definition_bag)
        raw_containing_bags = raw_containing_bags.split(", ")
        for bag in raw_containing_bags:
            # Drop the trailing " bag"/" bags." suffix before parsing count+color.
            definition = bag.split(" bag")[0]
            match = re.match(r"([0-9]+) *([a-z ]+)", definition)
            num, color_bag = match.groups()
            graph.add_node(color_bag)
            # One edge per contained bag -> multiplicity encodes the count.
            for i in range(int(num)):
                graph.add_edge(definition_bag, color_bag)
    return graph
def get_all_predecessors(bag: str, graph: nx.MultiDiGraph) -> list:
    """Return every bag that can (transitively) contain `bag`.

    Note: returns a list (annotation corrected from `set`) and may contain
    duplicates when several containment paths reach the same ancestor;
    wrap in set() for the "how many distinct colors" question.
    """
    parent_bags = []
    for parent_bag in graph.predecessors(bag):
        parent_bags.append(parent_bag)
        parent_bags += (get_all_predecessors(parent_bag, graph))
    return parent_bags
def get_all_successors(bag: str, graph: nx.MultiDiGraph) -> list:
    """Return one entry per bag that `bag` must (transitively) contain.

    Returns a list (annotation corrected from `set`): each child appears
    once per parallel edge, so len() of the result is the total bag count.
    """
    child_bags = []
    for child_bag in graph.successors(bag):
        # Recurse once per distinct child; replicate per edge multiplicity.
        succ = get_all_successors(child_bag, graph)
        for i in range(graph.number_of_edges(bag, child_bag)):
            child_bags.append(child_bag)
            child_bags += succ
    return child_bags
if __name__ == "__main__":
    # Count the total number of bags a shiny gold bag must contain (part 2).
    input_path = os.path.join(os.path.dirname(__file__), "input.txt")
    with open(input_path) as file:
        rules = file.read()
    contained_bags = get_all_successors("shiny gold", create_graph(rules))
    print(len(contained_bags))
|
import math
import random
import puzzleRandomizer
from datetime import datetime
from copy import deepcopy
import itertools
import numpy as np
# ------------------------------------------------------------ #
# ------------------ SET PUZZLE SIZE HERE -------------------- #
algorithm = 1 # use dictionary (0), neural network (1) or lgbm regressor (2)
puzzleSize = 3 # Size 3 means 3x3 puzzle
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
print("initializing puzzle..")
# Import the learner implementation matching the selected algorithm; all
# three expose the same QLearn interface used by the Puzzle class below.
if algorithm == 0:
    import qlearn as learner
elif algorithm == 1:
    import qlearn_nn as learner
elif algorithm == 2:
    import qlearn_lgbm as learner
# Hyperparameters tuned per (algorithm, puzzle size) combination.
# epsilon* = exploration schedule, alpha = learning rate, gamma = discount,
# reward/punish = terminal reward and illegal-move penalty.
if algorithm == 0:
    if puzzleSize == 2:  # hardest instance takes 5 moves
        epsilonSteps = 1000  # over how many steps epsilon is reduced to its final value
        epsilonStartVal = 0.05  # chance to take a random action
        epsilonEndVal = 0.001
        alphaVal = 1  # learning rate
        gammaVal = 0.99  # discount factor for future rewards
        rewardVal = 1  # reward for solving the puzzle
        punishVal = -1  # punishment for doing nothing
    elif puzzleSize == 3:  # hardest instance takes 31 moves to solve
        epsilonSteps = 4500000
        epsilonStartVal = 0.20
        epsilonEndVal = 0.01
        alphaVal = 1
        gammaVal = 0.999
        rewardVal = 1
        punishVal = -1
elif algorithm == 1:
    if puzzleSize == 2:
        epsilonSteps = 50000
        epsilonStartVal = 0.20
        epsilonEndVal = 0.01
        alphaVal = 0.01
        gammaVal = 0.99
        rewardVal = 1
        punishVal = -1
    elif puzzleSize == 3:
        epsilonSteps = 24500000
        epsilonStartVal = 0.9
        epsilonEndVal = 0.01
        alphaVal = 0.0001
        gammaVal = 0.95
        rewardVal = 10
        punishVal = -1
elif algorithm == 2:
    # The LGBM learner manages its own exploration schedule, hence the Nones.
    if puzzleSize == 2:
        epsilonSteps = None
        epsilonStartVal = 0.3
        epsilonEndVal = None
        alphaVal = 0.05
        gammaVal = 0.99
        rewardVal = 1
        punishVal = -1
    elif puzzleSize == 3:
        epsilonSteps = None
        epsilonStartVal = 1
        epsilonEndVal = None
        alphaVal = 0.05
        gammaVal = 0.99
        rewardVal = 100
        punishVal = -1
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
class Puzzle():
def __init__(self, puzzleSize):
# alpha ... learning rate between 0-1 (0 means never update Q-values)
# gamma ... discount factor between 0-1 (higher means the algorithm looks farther into the future - at 1
# infinite rewards possible -> dont go to 1)
# epsilon ... exploration factor between 0-1 (chance of taking a random action)
# set values, epsilon will be periodically overwritten (see pre train section farther down) until it reaches 0
# testing alpha = 1 instead of 0.1
self.ai = learner.QLearn(puzzleSize = puzzleSize, epsilon=epsilonStartVal, alpha=alphaVal, gamma=gammaVal)
self.lastState = None
self.lastAction = None
self.solved = 0
self.age = 0
# all tile swaps that have been done
self.movesDone = 0
# all actions that have been taken = all attempted swaps
self.actionsTaken = 0
self.puzzleSize = puzzleSize
# 2d array containing values describing which numbers are in which positions [pos] = value
# for size = 2, state[1][0] = c:
# a b
# c d
self.randomizer = puzzleRandomizer.Randomizer(self.puzzleSize)
# create random solvable puzzle start
self.state = self.randomizer.makeRandomPuzzle(self.solved)
# describes position of the empty cell (value = 0) (x,y)
self.emptyCellPos = self.initEmptyCellPos()
# up, down, left, right
self.direction_list = [(-1, 0), (1, 0), (0, -1), (0, 1)]
# create dict of cells in the puzzle that are neighbours to each other
self.neighbours = self.initNeighbours()
# create dict to get 2d-positions from 1d-position: (x,y)
self.positionConverter = self.init1dTo2dPositionConverter()
# create array equal to state, but with the expected solutions instead
self.solution = self.initSolvedPosition()
# self.display = display.makeDisplay(self)
# init variables to calc averages
self.solveCount = 0
self.totalMoves = 0
self.totalTime = 0
self.steps = 0
#self.currentManhattan = self.getManhattanDistance(self.state, self.solution)
#self.lastManhattan = self.currentManhattan
self.goalPositions = self.createGoalPositionsPerTile()
# get manhattan distance for tile num at pos y,x via self.manhattanPerTile[num][(y,x)]
self.manhattanPerTile = self.createManhattanPerTile()
# get manhattan distance for a board state [[1,2,3],[4,5,6],[7,8,0]]
# via self.manhattanPerBoard[(1,2,3,4,5,6,7,8,0)]
self.manhattanPerBoard = self.createManhattanPerBoard()
# create neighbours dict which has a list of neighbour-positions for each position
def initNeighbours(self):
neighbours = {}
pos = 0
for y in range(self.puzzleSize):
for x in range(self.puzzleSize):
n = []
# space to the left
if (x - 1 >= 0):
n.append([x - 1, y])
# space to the right
if (x + 1 < self.puzzleSize):
n.append([x + 1, y])
# space above
if (y - 1 >= 0):
n.append([x, y - 1])
# space below
if (y + 1 < self.puzzleSize):
n.append([x, y + 1])
neighbours[pos] = n
pos += 1
return neighbours
def init1dTo2dPositionConverter(self):
conv = []
for y in range(0, self.puzzleSize):
for x in range(0, self.puzzleSize):
conv.append(np.array([x, y]))
return conv
def initSolvedPosition(self):
sol = [[0 for i in range(self.puzzleSize)] for j in range(self.puzzleSize)]
num = 1
for y in range(0, self.puzzleSize):
for x in range(0, self.puzzleSize):
sol[y][x] = num
num += 1
sol[self.puzzleSize - 1][self.puzzleSize - 1] = 0
return sol
def initEmptyCellPos(self):
for x in range(0, self.puzzleSize):
for y in range(0, self.puzzleSize):
if (self.state[y][x] == 0):
return np.array([x, y])
# try to move the tile at position
# if that move is possible, move and return True
# else do not move and return False
def moveTile(self, direction):
self.age += 1
self.actionsTaken += 1
# if the cell at position has the empty cell as a neighbour -> swap it with the empty cell and return True
#if self.emptyCellPos in self.neighbours[position]:
# print(self.emptyCellPos)
# print(self.direction_list[direction])
newPos = self.emptyCellPos + self.direction_list[direction]
if not (np.sum(newPos >= self.puzzleSize) or np.sum(newPos < 0)):
# curPos = self.positionConverter[direction]
newPosX = newPos[0]
newPosY = newPos[1]
newPosValue = self.state[newPosY][newPosX]
emptyCellPos = self.emptyCellPos
emptyPosX = emptyCellPos[0]
emptyPosY = emptyCellPos[1]
# swap values in self.state
self.state[emptyPosY][emptyPosX] = newPosValue
self.state[newPosY][newPosX] = 0
# set new emptyCellPos
self.emptyCellPos = newPos
self.movesDone += 1
return True
# else do not move and return False
else:
return False
# calc reward based on current state and if puzzle solved -> create random new puzzle
# if this is not the first state after new puzzle created -> Q-learn(s,a,r,s')
# choose an action and perform that action
def update(self):
reward = None
hasMoved = True
currentState = deepcopy(self.state)
if not self.isPuzzleSolved():
# reward based on manhattan distance
mh = self.getManhattanForBoard(currentState)
#print("Manhattan: " + str(mh))
if self.puzzleSize == 2:
reward = ((1 / math.sqrt(mh) - 1) - 0.001) / 20000
elif self.puzzleSize == 3:
reward = ((1 / math.sqrt(mh) - 1) - 0.001) / 20
# if last action was not legal -> useless action -> punish
if (self.lastState == currentState):
reward = punishVal
hasMoved = False
# observe the reward and update the Q-value
else:
endTime = datetime.now()
reward = rewardVal
if self.lastState is not None:
if algorithm == 2:
self.ai.learn(self.lastState, self.lastAction, reward, currentState, True, hasMoved)
else: #TODO why None instead of solved state?
self.ai.learn(self.lastState, self.lastAction, reward, None, True, hasMoved)
self.lastState = None
self.solved += 1
self.solveCount += 1
self.state = self.randomizer.makeRandomPuzzle(self.solved)
self.emptyCellPos = self.initEmptyCellPos()
self.steps = 0
totalTime = endTime - self.startTime
# calculate time difference
timeDif = totalTime.seconds + 1.0 * totalTime.microseconds / 1000 / 1000
# reset average calculation every few puzzles
if self.solved % 35 == 0:
self.totalTime = 0
self.totalMoves = 0
self.solveCount = 1
print("\nresetting calculation of average")
self.totalTime += timeDif
self.totalMoves += self.movesDone
if algorithm != 2:
print((
"\navg moves: %f \tavg time: %f seconds \tmoves: %d \ttime: %f seconds \tactions: %d \t\tepsilon: %f \tsolved: %d"
% (self.totalMoves / (self.solveCount * 1.0), self.totalTime / (self.solveCount * 1.0),
self.movesDone, timeDif, self.actionsTaken, self.ai.epsilon, self.solved)).expandtabs(
18))
print(datetime.now())
file.write(("%f,%f,%d,%f,%d,%f,%d\n"
% (self.totalMoves / (self.solveCount * 1.0), self.totalTime / (self.solveCount * 1.0),
self.movesDone, timeDif, self.actionsTaken, self.ai.epsilon, self.solved)).expandtabs(18))
file.flush()
else:
print((
"\navg moves: %f \tavg time: %f seconds \tmoves: %d \ttime: %f seconds \tactions: %d \t\tepsilon: %f \tsolved: %d"
% (self.totalMoves / (self.solveCount * 1.0), self.totalTime / (self.solveCount * 1.0),
self.movesDone, timeDif, self.actionsTaken, self.ai.get_exploration_rate(), self.solved)).expandtabs(
18))
print(datetime.now())
file.write(("%f,%f,%d,%f,%d, %f, %d\n"
% (self.totalMoves / (self.solveCount * 1.0), self.totalTime / (self.solveCount * 1.0),
self.movesDone, timeDif, self.actionsTaken, self.ai.get_exploration_rate(), self.solved)).expandtabs(
18))
file.flush()
self.movesDone = 0
self.actionsTaken = 0
self.startTime = datetime.now()
return
if self.lastState is not None:
self.ai.learn(self.lastState, self.lastAction, reward, currentState, False, hasMoved)
# get updated state (puzzle might have been recreated after being solved), choose a new action and execute it
currentState = self.state
action = self.ai.chooseAction(currentState)
self.lastState = deepcopy(currentState)
self.lastAction = action
# move chosen tile, if it can not be moved do nothing
self.moveTile(action)
self.steps += 1
if self.steps > 10000:
print("resetting..")
self.lastState = None
self.state = self.randomizer.makeRandomPuzzle(self.solved)
self.emptyCellPos = self.initEmptyCellPos()
self.steps = 0
#endTime = datetime.now()
#totalTime = endTime - self.startTime
# calculate time difference
#timeDif = totalTime.seconds + 1.0 * totalTime.microseconds / 1000 / 1000
#self.totalTime += timeDif
#self.totalMoves += self.movesDone
#self.movesDone = 0
#self.actionsTaken = 0
#self.startTime = datetime.now()
if algorithm==2:
self.ai.lgbm.last_memory.clear()
elif algorithm==1:
if random.random() > 0.1:
self.ai.batch.clear()
self.ai.mem_count = 0
return
def isPuzzleSolved(self):
return (self.state == self.solution)
def createGoalPositionsPerTile(self):
goalPositions = {}
for y in range(0, self.puzzleSize):
for x in range(0, self.puzzleSize):
goalPositions[self.solution[y][x]] = (y,x)
return goalPositions
def createManhattanPerTile(self):
manhattanPerTile = {}
# for each numbered tile calculate each position's manhattan distance
for num in range(1,self.puzzleSize**2):
manhattanForThisTile = {}
for y in range(0, self.puzzleSize):
for x in range(0, self.puzzleSize):
dif = abs(y - self.goalPositions[num][0]) + abs(x - self.goalPositions[num][1])
manhattanForThisTile[(y,x)] = dif
manhattanPerTile[num] = manhattanForThisTile
return manhattanPerTile
def createManhattanPerBoard(self):
manhattanPerBoard = {}
numbers = range(0, self.puzzleSize**2)
permutations = list(itertools.permutations(numbers))
for perm in permutations:
dist = 0
# get manhattan distance for tile num at pos y,x via self.manhattanPerTile[num][(y,x)]
for i in range(0,len(perm)):
(x,y) = self.positionConverter[i]
num = perm[i]
if num != 0:
dist += self.manhattanPerTile[num][y,x]
manhattanPerBoard[perm] = dist
return manhattanPerBoard
def getManhattanForBoard(self, state):
    """Look up the precomputed Manhattan distance for a nested-list board
    such as [[0, 1, 2], [3, 4, 5], [6, 7, 8]]."""
    flattened = tuple(tile for row in state for tile in row)
    return self.manhattanPerBoard[flattened]
# ----------------------------------
# start learning
# ----------------------------------
# Training driver: runs the puzzle/agent loop forever, decaying epsilon and
# periodically printing progress. Relies on module-level names defined
# earlier in the file: puzzleSize, algorithm, epsilonSteps, epsilonEndVal.
puzzle = Puzzle(puzzleSize=puzzleSize)
if algorithm != 2:
    # learning factor
    epsilonX = (0, epsilonSteps) # for how many time steps epsilon will be > 0, TODO value experimental
    epsilonY = (puzzle.ai.epsilon, epsilonEndVal) # start and end epsilon value
    # decay rate for epsilon so it hits the minimum value after epsilonX[1] time steps
    epsilonM = (epsilonY[1] - epsilonY[0]) / (epsilonX[1] - epsilonX[0])
puzzle.startTime = datetime.now()
print("puzzle start: %s" %puzzle.startTime)
# create log file named after the algorithm and the start timestamp
fname = ""
if algorithm == 1:
    fname = fname + "nn"
elif algorithm == 0:
    fname = fname + "simple"
elif algorithm == 2:
    fname = fname + "lgbm"
# NOTE(review): Windows-only path separators; os.path.join would be portable.
fname = "..\\log\\" + str(puzzleSize) + "\\" + fname + "_" + str(puzzle.startTime).replace(":", "-") + ".csv"
with open(fname,"w+") as file:
    print("Starting training..")
    #with open("fname.name as.csv","w+") as file:
    #write header
    file.write("avg moves, avg time, moves, time, actions, epsilon, solved\n")
    # train the player
    #while puzzle.age < learningSteps:
    firstVictoryAge = 0
    while True:
        # calls update on puzzle (do action and learn) and then updates score and redraws screen
        puzzle.update()
        if algorithm != 2:
            # every 100 time steps, decay epsilon (only after first puzzle is solved)
            # NOTE(review): bitwise & instead of `and` — fine for bools but does
            # not short-circuit; both operands are always evaluated.
            if (puzzle.solved > 0) & (puzzle.age % 100 == 0):
                relevantAge = puzzle.age - firstVictoryAge
                # this gradually decreases epsilon from epsilonY[0] to epsilonY[1] over the course of epsilonX[0] to [1]
                # -> at epsilonX[1] epsilon will reach epsilonY[1] and stay there
                puzzle.ai.epsilon = (epsilonY[0] if relevantAge < epsilonX[0] else
                    epsilonY[1] if relevantAge > epsilonX[1] else
                    epsilonM * (relevantAge - epsilonX[0]) + epsilonY[0])
                # alternatively just multiply by some factor... harder to set right I guess
                # puzzle.ai.epsilon *= 0.9995
            elif puzzle.solved < 0:
                firstVictoryAge = puzzle.age + 1
        # every .. steps show current averageStepsPerPuzzle and stuff and then reset stats to measure next ... steps
        if puzzle.age % 100000 == 0:
            print("\nage: " + str(puzzle.age))
            if algorithm != 2:
                print("epsilon: " + str(puzzle.ai.epsilon))
            else:
                print("epsilon: " + str(puzzle.ai.get_exploration_rate()))
            print(datetime.now())
            #print("manhattan: " + str(puzzle.getManhattanDistance(puzzle.state, puzzle.solution)))
            # print puzzle dict (for qlearn.py)
            #if(len(puzzle.ai.q) > 2200000):
            #    print("WRITING")
            #    f = open("output.txt","w")
            #    #f.write(str(puzzle.ai.q))
            #    for (key,value) in puzzle.ai.q.items():
            #        f.write("%s: %d%s" %(str(key), value,"\n"))
            #    f.close()
            #    break
# ----------------------------------
# show off
# ----------------------------------
# NOTE(review): the training loop above is `while True` with no visible
# break, so everything below is unreachable unless the loop is interrupted.
endTime = datetime.now()
# NOTE(review): only puzzle.startTime is assigned in this section — confirm a
# module-level `startTime` is defined earlier, otherwise this is a NameError.
totalTime = endTime - startTime
print("total time: ", divmod(totalTime.days * 86400 + totalTime.seconds, 60))
# after pre training - show off the player ai (while still training, but slower because it has to render now)
# PAGEUP to render less and thus learn faster
# PAGEDOWN to reverse the effect
# SPACE to pause
#puzzle.display.activate(size=30)
#puzzle.display.delay = 1
#print("enter show off mode")
# `1 & False` evaluates to 0, so this show-off loop is currently disabled.
while 1 & False:
    puzzle.update()
|
from django.db import models
from django.utils.translation import ugettext as _
class Config(models.Model):
    """Single key/value feature switch with an explanatory message."""
    key = models.CharField(_('Key'), max_length=100, unique=True)
    value = models.BooleanField(_('Value'), default=False)
    message = models.TextField(_('Message'))

    def __str__(self):
        # Fix: on Python 3 Django calls __str__, not __unicode__, so instances
        # previously displayed as "Config object" in the admin.
        return self.key

    # Kept for legacy Python 2 callers; same implementation.
    __unicode__ = __str__
|
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#
# class Solution:
# def nextLargerNodes(self, head: ListNode):
# nums = []
# node = head
# while node != None :
# nums.append(node.val)
# node = node.next
# stack = []
# stack_loc = []
# res = [0] * len(nums)
#
# for i in range(len(nums)):
# while stack and stack[-1] < nums[i]:
# res[stack_loc[-1]] = nums[i]
# stack.pop()
# stack_loc.pop()
# stack.append(nums[i])
# stack_loc.append(i)
#
# return res
# print(Solution().nextLargerNodes(head=0))
# """遍历2,由于2是第一个数,直接入栈。2比一小吗?否,1入栈。1比5小吗?是。1的下一个大数是5.弹出1;2比5小吗,是,2的下一个最大值是5.弹出2.
# 栈空了。5入栈,5比7小吗,是,5的下一个最大数是7,弹出5,7入栈。7是最后一个元素。for循环结束。"""
# def max_next():
# nums = [2, 1, 5 ,7]
# stack = []
# stack_loc = []
# res = [0] * len(nums)
# for i in range(len(nums)):
# while stack and stack[-1] < nums[i]:
# res[stack_loc[-1]] = nums[i]
# stack.pop()
# stack_loc.pop()
# stack.append(nums[i])
# stack_loc.append(i)
# return res
# print(max_next())
# Demo: report how many notes of a given denomination are needed.
note_value, note_count = 20, 12
print('需要面额为{} 的 {} 张'.format(note_value, note_count))
import pandas as pd
import numpy as np
def hard_voting(target):
    """Majority-vote each column of a (num_models, num_pixels) integer array.

    Args:
        target: 2-D array of non-negative ints; each row is one model's
            per-pixel class predictions.

    Returns:
        1-D array (dtype/shape of one row) holding the most frequent class
        per pixel; ties resolve to the smallest label (bincount().argmax()).
    """
    # Fix: the original also unpacked target.shape[0] into an unused `N`.
    num_pixels = target.shape[1]
    result = np.zeros_like(target[0])
    for i in range(num_pixels):
        result[i] = np.bincount(target[:, i]).argmax()
    return result
# Ensemble several segmentation submissions by per-pixel hard voting.
files_list = [
    './ensamble/deeplabv3_resnet101 StepLR.csv',
    './ensamble/DeepLabV3Plus_dpn92_2_mIoU_TTA_average.csv',
    './ensamble/DeepLABV3plus_resnet101_2021_05_06_3.csv',
    # './ensamble/DeepLabV3Plus_dpn92_2_mIoU_TTA_hardvote.csv'
]
df_list = [pd.read_csv(file, index_col=None) for file in files_list]

num_images = len(df_list[0])
final_result = np.zeros((num_images, 65536), dtype=np.int32)
for i in range(num_images):
    # Fix: np.fromstring(text mode) is deprecated — parse the space-separated
    # mask string explicitly instead.
    predictions = np.stack([
        np.array(df['PredictionString'][i].split(), dtype=np.int32)
        for df in df_list
    ])
    final_result[i] = hard_voting(predictions)

submission = pd.read_csv('./submission/sample_submission.csv', index_col=None)
# Fix: DataFrame.append was removed in pandas 2.0 (and appending row-by-row is
# quadratic) — build all rows at once and concat a single time.
rows = pd.DataFrame({
    "image_id": list(df_list[0]['image_id']),
    "PredictionString": [' '.join(str(e) for e in mask.tolist()) for mask in final_result],
})
submission = pd.concat([submission, rows], ignore_index=True)
# Save as submission.csv
submission.to_csv("./ensamble/final_submission.csv", index=False)
from tkinter import *

root = Tk()
root.title("Пример меню на Python")
root.geometry("400x300")

# All drop-down menus share the same look.
MENU_STYLE = dict(font=("Comic sans", 10, "bold"), tearoff=0, activebackground="red")

main_menu = Menu()

file_menu = Menu(**MENU_STYLE)
for item in ("New file", "Save", "Save as ", "Open"):
    file_menu.add_command(label=item)
file_menu.add_separator()
file_menu.add_command(label="Exit")
main_menu.add_cascade(label="File", menu=file_menu)

edit_menu = Menu(**MENU_STYLE)
for item in ("Delete", "Add"):
    edit_menu.add_command(label=item)
main_menu.add_cascade(label="Edit", menu=edit_menu)

view_menu = Menu(**MENU_STYLE)
view_menu.add_command(label="Change view")
main_menu.add_cascade(label="View", menu=view_menu)

root.config(menu=main_menu)
root.mainloop()
|
# Generated by Django 3.1.2 on 2020-11-06 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Order.amount and User.funds non-negative integers."""

    dependencies = [
        ('myapp', '0006_auto_20201106_1506'),
    ]

    operations = [
        # Quantity of items in an order; defaults to a single item.
        migrations.AlterField(
            model_name='order',
            name='amount',
            field=models.PositiveIntegerField(default=1),
        ),
        # Account balance; new users start with 1000 funds.
        migrations.AlterField(
            model_name='user',
            name='funds',
            field=models.PositiveIntegerField(default=1000),
        ),
    ]
|
#!/bin/python3
import numpy as np
import pickle
from lnm.kneser_ney import kneser_ney
from lnm.witten_bell import witten_bell
from sklearn.model_selection import train_test_split
# Compare Kneser-Ney vs Witten-Bell smoothing on a held-out sentence set.
with open('resources/sentences.pkl', 'rb') as fh:
    corpus = pickle.load(fh)
# Note: despite the X/y names this is a train/held-out split of sentences,
# not a features/labels split: X trains the models, y is evaluated.
X, y = train_test_split(corpus, test_size=0.2)

kn_df = []
wb_df = []

# Fix: the original left these file handles open.
with open('X.pkl', 'wb') as fh:
    pickle.dump(X, fh)
with open('y.pkl', 'wb') as fh:
    pickle.dump(y, fh)

# Join the training sentences into one space-separated corpus string.
X_cor = ''.join(line + ' ' for line in X)
kn_model = kneser_ney(X_cor)
wb_model = witten_bell(X_cor)

for order in range(1, 2):
    kn_tmp = [kn_model.estimate(sent, order) for sent in y]
    # BUG FIX: the original scored wb_tmp with kn_model too, so both plotted
    # curves were identical and Witten-Bell was never actually evaluated.
    wb_tmp = [wb_model.estimate(sent, order) for sent in y]
    kn_df.append([kn_tmp])
    wb_df.append([wb_tmp])

import matplotlib.pyplot as plt

n = len(y)  # Fix: previously hard-coded as 758
plt.plot([i for i in range(n)], np.reshape(kn_df[0], (n,)), 'r')
plt.plot([i for i in range(n)], np.reshape(wb_df[0], (n,)), 'b')
plt.show()
|
# myiterator.py
# Demonstrates walking a list manually with an iterator object.
L = [2, 3, 5, 7]
it = iter(L)  # ask the list for its iterator
while True:
    try:
        x = next(it)
    except StopIteration:
        # iterator exhausted — stop the manual loop
        break
    print(x)
print("---------------")
# The for statement performs the same iterator protocol implicitly.
L = [2, 3, 5, 7]
for x in L:
    print(x)
print("程序结束")
|
import sys, os
from math import sin,pi
from random import choice
from Utils.observer import *
from Utils.signal import *
from Utils.wav_audio import save_wav
from tkinter import Tk,Canvas, messagebox
from tkinter import filedialog
# TODO: remove unused imports
class Signal(Subject):
    """Observable harmonic signal.

    Stores waveform parameters (magnitude, frequency, phase, number of extra
    harmonics, duration), can sample itself into (t, value) pairs for display
    and render itself to a .wav file. As a Subject, it notifies attached
    observers whenever its sampled values or playback state change.
    """
    def __init__(self, magnitude=1.0, frequency=1.0, phase=0.0, N_harm=0, duration=2, keyname="", color=None):
        Subject.__init__(self)
        self.set(magnitude, frequency, phase, N_harm, duration, keyname, color)
        self.values = None       # sampled (t, amplitude) pairs, set by generate()
        self.wavname = None      # rendered .wav file name, set by generate_sound()
        self.isplaying = False   # transiently True while observers are told to play
    def __str__(self):
        # Display name: explicit wav name if set, otherwise keyname or a
        # name derived from the waveform data (with ".wav" stripped).
        if self.wavname is None:
            if(self.keyname):
                return self.keyname+".wav"
            return self.get_wavname_by_data()[:-4]
        else:
            return self.wavname
    def set(self, magnitude=1.0, frequency=1.0, phase=0.0, N_harm=0, duration=2, keyname="", color=None):
        """(Re)assign all waveform parameters; pick a random color if none given."""
        self.magnitude = magnitude
        self.frequency = frequency
        self.phase = phase
        self.N_harm = N_harm
        self.duration = duration
        self.keyname = keyname
        if color is None:
            # random #RRGGBB display color
            self.color = "#" + "".join([choice("0123456789ABCDEF") for _ in range(6)])
        else:
            self.color = color
    def harmonize(self, t, N=0):
        """Sample the signal at time t: fundamental plus N extra harmonics.

        Harmonic h has frequency f*h and amplitude a/h.
        """
        a,f,p=self.magnitude,self.frequency,self.phase
        return sum([(a/h)*sin(2*pi*(f*h)*t-p) for h in range(1, N+2)])
    def generate(self, period=2, samples=100):
        """Sample the waveform over `period` into self.values and notify observers."""
        Tech = period/samples  # sampling step
        print("Tech",Tech,period,samples)
        self.values = [(t*Tech,self.harmonize(t*Tech, self.N_harm)) for t in range(int(samples)+1)]
        print(self)
        self.notify()
        return self.values
    def unset_values(self):
        """Drop any sampled values and notify observers."""
        if(self.values is not None):
            self.values.clear()
            self.notify()
    def generate_sound(self, force=False):
        """Render the signal to a wav file.

        Returns 1 if a matching file already exists (and force is False),
        0 on success, -1 on error.
        """
        wavname = self.get_wavname_by_data()
        # NOTE(review): existence is checked under "Sounds/" but save_wav is
        # called with the bare name — confirm save_wav writes into Sounds/.
        existing_file = os.path.exists("Sounds/"+wavname)
        print(existing_file)
        if(not force and existing_file):
            self.wavname = wavname
            return 1 #already generated file
        try:
            framerate = 8000  # samples per second
            wav_values = [self.harmonize(t/framerate, self.N_harm) for t in range(int(framerate*self.duration))]
            save_wav(wavname, wav_values, framerate)
            success = True
        except Exception as e:
            # best-effort: report the failure and fall through to the error return
            print(e)
            success = False
        if(success):
            self.wavname = wavname
            return 0 #success
        else:
            return -1 #error
    def play(self):
        """Ask observers to play the wav; returns 0, or -1 if no wav exists."""
        if self.wavname is not None:
            # observers see isplaying == True during this notify and start playback
            self.isplaying = True
            self.notify()
            self.isplaying = False
            return 0
        else:
            return -1
    def get_wavname_by_data(self):
        """Deterministic wav file name derived from the waveform parameters."""
        return "{0}_{1:.2f}_{2}_{3}_{4}_{5}.wav".format(self.keyname, self.frequency, self.N_harm, self.duration, self.magnitude, self.phase)
    def reset_wavname(self):
        """Forget the associated wav file and notify observers."""
        self.wavname = None
        self.notify()
    def set_wavname(self, name=None):
        """Set the wav name (derived from the data when name is None); return it."""
        if name is None:
            self.wavname = self.get_wavname_by_data()
        else:
            self.wavname = name
        return self.wavname
|
from django.conf.urls import url
from . import views
# Route table for the wishlist app.
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in Django 4.0 — migrate to django.urls.re_path/path when upgrading.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^validate$', views.validate),
    url(r'^dashboard$', views.dashboard),
    url(r'^create$', views.create),
    url(r'^additem$', views.additem),
    url(r'^wishes/(?P<id>\d+)$', views.wishes),
    url(r'^logout$', views.logout),
    url(r'^removeitem/(?P<id>\d+)$', views.removeitem),
    url(r'^deleteitem/(?P<id>\d+)$', views.deleteitem),
    url(r'^addfromanother/(?P<id>\d+)$', views.addfromanother)
]
|
from django.contrib import admin
from esign.models import SigEnvelope, SignRequest, Recipient
# Expose the e-sign models in the Django admin with default ModelAdmin options.
for model in (SigEnvelope, SignRequest, Recipient):
    admin.site.register(model)
|
# O(nlogn) Time | O(1) Space, where n is length of coins
def nonConstructibleChange(coins):
    """Return the smallest amount of change that cannot be made from coins.

    Greedy invariant: after processing coins <= reachable + 1 in ascending
    order, every amount in [1, reachable] is constructible.
    """
    coins.sort()
    reachable = 0
    for coin in coins:
        if coin > reachable + 1:
            # Gap found: reachable + 1 can never be formed.
            break
        reachable += coin
    return reachable + 1
# O(n*n^n) Time | O(n) Space, where n is length of coins
def nonConstructibleChange(coins):
    """Brute-force variant: probe every target up to sum(coins) and return
    the first amount that no subset of coins can form."""
    coins.sort()
    total = sum(coins)
    for target in range(total):
        if not canConstruct(target, coins):
            return target
    # Every amount up to the total is constructible.
    return total + 1

def canConstruct(target, coins):
    """True when some subset of coins sums exactly to target."""
    if target < 0:
        return False
    if target == 0:
        return True
    # Try each coin once, recursing on the remaining coins.
    return any(
        canConstruct(target - coins[idx], coins[:idx] + coins[idx + 1:])
        for idx in range(len(coins))
        if coins[idx] <= target
    )
|
# Python 2 script: demo of re substitution on ROOT-generated C++ source.
import re
# a = re.compile('fChain->SetBranchAddress("^[\w\s\S&]$"^[\w\s\S&,]$);')
# Pattern matches the literal substring 'SetBranchAddress'.
a = re.compile('SetBranchAddress')
num = 10    # process at most this many matching lines
count = 0
# NOTE(review): the file handle is never closed; wrap in `with` if revisited.
for i in open("branch_need_toSet.Cpp","r").readlines():
    if count<num:
        if 'SetBranchAddress' in i:
            # Replace the matched name with 'hhh' and show the rewritten line.
            new_text = a.sub(r'hhh',i)
            print new_text
            count += 1

# Demo: reorder a mm/dd/yy date into yy-mm-dd using group backreferences.
text='hhhh 12/18/19'
print(re.sub(r'(\d+)/(\d+)/(\d+)',r'\3-\1-\2',text))
print(text)  # re.sub returns a new string; `text` itself is unchanged
|
# -*- coding: utf-8 -*-
import os
class Configuration(object):
    """Flask application settings (SQLite via SQLAlchemy)."""
    # Directory containing this file; anchors the SQLite database path.
    APPLICATION_DIR = os.path.dirname(os.path.realpath(__file__))
    # NOTE(review): DEBUG must be disabled for production deployments.
    DEBUG = True
    ## dialect+driver://username:password@host:port/database
    ## postgresql://postgres:secretpassword@localhost:5432/blog_db
    SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/sqlite.db' % APPLICATION_DIR
    SQLALCHEMY_TRACK_MODIFICATIONS = True # to mute the warning
|
# -*- coding: utf-8 -*-
import socket
import time
def egcd(a, b):
    """Iterative extended Euclid: return (x, y, g) with a*x + b*y = g = gcd(a, b)."""
    x, prev_x = 0, 1
    y, prev_y = 1, 0
    while b != 0:
        quotient = a // b
        a, b = b, a % b
        x, prev_x = prev_x - quotient * x, x
        y, prev_y = prev_y - quotient * y, y
    return (prev_x, prev_y, a)

def modinv(a, m):
    """Modular inverse of a modulo m (meaningful only when gcd(a, m) == 1)."""
    inverse, _, _ = egcd(a, m)
    return inverse % m

def calc(n, c1, c2):
    """Recover the plaintext m from the two related RSA ciphertexts mod n."""
    return (c2 + 2 * c1 - 1) * modinv(c2 - c1 + 2, n) % n
def first(clientsock):
    """Handle round 1: n and c1 arrive in one message, c2 in the next.

    Solves for m, sends it back, and pauses briefly before the next round.
    NOTE(review): recv(1024) assumes each server message fits one read.
    """
    line1 = clientsock.recv(1024)
    line2 = clientsock.recv(1024)
    n, c1 = map(int,line1.split("\n"))
    c2 = int(line2.strip())
    m = calc(n,c1,c2)
    print "m=",m
    clientsock.sendall(str(m)+"\n")
    time.sleep(0.5)  # let the server produce the next challenge

def mid(clientsock):
    """Handle a middle round: n, c1, c2 arrive together in one message."""
    line = clientsock.recv(1024)
    print line
    n ,c1,c2 = map(int, line.split("\n")[1:4])
    m = calc(n,c1,c2)
    print "m=",m
    clientsock.sendall(str(m)+"\n")
    time.sleep(0.5)

def last(clientsock):
    """Handle the final round and print the recovered flag (hex-decoded m)."""
    line = clientsock.recv(1024)
    print line
    n ,c1,c2 = map(int, line.split("\n")[1:4])
    m = calc(n,c1,c2)
    print "m=",m
    print
    print "flag:", hex(m)[2:-1].decode('hex')

# Python 2 client: connect and play one first round, eight middle rounds,
# then the final round that reveals the flag.
host = 'localhost'
port = 8888
clientsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsock.connect((host,port))
first(clientsock)
for i in range(1,9):
    print i
    mid(clientsock)
last(clientsock)
clientsock.close()
|
from Model import *
from DataType import *
from TemplateFile import *
class MyModel(Model):
    def __init__(self):
        """Build the full model definition: data types, databases and modules."""
        Model.__init__(self, 'Model')
        self.setupDataTypes()
        self.setupDatabases()
        # NOTE(review): setupPythonModules is not defined in this part of the
        # file — confirm it exists elsewhere in the class.
        self.setupPythonModules()
def setupDataTypes(self):
self.createDataType('Integer')
self.createDataType('Unicode')
self.createDataType('Float')
self.createDataType('Date')
self.createDataType('Time')
self.createDataType('Timestamp')
def setupDatabases(self):
self.createDatabase('profilelogger')
self.setupTablesInProfileLogger()
def setupTablesInProfileLogger(self):
db = self.createDatabase('profilelogger')
s = db.createSchema('data')
self.setupTableGraphicPrimitives(s)
self.setupTableLengthUnits(s)
self.setupTableProjects(s)
self.setupTableProfiles(s)
self.setupTableProfileColumns(s)
self.setupTableProfileColumnsInProfile(s)
self.setupTableBeds(s)
self.setupTableColors(s)
self.setupTableColorsInBed(s)
self.setupTableOutcropTypes(s)
self.setupTableOutcropTypesInBed(s)
self.setupTableFacies(s)
self.setupTableFaciesInBed(s)
self.setupTableLithologies(s)
self.setupTableLithologiesInBed(s)
self.setupTableTectonicUnitTypes(s)
self.setupTableTectonicUnits(s)
self.setupTableTectonicUnitsInBed(s)
self.setupTableGrainSizeTypes(s)
self.setupTableGrainSizes(s)
self.setupTableGrainSizesInBed(s)
self.setupTableLithologicUnitTypes(s)
self.setupTableLithologicUnits(s)
self.setupTableLithologicUnitsInBed(s)
self.setupTableSedimentologicUnitTypes(s)
self.setupTableSedimentologicUnits(s)
self.setupTableSedimentologicUnitsInBed(s)
self.setupTableStratigraphicUnitTypes(s)
self.setupTableStratigraphicUnits(s)
self.setupTableStratigraphicUnitsInBed(s)
self.setupTableFossils(s)
self.setupTableFossilsInBed(s)
self.setupTableBeddingTypes(s)
self.setupTableBeddingTypesInBed(s)
self.setupTableBoundaryTypes(s)
self.setupTableBoundaryTypesInBed(s)
self.setupTableSedimentStructures(s)
self.setupTableSedimentStructuresInBed(s)
self.setupTableCustomSymbols(s)
self.setupTableCustomSymbolsInBed(s)
self.setupTableFieldBooks(s)
self.setupTableFieldBookEntries(s)
def setupTableProfileColumns(self, s):
t = s.createTable('profile_columns', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
def setupTableProfileColumnsInProfile(self, s):
t = s.createTable('profile_columns_in_profiles')
t.createColumn('profile_id', nullable=False, referencedColumn=s.table('profiles').column('id'))
t.createColumn('profile_column_id', nullable=False, referencedColumn=s.table('profile_columns').column('id'))
t.createColumn('position', self.dataType('Integer'), nullable=False, defaultValue=0)
def setupTableGraphicPrimitives(self, s):
t = s.createTable('graphic_primitives', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('svg_data', self.dataType('Unicode'), nullable=False, defaultText='')
t.createColumn('original_path', self.dataType('Unicode'), nullable=False, defaultText='')
def setupTableTectonicUnitTypes(self, s):
t = s.createTable('tectonic_unit_types', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createUniqueConstraint('u_tectonic_unit_types_name', [t.column('name')])
def setupTableTectonicUnits(self, s):
t = s.createTable('tectonic_units', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('tectonic_unit_type_id', nullable=False, referencedColumn=s.table('tectonic_unit_types').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_tectonic_unit_in_project', [t.column('name'), t.column('project_id')])
def setupTableTectonicUnitsInBed(self, s):
t = s.createTable('tectonic_units_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('tectonic_unit_id', nullable=False, referencedColumn=s.table('tectonic_units').column('id'))
t.createRangeCheckConstraint('chk_tectonic_units_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_tectonic_units_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableGrainSizeTypes(self, s):
t = s.createTable('grain_size_types', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createUniqueConstraint('u_grain_size_types_name', [t.column('name')])
def setupTableGrainSizes(self, s):
t = s.createTable('grain_sizes', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('grain_size_type_id', nullable=False, referencedColumn=s.table('grain_size_types').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createColumn('percent_from_max', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createUniqueConstraint('u_grain_size_in_project', [t.column('name'), t.column('project_id')])
t.createRangeCheckConstraint('chk_grain_sizes_percent_from_max_range', t.column('percent_from_max'), 0, 100)
def setupTableGrainSizesInBed(self, s):
t = s.createTable('grain_sizes_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('grain_size_id', nullable=False, referencedColumn=s.table('grain_sizes').column('id'))
t.createRangeCheckConstraint('chk_grain_sizes_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_grain_sizes_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableLithologicUnitTypes(self, s):
t = s.createTable('lithologic_unit_types', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createUniqueConstraint('u_lithologic_unit_types_name', [t.column('name')])
def setupTableLithologicUnits(self, s):
t = s.createTable('lithologic_units', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('lithologic_unit_type_id', nullable=False, referencedColumn=s.table('lithologic_unit_types').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_lithologic_unit_in_project', [t.column('name'), t.column('project_id')])
def setupTableLithologicUnitsInBed(self, s):
t = s.createTable('lithologic_units_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('lithologic_unit_id', nullable=False, referencedColumn=s.table('lithologic_units').column('id'))
t.createRangeCheckConstraint('chk_lithologic_units_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_lithologic_units_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableSedimentologicUnitTypes(self, s):
t = s.createTable('sedimentologic_unit_types', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createUniqueConstraint('u_sedimentologic_unit_types_name', [t.column('name')])
def setupTableSedimentologicUnits(self, s):
t = s.createTable('sedimentologic_units', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('sedimentologic_unit_type_id', nullable=False, referencedColumn=s.table('sedimentologic_unit_types').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_sedimentologic_unit_in_project', [t.column('name'), t.column('project_id')])
def setupTableSedimentologicUnitsInBed(self, s):
t = s.createTable('sedimentologic_units_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('sedimentologic_unit_id', nullable=False, referencedColumn=s.table('sedimentologic_units').column('id'))
t.createRangeCheckConstraint('chk_sedimentologic_units_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_sedimentologic_units_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableStratigraphicUnitTypes(self, s):
t = s.createTable('stratigraphic_unit_types', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createUniqueConstraint('u_stratigraphic_unit_types_name', [t.column('name')])
def setupTableStratigraphicUnits(self, s):
t = s.createTable('stratigraphic_units', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('stratigraphic_unit_type_id', nullable=False, referencedColumn=s.table('stratigraphic_unit_types').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_stratigraphic_unit_in_project', [t.column('name'), t.column('project_id')])
def setupTableStratigraphicUnitsInBed(self, s):
t = s.createTable('stratigraphic_units_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('stratigraphic_unit_id', nullable=False, referencedColumn=s.table('stratigraphic_units').column('id'))
t.createRangeCheckConstraint('chk_stratigraphic_units_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_stratigraphic_units_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableColors(self, s):
t = s.createTable('colors', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_colors_name', [t.column('name'), t.column('project_id'), ])
def setupTableColorsInBed(self, s):
t = s.createTable('colors_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('color_id', nullable=False, referencedColumn=s.table('colors').column('id'))
t.createRangeCheckConstraint('chk_colors_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_colors_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableOutcropTypes(self, s):
t = s.createTable('outcrop_types', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_outcrop_types_name', [t.column('name'), t.column('project_id'), ])
def setupTableOutcropTypesInBed(self, s):
t = s.createTable('outcrop_types_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('outcrop_type_id', nullable=False, referencedColumn=s.table('outcrop_types').column('id'))
t.createRangeCheckConstraint('chk_outcrop_types_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_outcrop_types_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableFacies(self, s):
t = s.createTable('facies', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_facies_name', [t.column('name'), t.column('project_id'), ])
def setupTableFaciesInBed(self, s):
t = s.createTable('facies_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('facies_id', nullable=False, referencedColumn=s.table('facies').column('id'))
t.createRangeCheckConstraint('chk_facies_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_facies_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableLithologies(self, s):
t = s.createTable('lithologies', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_lithologies_name', [t.column('name'), t.column('project_id'), ])
def setupTableLithologiesInBed(self, s):
t = s.createTable('lithologies_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('lithology_id', nullable=False, referencedColumn=s.table('lithologies').column('id'))
t.createRangeCheckConstraint('chk_lithologies_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_lithologies_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableFossils(self, s):
t = s.createTable('fossils', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_fossils_name', [t.column('name'), t.column('project_id'), ])
def setupTableFossilsInBed(self, s):
t = s.createTable('fossils_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('fossil_id', nullable=False, referencedColumn=s.table('fossils').column('id'))
t.createRangeCheckConstraint('chk_fossils_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_fossils_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableBeddingTypes(self, s):
t = s.createTable('bedding_types', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_bedding_types_name', [t.column('name'), t.column('project_id'), ])
def setupTableBeddingTypesInBed(self, s):
t = s.createTable('bedding_types_in_beds', hasDescriptionColumn=True)
t.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
t.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
t.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
t.createColumn('bedding_type_id', nullable=False, referencedColumn=s.table('bedding_types').column('id'))
t.createRangeCheckConstraint('chk_bedding_types_in_beds_base_in_range', t.column('base'), 0, 100)
t.createRangeCheckConstraint('chk_bedding_types_in_beds_top_in_range', t.column('top'), 0, 100)
def setupTableBoundaryTypes(self, s):
t = s.createTable('boundary_types', hasNameColumn=True, hasDescriptionColumn=True)
t.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
t.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
t.createUniqueConstraint('u_boundary_types_name', [t.column('name'), t.column('project_id'), ])
def setupTableBoundaryTypesInBed(self, s):
    """Define the bed <-> boundary-type link table with a base..top interval."""
    link = s.createTable('boundary_types_in_beds', hasDescriptionColumn=True)
    link.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
    link.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
    link.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
    link.createColumn('boundary_type_id', nullable=False, referencedColumn=s.table('boundary_types').column('id'))
    # Keep both interval ends inside [0, 100].
    link.createRangeCheckConstraint('chk_boundary_types_in_beds_base_in_range', link.column('base'), 0, 100)
    link.createRangeCheckConstraint('chk_boundary_types_in_beds_top_in_range', link.column('top'), 0, 100)
def setupTableSedimentStructures(self, s):
    """Define the per-project 'sediment_structures' lookup table in schema *s*."""
    table = s.createTable('sediment_structures', hasNameColumn=True, hasDescriptionColumn=True)
    table.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
    table.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
    table.createUniqueConstraint('u_sediment_structures_name', [table.column('name'), table.column('project_id')])
def setupTableSedimentStructuresInBed(self, s):
    """Define the bed <-> sediment-structure link table with a base..top interval."""
    link = s.createTable('sediment_structures_in_beds', hasDescriptionColumn=True)
    link.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
    link.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
    link.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
    link.createColumn('sediment_structure_id', nullable=False, referencedColumn=s.table('sediment_structures').column('id'))
    # Keep both interval ends inside [0, 100].
    link.createRangeCheckConstraint('chk_sediment_structures_in_beds_base_in_range', link.column('base'), 0, 100)
    link.createRangeCheckConstraint('chk_sediment_structures_in_beds_top_in_range', link.column('top'), 0, 100)
def setupTableCustomSymbols(self, s):
    """Define the per-project 'custom_symbols' lookup table in schema *s*."""
    table = s.createTable('custom_symbols', hasNameColumn=True, hasDescriptionColumn=True)
    table.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
    table.createColumn('graphic_primitive_id', nullable=False, referencedColumn=s.table('graphic_primitives').column('id'))
    table.createUniqueConstraint('u_custom_symbols_name', [table.column('name'), table.column('project_id')])
def setupTableCustomSymbolsInBed(self, s):
    """Define the bed <-> custom-symbol link table with a base..top interval."""
    link = s.createTable('custom_symbols_in_beds', hasDescriptionColumn=True)
    link.createColumn('base', self.dataType('Integer'), nullable=False, defaultValue=0)
    link.createColumn('top', self.dataType('Integer'), nullable=False, defaultValue=100)
    link.createColumn('bed_id', nullable=False, referencedColumn=s.table('beds').column('id'))
    link.createColumn('custom_symbol_id', nullable=False, referencedColumn=s.table('custom_symbols').column('id'))
    # Keep both interval ends inside [0, 100].
    link.createRangeCheckConstraint('chk_custom_symbols_in_beds_base_in_range', link.column('base'), 0, 100)
    link.createRangeCheckConstraint('chk_custom_symbols_in_beds_top_in_range', link.column('top'), 0, 100)
def setupTableLengthUnits(self, s):
    """Define the global 'length_units' lookup: unique name plus unique size in micrometres."""
    units = s.createTable('length_units', hasNameColumn=True, nameColumnIsUnique=True, hasDescriptionColumn=True)
    units.createColumn('micro_metres', self.dataType('Integer'), nullable=False, defaultValue=0, isUnique=True)
def setupTableBeds(self, s):
    """Define the 'beds' table: ordered, numbered beds belonging to a profile.

    Fixes:
    - 'profile_id' previously referenced projects.id; a bed belongs to a
      profile (the 'u_bed_number_in_profile' constraint below confirms the
      intent), so it now references profiles.id.  Assumes the 'profiles'
      table is created before this one — confirm setup call order.
    - 'bed_number' carried isUnique=True (global uniqueness), which
      contradicts the per-profile composite unique constraint below; the
      global flag was dropped so the same bed number can recur across
      profiles.
    """
    t = s.createTable('beds', hasDescriptionColumn=True)
    t.createColumn('position', self.dataType('Integer'), nullable=False)
    t.createColumn('bed_number', self.dataType('Unicode'), nullable=False, notEmpty=True)
    t.createColumn('profile_id', nullable=False, referencedColumn=s.table('profiles').column('id'))
    t.createColumn('height', self.dataType('Integer'), nullable=False, defaultValue=0)
    t.createColumn('height_length_unit_id', nullable=False, referencedColumn=s.table('length_units').column('id'))
    # Bed numbers are unique within a profile, not globally.
    t.createUniqueConstraint('u_bed_number_in_profile', [t.column('profile_id'), t.column('bed_number')])
def setupTableFieldBookEntries(self, s):
    """Define 'field_book_entries', each belonging to one field book.

    Fix: 'field_book_id' previously referenced projects.id — almost
    certainly a copy-paste error; it now references field_books.id.
    Assumes the 'field_books' table is created before this one — confirm
    setup call order.
    """
    t = s.createTable('field_book_entries')
    t.createColumn('field_book_id', nullable=False, referencedColumn=s.table('field_books').column('id'))
def setupTableFieldBooks(self, s):
    """Define the 'field_books' table with a defaulted, non-empty title."""
    books = s.createTable('field_books', hasDescriptionColumn=True)
    books.createColumn('title', self.dataType('Unicode'), defaultText='new field book', notEmpty=True)
def setupTableProjects(self, s):
    """Define the root 'projects' table (standard name + description columns only)."""
    t = s.createTable('projects', hasNameColumn=True, hasDescriptionColumn=True)
def setupTableProfiles(self, s):
    """Define 'profiles'; profile names are unique within their project."""
    profiles = s.createTable('profiles', hasNameColumn=True, hasDescriptionColumn=True)
    profiles.createColumn('project_id', nullable=False, referencedColumn=s.table('projects').column('id'))
    profiles.createUniqueConstraint('u_profile_name_in_project', [profiles.column('project_id'), profiles.column('name')])
def setupPythonModules(self):
    """Build the generated package layout (Logic + Gui) and populate each sub-module."""
    logic = self.createPythonModule('Logic')
    self.pythonDataModule = logic.createPythonModule('Model')
    finders = logic.createPythonModule('Finders')
    # Local renamed from the misspelled 'presistance'; the generated module
    # name string is kept byte-identical (historic spelling).
    persistence = logic.createPythonModule('Persistance')
    gui = self.createPythonModule('Gui')
    self.comboBoxModule = gui.createPythonModule('ComboBoxes')
    self.itemModelModule = gui.createPythonModule('ItemModels')
    self.treeViewModule = gui.createPythonModule('TreeViews')
    self.managementDialogModule = gui.createPythonModule('ManagementDialogs')
    self.editorDialogModule = gui.createPythonModule('EditorDialogs')
    self.setupDataClasses(self.pythonDataModule)
    self.setupFinderModule(finders)
    self.setupComboBoxModule(self.comboBoxModule)
    self.setupItemModelModule(self.itemModelModule)
    self.setupTreeViewModule(self.treeViewModule)
    self.setupManagementDialogModule(self.managementDialogModule)
    self.setupEditorDialogModule(self.editorDialogModule)
def setupEditorDialogModule(self, module):
    """Create one editor-dialog class per entity, all from the same template.

    Fix: the 'BeddingType'/'BeddingTypeGlobal' pair was listed twice; the
    duplicate entries were removed so each dialog class is created once.
    Original entity order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Gui/EditorDialogs/EditorDialogTemplate.py')
    entities = [
        'GraphicPrimitive', 'Project', 'LengthUnit', 'Profile',
        'ProfileColumn', 'ProfileColumnInProfile', 'Bed', 'Color',
        'ColorGlobal', 'OutcropType', 'OutcropTypeGlobal', 'Facies',
        'FaciesGlobal', 'Lithology', 'LithologyGlobal',
        'TectonicUnitType', 'TectonicUnit', 'TectonicUnitGlobal',
        'GrainSizeType', 'GrainSize', 'GrainSizeGlobal',
        'LithologicUnitType', 'LithologicUnit', 'LithologicUnitGlobal',
        'SedimentologicUnitType', 'SedimentologicUnit', 'SedimentologicUnitGlobal',
        'StratigraphicUnitType', 'StratigraphicUnit', 'StratigraphicUnitGlobal',
        'Fossil', 'FossilGlobal', 'BeddingType', 'BeddingTypeGlobal',
        'BoundaryType', 'BoundaryTypeGlobal', 'SedimentStructure',
        'SedimentStructureGlobal', 'CustomSymbol', 'CustomSymbolGlobal',
        'FieldBook', 'FieldBookEntry',
    ]
    self.createEditorDialogClasses(module, [[name, globalTmpl] for name in entities])
def setupManagementDialogModule(self, module):
    """Create one management-dialog class per entity with its template and title.

    Fix: 'BeddingType' and 'BeddingTypeInBed' were each listed twice; the
    duplicates were removed. Original order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Gui/ManagementDialogs/GlobalManagementDialogTemplate.py')
    inProjectTmpl = TemplateFile('templates/Gui/ManagementDialogs/InProjectManagementDialogTemplate.py')
    inProfileTmpl = TemplateFile('templates/Gui/ManagementDialogs/InProfileManagementDialogTemplate.py')
    inBedTmpl = TemplateFile('templates/Gui/ManagementDialogs/InBedManagementDialogTemplate.py')
    inFieldBookTmpl = TemplateFile('templates/Gui/ManagementDialogs/InFieldBookManagementDialogTemplate.py')
    self.createManagementDialogClasses(module, [
        ['GraphicPrimitive', globalTmpl, 'Graphic Primitives', ],
        ['Project', globalTmpl, 'Projects', ],
        ['LengthUnit', globalTmpl, 'Length Units', ],
        ['FieldBook', globalTmpl, 'Field Books', ],
        ['Profile', inProjectTmpl, 'Profiles', ],
        ['Bed', inProfileTmpl, 'Beds', ],
        ['Color', inProjectTmpl, 'Colors', ],
        ['OutcropType', inProjectTmpl, 'Outcrop Types', ],
        ['Facies', inProjectTmpl, 'Facies', ],
        ['Lithology', inProjectTmpl, 'Lithologies', ],
        ['TectonicUnitType', inProjectTmpl, 'Tectonic Unit Types', ],
        ['TectonicUnit', inProjectTmpl, 'Tectonic Units', ],
        ['GrainSizeType', inProjectTmpl, 'Grain Size Types', ],
        ['GrainSize', inProjectTmpl, 'Grain Sizes', ],
        ['LithologicUnitType', inProjectTmpl, 'Lithological Unit Types', ],
        ['LithologicUnit', inProjectTmpl, 'Lithological Units', ],
        ['SedimentologicUnitType', inProjectTmpl, 'Sedimentologic Unit Types', ],
        ['SedimentologicUnit', inProjectTmpl, 'Sedimentologic Units', ],
        ['StratigraphicUnitType', inProjectTmpl, 'Stratigraphic Unit Types', ],
        ['StratigraphicUnit', inProjectTmpl, 'Stratigraphic Units', ],
        ['Fossil', inProjectTmpl, 'Fossils', ],
        ['BeddingType', inProjectTmpl, 'Bedding Types', ],
        ['BoundaryType', inProjectTmpl, 'Boundary Types', ],
        ['SedimentStructure', inProjectTmpl, 'Sediment Structures', ],
        ['CustomSymbol', inProjectTmpl, 'Custom Symbols', ],
        ['ProfileColumn', inProfileTmpl, 'Profile Columns', ],
        ['ProfileColumnInProfile', inProfileTmpl, 'Profile Column In Profile', ],
        ['ColorInBed', inBedTmpl, 'Colors', ],
        ['OutcropTypeInBed', inBedTmpl, 'Outcrop Types', ],
        ['FaciesInBed', inBedTmpl, 'Facies', ],
        ['LithologyInBed', inBedTmpl, 'Lithologies', ],
        ['TectonicUnitInBed', inBedTmpl, 'Tectonic Units', ],
        ['GrainSizeInBed', inBedTmpl, 'Grain Sizes', ],
        ['LithologicUnitInBed', inBedTmpl, 'Lithologic Units', ],
        ['SedimentologicUnitInBed', inBedTmpl, 'Sedimentologic Units', ],
        ['StratigraphicUnitInBed', inBedTmpl, 'Stratigraphic Units', ],
        ['FossilInBed', inBedTmpl, 'Fossils', ],
        ['BeddingTypeInBed', inBedTmpl, 'Bedding Types', ],
        ['BoundaryTypeInBed', inBedTmpl, 'Boundary Types', ],
        ['SedimentStructureInBed', inBedTmpl, 'Sediment Structures', ],
        ['CustomSymbolInBed', inBedTmpl, 'Custom Symbols', ],
        ['FieldBookEntry', inFieldBookTmpl, 'Field Book Entry', ],
    ])
def setupItemModelModule(self, module):
    """Create one item-model class per entity with its template and header labels.

    Fix: 'BeddingType' and 'BeddingTypeInBed' were each listed twice; the
    duplicates were removed. Original order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Gui/ItemModels/GlobalItemModelTemplate.py')
    inProjectTmpl = TemplateFile('templates/Gui/ItemModels/InProjectItemModelTemplate.py')
    inProfileTmpl = TemplateFile('templates/Gui/ItemModels/InProfileItemModelTemplate.py')
    inBedTmpl = TemplateFile('templates/Gui/ItemModels/InBedItemModelTemplate.py')
    inFieldBookTmpl = TemplateFile('templates/Gui/ItemModels/InFieldBookItemModelTemplate.py')
    self.createItemModelClasses(module, [
        ['GraphicPrimitive', globalTmpl, ['Graphic Primitives', ], ],
        ['Project', globalTmpl, ['Projects', ], ],
        ['LengthUnit', globalTmpl, ['Length Units', ], ],
        ['FieldBook', globalTmpl, ['Field Books', ], ],
        ['Profile', inProjectTmpl, ['Profiles', ], ],
        ['Bed', inProfileTmpl, ['Beds', ], ],
        ['Color', inProjectTmpl, ['Colors', ], ],
        ['OutcropType', inProjectTmpl, ['Outcrop Types', ], ],
        ['Facies', inProjectTmpl, ['Facies', ], ],
        ['Lithology', inProjectTmpl, ['Lithologies', ], ],
        ['TectonicUnitType', inProjectTmpl, ['Tectonic Unit Types', ], ],
        ['TectonicUnit', inProjectTmpl, ['Tectonic Units', ], ],
        ['GrainSizeType', inProjectTmpl, ['Grain Size Types', ], ],
        ['GrainSize', inProjectTmpl, ['Grain Sizes', ], ],
        ['LithologicUnitType', inProjectTmpl, ['Lithological Unit Types', ], ],
        ['LithologicUnit', inProjectTmpl, ['Lithological Units', ], ],
        ['SedimentologicUnitType', inProjectTmpl, ['Sedimentologic Unit Types', ], ],
        ['SedimentologicUnit', inProjectTmpl, ['Sedimentologic Units', ], ],
        ['StratigraphicUnitType', inProjectTmpl, ['Stratigraphic Unit Types', ], ],
        ['StratigraphicUnit', inProjectTmpl, ['Stratigraphic Units', ], ],
        ['Fossil', inProjectTmpl, ['Fossils', ], ],
        ['BeddingType', inProjectTmpl, ['Bedding Types', ], ],
        ['BoundaryType', inProjectTmpl, ['Boundary Types', ], ],
        ['SedimentStructure', inProjectTmpl, ['Sediment Structures', ], ],
        ['CustomSymbol', inProjectTmpl, ['Custom Symbols', ], ],
        ['ProfileColumn', inProfileTmpl, ['Profile Columns', ], ],
        ['ProfileColumnInProfile', inProfileTmpl, ['Profile Column In Profile', ], ],
        ['ColorInBed', inBedTmpl, ['Colors', ], ],
        ['OutcropTypeInBed', inBedTmpl, ['Outcrop Types', ], ],
        ['FaciesInBed', inBedTmpl, ['Facies', ], ],
        ['LithologyInBed', inBedTmpl, ['Lithologies', ], ],
        ['TectonicUnitInBed', inBedTmpl, ['Tectonic Units', ], ],
        ['GrainSizeInBed', inBedTmpl, ['Grain Sizes', ], ],
        ['LithologicUnitInBed', inBedTmpl, ['Lithologic Units', ], ],
        ['SedimentologicUnitInBed', inBedTmpl, ['Sedimentologic Units', ], ],
        ['StratigraphicUnitInBed', inBedTmpl, ['Stratigraphic Units', ], ],
        ['FossilInBed', inBedTmpl, ['Fossils', ], ],
        ['BeddingTypeInBed', inBedTmpl, ['Bedding Types', ], ],
        ['BoundaryTypeInBed', inBedTmpl, ['Boundary Types', ], ],
        ['SedimentStructureInBed', inBedTmpl, ['Sediment Structures', ], ],
        ['CustomSymbolInBed', inBedTmpl, ['Custom Symbols', ], ],
        ['FieldBookEntry', inFieldBookTmpl, ['Field Book Entry', ], ],
    ])
def setupTreeViewModule(self, module):
    """Create one tree-view class per entity from its scope-specific template.

    Fix: 'BeddingType' and 'BeddingTypeInBed' were each listed twice; the
    duplicates were removed. Original order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Gui/TreeViews/GlobalTreeViewTemplate.py')
    inProjectTmpl = TemplateFile('templates/Gui/TreeViews/InProjectManagementTreeViewTemplate.py')
    inProfileTmpl = TemplateFile('templates/Gui/TreeViews/InProfileManagementTreeViewTemplate.py')
    inBedTmpl = TemplateFile('templates/Gui/TreeViews/InBedManagementTreeViewTemplate.py')
    inFieldBookTmpl = TemplateFile('templates/Gui/TreeViews/InFieldBookManagementTreeViewTemplate.py')
    spec = [
        ('GraphicPrimitive', globalTmpl), ('Project', globalTmpl),
        ('LengthUnit', globalTmpl), ('FieldBook', globalTmpl),
        ('Profile', inProjectTmpl), ('Bed', inProfileTmpl),
        ('Color', inProjectTmpl), ('OutcropType', inProjectTmpl),
        ('Facies', inProjectTmpl), ('Lithology', inProjectTmpl),
        ('TectonicUnitType', inProjectTmpl), ('TectonicUnit', inProjectTmpl),
        ('GrainSizeType', inProjectTmpl), ('GrainSize', inProjectTmpl),
        ('LithologicUnitType', inProjectTmpl), ('LithologicUnit', inProjectTmpl),
        ('SedimentologicUnitType', inProjectTmpl), ('SedimentologicUnit', inProjectTmpl),
        ('StratigraphicUnitType', inProjectTmpl), ('StratigraphicUnit', inProjectTmpl),
        ('Fossil', inProjectTmpl), ('BeddingType', inProjectTmpl),
        ('BoundaryType', inProjectTmpl), ('SedimentStructure', inProjectTmpl),
        ('CustomSymbol', inProjectTmpl),
        ('ProfileColumn', inProfileTmpl), ('ProfileColumnInProfile', inProfileTmpl),
        ('ColorInBed', inBedTmpl), ('OutcropTypeInBed', inBedTmpl),
        ('FaciesInBed', inBedTmpl), ('LithologyInBed', inBedTmpl),
        ('TectonicUnitInBed', inBedTmpl), ('GrainSizeInBed', inBedTmpl),
        ('LithologicUnitInBed', inBedTmpl), ('SedimentologicUnitInBed', inBedTmpl),
        ('StratigraphicUnitInBed', inBedTmpl), ('FossilInBed', inBedTmpl),
        ('BeddingTypeInBed', inBedTmpl), ('BoundaryTypeInBed', inBedTmpl),
        ('SedimentStructureInBed', inBedTmpl), ('CustomSymbolInBed', inBedTmpl),
        ('FieldBookEntry', inFieldBookTmpl),
    ]
    self.createTreeViewClasses(module, [[name, tmpl] for name, tmpl in spec])
def setupComboBoxModule(self, module):
    """Create one combo-box class per entity from its scope-specific template.

    Fix: 'BeddingType' and 'BeddingTypeInBed' were each listed twice; the
    duplicates were removed. Original order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Gui/ComboBoxes/GlobalComboBoxTemplate.py')
    inProjectTmpl = TemplateFile('templates/Gui/ComboBoxes/InProjectComboBoxTemplate.py')
    inProfileTmpl = TemplateFile('templates/Gui/ComboBoxes/InProfileComboBoxTemplate.py')
    inBedTmpl = TemplateFile('templates/Gui/ComboBoxes/InBedComboBoxTemplate.py')
    inFieldBookTmpl = TemplateFile('templates/Gui/ComboBoxes/InFieldBookComboBoxTemplate.py')
    spec = [
        ('GraphicPrimitive', globalTmpl), ('Project', globalTmpl),
        ('LengthUnit', globalTmpl), ('FieldBook', globalTmpl),
        ('Profile', inProjectTmpl), ('Bed', inProfileTmpl),
        ('Color', inProjectTmpl), ('OutcropType', inProjectTmpl),
        ('Facies', inProjectTmpl), ('Lithology', inProjectTmpl),
        ('TectonicUnitType', inProjectTmpl), ('TectonicUnit', inProjectTmpl),
        ('GrainSizeType', inProjectTmpl), ('GrainSize', inProjectTmpl),
        ('LithologicUnitType', inProjectTmpl), ('LithologicUnit', inProjectTmpl),
        ('SedimentologicUnitType', inProjectTmpl), ('SedimentologicUnit', inProjectTmpl),
        ('StratigraphicUnitType', inProjectTmpl), ('StratigraphicUnit', inProjectTmpl),
        ('Fossil', inProjectTmpl), ('BeddingType', inProjectTmpl),
        ('BoundaryType', inProjectTmpl), ('SedimentStructure', inProjectTmpl),
        ('CustomSymbol', inProjectTmpl),
        ('ProfileColumn', inProfileTmpl), ('ProfileColumnInProfile', inProfileTmpl),
        ('ColorInBed', inBedTmpl), ('OutcropTypeInBed', inBedTmpl),
        ('FaciesInBed', inBedTmpl), ('LithologyInBed', inBedTmpl),
        ('TectonicUnitInBed', inBedTmpl), ('GrainSizeInBed', inBedTmpl),
        ('LithologicUnitInBed', inBedTmpl), ('SedimentologicUnitInBed', inBedTmpl),
        ('StratigraphicUnitInBed', inBedTmpl), ('FossilInBed', inBedTmpl),
        ('BeddingTypeInBed', inBedTmpl), ('BoundaryTypeInBed', inBedTmpl),
        ('SedimentStructureInBed', inBedTmpl), ('CustomSymbolInBed', inBedTmpl),
        ('FieldBookEntry', inFieldBookTmpl),
    ]
    self.createComboBoxClasses(module, [[name, tmpl] for name, tmpl in spec])
def setupFinderModule(self, module):
    """Create one finder class per entity from its scope-specific template.

    Fix: 'BeddingType' and 'BeddingTypeInBed' were each listed twice; the
    duplicates were removed. Original order is preserved otherwise.
    """
    globalTmpl = TemplateFile('templates/Logic/Finders/GlobalFinderTemplate.py')
    inProjectTmpl = TemplateFile('templates/Logic/Finders/InProjectFinderTemplate.py')
    inProfileTmpl = TemplateFile('templates/Logic/Finders/InProfileFinderTemplate.py')
    inBedTmpl = TemplateFile('templates/Logic/Finders/InBedFinderTemplate.py')
    inFieldBookTmpl = TemplateFile('templates/Logic/Finders/InFieldBookFinderTemplate.py')
    spec = [
        ('GraphicPrimitive', globalTmpl), ('Project', globalTmpl),
        ('LengthUnit', globalTmpl), ('Profile', inProjectTmpl),
        ('ProfileColumn', inProfileTmpl), ('ProfileColumnInProfile', inProfileTmpl),
        ('Bed', inProfileTmpl),
        ('Color', inProjectTmpl), ('ColorInBed', inBedTmpl),
        ('OutcropType', inProjectTmpl), ('OutcropTypeInBed', inBedTmpl),
        ('Facies', inProjectTmpl), ('FaciesInBed', inBedTmpl),
        ('Lithology', inProjectTmpl), ('LithologyInBed', inBedTmpl),
        ('TectonicUnitType', inProjectTmpl), ('TectonicUnit', inProjectTmpl),
        ('TectonicUnitInBed', inBedTmpl),
        ('GrainSizeType', inProjectTmpl), ('GrainSize', inProjectTmpl),
        ('GrainSizeInBed', inBedTmpl),
        ('LithologicUnitType', inProjectTmpl), ('LithologicUnit', inProjectTmpl),
        ('LithologicUnitInBed', inBedTmpl),
        ('SedimentologicUnitType', inProjectTmpl), ('SedimentologicUnit', inProjectTmpl),
        ('SedimentologicUnitInBed', inBedTmpl),
        ('StratigraphicUnitType', inProjectTmpl), ('StratigraphicUnit', inProjectTmpl),
        ('StratigraphicUnitInBed', inBedTmpl),
        ('Fossil', inProjectTmpl), ('FossilInBed', inBedTmpl),
        ('BeddingType', inProjectTmpl), ('BeddingTypeInBed', inBedTmpl),
        ('BoundaryType', inProjectTmpl), ('BoundaryTypeInBed', inBedTmpl),
        ('SedimentStructure', inProjectTmpl), ('SedimentStructureInBed', inBedTmpl),
        ('CustomSymbol', inProjectTmpl), ('CustomSymbolInBed', inBedTmpl),
        ('FieldBook', globalTmpl), ('FieldBookEntry', inFieldBookTmpl),
    ]
    self.createFinderClasses(module, [[name, tmpl] for name, tmpl in spec])
def setupDataClasses(self, module):
    """Create the 'Entity' base class and one persistent entity class per data table.

    Improvement: the invariant ``self.database.schema('data')`` lookup was
    repeated at every call site; it is now resolved once up front.  The
    per-table setup calls run in the original order.
    """
    template = TemplateFile('templates/Logic/Persistance/Entity.py')
    classEntity = module.createClass('Entity', None, None, template=template)
    # Resolve the schema accessor once; 'table' maps a table name to its object.
    table = self.database.schema('data').table
    self.setupLengthUnitClass(module, classEntity, table('length_units'), template)
    self.setupGraphicPrimitiveClass(module, classEntity, table('graphic_primitives'), template)
    self.setupProjectClass(module, classEntity, table('projects'), template)
    self.setupTectonicUnitTypeClass(module, classEntity, table('tectonic_unit_types'), template)
    self.setupTectonicUnitClass(module, classEntity, table('tectonic_units'), template)
    self.setupTectonicUnitInBedClass(module, classEntity, table('tectonic_units_in_beds'), template)
    self.setupGrainSizeTypeClass(module, classEntity, table('grain_size_types'), template)
    self.setupGrainSizeClass(module, classEntity, table('grain_sizes'), template)
    self.setupGrainSizeInBedClass(module, classEntity, table('grain_sizes_in_beds'), template)
    self.setupLithologicUnitTypeClass(module, classEntity, table('lithologic_unit_types'), template)
    self.setupLithologicUnitClass(module, classEntity, table('lithologic_units'), template)
    self.setupLithologicUnitInBedClass(module, classEntity, table('lithologic_units_in_beds'), template)
    self.setupSedimentologicUnitTypeClass(module, classEntity, table('sedimentologic_unit_types'), template)
    self.setupSedimentologicUnitClass(module, classEntity, table('sedimentologic_units'), template)
    self.setupSedimentologicUnitInBedClass(module, classEntity, table('sedimentologic_units_in_beds'), template)
    self.setupStratigraphicUnitTypeClass(module, classEntity, table('stratigraphic_unit_types'), template)
    self.setupStratigraphicUnitClass(module, classEntity, table('stratigraphic_units'), template)
    self.setupStratigraphicUnitInBedClass(module, classEntity, table('stratigraphic_units_in_beds'), template)
    self.setupColorClass(module, classEntity, table('colors'), template)
    self.setupColorInBedClass(module, classEntity, table('colors_in_beds'), template)
    self.setupOutcropTypeClass(module, classEntity, table('outcrop_types'), template)
    self.setupOutcropTypeInBedClass(module, classEntity, table('outcrop_types_in_beds'), template)
    self.setupFaciesClass(module, classEntity, table('facies'), template)
    self.setupFaciesInBedClass(module, classEntity, table('facies_in_beds'), template)
    self.setupLithologyClass(module, classEntity, table('lithologies'), template)
    self.setupLithologyInBedClass(module, classEntity, table('lithologies_in_beds'), template)
    self.setupFossilClass(module, classEntity, table('fossils'), template)
    self.setupFossilInBedClass(module, classEntity, table('fossils_in_beds'), template)
    self.setupBeddingTypeClass(module, classEntity, table('bedding_types'), template)
    self.setupBeddingTypeInBedClass(module, classEntity, table('bedding_types_in_beds'), template)
    self.setupBoundaryTypeClass(module, classEntity, table('boundary_types'), template)
    self.setupBoundaryTypeInBedClass(module, classEntity, table('boundary_types_in_beds'), template)
    self.setupSedimentStructureClass(module, classEntity, table('sediment_structures'), template)
    self.setupSedimentStructureInBedClass(module, classEntity, table('sediment_structures_in_beds'), template)
    self.setupCustomSymbolClass(module, classEntity, table('custom_symbols'), template)
    self.setupCustomSymbolInBedClass(module, classEntity, table('custom_symbols_in_beds'), template)
    self.setupProfileClass(module, classEntity, table('profiles'), template)
    self.setupFieldBookClass(module, classEntity, table('field_books'), template)
    self.setupFieldBookEntryClass(module, classEntity, table('field_book_entries'), template)
    self.setupBedClass(module, classEntity, table('beds'), template)
    self.setupProfileColumnClass(module, classEntity, table('profile_columns'), template)
    self.setupProfileColumnInProfileClass(module, classEntity, table('profile_columns_in_profiles'), template)
def setupProfileColumnClass(self, module, baseClass, table, template):
    """Create the ProfileColumn entity class (id/name/description fields only)."""
    c = module.createClass('ProfileColumn', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
def setupProfileColumnInProfileClass(self, module, baseClass, table, template):
    """Create the ProfileColumnInProfile link entity: a column placed at a position within a profile."""
    c = module.createClass('ProfileColumnInProfile', baseClass, table, template=template)
    c.createField(table.column('profile_id'), 'profile', backrefName='profileColumns', relationClass='Profile', cascade='all')
    # NOTE(review): backrefName='profile' (singular) on the ProfileColumn side looks odd
    # for a one-to-many backref — confirm intended.
    c.createField(table.column('profile_column_id'), 'profileColumn', backrefName='profile', relationClass='ProfileColumn', cascade='all')
    c.createField(table.column('position'), 'position')
    # Default ordering: by position within the profile.
    c.addSortOrder(c.field('position'), ascending=True)
def setupGraphicPrimitiveClass(self, module, baseClass, table, template):
    """Create the GraphicPrimitive entity class: named SVG artwork plus its original file path."""
    c = module.createClass('GraphicPrimitive', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('svg_data'), 'svgData')
    c.createField(table.column('original_path'), 'originalPath')
def setupTectonicUnitTypeClass(self, module, baseClass, table, template):
    """Create the TectonicUnitType entity class, owned by a project and sorted by name."""
    c = module.createClass('TectonicUnitType', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='tectonic_unit_types', relationClass='Project', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupTectonicUnitClass(self, module, baseClass, table, template):
    """Create the TectonicUnit entity class: typed, project-owned, rendered by a graphic primitive."""
    c = module.createClass('TectonicUnit', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='tectonic_units', relationClass='Project', cascade='all')
    c.createField(table.column('tectonic_unit_type_id'), 'tectonicUnitType', backrefName='tectonicUnits', relationClass='TectonicUnitType', cascade='all')
    c.createField(table.column('graphic_primitive_id'), 'graphicPrimitive', backrefName='tectonicUnits', relationClass='GraphicPrimitive', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupTectonicUnitInBedClass(self, module, baseClass, table, template):
    """Create the TectonicUnitInBed link entity: a tectonic unit over a base..top span of a bed."""
    c = module.createClass('TectonicUnitInBed', baseClass, table, createIdField=True, createDescriptionField=True, template=template)
    c.createField(table.column('base'), 'base')
    c.createField(table.column('top'), 'top')
    # NOTE(review): field/backref names here are snake_case ('tectonic_units', 'tectonic_unit')
    # while sibling classes use camelCase — confirm this inconsistency is intended.
    c.createField(table.column('bed_id'), 'bed', backrefName='tectonic_units', relationClass='Bed', cascade='all')
    c.createField(table.column('tectonic_unit_id'), 'tectonic_unit', backrefName='beds', relationClass='TectonicUnit', cascade='all')
def setupGrainSizeTypeClass(self, module, baseClass, table, template):
    """Create the GrainSizeType entity class, owned by a project and sorted by name."""
    c = module.createClass('GrainSizeType', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='grain_size_types', relationClass='Project', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupGrainSizeClass(self, module, baseClass, table, template):
    """Create the GrainSize entity class: typed, project-owned, with a percent-of-max size value."""
    c = module.createClass('GrainSize', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='grain_sizes', relationClass='Project', cascade='all')
    c.createField(table.column('grain_size_type_id'), 'grainSizeType', backrefName='grainSizes', relationClass='GrainSizeType', cascade='all')
    c.createField(table.column('graphic_primitive_id'), 'graphicPrimitive', backrefName='grainSizes', relationClass='GraphicPrimitive', cascade='all')
    c.createField(table.column('percent_from_max'), 'percentFromMax')
    c.addSortOrder(c.field('name'), ascending=True)
def setupGrainSizeInBedClass(self, module, baseClass, table, template):
    """Create the GrainSizeInBed link entity: a grain size over a base..top span of a bed."""
    c = module.createClass('GrainSizeInBed', baseClass, table, createIdField=True, createDescriptionField=True, template=template)
    c.createField(table.column('base'), 'base')
    c.createField(table.column('top'), 'top')
    c.createField(table.column('bed_id'), 'bed', backrefName='grain_sizes', relationClass='Bed', cascade='all')
    c.createField(table.column('grain_size_id'), 'grain_size', backrefName='beds', relationClass='GrainSize', cascade='all')
def setupSedimentologicUnitTypeClass(self, module, baseClass, table, template):
    """Create the SedimentologicUnitType entity class, owned by a project and sorted by name."""
    c = module.createClass('SedimentologicUnitType', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='sedimentologic_unit_types', relationClass='Project', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupSedimentologicUnitClass(self, module, baseClass, table, template):
    """Create the SedimentologicUnit entity class: typed, project-owned, rendered by a graphic primitive."""
    c = module.createClass('SedimentologicUnit', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='sedimentologic_units', relationClass='Project', cascade='all')
    c.createField(table.column('sedimentologic_unit_type_id'), 'sedimentologicUnitType', backrefName='sedimentologicUnits', relationClass='SedimentologicUnitType', cascade='all')
    c.createField(table.column('graphic_primitive_id'), 'graphicPrimitive', backrefName='sedimentologicUnits', relationClass='GraphicPrimitive', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupSedimentologicUnitInBedClass(self, module, baseClass, table, template):
    """Create the SedimentologicUnitInBed link entity: a sedimentologic unit over a base..top span of a bed."""
    c = module.createClass('SedimentologicUnitInBed', baseClass, table, createIdField=True, createDescriptionField=True, template=template)
    c.createField(table.column('base'), 'base')
    c.createField(table.column('top'), 'top')
    c.createField(table.column('bed_id'), 'bed', backrefName='sedimentologic_units', relationClass='Bed', cascade='all')
    c.createField(table.column('sedimentologic_unit_id'), 'sedimentologic_unit', backrefName='beds', relationClass='SedimentologicUnit', cascade='all')
def setupStratigraphicUnitTypeClass(self, module, baseClass, table, template):
    """Create the StratigraphicUnitType entity class, owned by a project and sorted by name."""
    c = module.createClass('StratigraphicUnitType', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='stratigraphic_unit_types', relationClass='Project', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupStratigraphicUnitClass(self, module, baseClass, table, template):
    """Create the StratigraphicUnit entity class: typed, project-owned, rendered by a graphic primitive."""
    c = module.createClass('StratigraphicUnit', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='stratigraphic_units', relationClass='Project', cascade='all')
    c.createField(table.column('stratigraphic_unit_type_id'), 'stratigraphicUnitType', backrefName='stratigraphicUnits', relationClass='StratigraphicUnitType', cascade='all')
    c.createField(table.column('graphic_primitive_id'), 'graphicPrimitive', backrefName='stratigraphicUnits', relationClass='GraphicPrimitive', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupStratigraphicUnitInBedClass(self, module, baseClass, table, template):
    """Create the StratigraphicUnitInBed link entity: a stratigraphic unit over a base..top span of a bed."""
    c = module.createClass('StratigraphicUnitInBed', baseClass, table, createIdField=True, createDescriptionField=True, template=template)
    c.createField(table.column('base'), 'base')
    c.createField(table.column('top'), 'top')
    c.createField(table.column('bed_id'), 'bed', backrefName='stratigraphic_units', relationClass='Bed', cascade='all')
    c.createField(table.column('stratigraphic_unit_id'), 'stratigraphic_unit', backrefName='beds', relationClass='StratigraphicUnit', cascade='all')
def setupLithologicUnitTypeClass(self, module, baseClass, table, template):
    """Define the per-project catalogue of lithologic-unit types, sorted by name."""
    klass = module.createClass(
        'LithologicUnitType', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='lithologic_unit_types',
                      relationClass='Project', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupLithologicUnitClass(self, module, baseClass, table, template):
    """Define a project's lithologic unit, typed and linked to a drawing symbol."""
    klass = module.createClass(
        'LithologicUnit', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='lithologic_units',
                      relationClass='Project', cascade='all')
    klass.createField(table.column('lithologic_unit_type_id'),
                      'lithologicUnitType', backrefName='lithologicUnits',
                      relationClass='LithologicUnitType', cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='lithologicUnits',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupLithologicUnitInBedClass(self, module, baseClass, table, template):
    """Association LithologicUnit<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'LithologicUnitInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='lithologic_units', relationClass='Bed',
                      cascade='all')
    klass.createField(table.column('lithologic_unit_id'), 'lithologic_unit',
                      backrefName='beds', relationClass='LithologicUnit',
                      cascade='all')
def setupColorClass(self, module, baseClass, table, template):
    """Define the per-project Color lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'Color', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='colors', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='colors', relationClass='GraphicPrimitive',
                      cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupColorInBedClass(self, module, baseClass, table, template):
    """Association Color<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'ColorInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='colors', relationClass='Bed', cascade='all')
    klass.createField(table.column('color_id'), 'color',
                      backrefName='beds', relationClass='Color', cascade='all')
def setupOutcropTypeClass(self, module, baseClass, table, template):
    """Define the per-project OutcropType lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'OutcropType', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='outcrop_types', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='outcrop_types',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupOutcropTypeInBedClass(self, module, baseClass, table, template):
    """Association OutcropType<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'OutcropTypeInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='outcrop_types', relationClass='Bed',
                      cascade='all')
    klass.createField(table.column('outcrop_type_id'), 'outcrop_type',
                      backrefName='beds', relationClass='OutcropType',
                      cascade='all')
def setupFaciesClass(self, module, baseClass, table, template):
    """Define the per-project Facies lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'Facies', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='facies', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='facies', relationClass='GraphicPrimitive',
                      cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupFaciesInBedClass(self, module, baseClass, table, template):
    """Association Facies<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'FaciesInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='facies', relationClass='Bed', cascade='all')
    klass.createField(table.column('facies_id'), 'facies',
                      backrefName='beds', relationClass='Facies', cascade='all')
def setupLithologyClass(self, module, baseClass, table, template):
    """Define the per-project Lithology lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'Lithology', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='lithologies', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='lithologies',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupLithologyInBedClass(self, module, baseClass, table, template):
    """Association Lithology<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'LithologyInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='lithologies', relationClass='Bed',
                      cascade='all')
    klass.createField(table.column('lithology_id'), 'lithology',
                      backrefName='beds', relationClass='Lithology',
                      cascade='all')
def setupFossilClass(self, module, baseClass, table, template):
    """Define the per-project Fossil lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'Fossil', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='fossils', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='fossils', relationClass='GraphicPrimitive',
                      cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupFossilInBedClass(self, module, baseClass, table, template):
    """Association Fossil<->Bed carrying a base/top interval."""
    klass = module.createClass(
        'FossilInBed', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    for interval_col in ('base', 'top'):
        klass.createField(table.column(interval_col), interval_col)
    klass.createField(table.column('bed_id'), 'bed',
                      backrefName='fossils', relationClass='Bed', cascade='all')
    klass.createField(table.column('fossil_id'), 'fossil',
                      backrefName='beds', relationClass='Fossil', cascade='all')
def setupBeddingTypeClass(self, module, baseClass, table, template):
    """Define the per-project BeddingType lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'BeddingType', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='beddingTypes', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='beddingTypes',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupBeddingTypeInBedClass(self, module, baseClass, table, template):
    """Association BeddingType<->Bed carrying a base/top interval.

    Fix: the ``bed`` relation used ``backrefName='colors'`` (copy-paste from
    setupColorInBedClass), which collides with ColorInBed's backref on Bed.
    It now follows the snake-plural convention of the other *InBed classes
    (cf. 'fossils', 'outcrop_types').
    """
    c = module.createClass('BeddingTypeInBed', baseClass, table,
                           createIdField=True, createDescriptionField=True,
                           template=template)
    c.createField(table.column('base'), 'base')  # interval bottom within the bed
    c.createField(table.column('top'), 'top')    # interval top within the bed
    c.createField(table.column('bed_id'), 'bed', backrefName='bedding_types',
                  relationClass='Bed', cascade='all')
    c.createField(table.column('bedding_type_id'), 'beddingType',
                  backrefName='beds', relationClass='BeddingType', cascade='all')
def setupBoundaryTypeClass(self, module, baseClass, table, template):
    """Define the per-project BoundaryType lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'BoundaryType', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='boundaryTypes', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='boundaryTypes',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupBoundaryTypeInBedClass(self, module, baseClass, table, template):
    """Association BoundaryType<->Bed carrying a base/top interval.

    Fix: the ``bed`` relation used ``backrefName='colors'`` (copy-paste from
    setupColorInBedClass), colliding with ColorInBed's backref on Bed; use
    the snake-plural of this association's subject instead.
    """
    c = module.createClass('BoundaryTypeInBed', baseClass, table,
                           createIdField=True, createDescriptionField=True,
                           template=template)
    c.createField(table.column('base'), 'base')  # interval bottom within the bed
    c.createField(table.column('top'), 'top')    # interval top within the bed
    c.createField(table.column('bed_id'), 'bed', backrefName='boundary_types',
                  relationClass='Bed', cascade='all')
    c.createField(table.column('boundary_type_id'), 'boundaryType',
                  backrefName='beds', relationClass='BoundaryType', cascade='all')
def setupSedimentStructureClass(self, module, baseClass, table, template):
    """Define the per-project SedimentStructure lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'SedimentStructure', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='sedimentStructures', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='sedimentStructures',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupSedimentStructureInBedClass(self, module, baseClass, table, template):
    """Association SedimentStructure<->Bed carrying a base/top interval.

    Fix: the ``bed`` relation used ``backrefName='colors'`` (copy-paste from
    setupColorInBedClass), colliding with ColorInBed's backref on Bed; use
    the snake-plural of this association's subject instead.
    """
    c = module.createClass('SedimentStructureInBed', baseClass, table,
                           createIdField=True, createDescriptionField=True,
                           template=template)
    c.createField(table.column('base'), 'base')  # interval bottom within the bed
    c.createField(table.column('top'), 'top')    # interval top within the bed
    c.createField(table.column('bed_id'), 'bed',
                  backrefName='sediment_structures', relationClass='Bed',
                  cascade='all')
    c.createField(table.column('sediment_structure_id'), 'sedimentStructure',
                  backrefName='beds', relationClass='SedimentStructure',
                  cascade='all')
def setupCustomSymbolClass(self, module, baseClass, table, template):
    """Define the per-project CustomSymbol lookup, with a drawing symbol, sorted by name."""
    klass = module.createClass(
        'CustomSymbol', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='customSymbols', relationClass='Project',
                      cascade='all')
    klass.createField(table.column('graphic_primitive_id'), 'graphicPrimitive',
                      backrefName='customSymbols',
                      relationClass='GraphicPrimitive', cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupCustomSymbolInBedClass(self, module, baseClass, table, template):
    """Association CustomSymbol<->Bed carrying a base/top interval.

    Fix: the ``bed`` relation used ``backrefName='colors'`` (copy-paste from
    setupColorInBedClass), colliding with ColorInBed's backref on Bed; use
    the snake-plural of this association's subject instead.
    """
    c = module.createClass('CustomSymbolInBed', baseClass, table,
                           createIdField=True, createDescriptionField=True,
                           template=template)
    c.createField(table.column('base'), 'base')  # interval bottom within the bed
    c.createField(table.column('top'), 'top')    # interval top within the bed
    c.createField(table.column('bed_id'), 'bed', backrefName='custom_symbols',
                  relationClass='Bed', cascade='all')
    c.createField(table.column('custom_symbol_id'), 'customSymbol',
                  backrefName='beds', relationClass='CustomSymbol',
                  cascade='all')
def setupLengthUnitClass(self, module, baseClass, table, template):
    """Define a length unit whose magnitude is stored in the 'micro_metres' column."""
    klass = module.createClass(
        'LengthUnit', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('micro_metres'), 'microMetres')
def setupBedClass(self, module, baseClass, table, template):
    # Core Bed entity of a Profile: explicitly mapped id, an ordering
    # 'position', a display 'bedNumber', and a height with its LengthUnit.
    c = module.createClass('Bed', baseClass, table, template=template)
    c.createField(table.column('id'), 'id')
    c.createField(table.column('position'), 'position')  # ordering of the bed within its profile (presumably — confirm against callers)
    c.createField(table.column('bed_number'), 'bedNumber')
    c.createField(table.column('profile_id'), 'profile', backrefName='beds', relationClass='Profile', cascade='all')
    c.createField(table.column('height'), 'height')
    # NOTE(review): 'heightLenghtUnit' misspells "Length"; kept as-is because
    # renaming the mapped attribute would break existing callers — confirm
    # before fixing. Also the only relation in this class without a backrefName.
    c.createField(table.column('height_length_unit_id'), 'heightLenghtUnit', relationClass='LengthUnit', cascade='all')
def setupFieldBookEntryClass(self, module, baseClass, table, template):
    """Define a single entry of a FieldBook (id mapped explicitly from the table)."""
    klass = module.createClass('FieldBookEntry', baseClass, table,
                               template=template)
    klass.createField(table.column('id'), 'id')
    klass.createField(table.column('field_book_id'), 'fieldBook',
                      backrefName='entries', relationClass='FieldBook',
                      cascade='all')
def setupFieldBookClass(self, module, baseClass, table, template):
    """Define a titled FieldBook, listed alphabetically by title."""
    klass = module.createClass(
        'FieldBook', baseClass, table, template=template,
        createIdField=True, createDescriptionField=True)
    klass.createField(table.column('title'), 'title')
    klass.addSortOrder(klass.field('title'), ascending=True)
def setupColorClass(self, module, baseClass, table, template):
    # NOTE(review): duplicate definition — an identical setupColorClass is
    # defined earlier in this class; Python silently lets this later
    # definition replace the earlier one. One of the two should be removed —
    # confirm which is canonical before deleting.
    c = module.createClass('Color', baseClass, table, createIdField=True, createNameField=True, createDescriptionField=True, template=template)
    c.createField(table.column('project_id'), 'project', backrefName='colors', relationClass='Project', cascade='all')
    c.createField(table.column('graphic_primitive_id'), 'graphicPrimitive', backrefName='colors', relationClass='GraphicPrimitive', cascade='all')
    c.addSortOrder(c.field('name'), ascending=True)
def setupProjectClass(self, module, baseClass, table, template):
    """Define the root Project entity, sorted by name."""
    klass = module.createClass(
        'Project', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.addSortOrder(klass.field('name'), ascending=True)
def setupProfileClass(self, module, baseClass, table, template):
    """Define a project's Profile (ordered collection of beds), sorted by name."""
    klass = module.createClass(
        'Profile', baseClass, table, template=template,
        createIdField=True, createNameField=True, createDescriptionField=True)
    klass.createField(table.column('project_id'), 'project',
                      backrefName='profiles', relationClass='Project',
                      cascade='all')
    klass.addSortOrder(klass.field('name'), ascending=True)
|
'''
Created on 2020-09-21 11:55:42
Last modified on 2020-09-22 07:57:56
@author: L. F. Pereira (lfpereira@fe.up.pt)
Main goal
---------
Show how the supercompressible example can be coded in a non object-oriented
strategy.
'''
# imports
# abaqus
from caeModules import * # allow noGui
from abaqus import mdb, backwardCompatibility
from abaqusConstants import (THREE_D, DEFORMABLE_BODY, ON, OFF, STANDARD,
WHOLE_SURFACE, KINEMATIC, STANDALONE,
MIDDLE_SURFACE, FROM_SECTION,
CARTESIAN, IMPRINT, CONSTANT, BEFORE_ANALYSIS,
N1_COSINES, B31, FINER, ANALYTIC_RIGID_SURFACE,
NODAL)
import mesh
# standard library
import os
import sys
import re
# third-party
import numpy as np
# function definition
# TODO: create general supercompressible with if
def lin_buckle(name, job_name, n_longerons, bottom_diameter, top_diameter,
               pitch, young_modulus, shear_modulus, cross_section_props,
               twist_angle=0., transition_length_ratio=1., n_storeys=1,
               power=1., include_name='include_mesh', **kwargs):
    '''
    Mesh the supercompressible mast and write the linear-buckling input file.

    Creates an Abaqus model called ``name``, meshes the structure into the
    include file ``<include_name>.inp`` (via ``_meshing``) and then writes
    ``<job_name>.inp`` containing a 20-mode Lanczos *BUCKLE step with the
    bottom reference point clamped and a unit compressive load on the top one.

    Parameters
    ----------
    name : str
        Abaqus model name.
    job_name : str
        Basename of the generated ``<job_name>.inp``.
    n_longerons : int
        Number of longerons (vertices of the polygon base).
    bottom_diameter, top_diameter : float
        Diameters used to derive the cone slope of the longerons.
    pitch : float
        Pitch length of the strut.
    young_modulus, shear_modulus : float
        Longeron material moduli.
    cross_section_props : dict
        Cross-section description forwarded to ``_meshing``.
    '''
    # create model
    model = mdb.Model(name=name)
    backwardCompatibility.setValues(reportDeprecated=False)
    # drop the default model Abaqus creates on startup
    if 'Model-1' in mdb.models.keys():
        del mdb.models['Model-1']
    # meshing
    # NOTE(review): twist_angle, transition_length_ratio, n_storeys and power
    # are accepted here but NOT forwarded to _meshing (its defaults apply) —
    # confirm this is intended.
    cone_slope = (bottom_diameter - top_diameter) / bottom_diameter
    _meshing(model, n_longerons, bottom_diameter, pitch, cross_section_props,
             young_modulus, shear_modulus, cone_slope, include_name=include_name)
    # create linear buckling inp (plain keyword file referencing the mesh include)
    with open('{}.inp'.format(job_name), 'w') as File:
        File.write('** Include file with mesh of structure:\n')
        File.write('*INCLUDE, INPUT={}.inp\n'.format(include_name))
        File.write('** \n')
        File.write('** STEP: Step-1\n')
        File.write('** \n')
        File.write('*Step, name=Step-1\n')
        # 20 buckling modes requested from the Lanczos eigensolver
        File.write('*Buckle, eigensolver=lanczos\n')
        File.write('20, 0., , , \n')
        File.write('** \n')
        File.write('** BOUNDARY CONDITIONS\n')
        File.write('** \n')
        File.write('** Name: BC_Zminus Type: Displacement/Rotation\n')
        # clamp all six dofs of the bottom reference point
        File.write('*Boundary\n')
        File.write('RP_ZmYmXm, 1, 6\n')
        File.write('** \n')
        File.write('** LOADS\n')
        File.write('** \n')
        File.write('** Name: Applied_Moment Type: Moment\n')
        # unit compressive load (dof 3) on the top reference point
        File.write('*Cload\n')
        File.write('RP_ZpYmXm, 3, -1.00\n')
        File.write('** \n')
        # write displacements to the .fil so riks() can seed the imperfection
        File.write('*Node File\n')
        File.write('U \n')
        File.write('** \n')
        File.write('*EL PRINT,FREQUENCY=1\n')
        File.write('*NODE PRINT,FREQUENCY=1\n')
        File.write('*MODAL FILE\n')
        File.write('*OUTPUT,FIELD,VAR=PRESELECT\n')
        File.write('*OUTPUT,HISTORY,FREQUENCY=1\n')
        File.write('*MODAL OUTPUT\n')
        File.write('*End Step\n')
def post_process_lin_buckle(odb):
    '''
    Post-process a linear-buckling odb.

    Parameters
    ----------
    odb : Abaqus Odb
        Output database of the buckling analysis (single step expected).

    Returns
    -------
    dict
        'loads'     : eigenvalue (critical load) per mode,
        'max_disps' : maximum |UR| over the whole structure per frame
                      (frame 0, the base state, included),
        'coilable'  : 1/0 per mode — 1 when the top reference point rotates
                      about z without translating in x/y.
    '''
    # initialization
    # raw string for the number-matching regex ('\.' and '\ ' are regex
    # escapes; the original non-raw literal only worked by accident)
    match_number = re.compile(r'-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *[-+]?\ *[0-9]+)?')
    # Determine the number of frames in the (first) step of the odb.
    mySteps = odb.steps
    stepKey = mySteps.keys()[0]
    step = mySteps[stepKey]
    numFrames = len(step.frames)
    #
    maxDisp = np.zeros((numFrames))
    RP_Zplus_nSet = odb.rootAssembly.nodeSets['RP_ZPYMXM']
    entireSTRUCTURE_nSet = odb.rootAssembly.nodeSets[' ALL NODES']
    #
    # Read critical buckling load of every mode (frame 0 is the base state)
    P_crit = np.zeros(numFrames - 1)
    coilable = np.zeros(numFrames - 1)
    for iFrame_step in range(1, numFrames):
        MODELframe = odb.steps[mySteps.keys()[0]].frames[iFrame_step]
        # parse numbers out of the frame description
        eigenValue = [float(x) for x in re.findall(match_number, MODELframe.description)]
        # eigenValue[1] is the eigenValue for Mode = eigenValue[0]
        P_crit[iFrame_step - 1] = eigenValue[1]
        # Coilability check: the top RP must twist about z (UR3 != 0) without
        # translating in the x/y plane (U1 ~ U2 ~ 0).
        UR_Field = MODELframe.fieldOutputs['UR']
        UR_SubField = UR_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        UR_Mode1_RP_ZpYmXm = UR_SubField.values[0].data
        U_Field = MODELframe.fieldOutputs['U']
        U_SubField = U_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        U_Mode1_RP_ZpYmXm = U_SubField.values[0].data
        if (abs(UR_Mode1_RP_ZpYmXm[2]) > 1.0e-4) and (abs(U_Mode1_RP_ZpYmXm[0]) < 1.0e-4) and (abs(U_Mode1_RP_ZpYmXm[1]) < 1.0e-4):
            coilable[iFrame_step - 1] = 1
        else:
            coilable[iFrame_step - 1] = 0
    #
    # Preallocate max-UR storage; shape depends on whether UR is scalar or
    # a vector (probed on the last frame read above, as in the original).
    UR_Field = MODELframe.fieldOutputs['UR']
    UR_SubField = UR_Field.getSubset(region=entireSTRUCTURE_nSet, position=NODAL)
    #
    if isinstance(UR_SubField.values[0].data, float):
        # Then variable is a scalar
        max_UR = np.zeros((numFrames))
    else:
        # Variable is an array
        max_UR = np.zeros((numFrames, len(UR_SubField.values[0].data)))
    for iFrame_step in range(numFrames):
        # Read frame
        MODELframe = odb.steps[mySteps.keys()[0]].frames[iFrame_step]
        # Variable: UR
        UR_Field = MODELframe.fieldOutputs['UR']
        UR_SubField = UR_Field.getSubset(region=entireSTRUCTURE_nSet, position=NODAL)
        #
        if isinstance(UR_SubField.values[0].data, float):
            # Scalar: keep the largest magnitude.
            # FIX: the original stored abs(strainValue.data[0]) here, which
            # raises TypeError because .data is a float in this branch.
            for strainValue in UR_SubField.values:
                if abs(strainValue.data) > abs(max_UR[iFrame_step]):
                    max_UR[iFrame_step] = abs(strainValue.data)
        else:
            # Vector: keep the largest magnitude component-wise.
            # FIX: a duplicated (unconditional) assignment line was removed;
            # it would have clobbered the running maximum with the last value.
            for strainValue in UR_SubField.values:
                for j in range(0, len(UR_SubField.values[0].data)):
                    if abs(strainValue.data[j]) > abs(max_UR[iFrame_step, j]):
                        max_UR[iFrame_step, j] = abs(strainValue.data[j])
        # np.max handles both layouts; the original max(max_UR[iFrame_step, :])
        # raised IndexError for the 1-D (scalar) layout.
        maxDisp[iFrame_step] = np.max(max_UR[iFrame_step])
    STRUCTURE_variables = {'loads': P_crit, 'max_disps': maxDisp,
                           'coilable': coilable}
    return STRUCTURE_variables
def riks(job_name, imperfection, previous_model_results,
         include_name='include_mesh', **kwargs):
    '''
    Write the RIKS (post-buckling) analysis input file ``<job_name>.inp``.

    Parameters
    ----------
    job_name : str
        Basename of the generated input file.
    imperfection : float
        Amplitude of the geometric imperfection seeded from buckling mode 1.
    previous_model_results : dict
        Output of ``post_process_lin_buckle``; ``'max_disps'[1]`` (maximum
        modal displacement of mode 1) scales the imperfection amplitude.
    include_name : str
        Basename of the mesh include file written by ``_meshing``.

    Raises
    ------
    FileNotFoundError
        If no ``.fil`` results file from the linear buckling run is found.

    Notes
    -----
    1. Assumes linear buckling analyses was run previously.
    '''
    # locate the .fil results file left behind by the buckling run
    # (FIX: the original left fil_filename unbound — NameError — when no
    # .fil existed; fail early with a clear message instead)
    fil_filename = None
    for name in os.listdir('.'):
        if name.endswith('.fil'):
            fil_filename = os.path.splitext(name)[0]
            break
    if fil_filename is None:
        raise FileNotFoundError(
            'No .fil file found in the current directory; run the linear '
            'buckling analysis first.')
    # create riks inp
    with open('{}.inp'.format(job_name), 'w') as File:
        # if previous_model_results['coilable'][0] == 0:
        #     sys.exit()  # do not bother running the RIKS analysis because the material will not coil...
        File.write('** Include file with mesh of structure:\n')
        File.write('*INCLUDE, INPUT={}.inp\n'.format(include_name))
        File.write('** \n')
        File.write('** INTERACTION PROPERTIES\n')
        File.write('** \n')
        File.write('*SURFACE INTERACTION,NAME=IMP_TARG\n')
        File.write('1.,\n')
        File.write('*Surface Behavior, no separation, pressure-overclosure=HARD\n')
        File.write('***Surface Interaction, name=IntProp-1\n')
        File.write('** \n')
        File.write('** INTERACTIONS\n')
        File.write('** \n')
        File.write('***CONTACT PAIR,INTERACTION=IMP_TARG\n')
        File.write('**longerons-1-1.all_longerons_surface, AnalyticSurf-1-1.rigid_support\n')
        File.write('** Interaction: Int-1\n')
        File.write('*Contact Pair, interaction=IMP_TARG, type=SURFACE TO SURFACE, no thickness\n')
        File.write('longerons-1-1.all_longerons_surface, AnalyticSurf-1-1.rigid_support\n')
        File.write('**\n')
        # imperfection: mode 1 of the buckling run, scaled so its maximum
        # displacement equals `imperfection`
        File.write('** Seed an imperfection:\n')
        File.write('*IMPERFECTION, FILE={}, STEP=1\n'.format(fil_filename))
        mode_amplitude = imperfection / previous_model_results['max_disps'][1]
        File.write('1, ' + str(mode_amplitude) + '\n')
        File.write('** \n')
        File.write('** STEP: Step-1\n')
        File.write('** \n')
        File.write('*Step, name=Step-RIKS, nlgeom=YES, inc=400\n')
        File.write('*Static, riks\n')
        File.write('5.0e-2,1.0,,0.5\n')
        File.write('** \n')
        File.write('** BOUNDARY CONDITIONS\n')
        File.write('** \n')
        File.write('** Name: BC_Zminus Type: Displacement/Rotation\n')
        File.write('*Boundary\n')
        File.write('RP_ZmYmXm, 1, 6\n')
        File.write('** Name: BC_Zplus Type: Displacement/Rotation\n')
        File.write('*Boundary, type=displacement\n')
        File.write('RP_ZpYmXm, 3, 3, -1.23858e+02\n')
        File.write('** \n')
        File.write('** \n')
        File.write('** OUTPUT REQUESTS\n')
        File.write('** \n')
        File.write('** FIELD OUTPUT: F-Output-1\n')
        File.write('** \n')
        File.write('*Output, field, variable=PRESELECT, frequency=1\n')
        File.write('** \n')
        File.write('** HISTORY OUTPUT: H-Output-2\n')
        File.write('** \n')
        File.write('*Output, history, frequency=1\n')
        File.write('*Node Output, nset=RP_ZmYmXm\n')
        File.write('RF1, RF2, RF3, RM1, RM2, RM3, U1, U2\n')
        File.write('U3, UR1, UR2, UR3\n')
        File.write('** \n')
        File.write('** HISTORY OUTPUT: H-Output-3\n')
        File.write('** \n')
        File.write('*Node Output, nset=RP_ZpYmXm\n')
        File.write('RF1, RF2, RF3, RM1, RM2, RM3, U1, U2\n')
        File.write('U3, UR1, UR2, UR3\n')
        File.write('** \n')
        File.write('** HISTORY OUTPUT: H-Output-1\n')
        File.write('** \n')
        File.write('*Output, history, variable=PRESELECT, frequency=1\n')
        File.write('*End Step\n')
def post_process_riks(odb):
    # Extract the U/UR/RF/RM histories at the two reference points
    # (top RP_ZpYmXm and bottom RP_ZmYmXm) of a RIKS odb, frame by frame.
    # Only the top-RP arrays are returned; the bottom-RP arrays are filled
    # but unused — presumably kept for debugging; confirm before removing.
    # Determine the number of steps in the output database.
    mySteps = odb.steps
    numSteps = len(mySteps)
    #
    RP_Zplus_nSet = odb.rootAssembly.nodeSets['RP_ZPYMXM']
    RP_Zminus_nSet = odb.rootAssembly.nodeSets['RP_ZMYMXM']
    #
    # For each step, obtain the following:
    # 1) The step key.
    # 2) The number of frames in the step.
    # 3) The increment number of the last frame in the step.
    #
    # Total frame count over ALL steps — used only to size the arrays below.
    totalNumFrames = 0
    for iStep in range(numSteps):
        stepKey = mySteps.keys()[iStep]
        step = mySteps[stepKey]
        numFrames = len(step.frames)
        totalNumFrames = totalNumFrames + numFrames
    #
    # Preallocate quantities for speed
    MODELframe = odb.steps[mySteps.keys()[0]].frames[0]  # Undeformed config.
    # Probe each field on the undeformed frame: a scalar value gets a 1-D
    # array, a vector value gets a (frames x components) array.
    U_Field = MODELframe.fieldOutputs['U']
    U_RP_Zplus_SubField = U_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
    U_RP_Zminus_SubField = U_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
    #
    if isinstance(U_RP_Zplus_SubField.values[0].data, float):
        # Then variable is a scalar
        U_RP_Zplus = np.zeros((totalNumFrames))
        U_RP_Zminus = np.zeros((totalNumFrames))
    else:
        # Variable is an array
        U_RP_Zplus = np.zeros((totalNumFrames, len(U_RP_Zplus_SubField.values[0].data)))
        U_RP_Zminus = np.zeros((totalNumFrames, len(U_RP_Zminus_SubField.values[0].data)))
    UR_Field = MODELframe.fieldOutputs['UR']
    UR_RP_Zplus_SubField = UR_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
    UR_RP_Zminus_SubField = UR_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
    #
    if isinstance(UR_RP_Zplus_SubField.values[0].data, float):
        # Then variable is a scalar
        UR_RP_Zplus = np.zeros((totalNumFrames))
        UR_RP_Zminus = np.zeros((totalNumFrames))
    else:
        # Variable is an array
        UR_RP_Zplus = np.zeros((totalNumFrames, len(UR_RP_Zplus_SubField.values[0].data)))
        UR_RP_Zminus = np.zeros((totalNumFrames, len(UR_RP_Zminus_SubField.values[0].data)))
    RF_Field = MODELframe.fieldOutputs['RF']
    RF_RP_Zplus_SubField = RF_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
    RF_RP_Zminus_SubField = RF_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
    #
    if isinstance(RF_RP_Zplus_SubField.values[0].data, float):
        # Then variable is a scalar
        RF_RP_Zplus = np.zeros((totalNumFrames))
        RF_RP_Zminus = np.zeros((totalNumFrames))
    else:
        # Variable is an array
        RF_RP_Zplus = np.zeros((totalNumFrames, len(RF_RP_Zplus_SubField.values[0].data)))
        RF_RP_Zminus = np.zeros((totalNumFrames, len(RF_RP_Zminus_SubField.values[0].data)))
    RM_Field = MODELframe.fieldOutputs['RM']
    RM_RP_Zplus_SubField = RM_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
    RM_RP_Zminus_SubField = RM_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
    #
    if isinstance(RM_RP_Zplus_SubField.values[0].data, float):
        # Then variable is a scalar
        RM_RP_Zplus = np.zeros((totalNumFrames))
        RM_RP_Zminus = np.zeros((totalNumFrames))
    else:
        # Variable is an array
        RM_RP_Zplus = np.zeros((totalNumFrames, len(RM_RP_Zplus_SubField.values[0].data)))
        RM_RP_Zminus = np.zeros((totalNumFrames, len(RM_RP_Zminus_SubField.values[0].data)))
    # Loop over the Frames of this Step to extract values of variables
    # NOTE(review): only the FIRST step's frames are read even though
    # totalNumFrames counts every step — a single-step odb appears to be
    # assumed; confirm.
    numFrames = 0
    stepKey = mySteps.keys()[0]
    step = mySteps[stepKey]
    numFrames = len(step.frames)
    #
    for iFrame_step in range(0, numFrames):
        MODELframe = odb.steps[stepKey].frames[iFrame_step]
        #
        # Variable: U (displacements at both reference points)
        U_Field = MODELframe.fieldOutputs['U']
        U_RP_Zplus_SubField = U_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        U_RP_Zminus_SubField = U_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
        #
        if isinstance(U_RP_Zplus_SubField.values[0].data, float):
            # Then variable is a scalar:
            U_RP_Zplus[iFrame_step] = U_RP_Zplus_SubField.values[0].data
            U_RP_Zminus[iFrame_step] = U_RP_Zminus_SubField.values[0].data
            #
        else:
            # Variable is an array:
            for j in range(0, len(U_RP_Zplus_SubField.values[0].data)):
                U_RP_Zplus[iFrame_step, j] = U_RP_Zplus_SubField.values[0].data[j]
                U_RP_Zminus[iFrame_step, j] = U_RP_Zminus_SubField.values[0].data[j]
        #
        #
        # Finished saving this variable!
        # Variable: UR (rotations at both reference points)
        UR_Field = MODELframe.fieldOutputs['UR']
        UR_RP_Zplus_SubField = UR_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        UR_RP_Zminus_SubField = UR_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
        #
        if isinstance(UR_RP_Zplus_SubField.values[0].data, float):
            # Then variable is a scalar:
            UR_RP_Zplus[iFrame_step] = UR_RP_Zplus_SubField.values[0].data
            UR_RP_Zminus[iFrame_step] = UR_RP_Zminus_SubField.values[0].data
            #
        else:
            # Variable is an array:
            for j in range(0, len(UR_RP_Zplus_SubField.values[0].data)):
                UR_RP_Zplus[iFrame_step, j] = UR_RP_Zplus_SubField.values[0].data[j]
                UR_RP_Zminus[iFrame_step, j] = UR_RP_Zminus_SubField.values[0].data[j]
        #
        #
        # Finished saving this variable!
        # Variable: RF (reaction forces at both reference points)
        RF_Field = MODELframe.fieldOutputs['RF']
        RF_RP_Zplus_SubField = RF_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        RF_RP_Zminus_SubField = RF_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
        #
        if isinstance(RF_RP_Zplus_SubField.values[0].data, float):
            # Then variable is a scalar:
            RF_RP_Zplus[iFrame_step] = RF_RP_Zplus_SubField.values[0].data
            RF_RP_Zminus[iFrame_step] = RF_RP_Zminus_SubField.values[0].data
            #
        else:
            # Variable is an array:
            for j in range(0, len(RF_RP_Zplus_SubField.values[0].data)):
                RF_RP_Zplus[iFrame_step, j] = RF_RP_Zplus_SubField.values[0].data[j]
                RF_RP_Zminus[iFrame_step, j] = RF_RP_Zminus_SubField.values[0].data[j]
        #
        #
        # Finished saving this variable!
        # Variable: RM (reaction moments at both reference points)
        RM_Field = MODELframe.fieldOutputs['RM']
        RM_RP_Zplus_SubField = RM_Field.getSubset(region=RP_Zplus_nSet, position=NODAL)
        RM_RP_Zminus_SubField = RM_Field.getSubset(region=RP_Zminus_nSet, position=NODAL)
        #
        if isinstance(RM_RP_Zplus_SubField.values[0].data, float):
            # Then variable is a scalar:
            RM_RP_Zplus[iFrame_step] = RM_RP_Zplus_SubField.values[0].data
            RM_RP_Zminus[iFrame_step] = RM_RP_Zminus_SubField.values[0].data
            #
        else:
            # Variable is an array:
            for j in range(0, len(RM_RP_Zplus_SubField.values[0].data)):
                RM_RP_Zplus[iFrame_step, j] = RM_RP_Zplus_SubField.values[0].data[j]
                RM_RP_Zminus[iFrame_step, j] = RM_RP_Zminus_SubField.values[0].data[j]
    # Only the top-reference-point histories are returned.
    STRUCTURE_variables = {'U': U_RP_Zplus, 'UR': UR_RP_Zplus,
                           'RF': RF_RP_Zplus, 'RM': RM_RP_Zplus}
    return STRUCTURE_variables
def _meshing(model, VertexPolygon, MastDiameter, MastPitch,
             cross_section_props, Emodulus, Gmodulus,
             ConeSlope, nStories=1, power=1., Twist_angle=0.,
             transition_length_ratio=1., include_name='include_mesh'):
    '''
    Build, mesh and export the deployable mast (longerons) model.

    Creates the longeron part and its joints, assigns a generalized beam
    section, meshes with B31 elements, builds an analytic rigid support
    surface, assembles everything with two reference points and kinematic
    couplings, and finally writes the mesh to an Abaqus ``.inp`` include
    file via ``mdb.Job(...).writeInput``.

    Relies on Abaqus/CAE names being imported at module level (THREE_D,
    NODAL, ON/OFF, CARTESIAN, ``mesh``, ``mdb``, ...) plus ``np``.

    Parameters
    ----------
    model : abaqus Model
        Model database object the parts/assembly are created in.
    VertexPolygon : int
        Number of vertices (sides) of the polygon base.
    MastDiameter : float
        Radius of the circumscribing circle of the polygon.
    MastPitch : float
        Pitch length of the strut (i.e. a single AstroMast!).
    cross_section_props : dict
        Stores the information about the cross-section. Specify the type
        of the cross section using 'type'. An empty 'type' will be
        understood as generalized cross section. Different types of
        sections are allowed:
            -'circular': requires 'd'
            -'generalized': requires 'Ixx', 'Iyy', 'J', 'area'
    Emodulus : float
        Young's Modulus.
    Gmodulus : float
        Shear Modulus.
    ConeSlope : float
        Slope of the longerons (0 = straight, <0 larger at the top, >0 larger
        at the bottom).
    nStories : int
        Number of stories in HALF of the strut (i.e. in a single AstroMast!).
    power : float
        Power law exponent establishing the evolution of the spacing between
        battens (only used by the commented-out spacing variants below).
    Twist_angle : float
        Do you want to twist the longerons?
    transition_length_ratio : float
        Transition zone for the longerons.
    include_name : str
        Name of the Abaqus job and of the ``.inp`` include file written.
    '''
    # initialization
    MastRadius = MastDiameter / 2.0
    MastHeight = nStories * MastPitch
    # TODO: change section assignment
    # NOTE(review): only the 'generalized' keys are read here; the
    # 'circular'/'type' options described in the docstring are not handled.
    Longeron_CS = cross_section_props['area']
    Ix = cross_section_props['Ixx']
    Iy = cross_section_props['Iyy']
    J = cross_section_props['J']
    Mesh_size = min(MastRadius, MastPitch) / 300.0
    # Create all the joints of a single Deployable Mast:
    joints = np.zeros((nStories + 1, VertexPolygon, 3))
    joints_outter = np.zeros((nStories + 1, VertexPolygon, 3))
    for iStorey in range(0, nStories + 1, 1):
        for iVertex in range(0, VertexPolygon, 1):
            # Constant spacing between each storey (linear evolution):
            Zcoord = MastHeight / nStories * iStorey
            # Power-law spacing between each storey (more frequent at the fixed end):
            # Zcoord = MastHeight*(float(iStorey)/float(nStories))**power
            # Power-law spacing between each storey (more frequent at the rotating end):
            # Zcoord = -MastHeight/(float(nStories)**power)*(float(nStories-iStorey)**power)+MastHeight
            # Exponential spacing between each storey
            # Zcoord =(MastHeight+1.0)/exp(float(nStories))*exp(float(iStorey))
            #
            # Twist is applied progressively and saturates at the end of the
            # transition zone (the min(...) clamps the twist fraction to 1).
            Xcoord = MastRadius * np.cos(2.0 * np.pi / VertexPolygon * iVertex + Twist_angle * min(Zcoord / MastHeight / transition_length_ratio, 1.0))
            Ycoord = MastRadius * np.sin(2.0 * np.pi / VertexPolygon * iVertex + Twist_angle * min(Zcoord / MastHeight / transition_length_ratio, 1.0))
            # Save point defining this joint (radius tapers with ConeSlope
            # until the end of the transition zone):
            joints[iStorey, iVertex, :] = (Xcoord * (1.0 - min(Zcoord, transition_length_ratio * MastHeight) / MastHeight * ConeSlope), Ycoord * (1.0 - min(Zcoord, transition_length_ratio * MastHeight) / MastHeight * ConeSlope), Zcoord)
            #
            # center = (0.0, 0.0)
            # vec = joints[iStorey, iVertex, 0:2] - center
            # norm_vec = np.linalg.norm(vec)
            # Currently the "outer" joints coincide with the joints.
            joints_outter[iStorey, iVertex, 2] = joints[iStorey, iVertex, 2]
            joints_outter[iStorey, iVertex, 0:2] = joints[iStorey, iVertex, 0:2]
        # end iSide loop
    # end iStorey loop
    # Create the longerons:
    p_longerons = model.Part(name='longerons', dimensionality=THREE_D,
                             type=DEFORMABLE_BODY)
    p_longerons = model.parts['longerons']
    # d_longerons, r_longerons = p_longerons.datums, p_longerons.referencePoints
    LocalDatum_list = []  # List with local coordinate system for each longeron
    long_midpoints = []  # List with midpoints of longerons (just to determine a set containing the longerons)
    e_long = p_longerons.edges
    for iVertex in range(0, VertexPolygon, 1):
        # First create local coordinate system (useful for future constraints, etc.):
        iStorey = 0
        origin = joints[iStorey, iVertex, :]
        point2 = joints[iStorey, iVertex - 1, :]
        name = 'Local_Datum_' + str(iVertex)
        LocalDatum_list.append(p_longerons.DatumCsysByThreePoints(origin=origin, point2=point2, name=name,
                                                                  coordSysType=CARTESIAN, point1=(0.0, 0.0, 0.0)))
        #
        # Then, create the longerons
        templist = []  # List that will contain the points used to make each longeron
        for iStorey in range(0, nStories + 1, 1):
            templist.append(joints[iStorey, iVertex, :])
            if iStorey != 0:  # Save midpoints of bars
                long_midpoints.append([(joints[iStorey - 1, iVertex, :] + joints[iStorey, iVertex, :]) / 2, ])
            # end if
        # end iStorey loop
        p_longerons.WirePolyLine(points=templist,
                                 mergeType=IMPRINT, meshable=ON)
        # Create set for each longeron (to assign local beam directions)
        for i in range(0, len(templist)):  # loop over longerons edges
            if i == 0:
                select_edges = e_long.findAt([templist[0], ])  # Find the first edge
            else:
                # Now find remaining edges in longerons
                temp = e_long.findAt([templist[i], ])
                select_edges = select_edges + temp
            # end if
        # end i loop
        longeron_name = 'longeron-' + str(iVertex) + '_set'
        p_longerons.Set(edges=select_edges, name=longeron_name)
    # end for iVertex loop
    # Longerons set:
    e_long = p_longerons.edges
    select_edges = []
    for i in range(0, len(long_midpoints)):  # loop over longerons edges
        if i == 0:
            select_edges = e_long.findAt(long_midpoints[0])  # Find the first edge
        else:
            # Now find remaining edges in longerons
            temp = e_long.findAt(long_midpoints[i])
            select_edges = select_edges + temp
        # end if
    # end i loop
    p_longerons.Set(edges=select_edges, name='all_longerons_set')
    all_longerons_set_edges = select_edges
    p_longerons.Surface(circumEdges=all_longerons_set_edges, name='all_longerons_surface')
    # Create a set with all the joints:
    v_long = p_longerons.vertices
    select_vertices = []
    select_top_vertices = []
    select_bot_vertices = []
    for iStorey in range(0, nStories + 1, 1):
        for iVertex in range(0, VertexPolygon, 1):
            # Select all the joints in the longerons:
            current_joint = v_long.findAt([joints[iStorey, iVertex, :], ])  # Find the first vertex
            current_joint_name = 'joint-' + str(iStorey) + '-' + str(iVertex)
            # Create a set for each joint:
            p_longerons.Set(vertices=current_joint, name=current_joint_name)
            #
            if iStorey == 0 and iVertex == 0:
                select_vertices = current_joint  # Instantiate the first point in set
            else:
                select_vertices = select_vertices + current_joint  # Append the remaining points
            # endif iStorey == 0 and iVertex == 0
            #
            if iStorey == 0:  # Also save the bottom nodes separately
                if iVertex == 0:
                    # Start selecting the bottom joints for implementing the boundary conditions
                    select_bot_vertices = current_joint
                else:
                    select_bot_vertices = select_bot_vertices + current_joint
                # endif iStorey == 0:
            elif iStorey == nStories:  # Also save the top nodes separately
                if iVertex == 0:
                    # Start selecting the top joints for implementing the boundary conditions
                    select_top_vertices = current_joint
                else:  # remaining vertices:
                    select_top_vertices = select_top_vertices + current_joint
            # end if
        # end iVertex loop
    # end iStorey loop
    p_longerons.Set(vertices=select_vertices, name='all_joints_set')
    p_longerons.Set(vertices=select_bot_vertices, name='bot_joints_set')
    p_longerons.Set(vertices=select_top_vertices, name='top_joints_set')
    #
    # Create materials:
    # model.Material(name='NiTi_alloy')
    # model.materials['NiTi_alloy'].Elastic(table=((83.0E3, 0.31),
    #                                              ))
    # model.materials['NiTi_alloy'].Density(table=((1.0E-3, ), ))
    # model.Material(name='PC')
    # model.materials['PC'].Elastic(table=((2134, 0.27),
    #                                      ))
    # model.materials['PC'].Density(table=((1.19E-3, ), ))
    # model.Material(name='PLA')
    # model.materials['PLA'].Elastic(table=((Emodulus, nu),
    #                                       ))
    # model.materials['PLA'].Density(table=((1.24E-3, ), ))
    # model.Material(name='CNT')
    # model.materials['CNT'].Elastic(table=((1000.0E3, 0.3),
    #                                       ))
    # model.materials['CNT'].Density(table=((1.0E-3, ), ))
    # Create beam profiles and beam sections:
    # NOTE(review): density=0.00124 and poissonRatio=0.31 are hard-coded
    # here while the moduli are parameters — confirm that is intended.
    model.GeneralizedProfile(name='LongeronsProfile', area=Longeron_CS, i11=Ix, i12=0.0, i22=Iy, j=J, gammaO=0.0, gammaW=0.0)
    model.BeamSection(name='LongeronsSection', integration=BEFORE_ANALYSIS, poissonRatio=0.31, beamShape=CONSTANT,
                      profile='LongeronsProfile', density=0.00124, thermalExpansion=OFF,
                      temperatureDependency=OFF, dependencies=0, table=((Emodulus, Gmodulus), ),
                      alphaDamping=0.0, betaDamping=0.0, compositeDamping=0.0, centroid=(0.0,
                      0.0), shearCenter=(0.0, 0.0), consistentMassMatrix=False)
    # Assign respective sections:
    p_longerons.SectionAssignment(offset=0.0,
                                  offsetField='', offsetType=MIDDLE_SURFACE, region=p_longerons.sets['all_longerons_set'],
                                  sectionName='LongeronsSection', thicknessAssignment=FROM_SECTION)
    # Assign beam orientation:
    for iVertex in range(0, VertexPolygon, 1):
        iStorey = 0
        dir_vec_n1 = joints[iStorey, iVertex, :] - (0., 0., 0.)  # Vector n1 perpendicular to the longeron tangent
        longeron_name = 'longeron-' + str(iVertex) + '_set'
        region = p_longerons.sets[longeron_name]
        p_longerons.assignBeamSectionOrientation(region=region, method=N1_COSINES, n1=dir_vec_n1)
    # end for iVertex
    #
    # delta = Mesh_size / 100.0
    ########################################################################
    # Mesh the structure
    # refPlane = p_longerons.DatumPlaneByPrincipalPlane(principalPlane=XYPLANE, offset=L/2)
    # d = p.datums
    # All_faces = facesLeafs+facesDoubleThickBoom
    # p.PartitionFaceByDatumPlane(datumPlane=d[refPlane.id], faces=All_faces)
    # #
    # p = model.parts['reducedCF_TRAC_boom']
    p_longerons.seedPart(size=Mesh_size, deviationFactor=0.04, minSizeFactor=0.001,
                         constraint=FINER)
    p_longerons.seedEdgeBySize(edges=all_longerons_set_edges, size=Mesh_size, deviationFactor=0.04,
                               constraint=FINER)
    elemType_longerons = mesh.ElemType(elemCode=B31, elemLibrary=STANDARD)  # 2-node linear beam elements
    p_longerons.setElementType(regions=(all_longerons_set_edges, ), elemTypes=(elemType_longerons, ))
    p_longerons.generateMesh()
    #######################################################################
    # Make Analytical surfaces for contact purposes
    s1 = model.ConstrainedSketch(name='__profile__',
                                 sheetSize=MastRadius * 3.0)
    # g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
    g = s1.geometry
    s1.setPrimaryObject(option=STANDALONE)
    s1.Line(point1=(0.0, -MastRadius * 1.1), point2=(0.0, MastRadius * 1.1))
    s1.VerticalConstraint(entity=g[2], addUndoState=False)
    p_surf = model.Part(name='AnalyticSurf', dimensionality=THREE_D,
                        type=ANALYTIC_RIGID_SURFACE)
    p_surf = model.parts['AnalyticSurf']
    p_surf.AnalyticRigidSurfExtrude(sketch=s1, depth=MastRadius * 2.2)
    s1.unsetPrimaryObject()
    rigid_face = p_surf.faces
    # surf_select = f.findAt((0.0,MastRadius*1.05,0.0))
    # surf_select = f[0]
    p_surf.Surface(side1Faces=rigid_face, name='rigid_support')
    # p_surf.Set(faces=surf_select, name='support_surface_set')
    # p_surf.sets['all_diagonals_set']
    #
    # Make assembly:
    a = model.rootAssembly
    a.DatumCsysByDefault(CARTESIAN)
    # Create reference points to assign boundary conditions
    RP_ZmYmXm = a.ReferencePoint(point=(0.0, 0.0, -1.1 * MastRadius))
    refpoint_ZmYmXm = (a.referencePoints[RP_ZmYmXm.id],)
    a.Set(referencePoints=refpoint_ZmYmXm, name='RP_ZmYmXm')
    #
    RP_ZpYmXm = a.ReferencePoint(point=(0.0, 0.0, MastHeight + 1.1 * MastRadius))
    refpoint_ZpYmXm = (a.referencePoints[RP_ZpYmXm.id],)
    a.Set(referencePoints=refpoint_ZpYmXm, name='RP_ZpYmXm')
    #
    # Create longerons
    a_long = a.Instance(name='longerons-1-1', part=p_longerons, dependent=ON)
    # Create bottom surface
    a_surf_bot = a.Instance(name='AnalyticSurf-1-1', part=p_surf, dependent=ON)
    # Now rotate the plane to have the proper direction
    a.rotate(instanceList=('AnalyticSurf-1-1', ), axisPoint=(0.0, 0.0, 0.0),
             axisDirection=(0.0, 1.0, 0.0), angle=90.0)
    #
    # Create set with surface
    select_bot_surf = a_surf_bot.surfaces['rigid_support']
    # Perhaps we need to define a set instead of a face
    # AnalyticSurf_surface=a_surf_bot.Surface(side1Faces=select_bot_surf, name='support_surf_bot-1')
    model.RigidBody(name='Constraint-RigidBody_surf_bot-1', refPointRegion=refpoint_ZmYmXm,
                    surfaceRegion=select_bot_surf)
    for iVertex in range(0, VertexPolygon, 1):
        #
        # Select appropriate coordinate system:
        DatumID = LocalDatum_list[iVertex].id
        datum = a_long.datums[DatumID]
        for iStorey in range(0, nStories + 1, 1):
            # Current joint:
            current_joint_name = 'joint-' + str(iStorey) + '-' + str(iVertex)
            # Define COUPLING constraints for all the joints:
            if iStorey == 0:  # Bottom base:
                #
                master_region = a.sets['RP_ZmYmXm']  # Note that the master is the Reference Point
                #
                slave_region = a_long.sets[current_joint_name]
                # Make constraint for this joint (ur1 left free = pin about
                # the local 1-axis):
                Constraint_name = 'RP_ZmYmXm_PinConstraint-' + str(iStorey) + '-' + str(iVertex)
                model.Coupling(name=Constraint_name, controlPoint=master_region,
                               surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
                               localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
                #
                # Constraint_name = 'RP_ZmYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
                # model.Coupling(name=Constraint_name, controlPoint=master_region,
                #                surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
                #                localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
                # Make constraint for this joint:
            elif iStorey == nStories:  # Top base:
                #
                master_region = a.sets['RP_ZpYmXm']  # Note that the master is the Reference Point
                #
                slave_region = a_long.sets[current_joint_name]
                # Make constraint for this joint:
                Constraint_name = 'RP_ZpYmXm_PinConstraint-' + str(iStorey) + '-' + str(iVertex)
                model.Coupling(name=Constraint_name, controlPoint=master_region,
                               surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
                               localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=OFF, ur2=ON, ur3=ON)
                #
                # Constraint_name = 'RP_ZpYmXm_FixedConstraint-'+str(iStorey)+'-'+str(iVertex)
                # model.Coupling(name=Constraint_name, controlPoint=master_region,
                #                surface=slave_region, influenceRadius=WHOLE_SURFACE, couplingType=KINEMATIC,
                #                localCsys=datum, u1=ON, u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
                # Make constraint for this joint:
            else:  # Middle stories:
                master_region = a_long.sets[current_joint_name]
                #
                # NOTE(review): `a_bat` is never defined in this function;
                # with nStories > 1 this line raises NameError.  Presumably
                # a battens instance was intended — confirm.  Also note no
                # constraint object is actually created in this branch.
                slave_region = a_bat.sets[current_joint_name]
                # Make constraint for this joint:
            # endif iStorey
            #
        # end for iStorey
    # end for iVertex
    #
    # Create hinges:
    # select_joints=a.instances['deployable_mast-1'].sets['all_joints_set']
    # select_RefPoint=a.sets['RP_joints']
    # model.RigidBody(name='JointsContraint', refPointRegion=select_RefPoint,
    #                 pinRegion=select_joints)
    #
    # Export mesh to .inp file
    #
    modelJob = mdb.Job(name=include_name, model=model.name)
    modelJob.writeInput(consistencyChecking=OFF)
# End of python script
|
#!/usr/bin/python3
"""Outsiders"""
import os

# Repository root: two directory levels above the directory holding this file.
PROJECT_DIR = os.path.dirname(os.path.dirname(
    os.path.dirname(os.path.abspath(__file__))))
# Asset locations; the trailing slash is kept so callers can append
# bare file names directly.
DATASETS_DIR = PROJECT_DIR + '/datasets/'
MEDIA_DIR = PROJECT_DIR + '/media/'
def prepare_query(query):
    """Strip everything except alphanumeric characters from *query*.

    Note that spaces are removed as well, so words are run together.
    """
    return ''.join(filter(str.isalnum, query))
|
# Problem Set 2, hangman.py
# Name: Haley Bates-Tarasewicz
# Collaborators: William Kalb
# Time spent: 2:00
# Hangman Game
# -----------------------------------Provided Code
import random
import string

# Word list source: expected to hold one whitespace-separated line of words.
WORDLIST_FILENAME = "words.txt"
def load_words():
    """Load the word list from WORDLIST_FILENAME and return it.

    Returns a list of word strings read from the single
    whitespace-separated line at the top of the file.
    """
    print("Loading word list from file...")
    # 'with' closes the file handle (the original leaked it and passed
    # buffering=0, which is invalid for text-mode files on Python 3).
    with open(WORDLIST_FILENAME, 'r') as in_file:
        # Only the first line is read: the whole list lives on one line.
        line = in_file.readline()
    # str.split() replaces string.split(line), which was removed in Python 3.
    wordlist = line.split()
    print("  %d words loaded." % len(wordlist))
    return wordlist
def choose_word(wordlist):
    """Return a word chosen uniformly at random from *wordlist*."""
    index = random.randrange(len(wordlist))
    return wordlist[index]
# -----------------------------------Global Variables
# Loaded once at import time; used by the game loops and the hint matcher.
wordlist = load_words()
# Takes a string secret_word and a list letters_guessed and returns True if
# all of the letters in secret_word have been guessed; False otherwise.
def is_word_guessed(secret_word, letters_guessed):
    """Return True when every letter of secret_word is in letters_guessed."""
    return all(letter in letters_guessed for letter in secret_word)
# Takes a string secret_word and a list letters_guessed and returns
# secret_word with all of the unguessed letters shown as asterisks.
def get_guessed_word(secret_word, letters_guessed):
    """Return secret_word with every unguessed letter replaced by '*'."""
    return "".join(c if c in letters_guessed else '*' for c in secret_word)
# Takes a list letters_guessed and returns the alphabet with the guessed
# letters removed.
def get_available_letters(letters_guessed):
    """Return the lowercase alphabet minus the letters in letters_guessed.

    Uses a membership test instead of list.remove(): the original raised
    ValueError whenever letters_guessed contained a duplicate or any
    character outside a-z (e.g. an uppercase wrong guess recorded by the
    game loop); such entries are now simply ignored.
    """
    return "".join(c for c in string.ascii_lowercase
                   if c not in letters_guessed)
# -----------------------------------Hangman Main Loop
def hangman(secret_word):
    """Interactive hangman loop (Python 2 syntax: print stmt / raw_input).

    Starts with 6 guesses.  A wrong consonant costs 1 guess, a wrong
    vowel costs 2, and repeating a guess costs 1.  The loop ends when
    the word is fully guessed or the guesses run out.
    """
    print "Welcome to the game Hangman!"
    print "I am thinking of a word that is ", len(secret_word), 'letters long.'
    guesses = 6
    letters_guessed = []
    vowels = ['a', 'e', 'i', 'o', 'u']
    while True:
        print '-------------'
        print 'you have', guesses, 'guesses left.'
        print 'Available letters: ', get_available_letters(letters_guessed)
        guess = raw_input('Please guess a letter:')
        if guess in letters_guessed:
            # Repeated guess: penalize but do not re-record the letter.
            print "Oops! You've already guessed that letter:", get_guessed_word(secret_word, letters_guessed)
            guesses -= 1
        elif guess in secret_word:
            letters_guessed.append(guess)
            print 'Good guess:', get_guessed_word(secret_word, letters_guessed)
        else:
            letters_guessed.append(guess)
            print 'Oops! That letter is not in my word:', get_guessed_word(secret_word, letters_guessed)
            # Wrong vowels cost two guesses, wrong consonants one.
            if guess in vowels:
                guesses -= 2
            else:
                guesses -= 1
        # Win/lose check after every turn.
        if is_word_guessed(secret_word, letters_guessed):
            print 'Congratulations, you won!'
            break
        elif guesses <= 0:
            print 'Sorry, you ran out of guesses. The word was', str(secret_word) + '.'
            break
        else:
            pass
# -----------------------------------Calls the Hangman Function
# Import-time side effect: immediately starts an interactive game.
hangman(choose_word(wordlist))
# -----------------------------------Hangman With Hints Functions
# Takes two strings and returns True if they match exactly, or are the same
# length with '*' acting as a single-character wildcard; False otherwise.
def match_with_gaps(my_word, other_word):
    """Return True when my_word (with '*' wildcards) could be other_word.

    The words match when they have equal length and every non-asterisk
    character of my_word equals the character at the same position in
    other_word.
    """
    if len(my_word) != len(other_word):
        return False
    return all(mine == '*' or mine == other
               for mine, other in zip(my_word, other_word))
# Takes a string my_word with a mix of letters and asterisks (asterisks
# representing unknown letters) and prints every word in the global word
# list that my_word could be.
def show_possible_matches(my_word):
    """Print all candidate words from the global wordlist on one line."""
    matches = [word for word in wordlist if match_with_gaps(my_word, word)]
    print(" ".join(matches))
# -----------------------------------Hangman With Hints Main Loop
def hangman_with_hints(secret_word):
    """Interactive hangman loop with '*' hint support (Python 2 syntax).

    Same rules as hangman(); additionally, guessing '*' prints every word
    in the word list consistent with the letters revealed so far, at no
    guess cost.
    """
    print "Welcome to the game Hangman!"
    print "I am thinking of a word that is ", len(secret_word), 'letters long.'
    guesses = 6
    letters_guessed = []
    vowels = ['a', 'e', 'i', 'o', 'u']
    while True:
        print '-------------'
        print 'you have', guesses, 'guesses left.'
        print 'Available letters: ', get_available_letters(letters_guessed)
        guess = raw_input('Please guess a letter:')
        if guess == '*':
            # Free hint: show candidate words, no guess penalty.
            print 'Possible word matches are:'
            show_possible_matches(get_guessed_word(secret_word, letters_guessed))
        elif guess in letters_guessed:
            print "Oops! You've already guessed that letter:", get_guessed_word(secret_word, letters_guessed)
            guesses -= 1
        elif guess in secret_word:
            letters_guessed.append(guess)
            print 'Good guess:', get_guessed_word(secret_word, letters_guessed)
        else:
            letters_guessed.append(guess)
            print 'Oops! That letter is not in my word:', get_guessed_word(secret_word, letters_guessed)
            # Wrong vowels cost two guesses, wrong consonants one.
            if guess in vowels:
                guesses -= 2
            else:
                guesses -= 1
        # Win/lose check after every turn.
        if is_word_guessed(secret_word, letters_guessed):
            print 'Congratulations, you won!'
            break
        elif guesses <= 0:
            print 'Sorry, you ran out of guesses. The word was', str(secret_word) + '.'
            break
        else:
            pass
# -----------------------------------Calls the Hangman With Hints Function
# Import-time side effect: runs a second interactive game after the first.
hangman_with_hints(choose_word(wordlist).lower())
|
# -*- coding:utf8 -*-
'''
Created on 2018-06-01
@author: chenzf
'''
import requests
import re
from requests.exceptions import RequestException
from multiprocessing import Pool
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import json
import os,sys

# Make the repository root (three levels up) importable so com_tools resolves.
parentdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,parentdir)
from com_tools import utils

# Download target: a per-script folder (script name without extension)
# under a fixed Windows pictures root; {file_path} is filled in later.
save_dir = os.path.basename(sys.argv[0]).split(".")[0]
utils.dir_path = "d:\\Pictures\\Hegre-Art\\Hegre Girls\\"+save_dir+"\\{file_path}"
'''
parse_page
@author: chenzf
'''
def parse_page(html):
    """Extract one record per grid node from a listing page.

    Each record holds the formatted name, the board (thumbnail) image
    URL, and the absolute URL of the detail page.
    """
    soup = BeautifulSoup(html, 'lxml')
    records = []
    for node in soup.find_all('div', class_="node-grid"):
        records.append({
            'name': utils.format_name(node.select_one('.grid-meta h4 a').string),
            'board': node.select_one('.content .field-type-image a img').get('src'),
            'url': urljoin('http://www.hegregirls.com/', node.select_one('.content .field-type-image a').get('href')),
        })
    return records
'''
parse_page_detail
@author: chenzf
'''
def parse_page_detail(html):
    """Extract image URLs (small/mid/large), the optional video URL and
    the release date from a detail page.

    Returns an empty dict when the expected elements are missing.
    """
    image = {}
    soup = BeautifulSoup(html, 'lxml')
    try:
        # join over .strings replaces the original += concatenation loop.
        date_release = "".join(soup.find(class_="title-subtitle").strings)
        # Query the <source> element once (the original re-ran the same
        # select_one a second time just to read 'src').
        source = soup.select_one('.mejs-mediaelement video source')
        video = source.get('src') if source else None
        image = {
            'small': soup.select_one('.field-name-massage-board a img').get('src'),
            'mid': soup.select_one('.mejs-mediaelement video').get('poster'),
            'large': soup.select_one('.field-name-massage-board a').get('href'),
            'video': video,
            'date': date_release
        }
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; triggered when the page layout differs.
        print('error-' + soup.select_one('#page-title').string)
    return image
'''
process_image
@author: chenzf
'''
def process_image(image):
    """Persist one scraped record: write info.json, then download the
    board image, the best available detail image and the video (all
    downloads delegated to the project utils helpers)."""
    dir_name = utils.dir_path.format(file_path=image.get('name'))
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    # Snapshot the scraped metadata next to the downloads.
    with open(dir_name+'\\info.json', 'w') as f:
        json.dump(image, f)
    # Single-element loop kept for symmetry with the quality loop below.
    for subkeys in ['board']:
        url = image.get(subkeys)
        if url:
            utils.download_file(url, utils.get_file_path(url, image.get('name')+
                                                         '\\'+ subkeys))
    detail = image.get('detail')
    if detail:
        # Qualities are tried best-first; only the first available one is
        # downloaded (hence the break after a successful hit).
        for subkeys in ['large', 'mid', 'small']:
            url = detail.get(subkeys)
            if url:
                utils.download_file(url, utils.get_file_path(url, image.get('name')+
                                                             '\\'+ image.get('name')))
                break;
        video = detail.get('video')
        if video:
            utils.download_file(video, utils.get_video_file_path(video, image.get('name')+
                                                                 '\\video'))
'''
process_image_detail
@author: chenzf
'''
def process_image_detail(url):
    """Fetch a detail page with the Chrome helper and parse it.

    Returns None when the page could not be retrieved.
    """
    html = utils.get_page_by_chrome(url, '.mejs-poster')
    if not html:
        return None
    return parse_page_detail(html)
'''
main
@author: chenzf
'''
def main(page):
    """Scrape one listing page, resolve each record's detail page and
    persist every record found."""
    listing_url = 'http://hegregirls.com/massage?page={page}'.format(page=page)
    html = utils.get_page(listing_url)
    if not html:
        return
    # An empty result list simply yields zero iterations.
    for image in parse_page(html):
        image['detail'] = process_image_detail(image.get('url'))
        process_image(image)
if __name__ == '__main__':
    # Scrape listing pages 0 and 1 with three worker processes.
    pool = Pool(3)
    pool.map(main,[i for i in range(0,2)])
    pool.close()
    pool.join()
## Predicting Models
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np

# Load the trained VGG19-based classifier saved by the training script.
model = load_model('model_vgg19.h5')
# Read one validation image at the network's 224x224 input size.
img = image.load_img('val/PNEUMONIA/person1946_bacteria_4874.jpeg', target_size=(224, 224))
x = image.img_to_array(img)
# Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
x = np.expand_dims(x, axis=0)
# NOTE(review): preprocess_input is imported from vgg16 while the model is
# VGG19 — presumably they share the same preprocessing; confirm.
img_data = preprocess_input(x)
# Class scores for the single image.
classes = model.predict(img_data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.