text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 20:19:18 2021
@author: Md.Abdullah

Open an image and display its 8-bit grayscale ("L" mode) conversion.
"""
from PIL import Image

# Raw string: in a plain literal, "\D", "\I" and "\m" are invalid escape
# sequences (DeprecationWarning, future SyntaxError).
img = Image.open(r"J:\Digital-Image-Processing\Images\messi.jpg")
# img.show()

# Convert the already-opened image instead of re-reading it from disk.
new_img = img.convert("L")
new_img.show()
# Generated by Django 3.1.1 on 2020-12-09 23:14
from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter ShopUser.recipes to CharField(max_length=200).

    NOTE(review): the default is the literal string '"lololol"' — the double
    quotes are part of the stored value; presumably a placeholder, confirm.
    """

    dependencies = [
        ('login', '0004_shopuser_recipes'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shopuser',
            name='recipes',
            field=models.CharField(default='"lololol"', max_length=200),
        ),
    ]
|
import string, cgi, time, json
import threading
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from ConfigParser import *
from servotorComm import runMovement
class MyHandler(BaseHTTPRequestHandler):
    """HTTP handler exposing robot movement commands (Python 2 code).

    GET / or /index.html renders a page with one link per entry in the
    module-level 'moves' mapping (defined elsewhere in this file).
    GET /<move name> runs the movement and answers with a small JSON blob.
    """

    def do_GET(self):
        try:
            self.send_response(200)
            # request path without the leading '/', i.e. the command name
            op = self.path[1:]
            if op == "index.html" or op == "":
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write("<html><head>" +
                    "<meta http-equiv='Content-Type'" +
                    "content='text/html; charset=utf-8' />" +
                    "<script type='text/javascript' src='//ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js'></script>" +
                    "<script type='text/javascript'>" +
                    "function runCommand(name){ $.get(name, '' , " +
                    "function(data){ /*alert(data);*/ }); } "+
                    "</script>" +
                    "</head><body>" +
                    "<h1>Available commands</h1>")
                # one clickable link per known movement
                for moveName in moves:
                    self.wfile.write("<a style='min-width:20em;margin:1em;font-size:120%;' href='javascript:" +
                        "runCommand(\""+moveName+"\")" +
                        "'>"+moveName+"</a><br>")
                self.wfile.write("</body></html>")
            else:
                self.send_header('Content-type', 'text/plain')
                self.end_headers()
                moveName = op.replace('%20',' ')  # undo URL-encoded spaces
                if moveName in moves:
                    self.wfile.write(json.dumps({'operation': op}))
                    # NOTE(review): 'move' is not defined anywhere visible —
                    # likely should be 'moves'; verify against servotorComm.
                    runMovement(move,moveName)
                else:
                    self.wfile.write(json.dumps({'operation': 'not found'}))
            return
        except IOError:
            self.send_error(404,'File Not Found: %s' % self.path)
class httpThread(threading.Thread):
    """Runs an HTTPServer's serve_forever loop on a background thread.

    The thread starts itself from __init__ (Python 2 code).
    """
    def __init__(self, server):
        threading.Thread.__init__(self)
        self.function=self.run  # NOTE(review): unused alias, kept as-is
        self.server = server
        self.start()  # thread begins serving immediately on construction
    def run(self):
        try:
            print 'started http server...'
            self.server.serve_forever()
        except KeyboardInterrupt:
            print '^C received, shutting down http server'
            self.server.socket.close()
def startHttpServer(port):
    """Bind an HTTPServer on all interfaces at *port* and serve it on a background thread."""
    server = HTTPServer(('', port), MyHandler)
    httpThread(server)
    # NOTE(review): this prints immediately after the thread starts, while the
    # server keeps serving — the message looks premature; confirm intent.
    print "Shutting down http server"
|
from flask import Flask, render_template, request, redirect
import MySQLdb
import pandas as pd
import json
from flask_mysqldb import MySQL

app = Flask(__name__)

# Columns exposed to the front end by every category endpoint.
_SUMMARY_COLUMNS = ['Headline', 'URL', 'Image', 'Category', 'Gen_summary']


def _fetch_category_json(category):
    """Return a JSON string of Summary_table records for one news category.

    Consolidates the nine copy-pasted /getAll* handlers: connect, query the
    rows for *category*, project the display columns, dump as list-of-dicts.
    """
    # NOTE(review): credentials are hardcoded (root/root) — move to config.
    conn = MySQLdb.connect(host='localhost', user='root', passwd='root',
                           db='capstone2', use_unicode=True, charset="utf8")
    try:
        # Parameterized query instead of string-building the category in.
        df = pd.read_sql_query(
            "select * from Summary_table where category = %s;",
            conn, params=(category,))
    finally:
        conn.close()  # the original leaked one connection per request
    data_dic = df.loc[:, _SUMMARY_COLUMNS].to_dict(orient='records')
    print(data_dic)
    return json.dumps(data_dic)


@app.route("/")
def index():
    """Landing page."""
    return render_template("index22.html")


@app.route('/getAllBlogs')
def getAllBlogs():
    # The original handler also queried the 'Nation' category here.
    return _fetch_category_json('Nation')


@app.route('/Nation.html')
def Nation():
    return render_template("Nation.html")


@app.route('/getAllNews')
def getAllNews():
    return _fetch_category_json('News')


@app.route('/News.html')
def News():
    return render_template("News.html")


@app.route('/getAllEntertainment')
def getAllEntertainment():
    return _fetch_category_json('Entertainment')


@app.route('/Entertainment.html')
def Entertainment():
    return render_template("Entertainment.html")


@app.route('/getAllBusiness')
def getAllBusiness():
    return _fetch_category_json('Business')


@app.route('/Business.html')
def Business():
    return render_template("Business.html")


@app.route('/getAllSports')
def getAllSports():
    return _fetch_category_json('Sports')


@app.route('/Sports.html')
def Sports():
    return render_template("Sports.html")


@app.route('/getAllOpinion')
def getAllOpinion():
    return _fetch_category_json('Opinion')


@app.route('/Opinion.html')
def Opinion():
    return render_template("Opinion.html")


@app.route('/getAllWorld')
def getAllWorld():
    return _fetch_category_json('World')


@app.route('/World.html')
def World():
    return render_template("World.html")


@app.route('/getAllTechnology')
def getAllTechnology():
    return _fetch_category_json('Technology')


@app.route('/Technology.html')
def Technology():
    return render_template("Technology.html")


@app.route('/getAllLifestyle')
def getAllLifestyle():
    return _fetch_category_json('Lifestyle')


@app.route('/Lifestyle.html')
def Lifestyle():
    return render_template("Lifestyle.html")


if __name__ == '__main__':
    app.run(debug=False)
""" There are three types of edits that can be performed on strings:
insert a character, remove a character, or replace a character.
Given two strings, write a function to check if they are one edit
or zero edits away.
pale, ple -> true
pales, pale -> true
pale, bale -> true
pale, bake -> false
create dictionary
check letters in one dictionary
"""
def one_away(str1, str2):
    """Return True if str1 and str2 are at most one edit apart.

    An edit is inserting, removing, or replacing a single character.
    (The original compared len(str1) to itself, and its set-based scan
    wrongly accepted transpositions such as 'abc' vs 'acb'.)

    >>> one_away('pale', 'ple')
    True
    >>> one_away('pales', 'pale')
    True
    >>> one_away('pale', 'bale')
    True
    >>> one_away('pale', 'bake')
    False
    """
    # Lengths differing by more than one can never be a single edit.
    if abs(len(str1) - len(str2)) > 1:
        return False
    shorter, longer = (str1, str2) if len(str1) <= len(str2) else (str2, str1)
    i = j = 0
    found_difference = False
    while i < len(shorter) and j < len(longer):
        if shorter[i] != longer[j]:
            if found_difference:
                return False
            found_difference = True
            # Equal lengths => a replacement: advance both pointers.
            # Unequal lengths => skip one char of the longer string only.
            if len(shorter) == len(longer):
                i += 1
        else:
            i += 1
        j += 1
    return True
|
import sys
import gc
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.io import mmwrite
from scipy import sparse
import tables
import time
import numpy as np
import pickle
def store_sparse_mat(M, name, filename='store.h5'):
    """Persist a scipy CSR matrix into a PyTables HDF5 file.

    Saves the data/indices/indptr/shape arrays as carrays named
    '<name>_<attribute>', replacing any existing nodes of the same name.
    Raises AssertionError if M is not CSR.
    """
    # isinstance against sparse.csr_matrix: the original compared __class__
    # to the deprecated sparse.csr.csr_matrix alias and printed the class
    # (debug leftover, removed).
    assert isinstance(M, sparse.csr_matrix), 'M must be a csr matrix'
    with tables.open_file(filename, 'a') as f:
        for attribute in ('data', 'indices', 'indptr', 'shape'):
            full_name = f'{name}_{attribute}'
            # remove an existing node so the array can be rewritten
            try:
                n = getattr(f.root, full_name)
                n._f_remove()
            except AttributeError:
                pass
            # add the node
            arr = np.array(getattr(M, attribute))
            atom = tables.Atom.from_dtype(arr.dtype)
            ds = f.create_carray(f.root, full_name, atom, arr.shape)
            ds[:] = arr
def load_sparse_mat(name, filename='store.h5'):
    """Rebuild a CSR matrix previously saved by store_sparse_mat."""
    with tables.open_file(filename) as f:
        # read back the four stored arrays in the order csr_matrix expects
        parts = [getattr(f.root, f'{name}_{attr}').read()
                 for attr in ('data', 'indices', 'indptr', 'shape')]
    return sparse.csr_matrix(tuple(parts[:3]), shape=parts[3])
def sumCSRMatrix(m1, m2):
    """Add two CSR matrices, padding both to their common bounding shape."""
    rows_a, cols_a = m1.get_shape()
    rows_b, cols_b = m2.get_shape()
    target_shape = (max(rows_a, rows_b), max(cols_a, cols_b))
    # Re-wrap the raw CSR buffers with the enlarged shape (no data copy of
    # the nonzeros themselves), then let scipy do the sparse addition.
    padded_a = csr_matrix((m1.data, m1.indices, m1.indptr), shape=target_shape)
    padded_b = csr_matrix((m2.data, m2.indices, m2.indptr), shape=target_shape)
    return padded_a + padded_b
# Dump version: accumulates the co-occurrence matrix incrementally and
# dumps partial matrices to disk whenever a size threshold is exceeded.
def mkCoOccurMatrixDump(iterator, vocabulary, threshold_mb, file_name_prefix):
    """Build a word co-occurrence matrix from (word, context) pairs.

    iterator yields (word, context_words); vocabulary maps word -> index.
    Partial CSR matrices are written via store_sparse_mat to files named
    '<file_name_prefix>_<n>' whenever the accumulator exceeds threshold_mb.
    Returns None (results live on disk).
    """
    DIM = len(vocabulary)
    #vocabulary = {}
    data = []
    row = []
    col = []
    dump_counter = 0
    # CSR accumulator; COO triplets are collected in the lists above first
    matrix = coo_matrix(([], ([],[])), shape=(DIM,DIM), dtype=np.int32).tocsr()
    for (word, context) in iterator:
        if (word not in vocabulary): continue
        i = vocabulary[word]
        for cword in context:
            if (cword not in vocabulary): continue
            j = vocabulary[cword]
            data.append(1)
            row.append(i)
            col.append(j)
        # Compress the triplet lists into the CSR accumulator once they grow
        # past ~2GB.  NOTE(review): this 2048 is hardcoded and independent of
        # threshold_mb — confirm whether that is intended.
        if (sys.getsizeof(data) / (1024 * 1024) > 2048):
            print('matrix compression')
            tmp = coo_matrix((data, (row, col)), shape=(DIM,DIM))
            tmp.setdiag(0)  # ignore self co-occurrence
            tmp = tmp.tocsr()
            matrix = sumCSRMatrix(matrix, tmp)
            print('current matrix size Mb: ', (matrix.data.nbytes + matrix.indices.nbytes + matrix.indptr.nbytes) / (1024 * 1024))
            gc.collect()
            data = []
            row = []
            col = []
            gc.collect()
            if ((matrix.data.nbytes + matrix.indices.nbytes + matrix.indptr.nbytes) / (1024 * 1024) > threshold_mb):
                # dump matrix to disk and start a fresh accumulator
                print('dump start')
                print('current matrix size Mb: ', (matrix.data.nbytes + matrix.indices.nbytes + matrix.indptr.nbytes) / (1024 * 1024))
                store_sparse_mat(matrix, "cooccur", f'{file_name_prefix}_{dump_counter}')
                matrix = coo_matrix(([], ([],[])), shape=(DIM,DIM), dtype='i').tocsr()
                dump_counter+=1
    #print("Data array length:", len(data))
    #print("Data array in memory:", sys.getsizeof(data) / (1024 * 1024))
    #cooccurrence_matrix = coo_matrix((data, (row, col)))
    # final flush: merge the remaining triplets and write the last chunk
    tmp = coo_matrix((data, (row, col)), shape=(DIM,DIM))
    tmp.setdiag(0)
    tmp = tmp.tocsr()
    cooccurrence_matrix = sumCSRMatrix(matrix, tmp)
    store_sparse_mat(cooccurrence_matrix, "cooccur", f'{file_name_prefix}_{dump_counter}')
    return None
# iterator, returns (line)
def _iterFile(file_name):
for line in open(file_name):
yield line
# iterator, returns (word, left context, right context)
def _iterWordContext(words_list, window_size = 2):
l = len(words_list)
for i, c in enumerate(words_list):
mn = max(0, i - window_size)
mx = min(l, i + window_size + 1)
mn_i = max(0, i)
mx_i = min(l, i + 1)
yield (c, words_list[mn:mn_i], words_list[mx_i:mx])
def iterCorpus(file_name, window_size):
    """Yield (word, context) for every word of every line in the corpus file.

    The context is the concatenation of the left and right windows produced
    by _iterWordContext.
    """
    for raw_line in _iterFile(file_name):
        for word, left_ctx, right_ctx in _iterWordContext(raw_line.split(), window_size):
            yield (word, left_ctx + right_ctx)
def writeDicToFile(data, file_name):
    """Pickle *data* to *file_name* using the highest available protocol."""
    with open(file_name, 'wb') as out_file:
        pickle.dump(data, out_file, pickle.HIGHEST_PROTOCOL)
def readDicFromFile(file_name):
    """Unpickle and return the object stored in *file_name*."""
    with open(file_name, 'rb') as in_file:
        return pickle.load(in_file)
def getSubMatrix(co_occur_matrix, vocabulary, sub_vocabulary):
    """Dense submatrix for the sub_vocabulary words present in vocabulary.

    Rows/columns appear in sub_vocabulary order; unknown words are skipped.
    """
    indices = [vocabulary[word] for word in sub_vocabulary if word in vocabulary]
    return co_occur_matrix[indices, :].tocsc()[:, indices].todense()
if __name__ == '__main__':
    # CLI: context_size <text_file_in> <vocabulary_in> <matrix_out> [<dump_threshold>]
    if len(sys.argv) < 5:
        print('Usage: python mk-co-occur-matrix.py context_size <text_file_in> <vocabulary_in> <matrix_out> [<dump_threshold>]')
        sys.exit(1)
    context_size = int(sys.argv[1])
    in_f = sys.argv[2]
    out_matrix = sys.argv[4]
    in_vocabulary = sys.argv[3]
    # dump threshold in MB, default 3048
    dump_threshold = int(sys.argv[5] if len(sys.argv) > 5 else 3048)
    start_time = time.time()
    vocab = readDicFromFile(in_vocabulary)
    print('Vocabulary Length: ', len(vocab))
    # renamed from 'iter' to avoid shadowing the builtin
    corpus_iter = iterCorpus(in_f, context_size)
    mkCoOccurMatrixDump(corpus_iter, vocab, dump_threshold, out_matrix)
    print("--- %s seconds ---" % (time.time() - start_time))
|
# Email settings
EMAIL_USE_TLS = False  # whether to use TLS (confidentiality/integrity between the two endpoints)
EMAIL_USE_SSL = False  # whether to use SSL encryption; QQ enterprise mail requires it
EMAIL_HOST = 'smtp.163.com'  # SMTP server of the sending mailbox (163 mail here)
EMAIL_PORT = 25  # SMTP port of the sending mailbox
EMAIL_HOST_USER = 'louisyoung163@163.com'  # sender mailbox address
# NOTE(review): credential committed in source — move to an env var / secret store.
EMAIL_HOST_PASSWORD = 'YPTZUTEJJAXTYFWY'  # sender mailbox password (an authorization code here)
EMAIL_FROM = 'louisyoung163Louis<louisyoung163@163.com>'  # From header value
EMAIL_RECEIVE = '1462648167@qq.com'  # recipient address
EMAIL_SUBJECT = 'Louis交易系统'  # mail subject (runtime string, left unchanged)
from discord.ext import commands
import discord
import platform
import os
class activeCommand(commands.Cog):
    """Cog that manages which channels receive announcement updates.

    Subscriptions are persisted per guild, one channel id per line, in
    'Aigner/server/<guild id>.txt' (path resolved relative to this file).
    """

    def __init__(self, bot):
        self.bot = bot
        # Bug fix: the original assigned a local 'cur_path', but update()
        # reads self.cur_path — that raised AttributeError at runtime.
        self.cur_path = os.path.dirname(__file__)

    # Update Command
    @commands.command(pass_context=True)
    @commands.has_permissions(manage_messages=True)
    # @commands.has_any_role('Admin')
    async def update(self, ctx, arg):
        """Handle 'sp!update on|off|list'; any other argument shows help."""
        server = os.path.relpath(f'Aigner/server/{ctx.guild.id}.txt', self.cur_path)
        if arg == 'on' or arg == 'ON':
            open(server, 'a').close()  # make sure the file exists
            with open(server) as f:
                registered = f.read()
            if f'{ctx.channel.id}' in registered:
                return await ctx.send('Announcement Updates have been added on this Channel.')
            else:
                with open(server, 'a') as f:
                    f.write(f'{ctx.channel.id}' + '\n')
                return await ctx.send('Announcement Updates will be announced on this Channel.')
        elif arg == 'off' or arg == 'OFF':
            open(server, 'a').close()
            with open(server) as f:
                channels = f.read().split()
            # Membership test on the parsed list: the original substring test
            # could match a channel id embedded inside a longer id.
            if f'{ctx.channel.id}' in channels:
                channels.remove(f'{ctx.channel.id}')
                with open(server, 'w') as f:  # rewrite the remaining ids
                    for kept in channels:
                        f.write(str(kept) + '\n')
                return await ctx.send('Update Announcement has been deleted on this Channel.')
            else:
                return await ctx.send('Announcement Updates have not been added on this Channel.')
        elif arg == 'list' or arg == 'LIST':
            open(server, 'a').close()
            with open(server) as f:
                channel_ids = f.read().split()
            embed = discord.Embed(
                title=f'{self.bot.get_emoji(670596720161456138)} **List Channel**',
                description='Channel that gets __updated__',
                colour=discord.Colour(16707843),
            )
            for data in channel_ids:
                embed.add_field(
                    name=f'__**{self.bot.get_channel(int(data))}**__', value=f'ID : `{data}`', inline=False)
            if not channel_ids:
                embed.add_field(name='__**NONE**__',
                                value='Channel has not been added.', inline=False)
            embed.set_thumbnail(
                url=f'{self.bot.user.avatar_url}')
            await ctx.send(embed=embed)
        else:
            await ctx.send(embed=self._help_embed())

    def _help_embed(self):
        """Build the syntax-help embed (used for unknown/missing arguments)."""
        embed = discord.Embed(
            title='__Active/Deactive__',
            colour=discord.Colour(16707843)
        )
        embed.set_author(name=f'{self.bot.user.name} help',
                         icon_url=f'{self.bot.user.avatar_url}')
        embed.add_field(name='Syntax:',
                        value='`sp!update on` : Enabling latest update on the current channel.\n`sp!update off` : Disabling latest update on the current channel.\n`sp!update list` : Get a list of active update on this server.', inline=False)
        embed.set_footer(
            text='AignerBot')
        return embed

    @update.error
    async def clear_update_error(self, ctx, error):
        """Show usage help when the required argument is missing."""
        # NOTE(review): a second @update.error below re-registers the command's
        # error callback; discord.py keeps one callback per command, so this
        # handler is likely shadowed — consider merging the two. Confirm.
        if isinstance(error, commands.errors.MissingRequiredArgument):
            await ctx.send(embed=self._help_embed())

    @update.error
    async def clear_missing_error(self, ctx, error):
        """Tell the user they lack the Manage Messages permission."""
        if isinstance(error, commands.errors.MissingPermissions):
            embed = discord.Embed(
                colour=discord.Colour(16707843)
            )
            embed.set_author(name=f'{self.bot.user.name} help',
                             icon_url=f'{self.bot.user.avatar_url}')
            embed.add_field(name='Information:',
                            value='Only Role with Manager Message can use this command.', inline=False)
            embed.set_footer(
                text='AignerBot')
            await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the activeCommand cog."""
    cog = activeCommand(bot)
    bot.add_cog(cog)
|
from __future__ import division
from pprint import pprint
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from gcloud import GenomicsOperation, OperationCostCalculator
from cromwell import Metadata
from collections import defaultdict
import json
import sys
import math
import argparse
class CromwellCostCalculator(object):
    """Estimate the Google Genomics cost of a Cromwell workflow.

    Looks up each call's Genomics operation and prices it with an
    OperationCostCalculator built from the supplied pricelist.
    """

    def __init__(self, pricelist):
        credentials = GoogleCredentials.get_application_default()
        self.service = discovery.build('genomics', 'v1', credentials=credentials)
        self.calculator = OperationCostCalculator(pricelist)

    def get_operation_metadata(self, name):
        """Fetch the raw Genomics operation metadata for *name*."""
        request = self.service.operations().get(name=name)
        response = request.execute()
        return response

    @staticmethod
    def dollars(raw_cost):
        """Round a raw cost up to whole cents."""
        return math.ceil(raw_cost * 100) / 100

    def calculate_cost(self, metadata_json):
        """Summarize workflow cost per task and overall.

        Returns {'tasks': [...], 'total_cost': ..., 'cost_per_shard': ...}.
        """
        metadata = Metadata(metadata_json)
        total_cost = 0
        max_samples = -1
        summary_json = { 'tasks': [], 'total_cost': None, 'cost_per_shard': None }
        # .items() works on both Python 2 and 3 (original used py2-only iteritems())
        for task, executions in metadata.calls().items():
            task_totals = defaultdict(int)
            for e in executions:
                if e.jobid() is None: continue
                op = GenomicsOperation(self.get_operation_metadata(e.jobid()))
                # parenthesized print: identical output on py2 and py3
                print('operation: {}'.format(op))
                task_totals[e.shard()] = task_totals[e.shard()] + self.dollars(self.calculator.cost(op))
                total_cost += self.dollars(self.calculator.cost(op))
            summary_json['tasks'].append({
                'name': task,
                'shards': len(task_totals),
                'cost_per_shard': self.dollars(sum(task_totals.values())/len(task_totals)) if len(task_totals) != 0 else 0,
                'total_cost': self.dollars(sum(task_totals.values()))
            })
            max_samples = max(max_samples, len(task_totals))
        summary_json['total_cost'] = total_cost
        # NOTE(review): with no tasks, max_samples stays -1 and this divides
        # by -1; confirm whether an empty workflow should be handled.
        summary_json['cost_per_shard'] = total_cost / max_samples
        return summary_json
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('pricelist', type=argparse.FileType('r'), help='pricelist.json from Google containing cost information')
    parser.add_argument('metadata', type=argparse.FileType('r'), help='metadata from a cromwell workflow from which to estimate cost')
    args = parser.parse_args()
    metadata = json.load(args.metadata)
    pricelist = json.load(args.pricelist)
    calc = CromwellCostCalculator(pricelist)
    cost = calc.calculate_cost(metadata)
    # parenthesized prints: identical output on py2 and py3 (each is one arg)
    print(json.dumps(cost, sort_keys=True, indent=4))
    print('Total: ${0}'.format(cost['total_cost']))
    print('Per Shard: ${0}'.format(cost['cost_per_shard']))
|
from edge import Edge
class Vertex:
    """Graph vertex identified by *id*, with separate in/out edge lists."""

    def __init__(self, id):
        self.id = id
        self.edgesTo = []    # outgoing edges
        self.edgesFrom = []  # incoming edges

    def addEdgeTo(self, edge):
        """Record an outgoing edge."""
        self.edgesTo.append(edge)

    def addEdgeFrom(self, edge):
        """Record an incoming edge."""
        self.edgesFrom.append(edge)

    def toString(self):
        """Full description: id plus both edge lists."""
        # renamed local from 'list' (shadowed the builtin)
        parts = "Id: " + str(self.id) + " Edges To: "
        for e in self.edgesTo:
            parts += e.toString() + " "
        parts += " Edges From: "
        for e in self.edgesFrom:
            parts += e.toString() + " "
        return parts

    def toShortString(self):
        """Compact description: id only."""
        return "Id: " + str(self.id) + " "

    def __eq__(self, v):
        # Bug fix: the original used 'is' (identity), which is unreliable for
        # non-interned ids (e.g. large ints).  NotImplemented lets Python
        # fall back gracefully when comparing against a non-Vertex.
        if not isinstance(v, Vertex):
            return NotImplemented
        return self.id == v.id

    def __hash__(self):
        # Defining __eq__ alone made instances unhashable; hash by id so
        # vertices can live in sets/dicts consistently with equality.
        return hash(self.id)
|
def solution(people, limit):
    """Minimum number of lifeboats needed to carry everyone.

    Each boat holds at most two people whose combined weight is <= limit.
    Greedy two-pointer: after sorting, try to pair the lightest remaining
    person with the heaviest; the heaviest always departs each round.
    Note: sorts *people* in place, like the original.
    """
    people.sort()
    lo, hi = 0, len(people) - 1
    paired = 0
    while lo < hi:
        if people[lo] + people[hi] <= limit:
            # lightest and heaviest share a boat
            paired += 1
            lo += 1
        hi -= 1
    # every pair saves one boat compared to one-boat-per-person
    return len(people) - paired
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        # node payload
        self.val = x
        # next node in the list, or None at the tail
        self.next = None
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution(object):
    def getIntersectionNode(self, A, B):
        """Return the first node shared (by identity) by lists A and B, or None.

        Temporarily links A's tail to B's head: an intersection then appears
        as a cycle, which is located with Floyd's tortoise/hare.  The
        temporary link is removed before returning, restoring both lists.
        """
        if not A or not B:
            return None
        # Concatenate A and B
        last = A
        while last.next:
            last = last.next
        last.next = B
        # Find the start of the loop (classic cycle detection)
        fast = slow = A
        while fast and fast.next:
            slow, fast = slow.next, fast.next.next
            if slow == fast:
                # pointers met inside the cycle; restarting one from the head
                # makes them meet again exactly at the cycle entry, which is
                # the intersection node
                fast = A
                while fast != slow:
                    slow, fast = slow.next, fast.next
                last.next = None  # undo the concatenation
                return slow
        # No loop found
        last.next = None
        return None
def list_to_linked_list(array_list):
    """Build a singly linked list from *array_list* and return its head.

    Returns None for an empty input (the original raised IndexError on
    array_list[0]).
    """
    head_node = None
    tail = None
    for value in array_list:
        node = ListNode(value)
        if head_node is None:
            head_node = tail = node
        else:
            tail.next = node
            tail = node
    return head_node
def traverse_linked_list(linked_list_head: ListNode):
    """Print each node's val from head to tail."""
    node = linked_list_head
    while node is not None:
        print(node.val)
        node = node.next
def create_circle_in_linked_list(linked_list_head: ListNode, index: int):
    """Link the tail to the node at position *index*, creating a cycle."""
    # walk to the tail
    tail = linked_list_head
    while tail.next:
        tail = tail.next
    # walk to the node at *index* and close the loop onto it
    target = linked_list_head
    for _ in range(index):
        target = target.next
    tail.next = target
if __name__ == '__main__':
    # ad-hoc manual checks
    nums = [3, 2, 0, -4]
    head = list_to_linked_list(nums)
    create_circle_in_linked_list(head, 0)
    # traverse_linked_list(head)
    list_a = [4, 1, 8, 4, 5]
    list_b = [2, 5, 6, 1, 8, 4, 5]
    list_a = [1, 8, 4, 5]   # rebinds, discarding the two lists above
    list_b = [2, 3, 8, 4, 5]
    s = Solution()
    # NOTE(review): head_a and head_b are built independently, so they share
    # no node objects; getIntersectionNode compares by identity and should
    # print None here — equal values alone are not an intersection.
    head_a = list_to_linked_list(list_a)
    head_b = list_to_linked_list(list_b)
    print(s.getIntersectionNode(head_a, head_b))
|
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
def init_driver():
    """Create a headless Chrome webdriver (flags suited to containers)."""
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    # options.add_argument('window-size=1200x600')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    # NOTE(review): 'chrome_options' is deprecated in newer Selenium releases
    # in favour of 'options=' — confirm the pinned selenium version.
    browser = webdriver.Chrome(chrome_options=options)
    return browser
# ---- Funciones utiles para el scrap y bus
def cortar_lista(lista):
    """Return *lista* itself when it has five or fewer elements, otherwise
    a new list with only the first five."""
    return lista if len(lista) <= 5 else lista[:5]
# wd = init_driver()
def unique(list1):
    """Return the elements of *list1* with duplicates removed, keeping
    first-seen order.  Works for unhashable elements too (linear scan)."""
    seen = []
    for item in list1:
        if item not in seen:
            seen.append(item)
    return seen
def create_pipe_string(lista):
    """Join the strings in *lista* with '|' (no trailing separator).

    Idiomatic str.join replaces the original accumulate-then-trim loop;
    behavior is identical, including '' for an empty list.
    """
    return "|".join(lista)
def cambia_sopa_scrap(palabra):  # palabra: the English word to translate
    """Load the Google Translate page for *palabra* and return its parsed soup.

    Spins up a fresh headless browser on every call (slow; reuse would help).
    """
    wd = init_driver()
    url = "https://translate.google.com.mx/?hl=es#view=home&op=translate&sl=en&tl=es&text="+palabra
    wd.get(url)
    soup = BeautifulSoup(wd.page_source,'html.parser')
    return soup
# gets the main translation plus alternative translations
def scrap_transalation(palabra):
    """Scrape Google Translate's page for *palabra* (English -> Spanish).

    Returns {'translation': <main translation>,
             'other_translation': 'alt1|alt2|...'}.
    Tightly coupled to Google's markup; any layout change breaks it.
    """
    soup= cambia_sopa_scrap(palabra)
    # soup= cambia_sopa_scrap(palabra)  # sneaky trick (original note)
    dicci={}
    lista_otros_verbos=[]
    try:
        textos=soup.find_all('span', attrs={'class' : 'tlid-translation translation'})
        translation=textos[1].text  # assumes feminine/masculine variants only; masculine is the second
    except:  # otherwise the word has no gender variants
        translation=soup.find("span", attrs={'class' : 'tlid-translation translation'}).text
    cadena_otros_verbos=""
    try:
        body=soup.find("tbody")  # the body changes depending on the soup
        contador=0
        for td in body:
            try:
                tipo=td.find("span", attrs={'class' : 'gt-cd-pos'}).text
            except:
                pass
            # NOTE(review): "or 'abjetivo'" is always truthy, so this condition
            # reduces to contador<10; also 'tipo' may be unbound if the first
            # lookup above failed.  Probably meant:
            # tipo in ('sustantivo', 'adjetivo') and contador < 10 — confirm.
            if tipo=='sustantivo' or 'abjetivo' and contador<10:  # collect fewer than 10 alternatives
                try:
                    otro_verbo=td.find("span", attrs={'class' : 'gt-baf-cell gt-baf-word-clickable'}).text
                    #cadena_otros_verbos=cadena_otros_verbos+ otro_verbo +"|"
                    lista_otros_verbos.append(otro_verbo)
                    contador=contador+1
                except:
                    cadena_otros_verbo=""
        #cadena_otros_verbos=cadena_otros_verbos[:-1]  # drop the trailing |
    except:
        print("no se encontro body")
    lista_otros_verbos=unique(lista_otros_verbos)
    cadena_otros_verbos=create_pipe_string(lista_otros_verbos)
    dicci['other_translation']=cadena_otros_verbos
    dicci['translation']=translation
    return dicci
def get_word_from_api(palabra):
    """Fetch dictionary entries for an English word from dictionaryapi.dev.

    Returns the decoded JSON: a list of entry dicts (no HTML parsing needed
    since this is a JSON API).
    """
    response = requests.get("https://api.dictionaryapi.dev/api/v2/entries/en/"+palabra)
    return response.json()
### Needs a major refactor; the method will be redone later (original note)
def extract_data_from_word(lista):
    """Flatten a dictionaryapi.dev response into a single dict.

    *lista* is the JSON list returned by get_word_from_api; only the first
    entry is used.  Produces word/phonetic/audio/partOfSpeech keys plus
    pipe-joined 'definition', 'example' and 'synonyms' strings.
    """
    dicci={}
    lista_definiciones=[]
    lista_ejemplos=[]
    lista_sinonimos=[]
    element = lista[0]
    try:
        dicci['word'] = element.get('word')
        fonetica=element.get('phonetics')[0]
        audio=fonetica['audio']
        phonetic=fonetica['text']
        dicci['phonetic']=phonetic
        dicci['audio']=audio
    except:
        pass
    try:
        meanings= element.get('meanings')
        for i in meanings:
            partOfSpeech = i.get('partOfSpeech')  # first finds 'noun', may later be reassigned
            # keep the first non-verb part of speech encountered
            if not 'verb' in partOfSpeech:
                dicci['partOfSpeech'] = partOfSpeech
            definitions = i.get('definitions')
            for diccionario in definitions:
                if 'definition' in diccionario:
                    lista_definiciones.append(diccionario.get('definition'))
                if 'example' in diccionario:
                    lista_ejemplos.append(diccionario.get('example'))
                if 'synonyms' in diccionario:
                    sinonimos=diccionario.get('synonyms')  # keep only 5 synonyms per definition
                    lista_sinonimos=lista_sinonimos + cortar_lista(sinonimos)
    except:
        print("no se encontro meanings")
    # trim the collected lists (dedupe, cap at 5)
    lista_definiciones = cortar_lista(unique(lista_definiciones))
    lista_ejemplos = cortar_lista(unique(lista_ejemplos))
    lista_sinonimos = unique(lista_sinonimos)
    try:  # cap the synonym list at 15
        lista_sinonimos = lista_sinonimos[:15]
    except:  # assume the list is smaller
        pass
    dicci['definition'] = create_pipe_string(lista_definiciones)
    dicci['example'] = create_pipe_string(lista_ejemplos)
    dicci['synonyms'] = create_pipe_string(lista_sinonimos)
    return dicci
import math
import pickle
from pympler import tracker
import numpy as np
import cv2
from visnav.algo import tools
from visnav.algo.base import AlgorithmBase
from visnav.algo.keypoint import KeypointAlgo
from visnav.algo.tools import PositioningException, Stopwatch
from visnav.iotools import lblloader
from visnav.missions.didymos import DidymosSystemModel
from visnav.missions.rosetta import RosettaSystemModel
from visnav.settings import *
from visnav.render.render import RenderEngine
class InvalidSceneException(Exception):
    """Raised when a scene cannot be rendered; scene_features treats it as
    'no features for this scene' and returns None."""
    pass
class FeatureDatabaseGenerator(AlgorithmBase):
    def __init__(self, system_model, render_engine, obj_idx, render_z=None):
        """Set up the generator.

        render_z: camera z-coordinate used for rendering; defaults to the
        negated minimum medium distance of the system model.
        """
        super(FeatureDatabaseGenerator, self).__init__(system_model, render_engine, obj_idx)
        self.render_z = render_z or -self.system_model.min_med_distance
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        # hard cap on keypoints extracted per scene
        self.MAX_FEATURES = 10000
        # for overriding rendering
        self._sc_ast_lat = None
        self._sc_ast_lon = None
        self._light_lat = None
        self._light_lon = None
        # so that no need to always pass these in function call
        self._ref_img_sc = self._cam.width / system_model.view_width
        self._fdb_sc_ast_perms = None
        self._fdb_light_perms = None
    def generate_fdb(self, feat, view_width=None, view_height=None, fdb_tol=KeypointAlgo.FDB_TOL,
                     maxmem=KeypointAlgo.FDB_MAX_MEM, save_progress=False):
        """Generate (or resume generating) the feature database for *feat*.

        Progress is tracked in a 'status' dict so long runs can be resumed
        from disk when save_progress is True:
          stage 1: extract features for every (viewpoint, light) scene
          stage 2: match features against neighbouring scenes
          stage 3: final save
        Returns the fdb list: [descriptors, 2d coords, 3d coords,
        matched-flags, per-scene feature counts].
        """
        save_file = self.fdb_fname(feat, fdb_tol, maxmem)
        view_width = view_width or self.system_model.view_width
        view_height = view_height or self.system_model.view_height
        assert view_width == self.render_engine.width and view_height == self.render_engine.height,\
                'wrong size render engine: (%d, %d)' % (self.render_engine.width, self.render_engine.height)
        self._ref_img_sc = self._cam.width / view_width
        self._ransac_err = KeypointAlgo.DEF_RANSAC_ERROR
        self.set_mesh(*self.calc_mesh(fdb_tol))
        # n1 viewpoints x n2 light directions = total scene count
        n1 = len(self._fdb_sc_ast_perms)
        n2 = len(self._fdb_light_perms)
        print('%d x %d = %d, <%dMB' % (n1, n2, n1*n2, n1*n2*(maxmem/1024/1024)))
        # initialize fdb array
        # fdb = np.full((n1, n2), None).tolist()
        # fdb = (desc, 2d, 3d, idxs)
        dlen = KeypointAlgo.BYTES_PER_FEATURE[feat]
        # max features storable per scene within the per-scene memory budget
        n3 = int(maxmem / (dlen + 3*4))
        if save_progress and os.path.exists(save_file):
            # load existing fdb and sanity-check it against current params
            status, sc_ast_perms, light_perms, fdb = self.load_fdb(save_file)
            assert len(sc_ast_perms) == n1, \
                'Wrong number of s/c - asteroid relative orientation scenes: %d vs %d'%(len(sc_ast_perms), n1)
            assert len(light_perms) == n2, \
                'Wrong number of light direction scenes: %d vs %d' % (len(light_perms), n2)
            assert fdb[0].shape == (n1, n2, n3, dlen), 'Wrong shape descriptor array: %s vs %s'%(fdb[0].shape, (n1, n2, n3, dlen))
            assert fdb[1].shape == (n1, n2, n3, 2), 'Wrong shape 2d img coord array: %s vs %s'%(fdb[1].shape, (n1, n2, n3, 2))
            assert fdb[2].shape == (n1, n2, n3, 3), 'Wrong shape 3d coord array: %s vs %s'%(fdb[2].shape, (n1, n2, n3, 3))
            assert fdb[3].shape == (n1, n2, n3), 'Wrong shape matched features array: %s vs %s'%(fdb[3].shape, (n1, n2, n3))
            assert fdb[4].shape == (n1, n2), 'Wrong shape feature count array: %s vs %s'%(fdb[4].shape, (n1, n2))
        else:
            # create new fdb
            status = {'stage': 1, 'i1': -1, 'time': 0}
            fdb = [
                np.zeros((n1, n2, n3, dlen), dtype='uint8'),  # descriptors
                np.zeros((n1, n2, n3, 2), dtype='float32'),   # 2d image coords
                np.zeros((n1, n2, n3, 3), dtype='float32'),   # 3d real coords
                np.zeros((n1, n2, n3), dtype='bool'),         # feature has matched other feature
                np.zeros((n1, n2), dtype='uint16'),           # number of features
            ]
        timer = Stopwatch(elapsed=status['time'])
        timer.start()
        # first phase, just generate max amount of features per scene
        print(''.join(['_']*n1), flush=True)
        if status['stage'] == 1:
            for i1, (sc_ast_lat, sc_ast_lon) in enumerate(self._fdb_sc_ast_perms):
                print('.', flush=True, end="")
                if i1 <= status['i1']:
                    continue  # already processed in an earlier (resumed) run
                for i2, (light_lat, light_lon) in enumerate(self._fdb_light_perms):
                    # tr = tracker.SummaryTracker()
                    tmp = self.scene_features(feat, maxmem, i1, i2)
                    # tr.print_diff()
                    if tmp is not None:
                        nf = tmp[0].shape[0]
                        fdb[0][i1, i2, 0:nf, :] = tmp[0]
                        fdb[1][i1, i2, 0:nf, :] = tmp[1]
                        fdb[2][i1, i2, 0:nf, :] = tmp[2]
                        fdb[4][i1, i2] = nf
                # checkpoint every 30 viewpoints
                if save_progress and (i1+1) % 30 == 0:
                    status = {'stage': 1, 'i1': i1, 'time': timer.elapsed}
                    self.save_fdb(status, fdb, save_file)
            print('\n', flush=True, end="")
            status = {'stage': 2, 'i1': -1, 'time': timer.elapsed}
        else:
            # stage 1 already complete: just reinstate the detector
            self._latest_detector, nfeats = KeypointAlgo.get_detector(feat, 0)
            print(''.join(['.'] * n1), flush=True)
        # debug override, disabled
        if False:
            status['stage'] = 2
            status['i1'] = 0
        # second phase, match with neighbours, record matching features
        # NOTE(review): 'True or' forces this stage to always run regardless
        # of the recorded status — confirm this is intended.
        if True or status['stage'] == 2:
            visited = set()
            for i1 in range(n1):
                print('.', flush=True, end="")
                if i1 <= status['i1']:
                    continue
                for i2 in range(n2):
                    self._update_matched_features(fdb, visited, fdb_tol, i1, i2)
                if save_progress and (i1+1) % 30 == 0:
                    status = {'stage': 2, 'i1': i1, 'time': timer.elapsed}
                    self.save_fdb(status, fdb, save_file)
            print('\n', flush=True, end="")
            # fdb[1] = None
            status = {'stage': 3, 'i1': 0, 'time': timer.elapsed}
        else:
            print(''.join(['.'] * n1), flush=True)
        # third phase, discard features that didn't match with any neighbours
        # for i1 in range(n1):
        #     print('.', flush=True, end="")
        #     for i2 in range(n2):
        #         tmp = fdb[][i1][i2]
        #         if tmp is not None:
        #             a, b, c, idxs = tmp
        #             fdb[i1][i2] = (a[tuple(idxs), :], c[tuple(idxs), :])
        #             #fdb[i1][i2] = list(zip(*[(a[i], b[i], c[i]) for i in idxs]))
        # print('\n', flush=True, end="")
        # finished, save, then exit
        if status['stage'] == 3:
            status = {'stage': 4, 'i1': 0, 'time': timer.elapsed}
            self.save_fdb(status, fdb, save_file)
            timer.stop()
            secs = timer.elapsed
        else:
            secs = status['time']
        print('Total time: %.1fh, per scene: %.3fs'%(secs/3600, secs/n1/n2))
        return fdb
def set_mesh(self, fdb_sc_ast_perms, fdb_light_perms):
    """Install externally computed discretization meshes.

    Args:
        fdb_sc_ast_perms: discretized s/c-asteroid relative orientations,
            presumably (lat, lon) pairs as produced by calc_mesh().
        fdb_light_perms: discretized light directions, same format.
    """
    self._fdb_sc_ast_perms, self._fdb_light_perms = fdb_sc_ast_perms, fdb_light_perms
def calc_mesh(self, fdb_tol):
    """Compute the discretized (lat, lon) grids used to index the database.

    Args:
        fdb_tol: grid resolution in radians.

    Returns:
        (sc_ast_grid, light_grid): two numpy arrays of (lat, lon) pairs.
    """
    # s/c-asteroid relative orientation, camera axis rotation zero, in opengl coords
    sc_ast_grid = np.array(tools.bf2_lat_lon(fdb_tol))

    # light direction in opengl coords:
    # z-axis towards cam, x-axis to the right => +90deg lat == sun ahead,
    # -90deg lat == sun behind, 0 deg lon => sun on the left
    max_lat = math.radians(90 - self.system_model.min_elong)
    light_grid = np.array(tools.bf2_lat_lon(fdb_tol, lat_range=(-math.pi / 2, max_lat)))

    return sc_ast_grid, light_grid
def scene_features(self, feat, maxmem, i1, i2):
    """Render the scene indexed by (i1, i2) and extract keypoint features.

    Args:
        feat: feature/detector type id passed to KeypointAlgo.
        maxmem: memory budget forwarded to the detector.
        i1: index into the s/c-asteroid orientation mesh.
        i2: index into the light-direction mesh.

    Returns:
        (descriptors, keypoints_2d, keypoints_3d) arrays, or None when the
        scene is invalid (e.g. could not be rendered).
    """
    try:
        ref_img, depth = self.render_scene(i1, i2)
    except InvalidSceneException:
        return None

    # get keypoints and descriptors; the detector instance is cached for
    # later matching (defaultNorm() is needed then)
    ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(ref_img, feat, maxmem=maxmem,
                                                                           max_feats=self.MAX_FEATURES, for_ref=True)

    # save only 2d image coordinates, scrap scale, orientation etc
    ref_kp_2d = np.array([p.pt for p in ref_kp], dtype='float32')

    # get 3d coordinates by back-projecting through the depth map
    ref_kp_3d = KeypointAlgo.inverse_project(self.system_model, ref_kp_2d, depth, self.render_z, self._ref_img_sc)

    if False:
        # Debug visualization, disabled. NOTE(review): the inner branch
        # references sc_ast_lat/sc_ast_lon/light_lat/light_lon which are not
        # defined in this scope -- it would raise NameError if enabled.
        mm_dist = self.system_model.min_med_distance
        if False:
            pos = (0, 0, -mm_dist)
            qfin = tools.ypr_to_q(sc_ast_lat, 0, sc_ast_lon)
            light_v = tools.spherical2cartesian(light_lat, light_lon, 1)
            reimg = self.render_engine.render(self.obj_idx, pos, qfin, light_v)
            img = np.concatenate((cv2.resize(ref_img, (self.system_model.view_width, self.system_model.view_height)), reimg), axis=1)
        else:
            ref_kp = [cv2.KeyPoint(*self._cam.calc_img_xy(x, -y, -z-mm_dist), 1) for x, y, z in ref_kp_3d]
            img = cv2.drawKeypoints(ref_img, ref_kp, ref_img.copy(), (0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        cv2.imshow('res', img)
        cv2.waitKey()

    return np.array(ref_desc), ref_kp_2d, ref_kp_3d
def render_scene(self, i1, i2, get_depth=True):
    """Render the scene at mesh indices (i1, i2).

    Args:
        i1: index into the s/c-asteroid orientation mesh.
        i2: index into the light-direction mesh.
        get_depth: also return the depth buffer when True.

    Returns:
        (img, depth) where depth is None if get_depth is False.
    """
    # stash the selected orientations; _render_params() reads them back
    self._sc_ast_lat, self._sc_ast_lon = self._fdb_sc_ast_perms[i1]
    self._light_lat, self._light_lon = self._fdb_light_perms[i2]

    depth = None
    result = self.render(depth=get_depth, shadows=True)
    if get_depth:
        img, depth = result
    else:
        img = result

    # scale to match scene image asteroid extent in pixels
    img = cv2.resize(img, None, fx=self._ref_img_sc, fy=self._ref_img_sc,
                     interpolation=cv2.INTER_CUBIC)
    return img, depth
def _render_params(self, discretize_tol=False, center_model=False):
    """Render-parameter hook override based on the hidden lat/lon fields.

    Called by self.render(); returns (position, orientation quaternion,
    light direction vector) for the scene set up by render_scene().
    """
    # relative orientation from the stored lat/lon pair (camera roll fixed to 0)
    qfin = tools.ypr_to_q(self._sc_ast_lat, 0, self._sc_ast_lon)

    # light direction as a unit vector from the stored spherical coordinates
    light_v = tools.spherical2cartesian(self._light_lat, self._light_lon, 1)

    # NOTE: an InvalidSceneException could be raised here for unreasonable
    # qfin/light_v combinations (e.g. solar elong < 45 deg); so far that has
    # not been needed, kept in mind in case it becomes useful later.
    return (0, 0, self.render_z), qfin, light_v
def get_neighbours(self, fdb_tol, i1, i2):
    """Return scene pairs adjacent to (i1, i2) on both meshes.

    Each element is a tuple (i1, i2, j1, j2) linking the given scene to a
    neighbouring scene; the identity pair is excluded.
    """
    # search radius: sqrt(sqrt(2)**2 + (sqrt(2)/2)**2) grid steps
    radius = fdb_tol * math.sqrt(2.5)
    near_sc = tools.find_nearest_n(self._fdb_sc_ast_perms, self._fdb_sc_ast_perms[i1],
                                   r=radius, fun=tools.wrap_rads)
    near_light = tools.find_nearest_n(self._fdb_light_perms, self._fdb_light_perms[i2],
                                      r=radius, fun=tools.wrap_rads)
    pairs = {(i1, i2, a, b) for a in near_sc for b in near_light}
    return pairs - {(i1, i2, i1, i2)}
def _update_matched_features(self, fdb, visited, fdb_tol, i1, i2):
    """Match scene (i1, i2) against its not-yet-visited neighbours.

    Updates fdb in place via _update_matched_features_inner() and records
    each processed pair (in both directions) in the visited set.
    """
    if fdb[4][i1, i2] == 0:
        return  # scene has no features, nothing to match

    for a1, a2, b1, b2 in self.get_neighbours(fdb_tol, i1, i2) - visited:
        self._update_matched_features_inner(fdb, (a1, a2), (b1, b2))
        # mark both directions so the symmetric pair is not reprocessed
        visited.add((a1, a2, b1, b2))
        visited.add((b1, b2, a1, a2))
def _update_matched_features_inner(self, fdb, idxs1, idxs2):
    """Match features between two scenes and flag verified inliers in fdb[3].

    Args:
        fdb: the feature database arrays (0: descriptors, 1: 2d keypoints,
            2: 3d keypoints, 3: matched-feature flags, 4: feature counts).
        idxs1: (i1, i2) mesh indices of the first scene.
        idxs2: (j1, j2) mesh indices of the second scene.
    """
    nf1 = fdb[4][idxs1[0], idxs1[1]]
    nf2 = fdb[4][idxs2[0], idxs2[1]]
    if idxs1 == idxs2 or nf1 == 0 or nf2 == 0:
        # same scene, or one of the scenes has no features
        return

    # slice out only the valid (first nf) feature rows of each scene
    sc1_desc = fdb[0][idxs1[0], idxs1[1], 0:nf1, :].reshape((nf1, fdb[0].shape[3]))
    sc1_kp_2d = fdb[1][idxs1[0], idxs1[1], 0:nf1, :].reshape((nf1, fdb[1].shape[3]))

    sc2_desc = fdb[0][idxs2[0], idxs2[1], 0:nf2, :].reshape((nf2, fdb[0].shape[3]))
    sc2_kp_3d = fdb[2][idxs2[0], idxs2[1], 0:nf2, :].reshape((nf2, fdb[2].shape[3]))

    try:
        matches = KeypointAlgo.match_features(sc1_desc, sc2_desc, self._latest_detector.defaultNorm(), method='brute')

        # solve pnp with ransac: scene-1 2d points against scene-2 3d points
        ref_kp_3d = sc2_kp_3d[[m.trainIdx for m in matches], :]
        sce_kp_2d = sc1_kp_2d[[m.queryIdx for m in matches], :]
        rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(self.system_model, sce_kp_2d, ref_kp_3d, self._ransac_err)

        # check if solution ok: pose error must agree with the known relative pose
        ok, err1, err2 = self.calc_err(rvec, tvec, idxs1[0], idxs2[0], warn=len(inliers) > 30)
        if not ok:
            raise PositioningException()

        # flag the ransac inliers as "matched" in both scenes
        fdb[3][idxs1[0], idxs1[1], [matches[i[0]].queryIdx for i in inliers]] = True
        fdb[3][idxs2[0], idxs2[1], [matches[i[0]].trainIdx for i in inliers]] = True
    except PositioningException as e:
        # failed matches are simply skipped; the features stay unflagged
        # assert inliers is not None, 'at (%s, %s): ransac failed'%(idxs1, idxs2)
        pass
def calc_err(self, rvec, tvec, i1, j1, warn=False):
    """Check a pnp solution against the known relative pose of two scenes.

    Args:
        rvec: angle-axis rotation from solve_pnp_ransac (opencv frame).
        tvec: translation from solve_pnp_ransac.
        i1: orientation-mesh index of the first scene.
        j1: orientation-mesh index of the second scene.
        warn: print details when the solution is rejected.

    Returns:
        (ok, err1, err2) where err1 is the rotation error in degrees and
        err2 the translation error in the same units as render_z.
    """
    q_res = tools.angleaxis_to_q(rvec)
    lat1, roll1 = self._fdb_sc_ast_perms[i1]
    lat2, roll2 = self._fdb_sc_ast_perms[j1]
    q_src = tools.ypr_to_q(lat1, 0, roll1)
    q_trg = tools.ypr_to_q(lat2, 0, roll2)
    # ground-truth relative rotation between the two scenes
    q_rel = q_trg * q_src.conj()

    # q_res.x = -q_res.x
    # np.quaternion(0.707106781186547, 0, -0.707106781186547, 0)
    m = self.system_model
    # convert the estimate from the opencv to the opengl frame before comparing
    q_frame = m.frm_conv_q(m.OPENGL_FRAME, m.OPENCV_FRAME)
    q_res = q_frame * q_res.conj() * q_frame.conj()

    err1 = math.degrees(tools.wrap_rads(tools.angle_between_q(q_res, q_rel)))
    err2 = np.linalg.norm(tvec - np.array((0, 0, -self.render_z)).reshape((3, 1)))
    # accept if rotation error <= 10 deg and translation error <= 10% of render_z
    ok = not (abs(err1) > 10 or abs(err2) > 0.10 * abs(self.render_z))

    if not ok and warn:
        print('at (%s, %s), err1: %.1fdeg, err2: %.1fkm\n\tq_real: %s\n\tq_est: %s' % (
            i1, j1, err1, err2, q_rel, q_res))

    return ok, err1, err2
def closest_scene(self, sc_ast_q=None, light_v=None):
    """ in opengl frame

    Find the database scene closest to the given (or current) s/c-asteroid
    orientation and light direction.

    Args:
        sc_ast_q: relative orientation quaternion; defaults to the current
            system model state.
        light_v: light direction vector; defaults to the current state.

    Returns:
        (i1, i2, d_sc_ast_q, d_light_v, err_q, err_angle): mesh indices of
        the closest scene, the discretized orientation and light direction,
        the orientation discretization error quaternion, and the light
        direction error angle.
    """
    if sc_ast_q is None:
        sc_ast_q, _ = self.system_model.gl_sc_asteroid_rel_q()
    if light_v is None:
        light_v, _ = self.system_model.gl_light_rel_dir()

    d_sc_ast_q, i1 = tools.discretize_q(sc_ast_q, points=self._fdb_sc_ast_perms)
    err_q = sc_ast_q * d_sc_ast_q.conj()

    # rotate the light vector by the inverse discretization error before
    # discretizing it, so both discretizations are consistent
    c_light_v = tools.q_times_v(err_q.conj(), light_v)
    d_light_v, i2 = tools.discretize_v(c_light_v, points=self._fdb_light_perms)
    err_angle = tools.angle_between_v(light_v, d_light_v)

    return i1, i2, d_sc_ast_q, d_light_v, err_q, err_angle
@staticmethod
def calculate_fdb_stats(fdb, feat):
    """Summarize per-scene accepted feature counts and memory usage.

    Args:
        fdb: feature database arrays; fdb[3] holds matched-feature flags.
        feat: feature type id, used to look up the per-feature byte size.

    Returns:
        dict of summary statistics.
    """
    per_scene = np.sum(fdb[3], axis=2).flatten()
    # descriptor bytes plus 3 float32 coordinates per accepted feature
    mem_bytes = 1.0 * np.sum(per_scene) * (KeypointAlgo.BYTES_PER_FEATURE[feat] + 3 * 4)
    avg_count = np.mean(per_scene)
    n_failed = np.sum(per_scene == 0)
    return {
        'min_feat_count': np.min(per_scene),
        'avg_feat_count': avg_count,
        'scene_count': len(per_scene),
        'failed_scenes': n_failed,
        'weak_scenes': np.sum(per_scene < 100) - n_failed,
        'total_mem_usage (MB)': mem_bytes / 1024 / 1024,
        'accepted_feature_percent': 100 * (avg_count / fdb[3].shape[2]),
    }
def fdb_fname(self, feat, fdb_tol=KeypointAlgo.FDB_TOL, maxmem=KeypointAlgo.FDB_MAX_MEM):
    """Return the cache-file path for a feature database.

    The name encodes the feature type, view width, memory budget in kB and
    the mesh tolerance in tenths of a degree.
    """
    fname = 'fdb_%s_w%d_m%d_t%d.pickle' % (
        feat,
        self.system_model.view_width,
        maxmem / 1024,
        10 * math.degrees(fdb_tol),
    )
    return os.path.join(CACHE_DIR, self.system_model.mission_id, fname)
def load_fdb(self, fname):
    """Load a pickled feature database from fname.

    Returns:
        (status, fdb_sc_ast_perms, fdb_light_perms, fdb): generation status
        dict, the two discretization meshes and the database arrays.
    """
    with open(fname, 'rb') as fh:
        tmp = pickle.load(fh)
    if len(tmp) == 3:
        # backwards compatibility: old files did not store the meshes,
        # re-derive them from the default tolerance
        fdb_sc_ast_perms = np.array(tools.bf2_lat_lon(KeypointAlgo.FDB_TOL))
        fdb_light_perms = np.array(tools.bf2_lat_lon(KeypointAlgo.FDB_TOL,
                                                     lat_range=(-math.pi / 2, math.radians(90 - self.system_model.min_elong))))
        n1, n2 = len(fdb_sc_ast_perms), len(fdb_light_perms)
        status, scenes, fdb = tmp
        # sanity check that the re-derived meshes match the stored scenes
        assert len(scenes) == n1 * n2, \
            'Wrong amount of scenes in loaded fdb: %d vs %d' % (len(scenes), n1 * n2)
    else:
        status, fdb_sc_ast_perms, fdb_light_perms, fdb = tmp
        # assert status['stage'] >= 3, 'Incomplete FDB status: %s' % (status,)
    return status, fdb_sc_ast_perms, fdb_light_perms, fdb
def save_fdb(self, status, fdb, save_file):
    """Atomically persist the feature database to save_file.

    The data is first written to a '.tmp' sibling file and then moved over
    the destination, so an interrupted save never corrupts an existing db.

    Args:
        status: generation status dict (stage, index, elapsed time).
        fdb: the feature database arrays.
        save_file: destination path.
    """
    tmp_file = save_file + '.tmp'
    with open(tmp_file, 'wb') as fh:
        # highest pickle protocol for speed and size
        pickle.dump((status, self._fdb_sc_ast_perms, self._fdb_light_perms, fdb), fh, -1)
    # os.replace overwrites the destination atomically on all platforms,
    # closing the crash window the previous os.remove + os.rename pair had
    # (a crash between those two calls would have left no database at all).
    os.replace(tmp_file, save_file)
def estimate_mem_usage(self, fdb_tol_deg, sc_mem_kb, acc_ratio=0.5):
    """Print an estimate of the database size for a given mesh tolerance.

    Args:
        fdb_tol_deg: mesh tolerance in degrees.
        sc_mem_kb: per-scene memory budget in kB.
        acc_ratio: expected fraction of features that end up accepted.
    """
    tol_rad = math.radians(fdb_tol_deg)
    n1 = len(tools.bf2_lat_lon(tol_rad))
    n2 = len(tools.bf2_lat_lon(tol_rad, lat_range=(-math.pi/2, math.radians(90 - self.system_model.min_elong))))
    total = n1 * n2
    mb_per_scene = sc_mem_kb / 1024
    print('%d x %d = %d, <%dMB, ~%dMB' % (n1, n2, total, total * mb_per_scene, total * mb_per_scene * acc_ratio))
if __name__ == '__main__':
    # Recorded accepted-feature ratios / memory usages per mission, feature
    # type, tolerance and per-scene memory budget:
    # Didw - ORB:
    #  * 10 deg, 128kb
    #  * 12 deg, 512kb
    # Didw - AKAZE:
    #  * 12 deg, 512kb, 0.133, 1186MB
    # Didy - ORB:
    #  * 10 deg, 128kb, 0.415, 1984MB
    #  * 12 deg, 512kb, ?
    # Didy - AKAZE:
    #  * 10 deg, 128kb,
    #  * 12 deg, 512kb,
    # Rose - ORB:
    #  * 10 deg, 128kb, 0.469, 2246MB
    #  * 11 deg, 256kb,
    #  * 12 deg, 512kb, 0.267, 2393MB
    # Rose - AKAZE:
    #  * 10 deg, 128kb, 0.558, 2670MB
    #  * 11 deg, 256kb, 0,291, 1948MB
    #  * 12 deg, 512kb, 0.131, 1177MB
    # Rose - SIFT:
    #  * 10 deg, 128kb, 0,441, 2111MB
    #  * 11 deg, 256kb,
    #  * 12 deg, 512kb, 0.217, 1945MB
    # Rose - SURF:
    #  * 10 deg, 128kb,
    #  * 12 deg, 512kb,

    # select the mission / system model to generate the database for
    sm = RosettaSystemModel(hi_res_shape_model=True)  # rose
    # sm = DidymosSystemModel(hi_res_shape_model=True, use_narrow_cam=True)   # didy
    # sm = DidymosSystemModel(hi_res_shape_model=True, use_narrow_cam=False)  # didw
    # sm.view_width = sm.cam.width
    sm.view_width = 512

    # generation parameters: feature type, mesh tolerance, per-scene memory
    feat = KeypointAlgo.ORB
    fdb_tol = math.radians(11)
    maxmem = 256 * 1024

    re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)
    obj_idx = re.load_object(sm.asteroid.real_shape_model, smooth=sm.asteroid.render_smooth_faces)
    fdbgen = FeatureDatabaseGenerator(sm, re, obj_idx)

    if True:
        # generate a new database (resumes from saved progress if available)
        fdb = fdbgen.generate_fdb(feat, fdb_tol=fdb_tol, maxmem=maxmem, save_progress=True)
    else:
        # or load a previously generated one
        fname = fdbgen.fdb_fname(feat, fdb_tol, maxmem)
        status, sc_ast_perms, light_perms, fdb = fdbgen.load_fdb(fname)
        print('status: %s' % (status,))

    #fdbgen.estimate_mem_usage(12, 512, 0.25)
    #quit()

    stats = FeatureDatabaseGenerator.calculate_fdb_stats(fdb, feat)
    print('FDB stats:\n'+str(stats))
    # print('Total time: %.1fh, per scene: %.3fs' % (status['time'] / 3600, status['time'] / len(scenes)))

    # release the (large) database before possibly processing another one
    fdb = None

    # feat = KeypointAlgo.ORB
    # fdb_tol = math.radians(12)
    # maxmem = 384 * 1024
    # fname = os.path.join(CACHE_DIR, sm.mission_id, 'fdb_%s_w%d_m%d_t%d.pickle' % (
    #     feat,
    #     sm.view_width,
    #     maxmem / 1024,
    #     10 * math.degrees(fdb_tol)
    # ))
    # scenes, fdb = fdbgen.generate_fdb(feat, fname, fdb_tol=fdb_tol, maxmem=maxmem)
    # stats = fdbgen.calculate_fdb_stats(scenes, fdb, feat)
    # print('fdb stats:\n'+str(stats))
|
import tensorflow as tf
import sys
import hyperparams as hyp
def read_and_decode(filename_queue):
    """Read one gzip-compressed TFRecord example from the queue and decode it.

    Uses the TF1 queue-based input pipeline (TFRecordReader). Each record is
    expected to hold a raw float32 gene vector and an int64 category id.

    Args:
        filename_queue: a TF1 filename queue of TFRecord files.

    Returns:
        (gene, cat): a [hyp.N, 1] float32 tensor and a scalar int64 tensor.
    """
    # records are stored gzip-compressed
    compress = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    reader = tf.TFRecordReader(options=compress)
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'gene_raw': tf.FixedLenFeature([], tf.string),
            'cat_id': tf.FixedLenFeature([], tf.int64),
        })
    # reinterpret the raw bytes as float32 values
    gene = tf.decode_raw(features['gene_raw'], tf.float32)
    cat = tf.cast(features['cat_id'], tf.int64)
    # fixed shape: hyp.N values as a column vector, label as a scalar
    gene = tf.reshape(gene, [hyp.N, 1])
    cat = tf.reshape(cat, [])
    return (gene,cat)
|
import pandas as pd
import matplotlib.pyplot as plt
def read_vegetable_data(filename):
    """Load the vegetable-price data in *filename* into a DataFrame.

    Placeholder: currently returns an empty DataFrame until implemented.
    """
    # Please, introduce your answer here
    frame = pd.DataFrame()
    return frame
def generate_plot_1(df):
    """Generate Plot 1 from *df* (show or save to file).

    Placeholder: only prints a notice until implemented.
    """
    # Please, introduce your answer here
    print('Plot 1 not completed yet.')  # Remove this line when completed.
def generate_plot_2(df, vegetable):
    """Generate Plot 2 for *vegetable* from *df* (show or save to file).

    Placeholder: only prints a notice until implemented.
    """
    # Please, introduce your answer here
    print('Plot 2 not completed yet.')  # Remove this line when completed.
def find_cheapest_month(df, vegetable, organic):
    """Return the cheapest month for *vegetable* given the *organic* flag.

    Placeholder: returns a fixed marker string until implemented.
    """
    # Please, introduce your answer here
    return 'Not completed yet'  # Change/remove this line
if __name__ == '__main__':
    ''' You might modify the values of
    these variables to try different aspects
    of your code. You might also want to try
    different files that have the same format
    but different numbers for the prices.
    Running this file should produce all the
    outputs you need, two plots and the desired value'''

    filename = 'data/Vegetables.csv'  # You can change this value for testing

    # Parameters for plot 2
    vegetable_for_plot_2 = 'Lettuce'  # You can change this value for testing

    # Parameters for finding the cheapest month:
    vegetable_for_cheapest_month = 'Carrots'  # You can change this value for testing
    organic_for_cheapest_month = False  # You can change this value for testing

    # The code below is for your reference, so you can visualise your answer
    df = read_vegetable_data(filename)

    # Generate plots:
    generate_plot_1(df)
    generate_plot_2(df, vegetable_for_plot_2)

    # Cheapest month info:
    cheapest_month = find_cheapest_month(df,
                                         vegetable_for_cheapest_month,
                                         organic_for_cheapest_month)
    print('For', vegetable_for_cheapest_month,
          '(organic =', organic_for_cheapest_month,
          ') the cheapest month was:', cheapest_month)
|
import tushare as ts
import pandas as pd
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Factor analysis / regression of quarterly fundamentals pulled from tushare.
year = 2019
quarter = 3

# Fetch the quarterly fundamental tables (network calls).
basics_df = ts.get_stock_basics("2019-09-30")
profit_df = ts.get_profit_data(year, quarter)
growth_df = ts.get_growth_data(year, quarter)
debtpaying_df = ts.get_debtpaying_data(year, quarter)
cashflow_df = ts.get_cashflow_data(year, quarter)

# Join all tables on the stock code.
df1 = pd.merge(basics_df, profit_df, on=['code'])
df2 = pd.merge(df1, growth_df, on=['code'])
df3 = pd.merge(df2, debtpaying_df, on=['code'])
df = pd.merge(df3, cashflow_df, on=['code'])
df = df.drop_duplicates(subset=['code'], keep='first')  # drop duplicated stock codes
df = df.dropna(axis=0, how='any')  # drop rows with any missing value
df = df.loc[:, ~df.columns.duplicated()]  # drop duplicated columns

# Derived ratios.
# NOTE(review): 'roa' is business_income/totalAssets while 'tassrat' is
# net_profits/totalAssets -- relative to their names (return on assets vs
# total asset turnover) the two formulas look swapped; confirm the intent.
col_a1 = df['business_income']
col_a2 = df['totalAssets']
col_a3 = df['net_profits']
col_a4 = df['sheqratio']
roa = col_a1 / col_a2
tassrat = col_a3 / col_a2
dbastrt = 1 - col_a4  # debt-to-asset ratio = 1 - shareholder-equity ratio
df.insert(5, 'roa', roa)
df.insert(6, 'tassrat', tassrat)
df.insert(7, 'dbastrt', dbastrt)

# Keep only the columns used in the analysis.
col = ['code', 'name', 'industry', 'roe', 'roa', 'tassrat', 'mbrg', 'targ',
       'currentratio', 'dbastrt', 'cf_sales']
df = pd.DataFrame(df, columns=col)

# Pre-processing: remove the financial industry and special-treatment stocks.
df = df[~df.industry.str.contains('金融行业')]
# BUG FIX: the original call str.contains('ST', 'PT') passed 'PT' as the
# *case* positional argument, so PT stocks were never filtered out; use a
# regex alternation to match either marker in the stock name.
df = df[~df.name.str.contains('ST|PT')]
df = df[df['mbrg'] > 1.5]
df = df[df['dbastrt'] < 1]

# Winsorize the 1% tails of each numeric factor.
winsorize_percent = 0.01
df['roe'] = stats.mstats.winsorize(df['roe'], (winsorize_percent, winsorize_percent))
df['roa'] = stats.mstats.winsorize(df['roa'], (winsorize_percent, winsorize_percent))
df['tassrat'] = stats.mstats.winsorize(df['tassrat'], (winsorize_percent, winsorize_percent))
df['mbrg'] = stats.mstats.winsorize(df['mbrg'], (winsorize_percent, winsorize_percent))
df['targ'] = stats.mstats.winsorize(df['targ'], (winsorize_percent, winsorize_percent))
df['currentratio'] = pd.to_numeric(df['currentratio'])
df['currentratio'] = stats.mstats.winsorize(df['currentratio'], (winsorize_percent, winsorize_percent))
df['dbastrt'] = stats.mstats.winsorize(df['dbastrt'], (winsorize_percent, winsorize_percent))
df['cf_sales'] = stats.mstats.winsorize(df['cf_sales'], (winsorize_percent, winsorize_percent))

print('------------------------------统计信息--------------------------------')
print(df.describe().T)  # count/mean/std/min/quartiles/max per column
print(df.drop(['code', 'name', 'industry'], axis=1).corr())  # correlation matrix
print('------------------------------统计信息--------------------------------')

# Regress (roe, roa) on the remaining factors with a 90/10 train/test split.
y = df[['roe', 'roa']]
df = df.drop(['code', 'name', 'industry', 'roe', 'roa'], axis=1)
# df = (df-df.min())/(df.max()-df.min())
# print(df)
X_train, X_test, y_train, y_test = train_test_split(df.values, y, test_size=0.1, random_state=0)
# print(X_train, X_test, y_train, y_test)
lr = LinearRegression()
lr.fit(X_train, y_train)
print('------------------------------训练、测试结果--------------------------------')
print(lr.coef_)
print(lr.intercept_)
print(lr.score(X_train, y_train))
print(lr.score(X_test, y_test))
print('------------------------------训练、测试结果--------------------------------')
|
"""
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import absolute_import
from django.conf.urls import patterns
from django.contrib import admin
from django.http import HttpResponse
from . import models, forms, admin as base_admin
class Admin2(admin.AdminSite):
    """A second AdminSite with custom templates and login form.

    Exercised by tests.CustomAdminSiteTests (see the module docstring).
    """
    login_form = forms.CustomAdminAuthenticationForm
    login_template = 'custom_admin/login.html'
    logout_template = 'custom_admin/logout.html'
    index_template = 'custom_admin/index.html'
    password_change_template = 'custom_admin/password_change_form.html'
    password_change_done_template = 'custom_admin/password_change_done.html'

    # A custom index view.
    def index(self, request, extra_context=None):
        # NOTE(review): the extra_context argument is ignored; the view
        # always injects {'foo': '*bar*'} as the extra context.
        return super(Admin2, self).index(request, {'foo': '*bar*'})

    def get_urls(self):
        # prepend a custom admin-protected view to the default admin URLs
        return patterns('',
            (r'^my_view/$', self.admin_view(self.my_view)),
        ) + super(Admin2, self).get_urls()

    def my_view(self, request):
        # trivial view served under my_view/ for the tests
        return HttpResponse("Django is a magical pony!")
# Instantiate the custom site and register the demo models on it, so the
# same models are served by both the default admin and this second site.
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
|
from tools.color_utils import ColorUtils
# 方形大小
class Rect:
    """Template rectangle to search for: its size plus colour features."""
    # rectangle size
    width: int
    height: int
    # aggregate feature: the sum of all pixel values inside the rectangle
    feature: int
    # feature point 1: colour at the rectangle origin (0, 0)
    feature_1: int
    # feature point 2: colour at the centre point (given as an offset
    # from the origin)
    offset_x2: int
    offset_y2: int
    feature_2: int
    # feature point 3: colour at the bottom-right corner;
    # the full sum is only computed once all three feature points match
    feature_3: int

    def __init__(self, width, height, feature):
        # NOTE(review): only width/height/feature are set here; feature_1..3
        # and offset_x2/offset_y2 must be assigned by the caller before the
        # rect is used for detection.
        self.width = width
        self.height = height
        self.feature = feature
# 方形范围
class RectRange:
    """Search window inside an image: [start_x, end_x) x [start_y, end_y)."""
    start_x: int
    start_y: int
    end_x: int
    end_y: int

    def __init__(self, start_x, start_y, end_x, end_y):
        self.start_x, self.start_y = start_x, start_y
        self.end_x, self.end_y = end_x, end_y
class RectDetect:
    """Scan an image for a Rect template by matching its colour features."""

    def __init__(self):
        return

    def detect(self, img, rect: Rect, detect_range: RectRange):
        """Return (x, y) of the first match, or None when nothing matches.

        The three single-pixel feature checks act as a cheap pre-filter;
        the full area sum is only computed when all three pass.
        """
        if detect_range is not None:
            x0, y0 = detect_range.start_x, detect_range.start_y
            x1, y1 = detect_range.end_x, detect_range.end_y
        else:
            # no window given: scan the whole image
            x0, y0 = 0, 0
            x1, y1 = img.width(), img.height()

        for y in range(y0, y1):
            for x in range(x0, x1):
                if (img.pixel(x, y) == rect.feature_1
                        and img.pixel(x + rect.offset_x2, y + rect.offset_y2) == rect.feature_2
                        and img.pixel(x + rect.width, y + rect.height) == rect.feature_3
                        and self.calculate_color(img, x, y, rect) == rect.feature):
                    return x, y
        print("找完了")

    def calculate_color(self, img, start_x, start_y, rect: Rect):
        """Sum every pixel value inside the rect anchored at (start_x, start_y)."""
        total: int = 0
        for col in range(start_x, start_x + rect.width):
            for row in range(start_y, start_y + rect.height):
                total = total + img.pixel(col, row)
        return total
|
# -*- coding: utf-8 -*-
"""
1.3 URLify: Write a method to replace all spaces in a string with '%20'. You may assume that the string
has sufficient space at the end to hold the additional characters, and that you are given the "true"
length of the string. (Note: If implementing in Java, please use a character array so that you can
perform this operation in place.)
EXAMPLE
Input: "Mr John Smith ", 13
Output: "Mr%20John%20Smith"
"""
s = "Mr John Smith "
# drop the trailing padding spaces so only the interior spaces get encoded
s = s.strip()
s = s.replace(' ', "%20")
print(s)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Base class for loading dataset for the CTC and attention-based model.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import kaldiio
import numpy as np
import os
import pandas as pd
from neural_sp.datasets.loader_base import Base
from neural_sp.datasets.token_converter.character import Char2idx
from neural_sp.datasets.token_converter.character import Idx2char
from neural_sp.datasets.token_converter.phone import Idx2phone
from neural_sp.datasets.token_converter.phone import Phone2idx
from neural_sp.datasets.token_converter.word import Idx2word
from neural_sp.datasets.token_converter.word import Word2idx
from neural_sp.datasets.token_converter.wordpiece import Idx2wp
from neural_sp.datasets.token_converter.wordpiece import Wp2idx
np.random.seed(1)
class Dataset(Base):
    """Dataset loader for CTC / attention-based ASR models.

    Reads a tsv manifest (plus optional sub-task manifests), sets up
    token<->index converters per unit, filters utterances, and serves
    mini-batches via make_batch().
    """

    def __init__(self, tsv_path, dict_path,
                 unit, batch_size, nlsyms=False, n_epochs=None,
                 is_test=False, min_n_frames=40, max_n_frames=2000,
                 shuffle=False, sort_by_input_length=False,
                 short2long=False, sort_stop_epoch=None,
                 n_ques=None, dynamic_batching=False,
                 ctc=False, subsample_factor=1,
                 wp_model=False, corpus='',
                 tsv_path_sub1=False, dict_path_sub1=False, unit_sub1=False,
                 wp_model_sub1=False,
                 ctc_sub1=False, subsample_factor_sub1=1,
                 wp_model_sub2=False,
                 tsv_path_sub2=False, dict_path_sub2=False, unit_sub2=False,
                 ctc_sub2=False, subsample_factor_sub2=1,
                 discourse_aware=False, skip_thought=False):
        """A class for loading dataset.

        Args:
            tsv_path (str): path to the dataset tsv file
            dict_path (str): path to the dictionary
            unit (str): word or wp or char or phone or word_char
            batch_size (int): size of mini-batch
            nlsyms (str): path to the non-linguistic symbols file
            n_epochs (int): max epoch. None means infinite loop.
            is_test (bool):
            min_n_frames (int): exclude utterances shorter than this value
            max_n_frames (int): exclude utterances longer than this value
            shuffle (bool): shuffle utterances.
                This is disabled when sort_by_input_length is True.
            sort_by_input_length (bool): sort all utterances in the ascending order
            short2long (bool): sort utterances in the descending order
            sort_stop_epoch (int): After sort_stop_epoch, training will revert
                back to a random order
            n_ques (int): number of elements to enqueue
            dynamic_batching (bool): change batch size dynamically in training
            ctc (bool):
            subsample_factor (int):
            wp_model (): path to the word-piece model for sentencepiece
            corpus (str): name of corpus
            discourse_aware (bool):
            skip_thought (bool):
        """
        super(Dataset, self).__init__()

        self.set = os.path.basename(tsv_path).split('.')[0]
        self.is_test = is_test
        self.unit = unit
        self.unit_sub1 = unit_sub1
        self.batch_size = batch_size
        self.max_epoch = n_epochs
        self.shuffle = shuffle
        self.sort_stop_epoch = sort_stop_epoch
        self.sort_by_input_length = sort_by_input_length
        self.n_ques = n_ques
        self.dynamic_batching = dynamic_batching
        self.corpus = corpus
        self.discourse_aware = discourse_aware
        self.skip_thought = skip_thought
        self.vocab = self.count_vocab_size(dict_path)

        self.eos = 2
        self.pad = 3
        # NOTE: reserved in advance

        self.idx2token = []
        self.token2idx = []

        # Set index converter for the main task
        if unit in ['word', 'word_char']:
            self.idx2token += [Idx2word(dict_path)]
            self.token2idx += [Word2idx(dict_path, word_char_mix=(unit == 'word_char'))]
        elif unit == 'wp':
            self.idx2token += [Idx2wp(dict_path, wp_model)]
            self.token2idx += [Wp2idx(dict_path, wp_model)]
        elif unit == 'char':
            self.idx2token += [Idx2char(dict_path)]
            self.token2idx += [Char2idx(dict_path, nlsyms=nlsyms)]
        elif 'phone' in unit:
            self.idx2token += [Idx2phone(dict_path)]
            self.token2idx += [Phone2idx(dict_path)]
        else:
            raise ValueError(unit)

        # Set index converters for the optional sub-tasks (sub1, sub2);
        # the *_sub arguments are looked up by name via locals()
        for i in range(1, 3):
            dict_path_sub = locals()['dict_path_sub' + str(i)]
            wp_model_sub = locals()['wp_model_sub' + str(i)]
            unit_sub = locals()['unit_sub' + str(i)]
            if dict_path_sub:
                setattr(self, 'vocab_sub' + str(i), self.count_vocab_size(dict_path_sub))

                # Set index converter
                if unit_sub:
                    if unit_sub == 'wp':
                        self.idx2token += [Idx2wp(dict_path_sub, wp_model_sub)]
                        self.token2idx += [Wp2idx(dict_path_sub, wp_model_sub)]
                    elif unit_sub == 'char':
                        self.idx2token += [Idx2char(dict_path_sub)]
                        self.token2idx += [Char2idx(dict_path_sub, nlsyms=nlsyms)]
                    elif 'phone' in unit_sub:
                        self.idx2token += [Idx2phone(dict_path_sub)]
                        self.token2idx += [Phone2idx(dict_path_sub)]
                    else:
                        raise ValueError(unit_sub)
            else:
                # sub-task not used: mark the vocab size as invalid
                setattr(self, 'vocab_sub' + str(i), -1)

        # Load dataset tsv file
        self.df = pd.read_csv(tsv_path, encoding='utf-8', delimiter='\t')
        self.df = self.df.loc[:, ['utt_id', 'speaker', 'feat_path',
                                  'xlen', 'xdim', 'text', 'token_id', 'ylen', 'ydim']]
        for i in range(1, 3):
            if locals()['tsv_path_sub' + str(i)]:
                df_sub = pd.read_csv(locals()['tsv_path_sub' + str(i)], encoding='utf-8', delimiter='\t')
                df_sub = df_sub.loc[:, ['utt_id', 'speaker', 'feat_path',
                                        'xlen', 'xdim', 'text', 'token_id', 'ylen', 'ydim']]
                setattr(self, 'df_sub' + str(i), df_sub)
            else:
                setattr(self, 'df_sub' + str(i), None)
        # input feature dimension read from the first feature matrix
        self.input_dim = kaldiio.load_mat(self.df['feat_path'][0]).shape[-1]

        # derive a session id from the speaker id
        if corpus == 'swbd':
            self.df['session'] = self.df['speaker'].apply(lambda x: str(x).split('-')[0])
        else:
            self.df['session'] = self.df['speaker'].apply(lambda x: str(x))

        if discourse_aware or skip_thought:
            # relax the frame-length limits for discourse-level training
            max_n_frames = 10000
            min_n_frames = 100

            # Sort by onset
            self.df = self.df.assign(prev_utt='')
            if corpus == 'swbd':
                self.df['onset'] = self.df['utt_id'].apply(lambda x: int(x.split('_')[-1].split('-')[0]))
            elif corpus == 'csj':
                self.df['onset'] = self.df['utt_id'].apply(lambda x: int(x.split('_')[1]))
            elif corpus == 'wsj':
                self.df['onset'] = self.df['utt_id'].apply(lambda x: x)
            else:
                raise NotImplementedError
            self.df = self.df.sort_values(by=['session', 'onset'], ascending=True)

            # Extract previous utterances
            if not skip_thought:
                # self.df = self.df.assign(line_no=list(range(len(self.df))))
                groups = self.df.groupby('session').groups
                self.df['n_session_utt'] = self.df.apply(
                    lambda x: len([i for i in groups[x['session']]]), axis=1)
                # self.df['prev_utt'] = self.df.apply(
                #     lambda x: [self.df.loc[i, 'line_no']
                #                for i in groups[x['session']] if self.df.loc[i, 'onset'] < x['onset']], axis=1)
                # self.df['n_prev_utt'] = self.df.apply(lambda x: len(x['prev_utt']), axis=1)
        elif is_test and corpus == 'swbd':
            # Sort by onset
            self.df['onset'] = self.df['utt_id'].apply(lambda x: int(x.split('_')[-1].split('-')[0]))
            self.df = self.df.sort_values(by=['session', 'onset'], ascending=True)

        # Remove inappropriate utterances
        if is_test:
            # at test time only drop utterances with empty references
            print('Original utterance num: %d' % len(self.df))
            n_utts = len(self.df)
            self.df = self.df[self.df.apply(lambda x: x['ylen'] > 0, axis=1)]
            print('Removed %d empty utterances' % (n_utts - len(self.df)))
        else:
            # at training time additionally enforce the frame-length limits
            print('Original utterance num: %d' % len(self.df))
            n_utts = len(self.df)
            self.df = self.df[self.df.apply(lambda x: min_n_frames <= x[
                'xlen'] <= max_n_frames, axis=1)]
            self.df = self.df[self.df.apply(lambda x: x['ylen'] > 0, axis=1)]
            print('Removed %d utterances (threshold)' % (n_utts - len(self.df)))

            if ctc and subsample_factor > 1:
                # CTC requires the label sequence to fit in the subsampled input
                n_utts = len(self.df)
                self.df = self.df[self.df.apply(lambda x: x['ylen'] <= (x['xlen'] // subsample_factor), axis=1)]
                print('Removed %d utterances (for CTC)' % (n_utts - len(self.df)))

            # apply the same CTC-length filter per sub-task and keep the
            # main and sub dataframes consistent with each other
            for i in range(1, 3):
                df_sub = getattr(self, 'df_sub' + str(i))
                ctc_sub = locals()['ctc_sub' + str(i)]
                subsample_factor_sub = locals()['subsample_factor_sub' + str(i)]
                if df_sub is not None:
                    if ctc_sub and subsample_factor_sub > 1:
                        df_sub = df_sub[df_sub.apply(
                            lambda x: x['ylen'] <= (x['xlen'] // subsample_factor_sub), axis=1)]

                    if len(self.df) != len(df_sub):
                        n_utts = len(self.df)
                        self.df = self.df.drop(self.df.index.difference(df_sub.index))
                        print('Removed %d utterances (for CTC, sub%d)' % (n_utts - len(self.df), i))
                        for j in range(1, i + 1):
                            setattr(self, 'df_sub' + str(j),
                                    getattr(self, 'df_sub' + str(j)).drop(getattr(self, 'df_sub' + str(j)).index.difference(self.df.index)))

        # Sort tsv records
        if not is_test:
            if discourse_aware:
                # group utterances by session, indexed by session length
                self.utt_offset = 0
                self.n_utt_session_dict = {}
                self.session_offset_dict = {}
                for session_id, ids in sorted(self.df.groupby('session').groups.items(), key=lambda x: len(x[1])):
                    n_utt = len(ids)
                    # key: n_utt, value: session_id
                    if n_utt not in self.n_utt_session_dict.keys():
                        self.n_utt_session_dict[n_utt] = []
                    self.n_utt_session_dict[n_utt].append(session_id)
                    # key: session_id, value: id for the first utterance in each session
                    self.session_offset_dict[session_id] = ids[0]
                self.n_utt_session_dict_epoch = copy.deepcopy(self.n_utt_session_dict)
                # if discourse_aware == 'state_carry_over':
                #     self.df = self.df.sort_values(by=['n_session_utt', 'utt_id'], ascending=short2long)
                # else:
                #     self.df = self.df.sort_values(by=['n_prev_utt'], ascending=short2long)
            elif sort_by_input_length:
                self.df = self.df.sort_values(by=['xlen'], ascending=short2long)
            elif shuffle:
                self.df = self.df.reindex(np.random.permutation(self.df.index))

        # indices of utterances not yet served this epoch
        self.rest = set(list(self.df.index))

    def make_batch(self, df_indices):
        """Create mini-batch per step.

        Args:
            df_indices (np.ndarray):
        Returns:
            batch (dict):
                xs (list): input data of size `[T, input_dim]`
                xlens (list): lengths of each element in xs
                ys (list): reference labels in the main task of size `[L]`
                ys_sub1 (list): reference labels in the 1st auxiliary task of size `[L_sub1]`
                ys_sub2 (list): reference labels in the 2nd auxiliary task of size `[L_sub2]`
                utt_ids (list): name of each utterance
                speakers (list): name of each speaker
                sessions (list): name of each session
        """
        # inputs: features are loaded lazily per batch (skipped for skip-thought)
        if self.skip_thought:
            xs = []
        else:
            xs = [kaldiio.load_mat(self.df['feat_path'][i]) for i in df_indices]

        # outputs: tokenize the raw text at test time, otherwise reuse
        # the pre-computed token ids from the tsv
        if self.is_test:
            ys = [self.token2idx[0](self.df['text'][i]) for i in df_indices]
        else:
            ys = [list(map(int, str(self.df['token_id'][i]).split())) for i in df_indices]

        # label history of preceding utterances (discourse-aware mode only)
        ys_hist = [[] for _ in range(len(df_indices))]
        if self.discourse_aware:
            for j, i in enumerate(df_indices):
                for idx in self.df['prev_utt'][i]:
                    ys_hist[j].append(list(map(int, str(self.df['token_id'][idx]).split())))

        # neighbouring utterances for skip-thought training; empty when the
        # neighbour is missing or belongs to another session
        ys_prev, ys_next = [], []
        text_prev, text_next = [], []
        if self.skip_thought:
            for i in df_indices:
                if i - 1 in self.df.index and self.df['session'][i - 1] == self.df['session'][i]:
                    ys_prev += [list(map(int, str(self.df['token_id'][i - 1]).split()))]
                    text_prev += [self.df['text'][i - 1]]
                else:
                    ys_prev += [[]]
                    text_prev += ['']  # first utterance
                if i + 1 in self.df.index and self.df['session'][i + 1] == self.df['session'][i]:
                    ys_next += [list(map(int, str(self.df['token_id'][i + 1]).split()))]
                    text_next += [self.df['text'][i + 1]]
                else:
                    ys_next += [[]]  # last utterance
                    text_next += ['']

        # sub-task 1 labels: from its own tsv if provided, otherwise
        # re-tokenize the main text with the sub-task converter
        ys_sub1 = []
        if self.df_sub1 is not None:
            ys_sub1 = [list(map(int, str(self.df_sub1['token_id'][i]).split())) for i in df_indices]
        elif self.vocab_sub1 > 0 and not self.is_test:
            ys_sub1 = [self.token2idx[1](self.df['text'][i]) for i in df_indices]

        # sub-task 2 labels, same logic as sub-task 1
        ys_sub2 = []
        if self.df_sub2 is not None:
            ys_sub2 = [list(map(int, str(self.df_sub2['token_id'][i]).split())) for i in df_indices]
        elif self.vocab_sub2 > 0 and not self.is_test:
            ys_sub2 = [self.token2idx[2](self.df['text'][i]) for i in df_indices]

        batch_dict = {
            'xs': xs,
            'xlens': [self.df['xlen'][i] for i in df_indices],
            'ys': ys,
            'ys_hist': ys_hist,
            'ys_sub1': ys_sub1,
            'ys_sub2': ys_sub2,
            'utt_ids': [self.df['utt_id'][i] for i in df_indices],
            'speakers': [self.df['speaker'][i] for i in df_indices],
            'sessions': [self.df['session'][i] for i in df_indices],
            'text': [self.df['text'][i] for i in df_indices],
            'feat_path': [self.df['feat_path'][i] for i in df_indices],  # for plot
            'ys_prev': ys_prev,
            'text_prev': text_prev,
            'ys_next': ys_next,
            'text_next': text_next,
        }
        return batch_dict
|
import hoomd
import hoomd.md
import hoomd.dump
import hoomd.group
from hoomd.htf import tfcompute
import tensorflow as tf
from sys import argv as argv
from math import sqrt
# CLI: expect exactly two arguments — the particle count and the directory
# holding the trained TF model to bootstrap from.
if(len(argv) != 3):
    print('Usage: basic_ann_ff.py [N_PARTICLES] [training_dir]')
    exit(1)  # was exit(0): a usage error should report failure to the shell
N = int(argv[1])
training_dir = argv[2]
# Run inference: the trained network supplies forces, HOOMD integrates.
with hoomd.htf.tfcompute(bootstrap=training_dir
                         ) as tfcompute:
    hoomd.context.initialize('--gpu_error_checking')
    sqrt_N = int(sqrt(N))  # assumes N is a perfect square — TODO confirm upstream
    rcut = 3.0
    # square lattice, sqrt_N x sqrt_N particles with spacing a=2.0
    system = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=2.0),
                                       n=[sqrt_N, sqrt_N])
    nlist = hoomd.md.nlist.cell(check_period=1)
    # we're loading forces now, so no HOOMD calculation
    hoomd.md.integrate.mode_standard(dt=0.005)
    hoomd.md.integrate.langevin(group=hoomd.group.all(), kT=0.2, seed=42)
    # constrain motion to the x=y line in the plane
    hoomd.md.constrain.oneD(group=hoomd.group.all(),
                            constraint_vector=[1, 1, 0])
    # equilibrate for 4k steps first
    hoomd.run(4000)
    # attach the trained model as the force computer (dropout disabled via feed)
    tfcompute.attach(nlist, r_cut=rcut,
                     feed_func=lambda x: {'keep_prob:0': 0.5})
    hoomd.analyze.log(filename='INFERENCE_log.log',
                      quantities=['potential_energy', 'temperature'],
                      period=10,
                      overwrite=True)
    hoomd.dump.gsd(filename='INFERENCE_trajectory.gsd',
                   period=10, group=hoomd.group.all(), overwrite=True)
    # run for 5k steps with dumped trajectory and logged PE and T
    hoomd.run(5000)
|
import sys
# For every line of the OCR dump, collect each letter a-z that occurs in
# it (scanned in alphabetical order), then emit all hits as one string.
alphabet = list('abcdefghijklmnopqrstuvwxyz')
found_letters = []
with open('commentFromOcrHtml.txt') as ocr_file:
    for line in ocr_file:
        for letter in alphabet:
            if letter in line:
                # line[line.index(letter)] is the letter itself
                found_letters.append(letter)
sys.stdout.write(''.join(found_letters))
sys.stdout.flush()
|
import unittest
from apidaze.http import Http, HttpMethodEnum
from urllib3_mock import Responses
import json
responses = Responses('urllib3')  # patches urllib3 so requests are answered by the mocks registered below
class TestHttp(unittest.TestCase):
    """Exercises Http request dispatch against a mocked urllib3 transport."""

    @property
    def httpInstance(self):
        """A fresh Http client wired to dummy credentials and base URL."""
        return Http(
            api_key='API_KEY',
            api_secret='API_SECRET',
            api_url='http://api.url')

    @responses.activate
    def prepare_request(self, method, status_code, body):
        """Register one mocked endpoint response, fire a request, verify echo."""
        responses.add(method=method,
                      url='/API_KEY/endpoint',
                      body=json.dumps(body),
                      status=status_code,
                      adding_headers={'content-type': 'application/json'}
                      )
        result = self.httpInstance.request(
            method=HttpMethodEnum[method],
            endpoint='/endpoint',
            payload={},
            headers={}
        )
        self.assertEqual(body, result.json())
        self.assertEqual(status_code, result.status_code)

    def test_enum_get(self):
        self.assertIsInstance(HttpMethodEnum.GET, HttpMethodEnum)
        self.assertEqual('get', HttpMethodEnum.GET.value)

    def test_enum_post(self):
        self.assertIsInstance(HttpMethodEnum.POST, HttpMethodEnum)
        self.assertEqual('post', HttpMethodEnum.POST.value)

    def test_request_post_success(self):
        self.prepare_request('POST', 200, {'success': True})

    def test_request_post_failure(self):
        self.prepare_request('POST', 401, {'success': False})

    def test_request_get_success(self):
        self.prepare_request('GET', 200, {'success': True})

    def test_request_get_failure(self):
        self.prepare_request('GET', 401, {'success': False})

    def test_request_delete_success(self):
        self.prepare_request('DELETE', 200, {'success': True})

    def test_request_delete_failure(self):
        self.prepare_request('DELETE', 401, {'success': False})
|
import json
import random
import sys

import Cookie
import requests
from bs4 import BeautifulSoup
from urlparse import urljoin
outputFP = './public/graphFile.json'  # destination of the D3 graph JSON written by writeToFile()
# Function to write urls from DFS to json file for
# d3
# Args: urls in order from DFS, filePath to save
# Returns: None
def writeToFile(urls, filePath):
    """Write the DFS url chain to *filePath* as a D3 node/link JSON graph.

    Nodes are numbered 1..len(urls); links chain node i to node i+1.
    Built with the json module instead of hand-concatenated strings, so
    urls containing quotes or backslashes no longer yield invalid JSON.
    """
    graph = {
        'nodes': [{'name': 'URL', 'label': link, 'id': counter + 1}
                  for counter, link in enumerate(urls)],
        'links': [{'source': i, 'target': i + 1, 'type': 'Links_To'}
                  for i in range(1, len(urls))],
    }
    with open(filePath, 'w+') as f:
        json.dump(graph, f, indent=1)
# Function to write urls from DFS to json cookie for d3
# Args: urls in order from DFS, cookie name
# Returns: None
def writeToCookie(urls):
    """Print the DFS url chain as a D3 node/link JSON graph (cookie payload).

    Same structure as writeToFile(); json.dumps replaces the hand-built
    string so urls with quotes/backslashes stay valid JSON.
    """
    graph = {
        'nodes': [{'name': 'URL', 'label': link, 'id': counter + 1}
                  for counter, link in enumerate(urls)],
        'links': [{'source': i, 'target': i + 1, 'type': 'Links_To'}
                  for i in range(1, len(urls))],
    }
    print(json.dumps(graph, indent=1))
# Function to parse page for all links
# Args: url for page in question, list of links from previous page
# Returns: array/list of links
def parsePage(url, links):
    """Fetch *url* and return the absolute hrefs of every <a> tag on it.

    On any fetch/parse failure, retries recursively with a random url drawn
    from *links* (candidates left over from the previous page), removing
    each candidate as it is tried; returns [] when none remain.
    """
    #fetch page
    sys.stderr.write("Fetching page..\n")
    try:
        r = requests.get(url)
        page = r.content
        sys.stderr.write("Parsing..\n")
        soup = BeautifulSoup(page, "html.parser")
        if not soup.body:
            raise Exception("Page not parsed\n")
        # success: replace the old candidates with this page's links
        links = []
        for link in soup.find_all('a'):
            # urljoin resolves relative hrefs against the current page
            links.append(urljoin(url, link.get('href')))
    except Exception as e:
        #pick another link if available
        sys.stderr.write("Error: " + str(e))
        if len(links) < 1:
            sys.stderr.write("No other links available.\n")
            return []
        else:
            sys.stderr.write("Choosing a different link..\n")
            randNum = random.randint(0, len(links)-1)
            url = links[randNum]
            del links[randNum]  # don't retry the same candidate twice
            return parsePage(url, links)
    else:
        return links
# Function that performs the Depth First Traversal of links for a
# source page
# Args: valid url, depth to traverse, list of links
# Returns: None
def DFT(url, depth, urlChain, links, keyword):
    """Random depth-first walk: record *url*, then hop to a random link.

    Stops when the depth budget runs out, no links remain, or *keyword*
    (when non-empty) appears in any visited url; the chain is then written
    to the graph JSON file.
    """
    urlChain.append(url)
    links = parsePage(url, links)
    keyword_hit = keyword and any(keyword in page for page in urlChain)
    if depth == 0 or links == [] or keyword_hit:
        sys.stderr.write("Depth reached, keyword found, no links on page, or url format incorrect. Generating results..\n")
        writeToFile(urlChain, outputFP)
        #writeToCookie(urlChain)
        return
    sys.stderr.write("Choosing link at random..\n")
    # pop removes the candidate so it cannot be revisited on error retries
    next_url = links.pop(random.randint(0, len(links) - 1))
    sys.stderr.write("Random url is: " + next_url + "\n")
    DFT(next_url, depth - 1, urlChain, links, keyword)
def main():
    """CLI entry point: crawler.py <url> <depth> [keyword]."""
    if len(sys.argv) < 3:
        sys.stderr.write("No url and/or depth argument provided\n")
        exit(1)
    #Initialize list of variables
    initUrl = sys.argv[1]
    depth = int(sys.argv[2])
    # Bug fix: keyword used to be read unconditionally from sys.argv[3],
    # which raised IndexError whenever it was omitted even though the
    # usage check above only requires url and depth. Treat it as optional.
    keyword = sys.argv[3] if len(sys.argv) > 3 else ''
    urlChain = []
    links = []
    sys.stderr.write("keyword is: " + keyword + "\n")
    DFT(initUrl, depth, urlChain, links, keyword)
if __name__ == "__main__":
    main()  # run the crawler only when executed as a script
|
# Generated by Django 2.2.3 on 2019-07-30 08:15
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the Attendee table.

    The primary key is the non-editable UUID field 'ticket' generated by
    uuid.uuid4, so each attendee's ticket is an unguessable identifier.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Attendee',
            fields=[
                ('first_name', models.CharField(max_length=32, verbose_name='first name')),
                ('middle_name', models.CharField(blank=True, default=None, max_length=64, null=True, verbose_name='middle name')),
                ('last_name', models.CharField(max_length=32, verbose_name='last name')),
                ('phone_number', models.CharField(max_length=128, verbose_name='phone number')),
                ('email', models.CharField(max_length=32, verbose_name='email')),
                ('ticket', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ticket')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ('first_name', 'last_name'),
            },
        ),
    ]
|
import os
# Recognized C/C++ source-file suffixes (compared case-insensitively).
SRC_SUFFIX = ['.c', '.cpp', '.cc', '.cxx']


class Workspace:
    """Collects C/C++ source files from a set of include paths.

    Each entry of include_paths may be a directory (searched recursively)
    or a single file (kept when it carries a recognized source suffix).
    """

    def __init__(self, include_paths):
        self.include_paths = include_paths

    def calculate(self):
        """Return the list of all matching source-file paths."""
        paths = []
        for entry in self.include_paths:
            if os.path.isdir(entry):
                paths.extend(self._walk(entry))
            elif self.should_include(entry):
                paths.append(entry)
        return paths

    def should_include(self, name):
        """True when *name* ends with one of the known source suffixes."""
        return name.lower().endswith(tuple(SRC_SUFFIX))

    def _walk(self, directory):
        """Recursively collect matching files below *directory*.

        The loop target is named `found_path` so it no longer shadows the
        method's argument (the original reused `path` for both).
        """
        found = []
        for root, _directories, files in os.walk(directory):
            for name in files:
                if self.should_include(name):
                    found_path = os.path.join(root, name)
                    found.append(found_path)
        return found
|
import pytest
from shop_restapi.models.item import ItemModel
from shop_restapi.models.store import StoreModel
from shop_restapi.models.user import UserModel
@pytest.fixture
def item():
    """A sample ItemModel belonging to store id 1."""
    return ItemModel(name='item_name', price=1.00, store_id=1)
@pytest.fixture
def store():
    """A sample StoreModel."""
    return StoreModel(name='store_name')
@pytest.fixture
def user():
    """A sample UserModel with a plain-text test password."""
    return UserModel(username='user_name', password='password')
|
# -*- coding: utf-8 -*-
import os
from .common_utils import ScriptRunner, forceIP
from .exceptions import ParamProcessingError, NetworkError
__all__ = ('ParamProcessingError', 'processHost', 'processSSHKey')
def processHost(param, process_args=None):
    """
    Given parameter is a hostname, try to change it to IP address
    """
    # honour the optional 'allow_localhost' flag from process_args
    localhost = process_args and \
        process_args.get('allow_localhost', False)
    try:
        return forceIP(param, allow_localhost=localhost)
    except NetworkError as ex:
        # 'except E as ex' works on Python 2.6+ and is required on Python 3;
        # the previous 'except NetworkError, ex' form is Python-2-only.
        raise ParamProcessingError(str(ex))
def processSSHKey(param, process_args=None):
    """
    Generates SSH key if given key in param doesn't exist. In case param
    is an empty string it generates default SSH key ($HOME/.ssh/id_rsa).
    """
    def create_key(path):
        # generate a new passphrase-less key pair at *path*
        local = ScriptRunner()
        # create new ssh key
        local.append('ssh-keygen -f %s -N ""' % path)
        local.execute()
    if not param:
        # default location: $HOME/.ssh/id_rsa with matching .pub
        key_file = '%s/.ssh/id_rsa' % os.environ["HOME"]
        param = '%s.pub' % key_file
        if not os.path.isfile(param):
            create_key(key_file)
    elif not os.path.isfile(param):
        # strip a trailing '.pub' so ssh-keygen writes the matching pair
        # (conditional expression replaces the fragile 'and/or' idiom,
        # which mis-fired when the stripped name was falsy)
        key_file = param[:-4] if param.endswith('.pub') else param
        create_key(key_file)
    return param
|
# https://pymotw.com/3/asyncio/control.html
"""
wait() can be used to pause one coroutine until th other background
operations complete - if order of execution doesn't matter.
"""
import asyncio
async def phase(i):
    """Run one phase: log entry/exit around a short sleep, return its label."""
    label = 'phase {}'.format(i)
    print('in ' + label)
    await asyncio.sleep(0.1 * i)
    print('done with ' + label)
    return label + ' result'
async def main(num_phases):
    """Create num_phases phase coroutines, then wait for all of them."""
    print('starting main')
    phases = [phase(i) for i in range(num_phases)]
    await asyncio.sleep(2)  # Prove nothing will run yet
    print('waiting for phases to complete')
    # asyncio.wait() rejects bare coroutines since Python 3.11; wrap them
    # in Tasks here. They are only scheduled at this point, so the
    # "nothing runs during the sleep" demonstration above still holds.
    tasks = [asyncio.ensure_future(coro) for coro in phases]
    completed, pending = await asyncio.wait(tasks)
    results = [t.result() for t in completed]
    print('results: {!r}'.format(results))
# asyncio.run() (Python 3.7+) creates, runs and closes the loop for us,
# replacing the deprecated get_event_loop()/run_until_complete() pattern.
asyncio.run(main(3))
|
'''
Sets in Python — key points:
1. A set is written with curly braces {}.
2. Set values have no defined order, so fetching an element by index
   position raises an error.
3. Values in a set are always unique.
4. The apparent ordering of set values can change between runs.

Sets are used to remove duplicates from a collection, and to merge
several sets together while filtering out duplicate values.
'''
# create a normal set
set_a = {1,2,3,4,5,6,7,8}
# check the datatype of the set
print('\nCehck datatype: ', type(set_a))
# indexing does not work on sets (uncommenting the next line raises TypeError)
# print('\nGet 1 by index of set_a: ', set_a[1])
# print set_a
print('\nset_a: ', set_a)
# membership test: is a particular value present in the set?
print('\nCheck 1 is avalibale in set or not: ', 1 in set_a)
# add a single value with add()
set_a.add(9)
print('\nAdd 9 in set: ', set_a)
# add several values at once with update()
set_a.update([10,11,12])
print('\nAdd multiple values in set: ', set_a)
# length of the set via len()
print('\nCheck len of set: ', len(set_a))
# remove a value with remove()
# removing the same value twice raises KeyError; discard() avoids that
set_a.remove(12)
# (example) take user input for the value to remove
# number = input('Enter number b/w 1 to 12: ')
# set_a.remove(number)
print('\nDelete 12 from set a: ', set_a)
# discard() silently ignores values that are not present
set_a.discard(11)
set_a.discard(11)
print('\nDiscard 12 again: ', set_a)
# pop() also removes a value, but sets are unordered, so which element
# is removed is unpredictable
set_a.pop()
print('\nPop: ', set_a)
# copy the set into another variable with copy()
set_b = set_a.copy()
print('\nset b: ', set_b)
# empty the set with clear()
set_a.clear()
print('\nset a: ', set_a)
# the del keyword deletes the set_a name entirely
del set_a
# print(set_a)
# a new set for the join examples
set_a = {2,4,6,8,0,11,13,15}
# join two sets with union()
print('\nset a: ',set_a)
print('\nset b: ', set_b)
set_c = set_a.union(set_b)
print('\nset c',set_c)
# update() merges set_b into set_a in place
set_a.update(set_b)
print('\nset a: ', set_a)
# is set_b a subset of set_a?
print('\nIs set_b, subset or set_a: ', set_b.issubset(set_a))
# is set_a a superset of set_b?
print('\nCheck set a is subper set of set b: ', set_a.issuperset(set_b))
# elements of set_a that are not in set_b
diff = set_a.difference(set_b)
print('\nDifference: ', diff)
# elements common to both sets
inter = set_a.intersection(set_b)
print('\nInter: ', inter)
import requests
import json
from datetime import datetime
from mirrorpy.plugin import Plugin
class WeatherPlugin(Plugin):
    """Renders current weather plus a short forecast as HTML snippets,
    backed by the OpenWeatherMap API."""
    # Base endpoint; {0} is filled with 'weather' or 'forecast'.
    # NOTE(review): the API key is hardcoded here — move it to config.
    baseurl = "http://api.openweathermap.org/data/2.5/{0}?units=metric&APPID=1ba44d08af637d2899097f510bf9f882"
    # Fully-parameterised query template, set once in __init__.
    query_url = None
    def __init__(self, name="", query=None, city=None, coords=None):
        # name: display label; exactly one of city/coords/query selects the location
        self.name = name
        if city is not None:
            # NOTE(review): OpenWeatherMap's city-name parameter is 'q';
            # confirm this '&city=' branch actually works against the API.
            self.query_url = self.baseurl + "&city=%s" % city
        elif coords is not None:
            # coords is assumed to be a (lat, lon) 2-tuple — TODO confirm callers
            self.query_url = self.baseurl + "&lat=%s&lon=%s" % coords
        elif query is not None:
            self.query_url = self.baseurl + "&q=%s" % query
        else:
            raise ValueError("Neither city nor coords provided")
    def get(self):
        """Return the HTML fragment: current conditions row + 9 forecast slots."""
        template = """
        <div class="row">
            <div class="col-xs-4 weather-location">{2}</div>
            <div class="col-xs-7 weather-temp-current">{0}&deg; {1}</div>
        </div>
        """
        print(self.query_url.format("weather"))
        # current conditions
        response = requests.get(self.query_url.format("weather"))
        j = json.loads(response.content.decode('utf-8'))
        tmp = ""
        for i in j["weather"]:
            tmp += "<img class='weather-icon-current' src='http://openweathermap.org/img/w/%s.png' />" % (i["icon"])
        result = template.format(round(j["main"]["temp"]), tmp, self.name)
        # forecast: first 9 three-hour slots
        response = requests.get(self.query_url.format("forecast"))
        j = json.loads(response.content.decode('utf-8'))
        result += """<div class="row">"""
        template = """<div class="col-xs-1 weather-temp-forecast">{0}<br />{2}<br/>{1}&deg;</div>"""
        cnt = 0
        for i in j["list"]:
            if cnt >= 9:
                break
            tmp = ""
            for k in i["weather"]:
                tmp += "<img class='weather-icon-forecast' src='http://openweathermap.org/img/w/%s.png' />" % (k["icon"])
            result += template.format(datetime.fromtimestamp(i["dt"]).hour, round(i["main"]["temp"]), tmp)
            cnt += 1
        result += "</div>"
        return result
|
# Mapping of Dota 2 hero ids to their internal hero names (ids 1-10).
hero_ids = {
    1: 'antimage',
    2: 'axe',
    3: 'bane',
    4: 'bloodseeker',
    5: 'crystal_maiden',
    6: 'drow_ranger',
    7: 'earthshaker',
    8: 'juggernaut',
    9: 'mirana',
    10: 'morphling'  # fixed typo: was 'morhpling'
}
import tensorflow as tf
import numpy as np
from tensorflow.contrib.distributions import Normal
from ..ops import backward_warp, forward_warp
from .image_warp import image_warp
DISOCC_THRESH = 0.8  # forward-warp density below this marks a pixel as disoccluded
def length_sq(x):
    """Per-pixel squared L2 norm over the channel axis (3), kept as a singleton dim."""
    return tf.reduce_sum(tf.square(x), 3, keepdims=True)
def compute_losses(im1, im2, flow_fw, flow_bw,
                   border_mask=None,
                   mask_occlusion='',
                   data_max_distance=1):
    """Assemble the dictionary of unsupervised optical-flow losses for a pair.

    Args:
        im1, im2: image batches (NHWC — assumed RGB; TODO confirm).
        flow_fw: forward flow (im1 -> im2); flow_bw: backward flow.
        border_mask: optional validity mask; when None, an outgoing-pixel
            mask is derived from each flow field instead.
        mask_occlusion: '' (no occlusion masking), 'fb' (forward-backward
            consistency) or 'disocc' (forward-warp disocclusion).
        data_max_distance: patch radius for the ternary (census) loss.
    Returns:
        dict mapping loss name -> scalar tensor.
    """
    losses = {}
    # Cross-warp each image with the opposite flow for the data terms.
    im2_warped = image_warp(im2, flow_fw)
    im1_warped = image_warp(im1, flow_bw)
    im_diff_fw = im1 - im2_warped
    im_diff_bw = im2 - im1_warped
    # Disocclusion: pixels whose forward-warp density falls below threshold.
    disocc_fw = tf.cast(forward_warp(flow_fw) < DISOCC_THRESH, tf.float32)
    disocc_bw = tf.cast(forward_warp(flow_bw) < DISOCC_THRESH, tf.float32)
    if border_mask is None:
        mask_fw = create_outgoing_mask(flow_fw)
        mask_bw = create_outgoing_mask(flow_bw)
    else:
        mask_fw = border_mask
        mask_bw = border_mask
    # Forward-backward consistency: fw flow plus warped bw flow should cancel.
    flow_bw_warped = image_warp(flow_bw, flow_fw)
    flow_fw_warped = image_warp(flow_fw, flow_bw)
    flow_diff_fw = flow_fw + flow_bw_warped
    flow_diff_bw = flow_bw + flow_fw_warped
    # Magnitude-dependent occlusion threshold (UnFlow, eq. for fb check).
    mag_sq_fw = length_sq(flow_fw) + length_sq(flow_bw_warped)
    mag_sq_bw = length_sq(flow_bw) + length_sq(flow_fw_warped)
    occ_thresh_fw = 0.01 * mag_sq_fw + 0.5
    occ_thresh_bw = 0.01 * mag_sq_bw + 0.5
    fb_occ_fw = tf.cast(length_sq(flow_diff_fw) > occ_thresh_fw, tf.float32)
    fb_occ_bw = tf.cast(length_sq(flow_diff_bw) > occ_thresh_bw, tf.float32)
    if mask_occlusion == 'fb':
        mask_fw *= (1 - fb_occ_fw)
        mask_bw *= (1 - fb_occ_bw)
    elif mask_occlusion == 'disocc':
        mask_fw *= (1 - disocc_bw)
        mask_bw *= (1 - disocc_fw)
    occ_fw = 1 - mask_fw
    occ_bw = 1 - mask_bw
    # 'sym' couples estimated occlusion with disocclusion of the other frame.
    losses['sym'] = (charbonnier_loss(occ_fw - disocc_bw) +
                     charbonnier_loss(occ_bw - disocc_fw))
    # Penalize marking too many pixels occluded.
    losses['occ'] = (charbonnier_loss(occ_fw) +
                     charbonnier_loss(occ_bw))
    losses['photo'] = (photometric_loss(im_diff_fw, mask_fw) +
                       photometric_loss(im_diff_bw, mask_bw))
    losses['grad'] = (gradient_loss(im1, im2_warped, mask_fw) +
                      gradient_loss(im2, im1_warped, mask_bw))
    losses['smooth_1st'] = (smoothness_loss(flow_fw) +
                            smoothness_loss(flow_bw))
    losses['smooth_2nd'] = (second_order_loss(flow_fw) +
                            second_order_loss(flow_bw))
    losses['fb'] = (charbonnier_loss(flow_diff_fw, mask_fw) +
                    charbonnier_loss(flow_diff_bw, mask_bw))
    losses['ternary'] = (ternary_loss(im1, im2_warped, mask_fw,
                                      max_distance=data_max_distance) +
                         ternary_loss(im2, im1_warped, mask_bw,
                                      max_distance=data_max_distance))
    return losses
def ternary_loss(im1, im2_warped, mask, max_distance=1):
    """Census-transform (ternary) data loss between im1 and the warped im2.

    Both images are census-transformed over a (2*max_distance+1)^2 patch,
    compared with a soft Hamming distance, and penalized with the
    Charbonnier loss restricted to valid (non-border) positions.
    """
    patch_size = 2 * max_distance + 1
    with tf.variable_scope('ternary_loss'):
        def _ternary_transform(image):
            # soft census transform: per-pixel difference to each neighbour,
            # squashed into (-1, 1)
            intensities = tf.image.rgb_to_grayscale(image) * 255
            #patches = tf.extract_image_patches( # fix rows_in is None
            #    intensities,
            #    ksizes=[1, patch_size, patch_size, 1],
            #    strides=[1, 1, 1, 1],
            #    rates=[1, 1, 1, 1],
            #    padding='SAME')
            out_channels = patch_size * patch_size
            # identity-per-offset kernel: conv gathers each neighbour into its own channel
            w = np.eye(out_channels).reshape((patch_size, patch_size, 1, out_channels))
            weights = tf.constant(w, dtype=tf.float32)
            patches = tf.nn.conv2d(intensities, weights, strides=[1, 1, 1, 1], padding='SAME')
            transf = patches - intensities
            transf_norm = transf / tf.sqrt(0.81 + tf.square(transf))
            return transf_norm
        def _hamming_distance(t1, t2):
            # soft Hamming distance, each channel contributing in [0, 1)
            dist = tf.square(t1 - t2)
            dist_norm = dist / (0.1 + dist)
            dist_sum = tf.reduce_sum(dist_norm, 3, keepdims=True)
            return dist_sum
        t1 = _ternary_transform(im1)
        t2 = _ternary_transform(im2_warped)
        dist = _hamming_distance(t1, t2)
        # exclude the border where the census patch is incomplete
        transform_mask = create_mask(mask, [[max_distance, max_distance],
                                            [max_distance, max_distance]])
        return charbonnier_loss(dist, mask * transform_mask)
def occlusion(flow_fw, flow_bw):
    """Forward/backward-consistency occlusion masks (1.0 = occluded)."""
    mag_sq = length_sq(flow_fw) + length_sq(flow_bw)
    # Warp each flow into the other frame; for a consistent, visible pixel
    # the forward flow and the warped backward flow should cancel out.
    warped_bw = image_warp(flow_bw, flow_fw)
    warped_fw = image_warp(flow_fw, flow_bw)
    # magnitude-dependent tolerance on the residual
    thresh = 0.01 * mag_sq + 0.5
    occ_fw = tf.cast(length_sq(flow_fw + warped_bw) > thresh, tf.float32)
    occ_bw = tf.cast(length_sq(flow_bw + warped_fw) > thresh, tf.float32)
    return occ_fw, occ_bw
#def disocclusion(div):
# """Creates binary disocclusion map based on flow divergence."""
# return tf.round(norm(tf.maximum(0.0, div), 0.3))
#def occlusion(im_diff, div):
# """Creates occlusion map based on warping error & flow divergence."""
# gray_diff = tf.image.rgb_to_grayscale(im_diff)
# return 1 - norm(gray_diff, 20.0 / 255) * norm(tf.minimum(0.0, div), 0.3)
def divergence(flow):
    """Divergence of a 2-channel flow field using Sobel derivatives.

    Computes d(u)/dx + d(v)/dy and returns a single-channel map.
    """
    with tf.variable_scope('divergence'):
        filter_x = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]] # sobel filter
        filter_y = np.transpose(filter_x)
        weight_array_x = np.zeros([3, 3, 1, 1])
        weight_array_x[:, :, 0, 0] = filter_x
        weights_x = tf.constant(weight_array_x, dtype=tf.float32)
        weight_array_y = np.zeros([3, 3, 1, 1])
        weight_array_y[:, :, 0, 0] = filter_y
        weights_y = tf.constant(weight_array_y, dtype=tf.float32)
        # split the u (horizontal) and v (vertical) flow channels
        flow_u, flow_v = tf.split(axis=3, num_or_size_splits=2, value=flow)
        grad_x = conv2d(flow_u, weights_x)
        grad_y = conv2d(flow_v, weights_y)
        # sum the two partial derivatives into one channel
        div = tf.reduce_sum(tf.concat(axis=3, values=[grad_x, grad_y]), 3, keepdims=True)
        return div
def norm(x, sigma):
    """Gaussian decay.
    Result is 1.0 for x = 0 and decays towards 0 for |x| > sigma.
    """
    # NOTE(review): relies on tf.contrib.distributions.Normal.pdf; newer TF
    # distributions renamed this to prob() — confirm against the pinned version.
    dist = Normal(0.0, sigma)
    return dist.pdf(x) / dist.pdf(0.0)
def diffusion_loss(flow, im, occ):
    """Forces diffusion weighted by motion, intensity and occlusion label similarity.
    Inspired by Bilateral Flow Filtering.
    """
    def neighbor_diff(x, num_in=1):
        # Build a conv kernel that emits, for each input channel, the
        # difference between the center pixel and each of its 8 neighbours.
        weights = np.zeros([3, 3, num_in, 8 * num_in])
        out_channel = 0
        for c in range(num_in): # over input channels
            for n in [0, 1, 2, 3, 5, 6, 7, 8]: # over neighbors
                weights[1, 1, c, out_channel] = 1
                weights[n // 3, n % 3, c, out_channel] = -1
                out_channel += 1
        weights = tf.constant(weights, dtype=tf.float32)
        return conv2d(x, weights)
    # Create 8 channel (one per neighbor) differences
    occ_diff = neighbor_diff(occ)
    flow_diff_u, flow_diff_v = tf.split(axis=3, num_or_size_splits=2, value=neighbor_diff(flow, 2))
    flow_diff = tf.sqrt(tf.square(flow_diff_u) + tf.square(flow_diff_v))
    intensity_diff = tf.abs(neighbor_diff(tf.image.rgb_to_grayscale(im)))
    # Similar intensity + similar motion -> strong diffusion pressure on
    # differing occlusion labels / flows.
    diff = norm(intensity_diff, 7.5 / 255) * norm(flow_diff, 0.5) * occ_diff * flow_diff
    return charbonnier_loss(diff)
def photometric_loss(im_diff, mask):
    """Charbonnier penalty on masked image differences, scaled to 0-255 range."""
    return charbonnier_loss(im_diff, mask, beta=255)
def conv2d(x, weights):
    """Stride-1, SAME-padded 2-D convolution helper."""
    return tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding='SAME')
def _smoothness_deltas(flow):
    """First-order differences of u and v flow channels plus a validity mask.

    Returns (delta_u, delta_v, mask) where channel 0 of each delta is the
    horizontal difference and channel 1 the vertical one; the mask zeroes
    positions whose forward neighbour lies outside the image.
    """
    with tf.variable_scope('smoothness_delta'):
        mask_x = create_mask(flow, [[0, 0], [0, 1]])
        mask_y = create_mask(flow, [[0, 1], [0, 0]])
        mask = tf.concat(axis=3, values=[mask_x, mask_y])
        # forward-difference kernels: center minus right / center minus below
        filter_x = [[0, 0, 0], [0, 1, -1], [0, 0, 0]]
        filter_y = [[0, 0, 0], [0, 1, 0], [0, -1, 0]]
        weight_array = np.ones([3, 3, 1, 2])
        weight_array[:, :, 0, 0] = filter_x
        weight_array[:, :, 0, 1] = filter_y
        weights = tf.constant(weight_array, dtype=tf.float32)
        flow_u, flow_v = tf.split(axis=3, num_or_size_splits=2, value=flow)
        delta_u = conv2d(flow_u, weights)
        delta_v = conv2d(flow_v, weights)
        return delta_u, delta_v, mask
def _gradient_delta(im1, im2_warped):
    """Difference of Sobel image gradients between im1 and the warped im2."""
    with tf.variable_scope('gradient_delta'):
        filter_x = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]] # sobel filter
        filter_y = np.transpose(filter_x)
        # NOTE(review): the kernel declares 1 input channel while im1 is
        # presumably RGB (rgb_to_grayscale is used elsewhere) — confirm the
        # intended channel handling with the callers.
        weight_array = np.zeros([3, 3, 1, 2])
        for c in range(1):
            weight_array[:, :, c, 2 * c] = filter_x
            weight_array[:, :, c, 2 * c + 1] = filter_y
        weights = tf.constant(weight_array, dtype=tf.float32)
        im1_grad = conv2d(im1, weights)
        im2_warped_grad = conv2d(im2_warped, weights)
        diff = im1_grad - im2_warped_grad
        return diff
def gradient_loss(im1, im2_warped, mask):
    """Charbonnier penalty on the difference of image gradients (masked)."""
    with tf.variable_scope('gradient_loss'):
        # exclude one-pixel borders where the 3x3 Sobel response is invalid
        mask_x = create_mask(im1, [[0, 0], [1, 1]])
        mask_y = create_mask(im1, [[1, 1], [0, 0]])
        gradient_mask = tf.tile(tf.concat(axis=3, values=[mask_x, mask_y]), [1, 1, 1, 3])
        diff = _gradient_delta(im1, im2_warped)
        return charbonnier_loss(diff, mask * gradient_mask)
def smoothness_loss(flow):
    """First-order smoothness: Charbonnier penalty on neighbour differences
    of the u and v flow channels."""
    with tf.variable_scope('smoothness_loss'):
        du, dv, valid = _smoothness_deltas(flow)
        return charbonnier_loss(du, valid) + charbonnier_loss(dv, valid)
def _second_order_deltas(flow):
    """Discrete second derivatives of u and v along x, y and both diagonals.

    Returns (delta_u, delta_v, mask): 4-channel deltas (x, y, diag1, diag2)
    and a matching mask that zeroes border positions where the 3x3 stencil
    would read outside the image.
    """
    with tf.variable_scope('_second_order_deltas'):
        mask_x = create_mask(flow, [[0, 0], [1, 1]])
        mask_y = create_mask(flow, [[1, 1], [0, 0]])
        mask_diag = create_mask(flow, [[1, 1], [1, 1]])
        mask = tf.concat(axis=3, values=[mask_x, mask_y, mask_diag, mask_diag])
        # 1D second-derivative stencils (left - 2*center + right) per direction
        filter_x = [[0, 0, 0],
                    [1, -2, 1],
                    [0, 0, 0]]
        filter_y = [[0, 1, 0],
                    [0, -2, 0],
                    [0, 1, 0]]
        filter_diag1 = [[1, 0, 0],
                        [0, -2, 0],
                        [0, 0, 1]]
        filter_diag2 = [[0, 0, 1],
                        [0, -2, 0],
                        [1, 0, 0]]
        weight_array = np.ones([3, 3, 1, 4])
        weight_array[:, :, 0, 0] = filter_x
        weight_array[:, :, 0, 1] = filter_y
        weight_array[:, :, 0, 2] = filter_diag1
        weight_array[:, :, 0, 3] = filter_diag2
        weights = tf.constant(weight_array, dtype=tf.float32)
        flow_u, flow_v = tf.split(axis=3, num_or_size_splits=2, value=flow)
        delta_u = conv2d(flow_u, weights)
        delta_v = conv2d(flow_v, weights)
        return delta_u, delta_v, mask
def second_order_loss(flow):
    """Second-order smoothness: Charbonnier penalty on discrete second
    derivatives of the u and v flow channels."""
    with tf.variable_scope('second_order_loss'):
        du, dv, valid = _second_order_deltas(flow)
        return charbonnier_loss(du, valid) + charbonnier_loss(dv, valid)
def charbonnier_loss(x, mask=None, truncate=None, alpha=0.45, beta=1.0, epsilon=0.001):
    """Compute the generalized charbonnier loss of the difference tensor x.
    All positions where mask == 0 are not taken into account.

    Args:
        x: a tensor of shape [num_batch, height, width, channels].
        mask: a mask of shape [num_batch, height, width, mask_channels],
            where mask channels must be either 1 or the same number as
            the number of channels of x. Entries should be 0 or 1.
        truncate: optional upper bound applied to the per-element error.
        alpha: exponent of the generalized Charbonnier penalty.
        beta: scale applied to x before the penalty.
        epsilon: small constant keeping the penalty differentiable at 0.
    Returns:
        loss as tf.float32
    """
    with tf.variable_scope('charbonnier_loss'):
        # tf.unstack raises ValueError when x is not rank 4, so fall back to
        # the rank-2 case then. The original used a bare `except:`, which
        # also swallowed unrelated errors (including KeyboardInterrupt);
        # catch only the expected ValueError.
        try:
            batch, height, width, channels = tf.unstack(tf.shape(x))
            normalization = tf.cast(batch * height * width * channels, tf.float32)
        except ValueError:
            batch, height = tf.unstack(tf.shape(x))
            normalization = tf.cast(batch * height, tf.float32)
        error = tf.pow(tf.square(x * beta) + tf.square(epsilon), alpha)
        if mask is not None:
            error = tf.multiply(mask, error)
        if truncate is not None:
            error = tf.minimum(error, truncate)
        return tf.reduce_sum(error) / normalization
def create_mask(tensor, paddings):
    """Ones mask over *tensor*'s spatial dims, zero-padded at the edges.

    paddings is [[before1, after1], [before2, after2]] applied to dims 1
    and 2; the result has a trailing singleton channel and its gradient
    is stopped.
    """
    with tf.variable_scope('create_mask'):
        shape = tf.shape(tensor)
        # NOTE(review): dim 1 is called "width" here although it is the
        # height axis of an NHWC tensor (and vice versa); the mask is still
        # correct because paddings follow the same (dim1, dim2) order.
        inner_width = shape[1] - (paddings[0][0] + paddings[0][1])
        inner_height = shape[2] - (paddings[1][0] + paddings[1][1])
        inner = tf.ones([inner_width, inner_height])
        mask2d = tf.pad(inner, paddings)
        # broadcast over the batch dimension, then add the channel dim
        mask3d = tf.tile(tf.expand_dims(mask2d, 0), [shape[0], 1, 1])
        mask4d = tf.expand_dims(mask3d, 3)
        return tf.stop_gradient(mask4d)
def create_border_mask(tensor, border_ratio=0.1):
    """Mask zeroing a border whose width is ceil(min(H, W) * border_ratio)."""
    with tf.variable_scope('create_border_mask'):
        num_batch, height, width, _ = tf.unstack(tf.shape(tensor))
        shorter_side = tf.cast(tf.minimum(height, width), 'float32')
        sz = tf.cast(tf.ceil(shorter_side * border_ratio), 'int32')
        return tf.stop_gradient(create_mask(tensor, [[sz, sz], [sz, sz]]))
def create_outgoing_mask(flow):
    """Computes a mask that is zero at all positions where the flow
    would carry a pixel over the image boundary."""
    with tf.variable_scope('create_outgoing_mask'):
        num_batch, height, width, _ = tf.unstack(tf.shape(flow))
        # pixel-coordinate grids, broadcast over the batch
        grid_x = tf.reshape(tf.range(width), [1, 1, width])
        grid_x = tf.tile(grid_x, [num_batch, height, 1])
        grid_y = tf.reshape(tf.range(height), [1, height, 1])
        grid_y = tf.tile(grid_y, [num_batch, 1, width])
        # target position of each pixel after applying the flow
        flow_u, flow_v = tf.unstack(flow, 2, 3)
        pos_x = tf.cast(grid_x, dtype=tf.float32) + flow_u
        pos_y = tf.cast(grid_y, dtype=tf.float32) + flow_v
        inside_x = tf.logical_and(pos_x <= tf.cast(width - 1, tf.float32),
                                  pos_x >= 0.0)
        inside_y = tf.logical_and(pos_y <= tf.cast(height - 1, tf.float32),
                                  pos_y >= 0.0)
        inside = tf.logical_and(inside_x, inside_y)
        return tf.expand_dims(tf.cast(inside, tf.float32), 3)
|
import numpy as np
# Finding unsige cell
# File name: SUDOKU.py
def FindUnsignedLocation(Board, l):
    """Find the first empty (0) cell scanning row-major; store (row, col) in l.

    Returns True when an empty cell exists, False when the board is full.
    """
    for row in range(9):
        for col in range(9):
            if Board[row][col] == 0:
                l[0], l[1] = row, col
                return True
    return False
# Hàm kiểm tra tính an toàn của những ô trong hàng
def InRow(Board, row, num):
    """True when num already occurs somewhere in the given row."""
    return num in Board[row]
# Hàm kiểm tra tính an toàn của những ô trong cột
def InCol(Board, col, num):
    """True when num already occurs somewhere in the given column."""
    return any(Board[i][col] == num for i in range(9))
# Hàm kiểm tra tính an toàn của các ô trong một ô lớn 3x3
def InBox(Board, row, col, num):
    """True when num occurs in the 3x3 box whose top-left corner is (row, col)."""
    return any(Board[row + i][col + j] == num
               for i in range(3) for j in range(3))
# Kiểm tra trạng thái an toàn tại một vị trí
def isSafe(Board, row, col, num):
    """True when num may be placed at (row, col): absent from its row,
    its column, and its 3x3 box."""
    box_row, box_col = row - row % 3, col - col % 3
    return not (InCol(Board, col, num)
                or InRow(Board, row, num)
                or InBox(Board, box_row, box_col, num))
def SolveSudoku(Board):
    """Solve Board in place by backtracking; return True when solved.

    Bug fix: the original printed the board and `break`-ed when a recursive
    call succeeded, but then fell through to `return False`, so every parent
    call reset its cell and erased the solution. Success must propagate via
    `return True`.
    """
    l = [0, 0]
    if not FindUnsignedLocation(Board, l):
        return True  # no empty cell left: puzzle solved
    row, col = l
    for num in range(1, 10):
        if isSafe(Board, row, col, num):
            Board[row][col] = num
            if SolveSudoku(Board):
                return True
            Board[row][col] = 0  # backtrack and try the next candidate
    return False
# Sample puzzle (0 marks an empty cell) — the classic example grid.
Board = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
         [6, 0, 0, 1, 9, 5, 0, 0, 0],
         [0, 9, 8, 0, 0, 0, 0, 6, 0],
         [8, 0, 0, 0, 6, 0, 0, 0, 3],
         [4, 0, 0, 8, 0, 3, 0, 0, 1],
         [7, 0, 0, 0, 2, 0, 0, 0, 6],
         [0, 6, 0, 0, 0, 0, 2, 8, 0],
         [0, 0, 0, 4, 1, 9, 0, 0, 5],
         [0, 0, 0, 0, 8, 0, 0, 7, 9]]
# Entry point left commented out so importing this module has no side effects.
#SolveSudoku(Board)
#! /usr/bin/env python
#
def multiperm_enum ( n, k, counts ):

#*****************************************************************************80
#
## MULTIPERM_ENUM enumerates multipermutations.
#
#  Discussion:
#
#    A multipermutation is a permutation of objects, some of which are
#    identical.
#
#    While there are 6 permutations of the distinct objects A,B,C, there
#    are only 3 multipermutations of the objects A,B,B.
#
#    In general, there are N! permutations of N distinct objects, but
#    there are N! / ( ( M1! ) ( M2! ) ... ( MK! ) ) multipermutations
#    of N objects, in the case where the N objects consist of K
#    types, with M1 examples of type 1, M2 examples of type 2 and so on,
#    and for which objects of the same type are indistinguishable.
#
#  Example:
#
#    Input:
#
#      N = 5, K = 3, COUNTS = (/ 1, 2, 2 /)
#
#    Output:
#
#      Number = 30
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    19 December 2014
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer N, the number of items in the multipermutation.
#
#    Input, integer K, the number of types of items.
#    1 <= K.  Ordinarily, K <= N, but we allow any positive K, because
#    we also allow entries in COUNTS to be 0.
#
#    Input, integer COUNTS(K), the number of items of each type
#    (a numpy array: the elementwise 'counts < 0' test relies on it).
#    0 <= COUNTS(1:K) <= N and sum ( COUNTS(1:K) ) = N.
#
#    Output, integer VALUE, the number of multipermutations,
#    or -1 when the input is illegal.
#
  if ( n < 0 ):
    value = -1
    return value

  if ( n == 0 ):
    value = 1
    return value

  if ( k < 1 ):
    value = -1
    return value

  if ( any ( counts < 0 ) ):
    value = -1
    return value

  if ( sum ( counts ) != n ):
#
#  BUG FIX: this branch previously assigned NUMBER = -1 and then returned
#  the still-undefined name VALUE, raising NameError instead of flagging
#  the inconsistent input.  Assign VALUE itself.
#
    value = -1
    return value
#
#  Ready for computation.
#  By design, the integer division should never have a remainder.
#
  top = 0
  value = 1
  for i in range ( 0, k ):
    for j in range ( 1, counts[i] + 1 ):
      top = top + 1
      value = round ( ( value * top ) / j )

  return value
def multiperm_enum_test ( ):

#*****************************************************************************80
#
## MULTIPERM_ENUM_TEST tests MULTIPERM_ENUM.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    19 December 2014
#
#  Author:
#
#    John Burkardt
#
#  NOTE(review): this driver uses Python 2 'print' statements and helper
#  modules (compnz_random, i4_uniform_ab) from the wider Burkardt codebase;
#  as written it will not run under Python 3.
#
  from compnz_random import compnz_random
  from i4_uniform_ab import i4_uniform_ab

  n = 5
  seed = 123456789
  test_num = 5

  print ''
  print 'MULTIPERM_ENUM_TEST:'
  print '  MULTIPERM_ENUM enumerates multipermutations.'
  print ''
  print '  N is the number of objects to be permuted.'
  print '  K is the number of distinct types of objects.'
  print '  COUNTS is the number of objects of each type.'
  print '  NUMBER is the number of multipermutations.'
  print ''
  print '  Number       N       K       Counts(1:K)'
  print ''

#
#  Each trial draws a random K and a random nonzero composition of N.
#
  for test in range ( 0, test_num ):
    k, seed = i4_uniform_ab ( 1, n, seed )
    counts, seed = compnz_random ( n, k, seed )
    number = multiperm_enum ( n, k, counts )
    print '  %6d  %6d  %6d' % ( number, n, k ),
    for i in range ( 0, k ):
      print '  %4d' % ( counts[i] ),
    print ''
#
#  Terminate.
#
  print ''
  print 'MULTIPERM_ENUM_TEST:'
  print '  Normal end of execution.'
  return
if ( __name__ == '__main__' ):
  from timestamp import timestamp
  timestamp ( )  # print start time
  multiperm_enum_test ( )
  timestamp ( )  # print end time
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import pandas as pd
from ansm_utils import get_selectors
import time
import content_agent
url = "https://ansm.sante.fr/S-informer/Informations-de-securite-Lettres-aux-professionnels-de-sante"
# # variables contains xpath
# # xpath including search_indices function
# search_filed_xpath = "//input[@id='global_search_text']"
# search_icon_xpath = '//*[@id="btn-header-icon"]'
# search_button_xpath = '//*[@id="btn-header-search"]'
# pagination_xpath = "//a[normalize-space()='>']"
# each_article = '//*[@id="wrapper"]/div/div/article['
# # xpath including select_date function
# date_btn_xpath = "//a[normalize-space()='Date']"
# startDate_xpath = '//*[@id="filter_startDate"]'
# endDate_xpath = '//*[@id="filter_endDate"]'
# valider_xpath = "//i[contains(@class,'fa fa-check')]"
selectors = get_selectors()
# calling api to retrieve elements selector from database
def get_selectors():
    """Placeholder only.

    NOTE(review): this stub shadows ansm_utils.get_selectors (imported above)
    and returns None; the module-level ``selectors`` dict was already built
    from the imported version before this definition executed.
    """
    pass
def search_indices():
    """Scrape ANSM safety-information article links for each search term.

    Reads the date window from the input workbook (get_indices), drives a
    headless Chrome through the ANSM search UI, pages through all result
    pages collecting article hrefs, then hands the links to
    content_agent.get_contents().
    """
    # read input_output xlsx file to get search and date period for ANSM database
    getIndices = get_indices()
    # search_string = getIndices[0]
    # NOTE(review): the Excel-provided search terms are ignored; the list is
    # hard-coded here -- confirm whether that is intentional.
    search_string = ['covid', 'VAXZEVRIA']
    date_from = getIndices[1]
    date_to = getIndices[2]
    # Headless, maximised Chrome session.
    options = Options()
    options.add_argument("--start-maximized")
    options.add_argument("headless")
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.get(url)
    driver.refresh()
    links = []
    for key in search_string:
        # driver.get(url)
        # Set the search field's value via JS (avoids send_keys flakiness).
        search_field = driver.find_element_by_xpath(selectors["search_filed_xpath"])
        driver.execute_script('''
        var elem = arguments[0];
        var value = arguments[1];
        elem.value = value;
        ''', search_field, key)
        try:
            search_icon = driver.find_element_by_xpath(selectors["search_icon_xpath"])
            driver.execute_script('arguments[0].click();', search_icon)
        except:
            return links
        driver.implicitly_wait(2)
        try:
            search_button = driver.find_element_by_xpath(selectors["search_button_xpath"])
            driver.execute_script('arguments[0].click();', search_button)
        except:
            return links
        # Constrain the results to the requested date window.
        select_date(driver, date_from, date_to)
        time.sleep(2)
        # Walk every results page until pagination runs out (or a lookup fails).
        while True:
            try:
                # get href links
                articles = driver.find_elements_by_tag_name('article')
                for i in range(1, len(articles)+1):
                    data = driver.find_element_by_xpath(selectors["each_article"] + str(i) +']')
                    href = data.find_element_by_tag_name('a').get_attribute('href')
                    links.append(href)
                driver.refresh()
            except:
                break
            try:
                pagination = driver.find_element_by_xpath(selectors["pagination_xpath"])
                driver.execute_script('arguments[0].click();', pagination)
            except:
                break
            time.sleep(5)
    # driver.quit()
    return content_agent.get_contents(links)
    # return links
def select_date(driver, date_from, date_to):
    """Open the ANSM date filter and apply the [date_from, date_to] window.

    Every step is best-effort: a missing element either aborts (date button,
    confirm button) or is skipped (individual date fields) so the caller can
    continue with unfiltered results.
    """
    try:
        # select date button
        date_btn = driver.find_element_by_xpath(selectors["date_btn_xpath"])
        driver.execute_script("arguments[0].click();", date_btn)
        time.sleep(2)
    except:
        return
    try:
        # set start date (value injected via JS to bypass the datepicker widget)
        startDate = driver.find_element_by_xpath(selectors["startDate_xpath"])
        value = driver.execute_script('return arguments[0].value;', startDate)
        driver.execute_script('''
        var elem = arguments[0];
        var value = arguments[1];
        elem.value = value;
        ''', startDate, date_from)
    except:
        pass
    try:
        # set end date
        endDate = driver.find_element_by_xpath(selectors["endDate_xpath"])
        value = driver.execute_script('return arguments[0].value;', endDate)
        driver.execute_script('''
        var elem = arguments[0];
        var value = arguments[1];
        elem.value = value;
        ''', endDate, date_to)
    except:
        pass
    try:
        # select valider button and hit enter
        valider = driver.find_element_by_xpath(selectors["valider_xpath"])
        driver.execute_script("arguments[0].click();", valider)
    except:
        return
def get_indices():
    """Read the search strings and date window from the input workbook.

    Returns a (search_strings, date_from, date_to) tuple taken from the
    'SearchStrategy' sheet of Template_Input_Output.xlsx; the dates are
    truncated to their date part (text before the first space).
    """
    sheet = pd.read_excel(r'../read_data/Template_Input_Output.xlsx', header=None, sheet_name='SearchStrategy')
    search_strings = sheet.loc[7:9][19].values
    window = sheet.loc[2:3][1].values
    date_from = str(window[0]).split(" ")[0]
    date_to = str(window[1]).split(" ")[0]
    return (search_strings, date_from, date_to)
if __name__ == "__main__":
datas = search_indices()
for data in datas:
print(data)
|
# _*_ coding: utf-8 _*_
"""
ctask.py by xianhu
"""
import re
from typing import TypeVar
class Task(object):
    """
    Base task shared by the fetcher, parser and saver stages.

    Carries a scheduling priority (lower sorts first), a free-form keys dict,
    the crawl depth and the target url.
    """

    # Permissible payload types for parsed content / saved items.
    TypeContent = TypeVar("TypeContent", str, tuple, list, dict)
    TypeItem = TypeVar("TypeItem", str, tuple, list, dict)

    # Parses the __str__ form back out of a logged error message
    # (used by TaskFetch.from_error_message).
    re_obj = re.compile(r"priority=(?P<p>\d+),\s*keys=(?P<k>.+?),\s*deep=(?P<d>\d+),\s*url=(?P<u>.*)$", flags=re.IGNORECASE)

    def __init__(self, priority: int = 0, keys: dict = None, deep: int = 0, url: str = None):
        """Store the task fields, normalising falsy keys/url to {} and ""."""
        self.priority = priority
        self.keys = keys or {}
        self.deep = deep
        self.url = url or ""

    def __lt__(self, other):
        """Order tasks by ascending priority (for heapq / PriorityQueue)."""
        return self.priority < other.priority

    def __str__(self):
        """Round-trippable summary; the format is what re_obj parses."""
        return f"priority={self.priority}, keys={self.keys}, deep={self.deep}, url={self.url}"
class TaskFetch(Task):
    """
    Fetcher task: a Task plus a repeat counter used for retry bookkeeping.
    """

    def __init__(self, priority=0, keys=None, deep=0, url=None, repeat: int = 0):
        """Construct from the base Task fields plus a retry count."""
        super().__init__(priority, keys, deep, url)
        self.repeat = repeat
        return

    @staticmethod
    def from_task_fetch(task_fetch):
        """Clone task_fetch for a retry: same fields, repeat incremented."""
        priority, keys, deep, url = task_fetch.priority, task_fetch.keys, task_fetch.deep, task_fetch.url
        return TaskFetch(priority=priority, keys=keys, deep=deep, url=url, repeat=task_fetch.repeat + 1)

    @staticmethod
    def from_task_parse(task_parse, url_new: str = None):
        """Derive a fetch task for a newly discovered url: priority and deep
        are incremented, the repeat count starts at zero."""
        priority, keys, deep, url = task_parse.priority, task_parse.keys, task_parse.deep, task_parse.url
        return TaskFetch(priority=priority + 1, keys=keys, deep=deep + 1, url=url_new, repeat=0)

    @staticmethod
    def from_error_message(error_message: str):
        """Rebuild a fetch task from a logged error message (see Task.__str__).

        Security fix: the keys dict is parsed with ast.literal_eval instead of
        eval(), so a crafted log line can no longer execute arbitrary code.
        """
        import ast  # local import: only needed on this recovery path
        reg = Task.re_obj.search(error_message)
        priority, keys, deep, url = [reg.group(i) for i in ["p", "k", "d", "u"]]
        return TaskFetch(priority=int(priority), keys=ast.literal_eval(keys), deep=int(deep), url=url.strip(), repeat=0)
class TaskParse(Task):
    """
    Parser task: a Task plus the fetched content awaiting parsing.
    """

    def __init__(self, priority=0, keys=None, deep=0, url=None, content: Task.TypeContent = None):
        """Construct from the base Task fields plus the raw fetched content."""
        super().__init__(priority, keys, deep, url)
        self.content = content

    @staticmethod
    def from_task_fetch(task_fetch: Task, content: Task.TypeContent = None):
        """Pair a completed fetch task with its downloaded content."""
        return TaskParse(priority=task_fetch.priority, keys=task_fetch.keys,
                         deep=task_fetch.deep, url=task_fetch.url, content=content)
class TaskSave(Task):
    """
    Saver task: a Task plus the parsed item awaiting persistence.
    """

    def __init__(self, priority=0, keys=None, deep=0, url=None, item: Task.TypeItem = None):
        """Construct from the base Task fields plus the parsed item."""
        super().__init__(priority, keys, deep, url)
        self.item = item

    @staticmethod
    def from_task_parse(task_parse: Task, item: Task.TypeItem = None):
        """Pair a completed parse task with one extracted item."""
        return TaskSave(priority=task_parse.priority, keys=task_parse.keys,
                        deep=task_parse.deep, url=task_parse.url, item=item)
|
import csv
from DataCollection.User import User
__author__ = 'lizzybradley'
class FixTotalEdits:
user_list = []
def __init__(self, filename):
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.user_list.append(User(row))
def fix(self):
for user in self.user_list:
total_edits = user.article_edits + user.article_talk_edits + user.user_edits + user.user_talk_edits + \
user.file_edits + user.file_talk_edits + user.template_edits + user.template_talk_edits + \
user.wikipedia_edits + user.wikipedia_talk_edits + user.mediaWiki_edits + user.mediaWiki_talk_edits + \
user.category_edits + user.category_talk_edits + user.draft_edits + user.draft_talk_edits + \
user.talk_edits
user.total_edits = total_edits
print user
# Recompute totals for the deduplicated-votes export and print each user.
fix_total_edits = FixTotalEdits('../Data/5_no_double_votes.csv')
fix_total_edits.fix()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# DateTime : 2019-06-03 14:04:35
# Author : dongchuan
# Version : v1.0
# Desc : Http过滤器:过滤掉正常的URL,减少后续机器学习的数据压力
import re
import sys
import urlparse
import simplejson
from urldetect.conf.config import Config
from urldetect.utils.common import Common
from urldetect.utils.URLTokenizer import URLTokenizer
def httpFilter(request):
    """Pre-filter one HTTP request record.

    Returns True when the record should be discarded (missing fields,
    non-200 response, unsupported method, or a static-resource URL);
    False when it should flow on to the ML pipeline.
    """
    if "method" not in request or "response.code" not in request:
        return True
    if str(request["response.code"]) != "200":
        return True
    if request["method"] not in ["GET", "POST"]:
        return True
    if "request.url" not in request:
        return True
    # NOTE(review): presence is checked for "request.url" but the static-file
    # test reads "realUrl" -- confirm both keys always coexist.
    if Common.filter_static(request["realUrl"]):
        return True
    return False
def paramValFilter(paramVal):
    """Return True for parameter values that carry no detection signal:
    empty, oversized (>= 32 KB), purely digits/letters/alphanumerics, or
    Chinese text."""
    if not paramVal:
        return True
    # Skip very large values to keep the downstream ML stage cheap.
    if sys.getsizeof(paramVal) >= 32000:
        return True
    # Pure digits, pure letters or pure alphanumerics have no payload shape.
    if paramVal.isdigit() or paramVal.isalpha() or paramVal.isalnum():
        return True
    if Common.filterChinese(paramVal):
        return True
    return False
def getDeepJsonVal(data, result=None):
    """Recursively collect interesting leaf values from a nested JSON-like
    structure (dicts / lists / scalars).

    A leaf is kept only when paramValFilter() does not reject it; string
    leaves that look like embedded JSON are parsed and descended into.

    Bug fixes vs. the original:
      * ``result`` defaulted to a shared mutable list, so values accumulated
        across unrelated calls; the default is now None -> fresh list.
      * the list branch and the embedded-JSON branch recursed without passing
        ``result``, so their findings landed in the shared default instead of
        the caller's accumulator.
    """
    if result is None:
        result = []
    if isinstance(data, dict):
        for key, value in data.items():
            getDeepJsonVal(value, result)
    elif isinstance(data, list):
        for value in data:
            getDeepJsonVal(value, result)
    else:
        if isinstance(data, unicode) and ('{' in data or '[' in data):
            try:
                getDeepJsonVal(simplejson.loads(data), result)
            except Exception:
                if not paramValFilter(data):
                    result.append(data)
        else:
            if not paramValFilter(data):
                result.append(data)
    return result
def parseJson(request):
    """Extract candidate parameter values from a (possibly JSON) POST body.

    First attempts a strict JSON parse; on failure falls back to regex
    scraping of "key":"value" / "key":number pairs from the raw body.
    Returns a list of raw value strings ([] when nothing parses).
    """
    try:
        reqbody = request["request.body"]
        return getDeepJsonVal(simplejson.loads(reqbody))
    except:
        # NOTE(review): if "request.body" was missing, `reqbody` is unbound
        # here and re.findall raises NameError -- which the inner except then
        # swallows, so the net effect is returning []. Works, but fragile.
        try:
            result = []
            m = re.findall(r'"\w+":".*?"',reqbody)
            if m:
                for p in m:
                    result.append(p.split(':', 1)[1])
            else:
                # No quoted values: try bare numeric values instead.
                m = re.findall(r'"\w+":\d+',reqbody)
                if m:
                    for p in m:
                        result.append(p.split(':', 1)[1])
            return result
        except Exception, e:
            # print "parseJson:", e, request
            return []
def getQueryString(request):
    """Extract query-string parameter values from request['request.url'].

    Returns the list of values that survive paramValFilter(); parse errors
    are printed and yield whatever was collected so far.
    """
    data = []
    try:
        url = request["request.url"]
        result = urlparse.urlparse(url)
        query = result.query
        # urlparse.parse_qsl truncates a value at ';', losing everything
        # after it -- temporarily encode semicolons so they survive the split.
        if ";" in query:
            query = re.sub(r';', '@@@@', query)
        params = urlparse.parse_qsl(query, True)
        for k, v in params:
            if not v:
                continue
            # Restore the semicolons hidden above.
            if '@@@@' in v:
                v = re.sub(r'@@@@', ';', v)
            if paramValFilter(v):
                continue
            data.append(v)
    except Exception, e:
        print "parse query error:", e, request
    return data
# Script entry point (Python 2): stream request records from data/log.txt,
# drop uninteresting ones, then lexically check every parameter value and
# label the request normal / abnormal.
if __name__ == '__main__':
    parser = URLTokenizer()
    with open("data/log.txt") as f:
        for line in f:
            try:
                request = Common.json_load(line)
                if httpFilter(request):
                    continue
                if request["method"].upper() == "GET":
                    if "request.params" not in request or not request["request.params"]:
                        continue
                    paramValList = getQueryString(request)
                if request["method"].upper() == "POST":
                    if "request.body" not in request or not request["request.body"]:
                        continue
                    paramValList = getDeepJsonVal(parseJson(request))
                flag = "normal"
                for p in paramValList:
                    try:
                        print p, parser.URLRunParser(str(p))
                        if not parser.URLRunParser(str(p)): # every value must pass the lexical check to stay whitelisted; otherwise forward for deeper analysis
                            flag = "abnormal"
                            break
                    except Exception,e:
                        print "URLTokenizer:",e
                if flag == "normal":
                    print "normal", request["request.url"]
                else:
                    print "abnormal",request["request.url"]
            except Exception, e:
                print "main:", e, request
|
from Gui.Base.widget import Widget
class ButtonPressedController:
    """Wires each button of a Widget to a method named ``button<i>_on_click``.

    Subclasses are expected to define button0_on_click, button1_on_click, ...
    for every button the widget exposes; a missing handler raises
    AttributeError at construction time.
    """

    def __init__(self, widget: Widget):
        self.widget = widget
        for index, button in enumerate(self.widget.buttons):
            handler = self.__getattribute__("button{}_on_click".format(index))
            button.clicked.connect(handler)
class MousePressedController:
    """Redirects a Widget's mousePressEvent to this controller's
    widget_on_pressed hook."""

    def __init__(self, widget: Widget):
        self.widget = widget
        # Replace the widget's Qt event hook with our (bound) handler.
        self.widget.mousePressEvent = self.__getattribute__("widget_on_pressed")

    def widget_on_pressed(self, e):
        """Default handler: no-op; subclasses override to react to clicks."""
        pass
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selene.support._extensions.webdriver_manager import ChromeType
from selene import Config, Browser, support
@pytest.fixture(scope='function')
def driver_per_test():
    """Yield a fresh Chrome WebDriver per test, quitting it on teardown.

    Uses Selene's patched webdriver-manager helper (named for Chrome 115+,
    when driver downloads moved to the Chrome-for-Testing endpoints) to
    resolve and install a matching chromedriver.
    """
    chrome_driver = webdriver.Chrome(
        service=Service(
            support._extensions.webdriver_manager.patch._to_find_chromedrivers_from_115(
                ChromeDriverManager(chrome_type=ChromeType.GOOGLE)
            ).install()
        )
    )
    yield chrome_driver
    chrome_driver.quit()
@pytest.fixture(scope='function')
def browser(driver_per_test):
    """Wrap the per-test driver in a Selene Browser with a default Config."""
    yield Browser(
        Config(
            driver=driver_per_test,
        )
    )
|
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from mapbox import Distance
from ..profiles.model import Profile
from ..hitches.model import Hitch
from .model import Drive
from django.contrib.auth.models import User
from .serializer import DriveSerializer
import json
from django.core import serializers
from ..users.serializer import UserSerializer
import datetime
from django.utils.timezone import get_current_timezone
from django.utils.dateparse import parse_datetime
from django.core.files.base import ContentFile
import base64
from math import sqrt
from django.db.models import Q
# Gets all user's drives.
# 401 - un-authorized.
# 200 - got all of the user's drives.
@api_view(['GET'])
def list_all(request):
    """Return every drive owned by the requesting user.

    401 for anonymous users, otherwise 200 with the serialized drives.
    """
    if request.user.is_anonymous:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    owned = Drive.objects.filter(driver=request.user)
    serializer = DriveSerializer(owned, many=True)
    return Response(serializer.data, status.HTTP_200_OK)
# Creates a drive for the user.
# 400 - Bad request.
# 201 - created a drive for that user.
# 401 - User is un-authorized
@api_view(['POST'])
def create(request):
    """Create a drive owned by the requesting user.

    401 for anonymous users, 400 on invalid payload, 201 with the serialized
    drive on success.
    """
    if request.user.is_anonymous:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    payload = json.loads(request.body)
    serializer = DriveSerializer(data=payload, context={'driver_id': request.user.id})
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    serializer.save()
    return Response(serializer.data, status=status.HTTP_201_CREATED)
# Gets the details of the given drive.
# 400 - Bad request.
# 200 - returned the user's drives..
# 401 - User is un-authorized
@api_view(['POST'])
def detail(request):
    """Return the details of one of the requesting user's drives.

    400 on unparsable payload / unknown id, 401 when the drive belongs to
    another user, 200 with the serialized drive otherwise.
    """
    try:
        data = json.loads(request.body)
        drive = Drive.objects.get(pk=data['id'])
    except Exception:
        # Bug fix: the code must go through the `status=` kwarg; previously
        # the integer was sent as the response *body* with an HTTP 200.
        return Response(status=status.HTTP_400_BAD_REQUEST)
    # Bug fix: Drive's owner FK is `driver` (cf. list_all's filter and
    # accept_hitch), not `user` -- the old attribute raised AttributeError.
    if request.user.id != drive.driver_id:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    driveSerializer = DriveSerializer(drive)
    return Response(driveSerializer.data, status=status.HTTP_200_OK)
# Deletes a drive.
# 400 - Bad request.
# 204 - Deleted the user's drive.
# 401 - User is un-authorized
@api_view(['DELETE'])
def delete(request):
    """Delete one of the requesting user's drives.

    400 on unparsable payload / unknown id, 401 when the drive belongs to
    another user, 204 on success.
    """
    try:
        data = json.loads(request.body)
        drive = Drive.objects.get(pk=data['id'])
    except Exception:
        # Bug fix: pass the code via `status=`; it was previously sent as the
        # response body (an HTTP 200 carrying the number 400).
        return Response(status=status.HTTP_400_BAD_REQUEST)
    # Bug fix: the Drive owner field is `driver` (cf. list_all), not `user`.
    if request.user.id != drive.driver_id:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    drive.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
# Search for drives given coordinates and time frame.
# 400 - Bad request.
# 200 - Returned available drives (it could be empty).
@api_view(['POST'])
def search(request):
    """Find candidate drives matching a hitch's coordinates and time window.

    Coarse SQL filtering (date range + padded bounding boxes) is followed by
    a directional heuristic in Python; each surviving drive is serialized
    with an estimated pick-up time.  400 on malformed payload, otherwise 200
    with a (possibly empty) list.
    """
    # Parse json object.
    try:
        data = json.loads(request.body)
        pick_up_lat = data["pick_up_lat"]
        pick_up_long = data["pick_up_long"]
        drop_off_lat = data["drop_off_lat"]
        drop_off_long = data["drop_off_long"]
        start_date_time = parse_datetime(data["start_date_time"])
        end_date_time = parse_datetime(data["end_date_time"])
    except:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    print(data)  # NOTE(review): debug output left in -- consider removing
    # Build search query.
    # First filter off of date range: repeating drives always pass; one-off
    # drives must start inside the requested window.
    query = Drive.objects.all().filter(Q(repeated_week_days__len__gt = 0) | Q(start_date_time__range=(start_date_time,end_date_time)))
    query = query.filter(Q(start_date_time__lte=end_date_time) | Q(repeated_week_days__len=0))
    # Ensure that we're aren't pulling the user's drives.
    if not request.user.is_anonymous:
        query = query.exclude(driver_id = request.user.id)
    # Padding defines how far outside bounds you're able to go (in degrees).
    padding = 1.0
    # Filter based off of pick_up.
    query = query.filter(max_lat__gte = pick_up_lat - padding, min_lat__lte = pick_up_lat + padding)
    query = query.filter(max_long__gte = pick_up_long - padding, min_long__lte = pick_up_long + padding)
    # Filter based off of drop_off.
    query = query.filter(max_lat__gte = drop_off_lat - padding, min_lat__lte = drop_off_lat + padding)
    query = query.filter(max_long__gte = drop_off_long - padding, min_long__lte = drop_off_long + padding)
    # Perform further programmatic filtering.
    pick_up_point = (pick_up_lat,pick_up_long)
    drop_off_point = (drop_off_lat, drop_off_long)
    filteredDrives = []
    for drive in query:
        # Filter each drive based off of distances from pick up / drop off to start / end
        start_point = (drive.start_lat, drive.start_long)
        end_point = (drive.end_lat, drive.end_long)
        start_to_pick_up = distBetweenPoints(start_point, pick_up_point)
        start_to_drop_off = distBetweenPoints(start_point, drop_off_point)
        # If the hitch is going the same direction as the drive.
        if start_to_pick_up <= start_to_drop_off:
            end_to_pick_up = distBetweenPoints(end_point, pick_up_point)
            end_to_drop_off = distBetweenPoints(end_point, drop_off_point)
            if end_to_drop_off <= end_to_pick_up:
                # Drive passed first round of filtering.
                # Filter by distance api.
                #print(filterByDistance([start_point, pick_up_point, drop_off_point, end_point]))
                # Calculate an estimated time of pick up: scale the drive's
                # total minutes by the distance still ahead of the pick-up.
                pick_up_to_drop_off_dist = distBetweenPoints(pick_up_point, drop_off_point)
                start_to_end_min = (drive.end_date_time - drive.start_date_time).seconds / 60.0
                start_to_end_dist = start_to_pick_up + pick_up_to_drop_off_dist + end_to_drop_off
                start_to_end_straight = distBetweenPoints(start_point, end_point)
                drive_min_per_deg = start_to_end_min / start_to_end_dist * (start_to_end_dist / start_to_end_straight)
                min_from_pick_up_to_end = drive_min_per_deg * (end_to_drop_off + pick_up_to_drop_off_dist)
                est_pick_up_time = drive.end_date_time - datetime.timedelta(minutes=int(min_from_pick_up_to_end))
                # Convert drive to json and add special fields.
                serializedDrive = DriveSerializer(drive).data
                serializedDrive['estimated_pick_up_date_time'] = est_pick_up_time
                filteredDrives.append(serializedDrive)
    return Response(filteredDrives, status=status.HTTP_200_OK)
# Accepts a hitch request.
# 400 - Bad request.
# 200 - Accepted.
# 401 - User is un-authorized
@api_view(['POST'])
def accept_hitch (request):
    """Let a driver accept a hitch request on one of their drives.

    401 for anonymous users or drives owned by someone else, 400 on a bad
    payload, 200 once the hitch is marked accepted.
    """
    # Bug fix: `is_anonymous` is a property in modern Django, and is used
    # without parentheses everywhere else in this module; calling it was
    # inconsistent and breaks on current Django versions.
    if request.user.is_anonymous:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    # Get JSON.
    data = json.loads(request.body)
    try:
        drive = Drive.objects.get(id=data["drive_id"])
        if drive.driver_id != request.user.id:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        hitch = Hitch.objects.get(id=data["hitch_id"])
        hitch.accepted = True
        hitch.save()
        # Notify the hitchhiker.
    except Exception:
        return Response(status=status.HTTP_400_BAD_REQUEST)
    return Response(status=status.HTTP_200_OK)
# Function that finds the distance between two points.
def distBetweenPoints (pointA, pointB):
    """Return the straight-line (Euclidean) distance between two 2-D points,
    each given as a (lat, long) pair."""
    dx = pointA[0] - pointB[0]
    dy = pointA[1] - pointB[1]
    return sqrt(dx * dx + dy * dy)
# Function that filters a list of points using mapbox distance api.
def filterByDistance(points):
    """Query the Mapbox Distance API for pairwise driving durations between
    the [start, pick_up, drop_off, end] points.

    points: list of four (lat, long) tuples in that order.
    Returns the 'durations' matrix from the API response.

    NOTE(review): coordinates are sent as [lat, long]; GeoJSON expects
    [longitude, latitude] -- confirm the intended order before relying on
    the durations.
    """
    service = Distance()
    geoJsonList = []
    for point_iter in range(len(points)):
        geoJsonList.append({
            'type': 'Feature',
            'properties': {'name': ['start','pick_up','drop_off','end'][point_iter]},
            'geometry': {
                'type': 'Point',
                'coordinates': [points[point_iter][0], points[point_iter][1]]
            }
        })
    response = service.distances(geoJsonList, 'driving')
    print(geoJsonList)  # debug output; remove for production
    return(response.json()['durations'])
|
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.app import App
from kivy.uix.image import Image
# import kivy
class AlternateApp(App):
    """Kivy demo app (Python 2): two buttons and an image in a BoxLayout."""
    def build(self):
        """Assemble and return the root widget tree."""
        self.layout=BoxLayout()
        self.settings=Button(text='Settings',color=(0,1,0,0.8),on_press=self.detect,size_hint = (1, 0.3),
                             background_color = [1, 0, 0, 1])
        # color=(0,1,0,0.8) means the word, Settings, is green in color and 80% opaque
        # size_hint = (1, 0.3) means the button occupies 100% of the x-axis and 30% of the y-axis
        # background_color = [1,0,0,1] means the button is red in color and 100% opaque
        self.layout.add_widget(self.settings)
        # NOTE(review): neither button actually quits -- both call detect().
        self.quit=Button(text='Quit',on_press=self.detect)
        self.layout.add_widget(self.quit)
        wag = Image(source='c:/Pinardy/SUTD/Digital World/cohort_classroom.jpg',pos_hint={'center_x': 0, 'center_y': 0.5})
        # load the classroom image from an absolute Windows path; pos_hint centres it vertically
        self.layout.add_widget(wag)
        return self.layout
    def detect(self,instance):
        """Shared on_press handler for both buttons (Python 2 print)."""
        print 'You have clicked me!'
AlternateApp().run() |
import csv
from collections import Counter
with open("height_weight.csv",newline="") as f:
reader=csv.reader(f)
file_data=list(reader)
file_data.pop(0)
newData=[]
for i in range(len(file_data)):
num=file_data[i][1]
newData.append(float(num))
data=Counter(newData)
mode_for_data_range={"50-60":0,"60-70":0,"70-80":0}
for height,occurence in data.items():
if 50<float(height)<60:
mode_for_data_range["50-60"]+=occurence
elif 60<float(height)<70:
mode_for_data_range["60-70"]+=occurence
elif 70<float(height)<80:
mode_for_data_range["70-80"]+=occurence
modeRange,modeOcurrence=0,0
for range,occurence in mode_for_data_range.items():
if occurence>modeOcurrence:
modeRange,modeOcurrence=[int(range.split("-")[0]),int(range.split("-")[1])],occurence
mode=float((modeRange[0]+modeRange[1])/2)
print("Mode is= ", str(mode))
|
from django.shortcuts import render
from .models import Team
def teamlist(request):
    """Render the team/player page with every Team row."""
    teams = Team.objects.all()
    context = {'items': teams}
    return render(request, 'MyApp/teamplayer.html', context)
|
from django.db import models
class ContentType(models.Model):
    """Category label attached to a Submissions row.

    NOTE(review): the name clashes with django.contrib.contenttypes'
    ContentType; consider renaming to avoid confusion -- confirm no
    migrations depend on the current name first.
    """
    # Display name of the category.
    name = models.CharField(max_length=128)
    # Attach the model to the 'communications' app explicitly.
    class Meta:
        app_label = 'communications'
    def __unicode__(self):  # Python 2 / old-Django string representation
        return self.name
class Submissions(models.Model):
    """A user-submitted message, categorised by ContentType."""
    # Submitter's display name and contact e-mail.
    name = models.CharField(max_length=128)
    email = models.EmailField(max_length=254)
    # Free-text body of the submission.
    description = models.TextField()
    # Set once when the row is created.
    date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0
    # (consistent with the __unicode__ methods here); add on_delete when
    # upgrading.
    contentType = models.ForeignKey(ContentType)
    # Attach the model to the 'communications' app explicitly.
    class Meta:
        app_label = 'communications'
    def __unicode__(self):  # Python 2 / old-Django string representation
        return self.name
# -*- coding: cp1252 -*-
'''
Created on 30/09/2013
@author: Pedro
'''
def contarelementos(lista):
    """Recursively count the elements of lista (equivalent to len())."""
    if lista == []:
        return 0
    # One for the head, plus the count of the tail.
    return 1 + contarelementos(lista[1:])
def multiplicarelementos(multi, lista, resp=[]):
    """Return resp extended with every element of lista multiplied by multi.

    (The shared default list is safe here: resp is never mutated.)
    """
    return resp + [multi * valor for valor in lista]
def obtenerelementospares(lista, resp=[]):
    """Return resp extended with the even elements of lista, in order."""
    return resp + [valor for valor in lista if valor % 2 == 0]
def confirmarunpar(lista):
    """True when lista contains at least one even number."""
    return any(valor % 2 == 0 for valor in lista)
def confirmartodopar(lista):
    """True when every element of lista is even (vacuously True for [])."""
    return all(valor % 2 == 0 for valor in lista)
def separarparimpar(lista, par=[], impar=[]):
    """Split lista into [evens, odds], appended after the par/impar seeds."""
    pares = list(par)
    impares = list(impar)
    for valor in lista:
        (pares if valor % 2 == 0 else impares).append(valor)
    return [pares, impares]
def aparicion1lista(elem, lista, listan=[], cont=0):
    """Return listan + lista with the first occurrence of elem removed.

    Removal only happens while cont == 0; a non-zero cont seed disables it.
    """
    resultado = list(listan)
    ya_quitado = cont != 0
    for valor in lista:
        if not ya_quitado and valor == elem:
            ya_quitado = True
            continue
        resultado.append(valor)
    return resultado
def apariciontodolista(elem, lista, listan=[]):
    """Return listan + lista with every occurrence of elem removed."""
    return listan + [valor for valor in lista if valor != elem]
def mayornum(lista, may=0, cont=0):
    """Largest element of lista; returns the `may` seed when lista is empty.

    With cont == 0 (the normal call) the first element seeds the maximum,
    so all-negative lists are handled correctly.
    """
    mejor = may
    sembrado = cont != 0
    for valor in lista:
        if not sembrado:
            mejor = valor
            sembrado = True
        elif valor > mejor:
            mejor = valor
    return mejor
def cambiarpalabra(lista, p1, p2):
    """Return a copy of lista with every p1 replaced by p2."""
    return [p2 if valor == p1 else valor for valor in lista]
def eliminarapariciones(lista, pal):
    """Return a copy of lista with every occurrence of pal removed."""
    return [valor for valor in lista if valor != pal]
def invertirlista(lista, cont=0):
    """Reverse lista *in place* (swapping from index cont inward) and
    return the same list object."""
    mitad = len(lista) // 2
    i = cont
    while i < mitad:
        lista[i], lista[-1 - i] = lista[-1 - i], lista[i]
        i += 1
    return lista
def eliminarlistaporlista(lista1, lista2, n=0):
    """Return lista1 stripped of every value appearing in lista2.

    (n is an unused counter kept only for interface compatibility.)
    """
    restante = lista1
    for pal in lista2:
        restante = eliminarapariciones(restante, pal)
    return restante
def fusionlistas(lista1, lista2, nuevalista=[]):
    """Merge the unique elements of both lists: lista2's items first, then
    lista1's items not already present (appended after the nuevalista seed).

    Bug fix: the original returned nuevalista untouched whenever lista1 was
    empty, silently dropping all of lista2's elements.
    """
    resultado = list(nuevalista)
    for valor in lista2:
        if valor not in resultado:
            resultado.append(valor)
    for valor in lista1:
        if valor not in resultado:
            resultado.append(valor)
    return resultado
def xorlistas1(lista1, lista2, nuevalista=[]):
    """Return lista1, then the nuevalista seed, then every lista2 element
    not present in lista1 (lista2 duplicates are kept)."""
    extras = [valor for valor in lista2 if valor not in lista1]
    return lista1 + nuevalista + extras
def xorlistas2(lista1, lista2, nuevalista=[]):
    """Return lista2, then the nuevalista seed, then every lista1 element
    not present in lista2 (lista1 duplicates are kept)."""
    extras = [valor for valor in lista1 if valor not in lista2]
    return lista2 + nuevalista + extras
def andlistas(lista1, lista2, nuevalista=[]):
    """Intersection in lista2 order (duplicates kept), appended after the
    nuevalista seed."""
    return nuevalista + [valor for valor in lista2 if valor in lista1]
def aparicion1vezlista(elem, lista, listan=[], cont=0):
    """Return listan + lista with the first occurrence of elem removed
    (removal only happens while cont == 0).

    Bug fixes: the original dropped the accumulated listan prefix when the
    match was found, and delegated its recursion to aparicion1lista instead
    of itself; it is now self-contained and prefix-preserving.
    """
    if lista == []:
        return listan
    if lista[0] == elem and cont == 0:
        return listan + lista[1:]
    return aparicion1vezlista(elem, lista[1:], listan + [lista[0]], cont)
def removerrepetidas(lista):
    """Remove duplicates from lista, keeping the *last* occurrence of each
    value (so the output order is the order of last occurrences)."""
    sin_repetidas = []
    for valor in reversed(lista):
        if valor not in sin_repetidas:
            sin_repetidas.insert(0, valor)
    return sin_repetidas
def operacionU(a,u0,b,terminos=[]):
if |
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
N = INT()

# Sum 1..N, skipping every multiple of 3 or 5.
ans = sum(i for i in range(1, N + 1) if i % 3 != 0 and i % 5 != 0)

print(ans)
|
from flask.ext.wtf import Form, TextField, TextAreaField, SubmitField
# import classes above
#fill in variables
class ContactForm(Form):
    """Simple contact form: name, e-mail, free-text message, submit button.

    NOTE(review): the `flask.ext.*` import style used above was removed in
    Flask 1.0, and these field classes now live in `wtforms`; modernize to
    `from flask_wtf import FlaskForm` + `from wtforms import ...` when
    upgrading.
    """
    name = TextField("name")          # sender's display name
    email = TextField("email")        # sender's e-mail address
    message = TextAreaField("message")  # free-text body
    submit = SubmitField("submit")
|
import logging
import signal
logger = logging.getLogger(__name__)
class GracefulKiller:
    """Flips ``kill_now`` to True when SIGINT or SIGTERM arrives, so a main
    loop can poll it and shut down cleanly."""

    kill_now = False  # class-level default; shadowed per-instance once a signal fires

    def __init__(self):
        # Route both termination signals to the same handler.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        """Signal handler: log the signal and record the request to stop."""
        logger.debug('Got signal: %s', signum)
        self.kill_now = True
|
import telebot
import config
import cityWeather as cw
import wear
bot = telebot.TeleBot(config.TOKEN)
@bot.message_handler(commands=['start','help','reroll'])
@bot.message_handler(commands=['start','help','reroll'])
def req(mes):
    """Handle the /start, /help and /reroll commands.

    /reroll re-runs the weather lookup for the last city message stored in
    the module-global ``message1``; when no city was entered yet it explains
    how to start.
    """
    if mes.text == '/help':
        bot.send_message(mes.chat.id, 'Для получения информации о погоде в городе, введите его и отправьте мне.')
    elif mes.text == '/start':
        bot.send_message(mes.chat.id, 'Начнем! Для справки выберите команду /help.')
    elif mes.text == '/reroll':
        try:
            if(message1 == ''):
                # Bug fix: message1 is the empty string here, so it has no
                # .chat attribute -- reply via the incoming message instead.
                bot.send_message(mes.chat.id, 'Город еще не был введен. Для справки выберите команду /help.')
            else:
                cw.cityWeather(message1)
                res = wear.clothes(int(cw.temp), cw.w.status)
                bot.send_message(message1.chat.id, str(cw.result) + '\n' + str(res))
        except Exception:
            # Bug fix: the old handler also dereferenced message1.chat here,
            # which re-raised AttributeError when message1 == ''.
            bot.send_message(mes.chat.id, 'Город еще не был введен. Для справки выберите команду /help.')
    else:
        bot.send_message(mes.chat.id, 'Введена несуществующая команда. Для справки выберите команду \n /help.')
message1 = ''
@bot.message_handler(content_types=['text'])
def main(message):
    """Treat any plain-text message as a city name: fetch its weather, pick
    clothing advice, reply, and remember the message for /reroll."""
    try:
        global message1
        message1 = message
        cw.cityWeather(message)
        res = wear.clothes(int(cw.temp), cw.w.status)
        bot.send_message(message.chat.id, str(cw.result) + '\n' + str(res))
    except:
        # Any failure (unknown city, API error) is reported as an unknown city.
        bot.send_message(message.chat.id, 'Введен несуществующий город.')
bot.polling(none_stop=True) |
#################################################################################################################
# ewstools
# Description: Python package for computing, analysing and visualising
# early warning signals (EWS) in time-series data
# Author: Thomas M Bury
# Web: https://www.thomasbury.net/
# Code repo: https://github.com/ThomasMBury/ewstools
# Documentation: https://ewstools.readthedocs.io/
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Thomas Bury
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################################################
#---------------------------------
# Import relevant packages
#--------------------------------
# For numeric computation and DataFrames
import numpy as np
import pandas as pd
# To compute power spectrum using Welch's method
from scipy import signal
import scipy.linalg
# For fitting power spectrum models and computing AIC weights
from lmfit import Model
def pspec_welch(yVals,
                dt,
                ham_length=40,
                ham_offset=0.5,
                w_cutoff=1,
                scaling='spectrum'):
    '''
    Computes the power spectrum of a time-series using Welch's method.

    The time-series is assumed to be stationary and to have equally spaced
    measurements in time. The power spectrum is computed using Welch's method,
    which computes the power spectrum over a rolling window of subsets of the
    time-series and then takes the average.

    Args
    ----
    yVals: array of floats
        Array of time-series values.
    dt: float
        Seperation between data points.
    ham_length: int
        Length of Hamming window (number of data points). A value in (0, 1]
        is interpreted as a proportion of the series length.
    ham_offset: float
        Hamming offset as a proportion of the Hamming window size.
    w_cutoff: float
        Cutoff frequency used in power spectrum. Given as a proportion of the
        maximum permissable frequency in the empirical
        power spectrum.
    scaling: {'spectrum', 'density'}
        Whether to compute the power spectrum ('spectrum') or
        the power spectral density ('density'). The power spectral density
        is the power spectrum normalised (such that the area underneath equals one).

    Returns
    -------
    pd.Series:
        Power values indexed by frequency
    '''

    ## Assign properties of *series* to parameters
    # Compute the sampling frequency
    fs = 1/dt
    # Number of data points
    num_points = len(yVals)
    # If ham_length given as a proportion - compute number of data points in ham_length.
    # Bug fix: cast to int -- num_points * ham_length is a float, and
    # scipy.signal.welch requires an integer nperseg.
    if 0 < ham_length <= 1:
        ham_length = int(num_points * ham_length)
    # If Hamming length given is less than the length of the t-series, make ham_length=length of tseries.
    if ham_length >= num_points:
        ham_length = num_points
    # Compute number of points in offset
    ham_offset_points = int(ham_offset*ham_length)

    ## Compute the periodogram using Welch's method (scipy.signal function)
    pspec_raw = signal.welch(yVals,
                             fs,
                             nperseg=ham_length,
                             noverlap=ham_offset_points,
                             return_onesided=False,
                             scaling=scaling)

    # Put into a pandas series and index by frequency (scaled by 2*pi)
    pspec_series = pd.Series(pspec_raw[1], index=2*np.pi*pspec_raw[0], name='power')
    pspec_series.index.name = 'frequency'

    # Sort into ascending frequency
    pspec_series.sort_index(inplace=True)

    # Append power spectrum with first value (by symmetry)
    pspec_series.at[-min(pspec_series.index)] = pspec_series.iat[0]

    # Impose cutoff frequency
    wmax = w_cutoff*max(pspec_series.index) # cutoff frequency
    pspec_output = pspec_series[-wmax:wmax] # subset of power spectrum

    return pspec_output
#------------Functional forms of power spectra to fit------------#
def psd_fold(w, sigma, lam):
    """Analytical form of the power spectrum prior to a Fold bifurcation.

    A Lorentzian centred at zero frequency:
        S(w) = sigma^2 / (2*pi*(w^2 + lam^2))
    """
    noise_level = sigma ** 2 / (2 * np.pi)
    return noise_level / (w ** 2 + lam ** 2)
def psd_flip(w, sigma, r):
    """Analytical form of the power spectrum prior to a Flip (period-doubling) bifurcation.

        S(w) = sigma^2 / (2*pi*(1 + r^2 - 2*r*cos(w)))
    """
    denom = 1 + r ** 2 - 2 * r * np.cos(w)
    return sigma ** 2 / (2 * np.pi) / denom
def psd_hopf(w, sigma, mu, w0):
    """Analytical form of the power spectrum prior to a Hopf bifurcation.

    Sum of two Lorentzians centred at +w0 and -w0, each of half-width |mu|.
    """
    peak_pos = 1 / ((w - w0) ** 2 + mu ** 2)
    peak_neg = 1 / ((w + w0) ** 2 + mu ** 2)
    return (sigma ** 2 / (4 * np.pi)) * (peak_neg + peak_pos)
def psd_null(w, sigma):
    """Flat power spectrum of white noise.

    The `w**0` factor makes the output follow the shape of `w` when `w`
    is an array, while a scalar `w` yields a scalar.
    """
    flat_level = sigma ** 2 / (2 * np.pi)
    return flat_level * w ** 0
#-------Obtain 'best guess' intitialisation parameters for optimisation------%
def shopf_init(smax, stot, wdom):
    '''
    Compute the 'best guess' initialisation values for sigma, mu and w0,
    when fitting sHopf to the empirical power spectrum.

    Args
    ----
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.
    wdom: float
        Frequency that has the highest power.

    Return
    ------
    list of floats:
        List containing the initialisation parameters [sigma, mu, w0]
    '''
    # Chunky intermediate term obtained from inverting the analytical
    # expressions for Smax and Stot of the Hopf spectrum.
    alpha = stot**3 \
        + 9*(np.pi**2)*(wdom**2)*(smax**2)*stot \
        + 3*np.sqrt(3)*np.pi*np.sqrt(
            64*(np.pi**4)*(wdom**6)*(smax**6)
            - 13*(np.pi**2)*(wdom**4)*(smax**4)*(stot**2)
            + 2*(wdom**2)*(smax**2)*(stot**4)
        )
    # The cube root of alpha appears twice in the expression for mu;
    # compute the whole term once instead of evaluating alpha (and its
    # nested square root) twice as the original code did.
    alpha_cbrt = alpha**(1/3)
    # Initialisation for mu
    mu = -(1/(3*np.pi*smax))*(stot
        + alpha_cbrt
        + (stot**2 - 12*(np.pi**2)*(wdom**2)*(smax**2))/alpha_cbrt)
    # Initialisation for sigma (mu < 0, so the radicand is positive)
    sigma = np.sqrt(-2*mu*stot)
    # Initialisation for w0: simply the dominant frequency
    w0 = wdom
    # Return list
    return [sigma, mu, w0]
def sfold_init(smax, stot):
    """Best-guess initialisation values [sigma, lambda] for fitting the
    Fold spectrum to an empirical power spectrum.

    Args
    ----
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.

    Return
    ------
    list of floats:
        Initialisation parameters [sigma, lambda].
    """
    # Both guesses follow from inverting S(0) = smax and total power = stot
    peak_ratio = stot / (np.pi * smax)
    lamda = -peak_ratio
    sigma = np.sqrt(2 * stot * peak_ratio)
    return [sigma, lamda]
def sflip_init(smax, stot):
    """Best-guess initialisation values [sigma, r] for fitting the
    Flip spectrum to an empirical power spectrum.

    Args
    ----
    smax: float
        Maximum power in the power spectrum.
    stot: float
        Total power in the power spectrum.

    Return
    ------
    list of floats:
        Initialisation parameters [sigma, r].
    """
    # r follows from the ratio of total power to peak power
    two_pi_smax = 2 * np.pi * smax
    r = (stot - two_pi_smax) / (stot + two_pi_smax)
    # sigma then follows from the total power
    sigma = np.sqrt(stot * (1 - r ** 2))
    return [sigma, r]
def snull_init(stot):
    """Best-guess initialisation value [sigma] for fitting the Null
    (white noise) spectrum to an empirical power spectrum.

    Args
    ----
    stot: float
        Total power in the power spectrum.

    Return
    ------
    list of floats:
        Initialisation parameters [sigma].
    """
    # A flat spectrum's total power equals sigma^2
    return [np.sqrt(stot)]
#---------Run optimisation to compute best fits-----------#
# Fold fit
def fit_fold(pspec, init):
    """Fit the Fold power spectrum model to `pspec` and compute its AIC score.

    Optimisation is performed with the LMFIT package.

    Args
    ----
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init, lambda_init].

    Returns
    -------
    list:
        [aic, result] where `aic` is the AIC score of the fit and
        `result` is the LMFIT result object with full fit details.
    """
    # LMFIT consumes plain lists of x (frequency) and y (power) values
    freq_vals = pspec.index.tolist()
    power_vals = pspec.tolist()
    sigma_init, lambda_init = init

    fold_model = Model(psd_fold)
    # The constraint S(wMax) < psi_fold * S(0) translates into a lower bound on lambda
    psi_fold = 0.5
    wMax = max(freq_vals)
    # sigma must stay positive; cap it to keep the optimiser bounded
    fold_model.set_param_hint('sigma', value=sigma_init, min=0, max=10*sigma_init)
    # lambda is negative ahead of a Fold bifurcation
    fold_model.set_param_hint('lam', min=-np.sqrt(psi_fold/(1-psi_fold))*wMax, max=0, value=lambda_init)
    params = fold_model.make_params()

    # Run the optimisation against the empirical spectrum
    result = fold_model.fit(power_vals, params, w=freq_vals)
    return [result.aic, result]
# Flip fit
def fit_flip(pspec, init):
    """Fit the Flip power spectrum model to `pspec` and compute its AIC score.

    Optimisation is performed with the LMFIT package.

    Args
    ----
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init, r_init].

    Returns
    -------
    list:
        [aic, result] where `aic` is the AIC score of the fit and
        `result` is the LMFIT result object with full fit details.
    """
    # LMFIT consumes plain lists of x (frequency) and y (power) values
    freq_vals = pspec.index.tolist()
    power_vals = pspec.tolist()
    sigma_init, r_init = init

    flip_model = Model(psd_flip)
    # sigma must stay positive; cap it to keep the optimiser bounded
    flip_model.set_param_hint('sigma', value=sigma_init, min=0, max=10*sigma_init)
    # r is constrained to (-1, 0) ahead of a Flip bifurcation
    flip_model.set_param_hint('r', min=-1, max=0, value=r_init)
    params = flip_model.make_params()

    # Run the optimisation against the empirical spectrum
    result = flip_model.fit(power_vals, params, w=freq_vals)
    return [result.aic, result]
# Function to fit the Hopf model to an empirical spectrum with a specified initial parameter guess
def fit_hopf(pspec, init):
    '''
    Fit the Hopf power spectrum model to pspec and compute AIC score.
    Uses the package LMFIT for optimisation.

    Args
    --------------
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency
    init: list of floats
        Initial parameter guesses of the form [sigma_init, mu_init, w0_init]

    Returns
    ----------------
    list:
        Form [aic, result] where aic is the AIC score for the model fit,
        and result is a handle that contains further information on the fit.
    '''
    # Put frequency values and power values as a list to use LMFIT
    freq_vals = pspec.index.tolist()
    power_vals = pspec.tolist()

    # Assign labels to initialisation values
    sigma_init, mu_init, w0_init = init

    # If any initialisation value is nan, fall back to default values
    if np.isnan(sigma_init) or np.isnan(mu_init) or np.isnan(w0_init):
        sigma_init, mu_init, w0_init = [1,-0.1,1]

    # Constraint parameter: forces the fitted spectrum to be sufficiently
    # peaked at w0 relative to its value at zero frequency
    psi_hopf = 0.2

    # Compute initialisation value for the dummy variable delta (direct map with w0)
    # It must be positive to adhere to the constraint - thus if negative,
    # floor it at a small positive value (0.0001).
    delta_init = max(
        w0_init + (mu_init/(2*np.sqrt(psi_hopf)))*np.sqrt(4-3*psi_hopf + np.sqrt(psi_hopf**2-16*psi_hopf+16)),
        0.0001)

    # Assign model object
    model = Model(psd_hopf)

    ## Set initialisation parameters in model attributes
    # Sigma must be positive (no upper bound is imposed here)
    model.set_param_hint('sigma', value=sigma_init, min=0)
    # Psi is a fixed parameter (not used in optimisation)
    model.set_param_hint('psi', value=psi_hopf, vary=False)
    # Mu must be negative
    model.set_param_hint('mu', value=mu_init, max=0, vary=True)
    # Delta is a dummy parameter, satisfying d = w0 - wThresh (see paper for wThresh). It is allowed to vary, in place of w0.
    model.set_param_hint('delta', value = delta_init, min=0, vary=True)
    # w0 is a derived parameter computed from delta via the expr
    # (w0 = delta + wThresh); vary=False since the expression determines it
    model.set_param_hint('w0',expr='delta - (mu/(2*sqrt(psi)))*sqrt(4-3*psi + sqrt(psi**2-16*psi+16))',max=2.5,vary=False)

    # Assign initial parameter values and constraints
    params = model.make_params()
    # Fit model to the empirical spectrum
    result = model.fit(power_vals, params, w=freq_vals)
    # Compute AIC score
    aic = result.aic
    # print('hopf aic is {}'.format(aic))

    # Export AIC score and model fit
    return [aic, result]
# Function to fit the Null model to an empirical spectrum with a specified initial parameter guess
def fit_null(pspec, init):
    """Fit the flat Null (white noise) power spectrum model to `pspec`
    and compute its AIC score.

    Optimisation is performed with the LMFIT package.

    Args
    ----
    pspec: pd.Series
        Power spectrum data as a Series indexed by frequency.
    init: list of floats
        Initial parameter guesses [sigma_init].

    Returns
    -------
    list:
        [aic, result] where `aic` is the AIC score of the fit and
        `result` is the LMFIT result object with full fit details.
    """
    # LMFIT consumes plain lists of x (frequency) and y (power) values
    freq_vals = pspec.index.tolist()
    power_vals = pspec.tolist()
    sigma_init = init[0]

    null_model = Model(psd_null)
    # sigma is the only free parameter; keep it positive and bounded
    null_model.set_param_hint('sigma', value=sigma_init, vary=True, min=0, max=10*sigma_init)
    params = null_model.make_params()

    # Run the optimisation against the empirical spectrum
    result = null_model.fit(power_vals, params, w=freq_vals)
    return [result.aic, result]
def aic_weights(aic_scores):
    """Convert AIC scores into normalised Akaike weights.

    Args
    ----
    aic_scores: np.array
        Array of AIC scores.

    Returns
    -------
    np.array:
        AIC weights corresponding to each score (they sum to one).
    """
    # Relative likelihood of each model, measured from the best (lowest) score
    rel_likelihood = np.exp(-0.5 * (aic_scores - aic_scores.min()))
    # Normalise so the weights sum to one
    return rel_likelihood / rel_likelihood.sum()
#-----------Compute spectral metrics (EWS) from power spectrum------#
def pspec_metrics(pspec,
                  ews = ['smax','cf','aic'],
                  aic = ['Fold','Hopf','Null'],
                  sweep = False):
    '''
    Compute the metrics associated with pspec that can be
    used as early warning signals (EWS).

    Args
    -------------------
    pspec: pd.Series
        Power spectrum as a Series indexed by frequency
    ews: list of {'smax', 'cf', 'aic'}
        EWS to be computed. Options include peak in the power spectrum ('smax'),
        coherence factor ('cf'), AIC weights ('aic').
    aic: list of {'Fold', 'Flip', 'Hopf', 'Null'}
        Model spectra whose AIC weights should be computed.
    sweep: bool
        If 'True', sweep over a range of intialisation
        parameters when optimising to compute AIC scores, at the expense of
        longer computation. If 'False', intialisation parameter is taken as the
        'best guess'.

    Return
    -------------------
    dict:
        A dictionary of spectral EWS obtained from pspec

    NOTE(review): the mutable list defaults for `ews` and `aic` are only
    read, never mutated, so they are harmless here — but tuples would be
    safer if this signature is ever revisited.
    '''
    # Initialise a dictionary for EWS
    spec_ews = {}

    ## Compute Smax (peak power)
    if 'smax' in ews:
        smax = max(pspec)
        # add to output dictionary
        spec_ews['Smax'] = smax

    ## Compute the coherence factor
    if 'cf' in ews:
        # frequency at which peak occurs
        w_peak = abs(pspec.idxmax())
        # power of peak frequency
        power_peak = pspec.max()
        # compute the first frequency from -w_peak at which power<power_peak/2
        w_half = next( (w for w in pspec[-w_peak:].index if pspec.loc[w] < power_peak/2 ), 'None')
        # if there was no such frequency, or if peak crosses zero frequency,
        # set w_peak = 0 (makes CF=0)
        if w_half == 'None' or w_half > 0:
            w_peak = 0
        else:
            # double the difference between w_half and -w_peak to get the width of the peak
            w_width = 2*(w_half - (-w_peak))
        # compute coherence factor (height/relative width); the ternary
        # short-circuits, so w_width is only read when w_peak != 0
        coher_factor = power_peak/(w_width/w_peak) if w_peak != 0 else 0
        # add to output dictionary
        spec_ews['Coherence factor'] = coher_factor

    ## Compute AIC weights of fitted analytical forms
    if 'aic' in ews:
        # Compute the empirical metrics that allow us to choose sensible initialisation parameters
        # Peak in power spectrum
        smax = pspec.max()
        # Area underneath power spectrum (~ variance)
        stot = pspec.sum()*(pspec.index[1]-pspec.index[0])
        # Dominant frequency (take positive value)
        wdom = abs(pspec.idxmax())

        ## Create array of initialisation parameters
        # Sweep values (as proportion of baseline guess) if sweep = True
        sweep_vals = np.array([0.5,1,1.5]) if sweep else np.array([1])
        # Baseline parameter initialisations (computed using empirical spectrum)
        # Sfold
        [sigma_init_fold, lambda_init] = sfold_init(smax,stot)
        # Sflip
        [sigma_init_flip, r_init] = sflip_init(smax,stot)
        # Shopf
        [sigma_init_hopf, mu_init, w0_init] = shopf_init(smax,stot,wdom)
        # Snull
        [sigma_init_null] = snull_init(stot)

        # Arrays of initial values
        init_fold_array = {'sigma': sweep_vals*sigma_init_fold,
                           'lambda': sweep_vals*lambda_init}
        # r parameter cannot go below -1, so its sweep is clamped differently
        r_sweep_vals = [0.5*r_init,r_init,0.5*r_init+0.5] if sweep else [r_init]
        init_flip_array = {'sigma': sweep_vals*sigma_init_flip,
                           'r': r_sweep_vals}
        init_hopf_array = {'sigma': sweep_vals*sigma_init_hopf,
                           'mu': sweep_vals*mu_init,
                           'w0': sweep_vals*w0_init}
        init_null_array = {'sigma': sweep_vals*sigma_init_null}

        ## Compute AIC values and fits

        ## Fold
        # Initialise list to store AIC and model fits
        fold_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_fold_array['sigma'])):
            for j in range(len(init_fold_array['lambda'])):
                # Initial parameter guess
                init_fold = [init_fold_array['sigma'][i],init_fold_array['lambda'][j]]
                # Compute fold fit and AIC score
                [aic_temp, model_temp] = fit_fold(pspec, init_fold)
                # Store in list
                fold_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(fold_aic_fits)
        # Pick out the best model (lowest AIC)
        [aic_fold, model_fold] = array_temp[array_temp[:,0].argmin()]

        ## Flip
        # Initialise list to store AIC and model fits
        flip_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_flip_array['sigma'])):
            for j in range(len(init_flip_array['r'])):
                # Initial parameter guess
                init_flip = [init_flip_array['sigma'][i],init_flip_array['r'][j]]
                # Compute flip fit and AIC score
                [aic_temp, model_temp] = fit_flip(pspec, init_flip)
                # Store in list
                flip_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(flip_aic_fits)
        # Pick out the best model (lowest AIC)
        [aic_flip, model_flip] = array_temp[array_temp[:,0].argmin()]

        ## Hopf
        # Initialise list to store AIC and model fits
        hopf_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_hopf_array['sigma'])):
            for j in range(len(init_hopf_array['mu'])):
                for k in range(len(init_hopf_array['w0'])):
                    # Initial parameter guess
                    init_hopf = [init_hopf_array['sigma'][i],init_hopf_array['mu'][j],init_hopf_array['w0'][k]]
                    # Compute hopf fit and AIC score
                    [aic_temp, model_temp] = fit_hopf(pspec, init_hopf)
                    # Store in list
                    hopf_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(hopf_aic_fits)
        # Pick out the best model (lowest AIC)
        [aic_hopf, model_hopf] = array_temp[array_temp[:,0].argmin()]

        ## Null
        # Initialise list to store AIC and model fits
        null_aic_fits = []
        # Sweep over initial parameter guesses and pick best convergence
        for i in range(len(init_null_array['sigma'])):
            # Initial parameter guess
            init_null = [init_null_array['sigma'][i]]
            # Compute null fit and AIC score
            [aic_temp, model_temp] = fit_null(pspec, init_null)
            # Store in list
            null_aic_fits.append([aic_temp, model_temp])
        # Put list into array
        array_temp = np.array(null_aic_fits)
        # Pick out the best model (lowest AIC)
        [aic_null, model_null] = array_temp[array_temp[:,0].argmin()]

        # Compute chosen AIC weights from the AIC scores
        aic_scores = {}
        if 'Fold' in aic:
            aic_scores['Fold']=aic_fold
        if 'Flip' in aic:
            aic_scores['Flip']=aic_flip
        if 'Hopf' in aic:
            aic_scores['Hopf']=aic_hopf
        if 'Null' in aic:
            aic_scores['Null']=aic_null
        aicw = aic_weights(np.array([aic_scores[x] for x in aic]))
        aic_dict = dict(zip(aic,aicw))

        # Add the requested AIC weights to the output dictionary
        if 'Fold' in aic:
            spec_ews['AIC fold'] = aic_dict['Fold']
        if 'Flip' in aic:
            spec_ews['AIC flip'] = aic_dict['Flip']
        if 'Hopf' in aic:
            spec_ews['AIC hopf'] = aic_dict['Hopf']
        if 'Null' in aic:
            spec_ews['AIC null'] = aic_dict['Null']

        # Add fitted parameter values to the output dictionary
        spec_ews['Params fold'] = dict((k, model_fold.values[k]) for k in ('sigma','lam')) # don't include dummy params
        spec_ews['Params flip'] = dict((k, model_flip.values[k]) for k in ('sigma','r'))
        spec_ews['Params hopf'] = dict((k, model_hopf.values[k]) for k in ('sigma','mu','w0','delta','psi'))
        spec_ews['Params null'] = model_null.values

    # Return dictionary of metrics
    return spec_ews
#------------------------
## Function to compute lag-1 autocovariance matrix
def compute_autocov(df_in):
    """Compute the lag-1 autocovariance matrix of the n time series in df_in.

    Uses the definition
        phi_ij = < X_i(t+1) X_j(t) >
    for each element of the autocovariance matrix phi.

    Args
    ----
    df_in: DataFrame with n columns indexed by time

    Return
    ------
    np.array:
        Autocovariance matrix.
    """

    def _lag1_cov(lead, lag):
        """Lag-1 autocovariance of two columns (they may be identical).

        Note this is not symmetric in its arguments in general.
        """
        # Shift the leading series forward by one time step
        shifted = lead.shift(1)
        paired = pd.concat([shifted, lag], axis=1)
        # Off-diagonal entry of the 2x2 covariance matrix
        return paired.cov().iloc[0, 1]

    cols = df_in.columns
    n = len(cols)
    # Build all n*n entries in row-major order via a comprehension
    entries = [
        _lag1_cov(df_in[cols[i]], df_in[cols[j]])
        for i in range(n)
        for j in range(n)
    ]
    return np.array(entries).reshape(n, n)
'''
Computes the autocovariance (lag-1) matrix of n
time series provided in df_in.
Using the definition
phi_ij = < X_i(t+1) X_j(t) >
for each element of the autocovariance matrix phi.
Args
-------------------
df_in: DataFrame with n columns indexed by time
Return
-------------------
np.array:
autocovariance matrix
'''
#---------------------------------------
## Function to do Jacobian and eval reconstruction
def eval_recon(df_in):
    """Estimate the Jacobian matrix from stationary time-series data and
    return its eigen-decomposition.

    Args
    ----
    df_in: DataFrame with two columns indexed by time

    Return
    ------
    dict:
        'Eigenvalues': np.array of eigenvalues,
        'Eigenvectors': np.array of eigenvectors,
        'Jacobian': pd.DataFrame of Jacobian entries.
    """
    # Time separation between consecutive data points
    # NOTE(review): dt is computed but not used below — kept to preserve behaviour
    dt = df_in.index[1] - df_in.index[0]

    # Lag-1 autocovariance matrix and (zero-lag) covariance matrix
    autocov_matrix = compute_autocov(df_in)
    cov_matrix = df_in.cov()

    # Discrete-time Jacobian estimate (Williamson 2015): autocov * cov^-1
    jacobian = np.matmul(autocov_matrix, np.linalg.inv(cov_matrix))
    # Wrap in a DataFrame so the output keeps the column labels
    df_jac = pd.DataFrame(jacobian, columns=df_in.columns, index=df_in.columns)

    # Eigen-decomposition of the estimated Jacobian
    evals, evecs = np.linalg.eig(jacobian)

    return {'Eigenvalues': evals,
            'Eigenvectors': evecs,
            'Jacobian': df_jac}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the Option key/value settings table."""

    # No prior migrations — this is the app's first migration
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Option',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Unique lookup key for the option
                ('key', models.CharField(unique=True, max_length=64, verbose_name='Key')),
                ('description', models.TextField(null=True, verbose_name='Description', blank=True)),
                ('value', models.CharField(max_length=32, verbose_name='Value')),
            ],
            options={
                'ordering': ('key',),
                'verbose_name': 'Option',
                'verbose_name_plural': 'Options',
            },
            bases=(models.Model,),
        ),
    ]
|
#
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2015-2016 Anuta Networks, Inc. All Rights Reserved.
#
#
# DO NOT EDIT THIS FILE; IT IS AUTOGENERATED.
# ALL CUSTOMIZATIONS REGARDING DATA PROCESSING SHOULD BE WRITTEN IN THE service_customization.py FILE.
#
"""
Tree Structure of Handled XPATH:
services
|
day1services
|
day1service
|
aaa
Schema Representation:
/services/day1services/day1service/aaa
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from tclday0config.tclday0config_lib import getLocalObject
from tclday0config.tclday0config_lib import getDeviceObject
from tclday0config.tclday0config_lib import getCurrentObjectConfig
from tclday0config.tclday0config_lib import ServiceModelContext
from tclday0config.tclday0config_lib import getParentObject
from tclday0config.tclday0config_lib import log
from servicemodel.controller.devices.device import aaa_group
import service_customization
class Aaa(yang.AbstractYangServiceHandler):
    """Autogenerated service handler for the /services/day1services/day1service/aaa xpath.

    Translates service-model create/update/delete events into aaa_group
    device bindings, delegating customisable steps to service_customization.
    """
    # Singleton instance, managed by getInstance()
    _instance = None

    def __init__(self):
        # Pre-reserve processors registered on the session during create/delete
        self.delete_pre_processor = service_customization.DeletePreProcessor()
        self.create_pre_processor = service_customization.CreatePreProcessor()
        # Opaque bag passed through to every customization hook
        # NOTE(review): shared across create/update/delete calls on the singleton —
        # confirm the hooks expect that before storing per-request state in it
        self.opaque_args = {}

    def create(self, id, sdata):
        """Handle service creation: build the aaa_group payload and push it to the device."""
        sdata.getSession().addYangSessionPreReserveProcessor(self.create_pre_processor)
        #Fetch Local Config Object
        config = getCurrentObjectConfig(id, sdata, 'aaa')
        #Fetch Service Model Context Object
        smodelctx = None
        #Fetch Parent Object
        parentobj = getParentObject(sdata)

        dev = []
        inputkeydict = {}
        devbindobjs={}
        inputdict = {}
        opaque_args = self.opaque_args
        vrf = None
        #rcpath = util.get_parent_rcpath(sdata.getRcPath())
        #xml_output = yang.Sdk.getData(rcpath, '', sdata.getTaskId())
        #parentobj = util.parseXmlString(xml_output)
        # vrf is inherited from the parent day1service, when present
        if hasattr(parentobj.day1service,'vrf'):
            vrf = parentobj.day1service.vrf

        # START OF FETCHING THE LEAF PARAMETERS
        inputdict['tacacs_server_group'] = config.get_field_value('tacacs_server_group')
        inputdict['vrf'] = vrf
        inputdict['source_interface'] = config.get_field_value('source_interface')
        inputdict['aaa_new_model'] = config.get_field_value('aaa_new_model')
        # Default aaa_new_model to the string 'False' when the leaf is absent
        if inputdict.get('aaa_new_model') is None:
            inputdict['aaa_new_model'] = 'False'
        # END OF FETCHING THE LEAF PARAMETERS

        _Gen_obj = getLocalObject(sdata, 'day1service')
        device_mgmt_ip_address = _Gen_obj.day1service.device_ip
        #Fetch Device Object
        dev = getDeviceObject(device_mgmt_ip_address, sdata)

        # START OF FETCHING THE PARENT KEY LEAF PARAMETERS
        # Extract the device_ip key from the penultimate rcpath segment (".../day1service=<ip>/...")
        inputkeydict['day1services_day1service_device_ip'] = sdata.getRcPath().split('/')[-2].split('=')[1]
        # END OF FETCHING THE PARENT KEY LEAF PARAMETERS

        #Use the custom methods to process the data
        service_customization.ServiceDataCustomization.process_service_create_data(smodelctx, sdata, dev, id=id, device=dev, parentobj=parentobj, inputdict=inputdict, inputkeydict=inputkeydict, config=config, hopaque=opaque_args)

        #Start of Device binding with python bindings
        aaa_group_object = aaa_group.aaa_group()
        aaa_group_object.tacacs_server_group = inputdict.get('tacacs_server_group')
        aaa_group_object.vrf = inputdict.get('vrf')
        aaa_group_object.source_interface = inputdict.get('source_interface')
        aaa_group_object.aaa_new_model = inputdict.get('aaa_new_model')
        #End of Device binding
        devbindobjs['aaa_group_object'] = aaa_group_object

        #Use the custom method to process/create payload
        service_customization.ServiceDataCustomization.process_service_device_bindings(smodelctx, sdata, dev, id=id, device=dev, inputdict=inputdict, inputkeydict=inputkeydict, parentobj=parentobj, config=config, devbindobjs=devbindobjs, hopaque=opaque_args)

        # Serialise the binding to XML and push it to every resolved device
        aaa_group_object_payload = aaa_group_object.getxml(filter=True)
        #log('aaa_group_object_payload: %s' % (aaa_group_object_payload))
        for dev_iterator in dev:
            yang.Sdk.createData(dev_iterator.url+'',aaa_group_object_payload, sdata.getSession(), True)

    def update(self, id, sdata):
        """Handle service update: gather current leaf values and delegate to the customization hook."""
        #Fetch Local Config Object
        config = getCurrentObjectConfig(id, sdata, 'aaa')
        opaque_args = self.opaque_args
        #Fetch Service Model Context Object
        smodelctx = None
        #Fetch Parent Object
        parentobj = getParentObject(sdata)

        dev = []
        inputkeydict = {}
        devbindobjs={}
        inputdict = {}
        opaque_args = self.opaque_args

        # START OF FETCHING THE LEAF PARAMETERS
        inputdict['tacacs_server_group'] = config.get_field_value('tacacs_server_group')
        inputdict['vrf'] = config.get_field_value('vrf')
        inputdict['source_interface'] = config.get_field_value('source_interface')
        inputdict['aaa_new_model'] = config.get_field_value('aaa_new_model')
        # Default aaa_new_model to the string 'False' when the leaf is absent
        if inputdict.get('aaa_new_model') is None:
            inputdict['aaa_new_model'] = 'False'
        # END OF FETCHING THE LEAF PARAMETERS

        _Gen_obj = getLocalObject(sdata, 'day1service')
        device_mgmt_ip_address = _Gen_obj.day1service.device_ip
        #Fetch Device Object
        dev = getDeviceObject(device_mgmt_ip_address, sdata)

        #Use the custom method to process the data
        service_customization.ServiceDataCustomization.process_service_update_data(smodelctx, sdata, id=id, dev=dev, parentobj=parentobj, config=config, hopaque=opaque_args, inputdict=inputdict)

    def delete(self, id, sdata):
        """Handle service deletion: gather current leaf values and delegate to the customization hook."""
        sdata.getSession().addYangSessionPreReserveProcessor(self.delete_pre_processor)
        #Fetch Local Config Object
        config = getCurrentObjectConfig(id, sdata, 'aaa')
        opaque_args = self.opaque_args
        #Fetch Service Model Context Object
        smodelctx = None
        #Fetch Parent Object
        parentobj = getParentObject(sdata)

        dev = []
        inputkeydict = {}
        devbindobjs={}
        inputdict = {}
        opaque_args = self.opaque_args

        # START OF FETCHING THE LEAF PARAMETERS
        inputdict['tacacs_server_group'] = config.get_field_value('tacacs_server_group')
        inputdict['vrf'] = config.get_field_value('vrf')
        inputdict['source_interface'] = config.get_field_value('source_interface')
        inputdict['aaa_new_model'] = config.get_field_value('aaa_new_model')
        # Default aaa_new_model to the string 'False' when the leaf is absent
        if inputdict.get('aaa_new_model') is None:
            inputdict['aaa_new_model'] = 'False'
        # END OF FETCHING THE LEAF PARAMETERS

        _Gen_obj = getLocalObject(sdata, 'day1service')
        device_mgmt_ip_address = _Gen_obj.day1service.device_ip
        #Fetch Device Object
        dev = getDeviceObject(device_mgmt_ip_address, sdata)

        #Use the custom method to process the data
        service_customization.ServiceDataCustomization.process_service_delete_data(smodelctx, sdata, id=id, dev=dev, parentobj=parentobj, config=config, hopaque=opaque_args, inputdict=inputdict)

    @staticmethod
    def getInstance():
        """Return the shared singleton handler, creating it on first use."""
        if(Aaa._instance == None):
            Aaa._instance = Aaa()
        return Aaa._instance

    def rollbackCreate(self, id, sdata):
        """Undo a failed create by running the delete path."""
        log('rollback: id = %s, sdata = %s' % (id, sdata))
        self.delete(id,sdata)
# Smoke-test hook: runs only when this module is loaded under the name 'aaa'
# (framework convention — NOTE(review): confirm this is intentional and not a
# stand-in for '__main__').
if __name__ == 'aaa':
    from servicemodel.yang import YangServiceData
    sdata = YangServiceData()
    # Fix: use the static accessor directly. The original `Aaa().getInstance()`
    # constructed (and discarded) an extra handler — including its pre-processor
    # objects — before fetching the singleton.
    instance = Aaa.getInstance()
    instance.create(None, sdata)
    instance.delete(None, sdata)
    instance.update(None, sdata)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import sys
from vectors import Vector
from colors import Color
import actors
import actions
import language
import logs
test_string = '''
Cast:
bob = Square size 50x50 color red
star = Star color white
'''
#print(table_of_symbols)
#print(result.asList())
logger = logs.create(__name__)
class TestInteger(unittest.TestCase):
    """Grammar tests for the Integer token."""

    def test_positive_integer(self):
        from language import Integer
        parsed = Integer.parseString('45')
        self.assertEqual(parsed[0], 45)

    def test_negative_integer(self):
        from language import Integer
        parsed = Integer.parseString('-32')
        self.assertEqual(parsed[0], -32)

    def test_zero(self):
        from language import Integer
        # Both '0' and '-0' must parse to plain zero
        parsed = Integer.parseString('0')
        self.assertEqual(parsed[0], 0)
        parsed = Integer.parseString('-0')
        self.assertEqual(parsed[0], 0)
class TestVector(unittest.TestCase):
    """Grammar tests for the vector token ('<x>x<y>'), covering all sign combinations."""

    def test_positive_positive_vector(self):
        from language import vector
        parsed = vector.parseString('45x72')
        self.assertEqual(parsed[0], Vector(45, 72))

    def test_positive_negative_vector(self):
        from language import vector
        parsed = vector.parseString('45x-72')
        self.assertEqual(parsed[0], Vector(45, -72))

    def test_negative_positive_vector(self):
        from language import vector
        parsed = vector.parseString('-45x72')
        self.assertEqual(parsed[0], Vector(-45, 72))

    def test_negative_negative_vector(self):
        from language import vector
        parsed = vector.parseString('-45x-72')
        self.assertEqual(parsed[0], Vector(-45, -72))
class TestInterval(unittest.TestCase):
    """Grammar tests for the Interval token: 'a-b', 'a+len', and bare 'a'."""

    def test_number_minus_number(self):
        from language import Interval
        parsed = Interval.parseString('23-45')
        self.assertEqual(parsed[0], (23, 45))

    def test_number_plus_number(self):
        from language import Interval
        # 'a+len' means the interval (a, a+len)
        parsed = Interval.parseString('23+45')
        self.assertEqual(parsed[0], (23, 68))

    def test_single_number(self):
        from language import Interval
        # A bare number is a unit-length interval
        parsed = Interval.parseString('23')
        self.assertEqual(parsed[0], (23, 24))
class TestActionLine(unittest.TestCase):
    """Grammar tests for ActionLine: '<interval> <verb> <actor> [args]'."""

    def test_action_exit(self):
        from language import ActionLine
        parsed = ActionLine.parseString('50 Exit bob')
        tokens = parsed[0]
        self.assertEqual(tokens[0], (50, 51))
        self.assertEqual(tokens[1], 'Exit')
        self.assertEqual(tokens[2], 'bob')
        # Exit takes no extra arguments
        self.assertEqual(len(tokens), 3)

    def test_action_foreground(self):
        from language import ActionLine
        parsed = ActionLine.parseString('0-1 Foreground bob')
        tokens = parsed[0]
        self.assertEqual(tokens[0], (0, 1))
        self.assertEqual(tokens[1], 'Foreground')
        self.assertEqual(tokens[2], 'bob')

    def test_action_move(self):
        from language import ActionLine
        parsed = ActionLine.parseString('1-23 Move bob 50x50')
        tokens = parsed[0]
        self.assertEqual(tokens[0], (1, 23))
        self.assertEqual(tokens[1], 'Move')
        self.assertEqual(tokens[2], 'bob')
        self.assertEqual(tokens[3], Vector(50, 50))

    def test_action_move_relative_interval(self):
        from language import ActionLine
        # '10+20' is a relative interval: start 10, duration 20
        parsed = ActionLine.parseString('10+20 Move bob 50x50')
        tokens = parsed[0]
        self.assertEqual(tokens[0], (10, 30))
        self.assertEqual(tokens[1], 'Move')
        self.assertEqual(tokens[2], 'bob')
        self.assertEqual(tokens[3], Vector(50, 50))
class TestCastLine(unittest.TestCase):
    """Grammar tests for actor attributes and full cast lines."""

    def test_attr_size(self):
        from language import attr
        parsed = attr.parseString('size 10x10')
        self.assertEqual(parsed[0], 'size')
        self.assertEqual(parsed[1], Vector(10, 10))

    def test_attr_color_name(self):
        from language import attr
        parsed = attr.parseString('color gold')
        self.assertEqual(parsed[0], 'color')
        self.assertEqual(parsed[1], Color('gold'))

    def test_attr_color_html_color_code(self):
        from language import attr
        parsed = attr.parseString('color #FF4433')
        self.assertEqual(parsed[0], 'color')
        self.assertEqual(parsed[1], Color(0xFF, 0x44, 0x33))

    def test_attr_pos(self):
        from language import attr
        parsed = attr.parseString('pos 32x67')
        self.assertEqual(parsed[0], 'pos')
        self.assertEqual(parsed[1], Vector(32, 67))

    def test_attr_num(self):
        from language import attr
        from pyparsing import ParseException
        parsed = attr.parseString('num 1')
        self.assertEqual(parsed[0], 'num')
        self.assertEqual(parsed[1], 1)
        # Out-of-range values must be rejected by the grammar
        self.assertRaises(
            ParseException,
            attr.parseString,
            'num 7'
        )

    def test_attr_radius(self):
        from language import attr
        parsed = attr.parseString('radius 100')
        self.assertEqual(parsed[0], 'radius')
        self.assertEqual(parsed[1], 100)

    def test_attr_width(self):
        from language import attr
        parsed = attr.parseString('width 100')
        self.assertEqual(parsed[0], 'width')
        self.assertEqual(parsed[1], 100)

    def test_attr_height(self):
        from language import attr
        parsed = attr.parseString('height 100')
        self.assertEqual(parsed[0], 'height')
        self.assertEqual(parsed[1], 100)

    def test_attr_text_simple_quotes(self):
        from language import attr
        parsed = attr.parseString("text 'this is a goat'")
        self.assertEqual(parsed[0], 'text')
        self.assertEqual(parsed[1], 'this is a goat')

    def test_attr_text_double_quotes(self):
        from language import attr
        parsed = attr.parseString('text "hello"')
        self.assertEqual(parsed[0], 'text')
        self.assertEqual(parsed[1], 'hello')

    def test_attr_alpha(self):
        from language import attr
        # Both '.33' and '0.75' spellings must parse
        parsed = attr.parseString('alpha .33')
        self.assertEqual(parsed[0], 'alpha')
        self.assertEqual(parsed[1], 0.33)
        parsed = attr.parseString('alpha 0.75')
        self.assertEqual(parsed[0], 'alpha')
        self.assertEqual(parsed[1], 0.75)

    def test_attr_points(self):
        from language import attr
        parsed = attr.parseString('points (33x22, 10x10, 50x50)')
        self.assertEqual(parsed[0], 'points')
        self.assertEqual(len(parsed[1]), 3)
        self.assertEqual(parsed.points[0], Vector(33, 22))
        self.assertEqual(parsed.points[1], Vector(10, 10))
        self.assertEqual(parsed.points[2], Vector(50, 50))
        # Whitespace after the comma is optional
        parsed = attr.parseString('points (33x22, 10x10, 50x50,12x12)')
        self.assertEqual(parsed[0], 'points')
        self.assertEqual(len(parsed[1]), 4)
        self.assertEqual(parsed.points[0], Vector(33, 22))
        self.assertEqual(parsed.points[1], Vector(10, 10))
        self.assertEqual(parsed.points[2], Vector(50, 50))
        self.assertEqual(parsed.points[3], Vector(12, 12))

    def test_cast_line(self):
        from language import castline
        result = castline.parseString('bob = Square size 50x50 color red')
        self.assertEqual(result.name, 'bob')
        self.assertEqual(result.role, 'Square')
        params = language.params_to_dict(result.params)
        self.assertEqual(params['size'], Vector(50, 50))
        self.assertEqual(params['color'], Color('red'))
if __name__ == '__main__':
    # Run the whole test suite when the file is executed directly.
    unittest.main()
|
"""
Chain of Responsibility pattern:
every link in the chain has its own scope of authority. A request that
falls within that scope is handled immediately; anything beyond it is
passed on to the next link in the chain.
"""
from abc import ABC, abstractmethod
class BaseHandler(ABC):
    """Abstract link in the chain of responsibility.

    NOTE(review): ``hande`` looks like a typo of ``handle``, but it is the
    interface name every subclass and caller uses, so it is kept as-is.
    """
    @abstractmethod
    def hande(self, money):
        """Approve a reimbursement of [money] or escalate it up the chain."""
        pass
class Kuaiji(BaseHandler):
    """First link in the chain: the accountant, authorised up to 100."""

    def __init__(self):
        # Successor link; attach one via set_next_handler() before use.
        self.next_handler = None

    def set_next_handler(self, next_handler):
        """Attach the successor that receives requests beyond our limit."""
        assert isinstance(next_handler, BaseHandler)
        self.next_handler = next_handler

    def hande(self, money):
        """Approve amounts in (0, 100]; escalate anything larger."""
        within_authority = 0 < money <= 100
        if within_authority:
            print(f"区区{money}块,我这个会计批了")
        else:
            print("超过100了,这个我没有权限,等等,我问问上级...")
            self.next_handler.hande(money)
class CuWuJingLi(BaseHandler):
    """Second link in the chain: the finance manager, authorised up to 1000."""

    def __init__(self):
        # Successor link; attach one via set_next_handler() before use.
        self.next_handler = None

    def set_next_handler(self, next_handler):
        """Attach the successor that receives requests beyond our limit."""
        assert isinstance(next_handler, BaseHandler)
        self.next_handler = next_handler

    def hande(self, money):
        """Approve amounts in (100, 1000]; escalate anything larger."""
        within_authority = 100 < money <= 1000
        if within_authority:
            print(f"哦,{money}块呀,行吧,我这个财务经理准了")
        else:
            print("超过1000了,这个我没有权限,等等,我问问上级...")
            self.next_handler.hande(money)
class Dongshizhang(BaseHandler):
    """Final link in the chain: the chairman, authorised up to 10000.

    The chain must terminate somewhere, so requests above the limit are
    rejected rather than forwarded.
    """

    def __init__(self):
        # Kept for interface symmetry with the other handlers; never used.
        self.next_handler = None

    def set_next_handler(self, next_handler):
        """Attach a successor (unused -- this is the end of the chain)."""
        assert isinstance(next_handler, BaseHandler)
        self.next_handler = next_handler

    def hande(self, money):
        """Approve amounts in (1000, 10000]; reject anything larger."""
        within_authority = 1000 < money <= 10000
        if within_authority:
            print(f"额,{money}块呀,花都花了,我这个董事长也没办法啊,准了")
        else:
            # The chain has to have an end point -- do not forward further.
            print(f"都超过10000了,我这个董事长也花不了这么多啊,不准!")
if __name__ == '__main__':
    # Build the chain: accountant -> finance manager -> chairman.
    kuaiji_1 = Kuaiji()
    jingli_1 = CuWuJingLi()
    dongshizhag_1 = Dongshizhang()
    kuaiji_1.set_next_handler(jingli_1)
    jingli_1.set_next_handler(dongshizhag_1)
    # Every request enters at the bottom of the chain and climbs as needed.
    for money in (50, 500, 5000000):
        print(f"++ 申请报销{money} ++")
        kuaiji_1.hande(money)
def sum_of_smaller_predecessors(values):
    """Return the sum, over every element, of all earlier elements that are
    strictly smaller than it.

    [values] is a sequence of ints; returns an int (0 for an empty input).

    Fixes over the original script: the builtin ``sum`` is no longer
    shadowed, each token is converted with int() once instead of inside the
    O(n^2) inner loop, and the logic is importable/testable instead of
    running (and blocking on input()) at import time.
    """
    total = 0
    for i, current in enumerate(values):
        total += sum(v for v in values[:i] if v < current)
    return total


if __name__ == '__main__':
    # First input line: element count (read for input-format compatibility;
    # the original never used it either).
    _n = int(input())
    numbers = [int(tok) for tok in input().split()]
    print(sum_of_smaller_predecessors(numbers))
# from continent import *
from itertools import combinations
from color import Color
from card import Card, add_card, find_card, remove_card, total_wildcards
from troop import Troop
class Player():
    '''A Risk player: a colour, a pool of deployable troops, and a hand of
    territory cards.'''

    def __init__(self, color, troops, cards=None):
        ''' [color] is a Color object. [troops] is int. [cards] is a list of
        Card objects. No duplicates, i.e. each player has their own unique
        color.

        Note: [troops] refers to new troops that the player gets in the
        beginning of each turn, as well as the initial troops they have
        during territory claim in the very beginning of the game.'''
        self.color = color
        self.troops = troops
        # Bug fix: the original used a mutable default argument (cards=[]),
        # so every Player created without an explicit hand shared the SAME
        # list object -- one player's cards leaked into all the others.
        self.cards = [] if cards is None else cards

    def get_color(self):
        '''Returns the player's Color.'''
        return self.color

    def get_troops(self):
        '''Returns the player's deployable troop count.'''
        return self.troops

    def get_cards(self):
        '''Returns the player's hand (Card list).'''
        return self.cards

    def set_troops(self, troops):
        '''Sets the deployable troop count to [troops].'''
        self.troops = troops

    def add_troops(self, troops):
        '''Adds [troops] to the deployable troop count.'''
        self.troops += troops

    def subtract_troops(self, troops):
        '''Removes [troops] from the deployable troop count.'''
        self.troops -= troops

    def give_card(self, card):
        '''Adds [card] to the player's hand. [card] must be a Card object.'''
        self.cards = add_card(card, self.cards)

    def take_card(self, territory):
        '''Returns the card with given [territory] while removing it from
        the player's hand. [territory] must be a Node.'''
        card = find_card(territory, self.cards)
        self.cards = remove_card(territory, self.cards)
        return card

    def count_wildcards(self):
        '''Helper function for combine_cards().
        Returns the number of wildcards owned by player and a copy of
        player's hand without the wildcards (Card list). Doesn't change
        player's hand.'''
        count = 0
        no_wildcards = []
        for card in self.cards:
            if card.get_troop_type() == Troop.WILDCARD:
                # ``is None`` (identity) instead of the original ``== None``.
                assert card.get_node() is None, "A wildcard with non-empty territory!"
                count += 1
            else:
                no_wildcards.append(card)
        assert count <= total_wildcards, "%s Player has too many wildcards!" % self.color.name
        return count, no_wildcards

    @staticmethod
    def count_wildcards_list(card_lst):
        '''Counts wildcards in [card_lst].'''
        return sum(1 for card in card_lst
                   if card.get_troop_type() == Troop.WILDCARD)

    @staticmethod
    def two_same(card_lst):
        '''
        Helper function for possible_combos().
        Given [card_lst], returns a list of all possible combinations of two
        cards where the two cards are of the same kind. Could return an
        empty list.
        Precondition: [card_lst] has no wildcards.
        '''
        if len(card_lst) < 2:
            return []
        if len(card_lst) == 2 and card_lst[0].get_troop_type() == card_lst[1].get_troop_type():
            # Kept for backward compatibility: a single matching pair is
            # returned as the flat two-card list itself.
            return card_lst
        # Bug fix: the original fell off the end here and implicitly
        # returned None even though the docstring promises a list. Return
        # every same-type pair instead.
        return [list(pair) for pair in combinations(card_lst, 2)
                if pair[0].get_troop_type() == pair[1].get_troop_type()]

    def possible_combos(self):
        '''
        Helper function for decide().
        Finds all possible card combinations available for the player.
        Returns a possibly empty list of all possibilities of cards
        (Card list list).
        Preconditions: there are only two wildcards in the deck,
        i.e. [num_wildcards] == 2. Author might possibly make the function
        compatible with more wildcards in deck in the future.
        '''
        if not self.cards:
            return []
        res = []
        wildcards_owned, other_cards = self.count_wildcards()
        # A wildcard instance to include in generated combinations.
        wildcard = Card(Troop.WILDCARD, None)
        # Player has 2 wildcards: any other single card completes a combo.
        if wildcards_owned == 2 and len(self.cards) > 2:
            for card in other_cards:
                res.append([wildcard, wildcard, card])
        # Player has at least one wildcard: any 2 cards of either the same
        # or different types make a combination.
        if wildcards_owned >= 1:
            for pair in combinations(other_cards, 2):
                res.append([wildcard] + list(pair))
        # All 3-card combos without wildcards that are themselves valid.
        for comb in combinations(other_cards, 3):
            if Card.combine(comb) > -1:
                # Stored as a list for consistency with the branches above.
                res.append(list(comb))
        return res

    def count_bonus(self, card_lst, deploy=False):
        '''Given a valid card combination, calculates the total bonus given
        to the player (+2 troops per territory on a card that is owned by
        the player). If [deploy] is False, it will just count territorial
        bonus without actually deploying troops.
        Returns (card_bonus, total_bonus).
        Preconditions: [card_lst] is a valid 3-card combination that can
        bring bonus troops. No more than 2 wildcards allowed.'''
        assert len(card_lst) == 3
        total_bonus = 0
        wildcards = Player.count_wildcards_list(card_lst)
        troops = set()
        # Collect distinct troop types and count the territorial bonus.
        for card in card_lst:
            if card.get_troop_type() != Troop.WILDCARD:
                troops.add(card.get_troop_type())
                # Check for territorial bonus.
                node = card.get_node()
                if node.get_owner() == self.color:
                    total_bonus += 2
                    if deploy:
                        print("2 bonus troops deployed in %s." %
                              node.get_name())
                        node.add_troops(2)
        # Three distinct types (possibly completed by wildcards) are worth
        # 10; otherwise three of a kind are worth that troop type's value.
        if len(troops) == 3 or (len(troops) == 2 and wildcards == 1) or (wildcards == 2):
            card_bonus = 10
        else:
            card_bonus = troops.pop().value
        total_bonus += card_bonus
        return card_bonus, total_bonus

    def decide(self):
        '''Based on player's hand, picks the best possible hand: the highest
        total bonus, breaking ties in favour of spending fewer wildcards.'''
        best_hand = []
        best_hand_wildcards = 0
        max_bonus = 0
        for combo in self.possible_combos():
            wildcards = Player.count_wildcards_list(combo)
            _, total_bonus = self.count_bonus(combo, False)
            # Prefer the highest bonus; on a tie, prefer fewer wildcards.
            if total_bonus > max_bonus or (total_bonus == max_bonus and wildcards < best_hand_wildcards):
                best_hand = combo
                best_hand_wildcards = wildcards
                max_bonus = total_bonus
        return list(best_hand)

    def use_cards(self, card_lst):
        '''Deploys the bonus for [card_lst] and removes those cards from
        the player's hand. Returns the card bonus (troops to place freely).'''
        card_bonus, total_bonus = self.count_bonus(card_lst, True)
        print("You have %i total troops in bonus." % total_bonus)
        for card in card_lst:
            _ = self.take_card(card.get_node())
        return card_bonus
# p1 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.WILDCARD, None),
# Card(Troop.INFANTRY, E1)
# ])
# p2 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.WILDCARD, None)
# ])
# p3 = Player(Color.RED, 0, [
# Card(Troop.INFANTRY, E2),
# Card(Troop.INFANTRY, E3),
# Card(Troop.INFANTRY, E4),
# ])
# p4 = Player(Color.RED, 0, [
# Card(Troop.INFANTRY, E5),
# Card(Troop.INFANTRY, E6),
# Card(Troop.CAVALRY, E7),
# ])
# p5 = Player(Color.RED, 0, [
# Card(Troop.INFANTRY, AF1),
# Card(Troop.CAVALRY, AF2),
# Card(Troop.ARTILLERY, AF3),
# ])
# p6 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.ARTILLERY, AF4),
# Card(Troop.INFANTRY, AF5),
# ])
# p7 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.ARTILLERY, AS1),
# Card(Troop.INFANTRY, AS2),
# Card(Troop.INFANTRY, AS3),
# Card(Troop.CAVALRY, AS4),
# ])
# p8 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.WILDCARD, None),
# Card(Troop.ARTILLERY, AS5),
# Card(Troop.INFANTRY, AS6),
# Card(Troop.CAVALRY, AS7),
# Card(Troop.INFANTRY, AS8),
# Card(Troop.ARTILLERY, AS9),
# ])
# p9 = Player(Color.RED, 0, [
# Card(Troop.INFANTRY, AS10),
# Card(Troop.CAVALRY, AS11),
# Card(Troop.ARTILLERY, AS12),
# Card(Troop.INFANTRY, AU1),
# Card(Troop.CAVALRY, AU2),
# Card(Troop.ARTILLERY, AU3),
# ])
# p10 = Player(Color.RED, 0, [
# Card(Troop.WILDCARD, None),
# Card(Troop.ARTILLERY, NA1),
# Card(Troop.INFANTRY, NA2),
# Card(Troop.INFANTRY, NA3),
# Card(Troop.ARTILLERY, NA4),
# ])
# p11 = Player(Color.RED, 0, [
# Card(Troop.INFANTRY, NA5),
# Card(Troop.INFANTRY, NA6),
# Card(Troop.CAVALRY, NA7),
# Card(Troop.CAVALRY, NA8),
# ])
|
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from woid.core.models import Organization
from woid.core.forms import OrganizationForm
def home(request):
    """Landing page: authenticated users are redirected to their
    organization's page, everyone else sees the cover page."""
    user = request.user
    if not user.is_authenticated():
        return render(request, 'core/cover.html')
    org_name = user.account.organization.name
    return HttpResponseRedirect(reverse('organization', args=(org_name,)))
@login_required
def organization(request, organization_name):
    """Organization dashboard. [organization_name] comes from the URL but
    is not used to select data here -- the template does the work."""
    template = 'core/index.html'
    return render(request, template)
@login_required
def manage(request, organization_name):
    """Management page: shows the edit form pre-filled with the logged-in
    user's own organization."""
    org = request.user.account.organization
    context = {'form': OrganizationForm(instance=org)}
    return render(request, 'core/manage.html', context)
|
#import java.util.ArrayList;
# this class implements the getPossibleActions for each type of piece
import Utils
from Position import Position
from Action import Action
from State import State
class Piece:
    '''Base class for board pieces: implements the move generators shared
    by the concrete piece types (sliding rays along rows, columns and
    diagonals, plus knight jumps), each returning a list of Action.

    Fixes over the original implementation:
      * getDiagonalRightUpMoves created the capture Action but never
        appended it to the result list.
      * getDiagonalLeftUpMoves / getDiagonalRightDownMoves filtered squares
        with the anti-diagonal invariant (row+col constant), which is never
        satisfied in those directions, so both methods always returned no
        moves.
      * A blocked square in one knight "quadrant" also suppressed the
        quadrant's other, independent knight target.
    '''

    # this method must be completed with all the possible pieces
    def __init__(self):
        self.m_color = -1   # piece colour (assigned by the concrete piece)
        self.m_type = -1    # piece type id (assigned by the concrete piece)

    def getPossibleActions(self, state):
        '''Overridden by each concrete piece type.'''
        return None  # never arrive here

    def _slidingMoves(self, state, rowStep, colStep):
        '''Collects the moves reachable from the agent square by repeatedly
        stepping (rowStep, colStep): every empty square along the ray, plus
        the first occupied square when it holds an enemy piece (capture).
        The ray stops at the first occupied square either way.'''
        moves = []
        size = state.m_boardSize
        row = state.m_agentPos.row + rowStep
        col = state.m_agentPos.col + colStep
        while 0 <= row < size and 0 <= col < size:
            square = state.m_board[row][col]
            if square == Utils.empty:
                moves.append(Action(state.m_agentPos, Position(row, col)))
            else:
                # Blocked: capture if the blocker belongs to the opponent.
                if self.m_color != Utils.getColorPiece(square):
                    moves.append(Action(state.m_agentPos, Position(row, col)))
                break
            row += rowStep
            col += colStep
        return moves

    # --- sliding move generators (rook/bishop/queen style rays) ---

    def getHorizontalLeftMoves(self, state):
        '''Moves along the row towards lower column indices.'''
        return self._slidingMoves(state, 0, -1)

    def getHorizontalRightMoves(self, state):
        '''Moves along the row towards higher column indices.'''
        return self._slidingMoves(state, 0, 1)

    def getVerticalUpMoves(self, state):
        '''Moves along the column towards lower row indices.'''
        return self._slidingMoves(state, -1, 0)

    def getVerticalDownMoves(self, state):
        '''Moves along the column towards higher row indices.'''
        return self._slidingMoves(state, 1, 0)

    def getDiagonalRightUpMoves(self, state):
        '''Diagonal moves towards lower rows / higher columns.'''
        return self._slidingMoves(state, -1, 1)

    def getDiagonalLeftUpMoves(self, state):
        '''Diagonal moves towards lower rows / lower columns.'''
        return self._slidingMoves(state, -1, -1)

    def getDiagonalRightDownMoves(self, state):
        '''Diagonal moves towards higher rows / higher columns.'''
        return self._slidingMoves(state, 1, 1)

    def getDiagonalLeftDownMoves(self, state):
        '''Diagonal moves towards higher rows / lower columns.'''
        return self._slidingMoves(state, 1, -1)

    # --- knight move generators ---

    def _knightMoves(self, state, offsets):
        '''Collects knight jumps for the given (rowDelta, colDelta) offsets.
        Jumps are independent: an occupied target only rules out that one
        square (unless it can be captured) and never blocks the other
        target of the same quadrant.'''
        moves = []
        size = state.m_boardSize
        row0, col0 = state.m_agentPos.row, state.m_agentPos.col
        for rowDelta, colDelta in offsets:
            row, col = row0 + rowDelta, col0 + colDelta
            if 0 <= row < size and 0 <= col < size:
                square = state.m_board[row][col]
                if square == Utils.empty or self.m_color != Utils.getColorPiece(square):
                    moves.append(Action(state.m_agentPos, Position(row, col)))
        return moves

    def getKnightRightUpMoves(self, state):
        '''Knight jumps into the upper-right quadrant.'''
        return self._knightMoves(state, ((-1, 2), (-2, 1)))

    def getKnightLeftUpMoves(self, state):
        '''Knight jumps into the upper-left quadrant.'''
        return self._knightMoves(state, ((-1, -2), (-2, -1)))

    def getKnightRightDownMoves(self, state):
        '''Knight jumps into the lower-right quadrant.'''
        return self._knightMoves(state, ((1, 2), (2, 1)))

    def getKnightLeftDownMoves(self, state):
        '''Knight jumps into the lower-left quadrant.'''
        return self._knightMoves(state, ((1, -2), (2, -1)))
# example 4: ToUpper
# Read the payload, record it as the "input" property, upper-case it, and
# store the result as both the "output" property and the new payload.
# ("data" is supplied by the host environment.)
source_text = data.getObject()
data.setProperty("input", source_text)
upper_text = source_text.upper()
data.setProperty("output", upper_text)
data.setObject(upper_text)
# Generated by Django 3.2.8 on 2021-10-19 02:33
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds a ``fecha`` timestamp field
    (defaulting to the current time) to the ``newsletter`` model.
    Generated code -- do not edit by hand."""

    dependencies = [
        ('pages', '0003_auto_20211018_2040'),
    ]

    operations = [
        migrations.AddField(
            model_name='newsletter',
            name='fecha',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
"""
Utility functions for the firex_keeper package.
"""
from collections import namedtuple
import gzip
import json
import os
import stat
from firexapp.submit.uid import Uid
from firexapp.events.event_aggregator import FireXEventAggregator
from firexapp.events.model import FireXTask
# A FireXTask row extended with tree links: presumably [children] is a list
# of FireXTreeTask and [parent] is the parent node (None at the root).
# NOTE(review): field semantics inferred from the names -- confirm.
FireXTreeTask = namedtuple('FireXTreeTask', FireXTask._fields + ('children', 'parent'))
def get_keeper_dir(logs_dir):
    """Return the keeper data directory nested inside the run's debug dir."""
    debug_dir = os.path.join(logs_dir, Uid.debug_dirname)
    return os.path.join(debug_dir, 'keeper')
def load_event_file(db_manager, event_file):
    """Replay a Celery event log into the keeper database.

    [event_file] is a text file (optionally gzip-compressed, detected by a
    ``.gz`` suffix on its resolved path) containing one JSON event per
    line. Events are aggregated one at a time and each resulting task
    delta is written to [db_manager].
    """
    event_aggregator = FireXEventAggregator()
    real_path = os.path.realpath(event_file)
    # Open the resolved path in both branches (the original opened the raw
    # path, without an explicit encoding, in the uncompressed case).
    if real_path.endswith('.gz'):
        rec = gzip.open(real_path, 'rt', encoding='utf-8')
    else:
        rec = open(real_path, encoding='utf-8')
    with rec:
        for event_line in rec:
            # Bug fix: lines keep their trailing newline, so the original
            # blank-line check (``if not event_line``) never fired and
            # json.loads was fed empty lines. Strip before testing.
            event_line = event_line.strip()
            if not event_line:
                continue
            event = json.loads(event_line)
            new_task_data_by_uuid = event_aggregator.aggregate_events([event])
            db_manager.insert_or_update_tasks(new_task_data_by_uuid,
                                              event_aggregator.root_uuid)
def can_any_write(file_path: str) -> bool:
    """Return True if any of user/group/other has write permission on
    [file_path], judged purely from the file's mode bits."""
    # Renamed from the original ``any_read``: the mask is built from the
    # *write* permission bits, so the old name was misleading.
    any_write = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    return bool(os.stat(file_path).st_mode & any_write)
def remove_write_permissions(file_path: str) -> None:
    """Strip the user, group and other write bits from [file_path],
    leaving every other mode bit untouched."""
    current_mode = os.stat(file_path).st_mode
    write_bits = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    os.chmod(file_path, current_mode & ~write_bits)
|
#!/usr/bin/python3
#--------------绘制barChart直方图(多色)包括x轴数据+y轴数据 使用append插入单行-----------------
from openpyxl import Workbook
from openpyxl.chart import (
Reference,
Series,
BarChart
)
book = Workbook()
sheet = book.active

# Medal table: (country, gold medal count) pairs; each tuple becomes one
# worksheet row via append().
rows = [
    ("USA", 46),
    ("China", 38),
    ("UK", 29),
    ("Russia", 22),
    ("South Korea", 13),
    ("Germany", 11)
]
for row in rows:
    sheet.append(row)

# y-axis series: the medal counts in column B, rows 1-6.
data = Reference(sheet, min_col=2, min_row=1, max_col=2, max_row=6)
# x-axis categories: the country names in column A.
categs = Reference(sheet, min_col=1, min_row=1, max_row=6)

# Build the bar chart from the series and category references.
chart = BarChart()
chart.add_data(data)
chart.set_categories(categs)

# No legend, no major grid lines, one colour per bar.
chart.legend = None
chart.y_axis.majorGridlines = None
chart.varyColors = True

chart.title = "Olympic Gold medals in London"

# Anchor the chart at cell D2 and write the workbook out.
sheet.add_chart(chart, "D2")
book.save("barChart.xlsx")
from django.urls import path
from . import views
# Route table: landing page plus the three static-style pages, each named
# so templates can reverse them.
urlpatterns = [
    path('', views.main, name="main"),
    path('contact.html', views.contact, name="contact"),
    path('gallery.html', views.gallery, name="gallery"),
    path('about.html', views.about, name="about"),
]
from django.db import models
# Create your models here.
class Module(object):
    """Plain value object describing a hardware module's pin state.

    NOTE(review): lives next to Django's models import but does not
    subclass models.Model, so it is never persisted -- confirm that this
    is intentional.
    """

    def __init__(self, moduleName, pin, pinStatus, key, error):
        # Attribute names intentionally mirror the constructor parameters.
        self.moduleName = moduleName
        self.pin = pin
        self.pinStatus = pinStatus
        self.key = key
        self.error = error
|
import sys
import numpy as np
import math
# Pull the tracts model code into this namespace (defines, among others,
# continuous_mig_hybridzone and demographic_model).
# NOTE(review): exec'ing a file is fragile and opaque to linters --
# consider making tracts_mod importable instead.
exec(open("tracts_mod.py").read())

# Read in the command-line parameters.
rate = sys.argv[1]         # migration rate; NOTE(review): kept as str like the original -- confirm whether downstream expects float(rate)
tstart = int(sys.argv[2])  # migration start time
npts = int(sys.argv[3])    # number of points/bins
maxlen = int(sys.argv[4])  # maximum tract length
pop = int(sys.argv[5])     # population index (unused in this script)
Ls = [1]                   # chromosome lengths (unused in this script)

# Fix: the original opened psivec.txt by hand, never closed it, and then
# loaded the same file again with numpy; loadtxt manages the file itself.
psivec = list(np.loadtxt("psivec.txt"))

# Run tracts code.
bins = np.arange(0, maxlen * (1 + .5 / npts), float(maxlen) / npts)  # (unused below)
mig = continuous_mig_hybridzone(rate, tstart)
model = demographic_model(mig, psivec)
nDist = model.popNdist(1)

# Write one value per line; "with" guarantees the handle is closed.
with open("/home/joelsmith/Projects/dmis/code/dmitracts/nDist.txt", 'w+') as thefile:
    for item in nDist:
        thefile.write("%s\n" % item)
|
# Copyright 2018 SEDA Group at CU Boulder
# Created by:
# Liam Kilcommons
# Space Environment Data Analysis Group (SEDA)
# Colorado Center for Astrodynamics Research (CCAR)
# University of Colorado, Boulder (CU Boulder)
"""
ssj_auroral_boundary
--------------------
Figure of Merit boundary identification for DMSP SSJ5
Modules
-------
absatday
abpolarpass
absegment
abcsv
files
dmsp_spectrogram
"""
from __future__ import print_function  # Python 2/3 compatible printing

# Package version (PEP 396 style single-source version string).
__version__ = str("0.1.1")

#Prefix for all package loggers
loggername = 'ssj_auroral_boundary'

# Public API of the package.
__all__ = ['absatday', 'abpolarpass', 'absegment', 'abcsv', 'files',
           'dmsp_spectrogram']

# Explicitly import all modules (in addition to defining __all__)
from ssj_auroral_boundary import (absatday,
                                  abpolarpass,
                                  absegment,
                                  abcsv,
                                  files,
                                  dmsp_spectrogram)
|
# file mygame/typeclasses/latin_noun.py
from evennia import DefaultObject
# adding the following for colors in names for pluralization
from evennia.utils import ansi
# adding the following for redefinition of 'return_appearance'
from collections import defaultdict
# from evennia.utils.utils import list_to_string
class LatinNoun(DefaultObject):
    """Base typeclass for Latin-aware game objects.

    Extends Evennia's DefaultObject with Latin pluralization (nominative
    plurals read from the ``nom_pl`` attribute) and a Latin-flavoured room
    description ("Ecce" / "Ad hos locos ire potes"). ``at_first_save`` is
    adapted from Evennia's stock implementation with the
    ``at_object_creation`` call moved to run after the create-dict has been
    applied.
    """
    def at_first_save(self):
        """
        This is called by the typeclass system whenever an instance of
        this class is saved for the first time. It is a generic hook
        for calling the startup hooks for the various game entities.
        When overloading you generally don't overload this but
        overload the hooks called by this method.
        """
        self.basetype_setup()
        # moving the below line to just before basetype_posthook
        # at the bottom of this defenitiion
        # self.at_object_creation()
        if hasattr(self, "_createdict"):
            # this will only be set if the utils.create function
            # was used to create the object. We want the create
            # call's kwargs to override the values set by hooks.
            cdict = self._createdict
            updates = []
            if not cdict.get("key"):
                if not self.db_key:
                    self.db_key = "#%i" % self.dbid
                    updates.append("db_key")
            elif self.key != cdict.get("key"):
                updates.append("db_key")
                self.db_key = cdict["key"]
            if cdict.get("location") and self.location != cdict["location"]:
                self.db_location = cdict["location"]
                updates.append("db_location")
            if cdict.get("home") and self.home != cdict["home"]:
                self.home = cdict["home"]
                updates.append("db_home")
            if cdict.get("destination") and self.destination != cdict["destination"]:
                self.destination = cdict["destination"]
                updates.append("db_destination")
            if updates:
                self.save(update_fields=updates)
            if cdict.get("permissions"):
                self.permissions.batch_add(*cdict["permissions"])
            if cdict.get("locks"):
                self.locks.add(cdict["locks"])
            if cdict.get("aliases"):
                self.aliases.batch_add(*cdict["aliases"])
            if cdict.get("location"):
                cdict["location"].at_object_receive(self, None)
                self.at_after_move(None)
            if cdict.get("tags"):
                # this should be a list of tags, tuples (key, category) or (key, category, data)
                self.tags.batch_add(*cdict["tags"])
            if cdict.get("attributes"):
                # this should be tuples (key, val, ...)
                self.attributes.batch_add(*cdict["attributes"])
            if cdict.get("nattributes"):
                # this should be a dict of nattrname:value
                # NOTE(review): iterating a dict yields keys only, so this
                # unpacking works only if "nattributes" is actually a list
                # of (key, value) pairs -- confirm against the caller.
                for key, value in cdict["nattributes"]:
                    self.nattributes.add(key, value)
            del self._createdict
        self.at_object_creation()
        self.basetype_posthook_setup()

    # adding the pluralization rules; so far they will only work for
    # nominative plurals for when you look in a new room; not sure
    # what to do with other plural functionality
    # redefine list_to_stringfor this function to use "et"
    # NOTE(review): defined without ``self`` or @staticmethod; it works
    # because every call site accesses it through the class
    # (LatinNoun.list_to_string(...)), not through an instance.
    def list_to_string(inlist, endsep="et", addquote=False):
        """
        This pretty-formats a list as string output, adding an optional
        alternative separator to the second to last entry. If `addquote`
        is `True`, the outgoing strings will be surrounded by quotes.
        Args:
            inlist (list): The list to print.
            endsep (str, optional): If set, the last item separator will
                be replaced with this value.
            addquote (bool, optional): This will surround all outgoing
                values with double quotes.
        Returns:
            liststr (str): The list represented as a string.
        Examples:
            ```python
            # no endsep:
            [1,2,3] -> '1, 2, 3'
            # with endsep=='and':
            [1,2,3] -> '1, 2 and 3'
            # with addquote and endsep
            [1,2,3] -> '"1", "2" and "3"'
            ```
        """
        if not endsep:
            endsep = ","
        else:
            endsep = " " + endsep
        if not inlist:
            return ""
        if addquote:
            if len(inlist) == 1:
                return '"%s"' % inlist[0]
            return ", ".join('"%s"' % v for v in inlist[:-1]) + "%s %s" % (endsep, '"%s"' % inlist[-1])
        else:
            if len(inlist) == 1:
                return str(inlist[0])
            return ", ".join(str(v) for v in inlist[:-1]) + "%s %s" % (endsep, inlist[-1])

    def get_numbered_name(self, count, looker, **kwargs):
        """
        Return the numbered (Singular, plural) forms of this object's key.
        This is by default called by return_appearance and is used for
        grouping multiple same-named of this object. Note that this will
        be called on *every* member of a group even though the plural name
        will be only shown once. Also the singular display version, such as
        'an apple', 'a tree' is determined from this method.
        Args:
            count (int): Number of objects of this type
            looker (Object): Onlooker. Not used by default
        Kwargs:
            key (str): Optional key to pluralize, if given, use this instead of
                the object's key
        Returns:
            singular (str): The singular form to display
            plural (str): The determined plural form of the key, including count.
        """
        key = kwargs.get("key", self.key)
        key = ansi.ANSIString(key)  # This is needed to allow inflection of colored names
        if self.db.nom_pl:
            plural = self.db.nom_pl[0]
        else:
            plural = self.key
        plural = "%s %s" % (count, plural)
        # NOTE(review): both branches below assign the same value; the
        # nom_sg branch presumably was meant to use self.db.nom_sg[0] --
        # confirm before changing.
        if self.db.nom_sg:
            singular = self.key
        else:
            singular = self.key
        if not self.aliases.get(plural, category="plural_key"):
            # We need to wipe any old plurals/an/a in case key changed in the interim
            self.aliases.clear(category="plural_key")
            self.aliases.add(plural, category="plural_key")
            # save the singular form as an alias here too so we can display "an egg"
            # and also look at "an egg".
            self.aliases.add(singular, category="plural_key")
        return singular, plural

    def return_appearance(self, looker, **kwargs):
        """
        # Lightly editing to change "You see" to "Ecce"
        # and 'Exits' to 'Ad hos locos ire potes:'
        This formats a description. It is the hook a 'look' command
        should call.
        Args:
            looker (Object): Object doing the looking.
            **kwargs (dict): Arbitrary, optional arguments for users
                overriding the call (unused by default).
        """
        if not looker:
            return ""
        # get and identify all objects
        visible = (con for con in self.contents if con != looker and con.access(looker, "view"))
        exits, users, things = [], [], defaultdict(list)
        # adjusted the exit name to take out the dbref so builders can
        # click on the exits to go there
        for con in visible:
            key = con.get_display_name(looker)
            if con.destination:
                exits.append(con.key)
            elif con.has_account:
                if con.db.is_glowing:
                    users.append("|y(ardens)|n |c%s|n" % key)
                else:
                    users.append("|c%s|n" % key)
            else:
                # things can be pluralized
                things[key].append(con)
        # get description, build string
        string = "|c%s|n\n" % self.get_display_name(looker)
        desc = self.db.desc
        # JI (12/7/9) Adding the following lines to accommodate clothing
        # Actually, added return_appearance to characters typeclass
        # and commenting this new section out
        # worn_string_list = []
        # clothes_list = get_worn_clothes(self, exclude_covered=True)
        # # Append worn, uncovered clothing to the description
        # for garment in clothes_list:
        #     # if 'worn' is True, just append the name
        #     if garment.db.worn is True:
        #         # JI (12/7/19) append the accusative name to the description,
        #         # since these will be direct objects
        #         worn_string_list.append(garment.db.acc_sg)
        #     # Otherwise, append the name and the string value of 'worn'
        #     elif garment.db.worn:
        #         worn_string_list.append("%s %s" % (garment.name, garment.db.worn))
        if desc:
            string += "%s" % desc
        # # Append worn clothes.
        # if worn_string_list:
        #     string += "|/|/%s gerit: %s." % (self, list_to_string(worn_string_list))
        # else:
        #     string += "|/|/%s nud%s est!" % (self, 'a' if self.db.gender == 1 else 'us')
        # return string
        # Thinking that the above, added for clothing, might need to only be in the
        # character typeclass
        if exits:
            # Changing this string so that exits appear green
            # string += "\n|wAd hos locos potes ire:|n\n " + LatinNoun.list_to_string(exits)
            colorful_exits = []
            for exit in exits:
                colorful_exits.append(f"|lc{exit}|lt|g{exit}|n|le")
            string += "\n|wAd hos locos potes ire:|n\n " + LatinNoun.list_to_string(colorful_exits)
        if users or things:
            # handle pluralization of things (never pluralize users)
            thing_strings = []
            for key, itemlist in sorted(things.items()):
                nitem = len(itemlist)
                if nitem == 1:
                    key, _ = itemlist[0].get_numbered_name(nitem, looker, key=key)
                    if itemlist[0].db.is_glowing:
                        key = "|y(ardens)|n " + key
                else:
                    key = [item.get_numbered_name(nitem, looker, key=key)[1] for item in itemlist][
                        0
                    ]
                thing_strings.append(key)
            string += "\n|wEcce:|n\n " + LatinNoun.list_to_string(users + thing_strings)
        return string
|
# Generated by Django 3.1.1 on 2020-10-01 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the default of ``ShopUser.recipes``.

    NOTE(review): the default is the literal string '"lololol"' with the
    double quotes embedded — looks like a placeholder; confirm intent.
    """

    dependencies = [
        ('login', '0004_shopuser_recipes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='shopuser',
            name='recipes',
            field=models.CharField(default='"lololol"', max_length=200),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import mltools as ml
# Load the iris data: the last column is the class label, the first two
# columns are the features used for 2-D visualisation.
iris = np.genfromtxt("data/iris.txt", delimiter=None)
Y = iris[:,-1]
X = iris[:,0:2]
# Shuffle before splitting so the 75/25 train/test split is random.
X,Y = ml.shuffleData(X,Y)
Xtr,Xte,Ytr,Yte = ml.splitData(X, Y, 0.75)
def partA(Xtr, Xte, Ytr, Yte):
    """Train a kNN classifier for several K and show its decision boundary.

    Args:
        Xtr, Ytr: training features / labels.
        Xte, Yte: held-out features / labels (unused here; kept so the
            signature matches partB).
    """
    knn = ml.knn.knnClassify()
    # varying values of K; larger K gives a smoother decision boundary
    for k in [1, 5, 10, 50]:
        knn.train(Xtr, Ytr, K=k)
        ml.plotClassify2D(knn, Xtr, Ytr, axis=plt)
        # Parenthesised single-argument print is valid in both Py2 and Py3
        # (the original Py2-only print statement broke under Python 3).
        print("#A: Plot of K=" + str(k))
        plt.show()
# computing error values of predictions
def partB(Xtr, Xte, Ytr, Yte):
    """Plot train/test misclassification error of kNN as a function of K.

    Fixes two defects in the original:
      * the "error" was computed as mean(transpose(yhat) - y): signed
        differences cancel out, so it was not an error rate at all —
        use the misclassification rate mean(yhat != y) instead;
      * semilogx was called without x-values, plotting error against the
        list index rather than against K.
    """
    ks = [1,2,5,10,50,100,200]
    errTrainTr = []
    errTrainTe = []
    for i,k in enumerate(ks):
        learner = ml.knn.knnClassify(Xtr, Ytr, K=k)
        Yhattr = learner.predict(Xtr)
        # Fraction of wrong predictions; reshape guards against predict()
        # returning a column vector.
        errTrainTr.append(np.mean(np.reshape(Yhattr, Ytr.shape) != Ytr))
        Yhatte = learner.predict(Xte)
        errTrainTe.append(np.mean(np.reshape(Yhatte, Yte.shape) != Yte))
    plt.semilogx(ks, errTrainTr, color='r')
    plt.semilogx(ks, errTrainTe, color='g')
    print("#B: Semilog plot of error")
    plt.show()
    print("What we see is that the kNN learner really needs a small amount of K to avoid fitness problems.")
    print("What you can see from this graph is that the 2nd and 3rd values are in the optimal area of fit.")
    print("This would mean training our data with K=2, K=5 would be optimal.")
# Run both experiments on the same shuffled split.
partA(Xtr, Xte, Ytr, Yte)
partB(Xtr, Xte, Ytr, Yte)
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from secret import username, passwd
'''
1. Timer
2. Refresh every 5sec
3. Alternative route
4. Notify sms
5. Set selector
'''
# open chrome and load the registration portal
driver = webdriver.Chrome()
driver.get("https://or.ump.edu.my/or/")
# login with credentials kept out of the repo (secret.py)
element = driver.find_element_by_id("login")
element.send_keys(username)
element = driver.find_element_by_id("password")
element.send_keys(passwd)
element.send_keys(Keys.RETURN)
# register
driver.find_element_by_link_text('Course Registration').click()
# select subject
subject = driver.find_element_by_id('subject')
courses = [x for x in subject.find_elements_by_tag_name("option")]
# subject.txt holds one "code,section[,lab]" entry per line
with open('subject.txt') as f:
    lines = f.readlines()
lines = [x.strip() for x in lines]
for line in lines:
    line = line.split(',')
    for course in courses:
        # Select course code
        if line[0] == course.get_attribute("value"):
            course.click()
            # Select section
            section_element = driver.find_element_by_id('section')
            for section in [x for x in section_element.find_elements_by_tag_name("option")]:
                if line[1] == section.get_attribute("value"):
                    section.click()
            # give the page time to refresh the dependent lab dropdown
            time.sleep(0.5)
            # Select Lab section.  Guard with len(line) > 2: a line without
            # a lab entry previously raised IndexError on line[2].
            if len(line) > 2 and line[2]:
                lab_element = driver.find_element_by_id('tutorial')
                for lab in [x for x in lab_element.find_elements_by_tag_name("option")]:
                    if line[2] == lab.get_attribute("value"):
                        lab.click()
            driver.execute_script('window.scrollTo(0,3000)')
            driver.find_element_by_id('Add').click()
|
# Generated by Django 3.1.1 on 2020-11-24 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the ``teacher`` model and drop the
    old ``post.teacher`` field (presumably superseded — confirm)."""

    dependencies = [
        ('app', '0008_auto_20201124_0139'),
    ]

    operations = [
        migrations.CreateModel(
            name='teacher',
            fields=[
                ('name', models.CharField(max_length=32)),
                ('email', models.EmailField(max_length=254)),
                # Explicit auto-incrementing primary key.
                ('tid', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.RemoveField(
            model_name='post',
            name='teacher',
        ),
    ]
|
#!/usr/local/bin/python3
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Merge
import matplotlib.pyplot as plt
import pickle
import json
import mfcc_model
import tempotrack_model
import spectral_contrast_peaks_model
import theano as T
from os.path import exists
import os
# Training hyper-parameters.
batch_size = 50
nb_epoch = 50
numGenres = 3  # number of output classes
print("creating model")
# create model: labels and feature tensors come from pre-pickled vectors.
y = pickle.load(open("pickled_vectors/mfcc_coefficients_label.pickle","rb"))
y_test = pickle.load(open("pickled_vectors/mfcc_coefficients_evaluation_label.pickle","rb"))
# X_1 = pickle.load(open("pickled_vectors/tempotracker_tempo_training_vector.pickle","rb"))
# X_test_1 = pickle.load(open("pickled_vectors/tempotracker_tempo_evaluation_training_vector.pickle","rb"))
# model_1 = tempotrack_model.tempotrack_model((X_1.shape[1],X_1.shape[2]))
X_1 = pickle.load(open("pickled_vectors/mfcc_coefficients_training_vector.pickle","rb"))
X_test_1 = pickle.load(open("pickled_vectors/mfcc_coefficients_evaluation_training_vector.pickle","rb"))
X_2 = pickle.load(open("pickled_vectors/spectral-contrast_peaks_training_vector.pickle","rb"))
X_test_2 = pickle.load(open("pickled_vectors/spectral-contrast_peaks_evaluation_training_vector.pickle","rb"))
# Two sub-models (MFCC branch and spectral-contrast branch) built from the
# shapes of their respective feature tensors.
model_1 = mfcc_model.mfcc_model((X_1.shape[1],X_1.shape[2]))
model_2 = spectral_contrast_peaks_model.model((X_2.shape[1],X_2.shape[2]))
print("y",y.shape)
print("y_test",y_test.shape)
# print("X_1",X_1.shape)
# print("X_test_1",X_test_1.shape)
print("X_1",X_1.shape)
print("X_test_1",X_test_1.shape)
print("X_2",X_2.shape)
print("X_test_2",X_test_2.shape)
#
#
# NOTE(review): keras.layers.Merge is a legacy Keras 1.x API; removed in
# Keras 2 (use keras.layers.concatenate) — this script needs old Keras.
merged = Merge([model_1,model_2],mode="concat")
final_model = Sequential()
final_model.add(merged)
final_model.add(Dense(1000))
# Softmax head over the genre classes.
final_model.add(Dense(numGenres, activation='softmax'))
final_model.compile(loss='categorical_crossentropy',
            optimizer='adam',
            metrics=['accuracy']
            )
# plot(model_1,to_file="model_1.png")
# plot(model_1,to_file="model_1.png")
# plot(final_model,to_file="merged_model.png")
# Persist the architecture (to_json already returns a JSON string, so the
# file ends up double-encoded; loaders must json.loads it first).
json_string = final_model.to_json()
with open("model_architecture/merged_model_architecture.json","w") as f:
    f.write(json.dumps(json_string, sort_keys=True,indent=4, separators=(',', ': ')))
print("Fitting")
# #
# final_model.load_weights("model_weights/embeddings_10_sec_split_gztan_merged_model_weights.hdf5")
# #
# # for i in range(10):
# #     print("epoch",i)
history = final_model.fit([X_1,X_2], y,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            validation_data=([X_test_1,X_test_2], y_test),
            shuffle="batch"
            )
if not os.path.exists("model_weights"):
    os.makedirs("model_weights")
final_model.save_weights("model_weights/merged_model_weights.hdf5",overwrite=True)
with open("experimental_results.json","w") as f:
    f.write(json.dumps(history.history, sort_keys=True,indent=4, separators=(',', ': ')))
# One subplot per recorded metric.  NOTE(review): 411+index assumes at most
# four history keys (acc/loss/val_acc/val_loss) — confirm before adding metrics.
for k,v in history.history.items():
    _keys = list(history.history.keys())
    _keys.sort()
    plt.subplot(411+_keys.index(k))
    plt.title(k)
    plt.plot(range(0,len(v)),v,marker="8",linewidth=1.5)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.show()
|
#!/bin/env python
# ^_^ encoding: utf-8 ^_^
# @date: 2015/8/27
__author__ = 'icejoywoo'
import numpy as np
# create a 3D numpy array (unused below; kept as part of the demo)
arr = np.zeros((3, 3, 3))
a = np.array([
    [11, 12, 13],
    [21, 22, 23],
    [31, 32, 33],
])
# Transpose of the 3x3 matrix (Python 2 print statements throughout).
print a.T
# Multi-dimensional arrays are indexed with commas, one index per axis.
print a[1,:]
print np.nonzero(a[:,0])[0]
print np.eye(5)
print np.linalg.det(a)  # det == 0.0 means the matrix is singular (not invertible)
# nonzero on a boolean mask / full array returns per-axis index arrays.
print np.nonzero(a[a == 22])
print np.nonzero(a)
print np.nonzero(a > 22)
print np.nonzero(a.reshape(1, 9))
|
#!/usr/bin/python
import getopt
import os
import signal
import string
import sys
def file_to_pid(fname):
    """Read a pid from *fname*; return -1 if the file does not exist.

    Uses a context manager so the handle is closed even when the first
    line is not a valid integer (the original leaked the handle on that
    path because close() was only reached after int() succeeded).
    """
    if (not os.path.exists(fname)):
        return -1
    with open(fname, 'r') as f:
        return int(f.readline())
def pid_to_file(fname, pid):
    """Write *pid* to *fname* as a decimal string.

    Uses a context manager so the handle is closed even if the write
    raises (the original leaked the handle on that path).
    """
    with open(fname, 'w') as f:
        f.write(str(pid))
def file_to_running(fname):
    """Return True when the pid recorded in *fname* belongs to a live process.

    A process is considered alive when /proc/<pid> exists; a missing pid
    file means "not running".
    """
    if not os.path.exists(fname):
        return False
    recorded_pid = file_to_pid(fname)
    return os.path.isdir("/proc/" + str(recorded_pid))
class Node(object):
    # Represents one Hadoop daemon (journal/name/data node) managed through
    # a pid file under /tmp.  Python 2 only: uses print statements,
    # string.join and the `except Exception, e` syntax.
    def __init__(self, ident, ty):
        # ident: numeric identifier within the node type; ty: one of
        # "journal", "name", "data".
        self.ident = ident
        self.ty = ty
    def get_pid_file(self):
        # Pid file path encodes both type and identifier.
        return "/tmp/qjm.pid." + str(self.ty) + "." + str(self.ident)
    def get_conf_dir(self):
        # Per-node configuration directory name (jn1, nn2, dn1, ...).
        # NOTE(review): returns None for an unknown type.
        if (self.ty == "journal"):
            return "jn" + str(self.ident)
        elif (self.ty == "name"):
            return "nn" + str(self.ident)
        elif (self.ty == "data"):
            return "dn" + str(self.ident)
    def get_log_file(self):
        # Log destination under /r/logs, mirroring get_conf_dir naming.
        if (self.ty == "journal"):
            return "/r/logs/jn" + str(self.ident) + ".log"
        elif (self.ty == "name"):
            return "/r/logs/nn" + str(self.ident) + ".log"
        elif (self.ty == "data"):
            return "/r/logs/dn" + str(self.ident) + ".log"
    def get_hadoop_command(self):
        # argv for os.spawnv: the first element is the executable path and
        # the second is argv[0], hence the duplicated "doit" entry.
        cmd = [ "/home/cmccabe/cmccabe-hbin/doit",
            "/home/cmccabe/cmccabe-hbin/doit",
            self.get_conf_dir(),
            "-redirect", self.get_log_file(),
            "/h/bin/hdfs" ]
        if (self.ty == "journal"):
            cmd.append("journalnode")
        elif (self.ty == "name"):
            cmd.append("namenode")
        elif (self.ty == "data"):
            cmd.append("datanode")
        return cmd
    def __str__(self):
        return self.ty + "(" + str(self.ident) + ")"
    def start(self):
        # Spawn the daemon unless the pid file already points at a live
        # process; record the new pid on success.
        if (file_to_running(self.get_pid_file())):
            print str(self) + " is already running as pid " + \
                str(file_to_pid(self.get_pid_file()))
            return
        cmd = self.get_hadoop_command()
        print string.join(cmd[1:])
        pid = os.spawnv(os.P_NOWAIT, cmd[0], cmd[1:])
        pid_to_file(self.get_pid_file(), pid)
        print str(self) + " started as pid " + str(pid)
    def stop(self):
        # Send SIGTERM to the recorded pid; best-effort (errors are only
        # printed, and the stale pid file is not removed).
        if (not file_to_running(self.get_pid_file())):
            print str(self) + " daemon is not running (expected pid: " + \
                str(file_to_pid(self.get_pid_file())) + ")"
            return
        pid = file_to_pid(self.get_pid_file())
        try:
            os.kill(pid, signal.SIGTERM)
        except Exception, e:
            print "error while sending SIGTERM to " + str(pid) + ": " + str(e)
#################################################################################
# Cluster under test: three JournalNodes, two NameNodes and one DataNode.
journalnodes = [ Node(1, "journal"), Node(2, "journal"), Node(3, "journal") ]
namenodes = [ Node(1, "name"), Node(2, "name") ]
datanodes = [ Node(1, "data") ]
allnodes = journalnodes + namenodes + datanodes
#################################################################################
def usage():
    # Print the CLI help text (Python 2 print statement; the text is
    # user-visible output and is kept verbatim).
    print """
qjm: testing script for qjm-enabled clusters.
usage: qjm [options] [action]
options:
-d: apply to DataNodes only
-h: this help message
-j: apply to JournalNodes only
-n: apply to NameNodes only
actions:
start: start all daemons
stop: stop all daemons
"""
# Parse command-line options; any getopt failure shows usage and exits.
try:
    optlist, next_args = getopt.getopt(sys.argv[1:], ':dhjn')
except getopt.GetoptError:
    usage()
    sys.exit(1)
# Default: act on every node type; -d/-j/-n narrow to a single type
# (last flag wins).
target = [ "data", "journal", "name" ]
for opt in optlist:
    if opt[0] == '-h':
        usage()
        sys.exit(0)
    if opt[0] == '-d':
        target = [ "data" ]
    if opt[0] == '-j':
        target = [ "journal" ]
    if opt[0] == '-n':
        target = [ "name" ]
# First positional argument selects the action.
if (len(next_args) < 1):
    usage()
    sys.exit(1)
elif (next_args[0] == "start"):
    action = "start"
elif (next_args[0] == "stop"):
    action = "stop"
else:
    action = "help"
# Dispatch: start/stop every node whose type is in the target list.
if action == "help":
    usage()
    sys.exit(0)
elif action == "start":
    for node in allnodes:
        if (node.ty in target):
            node.start()
elif action == "stop":
    for node in allnodes:
        if (node.ty in target):
            node.stop()
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MainWindow
###########################################################################
class MainWindow ( wx.Frame ):
    """Hangman main frame generated by wxFormBuilder.

    Layout: the secret-word display, a 4x7 grid of A-Z letter buttons, a
    lives gauge with a spin control, a statistics box and New Game /
    Load Words buttons.  The event handlers at the bottom are stubs to
    be overridden in a derived class — do not edit this generated file.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Hangman", pos = wx.DefaultPosition, size = wx.Size( 500,400 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHints( wx.Size( 500,400 ), wx.DefaultSize )
        self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
        bSizer1 = wx.BoxSizer( wx.VERTICAL )
        # Read-only, centred display of the word being guessed.
        self.m_word = wx.TextCtrl( self, wx.ID_ANY, u"HANGMAN", wx.DefaultPosition, wx.DefaultSize, wx.TE_READONLY|wx.TE_CENTER )
        self.m_word.SetFont( wx.Font( 36, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, "Consolas" ) )
        bSizer1.Add( self.m_word, 0, wx.ALL|wx.EXPAND, 5 )
        # 4x7 grid holding the 26 letter buttons (plus two spacers).
        buttonsSizer = wx.GridSizer( 4, 7, 0, 0 )
        self.m_btn_Key0 = wx.Button( self, wx.ID_ANY, u"A", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key0, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key1 = wx.Button( self, wx.ID_ANY, u"B", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key1, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key2 = wx.Button( self, wx.ID_ANY, u"C", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key2, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key3 = wx.Button( self, wx.ID_ANY, u"D", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key3, 1, wx.EXPAND|wx.ALL, 5 )
        self.m_btn_Key4 = wx.Button( self, wx.ID_ANY, u"E", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key4, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key5 = wx.Button( self, wx.ID_ANY, u"F", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key5, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key6 = wx.Button( self, wx.ID_ANY, u"G", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key6, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key7 = wx.Button( self, wx.ID_ANY, u"H", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key7, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key8 = wx.Button( self, wx.ID_ANY, u"I", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key8, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key9 = wx.Button( self, wx.ID_ANY, u"J", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key9, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key10 = wx.Button( self, wx.ID_ANY, u"K", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key10, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key11 = wx.Button( self, wx.ID_ANY, u"L", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key11, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key12 = wx.Button( self, wx.ID_ANY, u"M", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key12, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key13 = wx.Button( self, wx.ID_ANY, u"N", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key13, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key14 = wx.Button( self, wx.ID_ANY, u"O", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key14, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key15 = wx.Button( self, wx.ID_ANY, u"P", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key15, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key16 = wx.Button( self, wx.ID_ANY, u"Q", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key16, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key17 = wx.Button( self, wx.ID_ANY, u"R", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key17, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key18 = wx.Button( self, wx.ID_ANY, u"S", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key18, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key19 = wx.Button( self, wx.ID_ANY, u"T", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key19, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key20 = wx.Button( self, wx.ID_ANY, u"U", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key20, 1, wx.ALL|wx.EXPAND, 5 )
        # Spacer keeps V-Z centred on the last grid row.
        buttonsSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_btn_Key21 = wx.Button( self, wx.ID_ANY, u"V", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key21, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key22 = wx.Button( self, wx.ID_ANY, u"W", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key22, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key23 = wx.Button( self, wx.ID_ANY, u"X", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key23, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key24 = wx.Button( self, wx.ID_ANY, u"Y", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key24, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Key25 = wx.Button( self, wx.ID_ANY, u"Z", wx.DefaultPosition, wx.DefaultSize, 0 )
        buttonsSizer.Add( self.m_btn_Key25, 1, wx.ALL|wx.EXPAND, 5 )
        buttonsSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        bSizer1.Add( buttonsSizer, 1, wx.EXPAND, 5 )
        # "Lives" box: gauge plus a label/spin pair to adjust lives.
        sbSizer1 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Lives" ), wx.VERTICAL )
        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_gauge_lives = wx.Gauge( sbSizer1.GetStaticBox(), wx.ID_ANY, 6, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
        self.m_gauge_lives.SetValue( 6 )
        bSizer4.Add( self.m_gauge_lives, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer5 = wx.BoxSizer( wx.VERTICAL )
        bSizer5.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        bSizer6 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_livesCount = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"6", wx.DefaultPosition, wx.Size( 15,-1 ), wx.ALIGN_CENTER_HORIZONTAL )
        self.m_livesCount.Wrap( -1 )
        bSizer6.Add( self.m_livesCount, 0, wx.ALL, 5 )
        self.m_spinBtn1 = wx.SpinButton( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
        bSizer6.Add( self.m_spinBtn1, 1, wx.EXPAND, 5 )
        bSizer5.Add( bSizer6, 0, 0, 5 )
        bSizer5.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        bSizer4.Add( bSizer5, 0, wx.EXPAND, 5 )
        sbSizer1.Add( bSizer4, 1, wx.EXPAND, 5 )
        bSizer1.Add( sbSizer1, 0, wx.EXPAND, 5 )
        # "Statistics" box: games played / won / lost and success rate.
        sbSizer2 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Statistics" ), wx.VERTICAL )
        bSizer2 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText1 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games played:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        bSizer2.Add( self.m_staticText1, 0, wx.ALL, 5 )
        self.m_gamesPlayed = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesPlayed.Wrap( -1 )
        bSizer2.Add( self.m_gamesPlayed, 0, wx.ALL, 5 )
        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_staticText2 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games won", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        bSizer2.Add( self.m_staticText2, 0, wx.ALL, 5 )
        self.m_gamesWon = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesWon.Wrap( -1 )
        bSizer2.Add( self.m_gamesWon, 0, wx.ALL, 5 )
        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_staticText5 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games lost", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText5.Wrap( -1 )
        bSizer2.Add( self.m_staticText5, 0, wx.ALL, 5 )
        self.m_gamesLost = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesLost.Wrap( -1 )
        bSizer2.Add( self.m_gamesLost, 0, wx.ALL, 5 )
        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_staticText7 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Succes percentage", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText7.Wrap( -1 )
        bSizer2.Add( self.m_staticText7, 0, wx.ALL, 5 )
        self.m_successPercent = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.Size( 25,-1 ), wx.ALIGN_RIGHT )
        self.m_successPercent.Wrap( -1 )
        bSizer2.Add( self.m_successPercent, 0, wx.TOP|wx.BOTTOM|wx.LEFT, 5 )
        self.m_staticText9 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"%", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText9.Wrap( -1 )
        bSizer2.Add( self.m_staticText9, 0, wx.TOP|wx.BOTTOM|wx.RIGHT, 5 )
        sbSizer2.Add( bSizer2, 1, wx.EXPAND, 5 )
        bSizer1.Add( sbSizer2, 0, wx.EXPAND, 5 )
        # Bottom action buttons.
        gSizer3 = wx.GridSizer( 1, 2, 0, 0 )
        self.m_btn_newGame = wx.Button( self, wx.ID_ANY, u"New Game", wx.DefaultPosition, wx.DefaultSize, 0 )
        gSizer3.Add( self.m_btn_newGame, 1, wx.ALL|wx.EXPAND, 5 )
        self.m_btn_Load = wx.Button( self, wx.ID_ANY, u"Load Words", wx.DefaultPosition, wx.DefaultSize, 0 )
        gSizer3.Add( self.m_btn_Load, 1, wx.ALL|wx.EXPAND, 5 )
        bSizer1.Add( gSizer3, 0, wx.EXPAND, 5 )
        self.SetSizer( bSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect Events — all letter buttons funnel into one handler.
        self.m_btn_Key0.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key1.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key2.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key3.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key4.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key5.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key6.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key7.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key8.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key9.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key10.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key11.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key12.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key13.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key14.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key15.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key16.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key17.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key18.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key19.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key20.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key21.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key22.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key23.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key24.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_btn_Key25.Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_spinBtn1.Bind( wx.EVT_SPIN_DOWN, self.OnLivesDown )
        self.m_spinBtn1.Bind( wx.EVT_SPIN_UP, self.OnLivesUp )
        self.m_btn_newGame.Bind( wx.EVT_BUTTON, self.NewGameButtonClicked )
        self.m_btn_Load.Bind( wx.EVT_BUTTON, self.LoadButtonClicked )

    def __del__( self ):
        pass

    # Virtual event handlers, overide them in your derived class
    def LetterButtonClicked( self, event ):
        event.Skip()

    def OnLivesDown( self, event ):
        event.Skip()

    def OnLivesUp( self, event ):
        event.Skip()

    def NewGameButtonClicked( self, event ):
        event.Skip()

    def LoadButtonClicked( self, event ):
        event.Skip()
|
"""
.. module:: wheel_control
:platform: Unix
:synopsis: Module for interfacing with the Commanduino core device in the Base Layer
.. moduleauthor:: Graham Keenan <https://github.com/ShinRa26>
"""
import os
import sys
import time
import inspect
# Resolve the directory containing this file, then make the operation and
# project roots importable when this module is run directly.
HERE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
root_path = os.path.join(HERE, "..", "..")
op_path = os.path.join(HERE, "..")
sys.path.append(op_path)
sys.path.append(root_path)
import constants.common as cst
from base_layer.commanduino_setup.core_device import CoreDevice
from commanduino.commanddevices.commanddevice import CommandTimeOutError
""" CONSTANTS """
FULL_WHEEL_TURN = 6400
PUMP_INCREMENT = 8000
MODULE_LOWER = 31000
REST_POSITION = 15000
WHEEL_CONFIG = os.path.join(HERE, "..", "configs", "wheel_config.json")
class WheelControl(CoreDevice):
    """
    Class for controlling a Geneva Wheel system
    Contains methods for rotation, modular drivers, pumps, etc.
    Assumes the user has at least one Geneva wheel, one modular driver, and one peristaltic
    pump attached to their rig.
    Inherits:
        CoreDevice: Commanduino Base Device
    Args:
        config (str): Path to the config
    """
    def __init__(self):
        # Load the wheel rig config and park every module before use.
        CoreDevice.__init__(self, WHEEL_CONFIG)
        self.home_all_modules()
    def turn_wheel(self, n_turns: int, wait=True):
        """
        Turns the Geneva Wheel n_turns times
        Args:
            n_turns (int): Number of turns
            wait (bool): Wait for the device to be idle, default set to True
        """
        try:
            drive_wheel = self.get_device_attribute(cst.WHEEL_NAME)
            for _ in range(n_turns):
                drive_wheel.move(FULL_WHEEL_TURN, wait=wait)
        except CommandTimeOutError:
            # Firmware occasionally times out on long moves; treated as benign.
            print("Commanduino -- Timeout error, ignore.")
    def move_module(self, mod_name: str, pos: int, wait=True):
        """
        Moves the modular driver to a set position
        Args:
            mod_name (str): Name of the module
            pos (int/float): Number of steps to move
            wait (bool): Wait for the device to be idle, default set to True
        """
        try:
            module = self.get_device_attribute(mod_name)
            # NOTE(review): the original comment said "-ve due to inverted
            # direction" but pos is passed unnegated — confirm direction.
            module.move_to(pos, wait=wait)
        except CommandTimeOutError:
            print("Commanduino -- Timeout error, ignore.")
    def lower_module(self, mod_name: str, wait=True):
        """
        Lowers the modular driver
        Args:
            mod_name (str): Name of the modular driver
            wait (bool): Wait for the device to be idle, default set to true
        """
        self.move_module(mod_name, MODULE_LOWER, wait=wait)
    def home_module(self, mod_name: str, wait=True):
        """
        Brings the module back to its home position
        Args:
            mod_name (str): Name of the module
            wait (bool): Wait for the device to be idle, default set to true
        """
        try:
            module = self.get_device_attribute(mod_name)
            module.home(wait=wait)
        except CommandTimeOutError:
            print("Commanduino -- Timeout error, ignore.")
    def home_all_modules(self, wait=True):
        """Homes all the modules and places them in a rest position
        For weight reasons
        Keyword Arguments:
            wait {bool} -- Whether to wait for th operations to complete (default: {True})
        """
        self.home_module(cst.MSD1, wait=wait)
        self.home_module(cst.DUAL_BODY, wait=wait)
        self.home_module(cst.DUAL_SYRINGE, wait=wait)
        # Park syringe and body at REST_POSITION after homing.
        self.move_module(cst.DUAL_SYRINGE, REST_POSITION, wait=wait)
        self.move_module(cst.DUAL_BODY, REST_POSITION, wait=wait)
    def run_peri_pump(self, pump_name: str, num_secs: int):
        """
        Runs a peristaltic pump for num_secs time
        Args:
            pump_name (str): Name of the pump
            num_secs (int/float): Number of seconds to run for
        """
        pump = self.get_device_attribute(pump_name)
        curr_time = time.time()
        # Keep issuing fixed-size moves until the wall-clock budget expires.
        while time.time() < (curr_time + num_secs):
            pump.move(PUMP_INCREMENT)
    def set_ring_stir_rate(self, value: int):
        """Sets the stirrer rate for the main ring
        Arguments:
            value {int} -- Value to set the PWM to
        """
        if self.valid_device(cst.RING):
            ring = self.get_device_attribute(cst.RING)
            ring.set_pwm_value(value)
        else:
            print("\"{}\" is not recognised in the manager!".format(cst.RING))
|
from src.program import get_secret_number
def test_secret_number_in_range():
    """get_secret_number must always fall within the inclusive range 1..100."""
    value = get_secret_number()
    assert 1 <= value <= 100
|
#!/usr/bin/env python
from StringIO import StringIO
from PIL import Image
import cv2
import numpy as np
import math
def list_camera_ids():
    """Return the string identifiers of the cameras known to this backend."""
    return [str(index) for index in range(2)]
class Camera(object):
    # Motion-counting frame source built on the legacy OpenCV 2.4 API
    # (cv2.cv, BackgroundSubtractorMOG).  Despite the camera_id argument
    # it reads from the hard-coded file 'vid.mp4'.
    def __init__(self, camera_id, size, fps):
        # Requested output size/fps; camera_id is currently ignored
        # (capture is opened on 'vid.mp4' instead).
        self.width = size[0]
        self.height = size[1]
        self.count = 0        # number of motion events seen in the ROI
        self.last_state = 0   # 1 while motion is present, for edge detection
        self.cap = cv2.VideoCapture('vid.mp4')
        self.fgbg = cv2.BackgroundSubtractorMOG()
        self.fourcc = cv2.cv.CV_FOURCC('M','J','P','G')
        #fourcc = int(self.cap.get(cv2.cv.CV_CAP_PROP_FOURCC))
        self.out = cv2.VideoWriter('output.avi',self.fourcc, self.cap.get(cv2.cv.CV_CAP_PROP_FPS), (int(self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),int(self.cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))))
    def get_frame(self):
        # Read one frame, run background subtraction, count rising edges of
        # motion inside the rf_overlay ROI, and return the annotated frame
        # as JPEG bytes.
        ret, img = self.cap.read()
        image1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # ret,image = cv2.threshold(image,127,255,cv2.THRESH_BINARY)
        image2 = self.fgbg.apply(image1)
        x, y, w, h = rf_overlay(img)
        ref = image2[y:y+h, x:x+w]
        white = cv2.countNonZero(ref)
        # Rising edge: motion just appeared in the ROI.
        if white > 0 and self.last_state == 0:
            self.count += 1
            self.last_state = 1
        if white == 0 and self.last_state == 1:
            self.last_state = 0
        cv2.putText(img, str(self.count), (x-5,y-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
        ret, image2 = cv2.imencode('.jpg', img)
        # NOTE(review): image2 is a JPEG-encoded buffer here, not a raw BGR
        # frame — feeding it to VideoWriter.write looks wrong; confirm.
        if self.count < 3:
            self.out.write(image2)
        else:
            self.out.release()
        image2 = Image.fromarray(image2)
        #image = Image.new('RGB', (self.width, self.height), 'black')
        # buf = StringIO()
        # image.save(buf, 'JPEG')
        return image2.tobytes()
def rf_overlay(image):
    """Draw the green reference rectangle on *image* (in place) and return
    its position and size as (x, y, width, height)."""
    height, width = image.shape[:2]
    left = int(width*0.3)
    top = int(height*0.8)
    rf_width = int(width*0.15)
    rf_height = int(height*0.1)
    top_left = (left, top)
    bottom_right = (left+rf_width, top+rf_height )
    cv2.rectangle(image, top_left, bottom_right, (0,255,0), 2)
    return (left, top, rf_width, rf_height)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
import numpy as np
import scipy as sp
import scipy.io as sio
import sys
import os
#import MySQLdb
# Preload the path -> label mapping from train.txt.
# Expected line format: "<path> <label>\n"; line[:-1] drops the newline.
dict_clean = {}
with open('train.txt','r') as f:
    for line in f.readlines():
        key_val = line[:-1].split(' ')
        dict_clean[key_val[0]] = key_val[1]
def CNNclean(para):
    """Look up the precomputed label for image path *para* in the mapping
    loaded from train.txt.  (Read-only access needs no `global`.)"""
    return dict_clean[para]
def listpuredir(dir):
    """Classify every entry of *dir* with CNNclean and append one
    "<path> <label>" line per file to cb.txt.

    Fixes: the output handle was leaked when CNNclean raised (close()
    was unreachable); opened via a context manager now.  The Py2-only
    `print path` statement is replaced by the parenthesised form that
    works under both Python 2 and 3.  Note 'w+' truncates cb.txt on
    every call.
    """
    entries = os.listdir(dir)
    with open('cb.txt','w+') as fo:
        for line in entries:
            path = dir+'/'+line
            print(path)
            rt = CNNclean(path)
            fo.write(path+' '+str(rt))
            fo.write('\n')
            #print "%d" %(rt)
checkdir="/home/wang/gf/image"
#listpuredir(checkdir)
#rt=gf("/home/second/tsinghua_image/test/FXPXC0120140410170244ZT003.jpg")
#print "%d" %(rt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppJfexportChargeinstQueryModel(object):
    """Generated request model for alipay.ebpp.jfexport.chargeinst.query.

    Plain property-per-field holder following the Alipay SDK pattern:
    every field defaults to None and round-trips through
    to_alipay_dict / from_alipay_dict.
    """

    def __init__(self):
        self._biz_type = None
        self._extend_field = None
        self._page = None
        self._page_query = None
        self._page_size = None
        self._sub_biz_type = None

    @property
    def biz_type(self):
        return self._biz_type
    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value
    @property
    def extend_field(self):
        return self._extend_field
    @extend_field.setter
    def extend_field(self, value):
        self._extend_field = value
    @property
    def page(self):
        return self._page
    @page.setter
    def page(self, value):
        self._page = value
    @property
    def page_query(self):
        return self._page_query
    @page_query.setter
    def page_query(self, value):
        self._page_query = value
    @property
    def page_size(self):
        return self._page_size
    @page_size.setter
    def page_size(self, value):
        self._page_size = value
    @property
    def sub_biz_type(self):
        return self._sub_biz_type
    @sub_biz_type.setter
    def sub_biz_type(self, value):
        self._sub_biz_type = value

    def to_alipay_dict(self):
        # Serialize non-empty fields; nested SDK objects are recursed via
        # their own to_alipay_dict.  NOTE(review): truthiness checks skip
        # falsy values such as 0 — standard for generated Alipay models.
        params = dict()
        if self.biz_type:
            if hasattr(self.biz_type, 'to_alipay_dict'):
                params['biz_type'] = self.biz_type.to_alipay_dict()
            else:
                params['biz_type'] = self.biz_type
        if self.extend_field:
            if hasattr(self.extend_field, 'to_alipay_dict'):
                params['extend_field'] = self.extend_field.to_alipay_dict()
            else:
                params['extend_field'] = self.extend_field
        if self.page:
            if hasattr(self.page, 'to_alipay_dict'):
                params['page'] = self.page.to_alipay_dict()
            else:
                params['page'] = self.page
        if self.page_query:
            if hasattr(self.page_query, 'to_alipay_dict'):
                params['page_query'] = self.page_query.to_alipay_dict()
            else:
                params['page_query'] = self.page_query
        if self.page_size:
            if hasattr(self.page_size, 'to_alipay_dict'):
                params['page_size'] = self.page_size.to_alipay_dict()
            else:
                params['page_size'] = self.page_size
        if self.sub_biz_type:
            if hasattr(self.sub_biz_type, 'to_alipay_dict'):
                params['sub_biz_type'] = self.sub_biz_type.to_alipay_dict()
            else:
                params['sub_biz_type'] = self.sub_biz_type
        return params

    @staticmethod
    def from_alipay_dict(d):
        # Build a model from a response dict; returns None for empty input.
        if not d:
            return None
        o = AlipayEbppJfexportChargeinstQueryModel()
        if 'biz_type' in d:
            o.biz_type = d['biz_type']
        if 'extend_field' in d:
            o.extend_field = d['extend_field']
        if 'page' in d:
            o.page = d['page']
        if 'page_query' in d:
            o.page_query = d['page_query']
        if 'page_size' in d:
            o.page_size = d['page_size']
        if 'sub_biz_type' in d:
            o.sub_biz_type = d['sub_biz_type']
        return o
|
#Written by 《ERFAN》
#t.me/ErfanMAfshar
import hashlib,os
def main():
    """Interactive hash generator.

    Loops over a menu: choice "00" asks for an algorithm and a text and
    prints its hex digest; choice "99" exits.
    """
    # Menu choice -> (display name, hashlib constructor).  Replaces six
    # copy-pasted if/elif branches that differed only in these two values.
    algorithms = {
        "1": ("MD5", hashlib.md5),
        "2": ("Sha1", hashlib.sha1),
        "3": ("Sha256", hashlib.sha256),
        "4": ("Sha512", hashlib.sha512),
        "5": ("Sha224", hashlib.sha224),
        "6": ("Sha384", hashlib.sha384),
    }

    def py_green(skk):
        # Print text in green using an ANSI escape sequence.
        print("\033[92m {}\033[00m".format(skk))

    # Loop instead of the original recursion back into main(), which grew
    # the call stack by one frame on every menu round trip.
    while True:
        os.system('clear')
        print("---------------------------------")
        print("| |")
        print("| Hash Generator |")
        print("| |")
        print("| {00} Generate |")
        print("| {99} Exit |")
        print("| |")
        print("---------------------------------")
        i = input("~~> ")
        if i == "99":
            print("...GoodBye...")
            break
        if i != "00":
            print("Please select the correct option!")
            input("\nPress Enter")
            continue
        os.system('clear')
        print("-------------------------")
        print(" ")
        print(" Choose your hash type ")
        print(" ")
        print(" 1)Md5 Hash ")
        print(" 2)Sha1 Hash ")
        print(" 3)Sha256 Hash ")
        print(" 4)Sha512 Hash ")
        print(" 5)Sha224 Hash ")
        print(" 6)Sha384 Hash ")
        print(" ")
        print("-------------------------")
        hh = input("--> ")
        if hh in algorithms:
            name, constructor = algorithms[hh]
            text = input("Enter Your Text: ")
            py_green("Success!")
            print("Your {} Hash is --> \n".format(name), end="")
            print(constructor(text.encode()).hexdigest())
        else:
            print("Please select the correct option!")
        input("\nPress Enter")


main()
|
# -*- coding: utf-8 -*-
"""
Append simulation results into one projection file per turn (i.e. sum up all separate simualations, but keep turns separated).
"""
import numpy as np
import os
#import matplotlib.pyplot as plt
# For each phantom folder, append the per-simulation projection files into
# one accumulated projection file per helical turn.
for folder in ('70100644Phantom_labelled_no_bed',
'70114044Phantom_labelled_no_bed',
'70122044Phantom_labelled_no_bed',
'70135144Phantom_labelled_no_bed',
'70141544Phantom_labelled_no_bed',
'70153744Phantom_labelled_no_bed'):
    path = os.path.join('/my/output/',folder)
    phantomName = '/helical_proj_120kVSpectrum_' + folder + '_2000photons_per_Sim'
    # NOTE(review): `iter` shadows the builtin and is never incremented, so the
    # `iter == 0` branch below is always taken and the np.append branch is dead
    # code — each file simply replaces `projections`. Confirm this is intended.
    iter = 0
    for turnNumber in range(0,23):
        for number_of_simu in range(60):
            print('Running sim no: %i, turn no: %i' %(number_of_simu, turnNumber))
            # Per-simulation, per-turn projection file produced upstream.
            myFile = (path + phantomName +
                      '_Sim_num_{}'.format(number_of_simu) +
                      '_Turn_num_{}'.format(turnNumber) +
                      '.npy')
            if iter == 0: #turnNumber == 0:
                projections = np.load(myFile)
            else:
                #None
                projections = np.append(projections,np.load(myFile),axis = 0)
            # Sum the simulations for this turn (first one initializes the total).
            if number_of_simu > 0:
                projectionsTot += projections
            else:
                projectionsTot = projections
        # One merged output file per helical turn.
        saveName = path + '/HelicalSkullCT_' + folder +'_Dose150mGy_Turn_' + str(turnNumber) + '.data.npy'
        print('Saving: %s' % saveName)
        np.save(saveName,projectionsTot)
#plt.imshow(projections[1:-1:20,:,10])
|
import players
import string
'''
Squash league management UI
Author: Ed Jones
Date: 18 May 2016
'''
#
# default data file holding the league's player records
data_file = "../data/players.txt"
#
# sentinel meaning "leave this field unchanged" when editing a player
edit_none = players.edit_none
#
# set to 1 by any menu option that changes data; checked on quit to prompt a save
modified = 0;
# always load the players.txt file at startup
players.load_players(data_file)
# using dictionaries to control data
while True:
print "\n - Please choose from the following options:\n"
print "\t1 - To print out the list of players and all relevant information for notice board\n"
print "\t2 - To print out list of players e-mail addresses\n"
print "\t3 - To print a list of players from division (1-6) \n"
print "\t4 - To add players to division\n"
print "\t5 - To delete players from a division\n"
print "\t6 - To edit information on players\n"
print "\t7 - To see division standings\n"
print "\t8 - Enter player points\n"
print "\t9 - Rollup divisions\n"
print "Or enter 'q' to quit\n"
option = raw_input("\t: ")
#9 possible outcomes & quit
if option == "1":
#
# Print the league tables for the notice board
#
players.print_notice()
elif option == "2":
#
# Print all players names and emails
#
players.print_emails()
elif option == "3":
#
# Print contact details for players for a division
#
division = raw_input("\n\t\tPlease enter the division number: ")
try:
players.print_division(int(division))
except:
print("Division not found")
elif option == "4":
#
# Enter new player
#
forename = ''
surname = ''
email = ''
phone = ''
division = 0
print ("* required values")
while forename == '':
forename = raw_input("\n\t\t*First name: ")
while surname == '':
surname = raw_input("\n\t\t*Surname name: ")
while email == '':
email = raw_input("\n\t\t*Email: ")
phone = raw_input("\n\t\tPhone: ")
try:
div = int(raw_input("\n\t\t*Division (1-6): "))
except:
div = 0
while (div < 1) or (div > 6):
try:
div = int(raw_input("\n\t\t*Division (1-6): "))
except:
div = 0
players.add_player(forename,surname,email,phone,division)
# trigger save on exit
modified = 1
elif option == "5":
#
# Delete specified player
#
forename = ''
surname = ''
print ("* required values")
while forename == '':
forename = raw_input("\n\t\t*First name: ")
while surname == '':
surname = raw_input("\n\t\t*Surname name: ")
players.delete_player(forename,surname)
# trigger save on exit
modified = 1
elif option == "6":
#
# Edit player info
#
forename = ''
surname = ''
email = ''
phone_number = ''
division_current = 0
points_current = 0
division_previous = 0
point_previous = 0
print ("* required values")
while forename == '':
forename = raw_input("\n\t\t*First name: ")
while surname == '':
surname = raw_input("\n\t\t*Surname name: ")
# use names as key as easier to remember than email but that is unique
info = players.get_player_info(forename,surname)
# no found
if len(info) > 0:
# prompt showing current values
# null string '' no changes
# edit_none no changes
email = raw_input("\n\t\tEmail ("+ info['email'] +") : ")
phone_number = raw_input("\n\t\tPnone number ("+ info['phone_number'] +") : ")
# catch exceptions for blank input
try:
print "use 0 for unavailable this round"
print "use -1 as away until further notice"
division_current = int(raw_input("\n\t\tCurrent division ("+ str(info['division_current']) +") : "))
except:
# null value pass edit_none as 0 & -1 are valid inputs
division_current = edit_none
try:
points_current = int(raw_input("\n\t\tCurrent points ("+ str(info['points_current']) +") : "))
except:
points_current = edit_none
try:
division_previous = int(raw_input("\n\t\tPrevious division ("+ str(info['division_previous']) +") : "))
except:
division_previous = edit_none
try:
points_previous = int(raw_input("\n\t\tPrevious points ("+ str(info['points_previous']) +") : "))
except:
points_previous = edit_none
players.edit_player(forename,surname,email,phone_number,division_current,points_current,division_previous,points_previous)
# trigger save on exit
modified = 1
else:
print "\nPLAYER %s %s NOT FOUND\n" % (forename, surname)
elif option == "7":
#
# Print vision standings
#
division = raw_input("\n\t\tPlease enter the division number: ")
try:
players.print_standing(int(division))
except:
print("Division not found")
elif option == "8":
#
# Update points for all players
#
points = 0
player_list = players.get_players()
for player in player_list:
# get a valid points value
while points == 0:
try:
points = int(raw_input("\n\t\tPoints this round for " + player['forename'] + " " + player['surname'] + ": "))
except:
# default to zero if bad input
points = 0
players.edit_player(player['forename'],player['surname'],'','',edit_none,points+player['points_current'],edit_none,edit_none)
points = 0
# trigger save on exit
modified = 1
elif option == "9":
#
# Update divisions get the number of players to promote
#
try:
num_promote = int(raw_input("\n\t\tNumber players to promote: "))
except:
num_promote = 2
print "\nUpdate divisions y/n?\n"
option = raw_input("\t: ")
if string.lower(option) == "y":
players.update_divisions(num_promote)
# trigger save on exit
modified = 1
elif option == "q":
#
# if the data has changed then prompt to save file
#
if modified > 0:
print "\nSave players changes y/n?\n"
option = raw_input("\t: ")
if string.lower(option) != "n":
# save the file if not answered n
players.save_players(data_file)
break
|
#
# Copyright (c) 2007 Hyperic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sigar;
# Python 2 script: query physical memory and swap via the SIGAR library and
# print a summary in the style of the `free` command (values in KB, RAM in MB).
sg = sigar.open()
mem = sg.mem()
swap = sg.swap()
sg.close()
print "\tTotal\tUsed\tFree"
print "Mem:\t",\
    (mem.total() / 1024), \
    (mem.used() / 1024), \
    (mem.free() / 1024)
print "Swap:\t", \
    (swap.total() / 1024), \
    (swap.used() / 1024), \
    (swap.free() / 1024)
print "RAM:\t", mem.ram(), "MB"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 13:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Initial schema for the cloudlaunch app.

    Auto-generated by Django 1.11.6; applied migrations should not be
    hand-edited, so only comments are added here.
    """
    initial = True
    # Depends on the djcloudbridge initial migration and on whatever model is
    # configured as AUTH_USER_MODEL.
    dependencies = [
        ('djcloudbridge', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='AppCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, choices=[('FEATURED', 'Featured'), ('GALAXY', 'Galaxy'), ('SCALABLE', 'Scalable'), ('VM', 'Virtual machine')], max_length=100, null=True, unique=True)),
            ],
            options={
                'verbose_name_plural': 'App categories',
            },
        ),
        migrations.CreateModel(
            name='Application',
            fields=[
                ('added', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=60)),
                ('slug', models.SlugField(max_length=100, primary_key=True, serialize=False)),
                ('status', models.CharField(blank=True, choices=[('DEV', 'Development'), ('CERTIFICATION', 'Certification'), ('LIVE', 'Live')], default='DEV', max_length=50, null=True)),
                ('summary', models.CharField(blank=True, max_length=140, null=True)),
                ('maintainer', models.CharField(blank=True, max_length=255, null=True)),
                ('description', models.TextField(blank=True, max_length=32767, null=True)),
                ('info_url', models.URLField(blank=True, max_length=2048, null=True)),
                ('icon_url', models.URLField(blank=True, max_length=2048, null=True)),
                ('default_launch_config', models.TextField(blank=True, help_text='Application-wide initial configuration data to parameterize the launch with.', max_length=16384, null=True)),
                # NOTE(review): null=True has no effect on ManyToManyField (Django warning fields.W340).
                ('category', models.ManyToManyField(blank=True, null=True, to='cloudlaunch.AppCategory')),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='ApplicationDeployment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=60)),
                ('archived', models.BooleanField(default=False)),
                ('provider_settings', models.TextField(blank=True, help_text='Cloud provider specific settings used for this launch.', max_length=16384, null=True)),
                ('application_config', models.TextField(blank=True, help_text='Application configuration data used for this launch.', max_length=16384, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ApplicationDeploymentTask',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('celery_id', models.TextField(blank=True, help_text='Celery task id for any background jobs running on this deployment', max_length=64, null=True, unique=True)),
                ('action', models.CharField(blank=True, choices=[('LAUNCH', 'Launch'), ('HEALTH_CHECK', 'Health check'), ('RESTART', 'Restart'), ('DELETE', 'Delete')], max_length=255, null=True)),
                ('_result', models.TextField(blank=True, db_column='result', help_text='Result of Celery task', max_length=16384, null=True)),
                ('_status', models.CharField(blank=True, db_column='status', max_length=64, null=True)),
                ('traceback', models.TextField(blank=True, help_text='Celery task traceback, if any', max_length=16384, null=True)),
                ('deployment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='cloudlaunch.ApplicationDeployment')),
            ],
        ),
        migrations.CreateModel(
            name='ApplicationVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.CharField(max_length=30)),
                ('frontend_component_path', models.CharField(blank=True, max_length=255, null=True)),
                ('frontend_component_name', models.CharField(blank=True, max_length=255, null=True)),
                ('backend_component_name', models.CharField(blank=True, max_length=255, null=True)),
                ('default_launch_config', models.TextField(blank=True, help_text='Version specific configuration data to parameterize the launch with.', max_length=16384, null=True)),
                ('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='cloudlaunch.Application')),
                ('default_cloud', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='djcloudbridge.Cloud')),
            ],
        ),
        migrations.CreateModel(
            name='ApplicationVersionCloudConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('default_instance_type', models.CharField(blank=True, max_length=256, null=True)),
                ('default_launch_config', models.TextField(blank=True, help_text='Cloud specific initial configuration data to parameterize the launch with.', max_length=16384, null=True)),
                ('application_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='app_version_config', to='cloudlaunch.ApplicationVersion')),
                ('cloud', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='app_version_config', to='djcloudbridge.Cloud')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=60)),
                ('image_id', models.CharField(max_length=50, verbose_name='Image ID')),
                ('description', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Usage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('app_config', models.TextField(blank=True, max_length=16384, null=True)),
                ('app_deployment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='app_version_cloud_config', to='cloudlaunch.ApplicationDeployment')),
                ('app_version_cloud_config', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='app_version_cloud_config', to='cloudlaunch.ApplicationVersionCloudConfig')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Usage',
                'ordering': ['added'],
            },
        ),
        # CloudImage is a multi-table-inheritance child of Image.
        migrations.CreateModel(
            name='CloudImage',
            fields=[
                ('image_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cloudlaunch.Image')),
                ('cloud', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='djcloudbridge.Cloud')),
            ],
            options={
                'abstract': False,
            },
            bases=('cloudlaunch.image',),
        ),
        # Foreign keys added after model creation to break circular references.
        migrations.AddField(
            model_name='applicationdeployment',
            name='application_version',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cloudlaunch.ApplicationVersion'),
        ),
        migrations.AddField(
            model_name='applicationdeployment',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='applicationdeployment',
            name='target_cloud',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djcloudbridge.Cloud'),
        ),
        migrations.AddField(
            model_name='application',
            name='default_version',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='cloudlaunch.ApplicationVersion'),
        ),
        migrations.AddField(
            model_name='applicationversioncloudconfig',
            name='image',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='cloud', chained_model_field='cloud', on_delete=django.db.models.deletion.CASCADE, to='cloudlaunch.CloudImage'),
        ),
        migrations.AlterUniqueTogether(
            name='applicationversion',
            unique_together=set([('application', 'version')]),
        ),
        migrations.AlterUniqueTogether(
            name='applicationversioncloudconfig',
            unique_together=set([('application_version', 'cloud')]),
        ),
    ]
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import re
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class OutputNameStyle(CloudFormationLintRule):
    """Flag template Outputs whose logical names are not oCamelCase."""
    id = 'E9500'
    shortdesc = 'Output names follow proper structure'
    description = 'Outputs begin with a lowercase \'o\' and are in oCamelCase'
    source_url = ''
    tags = ['outputs']

    def match(self, cfn):
        """Return one RuleMatch per output name failing the naming pattern."""
        pattern = re.compile("^([o][A-Z_0-9]+[a-zA-Z0-9]*)+$")
        message = 'Output {0} should begin with a lowercase \'o\' and follow oCamelCase'
        return [
            RuleMatch(['Outputs', outputname], message.format(outputname))
            for outputname in cfn.template.get('Outputs', {})
            if not pattern.match(outputname)
        ]
|
import sys
stdin = ["4", "1 2", "3 4", "5 6", "7 8"]  # sample input; NOTE(review): unused — the loop below reads the real sys.stdin
# In Python 3, this question doesn't apply. The plain int type is unbounded.
# Print the sum of the two space-separated integers on each input line.
for line in sys.stdin:
    temp = line.split(" ")
    if len(temp) < 2:  # skip lines without two fields (e.g. a leading count line)
        continue
    print(int(temp[0]) + int(temp[1]))
import requests
from bs4 import BeautifulSoup
import random
# Scrape the cookpad category page and suggest one random recipe.
response = requests.get('https://cookpad.com/kondate/categories/6')
#print(response)
#print(response.text)
data = BeautifulSoup(response.text, 'html.parser')
recipe_class_data = data.find_all(class_="kondate_title")
recipe_name_list = []
recipe_url_list = []
# Collect titles and absolute URLs for the first 8 recipes.
for i in range(8):  # NOTE(review): assumes the page always yields >= 8 entries — verify
    recipe_title = recipe_class_data[i].a.string
    recipe_url = recipe_class_data[i].a['href']
    #print(recipe_class_data)
    #print(recipe_title)
    #print(recipe_url)
    # loop-invariant base URL; NOTE(review): if href starts with '/', the join
    # yields a double slash — confirm against the site's markup
    cookpad_url = 'https://cookpad.com/'
    recipe_name_list.append(recipe_title)
    recipe_url_list.append(cookpad_url+recipe_url)
# Pick one of the 8 recipes at random as today's suggestion.
today_recipe_number = random.randint(0,7)
print(recipe_name_list[today_recipe_number])
print(recipe_url_list[today_recipe_number])
import math  # math module import (NOTE(review): not actually used below)
try:  # protected block 1: reading the inputs
    b = float(input("Введите B="))
    d = float(input("Введите D="))
    x = float(input("Введите X="))
    try:  # protected block 2: computing y
        if x >= 8:
            y = (x-2)/(x**2)
        else:
            y= (b**2)*d+4*(x**2)
        print("y = %.1f" % y)
    except:  # error handler for protected block 2 (computation failed)
        print("Нет решения!")
except:  # error handler for protected block 1 (invalid input)
    print("Неверные входные данные!")
input("Нажмите Enter для выхода")  # pause before the program exits
|
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import numpy as np
import csv
from afinn import Afinn
import sys
DEBUG = False  # set True for verbose loading output
infilename = sys.argv[1]  # tab-separated gold-standard file to evaluate
'''
VADER gives every text a score for its sentiment. These thresholds are determined by a grid-search. They are tuned on the Semeval 2017 task 4 benchmark dataset.
Every value below the negthres will be classified as negative. Every value above posthres will be classified as positive. Every value in between is neutral.
'''
VADERNEGTHRES = -0.45
VADERPOSTHRES = 0.35
'''
Afinn also gives a sentiment score. Thresholds are also tuned on the Semeval 2017 task 4 benchmark dataset.
It turned out that the best settings were:
score = 0 --> neutral
score > 0 --> positive
score < 0 --> negative
'''
AFINNNEGTHRES = 0
AFINNPOSTHRES = 0
def read_csv_file(filename):
    '''
    Load a tab-separated file into a 2-D numpy array of strings.

    :param filename: name of the file to read
    :return: np-array with one row per input line
    '''
    with open(filename) as handle:
        rows = [row for row in csv.reader(handle, dialect="excel-tab")]
    data = np.array(rows)
    if DEBUG: print('Data loaded. Shape: {}'.format(data.shape))
    return data
def do_VADER_SA(texts, negthres, posthres):
    """Label every text 'n'/'p'/'d' from VADER's compound score.

    Scores below *negthres* are negative ('n'), above *posthres* positive
    ('p'), and everything in between neutral ('d').
    """
    analyzer = SentimentIntensityAnalyzer()
    annotated = []
    for text in texts:
        compound = analyzer.polarity_scores(text)['compound']
        annotated.append('n' if compound < negthres
                         else 'p' if compound > posthres
                         else 'd')
    return annotated
def do_afinn_SA(texts, negthres, posthres):
    """Label every text 'n'/'p'/'d' from its AFINN sentiment score.

    Scores below *negthres* are negative ('n'), above *posthres* positive
    ('p'), everything else neutral ('d').
    """
    analyzer = Afinn()
    annotated = []
    for text in texts:
        score = analyzer.score(text)
        annotated.append('n' if score < negthres
                         else 'p' if score > posthres
                         else 'd')
    return annotated
def make_confmat(listA, listB):
    '''
    Build a 3x3 confusion matrix over the sentiment labels p/d/n.

    :param listA: list of true labels ('p', 'd' or 'n')
    :param listB: list of predictions, same length and label set
    :return: np_array: confusion matrix (rows = true, cols = predicted)
    '''
    # Fixed label order: positive, neutral, negative.
    all_labels = ['p', 'd', 'n']
    labeldict = {label: i for i, label in enumerate(all_labels)}
    print(labeldict)
    # FIX: size the matrix by the fixed label set, not by how many labels
    # happen to occur in the data.  The original used len(set(listA+listB)),
    # which raised an IndexError whenever e.g. only 'p' and 'n' appeared
    # (labeldict['n'] == 2 indexed into a 2x2 matrix).
    m = np.zeros((len(all_labels), len(all_labels)))
    for true, pred in zip(listA, listB):
        m[labeldict[true]][labeldict[pred]] += 1
    return m
def get_confmatstring(m):
confmatstr = \
"""
predicted
p d n
true p {} {} {}
d {} {} {}
n {} {} {}
""".format(int(m[0, 0]), int(m[0, 1]), int(m[0, 2]),
int(m[1, 0]), int(m[1,1]), int(m[1,2]),
int(m[2,0]), int(m[2,1]), int(m[2,2]))
return confmatstr
def print_confmat(m):
    """Print the confusion matrix table to stdout."""
    print(get_confmatstring(m))
def get_pr_rec_string(m):
    """Format precision/recall for the positive (index 0) and negative
    (index 2) classes of a 3x3 confusion matrix as a text block."""
    # Precision divides by the predicted-class column sum; recall divides
    # by the true-class row sum.
    precision_p = m[0, 0] / (m[0, 0] + m[1, 0] + m[2, 0])
    recall_p = m[0, 0] / (m[0, 0] + m[0, 1] + m[0, 2])
    precision_n = m[2, 2] / (m[2, 2] + m[0, 2] + m[1, 2])
    recall_n = m[2, 2] / (m[2, 2] + m[2, 0] + m[2, 1])
    pr_rec_string = """
    precision p:\t{}
    recall p:\t{}
    precision n:\t{}
    recall n:\t{}
    """.format(precision_p, recall_p, precision_n, recall_n)
    return pr_rec_string
def evaluate_sentiment_analysis(m):
    """Print precision and recall for the positive (row/col 0) and
    negative (row/col 2) classes of a 3x3 confusion matrix."""
    metrics = (
        ('precision pos', m[0, 0] / (m[0, 0] + m[1, 0] + m[2, 0])),
        ('recall pos', m[0, 0] / (m[0, 0] + m[0, 1] + m[0, 2])),
        ('precision neg', m[2, 2] / (m[2, 2] + m[0, 2] + m[1, 2])),
        ('recall neg', m[2, 2] / (m[2, 2] + m[2, 0] + m[2, 1])),
    )
    for label, value in metrics:
        print('{}:\t{}'.format(label, value))
def print_error_analysis(Ypred, Ytrue, posts, m, logfilename = 'error_analysis.txt'):
    """Write the confusion matrix, precision/recall figures, and the
    misclassified posts (false negatives, then false positives) to a log file."""

    def dump_errors(log, wanted_pred, wanted_true):
        # Write every post whose (gold, prediction) pair matches the request.
        for index, (true, pred) in enumerate(zip(Ytrue, Ypred)):
            if pred == wanted_pred and true == wanted_true:
                log.write('\ntrue: {} - pred: {}\n{}\n'.format(true, pred, posts[index]))

    with open(logfilename, 'wt') as log:
        log.write('p: positive\n')
        log.write('n: negative\n')
        log.write('d: neutral\n')
        log.write('{}\n'.format(get_confmatstring(m)))
        log.write('{}\n'.format(get_pr_rec_string(m)))
        log.write('---pred = n and true != n --------------------------------------------\n\n')
        dump_errors(log, 'n', 'p')
        log.write('\n\n---pred = p and true != p --------------------------------------------\n\n')
        dump_errors(log, 'p', 'n')
# Driver: evaluate both lexicon-based classifiers against the gold file.
golden = read_csv_file(infilename)
header_golden = golden[0]  # first row is the header
topiclabels = golden[1:,1]  # gold sentiment labels (column 1)
forumposts = golden[1:,5]  # post text (column 5)
# VADER run: confusion matrix, metrics, and an error-analysis log.
vader_SA_labs = do_VADER_SA(forumposts, VADERNEGTHRES, VADERPOSTHRES)
m = make_confmat(topiclabels, vader_SA_labs)
print(get_confmatstring(m))
evaluate_sentiment_analysis(m)
print_error_analysis(vader_SA_labs, topiclabels, forumposts, m, logfilename='error_analysis_VADER.txt')
# AFINN run: same evaluation pipeline.
Afinn_SA_labs = do_afinn_SA(forumposts, AFINNNEGTHRES, AFINNPOSTHRES)
m = make_confmat(topiclabels, Afinn_SA_labs)
print(get_confmatstring(m))
evaluate_sentiment_analysis(m)
print_error_analysis(Afinn_SA_labs, topiclabels, forumposts, m, logfilename='error_analysis_AFINN.txt')
|
#coding:utf8
'''
'''
import os,sys
import urllib,urllib2
from bs4 import BeautifulSoup
from persistent_qutu import insert,dbconn,find_ele
def down_image(src, imgid):
"把图片保存到磁盘空间"
global img_dir
try:
#src = 'http://i1.taoqutu.com/2014/07/06110832568.jpg'
print '-1-',
fname = str(src.split('taoqutu.com')[1]).replace('/','-')[1:]
print '-2-',
img_path = os.path.join(img_dir, fname)
print '-3-',
urllib.urlretrieve(src, img_path)
print '-4-',
except:
print u'下面失败:src = ',src
print '-5-',
return img_path
def save_img(img, img_tag):
    # Persist an <img> tag's metadata to MongoDB and download the file,
    # skipping anything already recorded (Python 2).
    db_imgs = dbconn()
    # Every image of interest carries an id attribute.
    if img.get('id'):
        ele = dict(id = img.get('id'),
                   width = img.get('width'),
                   height = img.get('height'),
                   title = img.get('alt'),
                   src = img.get('src'),
                   tag = img_tag,
                   )
        if not find_ele(db_imgs,ele):
            insert(db_imgs, ele)
            # Download the image file to local disk.
            img_path = down_image(img.get('src'), img.get('id'))
            # Record the local file path on the stored document.
            #update({"_id": ObjectId("52818bad705d834f989b83af")},{"$set":{u'name': u'wxd1'}})
            db_imgs.update(ele, {"$set":{u'img_path': img_path}})
            print u'已保存:',img.get('src')
        else:
            print u'已经存在 :',ele
def crawler_page(soup):
    """Persist every image found in the page's item boxes."""
    for box in soup.find_all(attrs={"class": "itembox"}):
        img_info = box.find('img')
        tag_node = box.find(attrs={"class": "xinxi"})
        # The optional "xinxi" element carries the image's tag text.
        imgtag = tag_node.string if tag_node else ''
        if img_info:
            save_img(img_info, imgtag)
if __name__ == '__main__':
    print 'start...'
    # Crawl listing pages starting from the last reached position and stop
    # at end_url; every page's images are saved via crawler_page().
    urls = ['http://www.taoqutu.com/',
            'http://www.taoqutu.com/last_1000.html', # position reached last run
            ]
    end_url = 'http://www.taoqutu.com/last_1500.html'
    img_dir = 'C:/data/imgs/qutu/1000_1500/'
    url = urls[-1]
    # Download the starting page document (site is GBK-encoded).
    html_doc = urllib2.urlopen(url).read()
    html_doc = html_doc.decode('gbk').encode('utf8')
    soup = BeautifulSoup(html_doc)
    nextpage_tag = soup.find(attrs={"class": "easypagerNextpage"})
    while nextpage_tag:
        crawler_page(soup)
        # Follow the "next page" link.
        url = 'http://www.taoqutu.com' + nextpage_tag.get('href')
        if url == end_url:
            break
        print u'***正在解析 :url = ',url
        html_doc = urllib2.urlopen(url).read()
        html_doc = html_doc.decode('gbk').encode('utf8')
        soup = BeautifulSoup(html_doc)
        nextpage_tag = soup.find(attrs={"class": "easypagerNextpage"})
    #go(url)
    print 'end....'
|
import unittest
from hello_source import hello
class MyTestCase(unittest.TestCase):
    # Verify the greeting returned by hello.hello_message() matches exactly.
    def test_something(self):
        self.assertEqual("Hello, CIS 189!", hello.hello_message())
if __name__ == '__main__':
    unittest.main()
|
import random
import operator
# Characters an individual's chromosome may contain.
geen_pool = "abcçdefgğhıijklmnoöpqrsştuüvwxyzABCÇDEFGĞHIİJKLMNOÖPQRSŞTUVWXYZ 1234567890 ,.-;:_!#%&/()=?@${[]}'"
goal = "Çağıl İlhan Sözer"  # target string the GA evolves toward
global goal_length  # NOTE(review): `global` at module level is a no-op
goal_length = len(goal)
population = []  # current generation, kept sorted by fitness
mutation = False  # NOTE(review): never read anywhere below
first_population = []  # NOTE(review): never used
global say
say = 0  # NOTE(review): never used
class Individual:
    """One GA candidate: a list of characters (gen) and its fitness,
    the number of positions differing from the module-level goal."""

    def __init__(self, gen=None, fitness=None):
        # Bare Individual() creates an empty shell that the breeding code
        # fills in; any truthy flag builds a random chromosome.
        if gen is None and fitness is None:
            pass
        else:
            self.gen = self.make_gen()
            self.fitness = self.calculate_fitness()

    def make_gen(self):
        # Random chromosome: one random pool character per goal position.
        return [random.choice(geen_pool) for _ in range(goal_length)]

    def calculate_fitness(self, offspring=None):
        """Count positions differing from goal (0 = perfect match).

        Callable both as self.calculate_fitness() and, for offspring built
        field-by-field, as Individual.calculate_fitness(offspring).
        """
        target = self.gen if offspring is None else offspring.gen
        fitness = 0
        for position, char in enumerate(target):
            if char != goal[position]:
                fitness += 1
        return fitness

    def crossover(self, p1):
        """Single-point crossover of self with *p1* (10/11 chance);
        otherwise mutate self's chromosome in place and return it."""
        probability = random.randint(0, 2)
        crossover_rate = random.randint(0, 10)
        p2 = self.gen
        crossover_point = len(self.gen) // 2
        if crossover_rate < 10:
            # probability == 0 takes p1's head, otherwise self's head.
            if probability == 0:
                offspring = p1.gen[0:crossover_point] + p2[crossover_point:]
            else:
                offspring = p2[0:crossover_point] + p1.gen[crossover_point:]
        else:
            offspring = self.mutation(p2)
        return offspring

    def mutation(self, gen):
        # Replace one random position with a random pool character.
        # FIX: index by the actual chromosome length; the original used the
        # hard-coded range 0..16, which assumed a 17-character goal.
        self.gen[random.randint(0, len(self.gen) - 1)] = random.choice(geen_pool)
        return gen
def main():
    """Run the GA: seed 100 random individuals, then breed new
    generations until the best individual matches the goal (fitness 0)."""
    global population
    generation = 1
    for _ in range(100):
        population.append(Individual(gen=True, fitness=True))
    population.sort(key=lambda x: x.fitness)

    def breed(count, low, high):
        # Produce *count* children from parents drawn uniformly from
        # population[low..high] (inclusive).
        children = []
        for _ in range(count):
            child = Individual()
            mother = population[random.randint(low, high)]
            father = population[random.randint(low, high)]
            child.gen = mother.crossover(father)
            child.fitness = Individual.calculate_fitness(child)
            children.append(child)
        return children

    while True:
        print("Generation ", generation, " : ", "".join(population[0].gen),
              " Fitness: ", population[0].fitness)
        # FIX: the original assigned the misspelled `acomplish`, so its
        # `accomplish == True` success branch was dead code; test the best
        # fitness directly instead.
        if population[0].fitness == 0:
            print("Congrats!")
            print("Generation ", generation, " : ", "".join(population[0].gen))
            break
        # Elitist tiers: 50 children from the top 10 parents, 40 from
        # ranks 10-49, 10 from ranks 50-99 (replaces three copy-pasted loops).
        population = breed(50, 0, 9) + breed(40, 10, 49) + breed(10, 50, 99)
        generation += 1
        population.sort(key=lambda x: x.fitness)


main()
jogo=int(input(" "))  # single integer read from the user
v=3  # presumably points for a win — TODO confirm intent
e=2  # presumably points for a draw — TODO confirm intent
d=1  # presumably points for a loss — TODO confirm intent
x=-1  # sentinel: the loop is meant to stop when jogo == -1
cont=0
acum=0
# NOTE(review): `jogo` is never re-read inside the loop, so unless the first
# input is -1 this loop never terminates; the increment of `cont` is also
# overwritten at the top of each iteration. The intent is unclear — verify.
while(jogo!=x):
    cont=v+e+d
    acum=(cont+jogo)/100
    cont=cont+1
    print(acum)
#Implement functionality of find using find.py?(find.py /root/dirname “txt”)(use sys.argv)
#!/usr/bin/python
import os
import sys
import fnmatch
def find_file(path, my_file):
    """Recursively walk *path* and return the full paths of all files
    whose name ends with *my_file* (e.g. a "txt" suffix)."""
    result = []
    for dirpath, _dirnames, filenames in os.walk(path):
        result.extend(os.path.join(dirpath, name)
                      for name in filenames if name.endswith(my_file))
    return result
def main():
    # argv[1] = directory to search, argv[2] = filename suffix (e.g. "txt").
    path = sys.argv[1]
    my_file = sys.argv[2]
    print(find_file(path, my_file))
if __name__ == "__main__":
    main()
|
# coding=utf-8
from Deck import Deck
from Player import PlayerDeck
def war(player1_card, player2_card, player1, player2, loot):
    """Resolve a 'war' round (tied face-up cards).

    Both tied cards plus up to two more cards from each player go into the
    loot pile; the last card each player drew becomes their new face-up
    card. Returns the new face-up cards and the loot pile.
    """
    # The tied cards that triggered the war join the loot.
    loot.add_card(player1_card)
    loot.add_card(player2_card)
    # Each player contributes up to two further cards (fewer if the pile
    # runs out); the last drawn card is the new face-up card.
    for _ in range(2):
        if not player1.is_empty():
            player1_card = player1.pop_card()
            loot.add_card(player1_card)
        if not player2.is_empty():
            player2_card = player2.pop_card()
            loot.add_card(player2_card)
    return player1_card, player2_card, loot
def main():
deck = Deck() # יצירת כל הקלפים הקיימים
deck.shuffle()
player1 = PlayerDeck() # חבילה של השחקן הראשון
player2 = PlayerDeck() # חבילה של השחקן השני
loot = PlayerDeck() # חבילת השלל
# חלוקת הקלפים ל- 2 השחקנים
deck.move_cards(player1, 26)
deck.move_cards(player2, 26)
while not player1.is_empty() and not player2.is_empty():
player1_card = player1.pop_card()
player2_card = player2.pop_card()
if player1_card.__cmp__(player2_card) == 0: # יש מלחמה
player1_card, player2_card, loot = war(player1_card, player2_card, player1, player2, loot)
elif player1_card.__cmp__(player2_card) > 0: # הקלף של השחקן הראשון גדול מהקלף של השחקן השני
player1.add_card(player2_card)
loot.move_cards(player1, len(loot.cards))
player1.shuffle()
else: # הקלף של השחקן השני גדול מהקלף של השחקן הראשון
player2.add_card(player1_card)
loot.move_cards(player2, len(loot.cards))
player1.shuffle()
if player1.is_empty():
print "Player 2 has won!"
else:
print "Player 1 has won!"
if __name__ == '__main__':
main()
|
#Bonus 4: the program transforms a word or phrase so that
#'aig' is appended after every consonant
def check_if_cons(s):
    """Return True when *s* is a lowercase Latin consonant."""
    return s in 'qwrtpsdfghjklzxcvbnm'
def main():
    """Read a word or phrase and return it with 'aig' inserted after
    every consonant (a trailing space follows each word); returns an
    error message for empty input."""
    s = input('Введите слово или фразу латиницей. ')
    if s == '':
        return 'Не введены слово или фраза.'
    pieces = []
    for word in s.split():
        for ch in word:
            pieces.append(ch)
            if check_if_cons(ch):
                pieces.append('aig')
        pieces.append(' ')
    return ''.join(pieces)
if __name__ == "__main__":
    print(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.