text stringlengths 38 1.54M |
|---|
# Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
# Mutable module-level configuration: the working directory and Guild home
# reported to the rest of the package. Both fall back to defaults when unset.
_cwd = None
_guild_home = None

def set_cwd(cwd):
    """Record the directory that subsequent cwd() calls should report."""
    global _cwd
    _cwd = cwd

def set_guild_home(path):
    """Record the Guild home directory reported by guild_home()."""
    global _guild_home
    _guild_home = path

def cwd():
    """Return the configured working directory, defaulting to '.'."""
    return _cwd if _cwd else "."

def guild_home():
    """Return the configured Guild home, defaulting to ~/.guild."""
    return _guild_home if _guild_home else os.path.expanduser("~/.guild")
|
import mysql.connector

# NOTE(review): hard-coded credentials — acceptable only for a throwaway
# local setup; move to env vars/config before any shared use.
user = 'root'
password = 'password'

# DDL for the three tables, executed in order below.
BUSINESS_DDL = '''CREATE TABLE IF NOT EXISTS business (
bid VARCHAR(30),
name TEXT,
city VARCHAR(255),
state VARCHAR(10),
stars FLOAT,
review_count INT,
categories TEXT,
PRIMARY KEY (bid)
) character set utf8 collate utf8_general_ci'''

REVIEW_DDL = '''CREATE TABLE IF NOT EXISTS review (
rid VARCHAR(30),
uid VARCHAR(30),
bid VARCHAR(30),
star INT,
text TEXT,
PRIMARY KEY (rid),
FOREIGN KEY (bid) REFERENCES business(bid) ON DELETE CASCADE
) character set utf8 collate utf8_general_ci'''

TIP_DDL = '''CREATE TABLE IF NOT EXISTS tip (
text TEXT,
compliment_count INT,
bid VARCHAR(30),
uid VARCHAR(30),
FOREIGN KEY (bid) REFERENCES business(bid) ON DELETE CASCADE
) character set utf8 collate utf8_general_ci'''

conn = mysql.connector.connect(user=user, passwd=password)
try:
    cur = conn.cursor()
    # Rebuild the yelp database from scratch. DROP DATABASE already removes
    # any old tables, so the DROP TABLE guards below are belt-and-braces.
    cur.execute('DROP DATABASE IF EXISTS yelp')
    cur.execute('CREATE DATABASE IF NOT EXISTS yelp CHARACTER SET utf8 collate utf8_general_ci;')
    cur.execute('USE yelp')
    cur.execute('DROP TABLE IF EXISTS business')
    cur.execute('DROP TABLE IF EXISTS review')
    cur.execute('DROP TABLE IF EXISTS tip')
    cur.execute(BUSINESS_DDL)
    cur.execute(REVIEW_DDL)
    cur.execute(TIP_DDL)
    conn.commit()
finally:
    # BUGFIX: the connection previously leaked if any statement raised.
    conn.close()
|
from GovOpendata.apps.model import db, Base
class Government(Base):
    """Government open-data platform table (ORM model).

    Original docstring: 政府开放平台表 ("government open platform table").

    NOTE(review): __tablename__ is spelled 'governmment' (triple 'm') —
    looks like a typo, but renaming it would change the mapped table name;
    confirm against the live schema before fixing.
    """
    __tablename__ = 'governmment'
    # The `comment=` values are runtime strings sent to the DB schema;
    # English meanings are given in the `#` comments.
    # Province the platform belongs to.
    province = db.Column(db.String(255), comment="平台所属省")
    # Administrative region of the platform, e.g. Guiyang city.
    region = db.Column(db.String(255), comment="平台所属行政区域, 如贵阳市")
    # Directory name of this project's files (unique per platform).
    dir_path = db.Column(db.String(255), unique=True, comment="该工程的文件目录名")
    # Number of files collected.
    file_num = db.Column(db.Integer, comment="文件数量")
    # Total size of the files.
    file_size = db.Column(db.Integer, comment="文件总大小")
    # Number of datasets.
    dataset_num = db.Column(db.Integer, comment="数据集数量")
    # Time of collection.
    acquire_date = db.Column(db.DateTime, comment="采集时间")
|
import tensorflow as tf
from collections import namedtuple
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
import csv
import os
import pandas as pd
# Batch-normalisation configuration: whether to apply BN at all, whether we
# are in the training phase, and whether to learn the center (beta) and
# scale (gamma) terms.
BNParams = namedtuple("BNParams", ["apply", "phase", "center", "scale"])

def create_BNParams(apply=False, phase=True, center=True, scale=True):
    '''Build a BNParams configuration tuple.

    :param apply: whether batch normalization should be applied
    :param phase: True while training, False otherwise
    :param center: whether to center the data
    :param scale: whether to scale the data
    '''
    return BNParams(apply, phase, center, scale)
def leaky_relu(x, leakiness=.1, name=''):
    '''Leaky ReLU activation.

    :param x: input tensor
    :param leakiness: slope applied to the negative section
    :param name: prefix for the resulting op name ('<name>_leaky_relu')
    :return: tensor where negative entries of `x` are scaled by `leakiness`
    '''
    return tf.where(tf.less(x, 0.0), leakiness * x, x, name=name+'_leaky_relu')
def is_training(mode):
    """Return True iff `mode` is the training mode key.

    :param mode: a tf.contrib.learn.ModeKeys value
    :return: bool
    """
    # The if/else that returned literal True/False collapses to the
    # comparison itself.
    return mode == tf.contrib.learn.ModeKeys.TRAIN
def parametric_relu(_x):
    """PReLU activation with a learnable slope for the negative section.

    Creates (or reuses, depending on the surrounding TF1 variable scope) a
    variable named 'alpha' with one slope per channel (last dimension of
    `_x`), initialised to 0 — i.e. it starts out as a plain ReLU.

    :param _x: input tensor
    :return: tensor of the same shape as `_x`
    """
    alphas = tf.get_variable('alpha', _x.get_shape()[-1],
                             initializer=tf.constant_initializer(0.0),
                             dtype=tf.float32)
    pos = tf.nn.relu(_x)
    # (x - |x|) / 2 equals x for negative inputs and 0 otherwise, so this is
    # the (scaled) negative part of the activation.
    neg = alphas * (_x - abs(_x)) * 0.5
    return pos + neg
def sum_regularizer(regularizer_list, scope=None):
    """Combine several regularizers into a single one.

    Args:
        regularizer_list: regularizers to apply; `None` entries are ignored.
        scope: an optional scope name.
    Returns:
        A function `sum_reg(weights)` applying the sum of the given
        regularizers, or `None` when there is nothing to apply.
    """
    active = [r for r in regularizer_list if r is not None]
    if not active:
        return None

    def sum_reg(weights):
        """Sum of every active regularizer evaluated on `weights`."""
        return math_ops.add_n([r(weights) for r in active])

    return sum_reg
def apply_regularization(regularizer, weights_list=None):
    """Returns the summed penalty by applying `regularizer` to the `weights_list`.

    Adding a regularization penalty over the layer weights and embedding weights
    can help prevent overfitting the training data. Regularization over layer
    biases is less common/useful, but assuming proper data preprocessing/mean
    subtraction, it usually shouldn't hurt much either.

    Args:
        regularizer: A function that takes a single `Tensor` argument and returns
            a scalar `Tensor` output.
        weights_list: List of weights `Tensors` or `Variables` to apply
            `regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
            `None`.
    Returns:
        A scalar representing the overall regularization penalty.
    Raises:
        ValueError: If `regularizer` does not return a scalar output, or if we find
            no weights.
    """
    if not weights_list:
        # Fall back to everything registered in the WEIGHTS collection.
        weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
    if not weights_list:
        raise ValueError('No weights to regularize.')
    with ops.name_scope('get_regularization_penalty', values=weights_list) as scope:
        penalties = [regularizer(w) for w in weights_list]
        # Each penalty must be a scalar so add_n yields a scalar total.
        for p in penalties:
            if p.get_shape().ndims != 0:
                raise ValueError('regularizer must return a scalar Tensor instead of a '
                                 'Tensor with rank %d.' % p.get_shape().ndims)
        summed_penalty = math_ops.add_n(penalties, name=scope)
        # Also expose the total through the standard collection so training
        # loops that read REGULARIZATION_LOSSES pick it up.
        ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
        return summed_penalty
def export_to_csv(file_name, data_dic, path='./data'):
    """Export a column-oriented dict to `<path>/<file_name>.csv`.

    :param file_name: base name of the file (without extension)
    :param data_dic: mapping of column name -> sequence of column values
    :param path: target directory (created if missing)
    :return: None
    """
    full_path = os.path.join(path, file_name) + '.csv'
    os.makedirs(os.path.dirname(full_path), exist_ok=True)
    # BUGFIX: the old code built a DataFrame it never used and then fed
    # (key, value) tuples to csv.DictWriter.writerow, which expects dicts
    # and raises at runtime. DataFrame.to_csv writes the intended layout:
    # one header row of column names, then one row per index position.
    pd.DataFrame(data_dic).to_csv(full_path, index=False)
|
"""
1. 탐욕 알고리즘 이란?
Greedy algorithm 또는 탐욕 알고리즘 이라고 불리움
최적의 해에 가까운 값을 구하기 위해 사용됨
여러 경우 중 하나를 결정해야할 때마다, 매순간 최적이라고 생각되는 경우를 선택하는 방식으로 진행해서, 최종적인 값을 구하는 방식
"""
"""
문제1: 동전 문제
지불해야 하는 값이 4720원 일 때 1원 50원 100원, 500원 동전으로 동전의 수가 가장 적게 지불하시오.
가장 큰 동전부터 최대한 지불해야 하는 값을 채우는 방식으로 구현 가능
탐욕 알고리즘으로 매순간 최적이라고 생각되는 경우를 선택하면 됨
"""
coin_list = [1, 100, 50, 500]

def min_coin_count(value, coin_list):
    """Greedy coin change: pay `value` with the fewest coins.

    :param value: amount to pay
    :param coin_list: available coin denominations
    :return: (total number of coins used,
              list of [coin, count] pairs, largest denomination first)
    """
    total_coin_count = 0
    details = []
    # BUGFIX: sort a copy — the original sorted the caller's list in place,
    # reordering the module-level coin_list as a side effect.
    for coin in sorted(coin_list, reverse=True):
        coin_num, value = divmod(value, coin)
        total_coin_count += coin_num
        details.append([coin, coin_num])
    return total_coin_count, details

print(min_coin_count(4720, coin_list))
"""
문제2: 부분 배낭 문제 (Fractional Knapsack Problem)
무게 제한이 k인 배낭에 최대 가치를 가지도록 물건을 넣는 문제
각 물건은 무게(w)와 가치(v)로 표현될 수 있음
물건은 쪼갤 수 있으므로 물건의 일부분이 배낭에 넣어질 수 있음, 그래서 Fractional Knapsack Problem 으로 부름
Fractional Knapsack Problem 의 반대로 물건을 쪼개서 넣을 수 없는 배낭 문제도 존재함 (0/1 Knapsack Problem 으로 부름)
"""
data_list = [(10, 10), (15, 12), (20, 10), (25, 8), (30, 5)]

def get_max_value(data_list, capacity):
    """Fractional knapsack: maximise value within a weight limit.

    Items may be split, so the greedy pick by value density is optimal.

    :param data_list: list of (weight, value) tuples
    :param capacity: weight limit of the knapsack
    :return: (total value, list of [(weight, value), fraction-taken] pairs)
    """
    total_value = 0
    details = []
    # Most valuable per unit weight first.
    for item in sorted(data_list, key=lambda d: d[1] / d[0], reverse=True):
        weight, value = item
        if capacity >= weight:
            # Whole item fits.
            capacity -= weight
            total_value += value
            details.append([item, 1])
        else:
            # BUGFIX: when capacity is exactly exhausted the old code still
            # appended a [item, 0.0] entry; now we only record a fraction
            # when some capacity remains.
            if capacity > 0:
                fraction = capacity / weight
                total_value += value * fraction
                details.append([item, fraction])
            break
    return total_value, details

print(get_max_value(data_list, 30))
|
import curses.ascii
import os
import npyscreen
import yaml
from domg.configurator import EditRecord
from domg.dockerfile import finder
__author__ = 'cosmin'
class ImageList(npyscreen.MultiLineAction):
    """Action list of Docker images; ESC goes back, Ctrl-Q quits."""

    def __init__(self, *args, **keywords):
        super(ImageList, self).__init__(*args, **keywords)
        handlers = {
            curses.ascii.ESC: self.parent.parentApp.switchFormPrevious,
            "^Q": quit,
        }
        self.add_handlers(handlers)

    def display_value(self, vl):
        # Show only the directory portion of the stored path.
        return os.path.dirname(vl)

    def actionHighlighted(self, act_on_this, keypress):
        # Open the selected record in the editor form.
        app = self.parent.parentApp
        app.getForm('EDITRECORDFM').value = act_on_this[0]
        app.switchForm('EDITRECORDFM')
class ContainerList(npyscreen.MultiLineAction):
    """Action list of configured containers.

    Ctrl-A adds a record, Ctrl-D deletes the highlighted one, Ctrl-Q quits.
    """

    def __init__(self, *args, **keywords):
        super(ContainerList, self).__init__(*args, **keywords)
        handlers = {
            "^A": self.when_add_record,
            "^D": self.when_delete_record,
            "^Q": quit,
        }
        self.add_handlers(handlers)

    def display_value(self, vl):
        # Show only the directory portion of the stored path.
        return os.path.dirname(vl)

    def actionHighlighted(self, act_on_this, keypress):
        # Open the selected record in the editor form.
        app = self.parent.parentApp
        app.getForm('EDITRECORDFM').value = act_on_this[0]
        app.switchForm('EDITRECORDFM')

    def when_add_record(self, *args, **keywords):
        # A value of None tells the editor form to create a new record.
        app = self.parent.parentApp
        app.getForm('EDITRECORDFM').value = None
        app.switchForm('EDITRECORDFM')

    def when_delete_record(self, *args, **keywords):
        # Delete the record under the cursor and refresh the list.
        record = self.values[self.cursor_line][0]
        self.parent.parentApp.myDatabase.delete_record(record)
        self.parent.update_list()
class RecordListDisplay(npyscreen.FormMutt):
    """Main form: lists the configured containers with key-binding hints."""
    MAIN_WIDGET_CLASS = ContainerList

    def beforeEditing(self):
        """Refresh the list and the status bars each time the form is shown."""
        self.update_list()
        self.wStatus1.value = "Configured containers:"
        self.wStatus1.update()
        self.wStatus2.value = "Ctrl-A to add, Ctrl-D to delete, Ctrl-Q to quit"
        self.wStatus2.update()

    def update_list(self):
        """Reload the container list from the application's configuration."""
        config = self.parentApp.config
        if 'containers' in config:
            containers = config['containers']
        else:
            containers = []
        # BUGFIX: this assignment was duplicated; once is enough.
        self.wMain.values = containers
        self.wMain.display()
class DockerManager(npyscreen.NPSAppManaged):
    """npyscreen application tying the container list and record editor together."""

    def __init__(self, images_path, config_file):
        """Load the image list and the configuration.

        :param images_path: directory scanned for Docker images
        :param config_file: configuration file path (created empty if absent)
        :raises Exception: if `images_path` is not a directory
        """
        if not os.path.isdir(images_path):
            raise Exception('Images path is not a directory: %s' % images_path)
        self.images_path = images_path
        self.image_list = finder(images_path)
        self.config_file = config_file
        try:
            # NOTE(review): `Configuration` is not in this module's visible
            # imports — confirm where it is defined.
            self.config = Configuration(config_file)
        except IOError:
            # Missing config file: create an empty one, then retry.
            with open(config_file, "w+"):
                pass
            self.config = Configuration(config_file)
        # BUGFIX: `print self.config.items()` was Python 2 statement syntax —
        # a SyntaxError under Python 3. (Consider removing this debug print
        # entirely: stdout interferes with curses UIs.)
        print(self.config.items())
        super(DockerManager, self).__init__()

    def onStart(self):
        """Register the application's forms."""
        self.addForm("MAIN", RecordListDisplay, name="CEva")
        self.addForm("EDITRECORDFM", EditRecord)
|
from datetime import datetime
import time
from station_code import StationCode
from color import Color
class Utility(object):
    """Helpers for the 12306 ticket-query CLI: colour wrappers, station-code
    lookup, and date/duration formatting and validation."""

    @classmethod
    def red_color(cls, code):
        """Wrap `code` in red terminal colouring."""
        return Color.red(code)

    @classmethod
    def green_color(cls, code):
        """Wrap `code` in green terminal colouring."""
        return Color.green(code)

    # Convert an 'HH:MM' duration into the Chinese 'HH时MM分' form.
    @classmethod
    def get_duration(cls, time_str):
        """Format a journey duration.

        :param time_str: duration as 'HH:MM'
        :return: 'HH时MM分', with a zero-hour prefix ('00时') stripped
        """
        duration = time_str.replace(':', '时') + '分'
        if duration.startswith('00'):
            # BUGFIX: '00时' is three characters, so strip 3 — the old
            # [4:] also dropped the first digit of the minutes.
            return duration[3:]
        return duration

    # Look up the telegraph code for a station name.
    @classmethod
    def get_station_code(cls, station):
        """Return the station's telegraph code, or None if unknown."""
        codes_dict = StationCode().get_codes_dict()
        return codes_dict.get(station)

    # Prompt for a departure/destination station.
    @classmethod
    def input_station(cls, code):
        """Ask the user for a station name until a known one is entered.

        :param code: prompt label shown to the user
        :return: a station name present in the station-code table
        """
        codes_dict = StationCode().get_codes_dict()
        station = input('{}:\n'.format(code))
        # Loop until valid (the original re-asked only once and could
        # return an unknown station).
        while station not in codes_dict:
            print(Color.red('Error:车站列表里无法查询到{}'.format(station)))
            station = input('{}:\n'.format(code))
        return station

    # Prompt for the travel date.
    @classmethod
    def input_train_date(cls):
        """Ask the user for a travel date until it parses and is bookable.

        :return: a normalised 'YYYY-MM-DD' date within the pre-sale window
        """
        while True:
            train_date = input('请输入购票时间,格式为2019-05-01:\n')
            try:
                time.strptime(train_date, "%Y-%m-%d")
            except ValueError:
                # BUGFIX: was a bare `except:`; only bad formats re-prompt.
                print('时间格式错误,请重新输入')
                continue
            time_flag, train_date = Utility.check_date(train_date)
            if time_flag:
                return train_date

    @classmethod
    def get_train_date(cls, date_str):
        """Convert 'YYYYMMDD' into 12306's JS-style date string.

        e.g. 'Wed Aug 22 2018 00:00:00 GMT+0800 (China Standard Time)'
        """
        # String -> struct_time -> timestamp -> local struct_time, then
        # reformat with the fixed GMT+0800 suffix 12306 expects.
        time_array = time.strptime(date_str, "%Y%m%d")
        timestamp = time.mktime(time_array)
        time_local = time.localtime(timestamp)
        gmt_format = '%a %b %d %Y %H:%M:%S GMT+0800 (China Standard Time)'
        return time.strftime(gmt_format, time_local)

    # Return the Chinese weekday name for a date.
    @classmethod
    def get_week_day(cls, date):
        """Return 周一..周天 for a 'YYYY-MM-DD' date string."""
        week_day_dict = {
            0: '周一',
            1: '周二',
            2: '周三',
            3: '周四',
            4: '周五',
            5: '周六',
            6: '周天',
        }
        day = datetime.strptime(date, '%Y-%m-%d').weekday()
        return week_day_dict[day]

    # Convert 'YYYY-MM-DD' into the Chinese 'M月D日' form.
    @classmethod
    def get_date_format(cls, date):
        """Format a date for display.

        :param date: date string like '2019-05-01'
        :return: e.g. '5月1日' (leading zeros removed)
        """
        date_list = date.split('-')
        # BUGFIX: the old code only filled month/day for zero-padded values,
        # so '2019-10-21' came back as '月日'; lstrip('0') removes leading
        # zeros only and keeps '10'/'21' intact.
        month = date_list[1].lstrip('0')
        day = date_list[2].lstrip('0')
        return '{}月{}日'.format(month, day)

    # Check that the requested travel date is inside the booking window.
    @classmethod
    def check_date(cls, date):
        """Validate a travel date against today and the pre-sale deadline.

        :param date: date string 'YYYY-M-D' or 'YYYY-MM-DD'
        :return: (True, normalised 'YYYY-MM-DD') when bookable,
                 otherwise (False, None) after printing an error
        """
        local_time = time.localtime()
        local_date = '%04d-%02d-%02d' % (local_time.tm_year, local_time.tm_mon, local_time.tm_mday)
        current_time_stamp = int(time.time())
        # Pre-sale window: 2505600 s = 29 days.
        dead_time_stamp = current_time_stamp + 2505600
        dead_time = time.localtime(dead_time_stamp)
        dead_date = '%04d-%02d-%02d' % (dead_time.tm_year, dead_time.tm_mon, dead_time.tm_mday)
        # Normalise the user's date (e.g. 2018-8-7 -> 2018-08-07).
        train_time_structure = time.strptime(date, "%Y-%m-%d")
        train_time_stamp = int(time.mktime(train_time_structure))
        train_time = time.localtime(train_time_stamp)
        train_date = '%04d-%02d-%02d' % (train_time.tm_year, train_time.tm_mon, train_time.tm_mday)
        if current_time_stamp <= train_time_stamp <= dead_time_stamp:
            return True, train_date
        print(Color.red('Error:您输入的乘车日期:{}, 当前系统日期:{}, 预售截止日期:{}'.format(train_date, local_date, dead_date)))
        return False, None
|
from tkinter import ttk
from tkinter import filedialog
import tkglobals as tkg
import tkinter as tk
import tkutils as tku
import sharing
class ExportDbScreen(ttk.Frame):
    """ Model the screen where a user can export his database. """

    def __init__(self, parent, us):
        """ Initialize the frame.
        :param parent: the controller of the frame
        :param us: the User who owns the database
        """
        ttk.Frame.__init__(self, parent)
        self.controller = parent
        self.us = us
        tku.prepare_centering(self)
        # All widgets live in this container so the whole screen centres
        # as one unit.
        self.container = ttk.Frame(self)
        self.container.grid(row=1, column=1, sticky='nsew', pady=(30, 0))
        # Chosen export file path; empty until the user picks one.
        self.path = tk.StringVar()
        self.path.set('')
        self.place_main_gui()
        self.place_button_gui()

    def place_main_gui(self):
        """ Place the simple GUI objects in the frame.

        Builds, top to bottom: title, chosen-path label, two password
        entries with labels, and a red error label.  (UI strings are
        Romanian and part of runtime behaviour — left untranslated.)
        """
        cont = self.container
        self.title = ttk.Label(cont, text='Exportă baza de date')
        self.title.config(font=tkg.title_font())
        self.title.grid(row=0, column=0, sticky='w')
        # Bound to self.path so it updates when a file is chosen.
        self.path_label = ttk.Label(cont, textvariable=self.path)
        self.path_label.config(font=tkg.small_regular_font())
        self.path_label.config(wraplength=220, justify=tk.CENTER)
        self.path_label.grid(row=2, column=0, pady=(10, 0))
        self.label1 = ttk.Label(cont, text='Alege o parolă')
        self.label1.config(font=tkg.regular_font())
        self.label1.grid(row=3, column=0, sticky='w', pady=(15, 0))
        # show='*' masks the typed password.
        self.pass_entry1 = ttk.Entry(cont, show='*')
        self.pass_entry1.config(font=tkg.regular_font())
        self.pass_entry1.grid(row=4, column=0, sticky='ew', pady=(5, 0))
        self.label2 = ttk.Label(cont, text='Parolă (din nou)')
        self.label2.config(font=tkg.regular_font())
        self.label2.grid(row=5, column=0, sticky='w', pady=(10, 0))
        self.pass_entry2 = ttk.Entry(cont, show='*')
        self.pass_entry2.config(font=tkg.regular_font())
        self.pass_entry2.grid(row=6, column=0, sticky='ew', pady=(5, 0))
        self.error_label = tk.Label(cont, text='')
        self.error_label.config(font=tkg.small_regular_font(), fg='red')
        self.error_label.grid(row=7, column=0, pady=(10, 0))

    def place_button_gui(self):
        """ Place the button-related GUI in the frame. """
        cont = self.container
        style = ttk.Style()
        style.configure('EDS.TButton', font=tkg.button_regular_font_tuple())
        self.choose_button = ttk.Button(cont, text='Alege numele fișierului')
        self.choose_button.config(style='EDS.TButton', command=self.choose_click)
        self.choose_button.grid(row=1, column=0, sticky='ew', pady=(30, 0))
        # This container is used for centering.
        but_cont = ttk.Frame(cont)
        but_cont.grid(row=8, column=0)
        tku.prepare_centering(but_cont)
        # This container actually holds the buttons.
        but_cont2 = ttk.Frame(but_cont)
        but_cont2.grid(row=1, column=1, pady=(10, 0))
        self.back_button = ttk.Button(but_cont2, text='Înapoi')
        self.back_button.config(style='EDS.TButton', command=self.back_click)
        self.back_button.grid(row=0, column=0, padx=5)
        self.export_button = ttk.Button(but_cont2, text='Exportă')
        self.export_button.config(style='EDS.TButton', command=self.export_click)
        self.export_button.grid(row=0, column=1, padx=5)

    def choose_click(self):
        """ Open a file dialog to choose a filename.

        Stores the choice (with a '.db' extension enforced) in self.path;
        a cancelled dialog leaves the previous choice untouched.
        """
        choice = filedialog.asksaveasfilename(title='Alege numele fișierului')
        if not choice:
            return
        if not choice.endswith('.db'):
            choice += '.db'
        self.path.set(choice)

    def back_click(self):
        """ Go to the previous screen. """
        self.controller.show_user_menu_screen(self.us)

    def export_click(self):
        """ Export the database at the chosen path.

        Validates that a path was chosen and that both password entries
        match before delegating to sharing.export_database; feedback is
        shown via the error label.
        """
        path = self.path.get()
        if not path:
            self.error_label.config(text='Alege numele fișierului.')
            return
        pass1 = self.pass_entry1.get()
        pass2 = self.pass_entry2.get()
        if pass1 == '' or pass2 == '':
            self.error_label.config(text='Introdu parolele.')
            return
        if pass1 != pass2:
            self.error_label.config(text='Parolele nu corespund.')
            return
        sharing.export_database(self.us, path, pass1)
        self.error_label.config(text='Am creat fișierul.')
|
print("Start of program")
count = 0 #making a index name or variable
inputList = [""]*100 #initializing the name of the list
num = input("Enter a number or quit:")
#inputList = [""]*len(word)
while(num != "quit"):
num = int(num)
inputList[count] = num #populat ing the list(or putting the inputs into the list)
count = count + 1 #increasing the count by 1(iteration)
num = input("Enter a number or quit:")
print(inputList) #printing the list
print("End of program")
|
nodes = 4
graph = [[0, 1], [1, 2], [2, 0], [1, 3]]

# Build an undirected adjacency list from the edge list.
adjacency = {}
for u, v in graph:
    adjacency.setdefault(u, []).append(v)
    adjacency.setdefault(v, []).append(u)
graph = adjacency

time = 1            # global DFS timestamp
dis = [-1] * nodes  # discovery time of each node
low = [-1] * nodes  # lowest discovery time reachable from the node's subtree
ret = []            # collected bridges as [child, parent] pairs
vis = set()         # visited nodes

def dfs(child, parent=-1):
    """Tarjan-style bridge-finding DFS from `child`, reached via `parent`."""
    global time
    dis[child] = low[child] = time
    time += 1
    vis.add(child)
    for nxt in graph[child]:
        # Skip the edge we arrived on and any self-loop.
        if nxt == parent or nxt == child:
            continue
        if nxt in vis:
            # Back edge: may lower our low-link via the neighbour's
            # discovery time.
            low[child] = min(low[child], dis[nxt])
        else:
            dfs(nxt, child)
            low[child] = min(low[child], low[nxt])
            # No path from nxt's subtree back above child => bridge.
            if low[nxt] > dis[child]:
                ret.append([nxt, child])

for start in range(nodes):
    if start not in vis:
        dfs(start)
print(ret)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import DatadogAgreementProperties
from ._models_py3 import DatadogAgreementResource
from ._models_py3 import DatadogAgreementResourceListResponse
from ._models_py3 import DatadogApiKey
from ._models_py3 import DatadogApiKeyListResponse
from ._models_py3 import DatadogHost
from ._models_py3 import DatadogHostListResponse
from ._models_py3 import DatadogHostMetadata
from ._models_py3 import DatadogInstallMethod
from ._models_py3 import DatadogLogsAgent
from ._models_py3 import DatadogMonitorResource
from ._models_py3 import DatadogMonitorResourceListResponse
from ._models_py3 import DatadogMonitorResourceUpdateParameters
from ._models_py3 import DatadogOrganizationProperties
from ._models_py3 import DatadogSetPasswordLink
from ._models_py3 import DatadogSingleSignOnProperties
from ._models_py3 import DatadogSingleSignOnResource
from ._models_py3 import DatadogSingleSignOnResourceListResponse
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorDetail
from ._models_py3 import ErrorResponse
from ._models_py3 import FilteringTag
from ._models_py3 import IdentityProperties
from ._models_py3 import LinkedResource
from ._models_py3 import LinkedResourceListResponse
from ._models_py3 import LogRules
from ._models_py3 import MetricRules
from ._models_py3 import MonitorProperties
from ._models_py3 import MonitorUpdateProperties
from ._models_py3 import MonitoredResource
from ._models_py3 import MonitoredResourceListResponse
from ._models_py3 import MonitoringTagRules
from ._models_py3 import MonitoringTagRulesListResponse
from ._models_py3 import MonitoringTagRulesProperties
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationResult
from ._models_py3 import ResourceSku
from ._models_py3 import SystemData
from ._models_py3 import UserInfo
except (SyntaxError, ImportError):
from ._models import DatadogAgreementProperties # type: ignore
from ._models import DatadogAgreementResource # type: ignore
from ._models import DatadogAgreementResourceListResponse # type: ignore
from ._models import DatadogApiKey # type: ignore
from ._models import DatadogApiKeyListResponse # type: ignore
from ._models import DatadogHost # type: ignore
from ._models import DatadogHostListResponse # type: ignore
from ._models import DatadogHostMetadata # type: ignore
from ._models import DatadogInstallMethod # type: ignore
from ._models import DatadogLogsAgent # type: ignore
from ._models import DatadogMonitorResource # type: ignore
from ._models import DatadogMonitorResourceListResponse # type: ignore
from ._models import DatadogMonitorResourceUpdateParameters # type: ignore
from ._models import DatadogOrganizationProperties # type: ignore
from ._models import DatadogSetPasswordLink # type: ignore
from ._models import DatadogSingleSignOnProperties # type: ignore
from ._models import DatadogSingleSignOnResource # type: ignore
from ._models import DatadogSingleSignOnResourceListResponse # type: ignore
from ._models import ErrorAdditionalInfo # type: ignore
from ._models import ErrorDetail # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import FilteringTag # type: ignore
from ._models import IdentityProperties # type: ignore
from ._models import LinkedResource # type: ignore
from ._models import LinkedResourceListResponse # type: ignore
from ._models import LogRules # type: ignore
from ._models import MetricRules # type: ignore
from ._models import MonitorProperties # type: ignore
from ._models import MonitorUpdateProperties # type: ignore
from ._models import MonitoredResource # type: ignore
from ._models import MonitoredResourceListResponse # type: ignore
from ._models import MonitoringTagRules # type: ignore
from ._models import MonitoringTagRulesListResponse # type: ignore
from ._models import MonitoringTagRulesProperties # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OperationResult # type: ignore
from ._models import ResourceSku # type: ignore
from ._models import SystemData # type: ignore
from ._models import UserInfo # type: ignore
from ._microsoft_datadog_client_enums import (
CreatedByType,
LiftrResourceCategories,
ManagedIdentityTypes,
MarketplaceSubscriptionStatus,
MonitoringStatus,
ProvisioningState,
SingleSignOnStates,
TagAction,
)
__all__ = [
'DatadogAgreementProperties',
'DatadogAgreementResource',
'DatadogAgreementResourceListResponse',
'DatadogApiKey',
'DatadogApiKeyListResponse',
'DatadogHost',
'DatadogHostListResponse',
'DatadogHostMetadata',
'DatadogInstallMethod',
'DatadogLogsAgent',
'DatadogMonitorResource',
'DatadogMonitorResourceListResponse',
'DatadogMonitorResourceUpdateParameters',
'DatadogOrganizationProperties',
'DatadogSetPasswordLink',
'DatadogSingleSignOnProperties',
'DatadogSingleSignOnResource',
'DatadogSingleSignOnResourceListResponse',
'ErrorAdditionalInfo',
'ErrorDetail',
'ErrorResponse',
'FilteringTag',
'IdentityProperties',
'LinkedResource',
'LinkedResourceListResponse',
'LogRules',
'MetricRules',
'MonitorProperties',
'MonitorUpdateProperties',
'MonitoredResource',
'MonitoredResourceListResponse',
'MonitoringTagRules',
'MonitoringTagRulesListResponse',
'MonitoringTagRulesProperties',
'OperationDisplay',
'OperationListResult',
'OperationResult',
'ResourceSku',
'SystemData',
'UserInfo',
'CreatedByType',
'LiftrResourceCategories',
'ManagedIdentityTypes',
'MarketplaceSubscriptionStatus',
'MonitoringStatus',
'ProvisioningState',
'SingleSignOnStates',
'TagAction',
]
|
from math import tan, sin, asin, pi, atan,cos
def galCoord(alphaHourFormat, deltaDeg):
    """Compute a galactic longitude (radians) from equatorial coordinates.

    :param alphaHourFormat: right ascension as [hours, minutes, seconds]
    :param deltaDeg: declination in degrees
    :return: galactic longitude l, in radians

    NOTE(review): atan() only covers (-pi/2, pi/2); the standard conversion
    uses atan2 on (numerator, denominator) to recover the correct quadrant —
    verify the output against a catalogued object before trusting it.
    NOTE(review): the RA terms (hours/24 etc.) carry no 2*pi factor, and
    pi*(192/180 + 25/60) is not 192°25' in radians — the unit handling in a1
    looks inconsistent; double-check the constants.
    """
    a1 = pi * (192.0 / 180 + 25.0 / 60) - alphaHourFormat[0] / 24 - alphaHourFormat[1] / (24 * 60) - alphaHourFormat[2] / (24 * 3600)
    # Declination of the north galactic pole (27°04') in radians.
    a2 = (27.0 / 180.0 + 4.0 / 60.0)* pi
    a2 = (27.0 / 180.0 + 4.0 / 60.0)* pi if False else a2  # no-op; see notes above
    l = 303.0/180.0 * pi - atan(sin(a1) / (cos(a1) * sin(a2) - tan(deltaDeg * pi / 180.0) * cos(a2)))
    return l
print(galCoord([13,42,11],28)) # NOTE(review): labelled M33, but the commented NGC7006 call below has identical arguments — one label is wrong
#print(galCoord([13,42,11],28)) #NGC7006
|
import numpy as np
import itertools
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
import segmentation_models_pytorch as smp
class CNNModel(nn.Module):
    """3D-CNN over a bag of images, combined with patient annotations.

    forward() flattens the conv output into fc1 (expects 78400 features, so
    the input spatial size is fixed by construction), reduces to 16 features,
    concatenates 3 annotation scalars, and maps the 19 values to one
    sigmoid probability.
    """
    def __init__(self):
        super(CNNModel, self).__init__()
        self.conv_layer1 = self._conv_layer_set(3, 4, 3)
        self.conv_layer2 = self._conv_layer_set(4, 8, 3)
        self.conv_layer3 = self._conv_layer_set(8, 8, 3)
        self.fc1 = nn.Linear(78400, 2048)
        # NOTE(review): fc and batch are never used in forward() — confirm
        # before removing; deleting them changes the state_dict layout.
        self.fc = nn.Linear(2048*5, 2048)
        self.fc2 = nn.Linear(2048, 128)
        self.fc3 = nn.Linear(128, 16)
        # 19 = 16 image features + gender + age + lymph count.
        self.fc4 = nn.Linear(19, 1)
        self.relu = nn.LeakyReLU()
        self.sigmoid = nn.Sigmoid()
        self.batch = nn.BatchNorm1d(128)
        self.drop = nn.Dropout(p=0.2)

    def _conv_layer_set(self, in_c, out_c, maxpool_size):
        """Conv3d (1 x k x k, no padding) + LeakyReLU + 1 x 3 x 3 max-pool."""
        conv_layer = nn.Sequential(
            nn.Conv3d(
                in_c, out_c, kernel_size=(1, maxpool_size, maxpool_size), padding=0
            ),
            nn.LeakyReLU(),
            nn.MaxPool3d((1, 3, 3)),
        )
        return conv_layer

    def forward(self, bag, annotation):
        """Score one bag.

        :param bag: 5D image tensor fed to the Conv3d stack
        :param annotation: dict with 'GENDER', 'AGE', 'LYMPH_COUNT' tensors
        :return: sigmoid probability, shape (batch, 1)
        """
        # Set 1
        out = self.conv_layer1(bag)
        out = self.conv_layer2(out)
        out = self.conv_layer3(out)
        #print(out.shape)
        out = out.view(out.size(0), -1)
        #print(out.shape)
        out = self.fc1(out)
        # NOTE(review): no activation between fc1 and fc2 — confirm intended.
        out = self.fc2(out)
        out = self.relu(out)
        out = self.drop(out)
        out = self.fc3(out)
        out = self.relu(out)
        gender = annotation["GENDER"].unsqueeze(1).float()
        age = annotation["AGE"].unsqueeze(1).float()
        lymph_count = annotation["LYMPH_COUNT"].unsqueeze(1).float()
        # Append the three clinical scalars to the image features.
        output = torch.cat((out, gender, age, lymph_count), 1)
        # out = self.batch(out)
        # out = self.drop(out)
        out = self.fc4(output)
        return self.sigmoid(out)
class TransferUNet(nn.Module):
    """Classifier reusing a pretrained U-Net (resnet18) encoder.

    Each image in the batch's bag is passed through the encoder; the deepest
    feature maps are stacked, pooled and reduced to 16 features, then
    combined with 3 annotation scalars into one sigmoid probability.
    """
    def __init__(self):
        super(TransferUNet, self).__init__()
        self.UNet = smp.Unet(
            encoder_name="resnet18",        # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
            encoder_weights="imagenet",     # use `imagenet` pre-trained weights for encoder initialization
            in_channels=3,                  # model input channels (1 for gray-scale images, 3 for RGB, etc.)
            classes=3,                      # model output channels (number of classes in your dataset)
        )
        # Only the encoder half is actually used below.
        self.encoder = self.UNet.encoder
        self.fc = nn.Sequential(
            nn.MaxPool3d(5),
            nn.Flatten(),
            # 20_400 must match the pooled encoder output size — tied to the
            # input image size; assumes the dataset's fixed resolution.
            nn.Linear(20_400, 2048),
            nn.ReLU(),
            nn.Linear(2048, 128),
            nn.ReLU(),
            nn.Linear(128, 16),
            nn.ReLU(),
        )
        # 19 = 16 image features + gender + age + lymph count.
        self.combine_image_and_annotation = nn.Sequential(
            nn.Linear(19, 16),
            nn.ReLU(),
            nn.Linear(16, 1),
            nn.Sigmoid(),
        )

    def forward(self, bag, annotation):
        """Score one batch of bags.

        :param bag: 5D tensor (batch, items, C, H, W); other ranks rejected
        :param annotation: dict with 'GENDER', 'AGE', 'LYMPH_COUNT' tensors
        :return: sigmoid probability, shape (batch, 1)
        """
        if bag.dim() == 5:
            # Encode each bag separately; keep only the deepest feature map.
            bag_tuple = torch.unbind(bag)
            encoded_bag_tuple = list(
                self.encoder(bag_unique)[-1] for bag_unique in bag_tuple
            )
            encoded_bag = torch.stack(encoded_bag_tuple)
        else:
            raise Exception("Works only with batch of data")
        output_image = self.fc(encoded_bag)
        gender = annotation["GENDER"].unsqueeze(1).float()
        age = annotation["AGE"].unsqueeze(1).float()
        lymph_count = annotation["LYMPH_COUNT"].unsqueeze(1).float()
        output = torch.cat((output_image, gender, age, lymph_count), 1)
        output = self.combine_image_and_annotation(output)
        return output
# Inspired by https://arxiv.org/pdf/1802.04712.pdf
class Attention(nn.Module):
def __init__(self):
super(Attention, self).__init__()
self.conv_layer1 = self._conv_layer_set(3, 4, 3)
self.conv_layer2 = self._conv_layer_set(4, 8, 3)
self.conv_layer3 = self._conv_layer_set(8, 8, 3)
self.attention = nn.Sequential(
nn.Linear(49, 8),
nn.Tanh(),
nn.Linear(8, 1)
)
self.fc = nn.Sequential(
nn.Linear(1600, 512),
nn.Dropout(0.3),
nn.LeakyReLU(),
nn.Linear(512, 64),
nn.Dropout(0.3),
nn.LeakyReLU(),
nn.Linear(64, 16),
nn.Dropout(0.3),
nn.LeakyReLU()
)
self.combine_image_and_annotation = nn.Sequential(
nn.Linear(19, 16),
nn.ReLU(),
nn.Linear(16, 1),
nn.Sigmoid(),
)
def _conv_layer_set(self, in_c, out_c, maxpool_size):
conv_layer = nn.Sequential(
nn.Conv3d(
in_c, out_c, kernel_size=(1, maxpool_size, maxpool_size), padding=0
),
nn.LeakyReLU(),
nn.MaxPool3d((1, 3, 3)),
)
return conv_layer
def forward(self, bag, annotation):
# Set 1
out = self.conv_layer1(bag)
out = self.conv_layer2(out)
out = self.conv_layer3(out)
#print(out.shape)
out = out.view(out.size(0), out.size(1), out.size(2), 7*7)
#print(out.shape)
out = self.attention(out)
#print(out.shape)
out = out.view(out.size(0), -1)
out = self.fc(out)
#print(out.shape)
gender = annotation["GENDER"].unsqueeze(1).float()
age = annotation["AGE"].unsqueeze(1).float()
lymph_count = annotation["LYMPH_COUNT"].unsqueeze(1).float()
output = torch.cat((out, gender, age, lymph_count), 1)
out = self.combine_image_and_annotation(output)
return out
"""
def tryout(nn.Module):
def __init__(self):
super(tryout, self).__init__()
self.conv_layer1 = nn.Conv3d(in_c, out_c, kernel_size=(1, 3, 3), padding=0),
"""
# [32, 3, 1, 3, 3], but got 4-dimensional input of size [200, 224, 224, 3]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 11:51:43 2019
@author: Kwoks
"""
import re
import sys
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
from aip import AipNlp
import sqlalchemy as sq
from configparser import ConfigParser
sys.path.append('../other_function/')
import other_function
#mainly deal with the data table containg the certain metal and exclude sentences without certain metal
def preprocess(met, met_cn, df):
    '''Keep only sentences that actually mention the given metal.

    :param met: str, the metal symbol, like 'Cu'
    :param met_cn: str, the Chinese name of the metal
    :param df: dataframe, the original dataframe extracted from step2
    :return: the same dataframe with a '<met>_new_action' column added
    '''
    filtered_rows = []
    for text in df['{}_action'.format(met)]:
        # Split on Latin or Chinese full stops; keep sentences that contain
        # the Chinese metal name but not the space-padded form.
        sentences = re.split('\.|。', text)
        kept = [s for s in sentences if met_cn in s and ' {} '.format(met_cn) not in s]
        filtered_rows.append(kept)
    df['{}_new_action'.format(met)] = filtered_rows
    return df
#mainly extract the data we need
def get_metal_df(met, recommend):
    '''Slice the recommend table down to the columns for one metal.

    :param met: str, e.g. Cu, Pb
    :param recommend: df, the total recommend df in the database (step 2)
    :return: cleaned dataframe sorted by published_date
    '''
    wanted = ['url', 'company', 'news_type', 'published_date',
              'date', 'title', met + '_fact', met + '_action']
    df = recommend[wanted].copy()
    df.dropna(inplace=True)
    # Drop the first line of the action text and re-join with '。'.
    action_col = met + '_action'
    df[action_col] = df[action_col].apply(lambda text: '。'.join(text.split('\n')[1:]))
    df.sort_values(by=['published_date'], inplace=True)
    df.reset_index(inplace=True, drop=True)
    return df
#build the new table, (need to be update)
def build_sentiment_article(con, met):
    '''Create the `<met>_sentiment` table for per-article sentiment results.

    The table mirrors the recommend columns (url as primary key, an
    auto-increment id, article metadata, the metal's fact/action text) and
    adds `Sentiment` (per-sentence results) plus `Sentiment_article`
    (the combined per-article score).

    :param con: object, connection/engine used to execute DDL on the database
    :param met: str, the metal name, like 'Cu' or 'Zn'
    '''
    # The DDL below is a runtime SQL string; left byte-for-byte unchanged.
    con.execute("CREATE TABLE `{}`(`url` varchar(700) NOT NULL,`id` int(11) NOT NULL AUTO_INCREMENT,`company` varchar(20) DEFAULT NULL,`news_type` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,`published_date` datetime DEFAULT NULL,`date` datetime DEFAULT NULL,`title` varchar(100) DEFAULT NULL,`{}_fact` mediumtext COMMENT '\n',`{}_action` mediumtext CHARACTER SET utf8 COLLATE utf8_general_ci,`{}_new_action` mediumtext CHARACTER SET utf8 COLLATE utf8_general_ci, `Sentiment` mediumtext CHARACTER SET utf8 COLLATE utf8_general_ci, `Sentiment_article` FLOAT NULL,PRIMARY KEY (`url`),KEY(`id`));".format('{}_sentiment'.format(met), met, met, met))
# Normalize column dtypes before writing to MySQL; use with care.
def change_type(df):
    '''
    :param df: DataFrame whose non-date, non-score columns should become str
    :return: the same DataFrame, mutated in place, with those columns cast
    '''
    keep_as_is = ('published_date', 'date', 'Sentiment_article')
    for name in df.columns:
        if name in keep_as_is:
            continue
        df[name] = df[name].apply(str)
    return df
# Use the aip client to score each sentence's sentiment.
def calculate_sentiment(inputs, client, clean_words=None):
    '''
    :param inputs: list of str; each element is one sentence to score
    :param client: aip NLP client exposing sentimentClassify(text)
    :param clean_words: optional list of extra substrings to strip first
    :return result: list of per-sentence result dicts from the aip
    :return error: list of error strings raised while scoring

    BUG FIX: the original wrapped the whole loop in one try/except, so the
    first API failure silently dropped every remaining sentence. Errors are
    now handled per sentence and scoring continues.
    '''
    result = []
    error = []
    # Characters that confuse the API (BOM, bullet, stray Á) are stripped.
    strip_chars = [u'\xc1', u'\u2022', u'\ufeff']
    if clean_words:
        strip_chars += clean_words
    for sentence in inputs:
        for ch in strip_chars:
            sentence = sentence.replace(ch, "")
        try:
            output = client.sentimentClassify(sentence)
            result.append(output['items'][0])
        except Exception as e:
            error.append(str(e))
    return result, error
# Each report is split into sentences before scoring; this folds the
# per-sentence aip results back into a single per-article score.
def sen_art(inputs):
    '''
    :param inputs: list of aip result dicts (positive_prob/negative_prob/confidence)
    :return: float, confidence-weighted mean polarity; NaN for an empty list
    '''
    if len(inputs) == 0:
        return np.nan
    total = sum((item['positive_prob'] - item['negative_prob']) * item['confidence']
                for item in inputs)
    return total / len(inputs)
def main_function(metal_lst, metal_dict, recommend, conn, client):
    '''
    Run the full sentiment pipeline for every metal: ensure the target table
    exists, skip urls already scored, clean the sentences, score each row
    with the aip, and insert scored rows into `<met>_sentiment`.

    :param metal_lst: list of str, e.g. ['Cu', 'Pb', ...]
    :param metal_dict: dict mapping symbol -> Chinese name, e.g. {'Cu': '铜'}
    :param recommend: DataFrame of all recommendations extracted in step 2
    :param conn: SQLAlchemy connection to the database
    :param client: aip NLP client
    :return raise_error: dict mapping url -> list of aip error strings,
        so failures can be inspected (and retried) after the run
    '''
    raise_error = {}
    for met in metal_lst:
        # Create the per-metal table the first time we see this metal.
        result = conn.execute("SHOW TABLES LIKE '{}';".format(met+'_sentiment'))
        if not result.first():
            print('can not find {}_sentiment, will create it'.format(met))
            build_sentiment_article(conn, met)
        # Everything already scored, and everything we have for this metal.
        local_met_sentiment = pd.read_sql('select * from {}_sentiment'.format(met), con=conn)
        current_met_sentiment = get_metal_df(met, recommend)
        # Keep only urls not yet scored, to avoid re-billing the aip.
        wait_met_sentiment = current_met_sentiment[~current_met_sentiment['url'].isin(list(local_met_sentiment['url']))].copy()
        wait_met_sentiment = wait_met_sentiment.reset_index(drop=True)
        # Clean the sentences, then drop rows left with nothing relevant.
        post_pre_sentiment = preprocess(met, metal_dict[met], wait_met_sentiment)
        post_pre_sentiment = post_pre_sentiment[post_pre_sentiment['{}_new_action'.format(met)].str.len()!=0].reset_index(drop=True)
        # Score and insert row by row so a crash mid-run loses at most the
        # current row (everything inserted so far is already committed).
        metal_sentiment = []
        for i in tqdm(range(len(post_pre_sentiment)), desc = 'using aip for {}'.format(met)):
            res, err = calculate_sentiment(post_pre_sentiment[met + '_new_action'][i], client)
            time.sleep(1)  # throttle aip calls
            metal_sentiment.append(res)
            raise_error[post_pre_sentiment['url'][i]] = err
            tmp_to_sql_df = pd.DataFrame(post_pre_sentiment.loc[i, list(post_pre_sentiment.columns)]).T.reset_index(drop=True)
            tmp_to_sql_df['published_date'] = tmp_to_sql_df['published_date'].apply(lambda x: pd.to_datetime(x).floor('D'))
            tmp_to_sql_df['date'] = tmp_to_sql_df['date'].apply(lambda x: pd.to_datetime(x).floor('D'))
            tmp_to_sql_df['Sentiment'] = [res]
            tmp_to_sql_df['Sentiment_article'] = tmp_to_sql_df['Sentiment'].apply(lambda x: sen_art(x))
            # BUG FIX: `x != np.nan` is always True (NaN never compares equal),
            # so the original filter never dropped NaN scores; use notna().
            tmp_filter_df = tmp_to_sql_df[tmp_to_sql_df['Sentiment_article'].notna()].copy().reset_index(drop=True)
            if len(tmp_filter_df)>0:
                tmp_filter_df = change_type(tmp_filter_df)
                tmp_filter_df.to_sql('{}_sentiment'.format(met), con=conn, if_exists='append', index=False, chunksize=1000)
    print('completed')
    return raise_error
#run is to do the daily task
def run_function(metal_lst, metal_dict, recommend, conn, client, error_path):
    '''
    Daily task: run the pipeline, then merge this run's errors with the ones
    recorded on previous runs and persist the union to the error file.

    :param metal_lst: list of str, e.g. ['Cu', 'Pb', ...]
    :param metal_dict: dict mapping symbol -> Chinese name, e.g. {'Cu': '铜'}
    :param recommend: DataFrame of all recommendations extracted in step 2
    :param conn: connection object for the database
    :param client: aip NLP client
    :param error_path: str, path of the json file that records per-url errors
    '''
    already_error = other_function.load_json(error_path)
    raise_error = main_function(metal_lst, metal_dict, recommend, conn, client)
    # Keep an old error unless this run produced a fresh entry for that url
    # (setdefault replaces the original `if k in keys(): pass else:` dance).
    for url, errs in already_error.items():
        raise_error.setdefault(url, errs)
    other_function.dump_json(raise_error, error_path)
    if raise_error != {}:
        print('find some error, please check it')
    print('completed')
#check is to retry the error url
def check_function(metal_lst, metal_dict, recommend, conn, client, error_path):
    '''
    Retry mode: re-run the pipeline restricted to the urls recorded in the
    error file, then overwrite the error file with the fresh results.

    :param metal_lst: list of str, e.g. ['Cu', 'Pb', ...]
    :param metal_dict: dict mapping symbol -> Chinese name, e.g. {'Cu': '铜'}
    :param recommend: DataFrame of all recommendations extracted in step 2
    :param conn: connection object for the database
    :param client: aip NLP client
    :param error_path: str, path of the json file that records per-url errors
    '''
    already_error = other_function.load_json(error_path)
    # Guard clause: nothing recorded means nothing to retry.
    if already_error == {}:
        print('no error, no need to check')
        return
    recommend = recommend[recommend['url'].isin(already_error)]
    raise_error = main_function(metal_lst, metal_dict, recommend, conn, client)
    other_function.dump_json(raise_error, error_path)
    print('completed')
if __name__ == '__main__':
    # Mode selector from the command line: 'run' or 'check' (see below).
    switch = sys.argv[1]
    config_path = './step3_data/config.ini'
    conf = ConfigParser()
    conf.read(config_path)
    # Load the database params and build the SQLAlchemy connection.
    use_account = conf.get('database_param', 'account')
    use_psw = conf.get('database_param', 'password')
    use_host = conf.get('database_param', 'host')
    use_port = conf.get('database_param', 'port')
    use_database = conf.get('database_param', 'database')
    engine = sq.create_engine("mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8".format(use_account, use_psw, use_host, use_port, use_database))
    conn = engine.connect()
    # Load the aip params and construct the aip client.
    app_id = conf.get('aip_param', 'app_id')
    api_key = conf.get('aip_param', 'api_key')
    secret_key = conf.get('aip_param', 'secret_key')
    client = AipNlp(app_id, api_key, secret_key)
    # Not flexible, needs to be updated (hard-coded path).
    error_path = './step3_data/error_recommend.json'
    recommend = pd.read_sql('select * from recommend', con=conn)
    # Drop rows whose published_date failed to parse (still a plain str).
    recommend = recommend.loc[recommend['published_date'].apply(lambda x: not isinstance(x,str))]
    # Deal with bad dates: keep only rows whose `date` has a datetime-like
    # type (the type name contains 'time').
    dat_type = [str(type(i)) for i in recommend['date']]
    recommend['dat_type'] = dat_type
    recommend = recommend[recommend['dat_type'].str.contains('time')]
    recommend = recommend.reset_index(drop=True)
    del recommend['dat_type']
    # Delete id because it is the auto-increment column of the table.
    del recommend['id']
    # Not flexible, needs to be updated (hard-coded metals).
    metal_lst = ['Pb', 'Zn', 'Al', 'Ni', 'Xi', 'Cu']
    metal_dict = {'Al':'铝', 'Zn':'锌', 'Cu':'铜', 'Ni':'镍', 'Xi':'锡', 'Pb':'铅'}
    # Two independent modes: 'run' does the daily task, 'check' retries the
    # urls recorded in the error file. Kept as separate functions on purpose
    # so each stays simple even if the logic grows.
    if switch == 'run':
        run_function(metal_lst, metal_dict, recommend, conn, client, error_path)
    elif switch == 'check':
        check_function(metal_lst, metal_dict, recommend, conn, client, error_path)
|
"""
Scrap the web, connect to the DB and update DB
IMDB Dataset Url = https://datasets.imdbws.com/
The title.basics.tsv.gz dataset contains the IMDB id, titleType (eg movie, series etc.), genres
"""
import os, requests
import gzip
import shutil
import math
from bs4 import BeautifulSoup
import sqlite3
import csv
from datetime import datetime
def getDataset_title_basics():
    """Download and extract the IMDB title.basics dataset into ./Data.

    The dataset contains the IMDB id, titleType (movie, series, ...) and
    genres for every title.
    """
    cwd = os.getcwd()
    # Create the Data folder if it doesn't exist.
    if not os.path.isdir("./Data"):
        os.mkdir("Data")
    datasetPath = cwd + "/Data/title.basics.tsv.gz"
    titleBasicsUrl = "https://datasets.imdbws.com/title.basics.tsv.gz"
    extractedDatasetPath = cwd + "/Data/title.basics.tsv"
    print("Downloading title.basics.tsv from IMDB", titleBasicsUrl)
    r = requests.get(titleBasicsUrl)
    try:
        # `with` already closes the file; the original's explicit f.close()
        # inside the with-block was redundant.
        with open(datasetPath, 'wb') as f:
            f.write(r.content)
        print("Successfully downloaded title.basics.tsv from", titleBasicsUrl)
    except OSError:
        # Narrowed from a bare except: only filesystem errors are expected here.
        print("Failed to download title.basics.tsv from", titleBasicsUrl)
    print("Extracting title.basics.tsv")
    try:
        with gzip.open(datasetPath, 'rb') as f_in:
            with open(extractedDatasetPath, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        print("Successfully extracted title.basics.tsv")
    except OSError:
        print("Failed to extract title.basics.tsv")
def getCastFromImdb(imdbID):
    """Web Scrapping is used for this function since this data isn't available
    in the dataset
    Returns a list of actors (with spaces, eg " Weiwei Si") given a imdb ID"""
    try:
        castUrl = "https://www.imdb.com/title/{id}/fullcredits/".format(id = imdbID)
        page = requests.get(castUrl)
        soup = BeautifulSoup(page.text, 'lxml')
        cast_table = soup.find('table', {"class": "cast_list"})
        links = cast_table.find_all('a', href=True)
        actors = []
        for link in links:
            # Only anchors that point at a person ('name' in the href) and
            # have a real text node are actor entries.
            if 'name' not in str(link) or str(link.string).strip() == "None":
                continue
            actor = link.string
            if actor.endswith('\n'):
                actor = actor[:-1]
            actors.append(actor)
    except:
        # Any scraping problem (network, layout change) yields an empty cast.
        return []
    return actors
def getCrewFromImdb():
    """Download and extract the IMDB title.crew dataset into ./Data.

    The crew dataset maps title ids to director and writer name ids.
    """
    cwd = os.getcwd()
    # Create the Data folder if it doesn't exist.
    if not os.path.isdir("./Data"):
        os.mkdir("Data")
    datasetPath = cwd + "/Data/title.crew.tsv.gz"
    titleBasicsUrl = "https://datasets.imdbws.com/title.crew.tsv.gz"
    extractedDatasetPath = cwd + "/Data/title.crew.tsv"
    print("Downloading title.crew.tsv from IMDB", titleBasicsUrl)
    r = requests.get(titleBasicsUrl)
    try:
        # `with` already closes the file; the original's explicit f.close()
        # inside the with-block was redundant.
        with open(datasetPath, 'wb') as f:
            f.write(r.content)
        print("Successfully downloaded title.crew.tsv from", titleBasicsUrl)
    except OSError:
        # Narrowed from a bare except: only filesystem errors are expected here.
        print("Failed to download title.crew.tsv from", titleBasicsUrl)
    print("Extracting title.crew.tsv")
    try:
        with gzip.open(datasetPath, 'rb') as f_in:
            with open(extractedDatasetPath, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        print("Successfully extracted title.crew.tsv")
    except OSError:
        print("Failed to extract title.crew.tsv")
def getKeywordsFromImdb(imdbID):
    """ Web Scrapping is used for this function since this data isn't available
    in the dataset
    Returns a list of keywords given a imdb ID
    Returns empty list if keywords can't be found
    """
    try:
        castUrl = "https://www.imdb.com/title/{id}/keywords/".format(id = imdbID)
        resp = requests.get(castUrl)
        soup = BeautifulSoup(resp.text, 'lxml')
        # Each keyword lives in a td's data-item-keyword attribute.
        cells = soup.find_all('td', {"class": "soda sodavote"})
        keywords = [cell["data-item-keyword"] for cell in cells]
        print(len(keywords))
    except:
        return []
    return keywords
def getNameFromImdb(imdbID):
    """Web Scrapping is used for this function since this data isn't available
    in the dataset
    Returns a name given a name imdbID (eg nm0410331)
    If there is an exception (eg wrong imdbID) it return nan (Not a Number)
    """
    try:
        castUrl = "https://www.imdb.com/name/{id}/".format(id = imdbID)
        page = requests.get(castUrl)
        parsed = BeautifulSoup(page.text, 'lxml')
        name_tag = parsed.find('span', {"class": "itemprop"})
        name = str(name_tag.string)
        print(name)
    except:
        print("Failed to find a name for this id:", imdbID)
        return math.nan
    return name
def createDB():
    """Create ./Data/Data.db and its `info` table if they do not exist."""
    if not os.path.isdir("./Data"):
        os.mkdir("Data")
    conn = sqlite3.connect('./Data/Data.db')
    try:
        c = conn.cursor()
        c.execute("""
        CREATE TABLE IF NOT EXISTS info (
        id integer PRIMARY KEY,
        imdbID integer ,
        primaryTitle text ,
        originalTitle text,
        genres text,
        actors text,
        director text,
        writers text,
        keywords text,
        year integer,
        soup text,
        UNIQUE(imdbID)
        );""")
        conn.commit()
    finally:
        # BUG FIX: the original leaked the connection; always close it.
        conn.close()
    return
def extractBasicTsvData(tsvFile):
    """Load the 'movie' rows of a title.basics TSV into the info table.

    A row looks like:
    ['tt0000009', 'movie', 'Miss Jerry', 'Miss Jerry', '0', '1894', '\\N', '45', 'Romance']
    i.e. [ImdbID, type, primaryTitle, originalTitle, isAdult, startYear,
    endYear, runtime, genres]. More info: https://www.imdb.com/interfaces/
    """
    counter = 0
    movies = []
    conn = sqlite3.connect('./Data/Data.db')
    c = conn.cursor()
    # `with` guarantees the TSV handle is closed even if an insert fails
    # (the original only closed it on the happy path).
    with open(tsvFile) as tsv_file:
        read_tsv = csv.reader(tsv_file, delimiter="\t")
        for row in read_tsv:
            if row[1] == 'movie':
                movies.append(row)
                print("Processing row", counter)
                counter += 1
                try:
                    yearValue = int(row[5])
                except ValueError:
                    # startYear is '\N' when unknown.
                    yearValue = 0
                # Single-row insert: execute(), not executemany() wrapping a
                # one-element list as the original did.
                c.execute("INSERT OR IGNORE INTO info(imdbID, primaryTitle, originalTitle, genres, year) VALUES(?,?,?,?,?)",
                          (row[0], row[2], row[3], row[8], yearValue))
    # One commit for the whole load instead of one per row -- much faster.
    conn.commit()
    conn.close()
    print("# of movies:", len(movies))
    return
def extractActorsToDB():
    """For every row already in `info`, scrape the cast from IMDB and store
    it as a comma-joined string in the `actors` column.

    NOTE(review): one HTTP request per movie via getCastFromImdb -- very
    slow on the full dataset, and a failed scrape silently stores ''.
    """
    conn = sqlite3.connect('./Data/Data.db')
    c = conn.cursor()
    c.execute('SELECT * FROM info')
    records = c.fetchall()
    for row in records:
        imdbID = row[1]  # imdbID is the second column of `info`
        print(imdbID)
        actors = getCastFromImdb(imdbID)
        strActors = str(','.join(actors))
        print(strActors)
        # NOTE(review): executemany with a single tuple; plain execute would do.
        c.executemany("""UPDATE info
                         SET actors = ?
                         WHERE imdbID = ?""", [(strActors, imdbID)])
    conn.commit()
    conn.close()
    return
def main():
    """Entry point -- currently just exercises getCastFromImdb.

    The full pipeline (getDataset_title_basics, createDB,
    extractBasicTsvData, extractActorsToDB, ...) is run step by step by
    un-commenting the relevant calls here.
    """
    cast = getCastFromImdb("tt1074638")
    print(cast)
    return
if __name__ == "__main__":
main() |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
# ---- Load data ----
# Each line of 1.txt is comma-separated; the last column is a class label
# that we drop before converting to a float matrix.
with open("1.txt", "r") as f:
    rows = f.readlines()

samples = []  # renamed from `list`: don't shadow the builtin
for line in rows:
    fields = line.strip().split(",")
    fields.pop()  # drop the trailing class attribute
    samples.append(fields)
a = np.array(samples, dtype=float)

# ---- Mean vector, inner and outer products of the centered data ----
MeanVector = np.mean(a, axis=0)
print("向量均值为:")
print(MeanVector)
center = a - MeanVector  # center the data
innerProduct = np.dot(center.T, center)
print("内积为:")
print(innerProduct / len(center))
Kroneckerproduct = 0
for i in range(len(center)):
    Kroneckerproduct = Kroneckerproduct + center[i].reshape(len(center[0]), 1) * center[i]
print("外积为:")
print(Kroneckerproduct / len(center))

# ---- Per-column variance ----
variances = [np.var(col) for col in a.T]
print("各列的方差:")
print(variances)
maxIndex = variances.index(max(variances))
minIndex = variances.index(min(variances))
print("方差最大的属性所在列数为:", end=" ")
print(maxIndex + 1)
print("方差最小的属性所在列数为:", end=" ")
print(minIndex + 1)

# ---- Pairwise covariances of the 10 attributes ----
Cov = {}
for i in range(9):
    for j in range(i + 1, 10):
        st = str(i + 1) + '-' + str(j + 1)
        Cov[st] = np.cov(a.T[i], a.T[j])[0][1]
print("协方差矩阵为:")
print(Cov)
print("哪对属性的协方差最大:", end=" ")
print(max(Cov, key=Cov.get))
print("哪对属性的协方差最小:", end=" ")
print(min(Cov, key=Cov.get))

# ---- Correlation of attributes 1 and 2 + scatter plot ----
t = center.T
cor = np.corrcoef(t[0], t[1])
print("相关系数为:")
print(cor[0][1])
picture = plt.figure()
ax1 = picture.add_subplot(111)
ax1.set_title("Correlation scatter plots")
plt.scatter(t[0], t[1], color='c', marker='o')
plt.xlabel('Attributes 1')
plt.ylabel('Attributes 2')
plt.show()

# ---- Normal pdf of attribute 1 ----
E = np.mean(a, axis=0)[0]
# BUG FIX: the pdf formula below needs the standard deviation; the original
# assigned np.var (the variance) to S.
S = np.std(a.T[0])
fig = plt.figure()
# BUG FIX: the subplot was added to the previous figure (`picture`), leaving
# this new figure empty.
ax1 = fig.add_subplot(111)
ax1.set_title("Probability density function")
x = np.linspace(E - 3 * S, E + 3 * S, 50)
y_sig = np.exp(-(x - E) ** 2 / (2 * S ** 2)) / (math.sqrt(2 * math.pi) * S)
plt.plot(x, y_sig, "k", linewidth=2)
plt.vlines(E, 0, np.exp(-(E - E) ** 2 / (2 * S ** 2)) / (math.sqrt(2 * math.pi) * S), colors="red",
           linestyles="dashed")
plt.vlines(E + S, 0, np.exp(-(E + S - E) ** 2 / (2 * S ** 2)) / (math.sqrt(2 * math.pi) * S),
           colors="m", linestyles="dotted")
plt.vlines(E - S, 0, np.exp(-(E - S - E) ** 2 / (2 * S ** 2)) / (math.sqrt(2 * math.pi) * S),
           colors="m", linestyles="dotted")
plt.xticks([E - S, E, E + S], ['μ-σ', 'μ', 'μ+σ'])
plt.xlabel('Attributes 1')
# NOTE(review): the y axis here is the probability density, not attribute 2;
# the label is kept as-is pending confirmation of intent.
plt.ylabel('Attributes 2')
# BUG FIX: the original title string had an invalid '\m' escape and
# unbalanced '$' mathtext delimiters.
plt.title(r'Normal Distribution: $\mu=%.2f, \sigma=%.2f$' % (E, S))
plt.grid(True)
plt.show()
|
import pytest
import render_little
import models
from sqlalchemy import create_engine
from datetime import datetime
import mock
import unittest
import os
import factory
import factory.alchemy
@pytest.fixture()
def setup_dirs(monkeypatch):
    """Point render_little at a scratch download dir and stage input tiffs.

    Wipes any previous test_download tree, unzips the archived test tiffs
    into the scene input path (falling back to a real download when the
    archive is missing), so the process tests have real files to work on.
    """
    from zipfile import ZipFile
    from shutil import rmtree
    monkeypatch.setattr(render_little,
                        'PATH_DOWNLOAD',
                        str(TestProcess.test_tmp_download)
                        )
    # Start from a clean download dir.
    if os.path.exists(TestProcess.test_tmp_download):
        rmtree(TestProcess.test_tmp_download)
    if not os.path.exists(TestProcess.test_input_path):
        os.makedirs(TestProcess.test_input_path)
    try:
        with ZipFile('test_tiffs_Archive.zip', 'r') as zip_file:
            zip_file.extractall(TestProcess.test_input_path)
    except IOError:
        # No local archive: fetch the scene for real (slow).
        print("Archive does not exist - downloading files")
        bands, input_path, scene_id = render_little.download_and_set(
            TestProcess.fake_job_message)
@pytest.fixture(scope='session', autouse=True)
def connection(request):
    """Session-wide DB setup: build the schema on the test database and bind
    the scoped session to a single connection; drop everything at the end."""
    engine = create_engine('postgresql://postgres@/test_bar')
    models.Base.metadata.create_all(engine)
    connection = engine.connect()
    models.DBSession.registry.clear()
    models.DBSession.configure(bind=connection)
    models.Base.metadata.bind = engine
    # Drop all tables when the test session finishes.
    request.addfinalizer(models.Base.metadata.drop_all)
    return connection
@pytest.fixture(autouse=True)
def db_session(request, connection):
    """Wrap every test in a transaction that is rolled back afterwards, so
    tests never see each other's rows."""
    from transaction import abort
    from models import DBSession
    txn = connection.begin()
    request.addfinalizer(txn.rollback)
    request.addfinalizer(abort)
    return DBSession
@pytest.mark.usefixtures("connection", "db_session")
class JobFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Factory for UserJob_Model rows.

    NOTE(review): `sqlalchemy_session = db_session` assigns the fixture
    *function* object, not a live session -- confirm factory_boy actually
    accepts this; normally the session itself must be bound here.
    NOTE(review): datetime.utcnow() is evaluated once at import time, so
    every instance shares the same timestamps; factory.LazyFunction would
    evaluate per instance.
    """
    class Meta():
        model = models.UserJob_Model
        sqlalchemy_session = db_session
    jobstatus = 0
    starttime = datetime.utcnow()
    lastmodified = datetime.utcnow()
    band1 = u'4'
    band2 = u'3'
    band3 = u'2'
    entityid = u'LC80470272015005LGN00'
    email = u'test@test.com'
    jobid = factory.Sequence(lambda n: n)
@pytest.mark.usefixtures("connection", "db_session")
class LogFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Factory for WorkerLog rows.

    NOTE(review): same caveats as JobFactory -- `sqlalchemy_session` is
    bound to the fixture function object, and date_time is evaluated once
    at import time.
    """
    class Meta():
        model = models.WorkerLog
        sqlalchemy_session = db_session
    id = factory.Sequence(lambda n: n)
    instanceid = u'i-6b62f69d'
    date_time = datetime.utcnow()
    statement = u'Test'
    value = u'True'
@pytest.fixture(scope='class')
def fake_job1(db_session):
    """Insert one minimal UserJob_Model row for class-scoped tests."""
    job = models.UserJob_Model(
        jobstatus=0,
        starttime=datetime.utcnow(),
        lastmodified=datetime.utcnow()
    )
    db_session.add(job)
    db_session.flush()
# @pytest.fixture()
# def write_activity_fix(monkeypatch, tmpdir):
# if tmpdir.join('log').exists():
# tmp_activity_log = tmpdir.join('log/tmp_act_log.txt')
# else:
# tmp_activity_log = tmpdir.mkdir('log').join('tmp_act_log.txt')
# monkeypatch.setattr(render_little,
# 'PATH_ACTIVITY_LOG',
# str(tmp_activity_log)
# )
# return tmp_activity_log
# @pytest.fixture()
# def write_error_fix(monkeypatch, tmpdir):
# if tmpdir.join('log').exists():
# tmp_error_log = tmpdir.join('log/tmp_error_log.txt')
# else:
# tmp_error_log = tmpdir.mkdir('log').join('tmp_error_log.txt')
# monkeypatch.setattr(render_little,
# 'PATH_ERROR_LOG',
# str(tmp_error_log)
# )
# return tmp_error_log
# --test db functionality tests
def test_db_lookup(db_session):
    """A row inserted and flushed is visible within the same transaction."""
    job = models.UserJob_Model(jobstatus=0,
                               starttime=datetime.utcnow(),
                               lastmodified=datetime.utcnow())
    db_session.add(job)
    db_session.flush()
    assert db_session.query(models.UserJob_Model).count() == 1
def test_db_is_rolled_back(db_session):
    """The previous test's insert must not leak into this one."""
    assert db_session.query(models.UserJob_Model).count() == 0
# --module function tests
def test_cleanup_downloads():
    """cleanup_downloads removes a populated directory and returns True."""
    test_dir = os.path.join(os.getcwd(), 'testdir')
    os.makedirs(test_dir, exist_ok=True)
    # Use a context manager so the handle is closed even if the write fails
    # (the original used open/write/close).
    with open(os.path.join(test_dir, 'test.txt'), 'a') as f:
        f.write('this is a test')
    # cleanup_downloads returns True if works
    assert render_little.cleanup_downloads(test_dir)
# def test_write_activity(write_activity_fix):
# render_little.write_activity('test message')
# assert 'test message' in write_activity_fix.read()
# def test_write_error(write_error_fix):
# render_little.write_error('test message')
# assert 'test message' in write_error_fix.read()
# --jobs queue
@pytest.mark.usefixtures("connection", "db_session")
class TestQueue(unittest.TestCase):
    """Tests for reading job messages off the work queue."""
    @pytest.fixture(autouse=True)
    def setup_tmpdir(self, tmpdir):
        # Expose pytest's tmpdir inside this unittest-style class.
        self.tmpdir = tmpdir
    class Fake_Job_Class():
        """Minimal stand-in for a queue message object."""
        def __init__(self, message_content, message_attributes):
            self.message_content = message_content
            self.message_attributes = message_attributes
    # Attribute payload shaped like a real queue message.
    message = {'job_id': {'string_value': 1, 'data_type': 'Number'},
               'band_2': {'string_value': 3, 'data_type': 'Number'},
               'band_3': {'string_value': 2, 'data_type': 'Number'},
               'band_1': {'string_value': 4, 'data_type': 'Number'},
               'scene_id': {'string_value': 'LC80470272015005LGN00',
                            'data_type': 'String'},
               'email': {'string_value': 'test@test.com',
                         'data_type': 'String'}}
    fake_job_for_queue = [Fake_Job_Class("job", message)]
    # Malformed attributes (a list instead of a dict) for the error path.
    bad_fake_job = [Fake_Job_Class("job", ['test'])]
    def test_get_job_attributes_returns_correctly(self):
        result = render_little.get_job_attributes(self.fake_job_for_queue)
        assert result == (
            {'job_id': 1, 'band_2': 3, 'band_3': 2, 'band_1': 4, 'scene_id': 'LC80470272015005LGN00', 'email': 'test@test.com'})
    # def test_get_job_attributes_logs_errors_correctly(self):
    #     render_little.get_job_attributes(self.bad_fake_job)
    #     assert "Attribute retrieval fail because" in str(self.tmpdir.join('log/tmp_act_log.txt').read())
    #     assert "Attribute retrieval traceback" in str(self.tmpdir.join('log/tmp_error_log.txt').read())
# --process tests
@pytest.mark.usefixtures("setup_dirs")
class TestImageFiles(unittest.TestCase):
    """These tests require real files (staged by the setup_dirs fixture)."""
    @mock.patch('worker.render_little.Downloader')
    def test_download_returns_correct_values(self, Downloader):
        # With the downloader mocked out, download_and_set should still
        # resolve the bands, input path and scene id from the job message.
        bands, input_path, scene_id = (render_little.download_and_set(
            TestProcess.fake_job_message))
        self.assertEqual(input_path,
                         os.getcwd() + '/test_download/LC80470272015005LGN00')
        self.assertEqual(bands, [u'4', u'3', u'2'])
        self.assertEqual(scene_id, 'LC80470272015005LGN00')
@pytest.mark.usefixtures("connection", "db_session", "fake_job1")
class TestProcess(unittest.TestCase):
    """Unit tests for the individual steps of render_little's process.

    The class attributes below are shared fixture data; note that other
    classes (TestImageFiles, setup_dirs) reference them as TestProcess.*.
    """
    fake_job_message = {u'job_id': u'1',
                        u'band_2': u'3',
                        u'band_3': u'2',
                        u'band_1': u'4',
                        u'scene_id': u'LC80470272015005LGN00',
                        u'email': u'test@test.com'}
    # bad_job_message is missing band_1
    bad_job_message = {u'job_id': u'1',
                       u'band_2': u'3',
                       u'band_3': u'2',
                       u'scene_id': u'LC80470272015005LGN00',
                       u'email': u'test@test.com'}
    test_tmp_download = os.getcwd() + '/test_download'
    test_input_path = os.getcwd() + '/test_download/LC80470272015005LGN00'
    test_bands = [u'4', u'3', u'2']
    # Deliberately incomplete band list used by the failure tests.
    bad_test_bands = [u'4', u'3']
    test_scene_id = 'LC80470272015005LGN00'
    test_file_location = (os.getcwd() +
                          '/download/LC80470272015005LGN00/LC80470272015005LGN00_bands_432.TIF')
    test_file_name = 'LC80470272015005LGN00_bands_432'
    test_file_name_zip = 'LC80470272015005LGN00_bands_432.zip'
    test_file_png = 'pre_LC80470272015005LGN00_bands_432.png'
    test_file_tif = 'pre_LC80470272015005LGN00_bands_432.TIF'
    @mock.patch('worker.render_little.Downloader')
    def test_download_errors_correctly(self, Downloader):
        # A job message missing band_1 must raise.
        with pytest.raises(Exception):
            bands, input_path, scene_id = (render_little.download_and_set(
                self.bad_job_message))
    def test_resize_bands_creates_files(self):
        """If test files don't exist, make them exist
        The files are either downloaded from a fileserver, or unzipped
        from an archive file if it exists.
        """
        delete_me, rename_me = (
            render_little.resize_bands(self.test_bands, self.test_input_path,
                                       self.test_scene_id)
        )
        expected_delete_me = (
            [self.test_input_path + '/LC80470272015005LGN00_B4.TIF',
             self.test_input_path + '/LC80470272015005LGN00_B3.TIF',
             self.test_input_path + '/LC80470272015005LGN00_B2.TIF']
        )
        self.assertEqual(delete_me, expected_delete_me)
    def test_resize_bands_fails_with_message(self):
        # An empty input path makes gdal_translate fail; the wrapper must
        # surface a descriptive error.
        with pytest.raises(Exception) as e:
            delete_me, rename_me = (
                render_little.resize_bands(self.bad_test_bands,
                                           '',
                                           self.test_scene_id)
            )
        print(e.value)
        assert 'gdal_translate did not downsize images' in str(e.value)
    @mock.patch('worker.render_little.os')
    def test_remove_and_rename(self, mock_os):
        render_little.remove_and_rename(['filelist1'], ['filelist2'])
        mock_os.remove.assert_called_with('filelist1')
        mock_os.rename.assert_called_with('filelist2', 'filelist1')
    @mock.patch('worker.render_little.Process')
    def test_merge_images(self, Process):
        render_little.merge_images(self.test_input_path, self.test_bands)
        render_little.Process.assert_called_with(
            self.test_input_path,
            dst_path=render_little.PATH_DOWNLOAD,
            verbose=False,
            bands=self.test_bands
        )
    @mock.patch('worker.render_little.Process')
    def test_merge_images_fails_with_exception(self, Process):
        render_little.Process.side_effect = Exception()
        with pytest.raises(Exception) as e:
            render_little.merge_images('', self.bad_test_bands)
        assert 'Processing/landsat-util failed' in str(e.value)
    def test_name_files(self):
        file_location, file_name, file_tif = (
            render_little.name_files(self.test_bands,
                                     self.test_input_path,
                                     self.test_scene_id))
        assert file_location == (
            self.test_input_path + '/LC80470272015005LGN00_bands_432.png')
        assert file_name == 'LC80470272015005LGN00_bands_432'
        assert file_tif == (
            self.test_input_path + '/LC80470272015005LGN00_bands_432.TIF')
    @mock.patch('worker.render_little.subprocess')
    def test_tif_to_png(self, mock_subp):
        # subprocess is mocked, so only the computed name and the convert
        # command line are checked.
        file_png = render_little.tif_to_png(self.test_file_location,
                                            self.test_file_name,
                                            self.test_file_tif)
        assert file_png == self.test_file_png
        mock_subp.call.assert_called_with(['convert',
                                           self.test_file_tif,
                                           self.test_file_location])
    @mock.patch('worker.render_little.Key')
    @mock.patch('worker.render_little.boto')
    def test_upload_to_s3(self, boto, Key):
        self.assertIsNone(render_little.upload_to_s3(self.test_file_location,
                                                     self.test_file_png,
                                                     self.fake_job_message
                                                     ))
    @mock.patch('worker.render_little.Key')
    @mock.patch('worker.render_little.boto')
    def test_upload_to_s3_fails_with_exception(self, boto, Key):
        # missing job argument to cause exception
        render_little.boto.connect_s3.side_effect = Exception()
        with pytest.raises(Exception) as e:
            render_little.upload_to_s3(None,
                                       '',
                                       self.bad_job_message,
                                       )
        assert 'S3 Upload failed' in str(e.value)
    @mock.patch('worker.render_little.rmtree')
    def test_delete_files(self, mock_rmtree):
        render_little.delete_files(self.test_input_path)
        mock_rmtree.assert_called_with(self.test_input_path)
        # check error checking:
        render_little.rmtree.side_effect = Exception(OSError)
        with pytest.raises(Exception):
            render_little.delete_files('files')
# End-to-end run of render_little.process with S3 mocked out.
# NOTE(review): with stacked mock.patch decorators the innermost patch
# (boto) is injected first, so the parameter names here appear swapped
# relative to the mocks they receive; also mixing patch-injected args with
# the setup_dirs fixture depends on argument ordering -- verify this test
# actually receives the fixture and not a mock.
@mock.patch('worker.render_little.Key')
@mock.patch('worker.render_little.boto')
def test_whole_process_run(Key, boto, setup_dirs):
    result = render_little.process(TestProcess.fake_job_message)
    # render_little.process returns True if it works:
    assert result
|
# Global address book shared by the functions below.
contact_list = []


class Contacts(object):
    """One address-book entry. (Python 2 code: uses print statements.)"""
    def __init__(self, first_name, last_name, mobile = "", work_number = "", email = ""):
        self.first_name = first_name
        self.last_name = last_name
        self.mobile = mobile
        self.work_number = work_number
        self.email = email
    def send_text(self, message):
        # Simulates sending an SMS by printing it.
        print "To: %s - %s" % (self.mobile, message)
# contacts = {("Diana", "Banana") : {"mobile" : "415-555-3625", "email" : "diana@hackbright.com", "birthday" :
# "february 21"}, ("Minnie", "Mouse") : {"mobile" : "415-666-6666", "email" : "mrsmickey@hackbright.com", "birthday" :
# "february 14"}}
# def add_contact():
# pass
# def delete_contact():
# pass
# def change_name():
# pass
# def update_contact():
# pass
def main():
    """Build a few sample contacts, print their fields, and text each one.

    (Python 2 code: uses print statements.)
    """
    contact_amy = Contacts("Amy", "Claussen", mobile = "345-567-7890", work_number = "555-666-8888", email = "amy@test.com")
    contact_list.append(contact_amy)
    contact_trushna = Contacts("trushna", "mehta", mobile = "123-562-1247", work_number = "332-125-8456", email = "trushna@test.com")
    contact_list.append(contact_trushna)
    contact_minnie = Contacts("Minnie", "Mouse", mobile = "415-666-6666", email = "mrsmickey@hackbright.com", work_number = "876-444-3333")
    contact_list.append(contact_minnie)
    # Dump every contact's fields.
    for info in contact_list:
        print info.first_name
        print info.last_name
        print info.mobile
        print info.work_number
        print info.email
        print ""
    for contact in contact_list:
        contact.send_text("Hella cool")
    # "february 14"}
    # message_to_send = raw_input("What message do you want to send?")
    # amy.send_text(message_to_send)
    # for i in range(len(contacts.keys())):
    #     print str(i +1) + ". " + str(contacts.keys()[i])
    # input = int(raw_input("What entry do you want to update?")
    # for i in len(contacts.keys():
    #     if input == i + 1
    # which name (key do you want)? (ignore updating the name for now)
    # which category do you want to update?
    # gimme what you want to update it with
if __name__ == '__main__':
main() |
# Nombre: Luis Alejandro Martinez
# Matricula: 2019-7725
from os import system
from time import sleep
import pandas as pd
import webbrowser as wb
import os
import folium
import playsound
TITLE = "Programa para registrar datos de personas desaparecidas\n"
SPACE = " "
# In-memory record store and tag lists used by the search/zodiac features.
data = []
tags = []
li_tag = []
li_tag_with_zodiac = []
# explosionSound = 'explosion.wav'
# FUNCTION SOUNDS: voice prompts for menu navigation and feedback.
menu_sound = "function_sound/menu_sound.m4a"
file_generate_sound = "function_sound/file_generate_sound.m4a"
map_generate_sound = "function_sound/map_generate_sound.m4a"
invalid_option_sound = "function_sound/invalid_option_sound.m4a"
list_zodiac_sound = "function_sound/list_zodiac_sound.m4a"
not_data_sound = "function_sound/not_data_sound.m4a"
# ADD SOUNDS: prompts played while registering a missing person.
born_day_sound = "add_missing_person_sounds/born_day_sound.m4a"
born_month_sound = "add_missing_person_sounds/born_month_sound.m4a"
born_year_sound = "add_missing_person_sounds/born_year_sound.m4a"
id_sound = "add_missing_person_sounds/id_sound.m4a"
write_name_sound = "add_missing_person_sounds/write_name_sound.m4a"
write_last_name_sound = "add_missing_person_sounds/write_last_name_sound.m4a"
write_gender_sound = "add_missing_person_sounds/write_gender_sound.m4a"
last_place_sound = "add_missing_person_sounds/last_place_sound.m4a"
latitude_sound = "add_missing_person_sounds/latitude_sound.m4a"
longitude_sound = "add_missing_person_sounds/longitude_sound.m4a"
another_info_sound = "add_missing_person_sounds/another_info_sound.m4a"
adding_last_sound = "add_missing_person_sounds/adding_last_sound.m4a"
# MODIFY SOUNDS: prompts played while editing an existing record.
data_to_modify_sound = "modify_missing_person_sounds/data_to_modify_sound.m4a"
modifying_last_sound = "modify_missing_person_sounds/modifying_last_sound.m4a"
person_chosen_to_modify_sound = "modify_missing_person_sounds/person_chosen_to_modify_sound.m4a"
write_new_data_sound = "modify_missing_person_sounds/write_new_data_sound.m4a"
def ask_data(txt):
    """Prompt with *txt* until the user types a non-empty string; return it.

    Replaces the original recursive implementation, which leaked a global
    `content`, used a bare ``except`` and could hit the recursion limit on
    repeated bad input.
    """
    while True:
        content = input(txt)
        if content:
            return content
        # Empty input: warn (with the audio cue) and ask again.
        print(f">>>{content}" + " no es valido; Intente de nuevo!\n")
        play_sound(invalid_option_sound)
def ask_number(txt):
    """Prompt with *txt* until the user types a valid integer; return it.

    Replaces the original recursive implementation, which leaked a global
    `number` and relied on recursion plus a broad ``except`` for retries.
    """
    while True:
        raw = input(txt)
        try:
            return int(raw)
        except ValueError:
            # Original (typo-laden) message kept for behavioural parity.
            print(f">>>{raw}" + " no es una opcione validad; Elija Una!\n")
            play_sound(invalid_option_sound)
def play_sound(sound_file):
    """Play *sound_file* synchronously (True = block until playback ends)."""
    playsound.playsound(sound_file, True)
def is_adult(age, reference_year=2020):
    """Return True when the person is 18 or older in *reference_year*.

    :param age: birth date as a "day/month/year" string.
    :param reference_year: year to compute the age against. Defaults to 2020,
        which was hard-coded in the original implementation; callers can now
        pass the current year instead.
    """
    birth_year = int(age.split("/")[2])
    return reference_year - birth_year > 17
def save_to_csv(date, id, name, last_name, gender, last_place, latitude, longitude, other_information, zodiac_sign):
    """Append one record to the in-memory `data` list and rewrite the CSV.

    The whole database is rewritten on every call; the CSV index column is
    added automatically by pandas.
    """
    record = [date, id, name, last_name, gender, last_place,
              latitude, longitude, other_information, zodiac_sign]
    data.append(record)
    columns = ["Fecha", "Cedula", "Nombre", "Apellidos", "Genero", "UltimoLugar",
               "Latitud", "Longitud", "Otra informacion", "Zodiaco"]
    pd.DataFrame(data, columns=columns).to_csv("missing_people_db.csv")
def get_from_csv():
    """Load previously saved records from missing_people_db.csv into `data`.

    Bug fix: the original did ``storaged_data[i].remove(i)`` to drop the CSV
    index column, but ``list.remove`` deletes the first cell *equal to* i,
    so any data value colliding with the row number (e.g. an id or count)
    could be removed instead. Reading with ``index_col=0`` drops the index
    column reliably.
    """
    if os.path.isfile("missing_people_db.csv"):
        stored = pd.read_csv("missing_people_db.csv", index_col=0).values.tolist()
        data.extend(stored)
def add_missing_person():
    """Interactively collect one missing-person record and persist it.

    Prompts (each with an audio cue) for birth date, id, names, gender,
    last known place, coordinates and extra info, saves the record via
    save_to_csv(), prints a summary and offers a small follow-up menu.
    """
    system('cls')  # Windows-only screen clear
    print("Le Solicitare Algunos Datos Del Desaparecido\n")
    play_sound(born_day_sound)
    born_day = ask_number("Digite Su Dia de nacimineto : ")
    play_sound(born_month_sound)
    born_month = ask_number("Digite Su mes de nacimineto : ")
    play_sound(born_year_sound)
    born_year = ask_number("Digite Su año de nacimineto : ")
    born_date = f"{born_day}/{born_month}/{born_year}"
    zodiac_sign = get_zodiac(born_day, born_month)
    adult = is_adult(born_date)
    # Only adults are asked for a national id (cedula).
    if adult:
        play_sound(id_sound)
        id = ask_data("Digita Su Cedula: ")
    else:
        id = "No Posee"
    play_sound(write_name_sound)
    name = ask_data("Escribe Su Nombre: ")
    play_sound(write_last_name_sound)
    last_name = ask_data("Escribe Su Apellido: ")
    play_sound(write_gender_sound)
    gender = ask_data("Escribe Su Genero(M/F): ")
    play_sound(last_place_sound)
    last_place = ask_data("Escribe El Ultimo Lugar Donde Fue Visto: ")
    play_sound(latitude_sound)
    latitude = ask_data("Digita La latitud: ")
    play_sound(longitude_sound)
    longitude = ask_data("Digita La longitud: ")
    play_sound(another_info_sound)
    other_information = ask_data("Otra informacion: ")
    save_to_csv(born_date, id, name, last_name, gender, last_place, latitude,
                longitude, other_information, zodiac_sign)
    print("\nPersona Agregada Con Existo!")
    print(f"""\n\n
    Fecha : {born_date}
    Nombre: {name}
    Apellido: {last_name}
    Genero: {gender}
    Ultimo lugar: {last_place}
    Latitud: {latitude}
    Longitud: {longitude}
    Zodiaco: {zodiac_sign}
    Otra informacion: {other_information}
    """)
    print("""
    0.Volver A Menu De Inicio
    1.Agregar Otro Persona
    2.Modificar Desaparecido
    3.Salir
    """)
    play_sound(adding_last_sound)
    option = int(ask_number("Escribe Tu Opcion: "))
    # NOTE(review): after Menu() returns, control falls through to the second
    # `if` with option == 0, which lands in the final `else` and returns —
    # confirm this chained dispatch is intended.
    if option == 0:
        Menu()
    if option == 1:
        add_missing_person()
    elif option == 2:
        modify_missing_person()
    else:
        return
def modify_missing_person():
    """Interactively edit one field of one stored record and rewrite the CSV.

    Lists all records, asks which person and which field to change, applies
    the edit in the loaded copy and rewrites missing_people_db.csv.
    NOTE(review): the module-level `data` list is NOT refreshed here, so the
    in-memory copy goes stale until the program restarts — confirm intended.
    """
    system('cls')
    print("A QUIEN QUIERES MODIFICAR ?\n")
    if os.path.isfile("missing_people_db.csv"):
        storaged_data = pd.read_csv("missing_people_db.csv").values.tolist()
        # Strip the CSV index column from every row (see get_from_csv).
        for i in range(len(storaged_data)):
            storaged_data[i].remove(i)
        print("\nLISTA DE PERSONAS DESAPARECIDAS")
        print("---------------------------------")
        for person in range(len(storaged_data)):
            print(
                f"{person}) {storaged_data[person][0]}|{storaged_data[person][1]}|{storaged_data[person][2]}|{storaged_data[person][3]}|{storaged_data[person][4]}|{storaged_data[person][5]}|{storaged_data[person][6]}|{storaged_data[person][7]}")
        play_sound(person_chosen_to_modify_sound)
        print("\nELIGE EL NUMERO DE LA PERSONA QUE QUIERES MODIFICAR")
        print("--------------------------------------------------------")
        index_person_to_modify = int(ask_number("Escribe Tu Opcion: "))
        # Re-prompt until the chosen row index exists.
        while index_person_to_modify > len(storaged_data) - 1:
            print(
                f'El {index_person_to_modify} no se encuentra en la lista; Elija Uno Que Si Este!')
            play_sound(invalid_option_sound)
            index_person_to_modify = int(ask_number("Escribe Tu Opcion: "))
        person_to_modify = storaged_data[index_person_to_modify]
        system('cls')
        print("\nDATOS DE LA PERSONA ELEGIDA")
        print("--------------------------------")
        print(f"""
    Fecha : {person_to_modify[0]}
    Cedula : {person_to_modify[1]}
    Nombre: {person_to_modify[2]}
    Apellido: {person_to_modify[3]}
    Genero: {person_to_modify[4]}
    Ultimo lugar: {person_to_modify[5]}
    Latitud: {person_to_modify[6]}
    Longitud: {person_to_modify[7]}
    Otra informacion: {person_to_modify[8]}
    """)
        print("\nQUE QUIERES MODIFICAR DE ESTA PERSONA?")
        print("----------------------------------------")
        print("""
    1)Fecha 2)Cedula 3)Nombre 4)Apellido 5)Genero
    6) Ultimo Lugar 7) Latitud 8) Longitud 9) Otra Info """)
        play_sound(data_to_modify_sound)
        print("\nDIGITA EL NUMERO DEL DATO A MODIFICAR")
        data_index_to_modify = ask_number("Escribe Tu Opcion: ")
        while data_index_to_modify > 9:
            print(f"El {data_index_to_modify} no es una opcion; Elija Una!")
            play_sound(invalid_option_sound)
            data_index_to_modify = ask_number("Escribe Tu Opcion: ")
        play_sound(write_new_data_sound)
        # Menu options are 1-based; list indices are 0-based.
        person_to_modify[data_index_to_modify -
                         1] = input("Escribe el nuevo dato: ")
        system('cls')
        print("\nDATO MODIFICADO CON EXISTO!")
        print("--------------------------------")
        print(f"""\n\n
    Fecha : {person_to_modify[0]}
    Cedula : {person_to_modify[1]}
    Nombre: {person_to_modify[2]}
    Apellido: {person_to_modify[3]}
    Genero: {person_to_modify[4]}
    Ultimo lugar: {person_to_modify[5]}
    Latitud: {person_to_modify[6]}
    Longitud: {person_to_modify[7]}
    Otra informacion: {person_to_modify[8]}
    """)
        # Here I Updated the datas storage with new one
        dataFrame = pd.DataFrame(storaged_data, columns=[
            "Fecha", "Cedula", "Nombre", "Apellidos", "Genero", "UltimoLugar", "Latitud", "Longitud", "Otra informacion", "Zodiaco"])
        dataFrame.to_csv("missing_people_db.csv")
        print("\n----------------------------------------")
        print("Persona modificada exitosamente!")
        print("----------------------------------------")
        print("""
    0.Volver A Menu De Inicio
    1.Agregar Desaparecido
    2.Modificar Desaparecido
    3.Salir
    """)
        play_sound(modifying_last_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
        while option > 3:
            print("Elija Una Opcion Validad")
            play_sound(invalid_option_sound)
            option = int(ask_number("Escribe Tu Opcion: "))
        if option == 0:
            Menu()
        elif option == 1:
            add_missing_person()
        elif option == 2:
            modify_missing_person()
        else:
            return
    else:
        print("No hay Ningun Dato Agregado!")
        input("Presione cualquier tecla para volver al menu")
        Menu()
def return_tag(storaged_data):
    """Append one HTML card (<div class="datas">) per record to the
    module-level `tags` list, in database order."""
    for row in storaged_data:
        tags.append(f"""
    <div class="datas">
        <span><span>Fecha:</span>{row[0]}</span>
        <span><span>Cedula:</span>{row[1]}</span>
        <span><span>Nombre:</span> {row[2]}</span>
        <span><span>Apellido:</span>{row[3]}</span>
        <span><span>Genero:</span>{row[4]}</span>
        <span><span>Ultimo Lugar:</span>{row[5]}</span>
        <span><span>Latitud:</span> {row[6]}</span>
        <span><span>Longitud:</span> {row[7]}</span>
        <span><span>Otra Info:</span>{row[8]}</span>
    </div>""")
def return_li_tag(storaged_data):
    """Append one <li> per record (columns 2 and 3 hold the first and last
    name) to the module-level `li_tag` list."""
    for row in storaged_data:
        li_tag.append(f"""
    <li>{row[2]} {row[3]}</li>
    """)
def return_li_tag_with_zodiac(storaged_data):
    """Append one <li> per record (name, last name and bold zodiac sign —
    columns 2, 3 and 9) to the module-level `li_tag_with_zodiac` list."""
    for row in storaged_data:
        li_tag_with_zodiac.append(f"""
    <li>{row[2]} {row[3]} <b>{row[9]}</b></li>
    """)
def export_missing_person_data():
    """Export every record as a styled HTML card sheet and open the browser.

    Renders DatosDeDesaparecidos/desaparecidos.html plus its style.css from
    the CSV database, opens the page, then shows a small follow-up menu.
    Falls back to the main menu when no database file exists yet.
    """
    if os.path.isfile("missing_people_db.csv"):
        storaged_data = pd.read_csv("missing_people_db.csv").values.tolist()
        # Strip the CSV index column from every row.
        # NOTE(review): list.remove(i) deletes the first cell equal to i,
        # not the cell at position i — confirm values can never collide.
        for i in range(len(storaged_data)):
            storaged_data[i].remove(i)
        return_tag(storaged_data)  # fills the module-level `tags` list
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Lista De Desaparecidos</title>
    <link rel="stylesheet" href="style.css">
</head>
<body>
    <h1>Lista De Desaparecidos</h1>
    <div class="container">
        {" ".join(tags)}
    </div>
</body>
</html>
"""
        html_style = """
* {
    margin: 0;
    padding: 0;
}
html,
body {
    max-height: 100%;
}
body {
    background: linear-gradient(45deg, #2a6f8a, #227a51 100%);
}
h1 {
    margin-top: 30px;
    color: white;
    text-align: center;
    text-shadow: -2px -2px 10px black;
    font-size: 2.5em;
}
.container {
    position: absolute;
    display: flex;
    flex-wrap: wrap;
    top: 20%;
    left: 50%;
    transform: translateX(-50%);
    width: 800px;
    border: 2px solid white;
    padding: 10px;
    border-radius: 15px;
    background: #ffffff1a;
}
.container .datas {
    position: relative;
    width: 100%;
    display: flex;
    flex-wrap: wrap;
    border: 1px solid #ffffff71;
    border-radius: 15px;
    margin: 10px 10px;
    background: #ffffff1a;
}
.container .datas:hover {
    border: 1px solid #ffffffe8;
}
.datas > span {
    width: fit-content;
    height: 50px;
    margin: 10px auto;
    color: white;
    font-weight: bolder;
    padding: 50px auto;
}
.datas > span:last-child {
    width: fit-content;
    margin: 10px auto;
}
.datas > span > span {
    color: lawngreen;
}
"""
        # Write the generated page + stylesheet next to the script.
        os.makedirs("DatosDeDesaparecidos", exist_ok=True)
        style_file = open("DatosDeDesaparecidos/style.css", "w")
        style_file.write(html_style)
        style_file.close()
        html_file = open("DatosDeDesaparecidos/desaparecidos.html", "w")
        html_file.write(html)
        html_file.close()
        wb.open_new_tab("DatosDeDesaparecidos/desaparecidos.html")
        print("\n Sea generado una carpeta con la lista de los desaparecidos\n en la ruta donde esta el archivo .py")
        print("""\n
    1.Volver A Menu De Inicio
    2.Agregar Desaparecido
    3.Salir
    """)
        play_sound(file_generate_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
        # Re-prompt until the option is in range (upper bound only).
        while option > 3:
            print("Elija Una Opcion Validad")
            play_sound(invalid_option_sound)
            option = int(ask_number("Escribe Tu Opcion: "))
        if option == 1:
            Menu()
        elif option == 2:
            add_missing_person()
        else:
            return
    else:
        print("No hay Ningun Dato Agregado!")
        play_sound(not_data_sound)
        input("Presione cualquier tecla para volver al menu")
        Menu()
def show_list_of_missing_person():
    """Render a simple ordered HTML list of names and open it in the browser.

    Writes ListaDeDesaparecidos/lista.html plus its style.css, then shows a
    small follow-up menu. Falls back to the main menu when no database file
    exists yet.
    """
    if os.path.isfile("missing_people_db.csv"):
        storaged_data = pd.read_csv("missing_people_db.csv").values.tolist()
        # Strip the CSV index column from every row (see get_from_csv).
        for i in range(len(storaged_data)):
            storaged_data[i].remove(i)
        return_li_tag(storaged_data)  # fills the module-level `li_tag` list
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Lista De Desaparecidos</title>
    <link rel="stylesheet" href="style.css">
</head>
<body>
    <h1>Lista De Desaparecidos</h1>
    <div class="container">
        <ol class="datas">
            {" ".join(li_tag)}
        </ol>
    </div>
</body>
</html>
"""
        html_style = """
* {
    margin: 0;
    padding: 0;
}
html,
body {
    max-height: 100%;
}
body {
    background: linear-gradient(45deg, #2a6f8a, #227a51 100%);
}
h1 {
    margin-top: 30px;
    color: white;
    text-align: center;
    text-shadow: -2px -2px 10px black;
    font-size: 2.5em;
    text-decoration: underline;
}
.container {
    position: absolute;
    display: flex;
    flex-wrap: wrap;
    top: 15%;
    left: 50%;
    transform: translateX(-50%);
    width: 300px;
    border: 2px solid white;
    border-radius: 5px;
    border-top-right-radius: 50px;
    padding: 30px;
    background: #ffffff1a;
}
.container .datas {
    position: relative;
    width: 100%;
    border: 1px solid #ffffff71;
    border-radius: 15px;
    border-right-color: transparent;
    border-top-right-radius: 0px;
    background: #ffffff1a;
    padding: 10px;
}
.container .datas:hover {
    border: 1px solid #ffffffe8;
}
.datas > li {
    width: 88%;
    margin: 10px 30px;
    color: white;
    font-weight: bolder;
    font-size: 1.2em;
    border: 1px solid #ffffff5e;
    border-radius: 15px;
    border-bottom-left-radius: 2px;
    border-top-left-radius: 2px;
    background: #2a6f8a8c;
    text-align: center;
    height: fit-content;
    transition: margin .2s ease-in-out;
}
.datas > li:hover {
    border: 1px solid #ffffffe0;
    margin-left: 15px;
    background: #359fc98c;
}
"""
        # Write the generated page + stylesheet next to the script.
        os.makedirs("ListaDeDesaparecidos", exist_ok=True)
        style_file = open("ListaDeDesaparecidos/style.css", "w")
        style_file.write(html_style)
        style_file.close()
        html_file = open("ListaDeDesaparecidos/lista.html", "w")
        html_file.write(html)
        html_file.close()
        wb.open("ListaDeDesaparecidos/lista.html")
        print("\n Sea generado una carpeta con la lista de los desaparecidos\n en la ruta donde esta el archivo .py")
        print("""\n
    1.Volver A Menu De Inicio
    2.Agregar Desaparecido
    3.Salir
    """)
        play_sound(file_generate_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
        # Re-prompt until the option is in range (upper bound only).
        while option > 3:
            print("Elija Una Opcion Validad")
            play_sound(invalid_option_sound)
            option = int(ask_number("Escribe Tu Opcion: "))
        if option == 1:
            Menu()
        elif option == 2:
            add_missing_person()
        else:
            return
    else:
        print("No hay Ningun Dato Agregado!")
        input("Presione cualquier tecla para volver al menu")
        Menu()
def show_list_of_zodiac_with_person_name():
    """Print per-sign counts in the console and render an HTML list showing
    each person's zodiac sign; opens the generated page in the browser.

    Writes ListaConZodiaco/listaZodiaco.html plus its style.css, then shows
    a small follow-up menu. Falls back to the main menu when no database
    file exists yet.
    """
    system('cls')
    print("Numero De Desaparecido Por Signo Del Zodiaco")
    sign_data = []
    # [sign name, running count] pairs — same order/spelling as get_zodiac.
    signs_counter = [["capricornio", 0], ["Acuario", 0], ["Piscis", 0], ["Aries", 0], ["Tauro", 0], ["Geminis", 0],
                     ["Cancer", 0], ["Leo", 0], ["Virgo", 0], ["Libra", 0], ["Escorpio", 0], ["Saegitario", 0]]
    if os.path.isfile("missing_people_db.csv"):
        storaged_data = pd.read_csv("missing_people_db.csv").values.tolist()
        # Strip the CSV index column from every row (see get_from_csv).
        for i in range(len(storaged_data)):
            storaged_data[i].remove(i)
        for person in storaged_data:
            sign_data.append(person[9])  # column 9 holds the zodiac sign
        amount_of_mission_person = len(sign_data)
        print(f"\nEl Total De Desaparecidos es: {amount_of_mission_person}\n")
        return_li_tag_with_zodiac(storaged_data)  # fills `li_tag_with_zodiac`
        html = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Lista De Desaparecidos</title>
    <link rel="stylesheet" href="style.css">
</head>
<body>
    <h1>Lista De Desaparecidos</h1>
    <div class="container">
        <ol class="datas">
            {" ".join(li_tag_with_zodiac)}
        </ol>
    </div>
</body>
</html>
"""
        html_style = """
* {
    margin: 0;
    padding: 0;
}
html,
body {
    max-height: 100%;
}
body {
    background: linear-gradient(45deg, #2a6f8a, #227a51 100%);
}
h1 {
    margin-top: 30px;
    color: white;
    text-align: center;
    text-shadow: -2px -2px 10px black;
    font-size: 2.5em;
    text-decoration: underline;
}
.container {
    position: absolute;
    display: flex;
    flex-wrap: wrap;
    top: 15%;
    left: 50%;
    transform: translateX(-50%);
    width: 400px;
    border: 2px solid white;
    border-radius: 5px;
    border-top-right-radius: 50px;
    padding: 30px;
    background: #ffffff1a;
}
.container .datas {
    position: relative;
    width: 100%;
    border: 1px solid #ffffff71;
    border-radius: 15px;
    border-right-color: transparent;
    border-top-right-radius: 0px;
    background: #ffffff1a;
    padding: 10px;
}
.container .datas:hover {
    border: 1px solid #ffffffe8;
}
.datas > li {
    width: 88%;
    margin: 10px 30px;
    color: white;
    font-weight: bolder;
    font-size: 1.2em;
    border: 1px solid #ffffff5e;
    border-radius: 15px;
    border-bottom-left-radius: 2px;
    border-top-left-radius: 2px;
    background: #2a6f8a8c;
    text-align: center;
    height: fit-content;
    transition: margin .2s ease-in-out;
}
.datas > li:hover {
    border: 1px solid #ffffffe0;
    margin-left: 15px;
    background: #359fc98c;
}
li > b {
    float: right;
    border-left: 2px solid white;
    padding-left: 10px;
    padding-right: 10px;
    background: #ffffff7c;
    border-radius: 15px;
}
"""
        # Write the generated page + stylesheet next to the script.
        os.makedirs("ListaConZodiaco", exist_ok=True)
        style_file = open("ListaConZodiaco/style.css", "w")
        style_file.write(html_style)
        style_file.close()
        html_file = open("ListaConZodiaco/listaZodiaco.html", "w")
        html_file.write(html)
        html_file.close()
        wb.open("ListaConZodiaco/listaZodiaco.html")
        # Tally how many people fall under each sign.
        for i in range(len(signs_counter)):
            for sign in sign_data:
                if sign == signs_counter[i][0]:
                    signs_counter[i][1] = signs_counter[i][1] + 1
        # NOTE(review): the loop variable shadows the module-level `data`
        # list (harmless here, but confusing).
        for data in signs_counter:
            print(f"{data[0]}: {data[1]}")
        print("\n Sea generado una carpeta con la lista de los desaparecidos con su zodiaco\n en la ruta donde esta el archivo .py")
        print("""\n
    1.Volver A Menu De Inicio
    2.Agregar Desaparecido
    3.Salir
    """)
        play_sound(list_zodiac_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
        while option > 3:
            print("Elija Una Opcion Validad")
            play_sound(invalid_option_sound)
            option = int(ask_number("Escribe Tu Opcion: "))
        if option == 1:
            Menu()
        elif option == 2:
            add_missing_person()
        else:
            return
    else:
        print("No hay Ningun Dato Agregado!")
        input("Presione cualquier tecla para volver al menu")
        Menu()
def generateMap():
    """Plot every record as a marker on a folium map and open map.html.

    NOTE(review): unlike the other views this does not check that the CSV
    exists first, so it raises when no record was ever saved — confirm
    callers only reach this via the menu after data exists.
    """
    storaged_data = pd.read_csv("missing_people_db.csv").values.tolist()
    # Strip the CSV index column from every row (see get_from_csv).
    for i in range(len(storaged_data)):
        storaged_data[i].remove(i)
    # Map centre coordinates (Dominican Republic).
    lt = 18.735693
    ln = -70.162651
    map = folium.Map(location=[lt, ln], zoom_start=10)
    fg = folium.FeatureGroup('my map')
    for person in storaged_data:
        # Columns 6/7 hold latitude/longitude; popup shows the full name.
        fg.add_child(folium.Marker(
            location=[person[6], person[7]], popup=f'<b>{person[2] + " " + person[3]}</b>'))
    map.add_child(fg)
    map.save('map.html')
    wb.open("map.html")
    print("\n Sea generado una archivo con el mapa de los desaparecidos\n en la ruta donde esta el archivo .py")
    print("""\n
    1.Volver A Menu De Inicio
    2.Agregar Desaparecido
    3.Salir
    """)
    play_sound(map_generate_sound)
    option = int(ask_number("Escribe Tu Opcion: "))
    # Re-prompt until the option is in range (upper bound only).
    while option > 3:
        print("Elija Una Opcion Validad")
        play_sound(invalid_option_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
    if option == 1:
        Menu()
    elif option == 2:
        add_missing_person()
    else:
        return
def get_zodiac(day, month):
    """Return the zodiac sign (Spanish name) for a day/month of birth.

    Bug fix: the original wrote ``mes + 1`` and discarded the result, so a
    birthday *after* the month's cutoff day never advanced to the next sign
    (e.g. Jan 25 reported "capricornio" instead of "Acuario").
    """
    signs = ("capricornio", "Acuario", "Piscis", "Aries", "Tauro", "Geminis",
             "Cancer", "Leo", "Virgo", "Libra", "Escorpio", "Saegitario")
    # Last day of each month that still belongs to that month's first sign.
    dates = (20, 19, 20, 20, 20, 21, 22, 23, 22, 22, 22, 21)
    mes = month - 1
    if day > dates[mes]:
        mes += 1  # past the cutoff: next sign
    if mes == 12:
        mes = 0   # late December wraps back to capricornio
    return signs[mes]
def Menu():
    """Main menu: print the options, read a validated choice and dispatch."""
    system('cls')  # Windows-only screen clear
    print(TITLE)
    print("""
    1.Agregar Desaparecido
    2.Modificar Desaparecido
    3.Exportar Desaparecido
    4.Mostrar Mapa Desaparecido
    5.Lista De Desaparecido
    6.Lista De Desaparecido Con Zodiaco
    7.Repetir Opciones
    8.Salir
    """)
    play_sound(menu_sound)
    option = int(ask_number("Escribe Tu Opcion: "))
    # Only an upper bound is checked; 0 and negatives fall to the final else.
    while option > 8:
        print(f"El {option} No Es Una Opcion; Elige Una Opcion!")
        play_sound(invalid_option_sound)
        option = int(ask_number("Escribe Tu Opcion: "))
    if option == 1:
        add_missing_person()
    elif option == 2:
        modify_missing_person()
    elif option == 3:
        export_missing_person_data()
    elif option == 4:
        generateMap()
    elif option == 5:
        show_list_of_missing_person()
    elif option == 6:
        show_list_of_zodiac_with_person_name()
    elif option == 7:
        Menu()  # redraw the menu (recursive "repeat options")
    else:
        return
# Program entry point: load any previously saved records, then run the menu.
get_from_csv()
Menu()
|
import json
import requests
from battleforcastile.constants import BATTLEFORCASTILE_BACKEND_URL
def create_match(username: str, character: dict):
    """Enqueue a new match for *username* on the backend.

    POSTs a JSON-encoded body to the enqueue-match endpoint and returns the
    raw ``requests.Response``.
    """
    payload = {
        'first_user': {
            'username': username,
            'character': character
        }
    }
    url = f'{BATTLEFORCASTILE_BACKEND_URL}/enqueue-match/'
    return requests.post(url, data=json.dumps(payload))
import os
import json
import pickle
import pandas as pd
from glob import glob
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Materialise the English stop-word list (shadows the imported module name).
stopwords = stopwords.words("english")
def preprocess(data_path):
    """Load every *.json article file under *data_path*, tokenize the bodies
    and pickle the combined DataFrame.

    :param data_path: directory containing the JSON article dumps.
    :returns: the combined DataFrame with an added 'tokenized_body' column.

    Bug fix: ``DataFrame.append`` was deprecated and removed in pandas 2.0;
    frames are now collected and combined with ``pd.concat``.
    """
    frames = []
    for fname in glob(os.path.join(data_path, '*.json')):
        with open(fname, 'r') as f:
            frames.append(pd.DataFrame.from_dict(json.load(f)))
    df_data = pd.concat(frames, ignore_index=True) if frames else None
    # df_data columns: 'title', ' author', ' time', ' description', ' body', ' section'
    # TODO: what if we do this on other library spacey? because it is said this does well
    res = [word_tokenize(raw_text) for raw_text in df_data[' body']]
    print('tokenization done')
    df_data['tokenized_body'] = res
    # save the output
    df_data.to_pickle('preprocess_result.pkl')
    return df_data
def tokenize(word_list):
    """Tokenize each document in *word_list*; returns a list of token lists
    in the same order."""
    return [word_tokenize(document) for document in word_list]
# Script entry point: preprocess every JSON file under ./data.
if __name__ == '__main__':
    preprocess('./data')
|
# Gunicorn configuration for the governmentjobstore backend service.
command = '/var/www/venv/bin/gunicorn'
pythonpath = '/var/www/governmentjobstore/gjs-bl'
bind = '0.0.0.0:8001'
# Worker timeout in seconds. Must be an int: the original "'30000000'"
# string fails gunicorn's config validation for this setting.
timeout = 30000000
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Simulates a saved A3C agent with an rnn and meta-learning.
The agent's rnn is fed with additional rewards and actions for meta-learning.
Todo:
* Consider encapsulating code into a base clase which can be derived from
for other, specialized simulation needs.
* Add input arguments and a main function.
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from environments.hregion_search import gameEnv
from agents.ac_rnn_ra_network import AC_rnn_ra_Network
from util import process_frame
###################
# Hyperparameters #
###################
max_episode_length = 50  # hard cap on simulation steps per episode
s_shape = [84,84,3] # Observations are rgb frames 84 x 84
a_size = 2 # Agent can move in a line
gameArgs = {}  # keyword arguments forwarded to gameEnv
load_model = False  # NOTE(review): unused below — the checkpoint is always restored
model_path = './model'  # directory holding the saved checkpoint
#######################
# Simulate the Policy #
#######################
env_g = gameEnv(**gameArgs);
s = env_g.reset()
sarray = [s]   # raw frames collected for the animation below
rarray = []    # per-step rewards
tf.reset_default_graph()
with tf.device("/cpu:0"):
    # global_episodes and trainer are required to build the A3C graph even
    # though nothing is trained here.
    global_episodes = tf.Variable(0,dtype=tf.int32,name='global_episodes',
                                  trainable=False)
    trainer = tf.train.AdamOptimizer(learning_rate=1e-5)
    ac_net = AC_rnn_ra_Network(s_shape,a_size,'global_0',None)
    saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
    print('Loading Model...')
    # Restore the most recent checkpoint from model_path.
    ckpt = tf.train.get_checkpoint_state(model_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
    rnn_state = ac_net.state_init
    i=0
    d=False
    # Meta-learning inputs: the previous reward and action are fed back to
    # the rnn on every step, so seed them with zeros.
    r = 0
    a = np.array([0,0])
    while i < max_episode_length and d == False:
        s_p = process_frame(s)
        # Take an action using probabilities from policy network output.
        a,v,rnn_state = sess.run([ac_net.sample_a,
                                  ac_net.value,
                                  ac_net.state_out],
                                 feed_dict={ac_net.inputs:[s_p],
                                            ac_net.prev_actions:[a],
                                            ac_net.prev_rewards:[[r]],
                                            ac_net.is_training_ph:False,
                                            ac_net.state_in[0]:rnn_state[0],
                                            ac_net.state_in[1]:rnn_state[1]})
        s,r,d = env_g.step(a)
        sarray.append(s)
        rarray.append(r)
        i += 1
    # Total episode reward.
    print(sum(rarray))
################################
# Animate and Show the Results #
################################
im = plt.imshow(sarray[0],animated=True);
def updatefig(i):
    """Animation callback: display frame *i* of the recorded episode."""
    im.set_data(sarray[i]);
    return im,
anim = animation.FuncAnimation(plt.gcf(), updatefig, frames=len(sarray),
                               interval=75, blit=True)
# anim.save('./frames/a3c_'+str(sum(rarray))+'.mp4')
plt.show()
# HTML(anim.to_html5_video())
# plt.close(fig)
|
import random
from datetime import date
from protorpc import messages
from google.appengine.ext import ndb
class Roll(ndb.Model):
    """Roll object """
    # Datastore fields:
    user = ndb.KeyProperty(required=True, kind='User')    # owning player
    game = ndb.KeyProperty(required=True, kind='Game')    # game this roll belongs to
    dice = ndb.PickleProperty(required=True)              # list of 5 die values (1..6)
    count = ndb.IntegerProperty(required=True, default=0) # rolls taken this turn
    isScored = ndb.BooleanProperty(required=True, default=False)  # already scored?
    @classmethod
    def new_roll(cls, user, game):
        """Creates a new roll for a user"""
        roll = Roll(user=user,
                    game=game)
        # First throw: five fresh dice, each uniform in 1..6.
        roll.dice = []
        for i in range(5):
            value = random.choice(range(1, 7))
            roll.dice.append(value)
            print value  # debug output
        roll.count = 1
        roll.put()
        return roll
    def reroll(self, keepers):
        """Rerolls the dice but keeps dice listed in keepers.
        Keepers array contains 0 or 1 (replace, keep) for each die index."""
        print 'keepers', keepers
        print 'current dice', self.dice
        for i in range(5):
            if keepers[i] == 0:  # 0 means replace this die
                value = random.choice(range(1, 7))
                self.dice[i] = value
                print value  # debug output
        self.count += 1
        self.put()
        game = self.game.get()
        # Create entry for history.
        entry = (self.count, self.dice)
        game.history.append(entry)
        # Save the game history.
        game.put()
        return self.to_form()
    def to_form(self):
        # Serialise this entity into the RPC response message.
        return RollResultForm(urlsafe_key=self.key.urlsafe(),
                              user_name=self.user.get().name,
                              dice=self.dice,
                              count=self.count,
                              isScored=self.isScored
                              )
class RollDiceForm(messages.Message):
    """Used to make a move in an existing game"""
    user_name = messages.StringField(1, required=True)  # player requesting the roll
class RollResultForm(messages.Message):
    """Outbound representation of a Roll entity (see Roll.to_form)."""
    urlsafe_key = messages.StringField(1, required=True)
    user_name = messages.StringField(2, required=True)
    dice = messages.IntegerField(3, repeated=True)   # the five current die values
    count = messages.IntegerField(4, required=True)  # rolls taken this turn
    isScored = messages.BooleanField(5, required=True)
class RerollDiceForm(messages.Message):
    """Used to reroll the dice but keep dice listed in keepers"""
    keepers = messages.IntegerField(1, repeated=True)  # 0 = replace, 1 = keep, per die
class ScoreRollResultForm(messages.Message):
    """Outbound message carrying the score awarded for a roll."""
    score = messages.IntegerField(1, required=True)
|
# messing with histograms
# Author: Andrew Beatty
import numpy as np
import matplotlib.pyplot as plt
'''
#np.random.seed(1)
normData = np.random.normal(size=10000)
plt.hist(normData)
plt.show()
'''
# Pie chart of hard-coded fruit counts; the legend reuses the slice labels.
# NOTE(review): 'Bannana' looks like a typo for 'Banana' in the chart label.
fruit = np.array(['Apples', 'Orange', 'Bannana'])
numbers = np.array([23,77,500])
plt.pie(numbers, labels = fruit)
plt.legend()
plt.show()
|
import numpy as np
import tensorflow as tf
import threading
import h5py
import functools
def hdf5baseGen(filepath, thread_idx, n_threads):
    """Infinite generator over an HDF5 file, round-robin across threads.

    Thread *thread_idx* of *n_threads* yields samples thread_idx,
    thread_idx + n_threads, ... (mod dataset size), each dataset value
    wrapped in a leading batch dimension of 1.

    Bug fix: on Python 3 ``f.keys()`` returns a non-indexable KeysView, so
    ``keys[0]`` raised TypeError; it is now materialised with ``list()``.
    """
    with h5py.File(filepath, 'r') as f:
        keys = list(f.keys())
        nb_data = f[keys[0]].shape[0]
        idx = thread_idx
        while True:
            yield [np.expand_dims(f[key][idx], 0) for key in keys]
            idx = (idx + n_threads) % nb_data
class GeneratorRunner():
    """
    This class manage a multithreaded queue filled with a generator
    """
    def __init__(self, generator, capacity):
        """
        inputs: generator feeding the data, must have thread_idx
            as parameter (but the parameter may be not used)
        """
        self.generator = generator
        # Pull one sample up-front to discover the per-input shapes/dtypes.
        _input = generator(0,1).__next__()
        if type(_input) is not list:
            raise ValueError("generator doesn't return" \
                             "a list: %r" % type(_input))
        input_batch_size = _input[0].shape[0]
        # All inputs must share the same leading (batch) dimension.
        if not all(_input[i].shape[0] == input_batch_size for i in range(len(_input))):
            raise ValueError("all the inputs doesn't have " + \
                             "the same batch size," \
                             "the batch sizes are: %s" % [_input[i].shape[0] \
                                                          for i in range(len(_input))])
        # One placeholder per input; enqueue_many splits the batch dimension.
        self.data = []
        self.dtypes = []
        self.shapes = []
        for i in range(len(_input)):
            self.shapes.append(_input[i].shape[1:])
            self.dtypes.append(_input[i].dtype)
            self.data.append(tf.placeholder(dtype=self.dtypes[i], \
                                            shape=(input_batch_size,) + self.shapes[i]))
        self.queue = tf.FIFOQueue(capacity, shapes=self.shapes, \
                                  dtypes=self.dtypes)
        self.enqueue_op = self.queue.enqueue_many(self.data)
        # cancel_pending_enqueues unblocks producers stuck on a full queue.
        self.close_queue_op = self.queue.close(cancel_pending_enqueues=True)
    def get_batched_inputs(self, batch_size):
        """
        Return tensors containing a batch of generated data
        """
        batch = self.queue.dequeue_many(batch_size)
        return batch
    def thread_main(self, sess, thread_idx=0, n_threads=1):
        # Producer loop: feed every generated sample into the TF queue.
        # RuntimeError / CancelledError are raised when the session or the
        # queue is shut down mid-enqueue — treated as a clean exit.
        try:
            for data in self.generator(thread_idx, n_threads):
                sess.run(self.enqueue_op, feed_dict={i: d \
                                                     for i, d in zip(self.data, data)})
                if self.stop_threads:
                    return
        except RuntimeError:
            pass
        except tf.errors.CancelledError:
            pass
    def start_threads(self, sess, n_threads=1):
        """Spawn *n_threads* daemon producer threads; returns the Thread list."""
        self.stop_threads = False
        self.threads = []
        for n in range(n_threads):
            t = threading.Thread(target=self.thread_main, args=(sess, n, n_threads))
            t.daemon = True  # never block interpreter shutdown
            t.start()
            self.threads.append(t)
        return self.threads
    def stop_runner(self, sess):
        """Signal producers to stop and close the queue (unblocking them)."""
        self.stop_threads = True
        # j = 0
        # while np.any([t.is_alive() for t in self.threads]):
        #     j += 1
        #     if j % 100 = 0:
        #         print [t.is_alive() for t in self.threads]
        sess.run(self.close_queue_op)
def queueSelection(runners, sel, batch_size):
    """Dequeue a batch from the queue of runners[sel] (graph-level switch).

    NOTE(review): ``from_list`` is inherited from tf.QueueBase; all runner
    queues must share dtypes and shapes — confirm for the runners passed in.
    """
    selection_queue = tf.FIFOQueue.from_list(sel, [r.queue for r in runners])
    return selection_queue.dequeue_many(batch_size)
def doubleQueue(runner1, runner2, is_runner1, batch_size1, batch_size2):
    """Conditional dequeue: pull batch_size1 items from runner1's queue when
    is_runner1 is true, otherwise batch_size2 items from runner2's queue."""
    dequeue_first = lambda: runner1.queue.dequeue_many(batch_size1)
    dequeue_second = lambda: runner2.queue.dequeue_many(batch_size2)
    return tf.cond(is_runner1, dequeue_first, dequeue_second)
if __name__ == '__main__':
    # Demo: two runners (train/valid) fed by a random-data generator, with a
    # graph-level switch between them.
    def randomGen(img_size, enqueue_batch_size, thread_idx, n_threads):
        """Endless generator of random 1-channel image batches with labels.

        Bug fix: the original used ``return`` inside the loop, so randomGen
        was an ordinary function and GeneratorRunner's
        ``generator(0, 1).__next__()`` probe would have failed.
        """
        while True:
            batch_of_1_channel_imgs = np.random.rand(enqueue_batch_size, \
                                                     img_size, img_size, 1)
            batch_of_labels = np.random.randint(0,11,enqueue_batch_size)
            yield [batch_of_1_channel_imgs, batch_of_labels]
    TRAIN_BATCH_SIZE = 64
    VALID_BATCH_SIZE = 10
    # Bug fixes: `functool` was a typo for the imported `functools`, and the
    # pre-bound arguments must be passed individually, not as one tuple.
    train_runner = GeneratorRunner(functools.partial(randomGen, 128, 10), \
                                   TRAIN_BATCH_SIZE * 10)
    valid_runner = GeneratorRunner(functools.partial(randomGen, 128, 10), \
                                   VALID_BATCH_SIZE * 10)
    is_training = tf.Variable(True)
    batch_size = tf.Variable(TRAIN_BATCH_SIZE)
    # Graph ops toggling between train/valid source and batch size.
    enable_training_op = tf.group(tf.assign(is_training, True), \
                                  tf.assign(batch_size, TRAIN_BATCH_SIZE))
    disable_training_op = tf.group(tf.assign(is_training, False), \
                                   tf.assign(batch_size, VALID_BATCH_SIZE))
    img_batch, label_batch = queueSelection([valid_runner, train_runner], \
                                            tf.cast(is_training, tf.int32), \
                                            batch_size)
    # img_batch, label_batch = doubleQueue(train_runner, valid_runner, \
    #                                      is_training, TRAIN_BATCH_SIZE, \
    #                                      VALID_BATCH_SIZE)
|
#-*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans #导入K均值聚类算法
# Input/output paths on drive F:. The non-ASCII names are Chinese:
# 用电模式 = "electricity-usage patterns", 用户基本信息 = "basic user info",
# 电压等级 = "voltage level", 用电模式聚类结果 = "usage-pattern clustering result".
model = u'F:/data/用电模式.csv'
user_info = u'F:/data/用户基本信息.xls'
user_info_1 = u'F:/data/电压等级1.xls'
user_info_2 = u'F:/data/电压等级2.xls'
user_info_3 = u'F:/data/电压等级3.xls'
user_info_4 = u'F:/data/电压等级4.xls'
user_info_5 = u'F:/data/电压等级5.xls'
user_info_data = pd.read_excel(user_info)
# Keep only the consumer id and voltage-level columns.
user_info_data = user_info_data[['CONS_NO',u'电压等级']]
kmeans_result = u'F:/data/用电模式聚类结果.csv'
model_data = pd.read_csv(model)
# model_data = model_data.as_matrix()
# k = 8
if __name__ == '__main__':
    cons_no = model_data['CONS_NO']
    model_data['total'] = 0
    # Sum all monthly-usage columns into 'total'.
    # NOTE(review): the loop iterates over *every* column, including CONS_NO
    # and the freshly added 'total' itself (which doubles the running sum
    # when the loop reaches it) — confirm this is the intended behaviour.
    for i in model_data.columns:
        model_data['total'] = model_data['total'] + model_data[i]
    print model_data.head()
    # --- exploratory code kept commented out for reference ---
    # model_data_merge = pd.merge(model_data,user_info_data,on=['CONS_NO'],how='left')
    # model_data_1 = model_data[model_data[u'电压等级'] == 1]
    # model_data_2 = model_data[model_data[u'电压等级'] == 2]
    # model_data_3 = model_data[model_data[u'电压等级'] == 3]
    # model_data_4 = model_data[model_data[u'电压等级'] == 4]
    # model_data_5 = model_data[model_data[u'电压等级'] == 5]
    # del model_data_5[u'电压等级']
    # del model_data_5[u'CONS_NO']
    # model_data_5 = model_data_5.as_matrix()
    # print model_data[u'电压等级'].value_counts()
    # kmodel = KMeans(n_clusters=k, n_jobs=5)  # n_jobs = parallel workers; usually best equal to CPU count
    # kmodel.fit(model_data)  # train the model
    #
    # print kmodel.cluster_centers_  # inspect the cluster centres
    # print kmodel.labels_  # inspect each sample's cluster label
    # k = pd.Series(kmodel.labels_)
    # print k.value_counts()
    # pd.DataFrame(k,columns=[u'类别']).to_csv(kmeans_result,encoding='gbk')
    # x = [201401,201402,201403,201404,201405,201406,201407, 201408, 201409, 201410, 201411, 201412, 201501, 201502, 201503, 201504, 201505, 201506, 201507, 201508,
    #      201509, 201510, 201511, 201512,
    #      201601, 201602, 201603, 201604, 201605, 201606]
    # for i in range(len(kmodel.labels_)):
    #     if kmodel.labels_[i] == 1:
    #         plt.plot(x, model_data[i], 'r', linewidth=2)
    # for i in model_data_5:
    #     print i
    #     plt.plot(i,'r', linewidth=2)
    # plt.show()
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 23:06:55 2019
@author: Abdussamet
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Customer data; columns 3+ hold the numeric features used for clustering.
veriler = pd.read_csv("musteriler.csv")
X = veriler.iloc[:, 3:].values
#K-Means
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 3, init = "k-means++")
kmeans.fit(X)
print(kmeans.cluster_centers_)  # print the fitted cluster centres
sonuclar = []
# Elbow method: fit k = 1..10 and record each model's WCSS (inertia).
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = "k-means++", random_state = 123)  # fixed seed so results don't change between runs
    # random_state can be any value; what matters is every iteration starting from the same seed
    kmeans.fit(X)
    sonuclar.append(kmeans.inertia_)  # collect the WCSS values
plt.plot(range(1, 11), sonuclar)  # plot WCSS for k = 1..10
plt.show()
# Final model with the chosen k = 4; one scatter colour per cluster.
kmeans = KMeans(n_clusters = 4, init = "k-means++", random_state = 123)
Y_tahmin = kmeans.fit_predict(X)
print(Y_tahmin)
plt.scatter(X[Y_tahmin==0, 0], X[Y_tahmin==0, 1], s=100, color="red")
plt.scatter(X[Y_tahmin==1, 0], X[Y_tahmin==1, 1], s=100, color="blue")
plt.scatter(X[Y_tahmin==2, 0], X[Y_tahmin==2, 1], s=100, color="green")
plt.scatter(X[Y_tahmin==3, 0], X[Y_tahmin==3, 1], s=100, color="yellow")
plt.title("KMeans")
plt.show()
#HC: agglomerative (hierarchical) clustering with Ward linkage
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters= 4, affinity = "euclidean", linkage= "ward")
Y_tahmin = ac.fit_predict(X)
print(Y_tahmin)
plt.scatter(X[Y_tahmin==0, 0], X[Y_tahmin==0, 1], s=100, color="red")
plt.scatter(X[Y_tahmin==1, 0], X[Y_tahmin==1, 1], s=100, color="blue")
plt.scatter(X[Y_tahmin==2, 0], X[Y_tahmin==2, 1], s=100, color="green")
plt.scatter(X[Y_tahmin==3, 0], X[Y_tahmin==3, 1], s=100, color="yellow")
plt.title("HC")
plt.show()
# Dendrogram of the Ward linkage to inspect the cluster hierarchy.
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = "ward"))
plt.show()
|
from gi.repository import Gtk, Pango
import math, re
SPACING = 6
def format_size(bytes):
    """Return a human-readable size string ('512 B', '2.00 KiB', ...)."""
    # +1 avoids log(0); the +.05 fudge rounds values just under a power
    # of 1024 up to the next unit.
    magnitude = int(math.log(bytes + 1, 1024) + .05)
    if magnitude == 0:
        return str(bytes) + ' B'
    return '%.2f %siB' % (bytes / 1024.0 ** magnitude, 'KMGT'[magnitude - 1])
ENTITIES = dict(nbsp=' ',amp='&',gt='>',lt='<',quot='"',apos="'")
def _decode_entity(match):
e = match.group(1)
if e.startswith('#x') or e.startswith('#X'): return chr(int(e[2:], 16))
if e.startswith('#'): return chr(int(e[1:]))
return ENTITIES.get(e, '&'+e+';')
def decode_entities(text):
    """Return *text* with HTML/XML entity references replaced by characters."""
    return re.sub('&([^;]*);', _decode_entity, text)
def NamedTreeStore(**params):
    """Build a Gtk.TreeStore subclass whose columns are addressable by name.

    Keyword arguments map column names to their column types.  The returned
    class exposes each column's index as a class attribute of the same name
    and adds an append_named() helper taking column values as keywords.
    """
    names = list(params)  # column order follows the keyword order
    class NamedTreeStore(Gtk.TreeStore):
        def __init__(self):
            Gtk.TreeStore.__init__(self, *params.values())
        def append_named(self, parent, **params):
            # Reorder the keyword values into positional column order.
            return self.append(parent, tuple(params[n] for n in names))
    # Expose each column index as a class attribute (e.g. Store.title == 0).
    for col, name in enumerate(names):
        setattr(NamedTreeStore, name, col)
    return NamedTreeStore
def scrolled(widget):
    """Wrap *widget* in a Gtk.ScrolledWindow with an inset border."""
    window = Gtk.ScrolledWindow()
    window.add(widget)
    window.set_shadow_type(Gtk.ShadowType.IN)
    return window
def textrenderer(width=None, align=None):
    """Create a Gtk.CellRendererText.

    When *width* is given the cell is capped at that many characters and
    ellipsized; *align* sets the horizontal alignment (0.0 left .. 1.0 right).
    """
    cell = Gtk.CellRendererText()
    if width is not None:
        cell.props.ellipsize = Pango.EllipsizeMode.END
        cell.props.width_chars = width
    if align is not None:
        cell.props.xalign = align
    return cell
def column(label, renderer, column, sortcolumn=None):
    """Build a resizable, sortable Gtk.TreeViewColumn bound to model column
    *column*; sorting uses *sortcolumn* when given, else *column* itself."""
    view_column = Gtk.TreeViewColumn(label, renderer, text=column)
    view_column.set_resizable(True)
    if sortcolumn is None:
        view_column.set_sort_column_id(column)
    else:
        view_column.set_sort_column_id(sortcolumn)
    return view_column
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" qp solver """
from typing import Optional, Tuple
import logging
import numpy as np
logger = logging.getLogger(__name__)
_HAS_CVXOPT = False
try:
from cvxopt import matrix, solvers
_HAS_CVXOPT = True
except ImportError:
logger.info('CVXOPT is not installed. See http://cvxopt.org/install/index.html')
def optimize_svm(kernel_matrix: np.ndarray,
                 y: np.ndarray,
                 scaling: Optional[float] = None,
                 max_iters: int = 500,
                 show_progress: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Solving quadratic programming problem for SVM; thus, some constraints are fixed.
    The notation is follows the equation here:
    http://cvxopt.org/userguide/coneprog.html#quadratic-programming
    Args:
        kernel_matrix: NxN array
        y: Nx1 array of labels
        scaling: the scaling factor to renormalize the `y`; if it is None,
            a default normalization factor is derived (see NOTE in the body)
        max_iters: number of iterations for QP solver
        show_progress: showing the progress of QP solver
    Returns:
        np.ndarray: Nx array of alpha coefficients (scaled back)
        np.ndarray: 1-element array holding the bias b
        np.ndarray: Nx boolean array marking which samples are supports
    Raises:
        NameError: CVXOPT not installed.
    """
    # pylint: disable=invalid-name
    if not _HAS_CVXOPT:
        raise NameError('CVXOPT is not installed. See http://cvxopt.org/install/index.html')
    # Ensure y is a column vector (N, 1).
    if y.ndim == 1:
        y = y[:, np.newaxis]
    # Quadratic term: H_ij = y_i * y_j * K_ij
    H = np.outer(y, y) * kernel_matrix
    f = -np.ones(y.shape)
    if scaling is None:
        # NOTE(review): this computes sum(|f|) == N (the L1 norm of f),
        # not the L2 norm of y as older docs suggested -- confirm intent.
        scaling = np.sum(np.sqrt(f * f))
    f /= scaling
    tolerance = 1e-2  # alpha values above this threshold count as supports
    n = kernel_matrix.shape[1]
    # Cone-program layout: minimize (1/2) x^T P x + q^T x
    #   subject to G x <= h  (alpha >= 0)  and  A x = b  (y^T alpha = 0).
    P = matrix(H)
    q = matrix(f)
    G = matrix(-np.eye(n))
    h = matrix(np.zeros(n))
    A = matrix(y, y.T.shape)
    b = matrix(np.zeros(1), (1, 1))
    solvers.options['maxiters'] = max_iters
    solvers.options['show_progress'] = show_progress
    ret = solvers.qp(P, q, G, h, A, b, kktsolver='ldl')
    # Undo the earlier normalization of f.
    alpha = np.asarray(ret['x']) * scaling
    # Bias: average of (y - sum_j alpha_j y_j K) over all samples.
    avg_y = np.sum(y)
    avg_mat = (alpha * y).T.dot(kernel_matrix.dot(np.ones(y.shape)))
    b = (avg_y - avg_mat) / n
    support = alpha > tolerance
    logger.debug('Solving QP problem is completed.')
    return alpha.flatten(), b.flatten(), support.flatten()
|
from textblob import TextBlob as tb
from textblob.classifiers import PositiveNaiveBayesClassifier
# 1. Parts of Speech Tagging using TextBlob Package
Sentance = tb("My Name is Shah Ayub Quadri, I am from India Hyderabad, Facinated by Deep Learning, CNN, RNN & NLP . This is my first attempt for using TextBlob Package")
POS= Sentance.tags
print(POS)
# 2. Sentiment Analysis
# function -> text.sentiment
# result -> returns a named tuple (polarity, subjectivity)
# polarity -> ranges over [-1.0, 1.0] (negative .. positive)
# subjectivity -> ranges over [0.0, 1.0]
#   0.0 -> very objective
#   1.0 -> very subjective
review = tb("The product release was effective, and the over all release was good as well as smooth")
SENT = review.sentiment
print(SENT)
# 3. Classification: train a positive-naive-Bayes classifier with sports
# sentences as the positive set and mixed sentences as unlabeled data.
sports_sentences = ['The team dominated the game','They lost the ball','The game was intense','The goalkeeper catched the ball','The other team controlled the ball']
various_sentences = ['The President did not comment','I lost the keys','The Game was Bad','The team won the game','Sara has two kids','The ball went off the court','The show is over']
classifier = PositiveNaiveBayesClassifier(positive_set=sports_sentences, unlabeled_set=various_sentences)
print(classifier.classify("My team lost the game"))
print(classifier.classify("The Game was "))
import numpy as np
import matplotlib.pyplot as plt
def rule_output(bits):
    """Map a (left, center, right) neighborhood to its rule-table index.

    np.binary_repr lists rule outputs for neighborhood values 7..0, so the
    index into that table is 7 minus the neighborhood's binary value.
    """
    left, center, right = bits
    return 7 - (4 * left + 2 * center + right)
def wolfram(init_state, rule_decimal, n):
    """Compute n states of elementary cellular automaton *rule_decimal*.

    Starts from init_state and uses periodic (wrap-around) boundaries.
    Returns an (n, len(init_state)) int8 array, one state per row.
    """
    rule = np.array([int(bit) for bit in np.binary_repr(rule_decimal, 8)])
    automaton = np.zeros((n, len(init_state)), dtype=np.int8)
    automaton[0, :] = init_state
    for i in range(1, n):
        prev_state = automaton[i - 1, :]
        # Vectorized neighborhood lookup: equivalent to applying
        # rule_output column-wise, but avoids the slow per-column
        # np.apply_along_axis call.
        idx = 7 - (4 * np.roll(prev_state, 1) + 2 * prev_state + np.roll(prev_state, -1))
        automaton[i, :] = rule[idx]
    return automaton
def draw_cells(rule, state_len, n_states, initial=None, single=False):
    """Display a visualization of the automaton for *rule*.

    By default the initial state is random; with single=True a single live
    cell in the middle is used; an explicit *initial* array overrides both.
    """
    if initial is None:
        if single:
            initial = np.zeros(state_len, dtype=np.int8)
            initial[int(np.round(state_len / 2))] = 1
            title_add = 'Single cell initialization'
        else:
            # Bug fix: honor state_len instead of the hard-coded 300.
            initial = np.random.randint(0, 2, state_len)
            title_add = 'Random initialization'
    else:
        title_add = 'Custom initialization'
    cells = wolfram(initial, rule, n_states)
    plt.rcParams['image.cmap'] = 'binary'
    fig, ax = plt.subplots(figsize=(16, 6))
    ax.set_title(f'Cellular automaton - rule {rule} ({title_add})')
    ax.set_xlabel('Bits')
    ax.set_ylabel('State')
    ax.matshow(cells)
    plt.show()
# Two random runs, then a single-seed run of rule 105.
draw_cells(105, 300, 100)
draw_cells(105, 300, 100)
draw_cells(105, 300, 100, single=True)
# Custom initial states: every 2nd cell alive, and every 10th cell alive.
init_custom1 = np.zeros(300)
init_custom2 = np.zeros(300)
init_custom1[::2] = 1
init_custom2[::10] = 1
draw_cells(105, 300, 100, init_custom1)
draw_cells(105, 300, 100, init_custom2)
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ChunkedUpload
# Uploads larger files to Dropbox in multiple chunks, and offers a way to resume if an upload gets interrupted.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ChunkedUpload(Choreography):
    """Choreo wrapper: uploads large files to Dropbox in resumable chunks."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the ChunkedUpload Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ChunkedUpload, self).__init__(temboo_session, '/Library/Dropbox/FilesAndMetadata/ChunkedUpload')
    def new_input_set(self):
        # Factory for the input container used to set this Choreo's parameters.
        return ChunkedUploadInputSet()
    def _make_result_set(self, result, path):
        return ChunkedUploadResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return ChunkedUploadChoreographyExecution(session, exec_id, path)
class ChunkedUploadInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ChunkedUpload
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    NOTE(review): this looks like Temboo SDK boilerplate, presumably
    auto-generated -- edit with care.
    """
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(ChunkedUploadInputSet, self)._set_input('AccessTokenSecret', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(ChunkedUploadInputSet, self)._set_input('AccessToken', value)
    def set_AppKey(self, value):
        """
        Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Dropbox (AKA the OAuth Consumer Key).)
        """
        super(ChunkedUploadInputSet, self)._set_input('AppKey', value)
    def set_AppSecret(self, value):
        """
        Set the value of the AppSecret input for this Choreo. ((required, string) The App Secret provided by Dropbox (AKA the OAuth Consumer Secret).)
        """
        super(ChunkedUploadInputSet, self)._set_input('AppSecret', value)
    def set_Chunk(self, value):
        """
        Set the value of the Chunk input for this Choreo. ((conditional, string) A Base64 encoded chunk of data from the file being uploaded. If resuming and upload, the chunk should begin at the number of bytes into the file that equals the NextOffset.)
        """
        super(ChunkedUploadInputSet, self)._set_input('Chunk', value)
    def set_Offset(self, value):
        """
        Set the value of the Offset input for this Choreo. ((conditional, string) The byte offset of this chunk, relative to the beginning of the full file. This is not required when uploading the first chunk of a file.)
        """
        super(ChunkedUploadInputSet, self)._set_input('Offset', value)
    def set_UploadID(self, value):
        """
        Set the value of the UploadID input for this Choreo. ((conditional, string) The ID of the upload session returned after uploading the initial file chunk. This is not required when uploading the first chunk of a file. This value is returned in the UploadSessionID output.)
        """
        super(ChunkedUploadInputSet, self)._set_input('UploadID', value)
class ChunkedUploadResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ChunkedUpload Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects.  (The parameter name
        # shadows the builtin `str`; kept for SDK compatibility.)
        return json.loads(str)
    def get_Expires(self):
        """
        Retrieve the value for the "Expires" output from this Choreo execution. ((string) The expiration time of the upload.)
        """
        return self._output.get('Expires', None)
    def get_NextOffset(self):
        """
        Retrieve the value for the "NextOffset" output from this Choreo execution. ((string) The current byte offset that the server will expect. This value can be passed to the Offset input on subsequent requests when uploading chunks repeatedly.)
        """
        return self._output.get('NextOffset', None)
    def get_UploadSessionID(self):
        """
        Retrieve the value for the "UploadSessionID" output from this Choreo execution. ((string) The upload ID returned after uploading an initial file chunk. This can be passed to the UploadID input for uploading subsequent chunks, and finally to the CommitChunkedUpload Choreo.)
        """
        return self._output.get('UploadSessionID', None)
class ChunkedUploadChoreographyExecution(ChoreographyExecution):
    """Execution handle for a ChunkedUpload Choreo run."""
    def _make_result_set(self, response, path):
        return ChunkedUploadResultSet(response, path)
|
# Torrentleech credentials.
username = ""
password = ""
# Pushover API credentials.
Pushover_token = ""
Pushover_user = ''
# Accumulated comments.
Comments = []
random_list = list(range(3, 21))
|
#!/usr/bin/env python
""" Build the Areas and Divisions page for the website """
import dbconn, tmparms, xlrd, csv
import os, sys, urllib2
from simpleclub import Club
from tmutil import overrideClubs, removeSuspendedClubs
class Division():
    """A district division; holds its areas and (optionally) its director."""
    # Registry of all divisions, keyed by division name.
    divisions = {}
    @classmethod
    def find(self, division):
        """Return the Division named *division*, creating it on first use."""
        # '0D' is the pseudo-division used for not-yet-aligned clubs.
        if division == '0D':
            division = 'New'
        if division not in self.divisions:
            self.divisions[division] = Division(division)
        return self.divisions[division]
    def __init__(self, division):
        self.areas = {}        # Area objects keyed by area name
        self.name = division
        self.director = None   # Director, filled in from the officers sheet
    def addarea(self, area):
        self.areas[area.name] = area
    def __repr__(self):
        res = []
        if self.director:
            res.append("""%s""" % self.director.__repr__())
        for a in sorted(self.areas):
            res.append(self.areas[a].__repr__())
        return '\n'.join(res)
    def html(self):
        """Return the HTML table fragment for this division and its areas."""
        res = []
        res.append('<p>{tab %s|align_center|alias:%s}</p>' % (self.name.upper(), self.name))
        res.append('<table class="table1">\n  <tbody>\n')
        res.append('    <tr><th style="background-color: #f2df74;" colspan="2"><strong>Division %s</strong></th></tr>' % self.name.upper())
        if self.director:
            res.append('    %s' % self.director.html())
        else:
            res.append('<tr><td></td><td>Division Director Position is Vacant</td><tr>')
        for a in sorted(self.areas):
            res.append('    %s' % self.areas[a].html())
        res.append('  </tbody>\n</table>')
        return '\n'.join(res)
class Area():
    """A district area; belongs to a Division and holds its member clubs."""
    # Registry of all areas, keyed by division+area (e.g. 'A1').
    areas = {}
    @classmethod
    def find(self, division, area):
        """Return the Area for division+area, creating it on first use."""
        name = division + area
        if name not in self.areas:
            self.areas[name] = Area(division, area)
        return self.areas[name]
    def __init__(self, division, area):
        self.parent = Division.find(division)  # registers with the division below
        self.clubs = []
        self.name = division + area
        self.director = None   # Director, filled in from the officers sheet
        self.division = division
        self.area = area
        self.parent.addarea(self)
    def addclub(self, club):
        self.clubs.append(club)
    def __repr__(self):
        res = []
        if self.director:
            res.append("""  %s""" % self.director.__repr__())
        elif self.parent.director:
            # Division director acts when the area position is vacant.
            res.append("""  *** Acting: %s """ % self.parent.director.__repr__())
        else:
            res.append("""  Area Director Position is Vacant""")
        # Sort clubs numerically via zero-padded club number.
        for c in sorted(self.clubs, key=lambda x:x.clubnumber.zfill(8)):
            res.append("""    %s: %s %s""" % (c.clubnumber, c.clubname, c.getLink()))
        return '\n'.join(res)
    def html(self):
        """Return the HTML rows for this area ('' for the unassigned '0A' area)."""
        if self.area == '0A':
            return ''
        res = []
        res.append('<tr><td style="background-color: #f2df74;" colspan="2"><strong>Area %s</strong></td></tr>' % self.name)
        if self.director:
            res.append(self.director.html())
        elif self.parent.director:
            res.append(self.parent.director.html(isacting=True))
        else:
            res.append('<tr><td></td><td>Area Director Position is Vacant</td></tr>')
        for c in sorted(self.clubs, key=lambda x:x.clubnumber.zfill(8)):
            res.append('<tr><td align="right">%s</td><td><a href="%s" target="_blank">%s</a></td></tr>' % (c.clubnumber, c.getLink(), c.clubname))
        return '\n'.join(res)
class Director():
    """An Area or Division Director; registers itself with the matching
    Division or Area when constructed."""
    def __init__(self, position, first, last, email):
        # *position* is e.g. 'Division A' or 'Area A1'.
        part = position.split()
        if part[0] == 'Division':
            division = part[1]
            Division.find(division).director = self
        elif part[0] == 'Area':
            # 'A1' -> division 'A', area '1'.
            area = part[1][1]
            division = part[1][0]
            Area.find(division, area).director = self
        self.first = first
        self.last = last
        self.email = email
        self.position = part[0] + ' ' + part[1] + ' Director'
    def html(self, isacting=False):
        """Return the table row for this director; isacting marks a division
        director covering a vacant area."""
        return """<tr>
        <td align="right"><a href="mailto:%s" target="_blank">Email</a></td>
        <td>%s%s %s %s</td>
        </tr>
        """ % ( self.email, '<strong>Acting: </strong>' if isacting else '',self.position, self.first, self.last)
    def __repr__(self):
        return "%s %s %s: %s" % (self.position, self.first, self.last, self.email)
# Make it easy to run under TextMate
if 'TM_DIRECTORY' in os.environ:
    os.chdir(os.path.join(os.environ['TM_DIRECTORY'],'data'))
# Get around unicode problems
# NOTE(review): reload(sys).setdefaultencoding is Python 2 only; this
# script will not run under Python 3 (it also uses urllib2 above).
reload(sys).setdefaultencoding('utf8')
parms = tmparms.tmparms(description=__doc__)
parms.add_argument('--outfile', dest='outfile', default='areasanddivisions.html')
parms.add_argument('--newAlignment', dest='newAlignment', default=None, help='Overrides area/division data from the CLUBS table.')
parms.add_argument('--officers', dest='officers', help='URL of the CSV export form of a Google Spreadsheet with Area/Division Directors')
parms.parse()
# Connect to the database
conn = dbconn.dbconn(parms.dbhost, parms.dbuser, parms.dbpass, parms.dbname)
curs = conn.cursor()
# Get all clubs
clubs = Club.getClubsOn(curs)
if parms.newAlignment:
    overrideClubs(clubs, parms.newAlignment)
# Remove suspended clubs
clubs = removeSuspendedClubs(clubs, curs)
# Now, assign clubs to Areas and Divisions
for c in sorted(clubs):
    club = clubs[c]
    Area.find(club.division, club.area).addclub(club)
# OK, now we have the club info. Let's get the Area Director/Division Director information.
officers = urllib2.urlopen(parms.officers)
reader = csv.DictReader(officers)
for row in reader:
    # Collapse internal runs of whitespace and trim each cell.
    for k in row:
        row[k] = ' '.join(row[k].split()).strip()
    if row['Title'] and row['First']:
        # Constructing a Director registers it with its Division/Area.
        Director(row['Title'], row['First'], row['Last'], row['Email'])
# And now we go through the Divisions and Areas and build the output.
outfile = open(parms.outfile, 'w')
for d in sorted(Division.divisions):
    # 'New' holds unaligned clubs and is not rendered.
    if d.lower() != 'new':
        div = Division.divisions[d]
        outfile.write(div.html())
        outfile.write('\n')
|
from musicGame import MainGame
if __name__ == "__main__":
    # Launch the game only when run as a script, not on import.
    game = MainGame()
    game.start()
|
# from pkg_resources import resource_string
import fconfig.loader as loader
from fconfig.config_data_object import ConfigDataObject
TEST_CONFIG_PACKAGE = "fconfig.test.resources"
def load_test_config_file(*test_filename: str) -> ConfigDataObject:
    """Load the named test-resource config files and merge them into one
    ConfigDataObject (later files override earlier ones)."""
    contents = [
        loader.get_config_content_from_resource(TEST_CONFIG_PACKAGE, name)
        for name in test_filename
    ]
    return loader.load_config_data(*contents)
|
class TicTacToe:
    """Two-player tic-tac-toe on a 3x3 board; 'X' always moves first."""
    def __init__(self):
        # None marks an empty space.
        self.board = [[None, None, None], [None, None, None], [None, None, None]]
        self.turn = 'X'
    def mark_space(self, row, column):
        """Place the current player's mark at (row, column).

        Returns "Invalid Move" for an out-of-range or occupied space
        (and leaves the turn unchanged); returns None on success.
        """
        # Bug fix: valid indices are 0..2; the old check allowed 3 and
        # raised IndexError instead of reporting an invalid move.
        if 0 <= row <= 2 and 0 <= column <= 2 and self.board[row][column] is None:
            self.board[row][column] = self.turn
            self.turn = 'O' if self.turn == 'X' else 'X'
        else:
            return "Invalid Move"
    def get_winner(self):
        """Print the outcome: '<mark> is the winner!', 'Ongoing', or 'Tie'."""
        # Collect every winnable line in the same order the original
        # checked them: rows, columns, main diagonal, anti-diagonal.
        lines = [row[:] for row in self.board]
        for col in range(3):
            lines.append([self.board[r][col] for r in range(3)])
        lines.append([self.board[d][d] for d in range(3)])
        lines.append([self.board[d][-d - 1] for d in range(3)])
        for line in lines:
            if line.count('X') == 3 or line.count('O') == 3:
                return print(line[0], 'is the winner!')
        if any(None in row for row in self.board):
            return print('Ongoing')
        return print('Tie')
    def __str__(self):
        rows = ['{} | {} | {}'.format(*row) for row in self.board]
        return '\n-----------\n'.join(rows) + '\n'
# Play a scripted nine-move game (ends in a tie) and report the result.
# Fix: the original instantiated TicTacToe twice, discarding the first
# object, and repeated mark_space nine times.
ttt = TicTacToe()
for row, column in [(0, 0), (0, 1), (1, 0), (2, 0), (1, 1),
                    (2, 2), (2, 1), (1, 2), (0, 2)]:
    ttt.mark_space(row, column)
ttt.get_winner()
|
import os
from bs4 import BeautifulSoup
def read_input_file(path):
    """Return the full contents of the file at *path* as one string."""
    # with-statement guarantees the handle is closed even if read() raises.
    with open(path) as f:
        return f.read()
def read_input_filelines(path):
    """Return the contents of the file at *path* as a list of lines."""
    # with-statement guarantees the handle is closed even if readlines() raises.
    with open(path) as f:
        return f.readlines()
def html_parser_p(index_list):
    '''Extract every <p> tag from each file in html_index/ and write the
    tag list (its str() form) to the same filename under html_parsed/.'''
    for title in index_list:
        content = read_input_file("html_index/{0}".format(title))
        soup = BeautifulSoup(content, "html.parser")
        data = list(soup.find_all("p"))
        # with-statement closes the output even if parsing/writing raises.
        with open("html_parsed/{0}".format(title), "w") as outfile:
            outfile.write(str(data))
    return
def html_to_text(index_list):
    """Strip markup from each file in html_parsed/ and write the plain text,
    roughly one sentence per line, to txt_files/<title>.txt."""
    for title in index_list:
        content = read_input_file("html_parsed/{0}".format(title))
        soup = BeautifulSoup(content, "html.parser")
        text = soup.get_text()
        # Break the text so that (roughly) each sentence gets its own line.
        text = text.replace('., ', '.\n')
        text = text.replace('. ', '.\n')
        with open("txt_files/{0}.txt".format(title), "w") as outfile:
            outfile.writelines(text)
    return
def search_for_food(index_list):
    """Copy every line from txt_files/ that mentions a food item (listed in
    food.txt) into search_match/matched<title>; delete empty outputs."""
    foods = [food.strip() for food in read_input_filelines("food.txt")]
    for title in index_list:
        lines = read_input_filelines("txt_files/{0}".format(title))
        outfile_name = "search_match/matched{0}".format(title)
        with open(outfile_name, "w") as outfile:
            # TODO: a line matching several foods is written once per match.
            for food in foods:
                for line in lines:
                    if food in line:
                        outfile.write(line)
        # Drop output files that matched nothing.
        if os.path.getsize(outfile_name) == 0:
            os.remove(outfile_name)
    return
def compile_text(index_list):
    """Concatenate all files in search_match/ into compiled.txt, prefixing
    each row with a running index and the row's length."""
    contents = []
    for title in index_list:
        if title == '.DS_Store':   # skip macOS metadata files
            continue
        contents.append(read_input_filelines("search_match/{0}".format(title)))
    with open("compiled.txt", "w") as outfile:
        i = 0
        for lines in contents:
            for row in lines:
                outfile.writelines('{0:>4} {1:>6} {2}'.format(i, len(row), row))
                i += 1
    return
def scrub_dir():
    """Delete all generated files from the working directories and remove a
    stale compiled.txt if present."""
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/html_parsed/"):
        os.remove('html_parsed/{0}'.format(title))
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/txt_files/"):
        os.remove('txt_files/{0}'.format(title))
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/search_match/"):
        os.remove('search_match/{0}'.format(title))
    # Bug fix: the original used os._exists('compiled.txt') (a private os
    # helper that checks for an *os-module attribute*, so it was always
    # False) and the remove target was misspelled 'complied.txt'.
    if os.path.exists('compiled.txt'):
        os.remove('compiled.txt')
    return
# Pipeline driver.  The project root was repeated in every call; hoist it.
BASE_DIR = "/Users/patrickeells/PycharmProjects/1q84"
scrub_dir()
# Parse all <p> tags out of the raw pages into html_parsed/.
html_parser_p(os.listdir("{0}/html_index/".format(BASE_DIR)))
# Put sentences on individual lines in txt_files/.
html_to_text(os.listdir("{0}/html_parsed/".format(BASE_DIR)))
# Search for the items listed in food.txt.
search_for_food(os.listdir("{0}/txt_files/".format(BASE_DIR)))
# Merge all matches into compiled.txt.
compile_text(os.listdir("{0}/search_match/".format(BASE_DIR)))
#coding= utf-8
'''
Created on 2012-1-30
@author: Lv9
'''
# Environment variables can be read through os.environ.
import os
# Use .get() so a missing variable prints None instead of raising KeyError.
# NOTE: these lower-case names are typically only set on Windows, where
# environment lookups are case-insensitive.
print(os.environ.get('path'))
print(os.environ.get('java_home'))
print(os.environ.get('class_path'))
# os.environ can also be modified; changes affect the current Python
# process and any child processes it creates:
# os.environ['FOO'] = 'BAR'
|
import numpy as np
import theano
import theano.tensor as T
import pickle
# compute accuracy
# NOTE(review): this file uses Python 2 syntax (xrange, print statements);
# it will not run under Python 3.
def compute_accuracy(y_target, y_predict):
    # Fraction of predictions that equal the targets.
    correct_prediction = np.equal(y_target, y_predict)
    accuracy = 1.0*np.sum(correct_prediction)/len(correct_prediction)
    return accuracy
# fake data: N random feature vectors with random binary labels
N = 400 # training sample size
feats = 784 # number of input variables
D = (np.random.randn(N, feats), np.random.randint(size = N, low = 0, high = 2))
# declare variables
x = T.dmatrix('x')
y = T.dvector('y')
# construct graph: logistic regression, sigmoid(x.W + b)
W = theano.shared(np.random.randn(feats), name = 'W')
b = theano.shared(0.1, name = 'b')
Wx_plus_b = T.dot(x, W) + b
activation_func = T.nnet.sigmoid(Wx_plus_b)
ans = activation_func > 0.5
# binary cross-entropy loss
cross_thropy = -y * T.log(activation_func) - (1-y) * T.log((1-activation_func))
# cost = cross_thropy.mean()
cost = cross_thropy.mean() + 0.1 * (W ** 2).sum() # L2 regularization
gW, gb = T.grad(cost, [W, b])
# compile: one gradient-descent step per train() call
training_rate = 0.1
train = theano.function(inputs = [x, y],
                        outputs = [ans, cost],
                        updates = [(W, W - gW * training_rate),
                                   (b, b - gb * training_rate)])
predict = theano.function(inputs = [x], outputs = ans)
# training: report cost and accuracy every 50 steps
for i in xrange(1000):
    pre, err = train(D[0], D[1])
    if i % 50 == 0:
        print err
        print compute_accuracy(D[1], predict(D[0]))
# save model (note: `file` shadows the Python 2 builtin of the same name)
with open('./model/classification_model.pickle','wb') as file:
    model = [W.get_value(), b.get_value()]
    pickle.dump(model, file)
    print W.get_value()[:10]
# load model
with open('./model/classification_model.pickle','rb') as file:
    model = pickle.load(file)
    W.set_value(model[0])
    b.set_value(model[1])
    print W.get_value()[:10]
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from properties_config import properties_config
import sys
import os
from copy import deepcopy
def getDirectoriesToCreate( dirs ):
    """For each path in *dirs*, list every missing intermediate directory
    followed by the path itself (the path may therefore appear twice when
    it does not yet exist, matching the original behavior)."""
    result = []
    for path in dirs:
        partial = ''
        for piece in path.split('/')[1:]:
            partial += '/' + piece
            if not os.path.isdir(partial):
                result.append(partial)
        result.append(path)
    return result
def cassandra():
    """Create Cassandra's runtime directories and render cassandra.yaml."""
    import params_dirs
    Directory([params_dirs.log_dir, params_dirs.pid_dir, params_dirs.conf_dir],
              owner=params_dirs.cassandra_user,
              group=params_dirs.user_group
    )
    # Intermediate parameters used to derive data_file_directories:
    #   master_nodes_ips             <-- 'none' or comma separated values
    #   master_data_file_directories <-- comma separated directories for data on the master(s)
    #   slave_data_file_directories  <-- comma separated directories for data on the slave(s)
    # Default to the slave directories (the most likely case).
    dirs = params_dirs.slave_data_file_directories.split(',')
    if params_dirs.master_nodes_ips != 'none':
        mni = params_dirs.master_nodes_ips.split(',')
        if params_dirs.listen_address in mni:
            # This host is one of the masters: use the master directories.
            dirs = params_dirs.master_data_file_directories.split(',')
    Directory(getDirectoriesToCreate(dirs), owner=params_dirs.cassandra_user, group=params_dirs.user_group)
    File(format("{conf_dir}/cassandra.yaml"),
         content=Template(
             "cassandra.master.yaml.j2",
             configurations = params_dirs),
         owner=params_dirs.cassandra_user,
         group=params_dirs.user_group
    )
|
"""
Reading and writing of pmd files for ASE Atoms object.
"""
import numpy as np
from ase.constraints import FixScaled
def get_atom_conf_txt(atoms,specorder=[]):
    """Serialize an ASE Atoms object into pmd atom-configuration text.

    Args:
        atoms: ASE Atoms object to serialize.
        specorder: ordering of chemical species; when empty it is derived
            as the sorted unique symbols of *atoms*.

    Returns:
        str: the pmd-format configuration text.
    """
    # NOTE(review): the [] default is a shared mutable default; it is only
    # read here, never mutated, so behavior is safe, but callers should
    # pass their own list.
    if not specorder:
        specorder = uniq(atoms.get_chemical_symbols())
        specorder.sort()
    #...specorder info as comment lines
    txt= '!\n'
    txt+='!  specorder  '
    for s in specorder:
        txt += ' {0:s}'.format(s)
    txt += '\n'
    txt += '!\n'
    # no lattice constant in ASE
    txt+='   1.00000  \n'
    # cell vectors
    cell= atoms.get_cell()
    # Axis lengths, used below to scale velocities to fractional units.
    a = np.linalg.norm(cell[0,:])
    b = np.linalg.norm(cell[1,:])
    c = np.linalg.norm(cell[2,:])
    txt += ' {0:12.7f}'.format(cell[0,0]) \
           +' {0:12.7f}'.format(cell[0,1]) \
           +' {0:12.7f}\n'.format(cell[0,2])
    txt += ' {0:12.7f}'.format(cell[1,0]) \
           +' {0:12.7f}'.format(cell[1,1]) \
           +' {0:12.7f}\n'.format(cell[1,2])
    txt += ' {0:12.7f}'.format(cell[2,0]) \
           +' {0:12.7f}'.format(cell[2,1]) \
           +' {0:12.7f}\n'.format(cell[2,2])
    # Three zero vectors (cell-velocity placeholders in the pmd format).
    txt += ' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(0.0,0.0,0.0)
    txt += ' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(0.0,0.0,0.0)
    txt += ' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(0.0,0.0,0.0)
    # num of atoms
    txt += ' {0:10d}\n'.format(len(atoms))
    # extract unique constraints from atoms.constraints
    fmvs,ifmvs = get_fmvs(atoms)
    # atom positions
    spos = atoms.get_scaled_positions()
    vels = atoms.get_velocities()
    # Atoms without velocities get zeros.
    if np.size(vels) != 3*len(atoms):
        vels = np.zeros((len(atoms),3))
    for i in range(len(atoms)):
        atom= atoms[i]
        ifmv = ifmvs[i]
        txt += ' {0:s}'.format(get_tag(specorder,atom.symbol,i+1,ifmv))
        #...Scaled positions
        txt += ' {0:23.14e} {1:23.14e} {2:23.14e}'.format(spos[i,0],
                                                          spos[i,1],
                                                          spos[i,2])
        #...Scaled velocities
        txt += ' {0:15.7e} {1:15.7e} {2:15.7e}\n'.format(vels[i,0]/a,
                                                         vels[i,1]/b,
                                                         vels[i,2]/c)
    return txt
def get_tag(specorder,symbol,atom_id,ifmv):
    """Encode species id (integer part), ifmv flag (first decimal) and atom
    id (1e-14 offset) into a single pmd tag string."""
    species_id = specorder.index(symbol) + 1
    tag_value = species_id + ifmv * 0.1 + atom_id * 1e-14
    return '{0:16.14f}'.format(tag_value)
def decode_tag(tag,specorder):
    """Inverse of get_tag: recover (sid, symbol, ifmv, atom_id) from *tag*."""
    species_id = int(tag)
    ifmv = int((tag - species_id) * 10)
    atom_id = int((tag - species_id - ifmv * 0.1) * 1e+14)
    return species_id, specorder[species_id - 1], ifmv, atom_id
def constraint2fmv(constraint):
    """Convert a 3-element fixed-axis mask into pmd fmv factors
    (0.0 = axis fixed, 1.0 = axis mobile)."""
    return [0.0 if constraint[axis] else 1.0 for axis in range(3)]
def fmv2constraint(fmv):
    """Convert pmd fmv factors back into a fixed-axis boolean mask
    (an axis is fixed when its factor is (near) zero)."""
    return [fmv[axis] < 0.01 for axis in range(3)]
def get_fmvs(atoms):
    """
    Extract unique constraints from atoms.constraints and
    return fmvs and ifmvs.

    Returns:
        fmvs: list of length-3 arrays (0.0 = axis fixed, 1.0 = mobile),
            one per unique constraint mask; index 0 is "fully mobile".
        ifmvs: per-atom 1-based index into fmvs.
    """
    constraints = [[False, False, False]]   # index 0 <-> ifmv 1 (no constraint)
    ifmvs = np.ones(len(atoms), dtype=int)
    if atoms.constraints:
        for cnst in atoms.constraints:
            if isinstance(cnst, FixScaled):
                mask = cnst.mask
                for i, c in enumerate(constraints):
                    if all(mask == c):
                        ifmvs[cnst.a] = i + 1
                        break
                else:
                    # Bug fix: register a new mask only when no existing one
                    # matched.  Previously these two lines ran after the loop
                    # unconditionally, so every constraint was re-appended and
                    # its atom's ifmv overwritten even on a match.
                    constraints.append(mask)
                    ifmvs[cnst.a] = len(constraints)
    #...Convert constraint masks to fmv factors
    fmvs = []
    for c in constraints:
        fmv = np.array((1.0, 1.0, 1.0))
        for axis in range(3):
            if c[axis]:
                fmv[axis] = 0.0
        fmvs.append(fmv)
    return fmvs,ifmvs
def uniq(lst):
    """Return *lst* with duplicates removed, keeping first-seen order.

    Uses a list scan (not a set) so unhashable elements are supported.
    """
    seen = []
    for item in lst:
        if item not in seen:
            seen.append(item)
    return seen
|
import tkinter as tk
from PIL import ImageTk,Image
class SubjectPage:
    """Tkinter window presenting the subject menu."""
    def __init__(self):
        # Window
        self.subjectMenu = tk.Tk()
        self.subjectMenu.title('Subject Menu')
        self.subjectMenu.geometry('800x600')
        self.subjectMenu.config(bg="#2E3441")
        # Header
        self.frame_blueheader = tk.Frame(self.subjectMenu)
        self.frame_blueheader.config(bg="#2E3441")
        self.frame_blueheader.grid(row=0, column=1)
        # Bug fix: columnconfigure belongs to the Tk window; calling it on
        # this plain class raised AttributeError.
        self.subjectMenu.columnconfigure(1, minsize=800)
        # Middle section
        self.frame_mid = tk.Frame(self.subjectMenu)
        canvasImage = tk.Canvas(self.frame_mid, width=200, height=200)
        canvasImage.grid(row=1, column=0, padx=5, pady=5)
        # Bug fix: keep image references on self -- Tkinter does not hold a
        # reference to PhotoImage objects, so locals get garbage-collected
        # and the images disappear from the widgets.
        self.img = ImageTk.PhotoImage(Image.open("cb.jpeg"))
        canvasImage.create_image(20, 20, anchor="nw", image=self.img)
        # Feedback button
        self.img_feedback = tk.PhotoImage(file=r"C:\Users\Mbe\PycharmProjects\MARKBot_Project\feedback.png")
        # Bug fix: grid() returns None; keep the Button itself and grid it
        # in a separate call so the attribute holds the widget.
        self.button_feedback = tk.Button(self.frame_mid, image=self.img_feedback)
        self.button_feedback.grid(row=2, column=2)
|
from Rate import *
class test(object):
    """Trivial holder used by main(); the initializer just zeroes ``name``."""

    def __init__(self):
        # Placeholder default; callers overwrite as needed.
        self.name = 0
def main():
    """Smoke test: build a Rate and echo its instrument field."""
    # print() calls are valid on both Python 2 and 3; the original
    # `print "hello"` statement is a SyntaxError on Python 3.
    print("hello")
    rate = Rate("instrument", "time", "1", "2")
    print(rate.instrument)

if __name__ == '__main__':
    main()
"""
Settings for DEMO_MODE.
Must set DEMO_MODE = True in local_settings.py.
"""
# Views that are visible in demo mode.
# Only the dotted view paths listed here are reachable while DEMO_MODE is on;
# presumably everything else is blocked by demo-mode middleware — confirm
# against the middleware that consumes this setting.
DEMO_SAFE_VIEWS = [
    # Auth: allow logging out and logging into the shared demo account.
    'django.contrib.auth.views.logout',
    'main.demo_view_overrides.login_demo_account',
    # Template XHR fragments (list/filter controls).
    'main.template_xhrs.alignment_controls',
    'main.template_xhrs.alignment_list_controls',
    'main.template_xhrs.reference_genome_list_controls',
    'main.template_xhrs.sample_list_controls',
    'main.template_xhrs.variant_filter_controls',
    'main.template_xhrs.variant_set_list_controls',
    # Read-only page views.
    'main.views.alignment_list_view',
    'main.views.alignment_view',
    'main.views.compile_jbrowse_and_redirect',
    'main.views.home_view',
    'main.views.project_list_view',
    'main.views.project_view',
    'main.views.reference_genome_list_view',
    'main.views.reference_genome_view',
    'main.views.sample_alignment_error_view',
    'main.views.sample_list_view',
    'main.views.tab_root_analyze',
    'main.views.variant_set_list_view',
    'main.views.variant_set_view',
    # Contig controls plus XHR data endpoints (fetches, exports, refresh).
    'main.template_xhrs.contig_list_controls',
    'main.xhr_handlers.contigs_download',
    'main.xhr_handlers.contigs_export_all',
    'main.xhr_handlers.get_alignment_groups',
    'main.xhr_handlers.get_contigs',
    'main.xhr_handlers.get_gene_list',
    'main.xhr_handlers.get_ref_genomes',
    'main.xhr_handlers.get_samples',
    'main.xhr_handlers.get_single_ref_genome',
    'main.xhr_handlers.get_variant_list',
    'main.xhr_handlers.get_variant_set_list',
    'main.xhr_handlers.is_materialized_view_valid',
    'main.xhr_handlers.refresh_materialized_variant_table',
]
|
# 문제 설명
# 프로그래머스 팀에서는 기능 개선 작업을 수행 중입니다. 각 기능은 진도가 100%일 때 서비스에 반영할 수 있습니다.
#
# 또, 각 기능의 개발속도는 모두 다르기 때문에 뒤에 있는 기능이 앞에 있는 기능보다 먼저 개발될 수 있고,
# 이때 뒤에 있는 기능은 앞에 있는 기능이 배포될 때 함께 배포됩니다.
#
# 먼저 배포되어야 하는 순서대로 작업의 진도가 적힌 정수 배열 progresses와 각 작업의 개발 속도가 적힌 정수 배열
# speeds가 주어질 각 배포마다 몇 개의 기능이 배포되는지를 return 하도록 solution 함수를 완성하세요.
#
# 제한 사항
# 작업의 개수(progresses, speeds배열의 길이)는 100개 이하입니다.
# 작업 진도는 100 미만의 자연수입니다.
# 작업 속도는 100 이하의 자연수입니다.
# 배포는 하루에 한 번만 할 수 있으며, 하루의 끝에 이루어진다고 가정합니다. 예를 들어 진도율이 95%인 작업의 개발 속도가 하루에 4%라면 배포는 2일 뒤에 이루어집니다.
# 입출력 예
# progresses speeds return
# [93,30,55] [1,30,5] [2,1]
# 입출력 예 설명
# 첫 번째 기능은 93% 완료되어 있고 하루에 1%씩 작업이 가능하므로 7일간 작업 후 배포가 가능합니다.
# 두 번째 기능은 30%가 완료되어 있고 하루에 30%씩 작업이 가능하므로 3일간 작업 후 배포가 가능합니다.
# 하지만 이전 첫 번째 기능이 아직 완성된 상태가 아니기 때문에 첫 번째 기능이 배포되는 7일째 배포됩니다.
# 세 번째 기능은 55%가 완료되어 있고 하루에 5%씩 작업이 가능하므로 9일간 작업 후 배포가 가능합니다.
#
# 따라서 7일째에 2개의 기능, 9일째에 1개의 기능이 배포됩니다.
def solution(progresses, speeds):
    """Group feature deployments by release day.

    Task i finishes ceil((100 - progresses[i]) / speeds[i]) days from now,
    but cannot ship before every task ahead of it has shipped; tasks that
    finish while waiting ship together. Returns the number of tasks
    released in each deployment.

    Improvements over the original: debug prints removed, the input lists
    are no longer mutated, and the O(n^2) repeated del(list[0]) is replaced
    by a single O(n) pass over precomputed completion days.
    """
    if not progresses:
        return []
    # Days until each task reaches 100% (ceil division via negated floor div).
    days = [-((progress - 100) // speed)
            for progress, speed in zip(progresses, speeds)]
    answer = []
    release_day = days[0]
    batch = 0
    for d in days:
        if d <= release_day:
            # Finished while waiting on the blocking task: ships with it.
            batch += 1
        else:
            answer.append(batch)
            release_day = d
            batch = 1
    answer.append(batch)
    return answer
# Self-check harness: run each fixture through solution() and report
# pass/fail per case.
arr1 = [[93, 30, 55], [15, 1, 14, 9, 29, 25, 17, 24, 4, 27, 7, 19, 29, 14, 23, 4, 21, 3, 8, 14], [40, 93, 30, 55, 60, 65]]
arr2 = [[1, 30, 5], [9, 11, 21, 16, 11, 21, 7, 5, 6, 30, 11, 24, 26, 18, 20, 18, 15, 30, 7, 18], [60, 1, 30, 5, 10, 7]]
return_list = [[2, 1], [6, 1, 13], [1, 2, 3]]
for case_no, (progress, speed, expected) in enumerate(zip(arr1, arr2, return_list), start=1):
    verdict = 'pass' if solution(progress, speed) == expected else 'fail'
    print('case {} {} --------------'.format(str(case_no), verdict))
# 26 min |
import json
class BiophysParams(dict):
    """Dict of biophysical parameters with dotted-path overrides.

    Keys of the form 'genome.<section>.<name>[.<mechanism>]' present at
    construction time are popped and applied as overrides to the matching
    record in the 'genome' list.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self._update_parameter_overrides()

    def _update_parameter_overrides(self):
        # Iterate over a snapshot of the keys: popping while iterating the
        # live keys() view raises RuntimeError on Python 3.
        for key in list(self.keys()):
            if key.startswith('genome.'):
                val = self.pop(key)
                self.update_nested(key, val)

    def get_nested(self, prop_string):
        """Return the value of the record addressed by a dotted path.

        Raises ValueError when zero or multiple records match.
        """
        group, section, name, mechanism = self._split_prop_string(prop_string)
        prop_records = [rec for rec in self[group]
                        if rec['name'] == name and rec['section'] == section]
        if len(prop_records) == 1:
            return prop_records[0]['value']
        elif len(prop_records) > 1:
            raise ValueError('Multiple records found for dynamics parameter {}'.format(prop_string))
        else:
            raise ValueError('No records found for dynamics parameter {}'.format(prop_string))

    def update_nested(self, prop_string, value):
        """Set the value of the record addressed by a dotted path.

        If no record matches and the path carries a mechanism, a new record
        is appended to the 'genome' group; otherwise a missing match raises.
        """
        group, section, name, mechanism = self._split_prop_string(prop_string)
        prop_records = [rec for rec in self[group]
                        if rec['name'] == name and rec['section'] == section]
        if len(prop_records) == 1:
            prop_records[0]['value'] = value
        elif len(prop_records) > 1:
            raise ValueError('Multiple records found for dynamics parameter {}'.format(prop_string))
        elif mechanism:
            prop_record = {'section': section,
                           'name': name,
                           'value': value,
                           'mechanism': mechanism}
            self['genome'].append(prop_record)
        else:
            raise KeyError('Error updating dynamics parameter {}'.format(prop_string))

    @classmethod
    def from_json(cls, params_path):
        """Build a BiophysParams from a JSON file (closing the file properly)."""
        with open(params_path, 'r') as f:
            return cls(json.load(f))

    @staticmethod
    def _split_prop_string(prop_string):
        # 'group.section.name' or 'group.section.name.mechanism'
        path_list = prop_string.split('.')
        group, section, name = path_list[0:3]
        mechanism = path_list[-1] if len(path_list) == 4 else None
        return group, section, name, mechanism
from django import forms
from django.db import models
from django.shortcuts import reverse
from django.utils.http import urlencode
from django.utils.text import slugify
from django.utils.translation import gettext as _
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.panels import (
FieldPanel,
FieldRowPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
TabbedInterface,
)
from wagtail.fields import RichTextField, StreamField
from wagtail.models import Orderable
from wagtail.snippets.models import register_snippet
from rca.programmes.models import ProgrammePage
from rca.utils.blocks import CallToActionBlock, SnippetChooserBlock, StepBlock
from rca.utils.filter import TabStyleFilter
from rca.utils.models import BasePage, ContactFieldsMixin, SluggedTaxonomy
from .blocks import ScholarshipsListingPageBlock
from .filters import ProgrammeTabStyleFilter
class ScholarshipFeeStatus(SluggedTaxonomy):
    # Taxonomy term for a fee status, selectable on Scholarship records.
    panels = [
        FieldPanel("title"),
    ]
    class Meta:
        ordering = ("title",)
        verbose_name_plural = "Scholarship Fee Statuses"
class ScholarshipFunding(SluggedTaxonomy):
    # Taxonomy term for a funding category, selectable on Scholarship records.
    panels = [
        FieldPanel("title"),
    ]
    class Meta:
        ordering = ("title",)
class ScholarshipLocation(SluggedTaxonomy):
    # Taxonomy term for a scholarship location; referenced by
    # Scholarship.location below.
    panels = [
        FieldPanel("title"),
    ]
    class Meta:
        ordering = ("title",)
class ScholarshipEligibilityCriteria(SluggedTaxonomy):
    # Taxonomy term for an eligibility criterion; used as checkboxes on the
    # enquiry form submission below.
    panels = [
        FieldPanel("title"),
    ]
    class Meta:
        ordering = ("title",)
@register_snippet
class Scholarship(models.Model):
    # A single scholarship award, managed as a Wagtail snippet.
    title = models.CharField(max_length=50)
    # presumably toggles visibility in listings — confirm against callers
    active = models.BooleanField(default=True)
    summary = models.CharField(max_length=255)
    # Free-text award amount/description, not a numeric field.
    value = models.CharField(max_length=100)
    location = models.ForeignKey(
        ScholarshipLocation, null=True, on_delete=models.SET_NULL
    )
    # NOTE(review): 'eligable' is a misspelling of 'eligible'; renaming the
    # field would require a migration plus query/template updates.
    eligable_programmes = models.ManyToManyField(ProgrammePage)
    funding_categories = models.ManyToManyField(ScholarshipFunding)
    fee_statuses = models.ManyToManyField(ScholarshipFeeStatus)
    class Meta:
        ordering = ("title",)
    def __str__(self) -> str:
        # Append the location, when set, to disambiguate admin lists.
        text = self.title
        if self.location:
            text += f" ({self.location})"
        return text
class ScholarshipsListingPage(ContactFieldsMixin, BasePage):
    """Singleton listing page (max_count = 1) for scholarships, with
    tab-style filtering by programme and location plus a separate settings
    tab for the enquiry form."""
    template = "patterns/pages/scholarships/scholarships_listing_page.html"
    max_count = 1
    introduction = models.CharField(max_length=500, blank=True)
    body = StreamField(ScholarshipsListingPageBlock(), use_json_field=True)
    # Scholarship listing fields
    scholarship_listing_title = models.CharField(
        max_length=50, verbose_name="Listing Title"
    )
    scholarship_listing_sub_title = models.CharField(
        blank=True, max_length=100, verbose_name="Listing Subtitle"
    )
    scholarship_application_steps = StreamField(
        [
            ("step", StepBlock()),
            ("step_snippet", SnippetChooserBlock("utils.StepSnippet")),
        ],
        blank=True,
        verbose_name="Application Steps",
        use_json_field=True,
    )
    characteristics_disclaimer = models.CharField(
        max_length=250,
        blank=True,
        help_text="A small disclaimer shown just above the scholarships listing.",
    )
    lower_body = StreamField(ScholarshipsListingPageBlock(), use_json_field=True)
    # Scholarship form fields
    key_details = RichTextField(features=["h3", "bold", "italic", "link"], blank=True)
    form_introduction = models.CharField(max_length=500, blank=True)
    cta_block = StreamField(
        [("call_to_action", CallToActionBlock(label="text promo"))],
        blank=True,
        verbose_name="Call to action",
        use_json_field=True,
    )
    content_panels = BasePage.content_panels + [
        FieldPanel("introduction"),
        FieldPanel("body"),
        MultiFieldPanel(
            [
                FieldPanel("scholarship_listing_title"),
                FieldPanel("scholarship_listing_sub_title"),
                FieldPanel("scholarship_application_steps"),
                FieldPanel("characteristics_disclaimer"),
            ],
            heading="Scholarship listing",
        ),
        FieldPanel("lower_body"),
        MultiFieldPanel([*ContactFieldsMixin.panels], heading="Contact information"),
    ]
    # NOTE(review): 'pannels' is a typo for 'panels'; it is referenced in
    # edit_handler below, so a rename must touch both places.
    form_settings_pannels = [
        FieldPanel("key_details"),
        FieldPanel("form_introduction"),
        FieldPanel("cta_block"),
    ]
    edit_handler = TabbedInterface(
        [
            ObjectList(content_panels, heading="Content"),
            ObjectList(BasePage.promote_panels, heading="Promote"),
            ObjectList(BasePage.settings_panels, heading="Settings"),
            ObjectList(form_settings_pannels, heading="Enquiry form settings"),
        ]
    )
    @property
    def show_interest_bar(self):
        # Always show the sticky 'express interest' bar on this page type.
        return True
    @property
    def show_interest_link(self):
        return True
    def anchor_nav(self):
        """Build list of data to be used as in-page navigation"""
        items = []
        def process_block(block):
            # Each anchor_heading block becomes a same-page link target.
            if block.block_type == "anchor_heading":
                items.append({"title": block.value, "link": f"#{slugify(block.value)}"})
        for block in self.body:
            process_block(block)
        # insert link for hardcoded "Scholarships for YYYY/YY" heading
        items.append(
            {
                "title": self.scholarship_listing_title,
                "link": "#scholarship-listing-title",
            }
        )
        for block in self.lower_body:
            process_block(block)
        return items
    def get_context(self, request, *args, **kwargs):
        """Add filters, the filtered scholarship results and the sticky
        interest-bar link to the template context."""
        context = super().get_context(request, *args, **kwargs)
        programme = None
        results = []
        queryset = Scholarship.objects.prefetch_related(
            "eligable_programmes", "funding_categories", "fee_statuses"
        )
        # Tab-style filters whose options are limited to values in use.
        filters = (
            ProgrammeTabStyleFilter(
                "Programme",
                queryset=(
                    ProgrammePage.objects.filter(
                        id__in=queryset.values_list(
                            "eligable_programmes__id", flat=True
                        )
                    ).live()
                ),
                filter_by="eligable_programmes__slug__in",
                option_value_field="slug",
            ),
            TabStyleFilter(
                "Location",
                queryset=(
                    ScholarshipLocation.objects.filter(
                        id__in=queryset.values_list("location_id", flat=True)
                    )
                ),
                filter_by="location__slug__in",
                option_value_field="slug",
            ),
        )
        # Results are only built once the visitor has filtered the listing.
        if "programme" in request.GET or "location" in request.GET:
            # Apply filters
            for f in filters:
                queryset = f.apply(queryset, request.GET)
            # Format scholarships for template
            results = [
                {
                    "value": {
                        "heading": s.title,
                        "introduction": s.summary,
                        "eligible_programmes": ", ".join(
                            str(x) for x in s.eligable_programmes.live()
                        ),
                        "funding_categories": ", ".join(
                            x.title for x in s.funding_categories.all()
                        ),
                        "fee_statuses": ", ".join(
                            x.title for x in s.fee_statuses.all()
                        ),
                        "value": s.value,
                    }
                }
                for s in queryset
            ]
            # Template needs the programme for title and slug
            # NOTE(review): the broad `except Exception` swallows a missing
            # 'programme' GET key and a bad/duplicate slug alike; narrowing
            # to (KeyError, ProgrammePage.DoesNotExist) would be safer.
            try:
                programme = ProgrammePage.objects.get(slug=request.GET["programme"])
            except Exception:
                pass
        # Create the link for the sticky CTA
        interest_bar_link = reverse("scholarships:scholarship_enquiry_form")
        if programme:
            interest_bar_link += f"?{urlencode({'programme': programme.slug})}"
        context.update(
            anchor_nav=self.anchor_nav(),
            filters={
                "title": _("Filter by"),
                "aria_label": "Filter results",
                "items": filters,
            },
            interest_bar={
                "action": _("Express interest"),
                "link": interest_bar_link,
                "message": _("Hold an offer and want to apply for these scholarships?"),
                "link_same_page": True,
            },
            programme=programme,
            results=results,
        )
        return context
class ScholarshipEnquiryFormSubmissionScholarshipOrderable(Orderable):
    # Ordered through-model linking an enquiry submission to one chosen
    # scholarship.
    scholarship_submission = ParentalKey(
        "scholarships.ScholarshipEnquiryFormSubmission",
        related_name="scholarship_submission_scholarships",
    )
    scholarship = models.ForeignKey(
        "scholarships.Scholarship",
        on_delete=models.CASCADE,
    )
    panels = [
        FieldPanel("scholarship"),
    ]
class ScholarshipEnquiryFormSubmission(ClusterableModel):
    # One visitor's enquiry about one or more scholarships.
    submission_date = models.DateTimeField(blank=True, null=True, auto_now_add=True)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField()
    rca_id_number = models.CharField(max_length=100)
    programme = models.ForeignKey(
        "programmes.ProgrammePage",
        on_delete=models.CASCADE,
    )
    eligibility_criteria = models.ManyToManyField(
        ScholarshipEligibilityCriteria, blank=True
    )
    # Consent flags captured with the submission.
    is_read_data_protection_policy = models.BooleanField()
    is_notification_opt_in = models.BooleanField()
    panels = [
        MultiFieldPanel(
            [
                FieldRowPanel(
                    [
                        FieldPanel("first_name", classname="fn"),
                        FieldPanel("last_name", classname="ln"),
                    ]
                ),
                FieldPanel("rca_id_number"),
            ],
            heading="User details",
        ),
        FieldPanel("programme"),
        InlinePanel("scholarship_submission_scholarships", label="Scholarship"),
        FieldPanel("eligibility_criteria", widget=forms.CheckboxSelectMultiple),
        MultiFieldPanel(
            [
                FieldPanel("is_read_data_protection_policy"),
                FieldPanel("is_notification_opt_in"),
            ],
            heading="Legal & newsletter",
        ),
    ]
    def __str__(self):
        return f"{self.first_name} {self.last_name} - {self.rca_id_number}"
    def get_scholarships(self):
        # Unwrap the orderable through-model to plain Scholarship instances.
        return [s.scholarship for s in self.scholarship_submission_scholarships.all()]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 27 23:56:03 2018
@author: praveen
"""
import random
import logging
import re
from action_handlers.session import *
from action_handlers.activity import do_activity
from response_generators.response import *
from response_generators.messages import *
from models.common import *
from models.activities import *
from models.mock import sample_announcements, sample_homeworks, sample_lessons, sample_courses
def slot_filler(session, request):
    """Prompt the user to pick a course, suggesting the known subjects."""
    prompt = random.choice(COURSE_SELECT)
    subjects = [name.lower().replace('_', ' ')
                for name in sample_courses.courses_subject_dict.keys()]
    # Echo the incoming contexts straight back out with the response.
    contexts = list(request.get('queryResult').get('outputContexts'))
    return (Response(prompt)
            .text(prompt)
            .setOutputContexts(contexts)
            .suggestions(subjects)
            .build())
def list_all(session, request):
    """List the lessons of the course matching the requested subject/grade.

    Responds with a single card when the course has one lesson, a selection
    list when it has several, and an error/empty message otherwise.
    """
    error_text = 'Error, course not found'
    subject = request.get('queryResult').get('parameters').get('subject')
    grade = request.get('queryResult').get('parameters').get('grade') #TODO use students current grade
    # Fall back to grade 8 when the request carries no grade.
    grade = grade if grade is not None else 8
    logging.info('subject=%s, grade=%d', subject, grade)
    if subject is None:
        return Response(error_text).text(error_text).build()
    else:
        courses_by_subject=sample_courses.courses_subject_dict.get(subject)
        if courses_by_subject is None or len(courses_by_subject)==0:
            return Response(error_text).text(error_text).build()
        else:
            logging.debug('Courses with subject - '+str(courses_by_subject))
            # Narrow to the requested grade; only the first matching course's
            # lessons are listed below.
            courseIds=[c.id for c in courses_by_subject if c.grade==grade]
            logging.debug('Courses with subject and grade - '+str(courseIds))
            if len(courseIds)==0:
                return Response(error_text).text(error_text).build()
            select_cards = []
            lessons = sample_lessons.courses_id_dict.get(courseIds[0])
            if lessons is not None:
                if len(lessons)==1:
                    # Exactly one lesson: show it as a single card.
                    response_text = random.choice(LESSON_SELECT)
                    context = OutputContext(session, OUT_CONTEXT_LESSON, type=OUT_CONTEXT_LESSON)
                    return Response(response_text).text(response_text).card(Card(lessons[0].name, lessons[0].description, imageUri=lessons[0].imageUri)).outputContext(context).build()
                elif len(lessons)>1:
                    # Several lessons: offer a selection list.
                    for lesson in lessons:
                        card=Item('lesson '+lesson.id, lesson.name, lesson.description, imageUri=lesson.imageUri)
                        select_cards.append(card)
                    response_text = random.choice(LESSON_SELECT)
                    context = OutputContext(session, OUT_CONTEXT_LESSON, type=OUT_CONTEXT_LESSON)
                    return Response(response_text).text(response_text).select(response_text, select_cards).outputContext(context).build()
            response_text='No lessons'
            return Response(response_text).text(response_text).build()
def select_id(session, request):
    """Resolve which lesson the user picked (via the option context or the
    id parameter) and offer its activities, running the activity directly
    when there is exactly one."""
    error_text = 'Error, lesson not found'
    lessonId = None
    # A previous list selection arrives through the actions_intent_option
    # context as an OPTION value of the form 'lesson <id>'.
    for context in request.get('queryResult').get('outputContexts'):
        if context.get('name').endswith('actions_intent_option') and context.get('parameters').get('OPTION') is not None:
            option_value = context.get('parameters').get('OPTION')
            # Strip the 'lesson ' prefix when present.
            lessonId = option_value[option_value.startswith('lesson') and len('lesson')+1:]
            logging.info('lesson=%s', lessonId)
    if lessonId is None:
        if request.get('queryResult').get('parameters').get('id') is not None:
            lessonId = request.get('queryResult').get('parameters').get('id')
        else:
            return Response(error_text).text(error_text).build()
    logging.info('lesson=%s', lessonId)
    ## duplicated in input.py
    response_text = random.choice(LESSON_ACTIVITY_SELECT)
    context = OutputContext(session, OUT_CONTEXT_LESSON_ACTIVITY, type=OUT_CONTEXT_LESSON_ACTIVITY)
    select_cards = []
    lesson = sample_lessons.lesson_id_dict.get(lessonId)
    if lesson is not None:
        if len(lesson.materials) == 1:
            # A single activity: start it immediately instead of asking.
            return do_activity(session, lesson.materials[0]).build()
        else:
            for m in lesson.materials:
                logging.debug('activity type=%s, %s', type(m), m)
                # All supported material types render the same card, so one
                # isinstance check with a tuple replaces the original five
                # identical elif branches.
                if isinstance(m, (Video, Text, Link, Audio, Quiz)):
                    select_cards.append(Item(id='activity '+m.id, title=m.title, imageUri=m.imageUri))
    return Response(response_text).text(response_text).select(response_text, select_cards).outputContext(context).build()
def select(session, request):
    """Find an activity matching the requested lesson name/type and run it."""
    params = request.get('queryResult').get('parameters')
    lessonType = params.get('LessonType')
    lessonName = params.get('LessonName')
    context = OutputContext(session, OUT_CONTEXT_LESSON_ACTIVITY, type=OUT_CONTEXT_LESSON_ACTIVITY)
    activity = search(lessonName, lessonType)
    if not activity:
        # Runtime strings preserved verbatim (including the 'realted' typo).
        return Response("Sorry no lesson found.").text(
            "You asked {0} from lesson {1} but we didn't find anything realted".format(lessonType, lessonName)
        ).outputContext(context).build()
    return do_activity(session, activity).build()
def findActivity(lessonName, lessonType):
    """Return the first material of the named lesson matching lessonType,
    or None when the type is unknown or absent.

    Fix: dict.has_key() was removed in Python 3 (this file's shebang is
    python3); membership is tested with the `in` operator instead.
    """
    lesson = sample_lessons.lesson_name_dict[lessonName.upper()]
    if lessonType.lower() in sample_lessons.activity_typeDict:
        for activity in lesson.materials:
            if isinstance(activity, sample_lessons.activity_typeDict[lessonType.lower()]):
                return activity
    return None
def search(lessonName, lessonType):
    """Fuzzy-find the lesson whose name shares the most words with
    lessonName, then return its activity of the requested type.

    Returns None when no lesson name matches any query word (the original
    crashed with ValueError from max() on an empty dict in that case).
    """
    words = lessonName.split(' ')
    lessons = sample_lessons.lessons
    matches = []
    # str.split always yields at least one element, so the original
    # `if len(words) > 0` guard was redundant.
    for word in words:
        regex = re.compile(".*({0}).*".format(word.lower()))
        # Lesson names (lower-cased) matching this word, skipping names
        # already recorded for an earlier word.
        found = [match.group(0)
                 for lesson in lessons
                 for match in [regex.search(lesson.name.lower())]
                 if match and not any(lsn == lesson.name.lower() for lsn in matches)]
        matches.extend(found)
    # Score each candidate by how many query words it contains.
    mostMatches = dict()
    for match in matches:
        mostMatches[match] = [word.lower() in match.lower() for word in words].count(True)
    if not mostMatches:
        return None
    mostMatched = max(mostMatches, key=mostMatches.get)
    return findActivity(mostMatched, lessonType)
|
# Read an area in square metres and print it converted to hectares
# (1 hectare = 10,000 m2, hence the 0.0001 factor).
m2 = float ( input ( "Insira o espaço em metros quadrados:" ))
hectares = m2 * 0.0001
print ( hectares )
#!/usr/bin/python
# encoding:utf-8
"""
@author:zhangyajing
contact:hebeizhangyajing@126.com
@file: dictdemo.py
@time:2016/7/31 20:24
"""
# NOTE: the Python 2-only print statements were converted to print() calls,
# which run on both Python 2 and Python 3; comments translated to English.

# Lists are mutable, hence unhashable:
# x = [1, 2, 3, 4, 5]
# print(hash(x))

# Strings are immutable, hence hashable.
y = '1,2,3,4,5'
print(hash(y))

# Create a dict object.
z = {'name': 'zhang', 'age': 30, 'sex': 'm'}
print(z)
print(z['name'])
# A missing key raises KeyError:
# print(z['loc'])
# Membership test for a key.
print('loc' in z)
for (k, v) in z.items():
    print(k + '=' + str(v))  # coerce the value to str for concatenation
print(z.keys())
print(z.values())
# get() returns the default (None here) for a missing key instead of raising.
print(z.get('loc', None))
# setdefault() inserts 'beijing' under 'loc' if absent and returns the value.
print(z.setdefault('loc', 'beijing'))
# update() merges m into z, overwriting values of duplicate keys; returns None.
m = {'name': 'li', 'def': 2}
z.update(m)
print(z)
# clear() empties the dict in place and returns None (so this prints None).
print(z.clear())
|
# a form field for selecting a party, e.g. in the 'invitation for...' field on a production form
from django import forms
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
from submit_button_field import SubmitButtonInput
from parties.models import Party
# An object which encapsulates the state of a PartyWidget as derived from its posted data;
# this is what PartyWidget returns from value_from_datadict
class PartyLookup():
    """State of a PartyWidget as derived from its posted data; this is what
    PartyWidget returns from value_from_datadict.

    Resolution: reuse the cached party_id when it still matches the typed
    name, otherwise look the name up case-insensitively. Any problem is
    stored as a ValidationError and raised later by validate().
    """
    def __init__(self, search_term=None, party_id=None, redisplay=False):
        self.search_term = search_term # the party name being looked up
        self.party_id = party_id # the party ID previously chosen (cached in a hidden field)
        self.redisplay = redisplay # whether we should redisplay the form even if we've successfully resolved the input to a party
        self.validation_error = None
        self.is_empty = False
        self.party = None
        if self.redisplay:
            # force a redisplay of the form; i.e. produce a ValidationError whatever happens
            if not self.search_term:
                self.is_empty = True
                self.validation_error = ValidationError("No party selected")
            else:
                # look for a party matching the search term
                try:
                    self.party = Party.objects.get(name__iexact=self.search_term)
                    self.party_id = self.party.id
                    # Even a successful lookup raises, so the user sees the match.
                    self.validation_error = ValidationError("Party '%s' found." % self.party.name)
                except Party.DoesNotExist:
                    self.validation_error = ValidationError("No match found for '%s'" % self.search_term)
        else:
            if not self.search_term:
                self.is_empty = True
            else:
                # Try the cached id first to avoid a name lookup.
                if self.party_id is not None and self.party_id != '':
                    try:
                        self.party = Party.objects.get(id=self.party_id)
                    except Party.DoesNotExist:
                        pass
                if self.party is None or self.party.name != self.search_term:
                    # look for a party matching the search term
                    try:
                        self.party = Party.objects.get(name__iexact=self.search_term)
                        self.party_id = self.party.id
                    except Party.DoesNotExist:
                        self.validation_error = ValidationError("No match found for '%s'" % self.search_term)
    def validate(self):
        # Raise the deferred error (if any) at form-validation time.
        if self.validation_error:
            raise self.validation_error
    def commit(self):
        # The resolved Party instance, or None if resolution failed.
        return self.party
    def __repr__(self):
        return "PartyLookup: %s, %s" % (repr(self.party_id), self.search_term)
    def __eq__(self, other):
        # Equal when both the typed name and the cached id match.
        # NOTE(review): __eq__ without __hash__ makes instances unhashable on
        # Python 3 — confirm these are never used as dict keys/set members.
        if not isinstance(other, PartyLookup):
            return False
        return self.search_term == other.search_term and str(self.party_id) == str(other.party_id)
    def __ne__(self, other):
        return not self.__eq__(other)
    @staticmethod
    def from_value(value):
        """Coerce a form value into a PartyLookup."""
        # value can be:
        # a Party
        # None
        # an existing PartyLookup
        if not value:
            return PartyLookup()
        elif isinstance(value, PartyLookup):
            return PartyLookup(search_term=value.search_term, party_id=value.party_id, redisplay=value.redisplay)
        elif isinstance(value, Party):
            return PartyLookup(search_term=value.name, party_id=value.id, redisplay=False)
        else:
            raise Exception("Don't know how to handle %s as a party lookup" % repr(value))
class PartyWidget(forms.Widget):
    """Composite widget: a text search box, a 'Find party' submit button and
    a hidden field caching the resolved party id."""
    def __init__(self, attrs=None):
        self.search_widget = forms.TextInput(attrs={'class': 'party_field_search'})
        self.lookup_widget = SubmitButtonInput(button_text='Find party', attrs={'class': 'party_field_lookup'})
        self.party_id_widget = forms.HiddenInput(attrs={'class': 'party_field_party_id'})
        super(PartyWidget, self).__init__(attrs=attrs)
    def render(self, name, value, attrs=None):
        # NOTE(review): Django >= 2.1 also passes a `renderer` argument to
        # Widget.render; this signature predates that — confirm the Django
        # version in use.
        party_lookup = PartyLookup.from_value(value)
        output = [
            self.search_widget.render(name + '_search', party_lookup.search_term, attrs=attrs),
            self.lookup_widget.render(name + '_lookup', None, attrs=attrs),
            self.party_id_widget.render(name + '_party_id', party_lookup.party_id, attrs=attrs),
            '<div class="help_text">(if the party doesn\'t exist yet, <a href="/parties/new/" target="_blank">create it first</a>!)</div>'
        ]
        return mark_safe(u'<div class="party_field">' + u''.join(output) + u'</div>')
    def value_from_datadict(self, data, files, name):
        # Recombine the three sub-widgets' posted values into one PartyLookup;
        # an empty lookup is normalized to None.
        search_term = self.search_widget.value_from_datadict(data, files, name + '_search')
        redisplay = self.lookup_widget.value_from_datadict(data, files, name + '_lookup')
        party_id = self.party_id_widget.value_from_datadict(data, files, name + '_party_id')
        party_lookup = PartyLookup(search_term=search_term, redisplay=redisplay, party_id=party_id)
        if party_lookup.is_empty:
            return None
        else:
            return party_lookup
    def _has_changed(self, initial, data):
        # Compare via PartyLookup equality (typed name + cached id).
        initial = PartyLookup.from_value(initial)
        data = PartyLookup.from_value(data)
        return data != initial
class PartyField(forms.Field):
    """Form field that resolves a typed party name to a Party via PartyLookup."""
    widget = PartyWidget
    def clean(self, value):
        if value is None:
            return super(PartyField, self).clean(value)
        else:
            # if the value has come from a form submission, it will already be a PartyLookup
            # (and therefore party_lookup.redisplay will be meaningful).
            # If it has come from an 'initial' value, it will be a party object.
            party_lookup = PartyLookup.from_value(value)
            party_lookup.validate()
            return party_lookup
|
from PyObjCTools.TestSupport import TestCase
import MLCompute
class TestMLCGraph(TestCase):
    # Checks PyObjC bridge metadata: BOOL arguments/results of MLCGraph
    # selectors must be exposed as Python bools.
    def test_methods(self):
        # arg 2 (disableUpdate) is a BOOL
        self.assertArgIsBOOL(MLCompute.MLCGraph.nodeWithLayer_sources_disableUpdate_, 2)
        # result is BOOL; arg 4 (synchronous) is a BOOL
        self.assertResultIsBOOL(
            MLCompute.MLCGraph.bindAndWriteData_forInputs_toDevice_batchSize_synchronous_
        )
        self.assertArgIsBOOL(
            MLCompute.MLCGraph.bindAndWriteData_forInputs_toDevice_batchSize_synchronous_,
            4,
        )
        # result is BOOL; arg 3 (synchronous) is a BOOL
        self.assertResultIsBOOL(
            MLCompute.MLCGraph.bindAndWriteData_forInputs_toDevice_synchronous_
        )
        self.assertArgIsBOOL(
            MLCompute.MLCGraph.bindAndWriteData_forInputs_toDevice_synchronous_, 3
        )
|
# You are given an n x n 2D matrix representing an image,
# rotate the image by 90 degrees (clockwise).
# You have to rotate the image in-place,
# which means you have to modify the input 2D matrix directly.
# DO NOT allocate another 2D matrix and do the rotation.
class Solution:
    def rotate(self, matrix: "list[list[int]]") -> None:
        """Rotate the n x n matrix 90 degrees clockwise, in place.

        Transpose across the main diagonal, then reverse each row.
        Fix: the original annotation used typing.List without importing it,
        which raises NameError when the class body executes; a string
        annotation avoids needing the import.
        """
        dimension = len(matrix)
        # Transpose: swap matrix[i][j] with matrix[j][i] above the diagonal.
        for i in range(dimension):
            for j in range(i, dimension):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
        # Reverse every row by swapping mirrored columns.
        for i in range(dimension):
            k = dimension - 1
            for j in range(dimension // 2):
                matrix[i][j], matrix[i][k] = matrix[i][k], matrix[i][j]
                k -= 1
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import time
# Legacy (pre-1.0) discord.py setup.
Client = discord.Client()  # NOTE(review): appears unused — `client` below receives all events; confirm before removing
client = commands.Bot(command_prefix = "!")
@client.event
async def on_ready():
    # Fired once the bot has connected and is ready to receive events.
    print("Bot is online and connected to Discord")
@client.event
async def on_message(message):
    # !ping -> reply mentioning the author with "Pong!".
    # (client.send_message is the pre-1.0 discord.py API.)
    if message.content.upper().startswith('!PING'):
        userID = message.author.id
        await client.send_message(message.channel, "<@%s> Pong!" % (userID))
    # !say <words...> -> echo back everything after the command word.
    if message.content.upper().startswith('!SAY'):
        args = message.content.split(" ")
        #args[0] = !SAY
        #args[1] = Hey
        #args[2] = There
        #args[1:] = Hey There
        await client.send_message(message.channel, "%s" % (" ".join(args[1:])))
client.run("<token>") # Insert your bot's token here; run() blocks until the client stops
|
# coding=utf-8
# my solution
def solution(n, words):
    """Word-chain referee: return [player, turn] of the first rule breaker,
    or [0, 0] if the game finishes cleanly.

    Word at position i (turn i+1) is invalid when it does not start with
    the previous word's last letter, or when it repeats an earlier word.
    """
    for position in range(1, len(words)):
        broke_chain = words[position - 1][-1] != words[position][0]
        repeated = words[position] in words[:position]
        if broke_chain or repeated:
            turn_number = position + 1
            # Convert the 1-based turn number into (player, round).
            player = turn_number % n or n
            game_round = turn_number // n if turn_number % n == 0 else turn_number // n + 1
            return [player, game_round]
    return [0, 0]
# Status: Accepted
# Note: 나는 순서를 생각해서 idx를 i+1로 설정해준 뒤, 조금 복잡하게 계산했는데(그래서 idx 변수도 따로 만들어야 했음) 오히려 i를 그대로 쓰면 밑과 같이 더 간단히 쓸 수 있다.
def solution(n, words):
    """Compact word-chain referee: derive player and round directly from the
    0-based index of the first invalid word; [0, 0] if none is invalid."""
    for i in range(1, len(words)):
        chain_ok = words[i - 1][-1] == words[i][0]
        fresh = words[i] not in words[:i]
        if not (chain_ok and fresh):
            return [i % n + 1, i // n + 1]
    return [0, 0]
from django.shortcuts import render
from rest_framework.response import Response
from .models import Album, Musician
from .serializers import AlbumSerializer, MusicianSerializer
from rest_framework.decorators import api_view
from django.views.decorators.csrf import csrf_exempt
import io
from rest_framework.parsers import JSONParser
# Create your views here.
# get all albums
@api_view(['GET'])
def albumList(request):
    """Serialize and return every Album."""
    queryset = Album.objects.all()
    return Response(AlbumSerializer(queryset, many=True).data)
# get all musicians
@api_view(['GET'])
def musicianList(request):
    """Serialize and return every Musician."""
    queryset = Musician.objects.all()
    return Response(MusicianSerializer(queryset, many=True).data)
# create an album
@api_view(['POST'])
def albumCreate(request):
    """Create an Album from a JSON request body.

    Returns the saved data on success, or the validation errors with HTTP
    400 on failure. (Bug fix: the original returned serializer.data even
    when is_valid() failed, which raises AssertionError and produced a 500,
    and it logged serializer.error_messages instead of serializer.errors.)
    """
    # convert incoming json to dictionary
    streamData = io.BytesIO(request.body)
    dictData = JSONParser().parse(streamData)
    serializer = AlbumSerializer(data=dictData)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # serializer.errors holds the per-field validation failures.
    print('error while creating event', serializer.errors)
    return Response(serializer.errors, status=400)
|
#!/usr/bin/python3.6
''' Patches the submission. '''
import pickle
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
# Require exactly two arguments: destination csv and source csv.
if len(sys.argv) != 3:
    print(f'usage: {sys.argv[0]} dest.csv source.csv')
    sys.exit()
# Submission to patch.
sub = pd.read_csv(sys.argv[2])
# Per-image object-detection results (boxes, class labels, confidences).
# NOTE(review): pickle.load can execute arbitrary code — only run this on
# trusted local artifacts.
with open('obj_det.pkl', 'rb') as f:
    boxes, labels, confs = pickle.load(f)
# COCO instance category names indexed by detector label id (matches
# torchvision's COCO_INSTANCE_CATEGORY_NAMES; 'N/A' marks unused ids).
# BUG FIX: the original list had every multi-word name ('traffic light',
# 'stop sign', 'sports ball', ...) split into separate entries, which
# shifted all label indices past 10 and made categories[L] wrong.
categories = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Thresholds: wipe an image's prediction when a confident car covers enough
# of the frame, or when confident persons together cover too much of it.
# (Hoisted out of the loop — they were redefined on every iteration.)
PERSON_MIN_CONF = 0.5
PERSON_MIN_AREA = 0.4
CAR_MIN_CONF = 0.5
CAR_MIN_AREA = 0.4

landmarks_col = sub.columns.get_loc('landmarks')

for i in tqdm(range(sub.shape[0])):
    index = sub.index.values[i]
    total_persons_area = 0.0
    persons_count = 0
    for L, conf, box in zip(labels[index], confs[index], boxes[index]):
        # Boxes arrive in 800x800 pixel coordinates; normalize to [0, 1]
        # so areas are fractions of the frame.
        box /= 800.0
        area = (box[2] - box[0]) * (box[3] - box[1])
        class_ = categories[L]
        if class_ == 'person' and conf > PERSON_MIN_CONF:
            total_persons_area += area
            persons_count += 1
        if class_ == 'car' and conf > CAR_MIN_CONF and area > CAR_MIN_AREA:
            # Single-step positional assignment: the original chained
            # `sub.landmarks.iloc[i] = ...` triggers pandas' chained
            # assignment warning and may silently write to a copy.
            sub.iloc[i, landmarks_col] = ''
    if total_persons_area > PERSON_MIN_AREA:
        sub.iloc[i, landmarks_col] = ''

sub.to_csv(sys.argv[1], index=False)
|
# Adding an int and a float: Python promotes the result to float.
a=int(5)
b=float(5.5)
# type() of the sum shows the promoted type.
resultat = type(a+b)
print("Le type du résultat d'une addition d'un", type(a), "et d'un", type(b), "donne un", resultat)
# Import the project's format checker; bail out early with a clear message
# when it is not importable.
try:
    import checker
except ImportError:
    # print() works on Python 2 and 3 (the original used the Python 2-only
    # print statement); the message grammar is also fixed.
    print('SizeCatalog -- (!) module not found')
    exit()
#
# SizeCatalog class
#
# helps to extract specific stats about a size catalog dataset
#
class SizeCatalog():
    """Helpers to extract stats from a size-catalog dataset.

    Each data line is expected to be a tab-separated record with one field
    per entry in `columns`, in that order.
    """

    # Expected field order of every dataset line.
    columns = [
        'clothe_category',
        'label',
        'size_category',
        'size_type_projections',
        'brand',
        'url',
        'gender'
    ]

    def __init__(self, dataLines):
        """Keep dataLines only if they pass the format check; otherwise
        report the error and hold an empty dataset."""
        self.dataLines = []
        if not checker.formatCheck(dataLines, len(self.columns)):
            # print() call form is valid on Python 2 and 3 (the original
            # Python 2-only print statement broke Python 3 imports).
            print(checker.formatErrorMessage)
        else:
            self.dataLines = dataLines

    def getColumns(self, line):
        """Split a tab-separated line into a {column_name: value} dict."""
        columns = {}
        parts = line.split('\t')
        for i in range(len(self.columns)):
            columns[self.columns[i]] = parts[i]
        return columns
|
#prints nice grids. Example output:
"""
+---+-----+----+----+
| X | $ | ( | ) |
+---+-----+----+----+
| 0 | | s1 | |
+---+-----+----+----+
| 1 | | s1 | s2 |
+---+-----+----+----+
| 2 | r2 | r2 | r2 |
+---+-----+----+----+
| 3 | | s1 | |
+---+-----+----+----+
| 4 | | | s5 |
+---+-----+----+----+
| 5 | r1 | r1 | r1 |
+---+-----+----+----+
| 6 | acc | | |
+---+-----+----+----+
"""
#adds spaces before string x until it is `width` characters long
def pad(x, width):
    """Left-pad string `x` with spaces until it is at least `width` long."""
    missing = width - len(x)
    if missing > 0:
        x = " " * missing + x
    return x
#like regular join, but the separator goes on the ends too.
def enclosedJoin(seperator, seq):
    """Join `seq` with `seperator`, also placing the separator at both ends."""
    return "%s%s%s" % (seperator, seperator.join(seq), seperator)
#prints a sequence of items, each one padded according to its given width.
def rowPrint(row, widths):
    """Render one grid row: each cell padded to its column width, '|'-separated."""
    cells = [pad(cell, width) for cell, width in zip(row, widths)]
    return enclosedJoin("|", cells)
#finds the widest element in one column of a grid
def maxWidth(grid, column):
    """Return the length of the widest cell in `column` across all rows.

    Returns 0 for an empty grid. Fixes the original's shadowing of the
    builtin `max` by a local variable of the same name.
    """
    widest = 0
    for row in grid:
        cell_len = len(row[column])
        if cell_len > widest:
            widest = cell_len
    return widest
#returns a printable grid representation
def gridPrint(grid):
    """Render `grid` as text with a +---+ separator line around every row."""
    widths = [maxWidth(grid, column) for column in range(len(grid[0]))]
    border = "\n" + enclosedJoin("+", ["-" * w for w in widths]) + "\n"
    rendered_rows = [rowPrint(row, widths) for row in grid]
    return enclosedJoin(border, rendered_rows)
#like map, but for 2d arrays.
def gridMap(func, grid):
    """Apply `func` to every cell of the 2-D sequence `grid`.

    Returns concrete nested lists. Under Python 3 the original map()-based
    version returned lazy map objects, which broke callers (gridPrint
    subscripts and re-iterates the result); comprehensions restore the
    Python 2 list-of-lists behaviour on both versions.
    """
    return [[func(cell) for cell in row] for row in grid]
#given a dict `d`, whose keys are 2 element tuples,
#makes a grid with the keys as axes and the values as interior values.
#ex. In the sample grid at the top of this page,
#0-6, $, (, ) are keys, and s1, r2, etc are values.
def gridFromDict(d):
    """Build a grid from dict `d` whose keys are 2-element tuples.

    First tuple elements become row labels, second elements column labels,
    and d's values fill the interior; missing (row, col) pairs render as ''.
    Every cell is stringified via gridMap.
    """
    firstKeys = sorted(set(k[0] for k in d))
    secondKeys = sorted(set(k[1] for k in d))
    ret = [["X"] + list(secondKeys)]
    for k1 in firstKeys:
        ret.append([k1] + [d.get((k1, k2), "") for k2 in secondKeys])
    return gridMap(str, ret)
#returns a printable representation of the dict, rendered as a grid.
def dictPrint(d):
    """Return a printable grid rendering of dict `d` (keys are 2-tuples)."""
    grid = gridFromDict(d)
    padded = gridMap(lambda cell: " " + cell + " ", grid)
    return gridPrint(padded)
#! /Python34/python.exe
# ----------------------------------------------------
# Dateiname: diffusion.py
# Simulation einer eindimensionalen Diffusion.
#
# Python 3, 6. Auflage, mitp 2016
# Kap. 30
# Michael Weigend 22.09.2016
# ----------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
# Simulate a one-dimensional random walk ("diffusion") for many particles
# and compare the mean displacement against the theoretical sqrt(t) law.
n_particles = 200  # number of particles
t_max = 100  # total observation time (number of steps)
t = np.arange(t_max)
# Random steps of -1 or +1 for every particle at every time step.
steps = 2 * np.random.randint(0, 2, (n_particles, t_max)) - 1
positions = np.cumsum(steps, axis=1)
sq_d = positions ** 2
# Mean squared displacement across all particles at each time step.
mean_sq_d = np.mean(sq_d, axis=0)
plt.figure()
# Simulation (blue dots) vs. theory: sqrt(t) plotted against its index,
# which equals t here since t = arange(t_max).
plt.plot(t, np.sqrt(mean_sq_d), 'b.',
         np.sqrt(t), '-')
plt.xlabel("Zeit")
plt.ylabel("Mittlere Verschiebung")
plt.legend(("Simulation", "Theorie"), loc=(0.6, 0.1))
plt.show()
|
#!/usr/bin/env python
#-*-coding:utf8-*-
# Demonstration of *args / **kwargs handling (Python 2 syntax).
def funct(*args, **kwargs):
    # Positional args arrive as a tuple, keyword args as a dict.
    print args, kwargs
dic = {1:'a',2:'b'}
# NOTE(review): `dic` above is never used; the call below passes the string
# 'abc' as a keyword literally named "dic". Possibly funct(**dic) was the
# intent -- confirm (that would fail here since the keys are ints).
funct(dic='abc')
|
# Generated by Django 3.1.7 on 2021-04-27 14:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `imoveis` app.

    Creates the lookup tables (AluguelCompra, Bairro, Estado, Cidade) and
    the main Imovel table, then wires Bairro -> Cidade.
    """
    initial = True
    dependencies = [
        ('tipoimoveis', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='AluguelCompra',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Bairro',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Estado',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('sigla', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Imovel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('description', models.TextField()),
                ('data_cadastro', models.DateTimeField()),
                # NOTE(review): default=False on CharFields below mirrors the
                # model definition; a boolean default on a text column looks
                # like an authoring mistake -- confirm in the model.
                ('rua', models.CharField(blank=True, default=False, max_length=40, null=True)),
                ('cep', models.CharField(blank=True, default=False, max_length=40, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=9)),
                ('quartos', models.IntegerField()),
                ('banheiros', models.IntegerField()),
                ('sqft', models.IntegerField(default=0)),
                ('garagem', models.IntegerField(default=0)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('aluguel_compra', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='imoveis.aluguelcompra')),
                ('bairro', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='imoveis.bairro')),
                ('tipo_imovel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='tipoimoveis.tipoimovel')),
            ],
        ),
        migrations.CreateModel(
            name='Cidade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=40)),
                ('estado', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='imoveis.estado')),
            ],
        ),
        # Added after Cidade exists, since Bairro was created first above.
        migrations.AddField(
            model_name='bairro',
            name='cidade',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='imoveis.cidade'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'inmueble.ui'
#
# Created: Fri Aug 25 14:02:49 2017
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 compatibility shim: older API-1 builds expose QString.fromUtf8;
# API-2 builds drop QString, so plain Python strings pass through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Translation shim: pass the UnicodeUTF8 encoding argument only on PyQt
# builds that still define it; newer builds take three arguments.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated UI for inmueble.ui.

    Do not hand-edit: regenerate from the .ui file (any change here is
    lost on the next pyuic4 run). The widget is a fixed 300x30 rounded
    "pill": background label + circular icon + bold reference text.
    """
    def setupUi(self, Form):
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(300, 30)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
        Form.setSizePolicy(sizePolicy)
        Form.setMinimumSize(QtCore.QSize(0, 30))
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setMargin(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Rounded white background with a green border.
        self.fondo = QtGui.QLabel(Form)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fondo.sizePolicy().hasHeightForWidth())
        self.fondo.setSizePolicy(sizePolicy)
        self.fondo.setMinimumSize(QtCore.QSize(300, 30))
        # NOTE(review): 16677215 looks like a typo for 16777215
        # (QWIDGETSIZE_MAX) in the source .ui -- fix it there, not here.
        self.fondo.setMaximumSize(QtCore.QSize(30, 16677215))
        self.fondo.setStyleSheet(_fromUtf8("QLabel#fondo {border: 2px solid rgb(143,212,0);\n"
"border-radius: 15px;\n"
"background-color: rgb(255,255,255);\n"
" }"))
        self.fondo.setText(_fromUtf8(""))
        self.fondo.setScaledContents(False)
        self.fondo.setAlignment(QtCore.Qt.AlignCenter)
        self.fondo.setMargin(0)
        self.fondo.setIndent(0)
        self.fondo.setObjectName(_fromUtf8("fondo"))
        self.gridLayout.addWidget(self.fondo, 0, 0, 1, 1)
        # Bold, centered reference label.
        self.ref = QtGui.QLabel(Form)
        self.ref.setGeometry(QtCore.QRect(65, 7, 191, 16))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.ref.setFont(font)
        self.ref.setAlignment(QtCore.Qt.AlignCenter)
        self.ref.setObjectName(_fromUtf8("ref"))
        # Circular 30x30 icon at the left edge of the pill.
        self.icono = QtGui.QLabel(Form)
        self.icono.setGeometry(QtCore.QRect(0, 0, 30, 30))
        self.icono.setStyleSheet(_fromUtf8("QLabel#icono { border: 2px solid rgb(143,212,0);\n"
"border-radius: 15px\n"
"}"))
        self.icono.setText(_fromUtf8(""))
        self.icono.setPixmap(QtGui.QPixmap(_fromUtf8(":/icons/25694.png")))
        self.icono.setScaledContents(True)
        self.icono.setMargin(3)
        self.icono.setObjectName(_fromUtf8("icono"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Placeholder texts; real values are set by the application code.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.ref.setText(_translate("Form", "TextLabel", None))
import ficheros_rc
|
import requests
import time
from base64 import b64decode
import os
import json
import pymongo
from operator import itemgetter
from itertools import groupby
from datetime import datetime, timedelta
from utils import make_meta
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from git import Repo, GitCommandError
from bson import json_util
from hashlib import sha1
# Endpoints: the Socrata crime feed and the CPD ClearPath API.
CRIMES = 'http://data.cityofchicago.org/resource/ijzp-q8t2.json'
MOST_WANTED = 'http://api1.chicagopolice.org/clearpath/api/1.0/mostWanted/list'
MUGSHOTS = 'http://api1.chicagopolice.org/clearpath/api/1.0/mugshots'
# Credentials come from the environment; a missing variable raises KeyError
# at import time, which is intentional fail-fast behaviour.
WEATHER_KEY = os.environ['WEATHER_KEY']
AWS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET = os.environ['AWS_SECRET_KEY']
MONGO_USER = os.environ['UPDATE_MONGO_USER']
MONGO_PW = os.environ['UPDATE_MONGO_PW']
class SocrataError(Exception):
    """Raised when the Socrata crime endpoint returns a non-200 response."""
    def __init__(self, message):
        super(SocrataError, self).__init__(message)
        # Kept as an attribute for callers/loggers that read .message.
        self.message = message
class WeatherError(Exception):
    """Raised when the Wunderground API returns a non-200 response."""
    def __init__(self, message):
        super(WeatherError, self).__init__(message)
        # Kept as an attribute for callers/loggers that read .message.
        self.message = message
class ClearPathError(Exception):
    """Raised when the CPD ClearPath API returns a non-200 response."""
    def __init__(self, message):
        super(ClearPathError, self).__init__(message)
        # Kept as an attribute for callers/loggers that read .message.
        self.message = message
# In Feature properties, define title and description keys. Can also
# define marker-color, marker-size, marker-symbol and marker-zoom.
def geocode_it(block, coll):
    """Resolve a crime `block` string to a GeoJSON Point.

    First reuses coordinates from any already-geocoded crime on the same
    block in `coll`; otherwise queries the MapQuest open geocoder.
    Returns a GeoJSON Point dict, or None when geocoding yields nothing.
    """
    match = coll.find_one({'block': block, 'location.coordinates': {'$ne': None}})
    if match:
        return match['location']
    else:
        add_parts = block.split()
        # Block numbers are anonymized like '001XX'; replace X's with zeros
        # to get a concrete street number.
        add_parts[0] = str(int(add_parts[0].replace('X', '0')))
        address = '%s Chicago, IL' % ' '.join(add_parts)
        # Rough bounding box around Chicago to constrain geocoder results.
        bbox = "42.023134979999995,-87.52366115999999,41.644286009999995,-87.94010087999999"
        # NOTE(review): hard-coded MapQuest API key; move to configuration.
        key = 'Fmjtd|luub2d0rn1,rw=o5-9u2ggw'
        params = {'location': address, 'key': key, 'boundingBox': bbox}
        u = 'http://open.mapquestapi.com/geocoding/v1/address'
        r = requests.get(u, params=params)
        resp = json.loads(r.content.decode('utf-8'))
        locations = resp['results'][0]['locations']
        if locations:
            # GeoJSON coordinate order is (lng, lat).
            p = (float(locations[0]['latLng']['lng']), float(locations[0]['latLng']['lat']))
            feature = {'type': 'Point', 'coordinates': p}
            return feature
        else:
            return None
def update_crimediffs(case_numbers):
    """Serialize each crime report into a git repo and commit the changes.

    For every case in `case_numbers`, writes the Mongo document as JSON to
    ../crimediffs/reports/<case_number>.json. Files whose content already
    matches (same SHA1) are skipped; new/changed files are committed with
    GIT_COMMITTER_DATE backdated to the crime's `updated_on`, and pushed
    when anything was committed. Returns (skipped, committed) counts.
    """
    c = pymongo.MongoClient()
    db = c['chicago']
    coll = db['crime']
    db.authenticate(MONGO_USER, password=MONGO_PW)
    dir_path = os.path.abspath(os.path.curdir)
    repo_path = os.path.join(dir_path, '../crimediffs')
    repo = Repo(repo_path)
    g = repo.git
    # timeout=False keeps the server cursor alive for this long loop.
    cases = coll.find({'case_number': {'$in': case_numbers}}, timeout=False)
    committed = 0
    skipped = 0
    for case in cases:
        fname = os.path.join(repo_path, 'reports/%s.json' % case['case_number'])
        print fname
        if os.path.exists(fname):
            f = open(fname, 'rb')
            written = f.read()
            f.close()
            stored = json_util.dumps(case, indent=4)
            # Skip files whose on-disk content already matches the database.
            if sha1(written).hexdigest() == sha1(stored).hexdigest():
                skipped += 1
                continue
            else:
                f = open(fname, 'wb')
                f.write(stored)
                f.close()
        else:
            f = open(fname, 'wb')
            f.write(json_util.dumps(case, indent=4))
            f.close()
        # Backdate the commit to the crime's own update timestamp.
        updated_on = case['updated_on'].strftime('%a, %d %b %Y %H:%M:%S %z')
        os.environ['GIT_COMMITTER_DATE'] = updated_on
        g.add(fname)
        g.commit(message='Case Number %s updated at %s' % (case['case_number'], updated_on), author='eric@bahai.us')
        committed += 1
    if committed > 0:
        o = repo.remotes.origin
        pushinfo = o.push()
        print pushinfo[0].summary
    print 'Skipped: %s Committed: %s' % (skipped, committed)
    return skipped, committed
def fetch_crimes(count):
    """Fetch up to `count` of the most recent crimes from the Socrata feed.

    Pages through the API 1000 records at a time (the per-request limit),
    newest first, and returns the accumulated list of crime dicts.

    Raises:
        SocrataError: when any page request returns a non-200 status.
    """
    crimes = []
    for offset in range(0, count, 1000):
        crime_offset = requests.get(CRIMES, params={'$limit': 1000, '$offset': offset, '$order': 'date DESC'})
        if crime_offset.status_code == 200:
            crimes.extend(crime_offset.json())
        else:
            # Bug fix: the original read .status_code/.content off `crimes`
            # (a list), raising AttributeError instead of SocrataError.
            raise SocrataError('Socrata API responded with a %s status code: %s' % (crime_offset.status_code, crime_offset.content[300:]))
    return crimes
def get_crimes():
    """Pull the 20k most recent crimes from Socrata and upsert into Mongo.

    Normalizes types (dates, string booleans, IUCR codes), geocodes records
    that lack coordinates, and tags each record with its IUCR-derived type.
    Returns a human-readable 'Updated X, Created Y' summary string.
    """
    c = pymongo.MongoClient()
    db = c['chicago']
    coll = db['crime']
    iucr_codes = db['iucr']
    db.authenticate(MONGO_USER, password=MONGO_PW)
    crimes = fetch_crimes(20000)
    # NOTE(review): in Python 2 this comprehension rebinds `c` (the Mongo
    # client); harmless since `c` is not used afterwards, but worth renaming.
    case_numbers = [c['case_number'] for c in crimes]
    existing = 0
    new = 0
    dates = []
    for crime in crimes:
        try:
            # GeoJSON coordinate order is (lng, lat).
            crime['location'] = {
                'type': 'Point',
                'coordinates': (float(crime['longitude']), float(crime['latitude']))
            }
        except KeyError:
            # No coordinates on the record; fall back to geocoding its block.
            crime['location'] = geocode_it(crime['block'], coll)
        crime['updated_on'] = datetime.strptime(crime['updated_on'], '%Y-%m-%dT%H:%M:%S')
        crime['date'] = datetime.strptime(crime['date'], '%Y-%m-%dT%H:%M:%S')
        # Socrata serializes booleans as the strings 'true'/'false'.
        if crime['arrest'] == 'true':
            crime['arrest'] = True
        elif crime['arrest'] == 'false':
            crime['arrest'] = False
        if crime['domestic'] == 'true':
            crime['domestic'] = True
        elif crime['domestic'] == 'false':
            crime['domestic'] = False
        dates.append(crime['date'])
        # Normalize key names: collapse whitespace to underscores, lowercase.
        crime_update = {}
        for k,v in crime.items():
            new_key = '_'.join(k.split()).lower()
            crime_update[new_key] = v
        # Strip leading zeros from numeric IUCR codes; keep alphanumeric ones.
        try:
            iucr = str(int(crime_update['iucr']))
        except ValueError:
            iucr = crime_update['iucr']
        crime_update['iucr'] = iucr
        # Unknown IUCR (find_one returns None) or missing 'type' -> None.
        try:
            crime_type = iucr_codes.find_one({'iucr': iucr})['type']
        except (TypeError, KeyError):
            crime_type = None
        crime_update['type'] = crime_type
        update = coll.update({'case_number': crime['case_number']}, crime_update, upsert=True)
        if update['updatedExisting']:
            existing += 1
        else:
            new += 1
    # skipped, committed = update_crimediffs(case_numbers)
    # unique_dates = list(set([datetime.strftime(d, '%Y%m%d') for d in dates]))
    # weather_updated = get_weather(unique_dates)
    return 'Updated %s, Created %s' % (existing, new)
def get_weather(dates):
    """Fetch daily Chicago weather summaries and upsert them into Mongo.

    `dates` are 'YYYYMMDD' strings. Sleeps 7s between requests to stay
    under the Wunderground free-tier rate limit.

    Raises:
        WeatherError: on any non-200 API response.
    """
    c = pymongo.MongoClient()
    db = c['chicago']
    db.authenticate(MONGO_USER, password=MONGO_PW)
    coll = db['weather']
    for date in dates:
        url = 'http://api.wunderground.com/api/%s/history_%s/q/IL/Chicago.json' % (WEATHER_KEY, date)
        weat = requests.get(url)
        weather = {
            'CELSIUS_MAX': None,
            'CELSIUS_MIN': None,
            'FAHR_MIN': None,
            'FAHR_MAX': None,
        }
        if weat.status_code == 200:
            summary = weat.json()['history']['dailysummary'][0]
            weather['CELSIUS_MAX'] = summary['maxtempm']
            weather['CELSIUS_MIN'] = summary['mintempm']
            weather['FAHR_MAX'] = summary['maxtempi']
            weather['FAHR_MIN'] = summary['mintempi']
            weather['DATE'] = datetime.strptime(date, '%Y%m%d')
            update = {'$set': weather}
            up = coll.update({'DATE': datetime.strptime(date, '%Y%m%d')}, update, upsert=True)
        else:
            # NOTE(review): content[300:] drops the first 300 chars; a snippet
            # via content[:300] may have been intended (same pattern is used
            # elsewhere in this module -- confirm before changing).
            raise WeatherError('Wunderground API responded with %s: %s' % (weat.status_code, weat.content[300:]))
        time.sleep(7)
    return 'Updated weather for %s' % ', '.join(dates)
def get_most_wanted():
    """Mirror the CPD most-wanted list (per-warrant JSON + mugshots) to S3.

    For each wanted person, uploads every decoded mugshot image and a
    public per-warrant JSON document.

    Raises:
        ClearPathError: on any non-200 ClearPath API response.
    """
    wanted = requests.get(MOST_WANTED, params={'max': 100})
    if wanted.status_code == 200:
        s3conn = S3Connection(AWS_KEY, AWS_SECRET)
        bucket = s3conn.get_bucket('crime.static-eric.com')
        wanted_list = []
        for person in wanted.json():
            warrant = person['warrantNo']
            wanted_list.append(warrant)
            mugs = requests.get(MUGSHOTS, params={'warrantNo': warrant})
            person['mugs'] = []
            if mugs.status_code == 200:
                for mug in mugs.json():
                    # Mugshots arrive base64-encoded; store decoded JPEGs.
                    image_path = 'images/wanted/%s_%s.jpg' % (warrant, mug['mugshotNo'])
                    k = Key(bucket)
                    k.key = image_path
                    k.set_contents_from_string(b64decode(mug['image']))
                    k.set_acl('public-read')
                    person['mugs'].append({'angle': mug['mugshotNo'], 'image_path': image_path})
            else:
                raise ClearPathError('ClearPath API returned %s when fetching mugshots for %s: %s' % (mugs.status_code, warrant, mugs.content[300:]))
            k = Key(bucket)
            k.key = 'data/wanted/%s.json' % warrant
            k.set_contents_from_string(json.dumps(person, indent=4))
            k.set_acl('public-read')
        # NOTE(review): `wanted_list` is collected but never uploaded; the
        # copy() below only re-tags an *existing* wanted_list.json with a
        # JSON content type. Confirm the listing file is produced elsewhere.
        k = Key(bucket)
        k.key = 'data/wanted/wanted_list.json'
        k = k.copy(k.bucket.name, k.name, {'Content-Type':'application/json'})
        k.set_acl('public-read')
    else:
        raise ClearPathError('ClearPath API returned %s when getting most wanted list: %s' % (wanted.status_code, wanted.content[300:]))
if __name__ == '__main__':
    # Refresh the crime collection first, then the most-wanted S3 assets.
    get_crimes()
    get_most_wanted()
|
from django.db import models
class SecretModel(models.Model):
    """Maps the pre-existing `secrets` table (managed = False: Django never
    creates or migrates it; it only reads/writes rows in the given schema).

    Django auto-adds the `id` primary key. The original declared `id = id`,
    which merely bound the *builtin* id() function as a class attribute and
    was then overwritten by the auto-created primary key -- removed as a
    no-op.
    """
    hash = models.TextField()
    secrettext = models.TextField()
    createdat = models.DateTimeField(auto_now_add=True)
    expiresat = models.DateTimeField(blank=True, null=True)
    maximumviews = models.IntegerField()
    currentviews = models.IntegerField()
    class Meta:
        managed = False
        ordering = ['id']
        db_table = 'secrets'
    def __str__(self) -> str:
        return self.secrettext
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 00:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the Proveedor (supplier) table."""
    initial = True
    # NOTE(review): an *initial* migration depending on this same app's
    # '0001_initial' suggests this file is actually 0002 -- confirm.
    dependencies = [
        ('gestion_proveedores', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Proveedor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(blank=True, max_length=30)),
                ('email', models.CharField(blank=True, max_length=30)),
                ('direccion', models.CharField(blank=True, max_length=30)),
                ('localidad', models.CharField(blank=True, max_length=20)),
                ('pais', models.CharField(blank=True, max_length=20)),
                ('cod_postal', models.CharField(blank=True, max_length=5, null=True, verbose_name='Código postal')),
                ('telefono', models.CharField(blank=True, max_length=12, null=True)),
                ('fax', models.CharField(blank=True, max_length=9, null=True)),
                ('descripcion', models.TextField(blank=True, max_length=250, null=True)),
                ('foto', models.ImageField(blank=True, null=True, upload_to='proveedores/')),
            ],
        ),
    ]
|
import sqlite3
from os import path
ROOT = path.dirname(path.realpath(__file__))
def make_db():
    """Create the `posts` table in data.db when it does not already exist."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute("""create table IF NOT EXISTS posts(
        user_id integer not null,
        title varchar(30) not null,
        content text not null,
        image varchar(60) not null,
        likes_number INT NOT NULL default 0,
        comments_number int not null default 0
    )""")
    connection.commit()
    connection.close()
def add(user_id, title, content, image):
    """Insert a new post; like/comment counters start at their defaults."""
    connection = sqlite3.connect("data.db")
    connection.cursor().execute(
        "insert into posts(user_id, title, content, image) values(?,?,?,?)",
        (user_id, title, content, image))
    connection.commit()
    connection.close()
def alter():
    """One-off migration: add the likes_number column to an old posts table."""
    connection = sqlite3.connect("data.db")
    connection.execute("ALTER TABLE posts ADD likes_number INT NOT NULL DEFAULT 0")
    connection.commit()
    connection.close()
def get_all_with_comments_number():
    """Return (title, likes_number) tuples for every post."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute("""SELECT posts.title,posts.likes_number from posts""")
    connection.commit()
    rows = cursor.fetchall()
    connection.close()
    return rows
def get_one_with_comments_number(post_id):
    """Return [(title, comment_count)] for one post; empty when absent."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    # LEFT JOIN so a post with zero comments still yields one row (count 0).
    cursor.execute("""SELECT posts.title,count(comments.content)
    FROM posts
    LEFT JOIN comments ON posts.rowid = comments.post_id
    where posts.rowid = ? GROUP BY posts.rowid """, (post_id,))
    connection.commit()
    rows = cursor.fetchall()
    connection.close()
    return rows
# print(get_all_with_comments())
def get_all():
    """Return every post joined with its author, most-liked first."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute("select users.rowid,users.username,users.img,posts.rowid,posts.title,posts.content,posts.image,posts.likes_number,posts.user_id,posts.comments_number from users join posts on users.rowid = posts.user_id order by likes_number desc")
    connection.commit()
    rows = cursor.fetchall()
    connection.close()
    return rows
def get_posts_i_liked(user_id):
    """Return the rowids of every post the given user liked, as a flat list.

    fetchall() yields [(1,), (2,), ...]; flatten to [1, 2, ...]. Replaces
    the original's locally-defined `reduce` helper plus map() round-trip
    with a list comprehension (also drops a needless commit after SELECT).
    """
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute("select post_id from likes where likes.user_id = ?",(user_id,))
    rows = cursor.fetchall()
    connection.close()
    return [row[0] for row in rows]
def get_likes_number(post_id):
    """Return a post's like count, or 0 when the post does not exist.

    The original indexed fetchone() unconditionally, so an unknown rowid
    raised TypeError ('NoneType' is not subscriptable); existing posts
    behave exactly as before.
    """
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute(
        "select likes_number from posts where rowid = ?",(post_id,))
    row = cursor.fetchone()
    connection.close()
    return row[0] if row is not None else 0
def delete(id_):
    """Remove the post with the given rowid."""
    connection = sqlite3.connect("data.db")
    connection.execute("delete from posts where rowid = ?",(id_,))
    connection.commit()
    connection.close()
def update(likes_number,post_id):
    """Set a post's like counter to `likes_number`."""
    connection = sqlite3.connect("data.db")
    connection.execute("update posts set likes_number = ? where rowid = ?",
                       (likes_number, post_id))
    connection.commit()
    connection.close()
def update_comments(comments_number,post_id):
    """Set a post's comment counter to `comments_number`."""
    connection = sqlite3.connect("data.db")
    connection.execute("update posts set comments_number = ? where rowid = ?",
                       (comments_number, post_id))
    connection.commit()
    connection.close()
def get_by(user_id):
    """Return all of one user's posts, newest (highest rowid) first."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute("select rowid,* from posts where user_id=? order by rowid desc", (user_id,))
    connection.commit()
    rows = cursor.fetchall()
    connection.close()
    return rows
def get_by_id(post_id):
    """Return one post (joined with its author) by the post's rowid."""
    connection = sqlite3.connect("data.db")
    cursor = connection.cursor()
    cursor.execute(
        "select users.rowid,users.username,users.img,posts.rowid,posts.title,posts.content,posts.image,posts.likes_number,posts.comments_number from users join posts on users.rowid = posts.user_id where posts.rowid=?", (post_id,))
    connection.commit()
    row = cursor.fetchone()
    connection.close()
    return row
|
def leiaInt(msg):
    """Prompt with `msg` until the user types a valid integer.

    Returns the integer, or 0 when the user aborts with Ctrl+C.
    """
    while True:
        try:
            resposta = input(msg)
        except KeyboardInterrupt:
            print('\n\033[1;31mEntrada de dados interrompida pelo usuário.\033[m')
            return 0
        try:
            return int(resposta)
        except (ValueError, TypeError):
            print('\033[1;31mERR! Por favor, '
                  'digite um número inteiro válido.\033[m')
def leiaFloat(msg):
    """Prompt with `msg` until the user types a valid float.

    Returns the float, or 0 when the user aborts with Ctrl+C.
    """
    while True:
        try:
            resposta = input(msg)
        except KeyboardInterrupt:
            print('\n\033[1;31mEntrada de dados interrompida pelo usuário.\033[m')
            return 0
        try:
            return float(resposta)
        except (ValueError, TypeError):
            print('\033[1;31mERR: Número digitado inválido.\033[m')
# Interactive demo: read one int and one float, then echo both values.
num1 = leiaInt('Digite um inteiro: ')
num2 = leiaFloat('Digite um real: ')
print(f'O valor inteiro digitado foi {num1} e o real foi {num2}')
|
'''
Simple utilities for 2D intersections.
The normal is returned as a 2d numpy array pointing towards the circle.
The penetration is a float, >=0 (=0 if touching).
'''
from collections import namedtuple
from contracts import contract, new_contract
import numpy as np
# An intersection result: `normal` is a 2d unit vector pointing toward the
# circle; `penetration` is a float >= 0 (0 means the shapes just touch).
PrimitiveIntersection = namedtuple('PrimitiveIntersection',
    'normal penetration')
# PyContracts vocabulary used by the decorated functions below.
new_contract('point2', 'seq[2](number)')
new_contract('intersection', 'tuple((array[2],unit_length), >=0)')
@contract(c1='point2', r1='>0', c2='point2', r2='>0', solid2='bool',
          returns='None|intersection')
def circle_circle_intersection(c1, r1, c2, r2, solid2=True):
    '''
    Intersect circle 1 (center c1, radius r1) with circle 2 (c2, r2).

    When solid2 is False, circle 2 is only a ring: a circle 1 lying
    entirely inside it does not count as intersecting.

    Returns a PrimitiveIntersection whose normal points from c2 toward
    c1, or None when the circles do not touch.
    '''
    c1 = np.array(c1)
    c2 = np.array(c2)
    dist = np.linalg.norm(c1 - c2)
    if dist > r1 + r2:
        # The two solid circle do not touch
        return None
    else:
        # The two solid circle DO touch
        normal = c1 - c2
        nn = np.linalg.norm(normal)
        if nn > 0:
            normal /= np.linalg.norm(normal)
        else:
            # Coincident centers: direction is arbitrary, pick +x.
            normal = np.array([1, 0])
        if dist < r2 - r1:
            # The first circle is completely inside the second
            if not solid2:
                return None
        # NOTE(review): full containment falls through to the same overlap
        # formula as partial overlap -- confirm this penetration depth is
        # intended for the contained case.
        penetration = (r1 + r2) - dist
        assert penetration >= 0
        return PrimitiveIntersection(normal, penetration)
@contract(c1='point2', r1='>0', p1='point2', p2='point2',
          returns='None|intersection')
def circle_segment_intersection(c1, r1, p1, p2):
    """Intersect a circle (center c1, radius r1) with the segment p1-p2.

    Returns None when the line is farther than r1 from the center, or
    when the center's projection falls outside the segment; otherwise a
    PrimitiveIntersection whose normal points toward the circle's center.
    """
    center = np.array(c1)
    start = np.array(p1)
    end = np.array(p2)
    # Project the center onto the infinite line through start-end.
    projection, normal, distance = projection_on_line(p=center, a=start, b=end)
    if distance > r1:
        return None
    # The projection must lie between the two endpoints.
    if ((projection - start) * (projection - end)).sum() > 0:
        return None
    return PrimitiveIntersection(normal, r1 - distance)
@contract(c1='point2', r1='>0', points='seq[>=2](point2)',
          returns='None|intersection')
def circle_polyline_intersection(c1, r1, points):
    """Intersect a circle with a polyline (sequence of joined segments).

    Returns the segment collision with the deepest penetration, or None
    when no segment touches the circle.
    """
    deepest = None
    for segment_start, segment_end in zip(points[:-1], points[1:]):
        hit = circle_segment_intersection(c1, r1, segment_start, segment_end)
        if hit is None:
            continue
        if deepest is None or hit.penetration > deepest.penetration:
            deepest = hit
    return deepest
@contract(p='point2', a='point2', b='point2',
          returns='tuple(array[2], (array[2], unit_length), >=0)')
def projection_on_line(p, a, b):
    '''
    Projects point p onto the line containing the segment a-b.

    Returns (projected point, unit normal pointing towards p,
    distance of p from the line).

    Bug fix: the parameters previously *defaulted* to the string 'point2'
    (a copy-paste of the contract spec), so a call with a missing argument
    crashed deep in the arithmetic instead of raising a clear TypeError.
    '''
    t0 = a[0] - b[0]
    t1 = a[1] - b[1]
    one_on_r = 1 / np.sqrt(t0 * t0 + t1 * t1)
    # Unit normal (c, s) of the line and its signed offset rho from origin.
    c = t1 * one_on_r
    s = -t0 * one_on_r
    rho = c * a[0] + s * a[1]
    px = c * rho + s * s * p[0] - c * s * p[1]
    py = s * rho - c * s * p[0] + c * c * p[1]
    distance = np.abs(rho - (c * p[0] + s * p[1]))
    proj = np.array([px, py])
    # Flip so the normal points from the line toward p's side.
    normal = -np.array([c, s]) * np.sign(rho)
    return (proj, normal, distance)
|
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import ssl
# Disable HTTPS certificate verification globally so Keras can download
# pretrained weights in environments with broken certificate stores.
# NOTE(review): this weakens TLS for the whole process -- fix certificates
# instead where possible.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context
def predictImages(model,path, file_count, name):
    """Run `model` over `file_count` images in `path`, printing top-2 labels.

    name -- dataset label printed before and after the batch for eyeballing.
    NOTE(review): filenames are always 'pizza<i>.jpeg' regardless of `name`;
    confirm every dataset folder follows that naming scheme.
    """
    print(name)
    for count in range(0,file_count):
        img_path = path + '/pizza' + str(count) + '.jpeg'
        # Resize to ResNet50's expected 224x224 input, batch of one.
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        # decode the results into a list of tuples (class, description, probability)
        # (one such list for each sample in the batch)
        print('Iter:' ,str(count), decode_predictions(preds, top=2)[0])
    print(name)
    print('\n\n\n')
# Load ImageNet-pretrained ResNet50 once, then score each local pizza folder.
model = ResNet50(weights='imagenet')
predictImages(model,'/Users/yazen/Desktop/datasets/cheesePizza', 30, 'cheese')
predictImages(model,'/Users/yazen/Desktop/datasets/blackOlivePizza', 33, 'blackOlive')
predictImages(model,'/Users/yazen/Desktop/datasets/pepperoniPizza', 32, 'pepperoni')
|
import json
import matplotlib.pyplot as plt
# Load one day's M1+ earthquake feed and re-save it with readable indentation.
source_path = "data/eq_data/eq_data_1_day_m1.json"
with open(source_path) as source_file:
    eq_data = json.load(source_file)
readable_path = "data/readable_eq_data.json"
with open(readable_path, 'w') as readable_file:
    json.dump(eq_data, readable_file, indent=4)
|
# from decouple import config
import json
import requests
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd
from sklearn.preprocessing import QuantileTransformer
# nltk.download('punkt')
from textblob import TextBlob
# Certificate error workaround
# https://stackoverflow.com/questions/38916452/nltk-download-ssl-certificate-verify-failed
'''import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download()'''
def score_sentiment(tweet):
    '''Return the polarity of `tweet` (range [-1, 1]) via TextBlob sentiment.'''
    analysis = TextBlob(tweet)
    return analysis.sentiment.polarity
""" Google API only allows for 600 queries per day for free.
def score_sentiment(tweet):
'''Extract sentiment (total and average) from multi-sentence string, sentence by sentence'''
words = word_tokenize(tweet)
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in words if w not in stop_words]
#Convert string into TextBlob
text = ''.join(filtered_sentence)
# blob = TextBlob(text)
# total_sentiment = 0
api_key = "AIzaSyAnwbBNaqHVuM82djR3-nybIezDsBu-X8Q"
url = ('https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze' +
'?key=' + api_key)
data_dict = {
'comment': {'text': text},
'languages': ['en'],
'requestedAttributes': {'TOXICITY': {}}
}
response = requests.post(url=url, data=json.dumps(data_dict))
response_dict = json.loads(response.content)
print(response_dict)
avg_sentiment = response_dict["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
# for sentence in blob.sentences:
# total_sentiment += sentence.sentiment.polarity
# avg_sentiment = total_sentiment/len(blob.sentences)
return avg_sentiment
# ,total_sentiment
"""
def scale_sentiments(sentiments):
    '''
    Use QuantileTransformer to convert sentiment scores to values in [0, 100].

    input: initial Sentiment Score pandas Series (or any 1-D sequence)
    returns: scaled Sentiment Score pandas Series

    Bug fix: the original discarded the fit_transform() result and simply
    multiplied the raw scores by 100 (and sklearn requires 2-D input, so
    the original call itself would fail on a 1-D Series).
    '''
    values = pd.Series(sentiments).to_numpy(dtype=float).reshape(-1, 1)
    # n_quantiles must not exceed the number of samples.
    scaler = QuantileTransformer(n_quantiles=min(1000, len(values)))
    # Quantile output lies in [0, 1]; stretch to the documented 0-100 range.
    scaled = scaler.fit_transform(values).ravel()
    return pd.Series(scaled * 100)
from __future__ import absolute_import
import six
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
class ProjectEventDetailsTest(APITestCase):
    """API test: the event-details endpoint links prev/next events by time."""
    def test_simple(self):
        self.login_as(user=self.user)
        group = self.create_group()
        # Three events one second apart; 'b' is the middle one we fetch,
        # so both neighbours must be present in the response.
        prev_event = self.create_event(
            event_id='a',
            group=group,
            datetime=datetime(2013, 8, 13, 3, 8, 24),
        )
        cur_event = self.create_event(
            event_id='b',
            group=group,
            datetime=datetime(2013, 8, 13, 3, 8, 25),
        )
        next_event = self.create_event(
            event_id='c',
            group=group,
            datetime=datetime(2013, 8, 13, 3, 8, 26),
        )
        url = reverse(
            'sentry-api-0-project-event-details',
            kwargs={
                'event_id': cur_event.event_id,
                'project_slug': cur_event.project.slug,
                'organization_slug': cur_event.project.organization.slug,
            }
        )
        response = self.client.get(url, format='json')
        assert response.status_code == 200, response.content
        # The API serializes ids as strings; compare via six.text_type.
        assert response.data['id'] == six.text_type(cur_event.id)
        assert response.data['nextEventID'] == six.text_type(next_event.event_id)
        assert response.data['previousEventID'] == six.text_type(prev_event.event_id)
        assert response.data['groupID'] == six.text_type(group.id)
|
from discord.ext import commands
import config
import praw
class Reddit:
    """discord.py (pre-1.0 cog style) commands backed by the Reddit API."""
    def __init__(self, bot):
        self.bot = bot
    def loginR(self):
        # Build an authenticated PRAW client from config-held credentials.
        # NOTE(review): hard-coded username; move to config with the rest.
        reddit = praw.Reddit(client_id=config.redditID, \
            client_secret=config.redditSecret, \
            user_agent='PLGbot', \
            username='Vandann', \
            password=config.redditPass)
        return reddit
    @commands.command()
    async def destiny(self, ctx):
        print("Running command: xur")
        subreddit = self.loginR().subreddit('destiny2')
        # NOTE(review): this first assignment is dead -- `post` is
        # immediately overwritten by the loop below; also hot() is fetched
        # twice. Worth cleaning up once behaviour is confirmed.
        post = subreddit.hot(limit=2)
        for f in subreddit.hot(limit=2):
            post = f.url
        return await ctx.send(post)
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(Reddit(bot))
|
# Generated by Django 2.1.1 on 2018-10-01 08:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: clears post Meta options and alters pub_date."""
    dependencies = [
        ('posts', '0010_auto_20181001_1038'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={},
        ),
        migrations.AlterField(
            model_name='post',
            name='pub_date',
            # NOTE(review): the default is the *fixed* moment makemigrations
            # ran (a captured datetime.now() value), not a callable -- the
            # usual artifact of a non-callable default on the model field.
            field=models.DateTimeField(default=datetime.datetime(2018, 10, 1, 11, 13, 4, 40375), null=True),
        ),
    ]
|
from tkinter import ttk
from tkinter import *
from Tools.sizingAdjust import sizingAdjust
import os
import threading
from Game import BoardGame
from Game import SwimmingGame
from Game import BGMain
from Lesson import ProjectileLessonMain
from Lesson import MomentumLessonMain
from Lesson import EllipseLessonMain
from DirectoryHandler import DirectoryHandler
from Tools.story import SlideData
from Tools.story import Episode
from Tools.Theme import Theme
class EllipseLessonIntro:
    """Intro card for the Ellipse lesson: title, blurb, and LEARN/EXIT buttons."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="gameIntro.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # Binds window so the lesson can be exited by key.
        # NOTE(review): the trailing space in "<Key> " is preserved from the
        # original binding — confirm it is intentional before normalising.
        self.InterfaceWindow.bind("<Key> ", lambda event: self.navigation(event))
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def navigation(self, event):
        """Keyboard navigation: X returns to the menu."""
        if event.keysym.upper() == "X":
            self.ReturnToMenu()

    def ShowIntro(self):
        """Lay out the title, description, and the LEARN/EXIT buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="Lesson- Ellipses", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(650 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.6 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[7])
        # BUG FIX: corrected the "shpaes" typo in the on-screen text.
        episodeInfo = "How can such interesting shapes be used in basketball?"
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.sizing.FontFamily[0], self.sizing.FontSize[5])
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.PlayGameButton = Button(self.InfoFrame, text="LEARN", bg=self.__Theme.getBackground2(),
                                     fg=self.__Theme.getForeground1(), cursor="hand2",
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.PlayGameButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.LoadGame(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.LoadGame(event))

    def LoadGame(self, event):
        """Start the lesson (after a short loading message) when LEARN is clicked."""
        if event.widget == self.PlayGameButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load())

    def __Load(self):
        # Remove the button so it cannot be pressed twice, then open the lesson.
        self.PlayGameButton.destroy()
        EllipseLessonMain(self.InterfaceWindow, self.sizing, self.user)

    def ReturnToMenu(self):
        """Return to the basketball-court menu (local import avoids a circular import)."""
        from Menu import BasketballCourt
        BasketballCourt(self.InterfaceWindow, self.sizing, self.user)
class MomentumLessonIntro:
    """Intro card for the Momentum lesson: title, blurb, and LEARN/EXIT buttons."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="gameIntro.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # Binds window so the lesson can be exited by key.
        # NOTE(review): the trailing space in "<Key> " is preserved from the
        # original binding — confirm it is intentional before normalising.
        self.InterfaceWindow.bind("<Key> ", lambda event: self.navigation(event))
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def navigation(self, event):
        """Keyboard navigation: X returns to the menu."""
        if event.keysym.upper() == "X":
            self.ReturnToMenu()

    def ShowIntro(self):
        """Lay out the title, description, and the LEARN/EXIT buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="LESSON- MOMENTUM", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(850 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.65 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[7])
        episodeInfo = "What is momentum?"
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.sizing.FontFamily[0], self.sizing.FontSize[5])
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.PlayGameButton = Button(self.InfoFrame, text="LEARN", bg=self.__Theme.getBackground2(),
                                     fg=self.__Theme.getForeground1(), cursor="hand2",
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.PlayGameButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.LoadGame(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.LoadGame(event))

    def LoadGame(self, event):
        """Start the lesson (after a short loading message) when LEARN is clicked."""
        if event.widget == self.PlayGameButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load())

    def __Load(self):
        # Remove the button so it cannot be pressed twice, then open the lesson.
        self.PlayGameButton.destroy()
        MomentumLessonMain(self.InterfaceWindow, self.sizing, self.user)

    def ReturnToMenu(self):
        """Return to the basketball-court menu (local import avoids a circular import)."""
        from Menu import BasketballCourt
        BasketballCourt(self.InterfaceWindow, self.sizing, self.user)
class ProjectileLessonIntro:
    """Intro card for the Projectile lesson: title, blurb, and LEARN/EXIT buttons."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="gameIntro.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # Binds window so the lesson can be exited by key.
        # NOTE(review): the trailing space in "<Key> " is preserved from the
        # original binding — confirm it is intentional before normalising.
        self.InterfaceWindow.bind("<Key> ", lambda event: self.navigation(event))
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def navigation(self, event):
        """Keyboard navigation: X returns to the menu."""
        if event.keysym.upper() == "X":
            self.ReturnToMenu()

    def ShowIntro(self):
        """Lay out the title, description, and the LEARN/EXIT buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="LESSON- PROJECTILE", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(650 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.6 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[7])
        episodeInfo = "Learn about projectile Motion"
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.sizing.FontFamily[0], self.sizing.FontSize[5])
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.PlayGameButton = Button(self.InfoFrame, text="LEARN", bg=self.__Theme.getBackground2(),
                                     fg=self.__Theme.getForeground1(), cursor="hand2",
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.PlayGameButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.LoadGame(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.LoadGame(event))

    def LoadGame(self, event):
        """Start the lesson (after a short loading message) when LEARN is clicked."""
        if event.widget == self.PlayGameButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load())

    def __Load(self):
        # NOTE(review): unlike the sibling intros this does not destroy
        # PlayGameButton first — preserved as-is; confirm whether intentional.
        ProjectileLessonMain(self.InterfaceWindow, self.sizing, self.user)

    def ReturnToMenu(self):
        """Return to the basketball-court menu (local import avoids a circular import)."""
        from Menu import BasketballCourt
        BasketballCourt(self.InterfaceWindow, self.sizing, self.user)
class BasketballIntro:
    """Intro card for the Basketball game: title, instructions, and
    EASY/MEDIUM/HARD difficulty buttons plus EXIT."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="basketballBackground.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # Binds window so the game can be exited by key.
        # NOTE(review): the trailing space in "<Key> " is preserved from the
        # original binding — confirm it is intentional before normalising.
        self.InterfaceWindow.bind("<Key> ", lambda event: self.navigation(event))
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def navigation(self, event):
        """Keyboard navigation: X returns to the menu."""
        if event.keysym.upper() == "X":
            self.ReturnToMenu()

    def ShowIntro(self):
        """Lay out the title, instructions, and EXIT/HARD/MEDIUM/EASY buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="GAME - Basketball", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(650 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.6 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], int(round(self.FontSize[7])))
        episodeInfo = "Shoot the ball in the lane corresponding to the correct answer. You have 5 balls so use them wisely!"
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.FontFamily[0], int(round(self.FontSize[7])))
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.665, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.HardButton = Button(self.InfoFrame, text="HARD", bg=self.__Theme.getBackground2(),
                                 fg=self.__Theme.getForeground1(), cursor="hand2",
                                 font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.HardButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.MediumButton = Button(self.InfoFrame, text="MEDIUM", bg=self.__Theme.getBackground2(),
                                   fg=self.__Theme.getForeground1(), cursor="hand2",
                                   font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MediumButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.EasyButton = Button(self.InfoFrame, text="EASY", bg=self.__Theme.getBackground2(),
                                 fg=self.__Theme.getForeground1(), cursor="hand2",
                                 font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.EasyButton.place(relx=.8, rely=.185, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.LoadGame(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.LoadGame(event))

    def LoadGame(self, event):
        """Start the game at the difficulty matching the clicked button."""
        if event.widget == self.EasyButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load("Easy"))
        elif event.widget == self.MediumButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load("Medium"))
        elif event.widget == self.HardButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load("Hard"))

    def __Load(self, difficulty):
        # Tear down the intro canvas before handing over to the game proper.
        self.ScreenCanvas.destroy()
        BGMain(self.InterfaceWindow, self.sizing, self.user, difficulty)

    def ReturnToMenu(self):
        """Return to the basketball-court menu (local import avoids a circular import)."""
        from Menu import BasketballCourt
        BasketballCourt(self.InterfaceWindow, self.sizing, self.user)
class SwimIntro:
    """Intro card for the Swimming game: title, instructions, and PLAY/EXIT buttons."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="swimming.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # Binds window so the game can be exited by key.
        # NOTE(review): the trailing space in "<Key> " is preserved from the
        # original binding — confirm it is intentional before normalising.
        self.InterfaceWindow.bind("<Key> ", lambda event: self.navigation(event))
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def navigation(self, event):
        """Keyboard navigation: X returns to the menu."""
        if event.keysym.upper() == "X":
            self.ReturnToMenu()

    def ShowIntro(self):
        """Lay out the title, instructions, and the PLAY/EXIT buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="GAME - SWIMMING", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(650 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.6 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[7])
        episodeInfo = "Press the left, down and up arrows to select the respective option to move faster(the red player).\nClick on the pool to start a race."
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.sizing.FontFamily[0], self.sizing.FontSize[5])
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.PlayGameButton = Button(self.InfoFrame, text="PLAY", bg=self.__Theme.getBackground2(),
                                     fg=self.__Theme.getForeground1(), cursor="hand2",
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.PlayGameButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.LoadGame(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.LoadGame(event))

    def LoadGame(self, event):
        """Start the game (after a short loading message) when PLAY is clicked."""
        if event.widget == self.PlayGameButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(3000, lambda: self.__Load())

    def __Load(self):
        # Tear down the intro canvas before handing over to the game proper.
        self.ScreenCanvas.destroy()
        SwimmingGame(self.InterfaceWindow, self.sizing, self.user)

    def ReturnToMenu(self):
        """Return to the main menu (local import avoids a circular import)."""
        from Menu import Main
        Main(self.InterfaceWindow, self.sizing, self.user)
class BoardGameIntro:
    """Intro and story sequence for the board game "The Force": shows an intro
    card, then plays a 26-slide episode before launching BoardGame."""

    def __init__(self, window, sizings, user):
        """Build the intro screen on *window*, scaled by *sizings*, for *user*."""
        self.Tasks = []
        self.user = user
        self.FontFamily = ["Microsoft YaHei UI Light", "Ebrima"]
        self.interfaceDirectory = os.getcwd()
        self.InterfaceWindow = window
        # Sizing metrics scale the layout to the current screen.
        sizing = sizings
        self.sizing = sizing
        self.padding = sizing.padding
        self.screenWidth = sizing.width
        self.screenHeight = sizing.height
        self.FontSize = sizing.FontSize
        self.DirectoryHandler = DirectoryHandler()
        self.__Theme = Theme()
        # Ensure the canvas is in the centre of the screen.
        self.ScreenCanvas = Frame(self.InterfaceWindow)
        self.ScreenCanvas.place(relx=sizing.canvasPosX, rely=sizing.canvasPosY, width=self.screenWidth,
                                height=self.screenHeight)
        self.DirectoryHandler.changeDirectoryToBackground()
        background = PhotoImage(file="force.png")
        self.DirectoryHandler.changeDirectoryToMain()
        BackgroundLabel = Label(self.ScreenCanvas, image=background)
        # BUG FIX: keep a reference to the PhotoImage — Tk holds only a weak
        # link, so without this the image is garbage-collected and the
        # background renders blank.
        BackgroundLabel.image = background
        BackgroundLabel.place(relx=0, rely=0)
        self.ShowIntro()
        # BUG FIX: the original ran
        #   threading.Thread(target=self.InterfaceWindow.mainloop(), args=()).start()
        # which CALLED mainloop synchronously and then started a thread whose
        # target was the None return value (a no-op). Tkinter must run on the
        # main thread anyway, so call mainloop directly.
        self.InterfaceWindow.mainloop()

    def ShowIntro(self):
        """Lay out the title, instructions, and the PLAY/EXIT buttons."""
        self.InfoFrame = Frame(self.ScreenCanvas, bg=self.__Theme.getBackground1())
        self.InfoFrame.place(relx=.1, rely=.1, width=.8*self.sizing.width, height=.8*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[0])
        GameTitleLabel = Message(self.InfoFrame, text="GAME - THE FORCE", font=fontStyle, bg=self.__Theme.getBackground1(),
                                 fg=self.__Theme.getForeground1(), width=int(round(650 * (self.sizing.width /1366))))
        GameTitleLabel.place(relx=0, rely=0, width=.6 *self.sizing.width, height=.6*self.sizing.height)
        fontStyle = (self.FontFamily[0], self.FontSize[7])
        episodeInfo = "Use your left and right arrows to return to menu/proceed with game. To skip intro press space."
        self.GameDescriptionLabel = Message(self.InfoFrame, text=episodeInfo, font=fontStyle,
                                            bg=self.__Theme.getBackground1(), fg=self.__Theme.getForeground1(),
                                            width=int(round(600 * (self.sizing.width / 1366))))
        self.GameDescriptionLabel.place(relx=0, rely=.7, width=.6 * self.sizing.width, height=.2*self.sizing.height)
        moveBackButtonFont = (self.sizing.FontFamily[0], self.sizing.FontSize[5])
        self.MoveBackButton = Button(self.InfoFrame, text="EXIT", bg=self.__Theme.getBackground2(), cursor="hand2",
                                     fg=self.__Theme.getForeground1(), command=lambda: self.ReturnToMenu(),
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.MoveBackButton.place(relx=.8, rely=.505, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.PlayGameButton = Button(self.InfoFrame, text="PLAY", bg=self.__Theme.getBackground2(),
                                     fg=self.__Theme.getForeground1(), cursor="hand2",
                                     font=moveBackButtonFont, bd=0, activebackground=self.__Theme.getBackground2())
        self.PlayGameButton.place(relx=.8, rely=.345, height=.8*.15*self.sizing.height, width=.8*.15*self.sizing.width)
        self.InterfaceWindow.bind("<Button-1>", lambda event: self.startStoryLine(event))
        self.InterfaceWindow.bind("<Key>", lambda event: self.startStoryLine(event))

    def startStoryLine(self, event):
        """Route intro-screen input: Left/X exit; Right or the PLAY button
        starts the story episode."""
        print(event.keysym)  # debug trace of incoming key presses
        if event.keysym == "Left":
            self.ReturnToMenu()
        elif event.keysym == "Right":
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(100, lambda: self.createEpisode())
        elif event.keysym.upper() == "X":
            self.ReturnToMenu()
        if event.widget == self.PlayGameButton:
            self.GameDescriptionLabel['text'] = "Loading game..."
            self.InterfaceWindow.after(100, lambda: self.createEpisode())

    def createEpisode(self):
        """Assemble the 26-slide story episode, start it, and bind slide navigation."""
        # BUG FIX: corrected spelling/grammar typos in the on-screen story text
        # ("the a question", "assigment", "diffrentiate", "coedfficient").
        episodeText1 = "You are in the city of Shenmi. You have been sent on an errand by your father to buy some food from a nearby shop in a busy street."
        episodeText2 = "As you approach the market, you notice a man that steals some apples and run off."
        episodeText3 = "Stop right There"
        episodeText4 = "After him!"
        episodeText5 = "The man runs out the shop with the guards."
        episodeText6 = "You leave the shop. The street is empty."
        episodeText7 = "You then look around to find a dead man on the floor. A door to a shop on the opposite side of the road opens."
        episodeText8 = "HEY! YOU THERE! WHO DO YOU THINK YOU ARE LOOKING AT?!"
        episodeText9 = "You run."
        episodeText10 = "He catches up with you."
        episodeText11 = "You are stopped by another man on the other corner."
        episodeText12 = "Going somewhere?"
        episodeText13 = "Let’s see how tough you are by the time I’m finished with…"
        episodeText14 = "That's where them - the Force"
        episodeText15 = "You became an agent, part of the force"
        episodeText16 = "They took you in and you've undergone lots of lessons"
        episodeText17 = "Now, you must chase your enemy Rival through the grid before they get the stolen treasure"
        episodeText18 = "The treasure is located ahead of a 100 block pathway similar to that of a snake and ladders game"
        episodeText19 = "You and Rival are at this block for either to move they must answer a question correctly"
        episodeText20 = "A machine determines whose turn it is and then a question is asked to that one"
        episodeText21 = "Once you get a question right a dice will be rolled to determine how far you move forward"
        episodeText22 = "The same will happen for Rival when he is given a chance to go."
        episodeText23 = "For this assignment remember: to differentiate you multiply the coefficient by the power"
        episodeText24 = "and reduce the degree by -1"
        episodeText25 = "Integration you do the opposite. You increase the power and divide the coefficient by that"
        episodeText26 = "Enjoy your mission"
        Slide1 = SlideData("narration", ["market.png", episodeText1, "TC"])
        Slide2 = SlideData("narration", ["market.png", episodeText2, "TC"])
        Slide3 = SlideData("convo", ["market.png", "guard1", episodeText3])
        Slide4 = SlideData("convo", ["market.png", "guard2", episodeText4])
        Slide5 = SlideData("narration", ["street1.png", episodeText5, "TC"])
        Slide6 = SlideData("narration", ["street1.png", episodeText6, "TC"])
        Slide7 = SlideData("narration", ["street1.png", episodeText7, "TC"])
        Slide8 = SlideData("convo", ["street1.png", "villain1", episodeText8])
        Slide9 = SlideData("narration", ["street1.png", episodeText9, "TC"])
        Slide10 = SlideData("narration", ["street1.png", episodeText10, "TC"])
        Slide11 = SlideData("narration", ["street1.png", episodeText11, "TC"])
        Slide12 = SlideData("convo", ["street1.png", "villain2", episodeText12])
        Slide13 = SlideData("convo", ["street1.png", "villain1", episodeText13])
        Slide14 = SlideData("narration", ["force.png", episodeText14, "TC"])
        Slide15 = SlideData("narration", ["force.png", episodeText15, "TC"])
        Slide16 = SlideData("narration", ["force.png", episodeText16, "TC"])
        Slide17 = SlideData("narration", ["questInfo.png", episodeText17, "TC"])
        Slide18 = SlideData("narration", ["questInfo.png", episodeText18, "TC"])
        Slide19 = SlideData("narration", ["questInfo.png", episodeText19, "TC"])
        Slide20 = SlideData("narration", ["questInfo.png", episodeText20, "TC"])
        Slide21 = SlideData("narration", ["questInfo.png", episodeText21, "TC"])
        Slide22 = SlideData("convo", ["calculus.png", "narrator", episodeText22])
        Slide23 = SlideData("convo", ["calculus.png", "narrator", episodeText23])
        Slide24 = SlideData("convo", ["calculus.png", "narrator", episodeText24])
        Slide25 = SlideData("convo", ["calculus.png", "narrator", episodeText25])
        Slide26 = SlideData("convo", ["calculus.png", "narrator", episodeText26])
        slideArray = [Slide2, Slide3, Slide4, Slide5, Slide6, Slide7, Slide8, Slide9, Slide10,
                      Slide11, Slide12, Slide13, Slide14, Slide15, Slide16, Slide17, Slide18,
                      Slide19, Slide20, Slide21, Slide22, Slide23, Slide24, Slide25, Slide26]
        # The episode is seeded with the first slide; the rest are appended.
        self.Episode = Episode(Slide1, self.InterfaceWindow, self.sizing)
        for slide in slideArray:
            self.Episode.AddSlide(slide, self.InterfaceWindow, self.sizing)
        self.Episode.StartEpisode()
        self.InterfaceWindow.bind("<Key>", lambda event: self.moveToNextSlide(event))

    def ReturnToMenu(self):
        """Return to the main menu (local import avoids a circular import)."""
        from Menu import Main
        Main(self.InterfaceWindow, self.sizing, self.user)

    def moveToNextSlide(self, event):
        """Advance/rewind the story; a TypeError from the slide engine signals
        the end of the episode, at which point the board game is launched."""
        try:
            if event.keysym == "Right":
                try:
                    self.Episode.NextSlide()
                except TypeError:
                    # Ran past the last slide: start the game proper.
                    BoardGame(self.InterfaceWindow, self.sizing, self.user)
            elif event.keysym == "Left":
                try:
                    self.Episode.PreviousSlide()
                except:
                    # NOTE(review): bare except kept from the original — it also
                    # swallows unexpected errors; consider narrowing.
                    self.ReturnToMenu()
            elif event.keysym == "space":
                # Skip the whole intro.
                self.MoveBackButton.destroy()
                BoardGame(self.InterfaceWindow, self.sizing, self.user)
        except TypeError:
            self.MoveBackButton.destroy()
            BoardGame(self.InterfaceWindow, self.sizing, self.user)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.template import Context
from django.template.loader import get_template
from django.http.response import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, render_to_response, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.template.context import RequestContext
from django.contrib.auth.models import User
from django.utils import timezone
from django.views.generic.edit import CreateView
from django.views.generic import ListView, DeleteView, TemplateView
# from .forms import FormularioEvento
from django.core.urlresolvers import reverse_lazy
from servicio.models import *
from servicio.forms import *
from caja.forms import *
from caja.models import *
from abonado.models import *
from abonado.forms import *
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
class ServView(TemplateView):
    """Registration view for 'servicio': lists related data on GET and creates
    a servicio record from the POSTed form fields."""
    template_name = 'rg_servicio.html'
    def get(self, request, *args, **kwargs):
        # Fetch every subscriber and cash-register record for the template.
        abo = abonado.objects.all()
        caj = caja.objects.all()
        # NOTE(review): duplicate 'data' key — the abonado queryset is silently
        # discarded and the template only ever receives the caja queryset.
        # Two distinct context keys were probably intended; confirm against
        # rg_servicio.html before changing.
        return render(request,self.template_name, {'data': abo, 'data': caj})
    def post(self, request):
        # Create the servicio straight from raw POST fields.
        # NOTE(review): no form validation — a missing key raises KeyError
        # (HTTP 500); consider a Django Form/ModelForm.
        servicio.objects.create(cedula2=request.POST['cedula2'],
            num_abonado3_id=int(request.POST['num_abonado3']),
            nombre_abo_serv1=request.POST['nombre_abo_serv1'],
            nombre_abo_serv2=request.POST['nombre_abo_serv2'],
            apellido_abo_serv1=request.POST['apellido_abo_serv1'],
            apellido_abo_serv2=request.POST['apellido_abo_serv2'],
            direccion_abo_serv=request.POST['direccion_abo_serv'],
            serial2_id=int(request.POST['serial2']),
            descripcion=request.POST['descripcion'])
        return redirect('rg_servicio')
#!flask/bin/python
import os
# Watch the template tree as well, so the Flask dev reloader restarts on edits
# to files that are not Python modules.
extra_dirs = ['./app/templates', ]
extra_files = list(extra_dirs)
for watched_dir in extra_dirs:
    for dirpath, _dirnames, filenames in os.walk(watched_dir):
        for entry in filenames:
            full_path = os.path.join(dirpath, entry)
            if os.path.isfile(full_path):
                extra_files.append(full_path)
from app import app
app.run(debug=True, extra_files=extra_files)
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.html import mark_safe
from django.template.defaultfilters import truncatechars
from .models import EmailTokenAccess
from bookshelves.models import Bookshelf
# Resolve the project's active user model (respects AUTH_USER_MODEL).
User = get_user_model()
@admin.register(User)
class NopasswordAdmin(admin.ModelAdmin):
    """Admin for the password-less user model: lists email and name."""
    list_display = [
        "email",
        "name",
    ]
@admin.register(EmailTokenAccess)
class EmailTokenAccessAdmin(admin.ModelAdmin):
    """Admin for login-by-email tokens: lists the owner and creation time."""
    list_display = [
        "user",
        "created_at",
    ]
# @admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Alternative User admin (currently unregistered — NopasswordAdmin owns the
    User registration above) with an avatar preview and a bulk action that
    creates a default bookshelf for users lacking one."""
    list_display = [
        "avatar_url_image",
        "username",
        "email",
        "is_staff",
        "short_password",
        "have_bookshelf",
    ]
    actions = ["make_bookshelf"]
    def short_password(self, obj):
        # Show only a prefix of the (hashed) password in the changelist.
        return truncatechars(obj.password, 10)
    def avatar_url_image(self, obj):
        # BUG FIX: the original emitted '<img src={url} "width=30px" height="30px" />'
        # — src unquoted and the width attribute's quotes misplaced, producing
        # invalid HTML. Both attributes are now properly quoted.
        return mark_safe(f'<img src="{obj.avatar_url}" width="30px" height="30px" />')
    def have_bookshelf(self, obj):
        # True when the user already owns at least one bookshelf.
        return obj.bookshelf_set.exists()
    @admin.action(description="Make BookShelf, if have not.")
    def make_bookshelf(modeladmin, request, queryset):
        # Create a default bookshelf for every selected user that has none.
        for user in queryset.all():
            if not user.bookshelf_set.exists():
                user.bookshelf_set.create(name="default")
|
# TreasuryQuants.com Ltd.
# email: contact@treasuryquants.com
# Note: this software is provided "as-is" under the agreed terms of your account.
# For more information see https://treasuryquants.com/terms-of-services/
from TQapis import TQRequests, TQConnection
# API: describe
# Explanation: lists and explains the APIs and the arguments that each API expects.
# Arguments: with no argument it returns a list of all available APIs with a
#            description for each; given an API name, it describes that API and
#            lists its expected arguments with a description for each.
#
# About this example: we first run "describe" with no arguments to get the list
# of APIs, then re-run "describe" once per API name to see 1) what the API does
# and 2) what arguments we need to run it with.
#
# configuration for this file
user_email = "client.email@address.here"
target_url = "http://operations.treasuryquants.com"
is_post = False  # True = use POST method, False = use GET method
connection = TQConnection.Connection(user_email, is_post, target_url)
#
# Check that we can reach the service at all.
#
request_ip_return = TQRequests.request_ip_return()
message = connection.send(request_ip_return)
if not message.is_OK:
    print(message.is_OK, message.content)
    exit()
#
# Get the list of all functions.
#
request_function_describe = TQRequests.request_function_describe()  # no arguments passed
message_describe = connection.send(request_function_describe)
if not message_describe.is_OK:
    print(message_describe.is_OK, message_describe.content)
    exit()
# BUG FIX: this summary previously printed `message` (the ip_return reply)
# instead of the describe reply it introduces.
print("\nresult status:{}\ncost:{}\nbalance:{}\ncontent:{}".format(
    message_describe.is_OK, connection.cost, connection.balance, message_describe.content))
print("\n" + "-" * 100)
for key, description in message_describe.content.items():
    print("{}: {}".format(key, description))
print("\n" + "-" * 100)
#
# Describe each function in turn to get its input arguments.
#
for function_name, description in message_describe.content.items():
    # pass the name of the API as the argument
    request_function_describe = TQRequests.request_function_describe(function_name)
    message = connection.send(request_function_describe)
    if not message.is_OK:
        print(message.is_OK, message.content)
        exit()
    print("Describe:{}".format(function_name))
    for key, description in message.content.items():
        print("{}: {}".format(key, description), end="\n")
    print('_' * 100)
print("\n" + "-" * 100)
from Transition import transicion
from graphviz import Digraph
from Nodo import nodo
import TabNext
import Hojas
class tranTab:
    """Builds a DFA-style transition table from a syntax tree.

    Each state is a 4-item list: [name ("S<n>"), set/list of tree positions,
    list of `transicion` objects, is_accepting]. Construction looks like a
    followpos-based subset construction — confirm against TabNext/Hojas.
    """
    def __init__(self, root):
        # `root` is the syntax-tree root; its `first` set seeds state S0.
        self.states = []
        self.contador = 0  # running counter used to name states S0, S1, ...
        self.states.append( ["S"+str(self.contador), root.first, [], False] )
        self.contador += 1
        # Worklist loop: states appended inside the body are picked up by this
        # same for-loop, so iteration continues until no new state appears.
        for estado in self.states:
            elementos = estado[1]
            for hoja in elementos:
                # lexema: transition symbol; siguientes: follow set for `hoja`.
                lexema, siguientes = TabNext.getSig(hoja)
                # Look for an existing state with an identical position set
                # (compared via string concatenation of the elements).
                estado_existe = False
                estado_encontadorrado = ""
                for e in self.states:
                    if "".join(str(v) for v in e[1]) == "".join(str(v) for v in siguientes):
                        estado_existe = True
                        estado_encontadorrado = e[0]
                        break
                if not estado_existe:
                    if Hojas.aceptacion(hoja):
                        estado[3] = True  # leaf marks this state as accepting
                    if lexema == "":
                        continue  # empty symbol: no transition, no new state
                    # Register a fresh state and the transition into it.
                    nuevo = ["S"+str(self.contador), siguientes, [], False]
                    trans = transicion(estado[0], lexema, nuevo[0])
                    estado[2].append(trans)
                    self.contador += 1
                    self.states.append(nuevo)
                else:
                    if Hojas.aceptacion(hoja):
                        estado[3] = True
                    # Reuse the existing state; avoid duplicate transitions on
                    # the same (source, symbol) pair.
                    trans_existe = False
                    for trans in estado[2]:
                        if trans.comp(estado[0], lexema):
                            trans_existe = True
                            break
                    if not trans_existe:
                        trans = transicion(estado[0], lexema, estado_encontadorrado)
                        estado[2].append(trans)
    def grafo(self, nombre=""):
        """Render the automaton with graphviz; accepting states get a double circle."""
        dot = Digraph(comment='Grafica de states')
        dot.attr('node', shape='circle')
        # Legend node explaining the single-letter transition symbols.
        dot.node("L: Letras\\nD: Digitos\\nS: Slash\\nT: Todo\\nA: Asterisco\\nI: Salto\\nC: Comilla\\nK: Simbolo\\nP: Punto")
        for e in self.states:
            dot.node(e[0],e[0])
            if e[3]:
                # Re-declaring the node overrides its shape for accepting states.
                dot.node(e[0], shape='doublecircle')
        for e in self.states:
            for t in e[2]:
                dot.edge(t.eIni, t.eFin, label=t.tran)
        dot.render(nombre+".gv",view=True)
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Smoke test: fill and submit the "Contact us" form on datafolks.com,
# dismiss the confirmation popup, and navigate back home.
driver = webdriver.Firefox()
try:
    driver.get("https://www.datafolks.com")
    contact_us_btn = WebDriverWait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, '//div[contains(@class, "tn-elem__912597111551726675405")]')))
    contact_us_btn.click()
    name_input_field = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.NAME, "Name")))
    name_input_field.send_keys("Adam First")
    email_input_field = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.NAME, "Email")))
    email_input_field.send_keys("adam@example.com")
    phone_input_field = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.NAME, "Phone")))
    phone_input_field.send_keys("+18474363041")
    country_input_field = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.NAME, "Country")))
    country_input_field.send_keys("USA")
    send_btn = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CLASS_NAME, "t-submit")))
    send_btn.click()
    close_icon_btn = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//div[contains(@class, "t-popup__close-wrapper")]')))
    close_icon_btn.click()
    home_pg_btn = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[@class='tn-atom' and text()='Home']")))
    home_pg_btn.click()
finally:
    # BUG FIX: driver.quit() previously ran only on full success, so any wait
    # timeout leaked a browser process. Always release the browser.
    driver.quit()
#Process finished with exit code 0
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseForbidden
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from patientprofile.models import PatientProfile, Allergy
import json
import time
import datetime
@require_http_methods(["GET", "POST"])
@csrf_exempt
def profile(request):
    """Dispatch /profile requests: GET reads the profile, POST updates it."""
    handlers = {'GET': get_profile, 'POST': update_profile}
    handler = handlers.get(request.method)
    # Other verbs are already rejected by the decorator; fall through to None.
    return handler(request) if handler else None
def get_profile(request):
    """Return the authenticated user's patient profile as JSON.

    Responds 403 for anonymous requests and 404 when the user has no
    PatientProfile row (previously `profile[0]` raised IndexError -> 500).
    """
    if request.user is None or not request.user.is_authenticated:
        return HttpResponseForbidden("Missing or invalid Authorization token")
    profile = PatientProfile.objects.filter(user_id=request.user.id).first()
    if profile is None:
        return HttpResponseNotFound("Profile not found")
    return HttpResponse(json.dumps(profile.as_dict()), content_type='application/json')
def update_profile(request):
    """Replace the authenticated user's profile (and allergies) from a JSON body.

    Returns the updated profile as JSON, 403 for anonymous requests, and 400
    for malformed input. Previously `json.loads` and the date parse ran
    outside the try block, so a bad body or bad `dateOfBirth` caused a 500
    instead of a 400.
    """
    user = request.user
    if user is None or not user.is_authenticated:
        return HttpResponseForbidden("Missing or invalid Authorization token")
    # Parse the json body; reject unparseable payloads with 400, not 500.
    try:
        body = json.loads(request.body)
    except (json.JSONDecodeError, UnicodeDecodeError):
        return HttpResponseBadRequest("Invalid data.")
    email = body.get('emailAddress')
    first_name = body.get('firstName')
    last_name = body.get('lastName')
    image = body.get('image')
    medical_conditions = body.get('medicalConditions')
    medication_list = body.get('medicationList')
    allergy = body.get('allergy')
    cancer_diagnosis = body.get('cancerDiagnosis')
    gender = body.get('gender')
    phone_number = body.get('phoneNumber')
    chemotherapy = body.get('chemotherapy')
    # Missing or malformed dateOfBirth is client error, not a server crash.
    try:
        date_of_birth = datetime.datetime.strptime(body.get('dateOfBirth'), '%Y-%m-%d').date()
    except (TypeError, ValueError):
        return HttpResponseBadRequest("Invalid data.")
    try:
        user.email = email
        user.first_name = first_name
        user.last_name = last_name
        user.patientprofile.image = image
        user.patientprofile.medical_conditions = medical_conditions
        user.patientprofile.medication_list = medication_list
        user.patientprofile.cancer_diagnosis = cancer_diagnosis
        user.patientprofile.gender = gender
        user.patientprofile.date_of_birth = date_of_birth
        user.patientprofile.phone_number = phone_number
        # Delete existing allergies, then recreate from the request body.
        Allergy.objects.filter(patient_id=user.patientprofile).delete()
        for a in allergy:
            temp = Allergy()
            temp.patient = user.patientprofile
            temp.allergen = a.get('allergen')
            temp.reaction = a.get('reaction')
            temp.save()
        user.patientprofile.chemotherapy = chemotherapy
        user.save()
        user.patientprofile.save()
    except Exception:
        # Broad catch kept deliberately: any model/validation failure maps to 400.
        return HttpResponseBadRequest("Invalid data.")
    return HttpResponse(json.dumps(user.patientprofile.as_dict()), content_type='application/json')
@require_http_methods(["GET"])
def users(request):
    """Search patient profiles by first/last name or email; unfiltered requests return at most 10."""
    user = request.user
    if user is None or not user.is_authenticated:
        return HttpResponseForbidden("Missing or invalid Authorization token")
    query = request.GET.get('query')
    if query is None:
        matches = PatientProfile.objects.all()[:10]
    else:
        # Need to limit number of users returned at some point
        matches = PatientProfile.objects.filter(
            Q(user__first_name__icontains=query)
            | Q(user__last_name__icontains=query)
            | Q(user__email__icontains=query)
        )
    payload = {"users": [profile.as_dict() for profile in matches]}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@require_http_methods(["GET"])
def get_s3_info(request):
    """Return S3 credentials for the authenticated client as JSON.

    SECURITY: these AWS credentials were hard-coded in source control. They
    are now read from the environment, falling back to the historical values
    so existing deployments keep working unchanged — rotate the keys and
    remove the fallbacks as soon as possible.
    """
    import os  # local import: avoids touching this file's top-level imports
    user = request.user
    if user is None or not user.is_authenticated:
        return HttpResponseForbidden("Missing or invalid Authorization token")
    access = os.environ.get('S3_ACCESS_KEY', 'AKIAJHQ6TIQX7RRLMLQA')
    secret = os.environ.get('S3_SECRET_KEY', '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')
    region = os.environ.get('S3_REGION', 'us-east-2')
    return HttpResponse(json.dumps({"accessKey": access, "secretKey": secret, "region": region}), content_type='application/json')
import torch
def train(model, train_loader, optimizer, device, logger):
    """Run one training epoch of a QA (span-prediction) model.

    Args:
        model: callable returning outputs whose first element is the loss.
        train_loader: iterable of dicts with 'input_ids', 'attention_mask',
            'start_positions', 'end_positions' tensors.
        optimizer: torch optimizer stepping `model`'s parameters.
        device: device the batch tensors are moved to.
        logger: logger for periodic progress messages.

    Fixes: renamed the loop variable (shadowed builtin `iter`), removed the
    dead `loss_sum` accumulator, logged the 1-based step number the modulus
    gate actually fires on, and used lazy logging arguments.
    """
    model.train()
    for step, batch in enumerate(train_loader):
        optimizer.zero_grad()
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        start_positions = batch['start_positions'].to(device)
        end_positions = batch['end_positions'].to(device)
        outputs = model(input_ids, attention_mask=attention_mask,
                        start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]
        loss.backward()
        optimizer.step()
        if (step + 1) % 100 == 0:
            # Lazy %-args: the message is only formatted if the level is enabled.
            logger.info(' step : %d/%d Loss: %.4f', step + 1, len(train_loader), loss.detach())
def valid(model, dev_loader, device, tokenizer, logger):
    """Evaluate a QA model on `dev_loader`.

    Returns (pred_texts, ans_texts, mean_loss) where texts are the decoded
    predicted/gold answer spans.

    BUG FIX: the running loss was divided by the *last batch index* (`iter`),
    which raised ZeroDivisionError for a single batch and was off by one
    otherwise; it is now divided by the number of batches.
    """
    model.eval()
    pred_texts = []
    ans_texts = []
    loss_sum = 0.0
    num_batches = 0
    with torch.no_grad():
        for batch in dev_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            start_positions = batch['start_positions'].to(device)
            end_positions = batch['end_positions'].to(device)
            outputs = model(input_ids, attention_mask=attention_mask,
                            start_positions=start_positions, end_positions=end_positions)
            # Greedy span prediction: most likely start/end token per example.
            pred_start_positions = torch.argmax(outputs['start_logits'], dim=1).to('cpu')
            pred_end_positions = torch.argmax(outputs['end_logits'], dim=1).to('cpu')
            for b in range(len(pred_start_positions)):
                ans_text = tokenizer.convert_tokens_to_string(
                    tokenizer.convert_ids_to_tokens(input_ids[b][start_positions[b]:end_positions[b] + 1]))
                pred_text = tokenizer.convert_tokens_to_string(
                    tokenizer.convert_ids_to_tokens(input_ids[b][pred_start_positions[b]:pred_end_positions[b] + 1]))
                ans_texts.append(ans_text)
                pred_texts.append(pred_text)
            loss_sum += outputs[0].to('cpu')
            num_batches += 1
    # max(..., 1) guards an empty loader instead of raising ZeroDivisionError.
    return pred_texts, ans_texts, loss_sum / max(num_batches, 1)
|
import tkinter as tk
import requests
import json
import js2py
import hashlib
import urllib.parse as up
class Google() :
    """Translate text via Google Translate's public web endpoint.

    The `tk` request parameter is an anti-bot token computed by running the
    site's own obfuscated JavaScript (below) through js2py.
    """
    def __init__(self):
        # Token-generation JavaScript lifted verbatim from translate.google.cn.
        # Evaluated by js2py in getTk(); do not reformat or "clean up".
        self.js_code = '''
        function TL(a) {
            var k = "";
            var b = 406644;
            var b1 = 3293161072;
            var jd = ".";
            var $b = "+-a^+6";
            var Zb = "+-3^+b+-f";
            for (var e = [], f = 0, g = 0; g < a.length; g++) {
                var m = a.charCodeAt(g);
                128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
                e[f++] = m >> 18 | 240,
                e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
                e[f++] = m >> 6 & 63 | 128),
                e[f++] = m & 63 | 128)
            }
            a = b;
            for (f = 0; f < e.length; f++) a += e[f],
            a = RL(a, $b);
            a = RL(a, Zb);
            a ^= b1 || 0;
            0 > a && (a = (a & 2147483647) + 2147483648);
            a %= 1E6;
            return a.toString() + jd + (a ^ b)
        };
        function RL(a, b) {
            var t = "a";
            var Yb = "+";
            for (var c = 0; c < b.length - 2; c += 3) {
                var d = b.charAt(c + 2),
                d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
                d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
                a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
            }
            return a
        }
        '''
        # Browser-like User-Agent; presumably required to avoid bot blocking — confirm.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
        }
        # Format slots: target language, tk token, query text.
        self.url = 'https://translate.google.cn/translate_a/single?client=t&sl=auto&tl={}&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&tk={}&q={}'
    def isChinese(self, word):
        # True if `word` contains any CJK unified ideograph.
        for w in word:
            if '\u4e00' <= w <= '\u9fa5':
                return True
        return False
    def getTk(self, word):
        # Evaluate the obfuscated JS to compute the `tk` token for `word`.
        evaljs = js2py.EvalJs()
        evaljs.execute(self.js_code)
        tk = evaljs.TL(word)
        return tk
    def translate(self, word):
        """Translate `word`: non-Chinese input goes to Chinese, Chinese to English."""
        if len(word) > 4891:
            raise RuntimeError('The length of word should be less than 4891...')
        languages = ['zh-CN', 'en']
        if not self.isChinese(word):
            target_language = languages[0]
        else:
            target_language = languages[1]
        res = requests.get(self.url.format(target_language, self.getTk(word), word), headers = self.headers)
        # First element of the nested response array is the translated text.
        return res.json()[0][0][0]
class Youdao() :
    """Translate via the (unofficial) Youdao web endpoint."""
    def __init__(self) :
        self.url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null'
    def getRequest(self, sentence) :
        """POST `sentence` to the endpoint; return the raw body, or None on non-200."""
        _data = {
            'i': sentence,
            'from': 'AUTO',
            # NOTE(review): mixed-case 'Auto' looks like a typo for 'AUTO' —
            # kept as-is because the live API accepts it; confirm before changing.
            'to': 'Auto',
            'doctype': 'json',
            'version': '2.1',
            'keyfrom': 'fanyi.web',
            'action': 'FY_BY_CLICKBUTTION',
            'typoResult': 'false',
        }
        response = requests.post(self.url, data=_data)
        if response.status_code == 200 :
            return response.text
        print("Something get error, please try again.")
        return None
    def getResult(self, response) :
        """Extract the first translated segment from the JSON response body."""
        result_text = json.loads(response)
        tgt = result_text['translateResult'][0][0]['tgt']
        return tgt
    def translate(self, sentence) :
        """Translate `sentence` and return the result text.

        BUG FIX: previously constructed a brand-new Youdao() on every call
        instead of using this instance's own methods.
        """
        response = self.getRequest(sentence)
        return self.getResult(response)
class Baidu() :
    """Translate via the Baidu fanyi REST API using MD5-signed GET requests."""
    def __init__ (self):
        #self.url = 'http://api.fanyi.baidu.com/api/trans/vip/translate?'
        self.url = 'https://fanyi-api.baidu.com/api/trans/vip/translate?'
        self.appid = '20170709000063735'
        self.key = 'EiXpUVJAu4mLYinEqgzN'
        self.salt = '1435660288'
    def isChinese(self, word):
        """Return True when `word` contains at least one CJK unified ideograph."""
        return any('\u4e00' <= ch <= '\u9fa5' for ch in word)
    def translate(self, sentece) :
        """Translate `sentece`, targeting the language opposite to its script."""
        # Chinese input -> English; everything else -> Chinese.
        tl = 'en' if self.isChinese(sentece) else 'zh'
        # Signature is md5(appid + query + salt + key) per Baidu's API contract.
        sign_source = (self.appid + sentece + self.salt + self.key).encode(encoding = 'utf-8')
        self.dic = {
            'q': sentece,
            'from': 'auto',
            'to': tl,
            'appid': self.appid,
            'salt': self.salt,
            'sign': hashlib.md5(sign_source).hexdigest(),
        }
        reply = requests.get(self.url + up.urlencode(self.dic))
        return json.loads(reply.text)['trans_result'][0]['dst']
# Fixed pixel dimensions shared by the Tk layout in the UI class below.
Box_width = 470   # width of the input entry and the result text pane
Btn_width = 70    # width of each translate button
Btn_height = 30   # height of each translate button
class UI() :
    """Tkinter front-end: one input box, three engine buttons, a result pane."""
    def __init__(self) :
        self.window = tk.Tk()
        self.window.title("Tranlation Tool")
        self.window.geometry('600x500')
        # submit buttons, one per translation engine
        self.GG_btn = tk.Button(self.window, text = "谷歌翻译", command = lambda: self.submit(platform = "google"))
        self.GG_btn.place(x = Box_width + 30, y = 20, width = Btn_width, height = Btn_height)
        self.YD_btn = tk.Button(self.window, text = "有道翻译", command = lambda: self.submit(platform = "youdao"))
        self.YD_btn.place(x = Box_width + 30, y = 70, width = Btn_width, height = Btn_height)
        self.BD_btn = tk.Button(self.window, text = "百度翻译", command = lambda: self.submit(platform = "baidu"))
        self.BD_btn.place(x = Box_width + 30, y = 120, width = Btn_width, height = Btn_height)
        # input box
        self.entry = tk.Entry(self.window)
        self.entry.place(x = 10, y = 20, width = Box_width, height = 30)
        # pressing <Return> submits with the default platform ('google')
        self.entry.bind('<Key-Return>', self.submit)
        # the title of the result area
        self.title_text = tk.Label(self.window, text = "翻译结果:")
        self.title_text.place(x = 10, y = 60)
        # translation result pane
        self.result_text = tk.Text(self.window, background = "#FAFAFA")
        self.result_text.place(x = 10, y = 90, width = Box_width, height = 260)
        # translator backends, constructed once and reused
        self.YD_translator = Youdao()
        self.GG_translator = Google()
        self.BD_translator = Baidu()
    def submit(self, event = None, platform = 'google') :
        """Translate the entry's text with the chosen platform and display the result.

        `event` is unused; it is present so this method can double as the
        <Key-Return> binding callback.
        """
        context = self.entry.get()
        if platform == 'google' :
            result = self.GG_translator.translate(context)
        elif platform == 'youdao' :
            result = self.YD_translator.translate(context)
        elif platform == 'baidu' :
            result = self.BD_translator.translate(context)
        else :
            raise RuntimeError('Platform only is only Google, Youdao or Baidu.')
        # Text widget indices start at 1.0; 0.x would address a position before
        # the first line (outside the widget's content).
        self.result_text.delete(1.0, tk.END)
        self.result_text.insert(tk.END, result)
    def run(self) :
        """Enter the Tk main event loop (blocks until the window closes)."""
        self.window.mainloop()
if __name__ == '__main__':
    # Build the Tk application and hand control to its event loop.
    application = UI()
    application.run()
|
import base64
import json
import logging
import sys
import mock
import msgpack
from ably import CipherParams
from ably.util.crypto import get_cipher
from ably.types.message import Message
from test.ably.testapp import TestApp
from test.ably.utils import BaseAsyncTestCase
if sys.version_info >= (3, 8):
from unittest.mock import AsyncMock
else:
from mock import AsyncMock
log = logging.getLogger(__name__)
class TestTextEncodersNoEncryption(BaseAsyncTestCase):
    """Publish/history payload-encoding tests over the JSON (text) protocol,
    without encryption: strings pass through, binary becomes base64, and
    dict/list payloads become JSON with an 'encoding' marker."""
    async def asyncSetUp(self):
        # Text protocol: the request body is JSON, inspectable via json.loads.
        self.ably = await TestApp.get_ably_rest(use_binary_protocol=False)
    async def asyncTearDown(self):
        await self.ably.close()
    async def test_text_utf8(self):
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', 'foó')
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['data'] == 'foó'
            assert not json.loads(kwargs['body']).get('encoding', '')
    async def test_str(self):
        # This test only makes sense for py2
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', 'foo')
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['data'] == 'foo'
            assert not json.loads(kwargs['body']).get('encoding', '')
    async def test_with_binary_type(self):
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', bytearray(b'foo'))
            _, kwargs = post_mock.call_args
            raw_data = json.loads(kwargs['body'])['data']
            # Binary payloads are base64-encoded for the text protocol.
            assert base64.b64decode(raw_data.encode('ascii')) == bytearray(b'foo')
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'base64'
    async def test_with_bytes_type(self):
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', b'foo')
            _, kwargs = post_mock.call_args
            raw_data = json.loads(kwargs['body'])['data']
            assert base64.b64decode(raw_data.encode('ascii')) == bytearray(b'foo')
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'base64'
    async def test_with_json_dict_data(self):
        channel = self.ably.channels["persisted:publish"]
        data = {'foó': 'bár'}
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            raw_data = json.loads(json.loads(kwargs['body'])['data'])
            assert raw_data == data
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'json'
    async def test_with_json_list_data(self):
        channel = self.ably.channels["persisted:publish"]
        data = ['foó', 'bár']
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            raw_data = json.loads(json.loads(kwargs['body'])['data'])
            assert raw_data == data
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'json'
    async def test_text_utf8_decode(self):
        # Round-trip through the service: publish then read back via history.
        channel = self.ably.channels["persisted:stringdecode"]
        await channel.publish('event', 'fóo')
        history = await channel.history()
        message = history.items[0]
        assert message.data == 'fóo'
        assert isinstance(message.data, str)
        assert not message.encoding
    async def test_text_str_decode(self):
        channel = self.ably.channels["persisted:stringnonutf8decode"]
        await channel.publish('event', 'foo')
        history = await channel.history()
        message = history.items[0]
        assert message.data == 'foo'
        assert isinstance(message.data, str)
        assert not message.encoding
    async def test_with_binary_type_decode(self):
        channel = self.ably.channels["persisted:binarydecode"]
        await channel.publish('event', bytearray(b'foob'))
        history = await channel.history()
        message = history.items[0]
        assert message.data == bytearray(b'foob')
        assert isinstance(message.data, bytearray)
        assert not message.encoding
    async def test_with_json_dict_data_decode(self):
        channel = self.ably.channels["persisted:jsondict"]
        data = {'foó': 'bár'}
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
    async def test_with_json_list_data_decode(self):
        channel = self.ably.channels["persisted:jsonarray"]
        data = ['foó', 'bár']
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
    def test_decode_with_invalid_encoding(self):
        # Known trailing encodings (utf-8/base64) are applied and stripped;
        # the unknown 'foo/bar' prefix is left in the residual encoding.
        data = 'foó'
        encoded = base64.b64encode(data.encode('utf-8'))
        decoded_data = Message.decode(encoded, 'foo/bar/utf-8/base64')
        assert decoded_data['data'] == data
        assert decoded_data['encoding'] == 'foo/bar'
class TestTextEncodersEncryption(BaseAsyncTestCase):
    """Payload-encoding tests over the JSON (text) protocol with AES-CBC
    channel encryption: payloads are encrypted, then base64-wrapped, and the
    'encoding' field records the full transform chain."""
    async def asyncSetUp(self):
        self.ably = await TestApp.get_ably_rest(use_binary_protocol=False)
        self.cipher_params = CipherParams(secret_key='keyfordecrypt_16',
                                          algorithm='aes')
    async def asyncTearDown(self):
        await self.ably.close()
    def decrypt(self, payload, options=None):
        # `options` is currently unused; kept for call-site compatibility.
        if options is None:
            options = {}
        ciphertext = base64.b64decode(payload.encode('ascii'))
        cipher = get_cipher({'key': b'keyfordecrypt_16'})
        return cipher.decrypt(ciphertext)
    async def test_text_utf8(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', 'fóo')
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'utf-8/cipher+aes-128-cbc/base64'
            data = self.decrypt(json.loads(kwargs['body'])['data']).decode('utf-8')
            assert data == 'fóo'
    async def test_str(self):
        # This test only makes sense for py2
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', 'foo')
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['data'] == 'foo'
            assert not json.loads(kwargs['body']).get('encoding', '')
    async def test_with_binary_type(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', bytearray(b'foo'))
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'cipher+aes-128-cbc/base64'
            data = self.decrypt(json.loads(kwargs['body'])['data'])
            assert data == bytearray(b'foo')
            assert isinstance(data, bytearray)
    async def test_with_json_dict_data(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        data = {'foó': 'bár'}
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'json/utf-8/cipher+aes-128-cbc/base64'
            raw_data = self.decrypt(json.loads(kwargs['body'])['data']).decode('ascii')
            assert json.loads(raw_data) == data
    async def test_with_json_list_data(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        data = ['foó', 'bár']
        with mock.patch('ably.rest.rest.Http.post', new_callable=AsyncMock) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            assert json.loads(kwargs['body'])['encoding'].strip('/') == 'json/utf-8/cipher+aes-128-cbc/base64'
            raw_data = self.decrypt(json.loads(kwargs['body'])['data']).decode('ascii')
            assert json.loads(raw_data) == data
    async def test_text_utf8_decode(self):
        # Round-trip through the service on an encrypted channel.
        channel = self.ably.channels.get("persisted:enc_stringdecode",
                                         cipher=self.cipher_params)
        await channel.publish('event', 'foó')
        history = await channel.history()
        message = history.items[0]
        assert message.data == 'foó'
        assert isinstance(message.data, str)
        assert not message.encoding
    async def test_with_binary_type_decode(self):
        channel = self.ably.channels.get("persisted:enc_binarydecode",
                                         cipher=self.cipher_params)
        await channel.publish('event', bytearray(b'foob'))
        history = await channel.history()
        message = history.items[0]
        assert message.data == bytearray(b'foob')
        assert isinstance(message.data, bytearray)
        assert not message.encoding
    async def test_with_json_dict_data_decode(self):
        channel = self.ably.channels.get("persisted:enc_jsondict",
                                         cipher=self.cipher_params)
        data = {'foó': 'bár'}
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
    async def test_with_json_list_data_decode(self):
        channel = self.ably.channels.get("persisted:enc_list",
                                         cipher=self.cipher_params)
        data = ['foó', 'bár']
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
class TestBinaryEncodersNoEncryption(BaseAsyncTestCase):
    """Payload-encoding tests over the msgpack (binary) protocol, without
    encryption: binary travels natively (no base64), dict/list payloads
    still become JSON strings with a 'json' encoding marker."""
    async def asyncSetUp(self):
        # Default protocol is binary (msgpack).
        self.ably = await TestApp.get_ably_rest()
    async def asyncTearDown(self):
        await self.ably.close()
    def decode(self, data):
        # Request bodies are msgpack, not JSON, on the binary protocol.
        return msgpack.unpackb(data)
    async def test_text_utf8(self):
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', 'foó')
            _, kwargs = post_mock.call_args
            assert self.decode(kwargs['body'])['data'] == 'foó'
            assert self.decode(kwargs['body']).get('encoding', '').strip('/') == ''
    async def test_with_binary_type(self):
        channel = self.ably.channels["persisted:publish"]
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', bytearray(b'foo'))
            _, kwargs = post_mock.call_args
            # Binary needs no base64 wrapper on the msgpack protocol.
            assert self.decode(kwargs['body'])['data'] == bytearray(b'foo')
            assert self.decode(kwargs['body']).get('encoding', '').strip('/') == ''
    async def test_with_json_dict_data(self):
        channel = self.ably.channels["persisted:publish"]
        data = {'foó': 'bár'}
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            raw_data = json.loads(self.decode(kwargs['body'])['data'])
            assert raw_data == data
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'json'
    async def test_with_json_list_data(self):
        channel = self.ably.channels["persisted:publish"]
        data = ['foó', 'bár']
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            raw_data = json.loads(self.decode(kwargs['body'])['data'])
            assert raw_data == data
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'json'
    async def test_text_utf8_decode(self):
        # Round-trip through the service: publish then read back via history.
        channel = self.ably.channels["persisted:stringdecode-bin"]
        await channel.publish('event', 'fóo')
        history = await channel.history()
        message = history.items[0]
        assert message.data == 'fóo'
        assert isinstance(message.data, str)
        assert not message.encoding
    async def test_with_binary_type_decode(self):
        channel = self.ably.channels["persisted:binarydecode-bin"]
        await channel.publish('event', bytearray(b'foob'))
        history = await channel.history()
        message = history.items[0]
        assert message.data == bytearray(b'foob')
        assert not message.encoding
    async def test_with_json_dict_data_decode(self):
        channel = self.ably.channels["persisted:jsondict-bin"]
        data = {'foó': 'bár'}
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
    async def test_with_json_list_data_decode(self):
        channel = self.ably.channels["persisted:jsonarray-bin"]
        data = ['foó', 'bár']
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
class TestBinaryEncodersEncryption(BaseAsyncTestCase):
    """Payload-encoding tests over the msgpack (binary) protocol with AES-CBC
    channel encryption: ciphertext travels natively (no base64 step, unlike
    the text protocol), and 'encoding' records the transform chain."""
    async def asyncSetUp(self):
        self.ably = await TestApp.get_ably_rest()
        self.cipher_params = CipherParams(secret_key='keyfordecrypt_16', algorithm='aes')
    async def asyncTearDown(self):
        await self.ably.close()
    def decrypt(self, payload, options=None):
        # `options` is currently unused; kept for call-site compatibility.
        # No base64 step here: msgpack carries the raw ciphertext.
        if options is None:
            options = {}
        cipher = get_cipher({'key': b'keyfordecrypt_16'})
        return cipher.decrypt(payload)
    def decode(self, data):
        return msgpack.unpackb(data)
    async def test_text_utf8(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', 'fóo')
            _, kwargs = post_mock.call_args
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'utf-8/cipher+aes-128-cbc'
            data = self.decrypt(self.decode(kwargs['body'])['data']).decode('utf-8')
            assert data == 'fóo'
    async def test_with_binary_type(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', bytearray(b'foo'))
            _, kwargs = post_mock.call_args
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'cipher+aes-128-cbc'
            data = self.decrypt(self.decode(kwargs['body'])['data'])
            assert data == bytearray(b'foo')
            assert isinstance(data, bytearray)
    async def test_with_json_dict_data(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        data = {'foó': 'bár'}
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'json/utf-8/cipher+aes-128-cbc'
            raw_data = self.decrypt(self.decode(kwargs['body'])['data']).decode('ascii')
            assert json.loads(raw_data) == data
    async def test_with_json_list_data(self):
        channel = self.ably.channels.get("persisted:publish_enc",
                                         cipher=self.cipher_params)
        data = ['foó', 'bár']
        with mock.patch('ably.rest.rest.Http.post',
                        wraps=channel.ably.http.post) as post_mock:
            await channel.publish('event', data)
            _, kwargs = post_mock.call_args
            assert self.decode(kwargs['body'])['encoding'].strip('/') == 'json/utf-8/cipher+aes-128-cbc'
            raw_data = self.decrypt(self.decode(kwargs['body'])['data']).decode('ascii')
            assert json.loads(raw_data) == data
    async def test_text_utf8_decode(self):
        # Round-trip through the service on an encrypted channel.
        channel = self.ably.channels.get("persisted:enc_stringdecode-bin",
                                         cipher=self.cipher_params)
        await channel.publish('event', 'foó')
        history = await channel.history()
        message = history.items[0]
        assert message.data == 'foó'
        assert isinstance(message.data, str)
        assert not message.encoding
    async def test_with_binary_type_decode(self):
        channel = self.ably.channels.get("persisted:enc_binarydecode-bin",
                                         cipher=self.cipher_params)
        await channel.publish('event', bytearray(b'foob'))
        history = await channel.history()
        message = history.items[0]
        assert message.data == bytearray(b'foob')
        assert isinstance(message.data, bytearray)
        assert not message.encoding
    async def test_with_json_dict_data_decode(self):
        channel = self.ably.channels.get("persisted:enc_jsondict-bin",
                                         cipher=self.cipher_params)
        data = {'foó': 'bár'}
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
    async def test_with_json_list_data_decode(self):
        channel = self.ably.channels.get("persisted:enc_list-bin",
                                         cipher=self.cipher_params)
        data = ['foó', 'bár']
        await channel.publish('event', data)
        history = await channel.history()
        message = history.items[0]
        assert message.data == data
        assert not message.encoding
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'miniMaxSum' function below.
#
# The function accepts INTEGER_ARRAY arr as parameter.
#
def miniMaxSum(arr):
    """
    Print (and return) the minimum and maximum sums obtainable by summing
    all but one element of arr.

    The original implementation located the extremes with single bubble-sort
    passes (mutating arr as a side effect) and shadowed the built-ins
    min/max/sum; this version uses the builtins directly and leaves arr
    untouched.  Works for any non-empty list, not just the five-element
    HackerRank input.

    :param arr: non-empty list of integers
    :return: tuple (min_sum, max_sum); also prints "min_sum max_sum"
    :raises ValueError: if arr is empty (from max()/min())
    """
    total = sum(arr)
    min_sum = total - max(arr)  # drop the largest element
    max_sum = total - min(arr)  # drop the smallest element
    print(min_sum, max_sum)
    return (min_sum, max_sum)
if __name__ == '__main__':
    # Read one line of space-separated integers from stdin and report the sums.
    tokens = input().rstrip().split()
    miniMaxSum([int(tok) for tok in tokens])
"""
Problem Statement:
Given five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.
Example
For example, with arr = [1, 3, 5, 7, 9] the minimum sum is 1 + 3 + 5 + 7 = 16 and the maximum sum is 3 + 5 + 7 + 9 = 24. The function prints
16 24
Function Description
Complete the miniMaxSum function in the editor below.
miniMaxSum has the following parameter(s):
arr: an array of integers
Print
Print two space-separated integers on one line: the minimum sum and the maximum sum of four of the five elements.
Input Format
A single line of five space-separated integers.
Constraints
Output Format
Print two space-separated long integers denoting the respective minimum and maximum values that can be calculated by summing exactly four of the five integers. (The output can be greater than a 32 bit integer.)
Sample Input
1 2 3 4 5
Sample Output
10 14
""" |
from tornado.web import RequestHandler
from random import choice
import json
import jwt
from datetime import datetime
import logging
from tornado_gameshop.handler import RedisHandler, BaseHandler
from apps.utils.verify_code_async import AsyncVerifyCode
from apps.users.forms import SmsCodeForm, RegisterForm, LoginFrom
from apps.users.model import User
from apps.utils.my_decorator import authenticated_async
from apps.utils.pay.alipay import AliPay
from apps.operation.model import Order, UserGame, UserFavorite
from apps.Game.model import Game
def generate_code(length=4):
    """Return a random numeric verification code of the given length (default 4)."""
    digits = "1234567890"
    return "".join(choice(digits) for _ in range(length))
class SmsHandler(RedisHandler):
    """
    Send an SMS verification code to the mobile number posted in the JSON body,
    and cache the code in redis for later registration checks.
    """
    async def post(self, *args, **kwargs):
        re_data = {}
        param = self.request.body.decode("utf8")
        param = json.loads(param)
        # Django/Tornado wrap incoming values in a list and wtforms iterates the
        # first item; a raw string would be iterated character by character.
        # wtforms_json wraps the form with a dynamic from_json method to accept
        # a plain JSON dict directly.
        sms_form = SmsCodeForm.from_json(param)
        if sms_form.validate():
            verify_code = generate_code()
            mobile = sms_form.mobile.data
            code = AsyncVerifyCode(verify_code, mobile)
            # Send the SMS verification code via the async provider client.
            re_json = await code.send_single_sms()
            if re_json["code"] != 0:
                # Provider reported a failure; surface its message.
                self.set_status(400)
                re_data["mobile"] = re_json["msg"]
            else:
                # Store "<mobile>_<code>" in redis with a 10-minute expiry.
                # A synchronous client is used deliberately: redis is in-memory
                # and fast enough here.  (Third positional arg assumed to be
                # the expire time, as in redis-py's set(name, value, ex) --
                # confirm against the client in use.)
                self.redis_conn.set("{}_{}".format(mobile, verify_code), 1, 10*60)
        else:
            self.set_status(400)
            for field in sms_form.errors:
                re_data[field] = sms_form.errors[field][0]
        self.finish(re_data)
class RegisterHandler(RedisHandler):
    """
    User registration: verifies the SMS code cached in redis by SmsHandler,
    then creates the user if the mobile number is not already taken.
    """
    async def post(self, *args, **kwargs):
        re_data = {}
        param = self.request.body.decode("utf8")
        param = json.loads(param)
        register_form = RegisterForm.from_json(param)
        if register_form.validate():
            mobile = register_form.mobile.data
            code = register_form.code.data
            password = register_form.password.data
            # Check the submitted code against the copy SmsHandler cached.
            redis_key = "{}_{}".format(mobile, code)
            # A synchronous redis client is deliberate; see SmsHandler.
            if not self.redis_conn.get(redis_key):
                self.set_status(400)
                re_data["code"] = "验证码失效或者错误"
            else:
                # Reject the registration if the mobile is already in use.
                try:
                    await self.application.objects.get(User, mobile=mobile)
                    self.set_status(400)
                    re_data["mobile"] = "用户已存在"
                except User.DoesNotExist:
                    # User does not exist yet -- create it.
                    user = await self.application.objects.create(User, mobile=mobile, password=password)
                    re_data["id"] = user.id
        else:
            self.set_status(400)
            for field in register_form.errors:
                # BUG FIX: return the first error message string, mirroring
                # SmsHandler.  The original `register_form[field]` returned
                # the wtforms field object, which is not JSON-serializable.
                re_data[field] = register_form.errors[field][0]
        self.finish(re_data)
class LoginHandler(RedisHandler):
    """
    User login: validates mobile/password and returns a signed JWT token.
    """
    async def post(self, *args, **kwargs):
        re_data = {}
        param = self.request.body.decode("utf-8")
        param = json.loads(param)
        form = LoginFrom.from_json(param)
        if form.validate():
            mobile = form.mobile.data
            password = form.password.data
            try:
                user = await self.application.objects.get(User, mobile=mobile)
                if not user.password.check_password(password):
                    self.set_status(400)
                    re_data["non_fields"] = "用户名或密码错误"
                else:
                    # Login succeeded.
                    payload = {
                        "id": user.id,
                        "mobile": user.mobile,
                        # NOTE(review): "exp" is set to the current time, which
                        # makes the token already expired at issue if the
                        # verifying side checks expiry -- confirm whether the
                        # authenticated_async decorator honours "exp".
                        "exp": datetime.utcnow()
                    }
                    # Return a token signed over the user id and mobile; the
                    # client sends it back with each request for verification.
                    token = jwt.encode(payload, self.settings["secret_key"], algorithm='HS256')
                    re_data["id"] = user.id
                    re_data["token"] = token.decode("utf8")
            except User.DoesNotExist as e:
                self.set_status(400)
                re_data["mobile"] = "用户不存在"
        self.finish(re_data)
class OrderHandler(BaseHandler):
    """
    Build an Alipay payment URL from the posted order details, persist the
    Order record, and return the gateway URL to the client.
    """
    @authenticated_async
    async def post(self, *args, **kwargs):
        re_data = {}
        try:
            param = self.request.body.decode("utf-8")
            param = json.loads(param)
            # Any missing key below raises KeyError and falls through to the
            # missing-parameter error response.
            game_id = param["game_id"]
            notify_url = param["notify_url"]
            return_url = param["return_url"]
            subject = param["subject"]
            out_trade_no = param["out_trade_no"]
            total_amount = param["total_amount"]
            pay = AliPay(
                appid=self.settings["appid"],
                app_notify_url=notify_url,
                app_private_key_path=self.settings["private_key_path"],
                alipay_public_key_path=self.settings["ali_pub_key_path"],  # Alipay's public key, used to verify callbacks from Alipay
                debug=True,  # defaults to False; True targets the sandbox gateway
                return_url=return_url
            )
            # Build the signed query string for a direct-pay transaction.
            url = pay.direct_pay(
                subject=subject,
                out_trade_no=out_trade_no,
                total_amount=int(total_amount),
                return_url=return_url
            )
            # Persist the pending order; trade_no is filled in by the
            # payment notification later.
            order = await self.application.objects.create(Order, user_id=self._current_user.id, game_id=int(game_id),
                                                          out_trade_no=out_trade_no, trade_no=None,
                                                          order_amount=int(total_amount), add_time=datetime.now())
            self.set_status(200)
            re_data["success"] = True
            re_data["url"] = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url)
        except Exception as e:
            # Every failure is reported as a missing-parameter error.
            re_data["success"] = False
            re_data["data"] = "缺少关键参数"
            self.set_status(400)
        self.finish(re_data)
class UserStarGameHandler(BaseHandler):
    """
    List the games the current (authenticated) user has favorited.
    """
    @authenticated_async
    async def get(self, *args, **kwargs):
        re_data = {"games": [], }
        try:
            # BUG FIX: filter by the authenticated user instead of the
            # hard-coded user id 1.
            query = UserFavorite.select().where(UserFavorite.user_id == self._current_user.id)
            user_fav = await self.application.objects.execute(query)
            # BUG FIX: look the games up by the favorited game id
            # (fav_game_id, the field CollectGameHandler writes), not by the
            # UserFavorite row id.
            games = await self.application.objects.execute(
                Game.select().where(Game.id.in_([fav.fav_game_id for fav in user_fav])))
            # Plain loop instead of a comprehension used for side effects.
            for game in games:
                re_data["games"].append({
                    "game_id": game.id,
                    "name": game.name,
                    "price": game.price,
                    "cover_url": game.cover_url
                })
            re_data["success"] = True
            self.set_status(200)
        except Exception as e:
            # Narrowed from a bare except; log instead of failing silently.
            logging.warning(e)
            re_data["success"] = False
            self.set_status(400)
        self.finish(re_data)
class UserInfoHandler(BaseHandler):
    """
    Return basic profile info (mobile, nick name) for the current user.
    """
    @authenticated_async
    async def get(self, *args, **kwargs):
        re_data = {}
        try:
            re_data["mobile"] = self._current_user.mobile
            re_data["nick_name"] = self._current_user.nick_name
            re_data["success"] = True
            self.set_status(200)
        except Exception as e:
            # Narrowed from a bare except (which also swallowed SystemExit /
            # KeyboardInterrupt); log the failure instead of hiding it.
            logging.warning(e)
            re_data["success"] = False
            self.set_status(400)
        self.finish(re_data)
class CollectGameHandler(BaseHandler):
    """
    Toggle a game favorite for the current user: unfavorite if a record
    exists, otherwise create the favorite.
    """
    @authenticated_async
    async def post(self, *args, **kwargs):
        re_data = {}
        try:
            param = self.request.body.decode("utf-8")
            param = json.loads(param)
            game_id = param["game_id"]
            # Already favorited -> remove the favorite.
            try:
                record = await self.application.objects.get(UserFavorite, user_id=self._current_user.id,
                                                            fav_game_id=game_id)
                await self.application.objects.delete(record)
                re_data["success"] = True
                re_data["data"] = "取消收藏成功"
            except UserFavorite.DoesNotExist:
                # Not favorited yet -> create the favorite.
                await self.application.objects.create(UserFavorite, user_id=self._current_user.id,
                                                      fav_game_id=game_id)
                re_data["success"] = True
                re_data["data"] = "收藏成功"
            except Exception as e:
                logging.warning(e)
        except Exception as e:
            logging.warning(e)
            re_data["success"] = False
            re_data["data"] = "参数错误"
            # CONSISTENCY FIX: report the error status like the other
            # handlers instead of returning 200 with success=False.
            self.set_status(400)
        self.finish(re_data)
|
import helpers
from twitterapi import PoliceStream
from twitterapi import AzureSentiments
from Coordinates import locations
# Open the Twitter police stream and hand it to the helper update loop.
# (Presumably blocks/streams indefinitely -- confirm against helpers.stream_update.)
policedata = PoliceStream()
helpers.stream_update(policedata)
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 18:32:22 2021
@author: Nikita
"""
import random
# Size of the simulated version history and the randomly chosen first bad
# version (hidden state that isBadVersion exposes, mimicking the LeetCode API).
total_products = 100
bad_version = random.randint(1,total_products)
def isBadVersion(version: int) -> bool:
    """Mock of the product API: every version from bad_version onward is bad."""
    # `global` is unnecessary for a read-only access of the module variable.
    return bad_version <= version
class Solution:
    """
    Binary search for the first bad version, using the isBadVersion oracle.

    BUG FIX: both implementations were named firstBadVersion, so the second
    definition silently shadowed the first (dead code).  The later definition
    -- the one that was actually in effect -- keeps the public name, and the
    earlier variant is preserved under a distinct name.
    """
    def firstBadVersion_fewer_calls(self, n: int) -> int:
        """Variant with one fewer isBadVersion call (previously shadowed)."""
        interval_min = 0
        interval_max = n
        halfway = (interval_max + interval_min) // 2
        while interval_max - interval_min > 1:
            if isBadVersion(halfway):
                interval_max = halfway
            else:
                interval_min = halfway
            halfway = (interval_max + interval_min) // 2
        return interval_max

    def firstBadVersion(self, n: int) -> int:
        """
        Initial version that checks at the end -- the final probe covers the
        1- and 2-version edge cases.
        """
        interval_min = 1
        interval_max = n
        halfway = (interval_max + interval_min) // 2
        while interval_min != halfway:
            if isBadVersion(halfway):
                interval_max = halfway
            else:
                interval_min = halfway
            halfway = (interval_max + interval_min) // 2
        # The loop narrowed the answer to {halfway, interval_max}; one last
        # probe decides which.
        if isBadVersion(halfway):
            return halfway
        else:
            return interval_max
# Run the solver against the randomly chosen bad version and show the result.
solution = Solution()
print(solution.firstBadVersion(total_products))
"""
This can be done linearly, but it can also be done in log2(n) steps, which is better.
Finished this after 48 minutes and 43 seconds.
3rd submission was successful. Beats 80% in terms of
runtime, but for some reason it's really bad in terms
of memory.
Although I guessed you could do it via a binary search, the test case
with 1 and 2 products tripped me up. Oops.
With this problem solved, I FINALLY GET TO DO SOME DYNAMIC PROGRAMMING
PROBLEMS !!!
""" |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/main_window.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI definition for the PyParticleProcessor main window."""
    def setupUi(self, MainWindow):
        """Build the widget tree, toolbar, status bar, menus, and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(994, 654)
        # Dark theme applied window-wide via stylesheet.
        MainWindow.setStyleSheet("background-color: rgb(65, 65, 65);\n"
"alternate-background-color: rgb(130, 130, 130);\n"
"color: rgb(255, 255, 255);")
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)
        self.gridLayout.setContentsMargins(11, 2, 11, 11)
        self.gridLayout.setSpacing(6)
        self.gridLayout.setObjectName("gridLayout")
        # Outer horizontal splitter: left control panel vs. right plot tabs.
        self.splitter_2 = QtWidgets.QSplitter(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.splitter_2.sizePolicy().hasHeightForWidth())
        self.splitter_2.setSizePolicy(sizePolicy)
        self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
        self.splitter_2.setObjectName("splitter_2")
        # Inner vertical splitter: datasets tree above, properties table below.
        self.splitter = QtWidgets.QSplitter(self.splitter_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
        self.splitter.setSizePolicy(sizePolicy)
        self.splitter.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.splitter.setFrameShape(QtWidgets.QFrame.Box)
        self.splitter.setFrameShadow(QtWidgets.QFrame.Plain)
        self.splitter.setLineWidth(0)
        self.splitter.setOrientation(QtCore.Qt.Vertical)
        self.splitter.setHandleWidth(6)
        self.splitter.setObjectName("splitter")
        # --- Datasets panel: label + tree widget ---
        self.layoutWidget = QtWidgets.QWidget(self.splitter)
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.datasets_label = QtWidgets.QLabel(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.datasets_label.sizePolicy().hasHeightForWidth())
        self.datasets_label.setSizePolicy(sizePolicy)
        self.datasets_label.setObjectName("datasets_label")
        self.verticalLayout.addWidget(self.datasets_label, 0, QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        spacerItem = QtWidgets.QSpacerItem(20, 2, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.verticalLayout.addItem(spacerItem)
        self.treeWidget = QtWidgets.QTreeWidget(self.layoutWidget)
        self.treeWidget.setFocusPolicy(QtCore.Qt.NoFocus)
        self.treeWidget.setAlternatingRowColors(False)
        self.treeWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        self.treeWidget.setTextElideMode(QtCore.Qt.ElideRight)
        self.treeWidget.setObjectName("treeWidget")
        self.treeWidget.header().setVisible(True)
        self.treeWidget.header().setCascadingSectionResizes(False)
        self.treeWidget.header().setDefaultSectionSize(80)
        self.treeWidget.header().setMinimumSectionSize(1)
        self.treeWidget.header().setStretchLastSection(True)
        self.verticalLayout.addWidget(self.treeWidget)
        # --- Properties panel: label + combo box + two-column table ---
        self.layoutWidget1 = QtWidgets.QWidget(self.splitter)
        self.layoutWidget1.setObjectName("layoutWidget1")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
        self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(11, 11, 11, 11)
        self.horizontalLayout_3.setSpacing(6)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.properties_label = QtWidgets.QLabel(self.layoutWidget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.properties_label.sizePolicy().hasHeightForWidth())
        self.properties_label.setSizePolicy(sizePolicy)
        self.properties_label.setObjectName("properties_label")
        self.horizontalLayout_3.addWidget(self.properties_label)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.properties_combo = QtWidgets.QComboBox(self.layoutWidget1)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.properties_combo.sizePolicy().hasHeightForWidth())
        self.properties_combo.setSizePolicy(sizePolicy)
        self.properties_combo.setObjectName("properties_combo")
        self.horizontalLayout_3.addWidget(self.properties_combo)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        spacerItem2 = QtWidgets.QSpacerItem(20, 2, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.verticalLayout_2.addItem(spacerItem2)
        self.properties_table = QtWidgets.QTableWidget(self.layoutWidget1)
        self.properties_table.setRowCount(1)
        self.properties_table.setColumnCount(2)
        self.properties_table.setObjectName("properties_table")
        self.properties_table.horizontalHeader().setStretchLastSection(True)
        self.properties_table.verticalHeader().setVisible(False)
        self.properties_table.verticalHeader().setStretchLastSection(False)
        self.verticalLayout_2.addWidget(self.properties_table)
        # --- Plot tab: 2x2 grid of three 2D plots and one OpenGL 3D view ---
        self.tabWidget = QtWidgets.QTabWidget(self.splitter_2)
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setStyleSheet("background-color: rgb(0, 0, 0);")
        self.tab.setObjectName("tab")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.tab)
        self.gridLayout_4.setContentsMargins(11, 11, 11, 11)
        self.gridLayout_4.setSpacing(6)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.gridLayout_3 = QtWidgets.QGridLayout()
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setSpacing(7)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.graphicsView_1 = PlotWidget(self.tab)
        self.graphicsView_1.setObjectName("graphicsView_1")
        self.gridLayout_3.addWidget(self.graphicsView_1, 0, 0, 1, 1)
        self.graphicsView_2 = PlotWidget(self.tab)
        self.graphicsView_2.setObjectName("graphicsView_2")
        self.gridLayout_3.addWidget(self.graphicsView_2, 0, 1, 1, 1)
        self.graphicsView_3 = PlotWidget(self.tab)
        self.graphicsView_3.setObjectName("graphicsView_3")
        self.gridLayout_3.addWidget(self.graphicsView_3, 1, 0, 1, 1)
        self.graphicsView_4 = GLViewWidget(self.tab)
        self.graphicsView_4.setObjectName("graphicsView_4")
        self.gridLayout_3.addWidget(self.graphicsView_4, 1, 1, 1, 1)
        self.gridLayout_4.addLayout(self.gridLayout_3, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        self.gridLayout.addWidget(self.splitter_2, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralWidget)
        # --- Toolbar, status bar, and menu bar ---
        self.mainToolBar = QtWidgets.QToolBar(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mainToolBar.sizePolicy().hasHeightForWidth())
        self.mainToolBar.setSizePolicy(sizePolicy)
        self.mainToolBar.setIconSize(QtCore.QSize(16, 16))
        self.mainToolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.mainToolBar.setObjectName("mainToolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 994, 25))
        self.menuBar.setDefaultUp(True)
        self.menuBar.setNativeMenuBar(True)
        self.menuBar.setObjectName("menuBar")
        self.menu_File = QtWidgets.QMenu(self.menuBar)
        self.menu_File.setFocusPolicy(QtCore.Qt.NoFocus)
        self.menu_File.setObjectName("menu_File")
        self.menuHelp = QtWidgets.QMenu(self.menuBar)
        self.menuHelp.setObjectName("menuHelp")
        self.menuTools = QtWidgets.QMenu(self.menuBar)
        self.menuTools.setObjectName("menuTools")
        self.menuPlot = QtWidgets.QMenu(self.menuBar)
        self.menuPlot.setObjectName("menuPlot")
        MainWindow.setMenuBar(self.menuBar)
        # --- Actions (user-visible text is set in retranslateUi) ---
        self.actionImport_New = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("document-new")
        self.actionImport_New.setIcon(icon)
        self.actionImport_New.setMenuRole(QtWidgets.QAction.TextHeuristicRole)
        self.actionImport_New.setPriority(QtWidgets.QAction.NormalPriority)
        self.actionImport_New.setObjectName("actionImport_New")
        self.actionImport_Add = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("add")
        self.actionImport_Add.setIcon(icon)
        self.actionImport_Add.setObjectName("actionImport_Add")
        self.actionExport_For = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("go-right")
        self.actionExport_For.setIcon(icon)
        self.actionExport_For.setObjectName("actionExport_For")
        self.actionQuit = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("exit")
        self.actionQuit.setIcon(icon)
        self.actionQuit.setObjectName("actionQuit")
        self.actionAbout = QtWidgets.QAction(MainWindow)
        self.actionAbout.setObjectName("actionAbout")
        self.actionRemove = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("remove")
        self.actionRemove.setIcon(icon)
        self.actionRemove.setObjectName("actionRemove")
        self.actionAnalyze = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("applications-accessories")
        self.actionAnalyze.setIcon(icon)
        self.actionAnalyze.setObjectName("actionAnalyze")
        self.actionNew_Plot = QtWidgets.QAction(MainWindow)
        self.actionNew_Plot.setObjectName("actionNew_Plot")
        self.actionModify_Plot = QtWidgets.QAction(MainWindow)
        self.actionModify_Plot.setObjectName("actionModify_Plot")
        self.actionRemove_Plot = QtWidgets.QAction(MainWindow)
        self.actionRemove_Plot.setObjectName("actionRemove_Plot")
        self.actionRedraw = QtWidgets.QAction(MainWindow)
        self.actionRedraw.setObjectName("actionRedraw")
        self.actionGenerate = QtWidgets.QAction(MainWindow)
        icon = QtGui.QIcon.fromTheme("emblem-photos")
        self.actionGenerate.setIcon(icon)
        self.actionGenerate.setObjectName("actionGenerate")
        # --- Populate toolbar and menus ---
        self.mainToolBar.addAction(self.actionImport_New)
        self.mainToolBar.addAction(self.actionImport_Add)
        self.mainToolBar.addAction(self.actionRemove)
        self.mainToolBar.addAction(self.actionGenerate)
        self.mainToolBar.addAction(self.actionAnalyze)
        self.mainToolBar.addAction(self.actionExport_For)
        self.mainToolBar.addAction(self.actionQuit)
        self.menu_File.addAction(self.actionImport_New)
        self.menu_File.addAction(self.actionImport_Add)
        self.menu_File.addAction(self.actionRemove)
        self.menu_File.addAction(self.actionExport_For)
        self.menu_File.addSeparator()
        self.menu_File.addAction(self.actionQuit)
        self.menuHelp.addAction(self.actionAbout)
        self.menuPlot.addAction(self.actionRedraw)
        self.menuPlot.addSeparator()
        self.menuPlot.addAction(self.actionNew_Plot)
        self.menuPlot.addAction(self.actionModify_Plot)
        self.menuPlot.addAction(self.actionRemove_Plot)
        self.menuBar.addAction(self.menu_File.menuAction())
        self.menuBar.addAction(self.menuPlot.menuAction())
        self.menuBar.addAction(self.menuTools.menuAction())
        self.menuBar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings, wrapped for Qt's translation machinery."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "PyParticleProcessor"))
        self.datasets_label.setText(_translate("MainWindow", "Datasets"))
        self.treeWidget.headerItem().setText(0, _translate("MainWindow", "Selected"))
        self.treeWidget.headerItem().setText(1, _translate("MainWindow", "ID"))
        self.treeWidget.headerItem().setText(2, _translate("MainWindow", "Name"))
        self.properties_label.setText(_translate("MainWindow", "Properties:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Default Plots"))
        self.menu_File.setTitle(_translate("MainWindow", "File"))
        self.menuHelp.setTitle(_translate("MainWindow", "Help"))
        self.menuTools.setTitle(_translate("MainWindow", "Tools"))
        self.menuPlot.setTitle(_translate("MainWindow", "Plot"))
        self.actionImport_New.setText(_translate("MainWindow", "Import New..."))
        self.actionImport_New.setIconText(_translate("MainWindow", "New..."))
        self.actionImport_New.setToolTip(_translate("MainWindow", "New..."))
        self.actionImport_Add.setText(_translate("MainWindow", "Import Add..."))
        self.actionImport_Add.setIconText(_translate("MainWindow", "Add..."))
        self.actionImport_Add.setToolTip(_translate("MainWindow", "Add..."))
        self.actionExport_For.setText(_translate("MainWindow", "Export..."))
        self.actionExport_For.setIconText(_translate("MainWindow", "Export..."))
        self.actionExport_For.setToolTip(_translate("MainWindow", "Export..."))
        self.actionQuit.setText(_translate("MainWindow", "Quit"))
        self.actionAbout.setText(_translate("MainWindow", "About"))
        self.actionRemove.setText(_translate("MainWindow", "Remove"))
        self.actionRemove.setToolTip(_translate("MainWindow", "Remove"))
        self.actionAnalyze.setText(_translate("MainWindow", "Analyze"))
        self.actionAnalyze.setToolTip(_translate("MainWindow", "Analyze"))
        self.actionNew_Plot.setText(_translate("MainWindow", "New Plot..."))
        self.actionModify_Plot.setText(_translate("MainWindow", "Modify Current Plot..."))
        self.actionRemove_Plot.setText(_translate("MainWindow", "Remove Current Plot"))
        self.actionRedraw.setText(_translate("MainWindow", "Redraw"))
        self.actionGenerate.setText(_translate("MainWindow", "Generate..."))
        self.actionGenerate.setIconText(_translate("MainWindow", "Generate..."))
        self.actionGenerate.setToolTip(_translate("MainWindow", "Generate distribution"))
from pyqtgraph import PlotWidget
from pyqtgraph.opengl import GLViewWidget
|
import unittest
from django.http import HttpRequest
from blog.views import response_403
class Response403TestCase(unittest.TestCase):
    """Checks the custom 403 page rendered by blog.views.response_403."""
    def test_generic_403_response(self):
        request = HttpRequest()
        request.method = 'GET'
        response = response_403(request)
        self.assertEqual(response.status_code, 403)
        # NOTE(review): "Forbiddena" looks like a typo, but this assertion
        # mirrors whatever the template actually renders -- confirm both
        # sides before changing either.
        self.assertIn(b'<title>403: Forbiddena</title>', response.content)
        self.assertIn(b'<meta name="description" content="SOUTHERN CROSS!">', response.content) # noqa
        # NOTE(review): the trailing '></iframe</iframe>' below is malformed
        # HTML; verify against the template before fixing.
        self.assertIn(b'<iframe width="400" height="300" src="https://www.youtube.com/embed/e37Ri_5xY5U?autoplay=1" frameborder="0" allowfullscreen></iframe</iframe>', response.content) # noqa
|
import uuid
from unittest import mock
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from model_bakery import baker
from coredata.models import Bairro, Batalhao, Municipio
from operations.models import Operacao
from operations.model_recipes import (
op_recipe_with_occurence,
op_recipe_without_occurence,
)
from operations.api_views import (
GeneralInfoViewSet,
OcurrenceInfoOneViewSet,
OcurrenceInfoTwoViewSet,
OperationInfoADPF635ViewSet,
OperationalInfoOneViewSet,
OperationalInfoTwoViewSet,
ResultInfoViewSet,
)
from operations.serializers import (
GeneralObservationSerializer,
InfoADPF635Serializer,
InfoOcorrenciaOneSerializer,
InfoOcorrenciaTwoSerializer,
InfoOperacionaisOperacaoOneSerializer,
InfoOperacionaisOperacaoTwoSerializer,
InfoResultadosOperacaoSerializer,
)
User = get_user_model()
class TestSendInformacaoGeralOperacao(TestCase):
    """
    Exercises the "general info" endpoint: a PUT upserts the Operacao record
    identified by form_uuid for the logged-in user; a GET retrieves it.
    Location lookups (municipio/bairro/batalhao) are mocked so the serializer
    accepts the payload without coredata fixtures.
    """
    url_name = "api-operations:create-general-info"
    def setUp(self):
        # Names the mocked coredata lookups below will report as valid.
        self.nm_mun = "Municipio"
        self.bairro = "Bairro"
        self.batalhao = "Batalhao"
        # Patch the three queryset factories; each returns a Mock whose
        # values_list yields exactly one valid name.
        self.p_municipios = mock.patch.object(Municipio.objects, "get_ordered_values")
        self.m_municpios = self.p_municipios.start()
        self.m_municipio_qs = mock.Mock()
        self.m_municipio_qs.values_list.return_value = (self.nm_mun,)
        self.m_municpios.return_value = self.m_municipio_qs
        self.p_bairros = mock.patch.object(Bairro.objects, "get_ordered_for_municipio")
        self.m_bairros = self.p_bairros.start()
        self.m_bairros_qs = mock.Mock()
        self.m_bairros_qs.values_list.return_value = (self.bairro,)
        self.m_bairros.return_value = self.m_bairros_qs
        self.p_batalhoes = mock.patch.object(Batalhao.objects, "get_ordered_for_municipio")
        self.m_batalhoes = self.p_batalhoes.start()
        self.m_batalhoes_qs = mock.Mock()
        self.m_batalhoes_qs.values_list.return_value = (self.batalhao,)
        self.m_batalhoes.return_value = self.m_batalhoes_qs
        # Authenticated user plus the URL/payload used by most tests.
        self.username = "username"
        self.pwd = "pwd1234"
        self.user = User.objects.create_user(username=self.username, password=self.pwd)
        self.client.force_login(self.user)
        self.form_uuid = uuid.uuid4()
        self.url = reverse(self.url_name, kwargs={"form_uuid": self.form_uuid})
        self.form_data = {
            "data": "2021-02-12",
            "hora": "12:00:00",
            "localidade": "Rua A",
            "bairro": self.bairro,
            "municipio": self.nm_mun,
            "endereco_referencia": "Primeira rua",
            "coordenadas_geo": "-12.9999,45.4555",
            "batalhao_responsavel": self.batalhao,
        }
    def tearDown(self):
        # Stop the coredata patches started in setUp.
        self.p_municipios.stop()
        self.p_bairros.stop()
        self.p_batalhoes.stop()
    def test_save_database_info(self):
        # PUT creates the Operacao for this form uuid and echoes its fields.
        resp = self.client.put(
            self.url,
            data=self.form_data,
            content_type="application/json",
        )
        assert resp.status_code == 200
        operacao = Operacao.objects.get(identificador=self.form_uuid)
        assert operacao.usuario == self.user
        data = resp.data
        assert data["data"] == operacao.data.strftime("%Y-%m-%d")
        assert data["hora"] == operacao.hora.strftime("%H:%M:%S")
        assert data["localidade"] == operacao.localidade
        assert data["bairro"] == operacao.bairro
        assert data["municipio"] == operacao.municipio
        assert data["endereco_referencia"] == operacao.endereco_referencia
        assert data["batalhao_responsavel"] == operacao.batalhao_responsavel
    def test_retrieve_saved_info(self):
        # GET returns the previously saved record for this form uuid.
        operacao = baker.make(
            Operacao,
            usuario=self.user,
            identificador=self.form_uuid,
            _fill_optional=True
        )
        resp = self.client.get(self.url)
        data = resp.data
        assert resp.status_code == 200
        assert data["data"] == operacao.data.strftime("%Y-%m-%d")
        assert data["hora"] == operacao.hora.strftime("%H:%M:%S")
        assert data["localidade"] == operacao.localidade
        assert data["bairro"] == operacao.bairro
        assert data["municipio"] == operacao.municipio
        assert data["endereco_referencia"] == operacao.endereco_referencia
        assert data["batalhao_responsavel"] == operacao.batalhao_responsavel
    def test_update_some_fields(self):
        # A second PUT updates the existing record instead of creating a new one.
        operacao = baker.make(Operacao, usuario=self.user, identificador=self.form_uuid)
        new_info = "Novo Bairro"
        # make new info valid bairro name
        self.m_bairros_qs.values_list.return_value = (new_info,)
        self.form_data["bairro"] = new_info
        resp = self.client.put(
            self.url,
            data=self.form_data,
            content_type="application/json",
        )
        operacao.refresh_from_db()
        assert Operacao.objects.count() == 1
        assert resp.status_code == 200
        assert operacao.bairro == new_info
    def test_another_user_tries_to_update_info(self):
        # A different user must not be able to update someone else's record.
        baker.make(Operacao, usuario=self.user, identificador=self.form_uuid)
        self.client.logout()
        self.username = "another-username"
        self.pwd = "pwd1234"
        user = User.objects.create_user(username=self.username, password=self.pwd)
        self.client.force_login(user)
        self.form_data["bairro"] = "novo bairro"
        resp = self.client.put(
            self.url,
            data=self.form_data,
            content_type="application/json",
        )
        assert resp.status_code == 404
    def test_404_for_object_doesnt_exists(self):
        # An Operacao with a different identificador must not match this url.
        baker.make(Operacao, usuario=self.user)
        resp = self.client.get(self.url)
        assert resp.status_code == 404
    def test_user_cannot_delete_info(self):
        # DELETE is not part of the endpoint's allowed methods.
        resp = self.client.delete(self.url)
        assert resp.status_code == 405
    def test_login_required(self):
        # Anonymous access is rejected.
        self.client.logout()
        resp = self.client.post(self.url)
        assert resp.status_code == 403
    def test_another_user_tries_to_retrive_info(self):
        # A different user cannot read someone else's record either.
        self.username = "another-username"
        self.pwd = "pwd1234"
        self.user = User.objects.create_user(username=self.username, password=self.pwd)
        self.client.force_login(self.user)
        resp = self.client.get(self.url)
        assert resp.status_code == 404
class TestSendInformacaoOperacionalOperacao(TestCase):
url_name = "api-operations:create-operational-info-1"
def setUp(self):
self.username = "username"
self.pwd = "pwd1234"
self.user = User.objects.create_user(username=self.username, password=self.pwd)
self.client.force_login(self.user)
self.form_uuid = uuid.uuid4()
self.url = reverse(self.url_name, kwargs={"form_uuid": self.form_uuid})
self.operacao = baker.make(
Operacao,
usuario=self.user,
identificador=self.form_uuid,
_fill_optional=True
)
self.form_data = {
"unidade_responsavel": "Unidade A",
"unidade_apoiadora": "Unidade X",
"nome_comandante_operacao": "Nome Comandante",
"rg_pm_comandante_operacao": "123456",
"posto_comandante_operacao": "Maj",
}
def test_save_database_info(self):
resp = self.client.put(
self.url,
data=self.form_data,
content_type="application/json",
)
assert resp.status_code == 200
operacao = Operacao.objects.get(identificador=self.form_uuid)
data = resp.data
assert data["unidade_responsavel"] == operacao.unidade_responsavel
assert data["unidade_apoiadora"] == operacao.unidade_apoiadora
assert data["nome_comandante_operacao"] == operacao.nome_comandante_operacao
assert data["rg_pm_comandante_operacao"] == operacao.rg_pm_comandante_operacao
assert data["posto_comandante_operacao"] == operacao.posto_comandante_operacao
def test_retrieve_saved_info(self):
resp = self.client.get(self.url)
data = resp.data
assert resp.status_code == 200
assert data["unidade_responsavel"] == self.operacao.unidade_responsavel
assert data["unidade_apoiadora"] == self.operacao.unidade_apoiadora
assert data["nome_comandante_operacao"] == self.operacao.nome_comandante_operacao
assert data["rg_pm_comandante_operacao"] == self.operacao.rg_pm_comandante_operacao
assert data["posto_comandante_operacao"] == self.operacao.posto_comandante_operacao
def test_update_some_fields(self):
operacao = Operacao.objects.get(identificador=self.form_uuid)
new_info = "891011"
self.form_data["rg_pm_comandante_operacao"] = new_info
resp = self.client.put(
self.url,
data=self.form_data,
content_type="application/json",
)
operacao.refresh_from_db()
assert resp.status_code == 200
assert Operacao.objects.count() == 1
assert operacao.rg_pm_comandante_operacao == new_info
def test_user_cannot_delete_info(self):
    """DELETE is not an allowed method on this endpoint (405)."""
    assert self.client.delete(self.url).status_code == 405
def test_another_user_tries_to_update_info(self):
    """A PUT from a different user must not find the operation (404)."""
    self.client.logout()
    self.username = "another-username"
    self.pwd = "pwd1234"
    intruder = User.objects.create_user(username=self.username, password=self.pwd)
    self.client.force_login(intruder)
    payload = dict(self.form_data, bairro="novo bairro")
    response = self.client.put(
        self.url, data=payload, content_type="application/json"
    )
    assert response.status_code == 404
def test_404_for_object_doesnt_exists(self):
    """GET with an unknown form uuid returns 404."""
    baker.make(Operacao, usuario=self.user)
    unknown_url = reverse(self.url_name, kwargs={"form_uuid": uuid.uuid4()})
    assert self.client.get(unknown_url).status_code == 404
def test_login_required(self):
    """Anonymous requests are rejected with 403."""
    self.client.logout()
    assert self.client.post(self.url).status_code == 403
def test_another_user_tries_to_retrive_info(self):
    """A GET from a different user must not find the operation (404)."""
    self.username = "another-username"
    self.pwd = "pwd1234"
    other_user = User.objects.create_user(username=self.username, password=self.pwd)
    self.user = other_user
    self.client.force_login(other_user)
    response = self.client.get(self.url)
    assert response.status_code == 404
class TestOperationSectionFlowMixin:
    """Shared fixtures for the per-section operation flow tests.

    Subclasses set the endpoint name, the view/serializer under test and the
    section number the operation is expected to advance to after a save.
    """

    url_name = None
    view_class = None
    serializer_class = None
    expected_section = None

    def setUp(self):
        self.username = "username"
        self.pwd = "pwd1234"
        self.user = User.objects.create_user(username=self.username, password=self.pwd)
        self.client.force_login(self.user)
        self.form_uuid = uuid.uuid4()
        self.operacao = baker.make(
            Operacao, usuario=self.user, identificador=self.form_uuid
        )
        self.url = reverse(self.url_name, kwargs={"form_uuid": self.form_uuid})
        # Payload comes from serializing a prepared (unsaved) recipe object.
        self.op_recipe_obj = op_recipe_with_occurence.prepare()
        self.form_data = self.serializer_class(self.op_recipe_obj).data
class TestADPF635ViewSet(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-adpf635l-info"
    view_class = OperationInfoADPF635ViewSet
    serializer_class = InfoADPF635Serializer
    expected_section = 3

    def test_update_section_when_saving_data(self):
        """Saving the ADPF-635 section advances the operation to section 3."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestAPIOperationalInfoOne(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-operational-info-1"
    view_class = OperationalInfoOneViewSet
    serializer_class = InfoOperacionaisOperacaoOneSerializer
    expected_section = 4

    def test_update_section_when_saving_data(self):
        """Saving operational info part 1 advances the operation to section 4."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestAPIOperationalInfoTwo(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-operational-info-2"
    view_class = OperationalInfoTwoViewSet
    serializer_class = InfoOperacionaisOperacaoTwoSerializer
    expected_section = 5

    def test_update_section_when_saving_data(self):
        """Saving operational info part 2 advances the operation to section 5."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestAPIResult(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-result-info"
    view_class = ResultInfoViewSet
    serializer_class = InfoResultadosOperacaoSerializer
    expected_section = 6

    def test_update_section_when_saving_data(self):
        """Saving results with an occurrence advances the operation to section 6."""
        payload = dict(self.form_data, houve_ocorrencia_operacao=True)
        response = self.client.put(
            self.url, data=payload, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestAPIOcurrenceOne(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-ocurrence-info-1"
    view_class = OcurrenceInfoOneViewSet
    serializer_class = InfoOcorrenciaOneSerializer
    expected_section = 7

    def test_update_section_when_saving_data(self):
        """Saving occurrence info part 1 advances the operation to section 7."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestAPIOcurrenceTwo(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-ocurrence-info-2"
    view_class = OcurrenceInfoTwoViewSet
    serializer_class = InfoOcorrenciaTwoSerializer
    expected_section = 8

    def test_update_section_when_saving_data(self):
        """Saving occurrence info part 2 advances the operation to section 8."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestGeneralObservations(TestOperationSectionFlowMixin, TestCase):
    url_name = "operations_api:create-general-observation"
    view_class = GeneralInfoViewSet
    serializer_class = GeneralObservationSerializer
    expected_section = 9

    def test_update_section_when_saving_data(self):
        """Saving general observations advances the operation to section 9."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
class TestOperationFlowSkipOptionalSections(TestCase):
    """When 'houve_ocorrencia_operacao' is False, sections 5 and 6 are skipped
    and the flow jumps straight to the last section."""

    url_name = "operations_api:create-result-info"
    view_class = ResultInfoViewSet
    serializer_class = InfoResultadosOperacaoSerializer
    expected_section = 6

    def setUp(self):
        self.username = "username"
        self.pwd = "pwd1234"
        self.user = User.objects.create_user(username=self.username, password=self.pwd)
        self.client.force_login(self.user)
        self.form_uuid = uuid.uuid4()
        self.operacao = baker.make(
            Operacao, usuario=self.user, identificador=self.form_uuid
        )
        self.url = reverse(self.url_name, kwargs={"form_uuid": self.form_uuid})
        # Recipe without an occurrence; force the flag off in the payload.
        self.op_recipe_obj = op_recipe_without_occurence.prepare()
        self.form_data = self.serializer_class(self.op_recipe_obj).data
        self.form_data["houve_ocorrencia_operacao"] = False

    def test_skip_to_last_section(self):
        """Without an occurrence the operation jumps straight to section 6."""
        response = self.client.put(
            self.url, data=self.form_data, content_type="application/json"
        )
        self.operacao.refresh_from_db()
        assert response.status_code == 200
        assert self.operacao.secao_atual == self.expected_section
|
from django.contrib import messages
from django.contrib.auth import authenticate, logout
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views.generic import ListView, DetailView
from django.views.generic.base import View, TemplateView

from .forms import ReviewForm, RatingForm, RegisterForm, ProfileForm
from .models import Movie, Category, Actor, Genre, Rating, Profile
class EditProfileView(TemplateView):
    """Display and process the profile editing form."""

    template_name = "registration/edit_profile.html"

    def dispatch(self, request, *args, **kwargs):
        profile = self.get_profile(request.user)
        if request.method == 'POST':
            form = ProfileForm(request.POST, request.FILES, instance=profile)
            if form.is_valid():
                form.instance.user = request.user
                form.save()
                messages.success(request, u"Профиль успешно обновлен!")
                return redirect(reverse("profile"))
        else:
            # Build the unbound form only for non-POST requests; the original
            # constructed it unconditionally and discarded it on POST.
            form = ProfileForm(instance=profile)
        return render(request, self.template_name, {'form': form})

    def get_profile(self, user):
        """Return the user's profile, or None when it does not exist yet."""
        try:
            return user.profile
        # Narrowed from a bare except: missing profile raises
        # Profile.DoesNotExist; an AnonymousUser has no .profile at all.
        except (Profile.DoesNotExist, AttributeError):
            return None
class RegisterView(TemplateView):
    """Display the registration form and create a new user on success."""

    template_name = "registration/register.html"

    def dispatch(self, request, *args, **kwargs):
        if request.method == 'POST':
            form = RegisterForm(request.POST)
            if form.is_valid():
                self.create_new_user(form)
                messages.success(request, u"Вы успешно зарегистрировались!")
                return redirect(reverse("login"))
        else:
            # Unbound form only for non-POST requests; the original built one
            # unconditionally and threw it away on POST.
            form = RegisterForm()
        context = {
            'form': form
        }
        return render(request, self.template_name, context)

    def create_new_user(self, form):
        """Create a User from validated form data; email is optional."""
        email = form.cleaned_data.get('email')
        User.objects.create_user(form.cleaned_data['username'], email, form.cleaned_data['password'],
                                 first_name=form.cleaned_data['first_name'],
                                 last_name=form.cleaned_data['last_name'])
class LogoutView(View):
    """Terminate the current session and send the user to the site root."""

    def dispatch(self, request, *args, **kwargs):
        logout(request)
        return HttpResponseRedirect("/")
class ProfileView(TemplateView):
    """Show the current user's profile; redirect to editing when absent."""

    template_name = "registration/profile.html"

    def dispatch(self, request, *args, **kwargs):
        profile_exists = Profile.objects.filter(user=request.user).exists()
        if not profile_exists:
            return redirect(reverse("edit_profile"))
        return render(request, self.template_name, {'selected_user': request.user})
class GenreYear:
    """Mixin exposing genre and release-year lists for filter sidebars."""

    def get_genres(self):
        """Return all genres."""
        return Genre.objects.all()

    def get_years(self):
        """Return distinct release years of published (non-draft) movies."""
        # .distinct() avoids emitting one row per movie that shares a year.
        return Movie.objects.filter(draft=False).values("year").distinct()
class MoviesView(GenreYear,ListView):
    """Paginated list of published movies."""
    model = Movie
    # One movie per page.
    paginate_by = 1
    # Drafts are excluded from the public listing.
    queryset = Movie.objects.filter(draft=False)
class MovieDetailView(GenreYear, DetailView):
    """Full description of a single movie, looked up by its ``url`` slug."""

    model = Movie
    slug_field = "url"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Empty rating form for the star-rating widget on the detail page.
        ctx.update(star_form=RatingForm())
        return ctx
class AddReview(View):
    """Handle review (comment) submission for a movie."""

    def post(self, request, pk):
        """Validate and save a review, then redirect back to the movie page.

        Uses get_object_or_404 so a bad pk yields 404 instead of an
        unhandled Movie.DoesNotExist (500).
        """
        movie = get_object_or_404(Movie, id=pk)
        form = ReviewForm(request.POST)
        if form.is_valid():
            review = form.save(commit=False)
            parent_id = request.POST.get("parent", None)
            if parent_id:
                # A reply references the review it answers.
                review.parent_id = int(parent_id)
            review.movie = movie
            review.save()
        # Invalid forms are silently ignored (original behavior preserved).
        return redirect(movie.get_absolute_url())
class ActorView(GenreYear,DetailView):
    """Detail page for a single actor."""
    model = Actor
    template_name = 'movies/actor.html'
    # Actors are addressed in URLs by name rather than a numeric pk.
    slug_field = "name"
class FilterMoviesView(GenreYear, ListView):
    """Movie list filtered by ``year`` and/or ``genre`` query parameters."""

    paginate_by = 1

    def get_queryset(self):
        years = self.request.GET.getlist("year")
        genres = self.request.GET.getlist("genre")
        # OR the two filters; distinct() collapses duplicates caused by
        # the many-to-many genre join.
        return Movie.objects.filter(
            Q(year__in=years) | Q(genres__in=genres)
        ).distinct()

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Re-serialize the selected filters so pagination links keep them.
        context["year"] = ''.join(
            f"year={value}&" for value in self.request.GET.getlist("year")
        )
        context["genre"] = ''.join(
            f"genre={value}&" for value in self.request.GET.getlist("genre")
        )
        return context
class SearchMoviesView(GenreYear, ListView):
    """Case-insensitive title search over movies."""

    paginate_by = 1

    def get_queryset(self):
        # Default to "" so a request without ?search= does not pass None
        # to icontains (which raises instead of returning results).
        query = self.request.GET.get("search", "")
        return Movie.objects.filter(title__icontains=query)

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Keep the search term in pagination links.
        context["search"] = f'search={self.request.GET.get("search")}&'
        return context
class AddStarRating(View):
    """Attach a star rating to a movie, keyed by the client's IP address."""

    def get_client_ip(self, request):
        """Return the originating client IP, honouring X-Forwarded-For."""
        forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # First entry in the chain is the original client.
            return forwarded.split(',')[0]
        return request.META.get('REMOTE_ADDR')

    def post(self, request):
        form = RatingForm(request.POST)
        if not form.is_valid():
            return HttpResponse(status=400)
        # One rating per (ip, movie): update it if it already exists.
        Rating.objects.update_or_create(
            ip=self.get_client_ip(request),
            movie_id=int(request.POST.get("movie")),
            defaults={'star_id': int(request.POST.get("star"))},
        )
        return HttpResponse(status=201)
# Person class - member variables (name, age)
# Employee class inherits from Person
class Person:
    """A person with a name and an age."""
    def __init__(self, name, age):
        # Store the given name and age on the instance.
        self.name = name
        self.age = age
class Employee(Person):
    """An employee; inherits all Person behavior unchanged."""
    pass
if __name__ == "__main__":
    # Demonstrate Person and its Employee subclass.
    p1 = Person("한강", 25)
    print(p1.name, p1.age)
    # Bug fix: e1/e2 were instantiated as Person although the surrounding
    # comments and names indicate Employee; Employee inherits __init__.
    e1 = Employee("이이", 32)
    print(e1.name, e1.age)
    e2 = Employee("이황", 33)
    print(e2.name, e2.age)
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import requests
from ipywidgets import interact, fixed
import ipywidgets as widgets
import IPython.display as display
def imshow_mpl(x):
    '''
    Plots image x (in chw order) proportional to its original size.

    Returns the matplotlib Axes the image was drawn on. The docstring
    previously said "cwh"; the (1, 2, 0) transpose shows the input is chw.
    '''
    # Drop singleton dims, then chw -> hwc for imshow.
    x = np.array(x).squeeze().transpose((1, 2, 0)).astype(np.uint8)
    dpi = plt.rcParams['figure.dpi']
    height, width, _ = x.shape
    # Size the figure so one image pixel maps to one screen pixel.
    figsize = width / float(dpi), height / float(dpi)
    _, ax = plt.subplots(figsize=figsize)
    ax.imshow(x)
    ax.set_axis_off()
    return ax
def imshow(x):
    '''
    Displays image x (in chw order) at its original size via IPython display.
    '''
    # Drop singleton dims, then chw -> hwc for PIL.
    x = np.array(x).squeeze().transpose((1, 2, 0)).astype(np.uint8)
    display.display(Image.fromarray(x))
def get_image(url):
    '''
    Downloads an image from url and returns an np array in chw order.
    '''
    im = Image.open(requests.get(url, stream=True).raw)
    im = np.array(im)
    # hwc -> chw. NOTE(review): assumes a 3-channel image; a grayscale
    # (2-D) array would fail here — confirm against callers.
    return im.transpose(2, 0, 1)
def interct_imshow(img_list, description='iteration'):
    '''
    Given a list of images in chw order, creates an interactive slider and
    shows the selected list element.
    '''
    interact(
        lambda idx: imshow(img_list[idx]),
        idx=widgets.IntSlider(
            min=0,
            max=len(img_list) - 1,
            step=1,
            value=len(img_list) - 1,
            # Fixed: these are IntSlider options. Passing them as extra
            # keyword arguments to interact() makes interact build spurious
            # widgets for them instead of configuring the slider.
            continuous_update=False,
            description=description,
        ),
    )
def get_vgg19_layer_names(model):
    """Return the model's attribute names from 'conv1_1' through 'fc8'.

    Relies on the instance __dict__ preserving attribute insertion order;
    both endpoints are included in the returned slice.
    """
    names = list(model.__dict__)
    first = names.index('conv1_1')
    last = names.index('fc8')
    return names[first:last + 1]
|
import copy
import mmcv
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.runner import force_fp32
from os import path as osp
from torch import nn as nn
from torch.nn import functional as F
from mmdet3d.core import (Box3DMode, bbox3d2result, merge_aug_bboxes_3d,
show_result)
from mmdet3d.ops import Voxelization
from mmdet.core import multi_apply
from mmdet.models import DETECTORS
from .. import builder
from .base import Base3DDetector
@DETECTORS.register_module()
class MultiSensorMultiTaskSep(Base3DDetector):
    """Multi-modality (image + LiDAR) multi-task detector.

    Samples image-backbone features at point locations, voxelizes the raw
    point features and the sampled image features with two separate voxel
    encoders, fuses them, and runs a 3D bbox head; an image segmentation
    head is trained jointly on point-wise labels.
    """

    def __init__(self,
                 pts_voxel_layer=None,
                 pts_voxel_encoder1=None,
                 pts_voxel_encoder2=None,
                 pts_middle_encoder=None,
                 img_backbone=None,
                 img_seg_head=None,
                 pts_backbone=None,
                 pts_neck=None,
                 pts_bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(MultiSensorMultiTaskSep, self).__init__()
        # Each sub-module is built only when its config is provided, so the
        # with_* properties below can test for attribute presence.
        if img_backbone:
            self.img_backbone = builder.build_backbone(img_backbone)
        if img_seg_head:
            self.img_seg_head = builder.build_head(img_seg_head)
        if pts_voxel_layer:
            self.pts_voxel_layer = Voxelization(**pts_voxel_layer)
        if pts_voxel_encoder1:
            self.pts_voxel_encoder1 = builder.build_voxel_encoder(pts_voxel_encoder1)
        if pts_voxel_encoder2:
            self.pts_voxel_encoder2 = builder.build_voxel_encoder(pts_voxel_encoder2)
        if pts_middle_encoder:
            self.pts_middle_encoder = builder.build_middle_encoder(pts_middle_encoder)
        if pts_backbone:
            self.pts_backbone = builder.build_backbone(pts_backbone)
        if pts_neck is not None:
            self.pts_neck = builder.build_neck(pts_neck)
        if pts_bbox_head:
            # Inject the stage-specific train/test configs into the head cfg.
            pts_train_cfg = train_cfg.pts if train_cfg else None
            pts_bbox_head.update(train_cfg=pts_train_cfg)
            pts_test_cfg = test_cfg.pts if test_cfg else None
            pts_bbox_head.update(test_cfg=pts_test_cfg)
            self.pts_bbox_head = builder.build_head(pts_bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    def extract_img_feat(self, img, img_metas):
        """Extract features of images."""
        input_shape = img.shape[-2:]
        # Record the padded input shape for downstream consumers.
        for img_meta in img_metas:
            img_meta.update(input_shape=input_shape)
        img_feats = self.img_backbone(img)
        return img_feats

    def extract_pts_feat(self, pts, img_feats, pts_indices, img_metas):
        """Extract features of points, augmented with sampled image features."""
        # (N, C, H, W) -> (N, H, W, C) so per-point gathering indexes (row, col).
        x = img_feats.permute(0, 2, 3, 1)
        sample_feats = []
        for i in range(x.shape[0]):
            sample_feats.append(x[i][pts_indices[i][:, 0], pts_indices[i][:, 1]])
        # Concatenate raw point features with the sampled image features.
        concat_pts = []
        for i in range(len(pts)):
            concat_pts.append(torch.cat([pts[i], sample_feats[i]], 1))
        voxels, num_points, coors = self.voxelize(concat_pts)  # voxels=(M, T=64, ndim=4+64); coors=(M, 4), (batch_idx, z, y, x)
        # Encoder 1 sees the raw point channels, encoder 2 the image channels.
        voxel_features1 = self.pts_voxel_encoder1(voxels[:, :, :4], num_points, coors,
                                                  img_feats, img_metas)  # (M, C=64); M=num of non-empty voxels
        voxel_features2 = self.pts_voxel_encoder2(voxels[:, :, 4:], num_points, coors,
                                                  img_feats, img_metas)  # (M, C=64); M=num of non-empty voxels
        batch_size = coors[-1, 0] + 1
        x1 = self.pts_middle_encoder(voxel_features1, coors, batch_size)  # (N, C, H, W) = (4, 64, 200, 400)
        x2 = self.pts_middle_encoder(voxel_features2, coors, batch_size)  # (N, C, H, W) = (4, 64, 200, 400)
        x = torch.cat([x1, x2], dim=1)  # (N, C, H, W) = (4, 128, 200, 400)
        x = self.pts_backbone(x)
        if self.with_pts_neck:
            x = self.pts_neck(x)
        return x

    def extract_feat(self, points, pts_indices, img, img_metas):
        """Extract image features, then point features conditioned on them."""
        img_feats = self.extract_img_feat(img, img_metas)  # (N, 64, 225, 400)
        pts_feats = self.extract_pts_feat(pts=points, img_feats=img_feats, pts_indices=pts_indices, img_metas=img_metas)
        return img_feats, pts_feats

    def forward_train(self,
                      img=None,
                      seg_points=None,
                      seg_pts_indices=None,
                      seg_label=None,
                      points=None,
                      pts_indices=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      img_metas=None,
                      gt_bboxes_ignore=None):
        """Compute segmentation and detection losses for one training step.

        points: list of tensors; len(points)=batch_size; points[i]=(num_points, 4).
        """
        img_feats, pts_feats = self.extract_feat(points, pts_indices, img, img_metas)
        losses = dict()
        # Point-wise image segmentation branch.
        seg_logits = self.img_seg_head(img_feats=img_feats, seg_pts=seg_points, seg_pts_indices=seg_pts_indices)
        losses_img = self.img_seg_head.loss(seg_logits, seg_label)
        losses.update(losses_img)
        # 3D detection branch (pts_feats is a tuple of feature maps).
        losses_pts = self.forward_pts_train(pts_feats, gt_bboxes_3d,
                                            gt_labels_3d, img_metas,
                                            gt_bboxes_ignore)
        losses.update(losses_pts)
        return losses

    def forward_pts_train(self,
                          pts_feats,
                          gt_bboxes_3d,
                          gt_labels_3d,
                          img_metas,
                          gt_bboxes_ignore=None):
        """Forward the bbox head and compute its losses."""
        outs = self.pts_bbox_head(pts_feats)
        loss_inputs = outs + (gt_bboxes_3d, gt_labels_3d, img_metas)
        losses = self.pts_bbox_head.loss(
            *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
        return losses

    def simple_test_pts(self, x, img_metas, rescale=False):
        """Test function of point cloud branch."""
        outs = self.pts_bbox_head(x)
        bbox_list = self.pts_bbox_head.get_bboxes(
            *outs, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results

    def simple_test(self, img, seg_points, seg_pts_indices, points, pts_indices, img_metas, rescale=False):
        """Test function without augmentation."""
        img_feats, pts_feats = self.extract_feat(points, pts_indices, img, img_metas)
        seg_logits = self.img_seg_head(img_feats=img_feats, seg_pts=seg_points, seg_pts_indices=seg_pts_indices)
        bbox_list = [dict() for i in range(len(img_metas))]  # len(bbox_list)=batch_size
        bbox_pts = self.simple_test_pts(pts_feats, img_metas, rescale=rescale)
        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox
        return seg_logits, bbox_list

    @torch.no_grad()
    @force_fp32()
    def voxelize(self, points):
        """Apply dynamic voxelization to points.

        Args:
            points (list[torch.Tensor]): Points of each sample.

        Returns:
            tuple[torch.Tensor]: Concatenated points, number of points
                per voxel, and coordinates.
        """
        voxels, coors, num_points = [], [], []
        for res in points:
            res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)
            voxels.append(res_voxels)
            coors.append(res_coors)
            num_points.append(res_num_points)
        voxels = torch.cat(voxels, dim=0)
        num_points = torch.cat(num_points, dim=0)
        coors_batch = []
        # Prepend the batch index to each voxel coordinate.
        for i, coor in enumerate(coors):
            coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)
            coors_batch.append(coor_pad)
        coors_batch = torch.cat(coors_batch, dim=0)
        return voxels, num_points, coors_batch

    def aug_test(self, imgs, img_metas, **kwargs):
        """Test function with test time augmentation (not implemented)."""
        pass

    def show_results(self, data, result, out_dir):
        """Results visualization.

        Args:
            data (dict): Input points and the information of the sample.
            result (dict): Prediction results.
            out_dir (str): Output directory of visualization result.
        """
        for batch_id in range(len(result)):
            if isinstance(data['points'][0], DC):
                points = data['points'][0]._data[0][batch_id].numpy()
            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
                points = data['points'][0][batch_id]
            else:
                # Fixed: ValueError was constructed but never raised.
                raise ValueError(f"Unsupported data type {type(data['points'][0])} "
                                 f'for visualization!')
            if isinstance(data['img_metas'][0], DC):
                pts_filename = data['img_metas'][0]._data[0][batch_id][
                    'pts_filename']
                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
                    'box_mode_3d']
            elif mmcv.is_list_of(data['img_metas'][0], dict):
                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
            else:
                # Fixed: ValueError was constructed but never raised.
                raise ValueError(
                    f"Unsupported data type {type(data['img_metas'][0])} "
                    f'for visualization!')
            file_name = osp.split(pts_filename)[-1].split('.')[0]
            assert out_dir is not None, 'Expect out_dir, got none.'
            # Only draw reasonably confident boxes.
            inds = result[batch_id]['pts_bbox']['scores_3d'] > 0.1
            pred_bboxes = copy.deepcopy(
                result[batch_id]['pts_bbox']['boxes_3d'][inds].tensor.numpy())
            # For now we convert points into depth mode.
            if box_mode_3d == Box3DMode.DEPTH:
                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            elif (box_mode_3d == Box3DMode.CAM) or (box_mode_3d
                                                    == Box3DMode.LIDAR):
                points = points[..., [1, 0, 2]]
                points[..., 0] *= -1
                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
                                                Box3DMode.DEPTH)
                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            else:
                # Fixed: ValueError was constructed but never raised.
                raise ValueError(
                    f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
            show_result(points, None, pred_bboxes, out_dir, file_name)

    def init_weights(self, pretrained=None):
        """Initialize model weights."""
        super(MultiSensorMultiTaskSep, self).init_weights(pretrained)
        if pretrained is None:
            img_pretrained = None
            pts_pretrained = None
        elif isinstance(pretrained, dict):
            img_pretrained = pretrained.get('img', None)
            pts_pretrained = pretrained.get('pts', None)
        else:
            raise ValueError(
                f'pretrained should be a dict, got {type(pretrained)}')
        if self.with_img_backbone:
            self.img_backbone.init_weights(pretrained=img_pretrained)
        if self.with_pts_backbone:
            self.pts_backbone.init_weights(pretrained=pts_pretrained)
        if self.with_img_neck:
            if isinstance(self.img_neck, nn.Sequential):
                for m in self.img_neck:
                    m.init_weights()
            else:
                self.img_neck.init_weights()
        if self.with_img_roi_head:
            self.img_roi_head.init_weights(img_pretrained)
        if self.with_img_rpn:
            self.img_rpn_head.init_weights()
        if self.with_pts_bbox:
            self.pts_bbox_head.init_weights()

    @property
    def with_img_shared_head(self):
        """bool: Whether the detector has a shared head in image branch."""
        return hasattr(self,
                       'img_shared_head') and self.img_shared_head is not None

    @property
    def with_pts_bbox(self):
        """bool: Whether the detector has a 3D box head."""
        return hasattr(self,
                       'pts_bbox_head') and self.pts_bbox_head is not None

    @property
    def with_img_bbox(self):
        """bool: Whether the detector has a 2D image box head."""
        return hasattr(self,
                       'img_bbox_head') and self.img_bbox_head is not None

    @property
    def with_img_backbone(self):
        """bool: Whether the detector has a 2D image backbone."""
        return hasattr(self, 'img_backbone') and self.img_backbone is not None

    @property
    def with_pts_backbone(self):
        """bool: Whether the detector has a 3D backbone."""
        return hasattr(self, 'pts_backbone') and self.pts_backbone is not None

    @property
    def with_fusion(self):
        """bool: Whether the detector has a fusion layer."""
        # Fixed: previously tested hasattr('pts_fusion_layer') but then read
        # self.fusion_layer, raising AttributeError whenever pts_fusion_layer
        # existed without a fusion_layer attribute.
        return hasattr(self,
                       'pts_fusion_layer') and self.pts_fusion_layer is not None

    @property
    def with_img_neck(self):
        """bool: Whether the detector has a neck in image branch."""
        return hasattr(self, 'img_neck') and self.img_neck is not None

    @property
    def with_pts_neck(self):
        """bool: Whether the detector has a neck in 3D detector branch."""
        return hasattr(self, 'pts_neck') and self.pts_neck is not None

    @property
    def with_img_rpn(self):
        """bool: Whether the detector has a 2D RPN in image detector branch."""
        return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None

    @property
    def with_img_roi_head(self):
        """bool: Whether the detector has a RoI Head in image branch."""
        return hasattr(self, 'img_roi_head') and self.img_roi_head is not None

    @property
    def with_voxel_encoder(self):
        """bool: Whether the detector has a voxel encoder."""
        return hasattr(self,
                       'voxel_encoder') and self.voxel_encoder is not None

    @property
    def with_middle_encoder(self):
        """bool: Whether the detector has a middle encoder."""
        return hasattr(self,
                       'middle_encoder') and self.middle_encoder is not None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.