text stringlengths 8 6.05M |
|---|
import profile
def fib(n):
# from http://en.literateprograms.org/Fibonacci_numbers_(Python)
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def fib_seq(n):
seq = [ ]
if n > 0:
seq.extend(fib_seq(n-1))
seq.append(fib(n))
return seq
print 'RAW'
print '=' * 80
# run() takes a string statement as argument, and creates a report of
# the time spent executing different lines of code while running the statement.
profile.run('print fib_seq(20); print')
# Since there are only 66 primitive calls, we know that the vast majority of those 57k calls were recursive.
"""
RAW
================================================================================
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765]
57356 function calls (66 primitive calls) in 0.438 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
21 0.000 0.000 0.000 0.000 :0(append)
20 0.000 0.000 0.000 0.000 :0(extend)
1 0.001 0.001 0.001 0.001 :0(setprofile)
1 0.000 0.000 0.437 0.437 <string>:1(<module>)
1 0.000 0.000 0.438 0.438 profile:0(print fib_seq(20); print)
0 0.000 0.000 profile:0(profiler)
21/1 0.002 0.000 0.437 0.437 profile_fibonacci_raw.py:12(fib_seq)
57291/21 0.435 0.000 0.435 0.021 profile_fibonacci_raw.py:3(fib)
"""
|
import numpy as np
from dps.train import training_loop
from dps.config import DEFAULT_CONFIG
from dps.rl.algorithms import qlearning
from dps.env import cliff_walk
from dps.rl.policy import BuildLinearController
config = DEFAULT_CONFIG.copy()
config.update(qlearning.config)
config.update(cliff_walk.config)
config.update(
T=20,
width=4,
n_actions=2,
build_controller=BuildLinearController(),
max_steps=100000,
threshold=-1000,
steps_per_target_update=1,
beta_schedule=0.0,
alpha=0.0,
double=False,
reverse_double=False,
exploration_schedule=1.0,
)
np.random.seed(10)
config.order = np.random.randint(config.n_actions, size=config.width)
np.random.seed()
with config:
training_loop()
|
#!/usr/bin/python
# Copyright (c) 2016 University of Utah Student Computing Labs. ################
# All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and this permission notice appear
# in supporting documentation, and that the name of The University
# of Utah not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission. This software is supplied as is without expressed or
# implied warranties of any kind.
################################################################################
# obfuscate_keylist.py #########################################################
#
# A Python script to help obfuscate a plain text keyfile.
#
#
# 1.0.0 2016.03.07 initial release. tjm
#
################################################################################
# notes: #######################################################################
#
#
################################################################################
import base64
import plistlib
import argparse
import os
import sys
def main():
#
# parse option definitions
parser = argparse.ArgumentParser(description='Obfuscate plain text keyfile to base64-encoded plist.')
parser.add_argument('-s', '--source', help='Set path to source keyfile', required=True)
parser.add_argument('-d', '--destination', help='Set path to save obfuscated keyfile', required=True)
parser.add_argument('-t', '--testmode', action="store_true", default=False, help='Test mode, verbose output.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
if args.testmode:
print "Source file : %s" % args.source
print "Destination file: %s\n" % args.destination
obfuscated = []
unobfuscated_string = ''
obfuscated_string = ''
has_new_label = False
if os.path.exists(args.destination):
continue_choice = False
continue_entry = raw_input("Destination file \"%s\" already exists, Continue? [yN]:" % args.destination)
while not continue_choice:
if continue_entry is "n" or continue_entry is "N" or continue_entry is "":
print "Exiting."
sys.exit(1)
elif continue_entry is "y" or continue_entry is "Y":
break
else:
continue_entry = raw_input("Invalid entry. Destination file \"%s\" already exists, Continue? [yN]:" % args.destination)
try:
tmp_file = open(args.source)
content_raw = tmp_file.read()
tmp_file.close()
except IOError:
print "%s not found. Exiting." % args.source
sys.exit(1)
except Exception as e:
print "Unknown error [%s]. Exiting." % e
sys.exit(1)
content_raw = content_raw.split("\n")
content_raw = [x for x in content_raw if x]
if args.testmode:
print "plain text: \n%s\n" % content_raw
for x in content_raw:
label, pword = x.split(':')
if label.lower() == 'new':
if has_new_label:
print "ERROR. Keylist has multiple \'new\' labels and is not valid. Exiting."
sys.exit(1)
else:
has_new_label = True
if args.testmode:
print "entry : %r, %r, %r" % (label, pword, has_new_label)
pword = base64.b64encode(pword)
try:
commented = label.split('#')[1]
commented = base64.b64encode(commented)
is_commented = True
except:
is_commented = False
if is_commented:
output_string = "#"+commented+":"+pword
else:
output_string = label+":"+pword
unobfuscated_string = unobfuscated_string + output_string + ","
obfuscated.append(output_string)
if args.testmode:
print "obfuscated: %s" % (output_string)
if not has_new_label:
print "ERROR. Keylist has no \'new\' label and is not valid. Exiting."
sys.exit(1)
pl = dict(
data = base64.b64encode(unobfuscated_string)
)
if args.testmode:
print "\nplist entry: \n%s\n" % pl
try:
plistlib.writePlist(pl, args.destination)
except Exception as e:
print "Unknown error [%s]. Exiting." % e
sys.exit(1)
if args.testmode:
print "%s created. Exiting." % args.destination
# end code here.
if __name__ == '__main__':
main()
|
new_text = "winner winner chicken dinner"
print(new_text[0],new_text[1],new_text[2], new_text[3])
print(new_text[0:6])
cs_all_in_one = "파이썬, 자료구조, C, C++"
cs_all_in_one.split("+")
print(cs_all_in_one)
python = "파이썬"
before = "cs all in one %s" % python
after = "cs 올인원 {}".format(python)
print(before)
print(after)
str1 = '*'
str2 = str1*50
print(str2) |
# By: Jared Donnelly
# CS 110 - Prof. Kevin Ryan
# I pledge my Honor that I have abided by the Stevens Honor System
def main():
print("The following program accepts numerical inputs and sums them")
print("Please list all the numbers you would like to enter in a list separated by spaces")
sumables = input("Please being inputting your numbers below, then hit enter when you're finished: \n")
finalSum = 0
sumables = sumables.split()
for num in range(len(sumables)):
finalSum = finalSum + int(sumables[num])
print("The total sum of the list is:", finalSum)
main()
|
const { HeroType } = require('./model/hero');
const { AbilityType } = require('./model/abilites');
const { State } = require('./model/state');
const { Map } = require('./model/map');
const { Parameters } = require('./model/parameters');
const { Teams } = require('./model/teams');
let game_map = null;
let game_params = null;
let game_teams = null;
let CoreObj = {
enemyType: 0,
aims: {},
my_builds: {},
groups: [],
counter: 0,
total_army: 0,
flag: 0,
criticalDistance: 0,
speedRate: 1,
firstFlag: 0,
firstEnemyID: 0,
firstMyID: 0,
tick: 0
}
const Bot = (game, game_teams, game_params, game_map) => {
try {
/* Получение состояния игры */
if (game && game_teams && game_params) {
const state = new State(game, game_teams, game_params);
const my_buildings = state.my_buildings();
const my_squads = state.my_squads();
const enemy_buildings = state.enemy_buildings();
const enemy_squads = state.enemy_squads();
const neutral_buildings = state.neutral_buildings();
const forges_buildings = state.forges_buildings();
CoreObj.tick = state.state["Tick"];
// init of data:
if (!CoreObj.flag)
{
console.log(`Maps`)
console.log(game_map)
console.log(`PARAMS`)
let aa = JSON.parse(game_params.abilities[0].ability_data);
CoreObj.speedRate = aa.SpeedModifier;
CoreObj.firstEnemyID = enemy_buildings[0].id;
CoreObj.firstMyID = my_buildings[0].id;
CoreObj.flag = 1;
CoreObj.player_color = my_buildings[0].player_color;
// рассчитываем критическую дистанцию.
let aims = game_map.get_nearest_towers(CoreObj.firstMyID, neutral_buildings);
CoreObj.criticalDistance = game_map.towers_distance(aims[3].id, CoreObj.firstMyID) || 5;
}
// first logic:
if (CoreObj.firstFlag != 5)
firstLogic(my_buildings, neutral_buildings, enemy_buildings, my_squads, state)
else{
// MAIN LOGIC:
console.log("MAIN LOGIC");
// Сбросить захваченные цели
update_aims(CoreObj.aims, CoreObj.my_builds, state);
apply_abilities(state);
// get aims:
my_buildings.forEach(my_building => {
// если враг слишком близко, атаковать его
if (!check_nearest_enemy(my_building, enemy_buildings))
{
// Если башня не зайдействована
if ( ! CoreObj.my_builds[my_building.id] )
single_attack(my_building, enemy_buildings, neutral_buildings)
}
});
// групповая атака
group_attack(my_buildings, enemy_buildings, neutral_buildings);
ids = upgrade_towers(my_buildings);
if (ids.length)
{
for (let i = 0; i < ids.length; i++) {
const tower_id = ids[i];
if (CoreObj.my_builds[tower_id])
delete CoreObj.my_builds[tower_id];
}
}
}
// оценить отправленные цели
console.log(CoreObj);
}
}
catch (e) {
console.log(e);
} finally {
process.send('end');
}
};
process.on('message', async (game) => {
if (game.initial) {
game_map = new Map(game.data); // карта игрового мира
game_params = new Parameters(game.data); // параметры игры
game_teams = new Teams(game.data); // моя команда
} else
await Bot(game.data, game_teams, game_params, game_map);
});
function count_per_dist(aim, startPos)
{
try{
// определяем расстояние между башнями
const distance = game_map.towers_distance(startPos.id, aim.id);
let rate = 1;
if (CoreObj.firstFlag < 5)
rate = CoreObj.speedRate;
// определяем сколько тиков идти до нее со стандартной скоростью
const ticks = distance / (game_params.creep.speed * rate);
// определяем прирост башни в соответствии с ее уровнем
let enemy_creeps = 0;
if (aim.creeps_count >= aim.level.player_max_count)
// если текущее количество крипов больше чем положено по уровню
enemy_creeps = aim.creeps_count;
else {
// если меньше - будет прирост
const grow_creeps = ticks / aim.level.creep_creation_time;
enemy_creeps = aim.creeps_count + grow_creeps;
if (enemy_creeps >= aim.level.player_max_count)
enemy_creeps = aim.level.player_max_count;
}
// определяем количество крипов с учетом бонуса защиты
const enemy_defence = enemy_creeps * (1 + aim.DefenseBonus);
// если получается в моей башне крипов больше + 10 на червя - идем на врага всей толпой
return enemy_defence + 5;
} catch (e) {
console.log('error', e);
return 10000
}
}
function check_nearest_enemy(my_building, enemy_buildings)
{
try{
if (!my_building || !enemy_buildings)
return false;
let sorted_enemy_buildings = game_map.get_nearest_towers(my_building.id, enemy_buildings);
// attack nearest critical enemy:
let dist = game_map.towers_distance(sorted_enemy_buildings[0], my_building.id);
if (dist && dist < CoreObj.criticalDistance)
{
console.log(`Critical enemy: ${sorted_enemy_buildings[0].id} for ${my_building.id} (dist: ${dist})`);
let count = count_per_dist(sorted_enemy_buildings[0], my_building);
console.log(`count_per_dist ${count}`);
sent_army_to_aim(my_building, sorted_enemy_buildings[0], count);
CoreObj.my_builds[my_building.id] = new MyBuild(my_building.id);
CoreObj.my_builds[my_building.id].id = my_building.id;
CoreObj.my_builds[my_building.id].reason = "critical";
return true;
}
return false;
}catch (e) {
console.log(e);
return false;
}
}
function single_attack(my_building, enemy_buildings, neutral_buildings)
{
try{
let towers, builds;
if (isIterable(neutral_buildings)) builds = [...neutral_buildings];
if (isIterable(enemy_buildings)) builds = [...builds, ...enemy_buildings];
towers = game_map.get_nearest_towers(my_building.id, builds);
// check first 3 towers:
for (var i = 0; i < towers.length && i < 3; i++)
{
let count = count_per_dist(towers[i], my_building);
console.log(`compare: ${my_building.creeps_count} >= ${count}`);
if (my_building.creeps_count >= count)
{
// ПОСТАВИТЬ НА УЧЕТ!!!!!!
CoreObj.my_builds[my_building.id] = new MyBuild(my_building.id);
CoreObj.my_builds[my_building.id].id = my_building.id;
CoreObj.my_builds[my_building.id].reason = "attack";
CoreObj.aims[towers[i].id] = new Aim(towers[i].id);
CoreObj.aims[towers[i].id].id = towers[i].id;
CoreObj.aims[towers[i].id].need_squads = count;
CoreObj.aims[towers[i].id].from.push(my_building.id);
sent_army_to_aim(my_building, towers[i], count)
break ;
}
}
}catch (e) {
console.log(e);
}
}
// групповая атака
function group_attack(my_buildings, enemy_buildings, neutral_buildings)
{
try{
random_attack(my_buildings, enemy_buildings, neutral_buildings)
}catch (e) {
console.log(e);
}
}
function group_towers(tower)
{
try{
CoreObj.groups.forEach(group => {
});
return [];
}catch (e) {
console.log(e);
}
}
function random_attack(my_buildings, enemy_buildings, neutral_buildings)
{
try{
var towers, builds, my_building;
if (isIterable(neutral_buildings)) builds = [...neutral_buildings];
if (isIterable(enemy_buildings)) builds = [...builds, ...enemy_buildings];
towers = game_map.get_nearest_towers(my_buildings[0].id, builds);
var min_dist = game_map.towers_distance(towers[0].id, my_buildings[0].id) || 4;
var max_count = 0;
var my_count = 0;
for (var i = 0; i < my_buildings.length; i++)
{
my_building = my_buildings[i];
console.log(my_buildings.length)
// максимальное необходимое количество крипов для захвата
let count = count_per_dist(towers[0], my_building);
let dist = game_map.towers_distance(towers[0].id, my_building.id);
if (dist < min_dist * 1.5)
{
max_count = (max_count < count) ? count : max_count;
my_count += my_building.creeps_count;
console.log(`adds ${my_building.creeps_count} to ${my_count}`)
console.log(`my ${my_count} > ${max_count} max?`)
// если отряд вышел, то добавляем его в my_builds
if (sent_army_to_aim(my_building, towers[0], 50))
{
console.log("check" + my_building);
if (!CoreObj.my_builds[my_building.id])
{
CoreObj.my_builds[my_building.id] = new MyBuild(my_building.id);
CoreObj.my_builds[my_building.id].id = my_building.id;
CoreObj.my_builds[my_building.id].reason = "attack";
}
if (!CoreObj.aims[towers[0].id])
{
CoreObj.aims[towers[0].id] = new Aim(towers[i].id);
CoreObj.aims[towers[0].id].id = towers[i].id;
CoreObj.aims[towers[0].id].need_squads = count;
CoreObj.aims[towers[0].id].from.push(my_building.id);
}else{
CoreObj.aims[towers[0].id].from.push(my_building.id)
}
}
}
if (my_count > max_count)
break ;
}
}catch (e) {
console.log(e);
}
}
function apply_abilities(state)
{
try{
const my_buildings = state.my_buildings();
const my_squads = state.my_squads();
const enemy_buildings = state.enemy_buildings();
// Применение абилки ускорение
if (my_squads.length > 3) {
if (state.ability_ready(AbilityType[0])) {
location = game_map.get_squad_center_position(my_squads[2]);
process.send(game_teams.my_her.speed_up(location));
}
}
// Чума
if (state.ability_ready(AbilityType[5])) {
let max_count = 0;
let id = enemy_buildings[0].id;
enemy_buildings.forEach(enemy_building => {
if (enemy_building.creeps_count > max_count && !CoreObj.aims[enemy_building.id])
{
max_count = enemy_building.creeps_count;
id = enemy_building.id;
}
});
console.log(`plague to enem(${max_count})`);
process.send(game_teams.my_her.plague(id));
}
// Обмен
if (state.ability_ready(AbilityType[6])) {
let max_count = 0;
let min_count = 100;
let enemy_id = enemy_buildings[0].id;
let my_id = my_buildings[0].id;
enemy_buildings.forEach(enemy_building => {
if (enemy_building.creeps_count > max_count && !CoreObj.aims[enemy_building.id])
{
max_count = enemy_building.creeps_count;
enemy_id = enemy_building.id;
}
});
my_buildings.forEach(my_building => {
if (my_building.creeps_count < min_count)
{
min_count = my_building.creeps_count;
my_id = my_building.id;
}
});
console.log(`change my(${min_count}) to enem(${max_count})`);
process.send(game_teams.my_her.exchange(enemy_id, my_id));
}
}catch (e) {
console.log(e);
}
}
function upgrade_towers(my_buildings)
{
try{
var ids = []
my_buildings.forEach(my_building => {
// Upgrade башни
if (my_buildings[0].level.id < game_params.tower_levels.length - 1) {
// Если хватает стоимости на upgrade
const update_coast = game_params.get_tower_level(my_buildings[0].level.id + 1).update_coast;
if (update_coast < my_buildings[0].creeps_count) {
process.send(game_teams.my_her.upgrade_tower(my_buildings[0].id));
my_buildings[0].creeps_count -= update_coast;
ids.push(my_building.id);
}
}
});
return ids;
}catch (e) {
console.log(e);
return ids;
}
}
function sample()
{
try{
if (my_squads.length > 4) {
if (state.ability_ready(AbilityType[0])) {
location = game_map.get_squad_center_position(my_squads[3]);
process.send(game_teams.my_her.speed_up(location));
}
}
}catch (e) {
console.log(e);
}
}
function firstLogic(my, neutral, enemy, my_squads, state)
{
try{
var my_tower = get_tower_by_id(my, CoreObj.firstMyID) ||
get_tower_by_id(my, CoreObj.firstEnemyID);
console.log(my_tower);
// STEP 0 - 1
if (CoreObj.firstFlag == 0 && my_tower)
{
console.log("s0-1");
let aims = game_map.get_nearest_towers(my_tower.id, neutral)
// let count = count_per_dist(aims[0], my_tower);
process.send(game_teams.my_her.move(my_tower.id, aims[0].id, 0.6));
process.send(game_teams.my_her.move(my_tower.id, enemy[0].id, 0.4));
CoreObj.firstFlag = 1;
}
// ALWAYS
// Применение абилки ускорение
if (my_squads.length > 3) {
if (state.ability_ready(AbilityType[0])) {
location = game_map.get_squad_center_position(my_squads[2]);
process.send(game_teams.my_her.speed_up(location));
}
}
if (CoreObj.firstFlag == 3 || CoreObj.tick >= 180)
CoreObj.firstFlag = 5;
// STEP 1 - 2
if (CoreObj.firstFlag == 1){
let towers = [...my, ...enemy];
let base1 = get_tower_by_id(towers, CoreObj.firstMyID);
let base2 = get_tower_by_id(towers, CoreObj.firstEnemyID);
let my_base = base2.player_color == CoreObj.player_color ? base2 : base1;
let en_base = base2.player_color == CoreObj.player_color ? base1 : base2;
console.log("s1-2");
if (my_base.creeps_count < 3 && state.ability_ready(AbilityType[6]) && (en_base.creeps_count > my_base.creeps_count))
{
process.send(game_teams.my_her.exchange(en_base.id, my_base.id));
CoreObj.firstFlag = 2;
}
}
// STEP 2 - 3
// my base build became the enemy's
if (CoreObj.firstFlag == 2)
{
let towers = [...my, ...enemy];
let base1 = get_tower_by_id(towers, CoreObj.firstMyID);
let base2 = get_tower_by_id(towers, CoreObj.firstEnemyID);
console.log("is chuma is available? " + state.ability_ready(AbilityType[5]))
// проверяем доступность абилки Чума
if (state.ability_ready(AbilityType[5])) {
if (base1.player_color == CoreObj.player_color)
process.send(game_teams.my_her.plague(base2.id));
else
process.send(game_teams.my_her.plague(base1.id));
}
CoreObj.firstFlag = 3;
}
}catch (e) {
console.log(e)
}
}
function sent_army_to_aim(start_pos, end_pos, my_count)
{
try{
if (start_pos && end_pos && start_pos.creeps_count > 6)
{
let count = my_count || (end_pos.creeps_count + 5)
if (start_pos.creeps_count * 0.25 > count)
process.send(game_teams.my_her.move(start_pos.id, end_pos.id, 0.25));
else if (start_pos.creeps_count * 0.5 > count)
process.send(game_teams.my_her.move(start_pos.id, end_pos.id, 0.5));
else if (start_pos.creeps_count * 0.75 > count)
process.send(game_teams.my_her.move(start_pos.id, end_pos.id, 0.75));
else
process.send(game_teams.my_her.move(start_pos.id, end_pos.id, 1));
return true;
}
return false;
}catch (e) {
console.log(e)
}
}
function get_tower_by_id(towers, id)
{
try{
for (var i = 0; i < towers.length; i++)
{
if (towers[i].id == id)
return towers[i];
}
return undefined;
}catch (e) {
console.log(e);
console.log("errr");
}
}
function isIterable(obj) {
// checks for null and undefined
if (obj == null) {
return false;
}
return typeof obj[Symbol.iterator] === 'function';
}
function Aim(id){
this.id = id;
this.total_squads = 0;
this.need_squads = 0;
this.link = null;
this.from = [];
}
function MyBuild(id){
this.id = id;
this.reason = 0;
}
/**
* Удаляет захваченные башни из списка целей
* Обновляет значения total_squads и need_squads
* Подсчитывает количество total_army
* @param {*} aims
* @param {*} state
*/
function update_aims(aims, my_builds, state)
{
console.log("UPDATE AIMS")
try{
const my_buildings = state.my_buildings();
const enemy_buildings = state.enemy_buildings();
if (Object.keys(aims))
{
my_buildings.forEach(my_building => {
if (aims[my_building.id])
{
console.log(`\taim[${my_building.id}] is our!`)
// оставляем только те id, которых нет в aims
for (tower_id in aims[my_building.id].from)
{
if (my_building[tower_id] !== undefined)
if (my_builds[aims[my_building.id].from].reason == 'attack')
delete my_builds[tower_id];
}
delete aims[my_building.id];
}
// CoreObj.total_army += my_building.creeps_count;
});
}
// удалить потерянные башни
if (Object.keys(my_builds))
{
enemy_buildings.forEach(enemy_building => {
if (my_builds[enemy_building.id])
{
delete my_builds[enemy_building.id];
}
});
}
// удалить зависшие башни
if (Object.keys(my_builds))
{
my_buildings.forEach(my_building => {
if (my_builds[my_building.id] && my_building.creeps_count >= 10)
{
delete my_builds[my_building.id];
}
});
}
}catch(e){
console.log("ERROR in upd");
console.log(CoreObj);
console.log(e);
}
return ;
} |
from bs4 import BeautifulSoup
from requests import get
from flask import Flask
app = Flask(__name__)
@app.route('/')
def helloWorld():
print('Hello, Scrapers!')
@app.route('/scrape')
def scrape():
url = 'https://www.basketball-reference.com/players/w/wadedw01.html'
response = get(url)
soup = BeautifulSoup(response.text, 'html.parser')
print(soup.findAll('div', class_='stats_pullout'))
|
s,v=map(int,input().split())
print(pow(s,v))
|
from typing import List
from pydantic import BaseModel
class Prediction(BaseModel):
filename: str
predicted: str
extracted_features: List
|
from modules import cell as c,\
explosive as exive,\
explosion as ex,\
wall_mutable as wm
class Bomb(c.Cell, exive.Explosive):
def __init__(self, position, timer, user):
self.timer = timer
self.ex_type = user.ex_type
self._position = position
self.user = user
self.range = user.bomb_range
@property
def position(self):
return self._position
def action(self, gamefield, tick_time):
self.timer -= tick_time
if self.is_explode_time:
self.explode(gamefield, self.ex_type, self.user)
@position.setter
def position(self, position):
self._position = position
def contact(self, user):
return
def explode(self, gamefield, ex_type, user):
gamefield[self._position.y][self._position.x] = ex.Explosion(
self._position, self.ex_type, self.user)
if self.user:
self.user.bombs_count += 1
directions = [(-1, 0), (0, -1), (1, 0), (0, 1)]
for direction in directions:
boom_range = self.range
i = 0
while i < boom_range:
y = self._position.y + direction[0] * (i + 1)
x = self._position.x + direction[1] * (i + 1)
if isinstance(gamefield[y][x], exive.Explosive):
cell = gamefield[y][x]
gamefield[y][x].explode(gamefield, self.ex_type, user)
if not cell.should_continue_explode(self.ex_type):
break
if isinstance(cell, wm.Wall_Mutable):
boom_range = i + 1 + cell.extra_range
else:
break
i += 1
@property
def is_explode_time(self):
return self.timer <= 0
def should_continue_explode(self, ex_type):
return False
@property
def extra_range(self):
return 0
def is_passable(self, user):
return (user.left_x == self.position.x or user.right_x == self.position.x)\
and (user.top_y == self.position.y or user.bot_y == self.position.y)
@property
def image_name(self):
return str.format("bomb{0}.png", self.ex_type.value)
|
#!/usr/bin/python3
"""Square Module"""
class Square():
"""Square.
Private instance attribute: size:
property def size(self).
property setter def size(self, value).
Instantiation with optional size.
Public instance method: def area(self).
"""
def __init__(self, size=0):
"""Constructor"""
self.size = size
def area(self):
"""returns the current square area"""
return self.__size ** 2
@property
def size(self):
"""retrieves the size"""
return self.__size
@size.setter
def size(self, value):
"""sets the size"""
if type(value) != int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def __eq__(self, next):
"""Equal."""
return self.__size == next
def __ne__(self, next):
"""Diferent."""
return self.__size != next
def __lt__(self, next):
"""Less than."""
return self.__size < next
def __le__(self, next):
"""Less or equal than."""
return self.__size <= next
def __gt__(self, next):
"""Greater than."""
return self.__size > next
def __ge__(self, next):
"""Greater or equal than."""
return self.__size >= next
|
class Point:
def __init__(self, x=0, y=0):
self.a = x
self.b = y
def __pow__(self, otherObj):
obj = Point()
obj.a = self.a ** otherObj.a
obj.b = self.b ** otherObj.b
return obj
def main():
object1 = Point(12, 13)
object2 = Point(2, 2)
object3 = object1 ** object2
print(object3.a)
print(object3.b)
if __name__ == "__main__":
main()
|
'''
Created on Jul 19, 2012
@author: Michele Sama (m.sama@puzzledev.com)
'''
import datetime
from django.template.defaultfilters import safe
from django.db.models.base import Model
from jom import factory as jom_factory
from django.template.loader import render_to_string
from types import NoneType
class JomField(object):
""" Define the base class for a field.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
self.name = name
self.instance = instance
self.readonly = readonly
self.factory = factory
def getValue(self):
return getattr(self.instance, self.name)
def setValue(self, value):
setattr(self.instance, self.name, value)
value = property(getValue, setValue)
def toString(self):
raise AssertionError(
"JomField is abstract")
def toJavascript(self):
raise AssertionError(
"JomField is abstract")
@classmethod
def renderField(self, clazz, name, readonly = False):
dictionary = {
'clazz': clazz,
'name': name,
'readonly': readonly
}
return render_to_string(
'jom/JomField.js', dictionary = dictionary)
class BooleanJomField(JomField):
""" Define a field wrapping a boolean.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
value = getattr(instance, name)
if not isinstance(value, bool):
raise AssertionError(
"Value should be a boolean. Found: %s." % value)
super(BooleanJomField, self).__init__(instance, name, readonly, factory)
def toString(self):
return self.value
def toJavascript(self):
return "true" if self.value else "false"
class NumeralJomField(JomField):
""" Define a field wrapping a numeral.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
value = getattr(instance, name)
if not isinstance(value, (int, long, float, NoneType)):
raise AssertionError(
"Value should be a number. Found: %s." % value)
super(NumeralJomField, self).__init__(instance, name, readonly, factory)
def toString(self):
return self.value
def toJavascript(self):
# marked safe to avoid comma separators
return safe(self.value)
class StringJomField(JomField):
""" Define a field wrapping a string.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
value = getattr(instance, name)
if not isinstance(value, (str, unicode, NoneType)):
value = getattr(instance, name)
raise AssertionError(
"Value should be a string. Found: %s." % value)
super(StringJomField, self).__init__(instance, name, readonly, factory)
def toString(self):
return self.value
def toJavascript(self):
# TODO(msama): handle tabs and new lines
value = self.value if self.value else ""
return safe("\"%s\"" % value.replace("\"", "\\\""))
class JavascriptJomField(JomField):
""" Define a field wrapping a string.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
value = getattr(instance, name)
if not isinstance(value, (str, unicode)):
raise AssertionError(
"Value should be a string. Found: %s." % value)
super(JavascriptJomField, self).__init__(instance, name, readonly, factory)
def toString(self):
return self.value
def toJavascript(self):
if self.value:
return self.value
else:
return "{}"
class UrlJomField(JomField):
""" Define a field wrapping a file.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
# TODO(msama): typechecking
super(UrlJomField, self).__init__(instance, name, readonly, factory)
def getValue(self):
try:
filefield = getattr(self.instance, self.name)
if filefield.name != None:
return filefield.url
else:
return ""
except ValueError:
return ""
def setValue(self, value):
filefield = getattr(self.instance, self.name)
filefield.name = value
value = property(getValue, setValue)
def toString(self):
return self.getValue()
def toJavascript(self):
return safe("\"%s\"" % self.getValue())
class DateJomField(JomField):
""" Define a field wrapping a boolean.
"""
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
value = getattr(instance, name)
if not isinstance(value, (datetime.date.Date,
datetime.time.Time, datetime.datetime.DateTime)):
raise AssertionError(
"Value should be a datetime. Found: %s." % value)
super(DateJomField, self).__init__(instance, name, readonly, factory)
def toString(self):
return self.value
def toJavascript(self):
return self.value
class ForeignKeyJomField(JomField):
def __init__(self, instance, name, readonly = False,
factory = jom_factory.JomFactory.default()):
for f in instance._meta.fields:
if f.name == name:
self.related = f.rel.to
if self.related == None:
raise AssertionError(
"name should be a related field")
super(ForeignKeyJomField, self).__init__(instance, name, readonly, factory)
def getValue(self):
try:
return getattr(self.instance, self.name)
except self.related.DoesNotExist:
return None
def setValue(self, value):
if value == None:
setattr(self.instance, self.name, None)
elif isinstance(value, int):
setattr(self.instance, self.name,
self.related.objects.get(id = value))
elif isinstance(value, (str, unicode)):
setattr(self.instance, self.name,
self.related.objects.get(id = int(value)))
elif isinstance(value, Model):
setattr(self.instance, self.name, value)
elif isinstance(value, dict):
jomInstance = self.factory.update(value)
setattr(self.instance, self.name, jomInstance.instance)
else:
raise AttributeError(
"%s (%s), should be a instance of Model or a dict." % (value, type(value)))
value = property(getValue, setValue)
def toString(self):
return self.value.__srt__()
def toJavascript(self):
return self.value.id
@classmethod
def renderField(self, clazz, name, fk_clazz, readonly = False):
dictionary = {
'clazz': clazz,
'name': name,
'fk_clazz': fk_clazz,
'readonly': readonly
}
return render_to_string(
'jom/ForeignKeyJomField.js', dictionary = dictionary) |
n=int(input())
l=list(map(int,input().split()))
v=[]
for i in range(n):
if i%2==0:
if l[i]%2!=0:
v.append(l[i])
elif i%2!=0:
if l[i]%2==0:
v.append(l[i])
for i in v:
print(i,end=" ")
|
from discord.ext import commands
import re
class Maths():
"""Some mathematical commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(description="Add two numbers together",
brief="Addition")
async def add(self, left : float, right : float):
await self.bot.say(round((left + right), 2))
@commands.command(description="Calculates the square of a number",
brief="Squaring")
async def square(self, number: float):
squared_value = round((pow(number, 2)), 2)
await self.bot.say(str(number) + "² = " + str(squared_value))
@commands.command(description="Subtract two numbers",
brief="Subtraction")
async def subtract(self, left: float, right: float):
await self.bot.say(round((left - right), 2))
@commands.command(description="Multiply two numbers",
brief="Multiplication")
async def multiply(self, left: float, right: float):
await self.bot.say(round((left * right), 2))
@commands.command(description="Divide a number by another number",
brief="Division")
async def divide(self, dividend: float, divisor: float):
if divisor == 0:
await self.bot.say('Division by 0 is not allowed.')
else:
await self.bot.say(round((dividend / divisor), 2))
@commands.command(description="Calculate a percentage",
brief="Percentage")
async def percentage(self, part: float, whole: float):
if whole == 0:
await self.bot.say("Can't have a percentage of an empty whole")
else:
await self.bot.say('{0:.2f}%'.format((part / whole) * 100))
@commands.command(description="Calculate how much a percentage equates to",
brief="Percent of")
async def percentof(self, percentage, whole: float):
percentage_re = float(re.sub('[^0-9.]', '', percentage))
await self.bot.say(round((percentage_re / 100) * whole, 2))
def setup(bot):
bot.add_cog(Maths(bot))
|
""" Geometry with automated choice of used format. """
from .base import SeismicGeometry
from .blosc import BloscFile
|
# -*- coding: utf-8 -*-
import statistics
import scipy.stats as sts
from scipy.stats import norm
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
print("Homework 1: Hailey Kryszewski 124001456")
print("Question 1:\n")
print("Mean Median and Mode for FE_H")
astroValuesFE=[-.60,-.63,-.57,-.53,-.6,-.6,-.54,-.63,-.49,-.63,-.68,-.51,-.47,-.54,-.58,-.60,-.69,-.42,-.60,-.55,-.38,-.81,-.48,-.56,-.56,-.53,-.55,-.47,-.54,-.58,-.60,-.69,-.42,-.60,-.55,-.38,-.81,-.48,-.56,-.49,-.32,-.52,-.53,-.7,-.43,-.4,-.3,-.3]
meanVal= statistics.mean(astroValuesFE)
print("The mean value of FE_H is: ", meanVal)
medianVal= statistics.median(astroValuesFE)
print("The median value of FE_H is: ", medianVal)
modeVal= statistics.mode(astroValuesFE)
print("The mode value of FE_H is: ", modeVal)
stdVal= statistics.stdev(astroValuesFE)
print("The standard deviation of FE_H is: ", stdVal)
print("Mean Median and Mode for oRV")
astroValuesoRV=[-5.2,-4.4,-5.5,-5.2,-5.9,-7.4,-7.1,-4.8,-5.8,-6.1,-5.4,-3.2,-4.7,-6.6,-4.6,-3.9,-5.6,-5.2,-4.7,-3.9,-5.3,-7.0,-5.4,-2.3,-5.3,-6.7,-5.6,-5.3,-6.8,-5.5,-5.0,-6.2,-8.0,-4.7,-4.6,-5.2,-6.0,-5.8]
meanVal= statistics.mean(astroValuesoRV)
print("The mean value of oRV is: ", meanVal)
medianVal= statistics.median(astroValuesoRV)
print("The median value of oRV is: ", medianVal)
modeVal= statistics.mode(astroValuesoRV)
print("The mode value of oRV is: ", modeVal)
stdVal= statistics.stdev(astroValuesoRV)
print("The standard deviation of oRV is: ", stdVal)
print("Mean Median and Mode for PLX")
astroValuespLX=[0.089,0.089,0.088,0.095,0.090,0.086,0.076,0.092]
meanVal= statistics.mean(astroValuespLX)
print("The mean value of PLX is: ", meanVal)
medianVal= statistics.median(astroValuespLX)
print("The median value of PLX is: ", medianVal)
modeVal= statistics.mode(astroValuespLX)
print("The mode value of PLX is: ", modeVal)
stdVal= statistics.stdev(astroValuespLX)
print("The standard deviation of PLX is: ", stdVal)
print("Mean Median and Mode for PM")
astroValuesPM=[-1093,-1093,-1086,-1102,-1098,-1110]
meanVal= statistics.mean(astroValuesPM)
print("The mean value of PLX is: ", meanVal)
medianVal= statistics.median(astroValuesPM)
print("The median value of PLX is: ", medianVal)
modeVal= statistics.mode(astroValuesPM)
print("The mode value of PLX is: ", modeVal)
stdVal= statistics.stdev(astroValuesPM)
print("The standard deviation of PLX is: ", stdVal)
print("Mean Median and Mode for Velocities")
astroValuesV=[-4.9,-5.2,-5.19,-5.24,-4.6,-5.2,-5.5,-4.4,-5.8,-6,-5.2,-4.6,-4.7,-8,-6.2,-5.2,-5,-6.8,-5.3,-5.6,-5.5,-5.3,-2.3,-6.7,-5.4,-7,-5.3,-3.9,-4.7,-5.2,-5.6,-3.9,-4.6,-6.6,-4.7,-3.2,-5.4,-6.1,-5.8,4.8,-7.1,-5.9,-7.4]
meanVal= statistics.mean(astroValuesV)
print("The mean value of PLX is: ", meanVal)
medianVal= statistics.median(astroValuesV)
print("The median value of PLX is: ", medianVal)
modeVal= statistics.mode(astroValuesV)
print("The mode value of PLX is: ", modeVal)
stdVal= statistics.stdev(astroValuesV)
print("The standard deviation of PLX is: ", stdVal)
print("Question 2:\n")
print("The fe_h value represents the metallicity. In other words, this value represents what part of the mass is not Hydrogen or Helium.\n")
print("The oRV value represents the radial velocity of the star in orbit. It is the value of the rate of change in distance of a star or celestial body and a point.\n")
print("The PLX value represents the parallax to a given star.\n")
print("The PM value represents the proper motion of the star is the movement of a closer star across the sky in relation to further away stars.\n")
print("The velocities represent how fast the star is moving.\n")
print("Question 3:\n")
sampVar= statistics.variance(astroValuesPM)
print("The sample variance of PM is ", sampVar)
popVar= statistics.pvariance(astroValuesPM)
print("The population variance of PM is ", popVar, "\n")
print("Python defaults to the sample variance.\n")
print("Question 4 and 5:\n")
feMean=np.mean(astroValuesFE)
feStd=np.std(astroValuesFE)
astroValuesFE.sort()
pmMean=np.mean(astroValuesPM)
pmStd=np.std(astroValuesPM)
astroValuesPM.sort()
vMean=np.mean(astroValuesV)
vStd=np.std(astroValuesV)
astroValuesV.sort()
orvMean=np.mean(astroValuesoRV)
orvStd=np.std(astroValuesoRV)
astroValuesoRV.sort()
plxMean=statistics.mean(astroValuespLX)
plxStd=statistics.stdev(astroValuespLX)
astroValuespLX.sort()
plt.subplot(3, 2, 1)
(feMean,feStd)=norm.fit(astroValuesFE)
n,bins, patches=plt.hist(astroValuesFE,50,normed=True,facecolor='green',alpha=.75)
y=mlab.normpdf(bins,feMean,feStd)
l=plt.plot(bins,y,'r--',linewidth=2)
plt.xlabel('fe_h Values')
plt.ylabel('Probability')
plt.grid(True)
plt.subplots_adjust(hspace=.35)
plt.subplot(3, 2, 3)
(pmMean,pmStd)=norm.fit(astroValuesPM)
n,bins, patches=plt.hist(astroValuesPM,6,normed=True,facecolor='blue',alpha=.75)
y=mlab.normpdf(bins,pmMean,pmStd)
l=plt.plot(bins,y,'r--',linewidth=2)
plt.xlabel('PM Values (mas*yr^-1)')
plt.ylabel('Probability')
plt.grid(True)
plt.subplots_adjust(hspace=.35)
plt.subplot(3, 2, 4)
(vMean,vStd)=norm.fit(astroValuesV)
n,bins, patches=plt.hist(astroValuesV,40,normed=True,facecolor='pink',alpha=.75)
y=mlab.normpdf(bins,vMean,vStd)
l=plt.plot(bins,y,'r--',linewidth=2)
plt.xlabel('Velocity Values')
plt.ylabel('Probability')
plt.grid(True)
plt.subplots_adjust(hspace=.35)
plt.subplot(3, 2, 5)
(orvMean,orvStd)=norm.fit(astroValuesoRV)
n,bins, patches=plt.hist(astroValuesoRV,50,normed=True,facecolor='green',alpha=.75)
y=mlab.normpdf(bins,orvMean,orvStd)
l=plt.plot(bins,y,'r--',linewidth=2)
plt.xlabel('ORV Values (km/s)')
plt.ylabel('Probability')
plt.grid(True)
plt.subplots_adjust(hspace=.5)
plt.subplot(3, 2, 6)
(plxMean,plxStd)=norm.fit(astroValuespLX)
n,bins, patches=plt.hist(astroValuespLX,10,normed=True,facecolor='orange',alpha=.75)
y=mlab.normpdf(bins,plxMean,plxStd)
l=plt.plot(bins,y,'r--',linewidth=2)
plt.xlabel('PLX Values (arcsec)')
plt.ylabel('Probability')
plt.grid(True)
plt.tight_layout()
plt.subplots_adjust(hspace=.5)
plt.show()
print('The data appears to follow a normal distribution.\n')
print('Question 6')
print('Assuming the same astronomer using the same telescope, the statistical error will result from the difference of the amount of measurements taken. For example, the fe_h list of values has 48 elements where as the PLX list only has 8. This is because fe_h has a broader base of values to give a more accurate distribution. In order of statistical error from most statistically error prone to least statistically error prone is PM, PLX, oRV, Velocities, fe_h. The systematic error of the system is much easier to predict since it is the same astronomer and the same instrument. The systematic error should be uniform across all measurements.') |
draws_bounding_box = False
|
import calendar
year = 2021
for month in range(1, 13):
print(calendar.month(year, month))
|
def df1(max):
n,a,b = 0, 0, 1
while n < max:
print(b)
b,a = a + b, b
n = n + 1
return 'done'
|
class Solution:
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
来自LeetCode的答案 写的非常的好
https://leetcode.com/problems/longest-valid-parentheses/discuss/14126/My-O(n)-solution-using-a-stack
"""
stack, res, s = [0], 0, ')'+s
for i in range(1, len(s)):
if s[i] == ')' and s[stack[-1]] == '(':
stack.pop()
res = max(res, i - stack[-1])
else:
stack.append(i)
return res
class Solution_1:
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
上面的改进版
https://leetcode.com/problems/longest-valid-parentheses/discuss/14167/Simple-JAVA-solution-O(n)-time-one-stack
"""
stack, res = [-1], 0
for i in range(0, len(s)):
if s[i] == ')' and stack[-1] != -1 and s[stack[-1]] == '(':
stack.pop()
res = max(res, i - stack[-1])
else:
stack.append(i)
return res
class Solution_2:
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
dp的写法 解法来自LeetCode
https://leetcode.com/problems/longest-valid-parentheses/discuss/14133/My-DP-O(n)-solution-without-using-stack
"""
dp, s = [0], ')'+s
for i in range(1, len(s)):
if s[i] == ')' and s[i - dp[-1] - 1] == '(':
dp.append(dp[-1] + 2 + dp[-2-dp[-1]])
else:
dp.append(0)
return max(dp)
|
import math
from new.rules import *
class Point(object):
x = 0
y = 0
def __init__(self, x, y):
self.x = x
self.y = y
class Branch(object):
color = Color(0, 0, 0)
def __init__(self, angle, start_point, end_point):
self.length = 10
self.angle = angle
self.start_point = start_point
self.end_point = end_point
class Paint(object):
tree_string = ''
angle = 270
branch = []
save_point = []
save_angle = []
#start_point = Point(150, 250)
save = False
length = 10
def __init__(self, tree, temp_point):
self.tree = tree
self.start_point = temp_point
self.temp_point = temp_point
def next_generation(self):
if self.tree_string is None or self.tree_string is '':
self.tree_string = self.tree.axiom
rules = self.tree.rules
variables = self.tree.variables
temp_string = ''
for item in self.tree_string:
if item in variables:
temp_string += rules[item]
else:
temp_string += item
self.tree_string = temp_string
self.start_point = self.temp_point
self.angle = 270
print(self.tree_string)
self.make()
def make(self):
temp = []
for item in self.tree_string:
if item == '-':
self.angle -= self.tree.angle
if item == '+':
self.angle += self.tree.angle
if item == 'F' or item == 'A' or item == 'B' or item == 'G' or item == 'X':
self.angle = self.angle % 360
start_point = self.start_point
end_x = start_point.x + self.length * math.cos(math.pi * (self.angle / 180))
end_y = start_point.y + self.length * math.sin(math.pi * (self.angle / 180))
end_point = Point(end_x, end_y)
temp.append(Branch(self.angle, start_point, end_point))
self.start_point = end_point
if item == '[':
self.save_point.append(Point(self.start_point.x, self.start_point.y))
self.save_angle.append(self.angle)
if item == ']':
p = self.save_point.pop()
self.start_point = p
angle = self.save_angle.pop()
self.angle = angle
self.branch = temp
if __name__ == '__main__':
p = Paint(KochCurve())
p.next_generation()
p.next_generation()
|
#!/proj/sot/ska3/flight/bin/python
#####################################################################################
# #
# create_interactive_page.py: create interactive html page for a given msid #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 03, 2021 #
# #
#####################################################################################
import os
import sys
import re
import string
import time
import numpy
import astropy.io.fits as pyfits
import Chandra.Time
#
#--- interactive plotting module
#
import plotly.express as px
import plotly.graph_objects as go
#
#--- reading directory list
#
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import run_fetch as rf
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
import envelope_common_function as ecf #---- collection of functions used in envelope fitting
import read_limit_table as rlt #---- read limit table and craete msid<--->limit dict
#
#--- other settings
#
na = 'na'
#
#--- read category data
#
cfile = house_keeping + 'sub_html_list_all'
category_list = mcf.read_data_file(cfile)
#
#--- set several values used in the plots
#
color_table = ['blue', 'red', '#FFA500']
css = """
p{
text-align:left;
}
"""
#
#--- get dictionaries of msid<-->unit and msid<-->description
#
[udict, ddict] = ecf.read_unit_list()
web_address = 'https://' + web_address
#
#--- alias dictionary
#
afile = house_keeping + 'msid_alias'
data = mcf.read_data_file(afile)
alias = {}
alias2 = {}
for ent in data:
atemp = re.split('\s+', ent)
alias[atemp[0]] = atemp[1]
alias2[atemp[1]] = atemp[0]
#
#--- a list of those with sub groups
#
sub_list_file = house_keeping + 'sub_group_list'
sub_group_list = mcf.read_data_file(sub_list_file)
#-------------------------------------------------------------------------------------------
#-- create_interactive_page: update all msid listed in msid_list --
#-------------------------------------------------------------------------------------------
def create_interactive_page(msid, group, mtype, start, stop, step):
"""
create an interactive html page for a given msid
input: msid --- msid
group --- group name
mtype --- mid, mde, min, or max
start --- start time
stop --- stop time
step --- bin size in seconds
"""
start = ecf.check_time_format(start)
stop = ecf.check_time_format(stop)
#
#--- create msid <---> unit dictionary
#
[udict, ddict] = ecf.read_unit_list()
#
#--- read mta database
#
mta_db = ecf.read_mta_database()
#
#--- read mta msid <---> sql msid conversion list
#
mta_cross = ecf.read_cross_check_table()
#
#--- get limit data table for the msid
#
try:
uck = udict[msid]
if uck.lower() == 'k':
tchk = 1
else:
tchk = ecf.convert_unit_indicator(uchk)
except:
tchk = 0
glim = make_glim(msid)
#
#--- extract data from archive
#
chk = 0
try:
[ttime, tdata] = rf.get_data(msid, start, stop)
except:
#
#--- if no data in archive, try mta local database
#
try:
[ttime, tdata] = get_mta_fits_data(msid, group, start, stop)
#
#--- if it is also failed, return the empty data set
#
except:
chk = 1
ttime = []
tdata = []
#
#--- only short_p can change step size (by setting "step")
#
if chk == 0:
data_p = process_day_data(msid, ttime, tdata, glim, step=step)
else:
data_p = 'na'
#
#--- create interactive html page
#
create_html_page(msid, group, data_p, mtype, step)
#----------------------------------------------------------------------------------------
#-- create_html_page: create indivisual html pages for all msids in database --
#----------------------------------------------------------------------------------------
def create_html_page(msid, group, data_p, mtype, bin_size):
"""
"""
try:
unit = udict[msid]
descrip = ddict[msid]
except:
unit = ''
descrip = ''
#
#--- pdata is two dim array of data (see read_data for details). flist is sub category
#--- of each data set
#
if data_p == 'na':
pout = '<h1 style="padding-top:40px;padding-bottom:40px;">NO DATA FOUND</h1>\n'
else:
[pdata, byear] = read_msid_data_full(data_p, msid)
#
#--- create the plot
#
create_trend_plots(msid, group, pdata, byear, unit, 'week', mtype)
#--------------------------------------------------------------------------------
#-- create_trend_plots: create interactive trend plot ---
#--------------------------------------------------------------------------------
def create_trend_plots(msid, group, pdata, byear, unit, ltype, mtype):
"""
create static and interactive trend plot
input: msid --- msid
group --- the gruop name to which msid belogs
pdata --- a list of arrays of data; see read_data for details
year --- a base year for the short term plot
unit --- unit of msid
ltype --- 'short' or 'long' --- period length indicator
mtype --- 'mid', 'med', 'min', or 'max' --- data type indicator
output: pout --- plot in html format
"""
if not (len(pdata) > 0 and len(pdata[0]) > 0):
print(msid + ': empty data file')
return na
if len(pdata[0]) < 10:
return False
#
#--- get a data position of mtype data in pdata
#
[pos, cname] = select_data_position(mtype)
#
#--- column name
#
xname = 'Time '
if ltype == 'long':
xname = xname + ' (Year)'
else:
xname = xname + ' (Ydate Year: ' + str(byear) + ')'
mname = msid.upper()
if unit != '':
#
#--- special treatment for temp unit "F"
#
if unit == 'F':
unit = 'K'
mname = mname + ' (' + unit + ')'
if mtype == 'mid':
mnam = ': Mean'
elif mtype == 'med':
mnam = ': Median'
elif mtype == 'min':
mnam = ': Minimum'
elif mtype == 'max':
mnam = ': Maximum'
else:
mnam = ''
mname = mname + ' ' + mnam
colnames = ['Time', '# of Data', 'Mean', 'Median', 'Sadnard Deviation', \
'Min', 'Max', '% of Lower Yellow Violation', \
'% of Upper Yellow Violation', '% of Lower Red Violation',\
'% of Upper Red Violation', 'Lower Yellow Limit', \
'Upper Yellow Limit', 'Lower Red Limit', 'Upper Red Limit']
#
#--- set data frame
#
p_dict= {}
for k in range(0, 2):
p_dict[colnames[k]] = pdata[k]
for k in range(2, 15):
p_dict[colnames[k]] = pdata[k+2]
p_dict[colnames[0]] = shorten_digit(p_dict[colnames[0]])
for k in range(2, 7):
p_dict[colnames[k]] = shorten_digit(p_dict[colnames[k]])
#
#--- get a data position of mtype data in pdata
#
[pos, cname] = select_data_position(mtype)
#
#--- set plotting ranges
#
[xmin, xmax, xpos] = set_x_plot_range(pdata[0], ltype)
xchk = xmax - xmin
#
#--- set warning area range lists
#
[ymin, ymax, ypos] = set_y_plot_range(pdata[0], pdata[pos], ltype)
[time_save, rb1_save, rb2_save, yb2_save, yt1_save, yt2_save, rt2_save] \
= set_warning_area(msid, xmin, xmax, ymin, ymax, byear)
fig= px.scatter(p_dict, x=colnames[0], y=colnames[pos-2], hover_data=colnames,\
labels={colnames[0]:xname, colnames[pos-2]:mname} )
fig.update_layout(yaxis_range=[ymin,ymax])
#
#---- bottom warning area
#
fig.add_trace(go.Scatter(x=time_save, y=rb1_save, fill = None,\
opacity=0.3, mode='lines'))
fig.add_trace(go.Scatter(x=time_save, y=rb2_save, fill='tonexty',\
opacity=0.3, mode='none', fillcolor='rgba(255,0,0,0.3)'))
fig.add_trace(go.Scatter(x=time_save, y=yb2_save, fill='tonexty',\
opacity=0.3, mode='none', fillcolor='rgba(255,255,0,0.3)'))
#
#--- top warning area
#
fig.add_trace(go.Scatter(x=time_save, y=yt1_save, fill = 'tonexty',\
opacity=0.0, mode='none', fillcolor='rgba(255,255,0,0.0)'))
fig.add_trace(go.Scatter(x=time_save, y=yt2_save, fill='tonexty', \
opacity=0.3, mode='none', fillcolor='rgba(255,255,0,0.3)'))
fig.add_trace(go.Scatter(x=time_save, y=rt2_save, fill='tonexty',\
opacity=0.3, mode='none', fillcolor='rgba(255,0,0,0.3)'))
fig.update_layout(yaxis=dict(range=[ymin, ymax]))
fig.layout.update(showlegend=False)
hname = web_dir + 'Interactive/' + msid + '_inter_avg.html'
fig.write_html(hname)
#----------------------------------------------------------------------------------
#-- drop_suffix: drop suffix of msid (eps. those of HRC msids) --
#----------------------------------------------------------------------------------
def drop_suffix(msid):
"""
hrc has 4 different categories (all, hrc i, hrc s, off); use the same limit range
input: msid --- msid
output: pmsid --- msid without suffix
"""
pmsid = msid.replace('_i^', '')
pmsid = pmsid.replace('_s^', '')
pmsid = pmsid.replace('_off^', '')
return pmsid
#----------------------------------------------------------------------------------
#-- set_warning_area: set yellow and red violation zones --
#----------------------------------------------------------------------------------
def set_warning_area_xxx(pdata, xmin, xmax, ymin, ymax):
"""
set yellow and red violation zones
input: pdata --- a two dimensional array of data (see read_data)
xmin --- xmin
xmax --- xmax
ymin --- ymin
ymax --- ymax
output: a list of lists:
time_save --- time list
rb1_save --- lower boundary of the bottom red area
rb2_save --- top boundary of the bottom red area
yb1_save --- lower boundary of the bottom yellow area
yb2_save --- top boundary of the bottom yellow area
yt1_save --- lower boundary of the top yellow area
yt2_save --- top boundary of the top yellow area
rt1_save --- lower boundary of the top red area
rt2_save --- top boundary of the top red area
"""
l_len = len(pdata[0]) + 2
#
#--- filling up the beginning of the plot to the end of the plot
#
aa = numpy.array([xmin])
bb = numpy.array([xmax])
time_save = three_array_add(aa, pdata[0], bb)
if pdata[13][-1] < ymin:
brbnd = pdata[13][-1]
else:
brbnd = ymin
if pdata[16][-1] > ymax:
trbnd = pdata[13][-1]
else:
trbnd = ymax
brbnd = 0.0
if brbnd > ymin:
brbnd = ymin
trbnd = 9e9
if trbnd < ymax:
trbnd = ymax
rb1_save = [brbnd] * l_len
rb2_save = adjust_lim_list(pdata[15])
yb1_save = rb2_save
yb2_save = adjust_lim_list(pdata[13])
yt1_save = adjust_lim_list(pdata[14])
yt2_save = adjust_lim_list(pdata[16])
rt1_save = yt2_save
rt2_save = [trbnd] *l_len
return [time_save, rb1_save, rb2_save, yb1_save, yb2_save, \
yt1_save, yt2_save, rt1_save, rt2_save]
#----------------------------------------------------------------------------------
#-- adjust_lim_list: adjust the limit area so that it covers xmin to xmax --
#----------------------------------------------------------------------------------
def adjust_lim_list(alist):
"""
adjust the limit area so that it covers xmin to xmax
input: alist --- data list
output: slist --- adjusted list
"""
#
#--- some data has open limit at beginning; fill them
#
val = alist[0]
pos = 0
for k in range(0, len(alist)):
if abs(alist[k]) >= 9e6:
continue
if abs(alist[k]) == 999:
continue
if abs(alist[k]) == 998:
continue
val = alist[k]
pos = k
break
if pos > 0:
for k in range(0, pos+1):
alist[k] = val
#
#--- make sure that the area covers from xmin to xmax
#
aa = numpy.array([val])
bb = numpy.array([alist[-1]])
slist = three_array_add(aa, alist, bb)
#
#--- special adjustment for the no limit cases
#
alist = list(slist)
slist = []
for ent in alist:
if abs(ent) >= 9e6:
slist.append(ent/abs(ent) * 9e12)
elif abs(int(ent)) in [998,999]:
slist.append(ent/abs(ent) * 9e12)
else:
slist.append(ent)
slist = numpy.array(slist)
return slist
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
def three_array_add(a1, a2, a3):
slist = numpy.concatenate((a1, a2))
slist = numpy.concatenate((slist, a3))
return slist
#----------------------------------------------------------------------------------
#-- read_msid_data_full: read the data of msid ---
#----------------------------------------------------------------------------------
def read_msid_data_full(data_p, msid):
"""
read the data of msid
input: data_p --- a list of lists of data
msid --- msid
output: pdata --- a two dimensional array of data
xtime = pdata[0]
dnum = pdata[1]
start = pdata[2]
stop = pdata[3]
avg = pdata[4]
med = pdata[5]
std = pdata[6]
dmin = pdata[7]
dmax = pdata[8]
ylow = pdata[9]
ytop = pdata[10]
rlow = pdata[11]
rtop = pdata[12]
yl_lim = pdata[13]
yu_lim = pdata[14]
rl_lim = pdata[15]
ru_lim = pdata[16]
pcolor = pdata[17] --- 0, 1, or 2: see color_table at beginning
byear --- base year for short term plot
"""
today = ecf.find_current_stime()
dtime = data_p[0]
dnum = data_p[1]
avg = data_p[2]
med = data_p[3]
std = data_p[4]
dmin = data_p[5]
dmax = data_p[6]
ylow = data_p[7]
ytop = data_p[8]
rlow = data_p[9]
rtop = data_p[10]
yl_lim = data_p[11]
yu_lim = data_p[12]
rl_lim = data_p[13]
ru_lim = data_p[14]
skp = 3
dtime = dtime[0::skp]
dnum = dnum[0::skp]
avg = avg[0::skp]
med = med[0::skp]
std = std[0::skp]
dmin = dmin[0::skp]
dmax = dmax[0::skp]
ylow = ylow[0::skp]
ytop = ytop[0::skp]
rlow = rlow[0::skp]
rtop = rtop[0::skp]
yl_lim = yl_lim[0::skp]
yu_lim = yu_lim[0::skp]
rl_lim = rl_lim[0::skp]
ru_lim = ru_lim[0::skp]
#
out = Chandra.Time.DateTime(dtime[2]).date
atemp = re.split(':', out)
byear = int(float(atemp[0]))
xtime = []
for k in range(0, len(dtime)):
yday = chandratime_to_yday(dtime[k], byear)
xtime.append(yday)
start = []
stop = []
pcolor = []
rm_id = []
for k in range(0, len(xtime)):
if k > 0:
tstart = 0.5 * ( float(xtime[k-1] + float(xtime[k])))
tstop = float(xtime[k]) + 0.5 * (float(xtime[k]) - float(xtime[k-1]))
else:
tstart = float(xtime[k]) - 0.5 * (float(xtime[k+1]) - float(xtime[k]))
tstop = float(xtime[k]) + 0.5 * (float(xtime[k+1]) - float(xtime[k]))
start.append(tstart)
stop.append(tstop)
if abs(yl_lim[k]) > 6e6:
pcolor.append(0)
else:
if (avg[k] not in [998, 999]) and ((avg[k] > ru_lim[k]) or (rtop[k] > 0.7)):
pcolor.append(1)
elif (avg[k] not in [-999, -998]) and ((avg[k] < rl_lim[k]) or (rlow[k] > 0.7)):
pcolor.append(1)
elif (avg[k] not in [998, 999]) and ((avg[k] > yu_lim[k]) or (ytop[k] > 0.7)):
pcolor.append(2)
elif (avg[k] not in [-999, -998]) and ((avg[k] < yl_lim[k]) or (ylow[k] > 0.7)):
pcolor.append(2)
else:
pcolor.append(0)
if dmax[k] > 9.0e8 or dmin[k] < -9.0e8:
rm_id.append(k)
#
#--- if the avg is totally flat, the plot wil bust; so change tiny bit at the last entry
#
if len(avg) > 0:
test = numpy.std(avg)
else:
test = 0
if test == 0:
alen = len(avg) - 1
avg[alen] = avg[alen] * 1.0001
pcolor = numpy.array(pcolor)
plist = [xtime, dnum, start, stop, avg, med, std, \
dmin, dmax, ylow, ytop, rlow, rtop, yl_lim, yu_lim, rl_lim, ru_lim, pcolor]
#
#--- if there is extremely large values, drop them
#
rm_rate = float(len(rm_id)) / float(len(xtime))
if rm_rate < 0.1:
plist = remove_extreme(plist, rm_id)
#
#--- convert into numpy array then all to float entry
#
pdata = numpy.array(plist)
pdata = pdata.astype(float)
return [pdata, byear]
#----------------------------------------------------------------------------------
#-- remove_extreme: remove the elements of the lists by given indecies --
#----------------------------------------------------------------------------------
def remove_extreme(plist, rm_id):
"""
remove the elements of the lists by given indecies
input: plist --- a list of lists
rm_id --- a list of indecies to be removed
output: u_lsit --- a list of updated lists
"""
u_list = []
for alist in plist:
new_a = numpy.delete(numpy.array(alist), rm_id)
u_list.append(new_a)
return u_list
#----------------------------------------------------------------------------------
#-- convert_stime_into_year: convert time in seconds from 1998.1.1 to fractional year
#----------------------------------------------------------------------------------
def convert_stime_into_year(stime):
"""
convert time in seconds from 1998.1.1 to fractional year
input: stime --- time in seconds from 1998.1.1
output: ytime --- time in fractional year
year --- year
base --- the number of the days in that year, either 365 or 366
"""
date = Chandra.Time.DateTime(stime)
year = float(date.year)
yday = float(date.yday)
hrs = float(date.hour)
mins = float(date.min)
secs = float(date.sec)
if mcf.is_leapyear(year):
base = 366
else:
base = 365
ytime = year + (yday + hrs / 24.0 + mins / 1440.0 + secs / 86400.0) / base
return [ytime, year, base]
#----------------------------------------------------------------------------------
#-- set_x_range: find plotting x range ---
#----------------------------------------------------------------------------------
def set_x_plot_range(x, ltype):
"""
setting x plotting range
input: x --- a list of x values
ltype --- data type; week, short, one, five, long
output: xmin --- xmin
xmax --- xmax
xpos --- x position of the text to be placed
"""
if ltype == 'long':
xmin = 1999
xmax = ecf.current_time() + 1
xmax = int(xmax)
elif ltype == 'five':
xmax = ecf.current_time() + 1
xmax = "%4.1f" % round(xmax, 1)
xmax = int(float(xmax))
xmin = xmax - 6.0
xmin = "%4d" % round(xmin, 1)
xmin = int(float(xmin))
elif ltype == 'short':
xmax = max(x)
xmax = "%4.1f" % round(xmax, 1)
xmax = int(float(xmax))
xmin = xmax - 90.0
xmin = "%4d" % round(xmin, 1)
xmin = int(float(xmin))
xmax += 10
else:
xmin = min(x)
xmax = max(x)
xdff = xmax - xmin
xmin -= 0.01 * xdff
xmax += 0.06 * xdff
xmin = 0.1 * (int(10*xmin) -1)
xmax = 0.1 * (int(10*xmax) +1)
xdiff = xmax - xmin
xpos = xmin + 0.05 * xdiff
if ltype =='':
xpos = xmax - 0.1 * xdiff
return [xmin, xmax, xpos]
#----------------------------------------------------------------------------------
#-- set_y_plot_range: find plotting y range ---
#----------------------------------------------------------------------------------
def set_y_plot_range(x, y=[], ltype=''):
"""
find plotting y range
input: x --- a list of y if only one array is given; otherwise a list of x
y --- a list of y data if it is given
ltype --- week, short, one, five, long
output: [ymin, ymax, ypos]
"""
if y != []:
#
#--- remove all dummy values and the values outside of the range
#
udata = []
for k in range(0, len(x)):
if y[k] in [-999, -998,-99, 99, 998, 999]:
continue
else:
udata.append(y[k])
else:
udata = []
for k in range(0, len(x)):
if x[k] in [-999, -998,-99, 99, 998, 999]:
continue
else:
udata.append(x[k])
#
#--- remove possible extreme outlayers from both ends before getting min and max
#
udata.sort()
lcnt = len(udata)
p = int(0.02 * lcnt)
test = udata[p:lcnt-p]
test = udata
ymin = min(test)
ymax = max(test)
if ymin == ymax:
ymax = ymin + 0.5
ymin = ymin - 0.5
else:
ydiff = ymax - ymin
ymin -= 0.2 * ydiff
ymax += 0.2 * ydiff
ydiff = ymax - ymin
ypos = ymax - 0.1 * ydiff
return [ymin, ymax, ypos]
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
def set_x_bound(ltype):
if ltype == 'week':
bound = 8.0
elif ltype == 'short':
bound = 100.0
elif ltype == 'one':
bound = 370.0
elif ltype == 'five':
bound = 5.5
else:
bound = 100.
return bound
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
def select_data_position(mtype):
if mtype == 'mid':
pos = 4
cname = 'Mean'
elif mtype == 'med':
pos = 5
cname = 'Median'
elif mtype == 'min':
pos = 7
cname = 'Min'
elif mtype == 'max':
pos = 8
cname = 'Max'
return [pos, cname]
#----------------------------------------------------------------------------------
#-- check_dir_exist: chek whether the directory exists, and if not, create one -
#----------------------------------------------------------------------------------
def check_dir_exist(tdir):
"""
chek whether the directory exists, and if not, create one
input: tdir --- directory name
output: tdir --- created directory
"""
if not os.path.isdir(tdir):
cmd = 'mkdir ' + tdir
os.system(cmd)
#----------------------------------------------------------------------------------
#-- read_template: read template --
#----------------------------------------------------------------------------------
def read_template(fname, repl=[]):
"""
read template
input: fname --- template file name
repl --- a list of lists:[<tag to be replaced>, <replacing value>]
output: out --- template read
"""
infile = house_keeping + 'Templates/' + fname
with open(infile, 'r') as f:
out = f.read()
#
#--- if substitue strings are given, replace them before return
#
if len(repl) > 0:
for rset in repl:
out = out.replace(rset[0], rset[1])
return out
#----------------------------------------------------------------------------------
#-- create_limit_table: create a limit table for msid --
#----------------------------------------------------------------------------------
def create_limit_table(msid, group, unit, xmin, xmax):
"""
create a limit table for msid
input: msid --- msid
unit --- unit
xmin --- xmin
xmax --- xmax
output: <web_dir>/Limit_table/<msid>_limit_table.html
"""
#
#--- read limit data
#
pmsid = drop_suffix(msid)
l_list = ecf.set_limit_list(pmsid)
#
#--- read header part
#
title = msid + ' limit table'
repl = [["#MSID#", title], ["#JAVASCRIPT#", ''], ["#STYLE#", ""]]
line = read_template('html_head', repl )
#
#--- except a few, all temperatures are in K
#
if unit == 'DEGF':
tline = msid.upper() + ' (K)'
elif unit == 'DEGC':
tline = msid.upper() + ' (K)'
elif unit == '':
tline = msid.upper()
else:
tline = msid.upper() + ' (' + unit + ')'
bgline = '<th style="background-color:'
line = line + '<h2>' + tline + '</h2>\n'
line = line + '<table border=1 cellpadding=2>\n'
line = line + '<tr><th>Start Time</th>\n'
line = line + '<th>Stop Time</th>\n'
line = line + bgline + 'yellow">Yellow Lower</th>\n'
line = line + bgline + 'yellow">Yellow Upper</th>\n'
line = line + bgline + 'red">Red Lower</th>\n'
line = line + bgline + 'red">Red Upper</th>\n'
line = line + '</tr>\n'
for k in range(0, len(l_list)):
alist = l_list[k]
[astart, byear, base] = convert_stime_into_year(float(alist[0]))
[astop, byear, base] = convert_stime_into_year(float(alist[1]))
#
#--- there are often the data with <start>=<stop>, drop them
#
if astart == astop:
continue
astart = float('%4.2f' % (round(astart,2)))
astop = float('%4.2f' % (round(astop, 2)))
if k == 0:
if astart > xmin:
astart = '---'
if k == (len(l_list) -1):
astop = "---"
#
#--- alist: ymin, ymax, rmin, rmax in position of 2 to 5
#
tlist = [astart, astop] + alist[2:6]
#
#--- create each row
#
line = line + '<tr>\n'
for tval in tlist:
line = line + '<td style="text-align:center;">' + str(tval) + '</td>\n'
line = line + '</tr>\n'
line = line + '</table>\n'
line = line + '</body>\n</html>\n'
o_dir = web_dir + group + '/'
check_dir_exist(o_dir)
o_dir = o_dir + 'Limit_table/'
check_dir_exist(o_dir)
file_name = o_dir + msid + '_limit_table.html'
with open(file_name, 'w') as fo:
fo.write(line)
#-------------------------------------------------------------------------------------------
#-- process_day_data: extract data from the archive and compute the stats ---
#-------------------------------------------------------------------------------------------
def process_day_data(msid, time, data, glim, step = 3600.0):
"""
extract data from the archive and compute the stats
input: msid --- msid of the data
time --- array of time
data --- array of data
glim --- a list of limit tables
step --- interval of the data. defalut: 3600 sec
output: a list of lists which contain:
btime --- a list of time in sec from 1998.1.1
bdata --- a list of the mean of each interval
bmed --- a list of the median of each interval
bstd --- a list of the std of each interval
bmin --- a list of the min of each interval
bmax --- a list of the max of each interval
byl --- a list of the rate of yellow lower violation
byu --- a list of the rate of yellow upper violation
brl --- a list of the rate of red lower violation
bru --- a list of the rate of red upper violation
bcnt --- a list of the total data counts
byl --- a list of the lower yellow limits
byu --- a list of the upper yellow limits
brl --- a list of the lower red limits
bru --- a list of the upper red limits
"""
btime = []
bdata = []
bmed = []
bstd = []
bmin = []
bmax = []
byl = []
byu = []
brl = []
bru = []
bcnt = []
vsave = []
#
#--- extract data from archive
#
try:
data = numpy.array(data)
dtime = numpy.array(time)
#
#--- remove all "nan" data
#
mask = ~(numpy.isnan(data))
data = data[mask]
dtime = dtime[mask]
#
#--- there are glitch values much larger than the real value; remove them
#
mask = [data < 9e6]
data = data[mask]
dtime = dtime[mask]
#
#--- devide the data into a 'step' size
#
spos = 0
chk = 1
send = dtime[spos] + step
dlen = len(dtime)
for k in range(0, dlen):
if dtime[k] < send:
chk = 0
continue
else:
rdata = data[spos:k]
avg = rdata.mean()
if len(rdata) < 1:
med = 0.0
else:
med = numpy.median(rdata)
sig = rdata.std()
amin = rdata.min()
amax = rdata.max()
stime = dtime[spos + int(0.5 * (k-spos))]
vlimits = find_violation_range(glim, stime)
[yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)
btime.append(stime)
bdata.append(avg)
bmed.append(med)
bstd.append(sig)
bmin.append(amin)
bmax.append(amax)
byl.append(yl)
byu.append(yu)
brl.append(rl)
bru.append(ru)
bcnt.append(tot)
vsave.append(vlimits)
spos = k
send = dtime[k] + step
chk = 1
#
#--- check whether there are any left over; if so add it to the data lists
#
if chk == 0:
rdata = data[spos:dlen]
if len(rdata) < 1:
avg = 0.0
med = 0.0
else:
avg = rdata.mean()
med = numpy.median(rdata)
sig = rdata.std()
amin = rdata.min()
amax = rdata.max()
stime = dtime[spos + int(0.5 * (k-spos))]
vlimits = find_violation_range(glim, stime)
[yl, yu, rl, ru, tot] = find_violation_rate(rdata, vlimits)
btime.append(dtime[spos + int(0.5 * (k-spos))])
bdata.append(avg)
bmed.append(med)
bstd.append(sig)
bmin.append(amin)
bmax.append(amax)
byl.append(yl)
byu.append(yu)
brl.append(rl)
bru.append(ru)
bcnt.append(tot)
vsave.append(vlimits)
#out = [btime, bdata, bmed, bstd, bmin, bmax, byl, byu, brl, bru, bcnt]
out = [btime, bcnt, bdata, bmed, bstd, bmin, bmax, byl, byu, brl, bru]
#
#--- adding limits to the table
#
vtemp = [[], [], [], []]
for k in range(0, len(vsave)):
for m in range(0, 4):
vtemp[m].append(vsave[k][m])
out = out + vtemp
except:
ftime = 0
fdata = 0
fmed = 0
fstd = 0
fmin = 0
fmax = 0
ylow = 0
yupper= 0
rlow = 0
rupper= 0
tcnt = 0
vlimits = [-9.0e9, -9.0e9, 9.0e9, 9.0e9]
#out = [ftime, fdata, fmed, fstd, fmin, fmax, ylow, yupper, rlow, rupper, tcnt]
out = [ftime, tcnt, fdata, fmed, fstd, fmin, fmax, ylow, yupper, rlow, rupper]
out = out + vlimits
return out
#-------------------------------------------------------------------------------------------
#-- find_violation_range: set violation range --
#-------------------------------------------------------------------------------------------
def find_violation_range(glim, time):
"""
set violation range
input: glim --- a list of lists of violation set [start, stop, yl, yu, rl, ru]
time --- time of the violation check
output: vlimit --- a four element list of [yl, yu, rl, ru]
"""
vlimit = [-9.0e9, -9.0e9, 9.0e9, 9.0e9]
for lim_set in glim:
start = float(lim_set[0])
stop = float(lim_set[1])
if (time >= start) and (time < stop):
vlimit = [lim_set[2], lim_set[3], lim_set[4], lim_set[5]]
return vlimit
#-------------------------------------------------------------------------------------------
#-- find_violation_rate: find rate of yellow, red violations in both lower and upper limits
#-------------------------------------------------------------------------------------------
def find_violation_rate(carray, limits):
"""
find rate of yellow, red violations in both lower and upper limits
input: carray --- numpy array of the data
limits --- a list of limit [yellow lower, yellow upper, red lower, red upper]
output: [yl, yu, rl, ru, tot]: rate of yellow lower
rate of yellow upper
rate of red lower
rate of red upper
totla number of the data
"""
tot = len(carray)
ftot = float(tot)
yl = find_num_of_elements(carray, limits[0], side=0)
yu = find_num_of_elements(carray, limits[1], side=1)
rl = find_num_of_elements(carray, limits[2], side=0)
ru = find_num_of_elements(carray, limits[3], side=1)
yl -= rl
yu -= ru
#
#--- compute the violation is how many percent of the data
#
fdiv = ftot /100
yl = yl/fdiv
yu = yu/fdiv
rl = rl/fdiv
ru = ru/fdiv
return [yl, yu, rl, ru, tot]
#-------------------------------------------------------------------------------------------
#-- find_num_of_elements: find the numbers of elements above or lower than limit
#-------------------------------------------------------------------------------------------
def find_num_of_elements(carray, lim, side=0):
"""
find the numbers of elements above or lower than limit comparing to the total data #
input: carray --- numpy array of the data
lim --- the limit value
side --- lower:0 or upper:1 limit
output: cnt --- the numbers of the values beyond the limit
"""
#
#--- assume that the huge limit value means that there is no limit
#
if abs(lim) > 1e6:
return 0
if side == 0:
out = numpy.where(carray < lim)
else:
out = numpy.where(carray > lim)
try:
cnt = len(out[0])
except:
cnt = 0
return cnt
#--------------------------------------------------------------------------------
#-- get_mta_fits_data: fetch data from mta local database --
#--------------------------------------------------------------------------------
def get_mta_fits_data(msid, group, start, stop):
"""
fetch data from mta local database
input: msid --- msid
start --- start time in seconds from 1998.1.1
stop --- stop time in seconds from 1998.1.1
output: time --- time in second from 1998.1.1 for the given period
vals --- vals of msid for the given period
"""
#
#--- find a parent group name
#
pgroup = 'Comp_save/'
mc1 = re.search('Deahk', group)
mc2 = re.search('Grad', group)
if mc1 is not None:
pgroup = 'Deahk_save/'
elif mc2 is not None:
pgroup = 'Grad_save/'
#
#--- find which year(s) the requested period falls
#
date = Chandra.Time.DateTime(start)
byear = int(float(date.year))
date = Chandra.Time.DateTime(stop)
eyear = int(float(date.year))
chk = 0
for year in range(byear, eyear+1):
fits = deposit_dir + pgroup + group + '/' + msid + '_full_data_' + str(year) + '.fits'
if not os.path.isfile(fits):
fits = deposit_dir + pgroup + group + '/' + msid + '_full_data_' + str(year) + '.fits.gz'
if not os.path.isfile(fits):
continue
#
#--- extract the data for the given period
#
f = pyfits.open(fits)
data = f[1].data
f.close()
if chk == 0:
time = data['time']
vals = data[msid]
ext = [(time > start) & (time < stop)]
time = time[ext]
vals = vals[ext]
chk = 1
else:
tmp1 = data['time']
tmp2 = data[msid]
ext = [(tmp1 > start) & (tmp1 < stop)]
tmp1 = tmp1[ext]
tmp2 = tmp2[ext]
time = numpy.append(time, tmp1)
vals = numpy.append(vals, tmp2)
if chk > 0:
return [time, vals]
else:
#
#--- if no data, return False
#
return False
#------------------------------------------------------------------------------------
#-- shorten_digit: clean up the value so that it show only a few digits --
#------------------------------------------------------------------------------------
def shorten_digit(alist):
"""
clean up the value so that it show only a few digits
input: alist --- a list of data
output: olist --- a list of data cleaned
"""
olist = []
for ent in alist:
out = '%2.3e' % ent
val = float(out)
if val > 1000 or val < 0.001:
val = out
else:
val = str(val)
olist.append(val)
return olist
#------------------------------------------------------------------------------------
#-- set_warning_area: create warning area for plotting --
#------------------------------------------------------------------------------------
def set_warning_area(msid, xmin, xmax, ymin, ymax, byear):
"""
create warning area for plotting
input: msid --- msid
xmin --- min x
xmax --- max x
ymin --- min y
ymax --- max y
byear --- the base year
output: t_save --- a list of starting and stopping times in ydate
bt_lim --- a list of bottom; usually 0, but can be ymin
lr_lim --- a list of lower red limit
ly_lim --- a list of lower yellow limit
uy_lim --- a list of upper yellow limit
ur_lim --- a list of upper red limit
tp_lim --- a list of top: usually 9e10, but can be ymax
"""
msid = msid.lower()
[limit_dict, cnd_dict] = rlt.get_limit_table()
bval = 0.
if bval > ymin:
bval = ymin
tval = 9e9
if tval < ymax:
tval = ymax
try:
out = limit_dict[msid]
cnd_msid = cnd_dict[msid]
t_save = []
bt_lim = []
lr_lim = []
ly_lim = []
uy_lim = []
ur_lim = []
tp_lim = []
chk = 0
dlen = len(out)
for ent in out:
try:
lim_list = ent[3]['none']
except:
continue
#
x1 = chandratime_to_yday(ent[0], byear)
x2 = chandratime_to_yday(ent[1] -1.0, byear)
if x2 < xmin:
continue
if x1 < xmin:
x1 = xmin
if x1 < xmax and x2 >= xmax:
x2 = xmax
chk = 1
t_save.append(x1)
t_save.append(x2)
for k in range(0, 2):
bt_lim.append(bval)
ly_lim.append(lim_list[0])
uy_lim.append(lim_list[1])
lr_lim.append(lim_list[2])
ur_lim.append(lim_list[3])
tp_lim.append(tval)
if chk == 1:
break
except:
t_save = [xmin, xmax]
bt_lim = [-9e10, -9e10]
lr_lim = [-9e10, -9e10]
ly_lim = [-9e10, -9e10]
uy_lim = [9e10, 9e10]
ur_lim = [9e10, 9e10]
tp_lim = [9e10, 9e10]
return [t_save, bt_lim, lr_lim, ly_lim, uy_lim, ur_lim, tp_lim]
#------------------------------------------------------------------------------------
#-- make_glim: create limit list in glim format --
#------------------------------------------------------------------------------------
def make_glim(msid):
"""
create limit list in glim format
input: msid --- msid
output: glim --- a list of list of [<start time> <stop time> <lower yellow> <upper yellow>
<lower red> <upper red>]
time is in seconds from 1998.1.1
"""
msid = msid.lower()
[limit_dict, cnd_dict] = rlt.get_limit_table()
out = limit_dict[msid]
glim = []
for ent in out:
try:
lim_list = ent[3]['none']
except:
continue
temp = [ent[0], ent[1], lim_list[0], lim_list[1], lim_list[2], lim_list[3]]
glim.append(temp)
if len(glim) == 0:
glim = [[0, 3218831995, -9e6, 9e6, -9e6, 9e6]]
return glim
#------------------------------------------------------------------------------------
#-- chandratime_to_yday: convert chandra time into ydate from the 001 day of byear -
#------------------------------------------------------------------------------------
def chandratime_to_yday(ctime, byear):
"""
convert chandra time into ydate from the 001 day of byear
input: ctime --- chandra time; seconds from 1998.1.1
byear --- the base year
output: ydate --- ydate from 001 day of byear
"""
out = Chandra.Time.DateTime(ctime).date
atemp = re.split(':', out)
year = int(float(atemp[0]))
ydate = float(atemp[1])
hh = float(atemp[2])
mm = float(atemp[3])
ss = float(atemp[4])
ydate+= hh /24.0 + mm/1440.0 + ss /86400.0
if year < byear:
for tyear in range(year, byear):
if mcf.is_leapyear(tyear):
base = 366
else:
base = 365
ydate -= base
elif year > byear:
for tyear in range(byear, year):
if mcf.is_leapyear(tyear):
base = 366
else:
base = 365
ydate += base
return ydate
#------------------------------------------------------------------------------------
if __name__ == "__main__":
# msid = '1cbat'
# group = 'Acistemp'
# tstart = '2019:001:00:00:00'
# tstop = '2019:002:00:00:00'
# step = 300.0
# create_interactive_page(msid, group, tstart, tstop, step)
# msid = 'hstrtgrd1'
# group = 'Gradhstrut'
# tstart = '2019:001:00:00:00'
# tstop = '2019:002:00:00:00'
# step = 300.0
# create_interactive_page(msid, group, tstart, tstop, step)
if len(sys.argv) == 5:
msid = sys.argv[1]
group = sys.argv[2]
tstart = sys.argv[3]
tstop = sys.argv[4]
mtype = 'mid'
step = 300.0
create_interactive_page(msid, group, mtype, tstart, tstop, step)
elif len(sys.argv) == 6:
msid = sys.argv[1]
group = sys.argv[2]
mtype = sys.argv[3]
tstart = sys.argv[4]
tstop = sys.argv[5]
step = int(float(sys.argv[5]))
create_interactive_page(msid, group, mtype, tstart, tstop, step)
elif len(sys.argv) == 7:
msid = sys.argv[1]
group = sys.argv[2]
mtype = sys.argv[3]
tstart = sys.argv[4]
tstop = sys.argv[5]
step = int(float(sys.argv[6]))
create_interactive_page(msid, group, mtype, tstart, tstop, step)
else:
print("Usage: create_interactive_page.py <msid> <group> <mtype> <start> <stop> <bin size> ")
|
from itertools import permutations
print list(permutations([0,1,2,3,4,5,6,7,8,9]))[999999]
|
import random
import my_module
print("Module Implementation:")
print("My name is:", my_module.name)
print(my_module.age)
print("Print a random whole number between 1 and 10 (inclusive):")
# randint generates a random number in a given range (inclusive of the lower and upper limit)
random_integer = random.randint(1, 10)
print("Random integer:", random_integer)
print("Print a random floating point number:")
# Remember this is always [0.0, 1.0). It means, 0.0 is included, but 1.0 is not.
# The random values can go upto 0.9999 but never reach 0.1.
random_float = random.random()
print("Random floating point number:", random_float)
# random floating point value between 0 and 5
random_float5 = random.random() * 5
print("Random floating point number between 0 and 5:", random_float5)
# love calculator
love_score = round(random.random() * 100, 2)
print("The love score of you with your partner is:", love_score)
|
import re
import os
def is_loc(line):
raise NotImplementedError()
def loc_in_file(filename):
raise NotImplementedError()
def loc_in_directory():
raise NotImplementedError()
|
from collections import Counter
def sherlockValidSting(s):
freq = Counter(s)
# same frequency
if len(set(freq.values()) == 1):
return 'Yes'
# more than 2 unique frequencies
elif len(freq.values() > 2):
return 'No'
# two unit freq
else:
for key in freq:
freq[key] -=1
temp = list(freq.values())
# remove zeros
try:
temp.remove(0)
except:
pass
if len(set(temp) == 1):
return 'Yes'
else:
freq[key] +=1
return 'No'
|
"""
作者:Wanghao
日期:2020年11月19日
"""
import matlab.engine
import numpy as np
from tkinter import *
root = Tk()
root.title("图像重建")
root.geometry("600x230")
eng1 = matlab.engine.start_matlab()
eng2 = matlab.engine.start_matlab()
eng3 = matlab.engine.start_matlab()
def Import():
a=np.loadtxt("training dataCSV1.csv", dtype=np.float, delimiter=',', unpack=False)
lab2.config(text=a.shape)
b= np.loadtxt("training dataCSV2.csv", dtype=np.float, delimiter=',', unpack=False)
lab4.config(text=b.shape)
c = np.loadtxt("test dataCSV1.csv", dtype=np.float, delimiter=',', unpack=False)
lab6.config(text=c.shape)
d = np.loadtxt('test dataCSV2.csv', dtype=np.float, delimiter=',', unpack=False)
lab8.config(text=d.shape)
def ENG1():
eng1.RBFnet(nargout=0)
input("press enter to continue...")
# while eng1.isvalid():
# pass
eng1.quit()
def ENG2():
eng2.predict(nargout=0)
input("press enter to continue...")
eng2.quit()
def ENG3():
eng3.code02(nargout=0)
input("press enter to continue...")
eng3.quit()
btn1 = Button(root, text="导入数据", font=('Arial', 12), width=15,
height=2, command=Import)
btn1.place(x=10, y=10)
lab1 = Label(root,text="样本训练数据:", font=('Arial', 12), width=15,height=2)
lab1.place(x=200,y=10)
lab2 = Label(root,relief="groove", font=('Arial', 12), width=15,height=2)
lab2.place(x=350,y=10)
lab3 = Label(root,text="样本训练标签:", font=('Arial', 12), width=15,height=2)
lab3.place(x=200,y=60)
lab4 = Label(root,relief="groove", font=('Arial', 12), width=15,height=2)
lab4.place(x=350,y=60)
lab5 = Label(root,text="样本测试数据:", font=('Arial', 12), width=15,height=2)
lab5.place(x=200,y=110)
lab6 = Label(root,relief="groove", font=('Arial', 12), width=15,height=2)
lab6.place(x=350,y=110)
lab7= Label(root,text="样本测试标签:", font=('Arial', 12), width=15,height=2)
lab7.place(x=200,y=160)
lab8 = Label(root,relief="groove", font=('Arial', 12), width=15,height=2)
lab8.place(x=350,y=160)
btn2 = Button(root, text="训练模型", font=('Arial', 12), width=15,
height=2, command=ENG1)
btn2.place(x=10, y=60)
btn3 = Button(root, text="测试模型", font=('Arial', 12), width=15,
height=2, command=ENG2)
btn3.place(x=10, y=110)
btn3 = Button(root, text="图像重建", font=('Arial', 12), width=15,
height=2, command=ENG3)
btn3.place(x=10, y=160)
root.mainloop()
|
import tek.test
tek.test.setup(__file__)
|
from random import shuffle
from scratch.linear_algebra import sum_of_squares
from s1 import quantile
import math
num_friends = list(range(101))
shuffle(num_friends)
def mean(xs: List[float]) -> float:
return sum(xs)/len(xs)
def data_range(xs: List[float]) -> float:
return max(xs) - min(xs)
def de_mean(xs: List[float]) -> float:
x_bar = mean(xs)
return [x - x_bar for x in xs]
def variance(xs: List[float]) -> float:
assert len(xs) >= 2
n = len(xs)
deviations = de_mean(xs)
return sum_of_squares(deviations)/(n-1)
def standard_deviation(xs: List[float]) -> float:
return(math.sqrt(variance(xs)))
def interquartile_range(xs: List[float]) -> float:
return quantile(xs, 0.75) - quantile(xs, 0.25)
|
import numpy as np
import nibabel as nib
# Define Paths
confounds_path = '/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/confounds/calc_residuals/'
mean_csf = confounds_path + 'mean_csf.txt'
mean_wm = confounds_path + 'mean_wm.txt'
mean_global = confounds_path + 'mean_global.txt'
residual_file = confounds_path + 'sub-0050009_task-rest_run-1_bold_roi_st_mcf.nii_brain.nii.gz_residual.nii.gz'
# Read The residual Brain using nib
residual_brain = nib.load(residual_file).get_data()
# Create a brain mask
global_mask = (residual_brain != 0).sum(-1) != 0
residual_brain_ndarray = residual_brain[global_mask] # Voxels x Time
# Read the csf, WM and Global mean using np
mean_csf_array = np.genfromtxt(mean_csf)
mean_wm_array = np.genfromtxt(mean_wm)
mean_global_array = np.genfromtxt(mean_global)
# Randomly sample 100 voxel time series by randomly selecting a list x from [0, brain.shape[0] - 1] and similarly y and z
sample_size = 20
voxel_list = np.random.choice(np.arange(residual_brain_ndarray.shape[0]),sample_size,replace=False)
# For these 100 voxels, calculate the dot product of the series and mean signal one by one
sum = 0
for voxel_idx in voxel_list:
dot_prod = np.dot(residual_brain_ndarray[voxel_idx,:],mean_csf_array)
sum = sum + dot_prod
print('Dot_prod: ',dot_prod)
print('Sum of Dot_prod ',sum)
# CSF does not sum to be zero. Coz the orthogonalization was done using the all the confounds. THINK!
|
import fcntl, termios, struct
def get_console_size():
h, w, hp, wp = struct.unpack("HHHH",
fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack("HHHH", 0, 0, 0, 0)
)
)
return {"height": h, "width": w}
|
limits, lines = [], []
with open('/var/lib/dpkg/status') as fp:
lines = fp.read().splitlines()
begin, end = 0, 0
for line in lines:
if len(line.rstrip()) == 0:
limits.append((begin, end))
begin = end + 1
end += 1
# uqnique dependencies
unique_dependencies = set(())
# associative arrays -> package: [dependencies]
package_dependencies = {}
# generate --> unique_dependencies & package_dependencies
for limit in limits:
begin, end = limit
name, depends = '', ''
for line in lines[begin: end]:
# name
query_param = 'Package: '
if line.startswith(query_param):
name = line[len(query_param):].strip()
# dependencies
if 'Depends: ' in line:
query_param = 'Pre-Depends: '
if query_param in line:
depends += line[len(query_param):].strip()
query_param = 'Depends: '
if query_param in line:
depends += line[len(query_param):].strip()
unique_dependencies.add(name)
dpns = depends.split(',')
package_dependencies[name] = []
for dpn in dpns:
dp = dpn[0:dpn.find('(')].strip()
if len(dp) > 0 and not dp in package_dependencies[name]:
package_dependencies[name].append(dp)
# reverse dependencies
reverse_dependencies = {}
# p: [d]
for ud in unique_dependencies:
reverse_dependencies[ud] = []
for p,d in package_dependencies.items():
if ud in d:
reverse_dependencies[ud].append(p)
#print(len(reverse_dependencies))
html = '<table border="1px">'
html += '<tr><th>Package</th><th>Description</th><th>Dependencies</th><th>Reverse Dependencies</th></tr>'
for limit in limits:
begin, end = limit
html += '<tr border="1px">'
name, description, depends, links, rd_links = '', '', '', '', ''
for line in lines[begin: end]:
# name
query_param = 'Package: '
if line.startswith(query_param):
name = line[len(query_param):].strip()
# description
query_param = 'Description: '
if line.startswith(query_param) or line.startswith(' ') and not line.startswith(' /'):
if line.startswith(query_param):
description += line[len(query_param):].strip()
else:
description += line[0:].strip()
# dependencies
if 'Depends: ' in line:
query_param = 'Pre-Depends: '
if query_param in line:
depends += line[len(query_param):].strip()
query_param = 'Depends: '
if query_param in line:
depends += line[len(query_param):].strip()
html += '<td id="{}" class="package">{}</td>'.format(name, name)
html += '<td class="description">{description}</td>'.format(description=description)
dpns = depends.split(',')
package_dependencies[name] = []
for dpn in dpns:
dp = dpn[0:dpn.find('(')].strip()
pipe = dp.split(' | ')
for dp in pipe:
if dp in unique_dependencies:
dp = dp
else:
dp = pipe[0]
links += '<a href="#{dp}">{dp}</a>, '.format(dp= dp.strip())
if len(links) > 0:
html += '<td class="dependencies">{}</td>'.format(links[:-2])
else:
html += '<td class="dependencies"></td>'
for rd in reverse_dependencies[name]:
rd_links += '<a href="#{rd}">{rd}</a>, '.format(rd= rd.strip())
if len(links) > 0:
html += '<td class="reverse_dependencies">{}</td>'.format(rd_links[:-2])
else:
html += '<td class="reverse_dependencies"></td>'
html += '</tr>'
html += '</table>'
print(html) |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pathlib import PurePath
from textwrap import dedent
import pytest
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.codegen.protobuf.python import python_protobuf_module_mapper
from pants.backend.codegen.protobuf.python.additional_fields import (
rules as protobuf_additional_fields_rules,
)
from pants.backend.codegen.protobuf.target_types import ProtobufSourcesGeneratorTarget
from pants.backend.codegen.protobuf.target_types import rules as protobuf_target_type_rules
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference.default_module_mapping import (
DEFAULT_MODULE_MAPPING,
DEFAULT_TYPE_STUB_MODULE_MAPPING,
)
from pants.backend.python.dependency_inference.module_mapper import (
FirstPartyPythonModuleMapping,
ModuleProvider,
ModuleProviderType,
PossibleModuleProvider,
PythonModuleOwners,
PythonModuleOwnersRequest,
ThirdPartyPythonModuleMapping,
module_from_stripped_path,
)
from pants.backend.python.dependency_inference.module_mapper import rules as module_mapper_rules
from pants.backend.python.target_types import (
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
)
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
def test_default_module_mapping_is_normalized() -> None:
for k in DEFAULT_MODULE_MAPPING:
assert k == canonicalize_project_name(
k
), "Please update `DEFAULT_MODULE_MAPPING` to use canonical project names"
for k in DEFAULT_TYPE_STUB_MODULE_MAPPING:
assert k == canonicalize_project_name(
k
), "Please update `DEFAULT_TYPE_STUB_MODULE_MAPPING` to use canonical project names"
def test_default_module_mapping_uses_tuples() -> None:
for modules in DEFAULT_MODULE_MAPPING.values():
assert isinstance(modules, tuple)
assert len(modules) > 0
@pytest.mark.parametrize(
"stripped_path,expected",
[
("top_level.py", "top_level"),
("top_level.pyi", "top_level"),
("dir/subdir/__init__.py", "dir.subdir"),
("dir/subdir/__init__.pyi", "dir.subdir"),
("dir/subdir/app.py", "dir.subdir.app"),
("src/python/project/not_stripped.py", "src.python.project.not_stripped"),
],
)
def test_module_from_stripped_path(stripped_path: str, expected: str) -> None:
assert module_from_stripped_path(PurePath(stripped_path)) == expected
def test_first_party_modules_mapping() -> None:
root_provider = ModuleProvider(
Address("", relative_file_path="root.py"), ModuleProviderType.IMPL
)
util_provider = ModuleProvider(
Address("src/python/util", relative_file_path="strutil.py"), ModuleProviderType.IMPL
)
util_stubs_provider = ModuleProvider(
Address("src/python/util", relative_file_path="strutil.pyi"), ModuleProviderType.TYPE_STUB
)
test_provider = ModuleProvider(
Address("tests/python/project_test", relative_file_path="test.py"), ModuleProviderType.IMPL
)
mapping = FirstPartyPythonModuleMapping(
FrozenDict(
{
"default": FrozenDict(
{
"root": (root_provider,),
"util.strutil": (util_provider, util_stubs_provider),
"project_test.test": (test_provider,),
"ambiguous": (root_provider, util_provider),
"util.ambiguous": (util_provider, test_provider),
"two_resolves": (root_provider,),
}
),
"another": FrozenDict({"two_resolves": (test_provider,)}),
}
)
)
def assert_addresses(
mod: str, expected: tuple[PossibleModuleProvider, ...], *, resolve: str | None = None
) -> None:
assert mapping.providers_for_module(mod, resolve=resolve) == expected
root_provider0 = PossibleModuleProvider(root_provider, 0)
root_provider1 = PossibleModuleProvider(root_provider, 1)
util_provider0 = PossibleModuleProvider(util_provider, 0)
util_provider1 = PossibleModuleProvider(util_provider, 1)
util_stubs_provider0 = PossibleModuleProvider(util_stubs_provider, 0)
util_stubs_provider1 = PossibleModuleProvider(util_stubs_provider, 1)
test_provider0 = PossibleModuleProvider(test_provider, 0)
test_provider1 = PossibleModuleProvider(test_provider, 1)
assert_addresses("root", (root_provider0,))
assert_addresses("root.func", (root_provider1,))
assert_addresses("root.submodule.func", ())
assert_addresses("util.strutil", (util_provider0, util_stubs_provider0))
assert_addresses("util.strutil.ensure_text", (util_provider1, util_stubs_provider1))
assert_addresses("util", ())
assert_addresses("project_test.test", (test_provider0,))
assert_addresses("project_test.test.TestDemo", (test_provider1,))
assert_addresses("project_test", ())
assert_addresses("project.test", ())
assert_addresses("ambiguous", (root_provider0, util_provider0))
assert_addresses("ambiguous.func", (root_provider1, util_provider1))
assert_addresses("ambiguous.submodule.func", ())
assert_addresses("util.ambiguous", (util_provider0, test_provider0))
assert_addresses("util.ambiguous.Foo", (util_provider1, test_provider1))
assert_addresses("util.ambiguous.Foo.method", ())
assert_addresses("two_resolves", (root_provider0, test_provider0), resolve=None)
assert_addresses("two_resolves.foo", (root_provider1, test_provider1), resolve=None)
assert_addresses("two_resolves.foo.bar", (), resolve=None)
assert_addresses("two_resolves", (root_provider0,), resolve="default")
assert_addresses("two_resolves", (test_provider0,), resolve="another")
def test_third_party_modules_mapping() -> None:
colors_provider = ModuleProvider(Address("", target_name="ansicolors"), ModuleProviderType.IMPL)
colors_stubs_provider = ModuleProvider(
Address("", target_name="types-ansicolors"), ModuleProviderType.TYPE_STUB
)
pants_provider = ModuleProvider(Address("", target_name="pantsbuild"), ModuleProviderType.IMPL)
pants_testutil_provider = ModuleProvider(
Address("", target_name="pantsbuild.testutil"), ModuleProviderType.IMPL
)
submodule_provider = ModuleProvider(
Address("", target_name="submodule"), ModuleProviderType.IMPL
)
mapping = ThirdPartyPythonModuleMapping(
FrozenDict(
{
"default-resolve": FrozenDict(
{
"colors": (colors_provider, colors_stubs_provider),
"pants": (pants_provider,),
"req.submodule": (submodule_provider,),
"pants.testutil": (pants_testutil_provider,),
"two_resolves": (colors_provider,),
}
),
"another-resolve": FrozenDict({"two_resolves": (pants_provider,)}),
}
)
)
def assert_addresses(
mod: str, expected: tuple[PossibleModuleProvider, ...], *, resolve: str | None = None
) -> None:
assert mapping.providers_for_module(mod, resolve) == expected
colors_provider0 = PossibleModuleProvider(colors_provider, 0)
colors_provider1 = PossibleModuleProvider(colors_provider, 1)
colors_provider2 = PossibleModuleProvider(colors_provider, 2)
colors_stubs_provider0 = PossibleModuleProvider(colors_stubs_provider, 0)
colors_stubs_provider1 = PossibleModuleProvider(colors_stubs_provider, 1)
pants_provider0 = PossibleModuleProvider(pants_provider, 0)
pants_provider1 = PossibleModuleProvider(pants_provider, 1)
pants_provider2 = PossibleModuleProvider(pants_provider, 2)
pants_provider3 = PossibleModuleProvider(pants_provider, 3)
pants_testutil_provider0 = PossibleModuleProvider(pants_testutil_provider, 0)
pants_testutil_provider1 = PossibleModuleProvider(pants_testutil_provider, 1)
submodule_provider0 = PossibleModuleProvider(submodule_provider, 0)
submodule_provider1 = PossibleModuleProvider(submodule_provider, 1)
assert_addresses("colors", (colors_provider0, colors_stubs_provider0))
assert_addresses("colors.red", (colors_provider1, colors_stubs_provider1))
assert_addresses("pants", (pants_provider0,))
assert_addresses("pants.task", (pants_provider1,))
assert_addresses("pants.task.task", (pants_provider2,))
assert_addresses("pants.task.task.Task", (pants_provider3,))
assert_addresses("pants.testutil", (pants_testutil_provider0,))
assert_addresses("pants.testutil.foo", (pants_testutil_provider1,))
assert_addresses("req.submodule", (submodule_provider0,))
assert_addresses("req.submodule.foo", (submodule_provider1,))
assert_addresses("req.another", ())
assert_addresses("req", ())
assert_addresses("unknown", ())
assert_addresses("unknown.pants", ())
assert_addresses("two_resolves", (colors_provider0, pants_provider0), resolve=None)
assert_addresses("two_resolves.foo", (colors_provider1, pants_provider1), resolve=None)
assert_addresses("two_resolves.foo.bar", (colors_provider2, pants_provider2), resolve=None)
assert_addresses("two_resolves", (colors_provider0,), resolve="default-resolve")
assert_addresses("two_resolves", (pants_provider0,), resolve="another-resolve")
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*stripped_source_files.rules(),
*module_mapper_rules(),
*python_protobuf_module_mapper.rules(),
*target_types_rules.rules(),
*protobuf_additional_fields_rules(),
*protobuf_target_type_rules(),
QueryRule(FirstPartyPythonModuleMapping, []),
QueryRule(ThirdPartyPythonModuleMapping, []),
QueryRule(PythonModuleOwners, [PythonModuleOwnersRequest]),
],
target_types=[
PythonSourceTarget,
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
ProtobufSourcesGeneratorTarget,
],
)
def test_map_first_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
rule_runner.set_options(
[
"--source-root-patterns=['src/python', 'tests/python', 'build-support']",
"--python-enable-resolves",
"--python-resolves={'python-default': '', 'another-resolve': ''}",
]
)
rule_runner.write_files(
{
"src/python/project/util/dirutil.py": "",
"src/python/project/util/tarutil.py": "",
"src/python/project/util/BUILD": "python_sources(resolve='another-resolve')",
# A module with multiple owners, including type stubs.
"src/python/multiple_owners.py": "",
"src/python/multiple_owners.pyi": "",
"src/python/BUILD": "python_sources()",
"build-support/multiple_owners.py": "",
"build-support/BUILD": "python_sources()",
# A package module.
"tests/python/project_test/demo_test/__init__.py": "",
"tests/python/project_test/demo_test/BUILD": "python_sources()",
# Check that plugin mappings work. Note that we duplicate one of the files with a normal
# python_source.
"src/python/protos/f1.proto": "",
"src/python/protos/f2.proto": "",
"src/python/protos/f2_pb2.py": "",
"src/python/protos/BUILD": dedent(
"""\
protobuf_sources(name='protos')
python_source(name='py', source="f2_pb2.py")
"""
),
}
)
result = rule_runner.request(FirstPartyPythonModuleMapping, [])
assert result == FirstPartyPythonModuleMapping(
FrozenDict(
{
"another-resolve": FrozenDict(
{
"project.util.dirutil": (
ModuleProvider(
Address("src/python/project/util", relative_file_path="dirutil.py"),
ModuleProviderType.IMPL,
),
),
"project.util.tarutil": (
ModuleProvider(
Address("src/python/project/util", relative_file_path="tarutil.py"),
ModuleProviderType.IMPL,
),
),
}
),
"python-default": FrozenDict(
{
"multiple_owners": (
ModuleProvider(
Address("build-support", relative_file_path="multiple_owners.py"),
ModuleProviderType.IMPL,
),
ModuleProvider(
Address("src/python", relative_file_path="multiple_owners.py"),
ModuleProviderType.IMPL,
),
ModuleProvider(
Address("src/python", relative_file_path="multiple_owners.pyi"),
ModuleProviderType.TYPE_STUB,
),
),
"project_test.demo_test": (
ModuleProvider(
Address(
"tests/python/project_test/demo_test",
relative_file_path="__init__.py",
),
ModuleProviderType.IMPL,
),
),
"protos.f1_pb2": (
ModuleProvider(
Address(
"src/python/protos",
relative_file_path="f1.proto",
target_name="protos",
),
ModuleProviderType.IMPL,
),
),
"protos.f2_pb2": (
ModuleProvider(
Address("src/python/protos", target_name="py"),
ModuleProviderType.IMPL,
),
ModuleProvider(
Address(
"src/python/protos",
relative_file_path="f2.proto",
target_name="protos",
),
ModuleProviderType.IMPL,
),
),
}
),
}
)
)
def test_map_third_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
def req(
tgt_name: str,
req_str: str,
*,
modules: list[str] | None = None,
stub_modules: list[str] | None = None,
resolve: str = "default",
) -> str:
return dedent(
f"""\
python_requirement(name='{tgt_name}', requirements=['{req_str}'],
modules={modules or []},
type_stub_modules={stub_modules or []},
resolve={repr(resolve)})
"""
)
build_file = "\n\n".join(
[
req("req1", "req1==1.2"),
req("un_normalized", "Un-Normalized-Project>3"),
req("file_dist", "file_dist@ file:///path/to/dist.whl"),
req("vcs_dist", "vcs_dist@ git+https://github.com/vcs/dist.git"),
req("modules", "foo==1", modules=["mapped_module"]),
# We extract the module from type stub dependencies.
req("typed-dep1", "typed-dep1-types"),
req("typed-dep2", "types-typed-dep2"),
req("typed-dep3", "typed-dep3-stubs"),
req("typed-dep4", "stubs-typed-dep4"),
req("typed-dep5", "typed-dep5-foo", stub_modules=["typed_dep5"]),
# A 3rd-party dependency can have both a type stub and implementation.
req("multiple_owners1", "multiple_owners==1"),
req("multiple_owners2", "multiple_owners==2", resolve="another"),
req("multiple_owners_types", "types-multiple_owners==1", resolve="another"),
# Only assume it's a type stubs dep if we are certain it's not an implementation.
req("looks_like_stubs", "looks-like-stubs-types", modules=["looks_like_stubs"]),
]
)
rule_runner.write_files({"BUILD": build_file})
rule_runner.set_options(
["--python-resolves={'default': '', 'another': ''}", "--python-enable-resolves"]
)
result = rule_runner.request(ThirdPartyPythonModuleMapping, [])
assert result == ThirdPartyPythonModuleMapping(
FrozenDict(
{
"another": FrozenDict(
{
"multiple_owners": (
ModuleProvider(
Address("", target_name="multiple_owners2"), ModuleProviderType.IMPL
),
ModuleProvider(
Address("", target_name="multiple_owners_types"),
ModuleProviderType.TYPE_STUB,
),
),
}
),
"default": FrozenDict(
{
"file_dist": (
ModuleProvider(
Address("", target_name="file_dist"), ModuleProviderType.IMPL
),
),
"looks_like_stubs": (
ModuleProvider(
Address("", target_name="looks_like_stubs"), ModuleProviderType.IMPL
),
),
"mapped_module": (
ModuleProvider(
Address("", target_name="modules"), ModuleProviderType.IMPL
),
),
"multiple_owners": (
ModuleProvider(
Address("", target_name="multiple_owners1"), ModuleProviderType.IMPL
),
),
"req1": (
ModuleProvider(
Address("", target_name="req1"), ModuleProviderType.IMPL
),
),
"typed_dep1": (
ModuleProvider(
Address("", target_name="typed-dep1"), ModuleProviderType.TYPE_STUB
),
),
"typed_dep2": (
ModuleProvider(
Address("", target_name="typed-dep2"), ModuleProviderType.TYPE_STUB
),
),
"typed_dep3": (
ModuleProvider(
Address("", target_name="typed-dep3"), ModuleProviderType.TYPE_STUB
),
),
"typed_dep4": (
ModuleProvider(
Address("", target_name="typed-dep4"), ModuleProviderType.TYPE_STUB
),
),
"typed_dep5": (
ModuleProvider(
Address("", target_name="typed-dep5"), ModuleProviderType.TYPE_STUB
),
),
"un_normalized_project": (
ModuleProvider(
Address("", target_name="un_normalized"), ModuleProviderType.IMPL
),
),
"vcs_dist": (
ModuleProvider(
Address("", target_name="vcs_dist"), ModuleProviderType.IMPL
),
),
}
),
}
)
)
def test_map_module_to_address(rule_runner: RuleRunner) -> None:
def assert_owners(
module: str, expected: list[Address], expected_ambiguous: list[Address] | None = None
) -> None:
owners = rule_runner.request(
PythonModuleOwners, [PythonModuleOwnersRequest(module, resolve="python-default")]
)
assert list(owners.unambiguous) == expected
assert list(owners.ambiguous) == (expected_ambiguous or [])
from_import_owners = rule_runner.request(
PythonModuleOwners,
[PythonModuleOwnersRequest(f"{module}.Class", resolve="python-default")],
)
assert list(from_import_owners.unambiguous) == expected
assert list(from_import_owners.ambiguous) == (expected_ambiguous or [])
rule_runner.set_options(["--source-root-patterns=['root', '/']", "--python-enable-resolves"])
rule_runner.write_files(
{
# A root-level module.
"script.py": "",
"BUILD": dedent(
"""\
python_source(name="script", source="script.py")
python_requirement(name="valid_dep", requirements=["valid_dep"])
# Dependency with a type stub.
python_requirement(name="dep_w_stub", requirements=["dep_w_stub"])
python_requirement(name="dep_w_stub-types", requirements=["dep_w_stub-types"])
"""
),
# Normal first-party module.
"root/no_stub/app.py": "",
"root/no_stub/BUILD": "python_sources()",
# First-party module with type stub.
"root/stub/app.py": "",
"root/stub/app.pyi": "",
"root/stub/BUILD": "python_sources()",
# Package path.
"root/package/subdir/__init__.py": "",
"root/package/subdir/BUILD": "python_sources()",
# Third-party requirement with first-party type stub.
"root/dep_with_stub.pyi": "",
"root/BUILD": dedent(
"""\
python_sources()
python_requirement(name="dep", requirements=["dep_with_stub"])
"""
),
# Namespace package split between first- and third-party, disambiguated by ancestry level.
"root/namespace/__init__.py": "",
"root/namespace/BUILD": dedent(
"""\
python_requirement(name="thirdparty", requirements=["namespace.thirdparty"])
python_source(name="init", source="__init__.py")
"""
),
# Ambiguity.
"root/ambiguous/f1.py": "",
"root/ambiguous/f2.py": "",
"root/ambiguous/f3.py": "",
"root/ambiguous/f4.pyi": "",
"root/ambiguous/BUILD": dedent(
"""\
# Ambiguity purely within third-party deps.
python_requirement(name='thirdparty1', requirements=['ambiguous_3rdparty'])
python_requirement(name='thirdparty2', requirements=['ambiguous_3rdparty'])
# Ambiguity purely within first-party deps.
python_source(name="firstparty1", source="f1.py")
python_source(name="firstparty2", source="f1.py")
# Ambiguity within third-party, which should result in ambiguity for first-party
# too. These all share the module `ambiguous.f2`.
python_requirement(
name='thirdparty3', requirements=['bar'], modules=['ambiguous.f2']
)
python_requirement(
name='thirdparty4', requirements=['bar'], modules=['ambiguous.f2']
)
python_source(name="firstparty3", source="f2.py")
# Ambiguity within first-party, which should result in ambiguity for third-party
# too. These all share the module `ambiguous.f3`.
python_source(name="firstparty4", source="f3.py")
python_source(name="firstparty5", source="f3.py")
python_requirement(
name='thirdparty5', requirements=['baz'], modules=['ambiguous.f3']
)
# You can only write a first-party type stub for a third-party requirement if
# there are not third-party type stubs already.
python_requirement(
name='ambiguous-stub',
requirements=['ambiguous-stub'],
modules=["ambiguous.f4"],
)
python_requirement(
name='ambiguous-stub-types',
requirements=['ambiguous-stub-types'],
type_stub_modules=["ambiguous.f4"],
)
python_source(name='ambiguous-stub-1stparty', source='f4.pyi')
"""
),
}
)
assert_owners("pathlib", [])
assert_owners("typing", [])
assert_owners("valid_dep", [Address("", target_name="valid_dep")])
assert_owners(
"dep_w_stub",
[Address("", target_name="dep_w_stub"), Address("", target_name="dep_w_stub-types")],
)
assert_owners("script", [Address("", target_name="script")])
assert_owners("no_stub.app", expected=[Address("root/no_stub", relative_file_path="app.py")])
assert_owners(
"stub.app",
[
Address("root/stub", relative_file_path="app.py"),
Address("root/stub", relative_file_path="app.pyi"),
],
)
assert_owners(
"package.subdir", [Address("root/package/subdir", relative_file_path="__init__.py")]
)
assert_owners(
"dep_with_stub",
[
Address("root", target_name="dep"),
Address("root", relative_file_path="dep_with_stub.pyi"),
],
)
assert_owners("namespace.thirdparty", [Address("root/namespace", target_name="thirdparty")])
assert_owners(
"ambiguous_3rdparty",
[],
expected_ambiguous=[
Address("root/ambiguous", target_name="thirdparty1"),
Address("root/ambiguous", target_name="thirdparty2"),
],
)
assert_owners(
"ambiguous.f1",
[],
expected_ambiguous=[
Address("root/ambiguous", target_name="firstparty1"),
Address("root/ambiguous", target_name="firstparty2"),
],
)
assert_owners(
"ambiguous.f2",
[],
expected_ambiguous=[
Address("root/ambiguous", target_name="thirdparty3"),
Address("root/ambiguous", target_name="thirdparty4"),
Address("root/ambiguous", target_name="firstparty3"),
],
)
assert_owners(
"ambiguous.f3",
[],
expected_ambiguous=[
Address("root/ambiguous", target_name="thirdparty5"),
Address("root/ambiguous", target_name="firstparty4"),
Address("root/ambiguous", target_name="firstparty5"),
],
)
assert_owners(
"ambiguous.f4",
[],
expected_ambiguous=[
Address("root/ambiguous", target_name="ambiguous-stub"),
Address("root/ambiguous", target_name="ambiguous-stub-types"),
Address("root/ambiguous", target_name="ambiguous-stub-1stparty"),
],
)
def test_resolving_ambiguity_by_filesystem_proximity(rule_runner: RuleRunner) -> None:
rule_runner.set_options(
[
"--source-root-patterns=['root1', 'root2', 'root3']",
"--python-infer-ambiguity-resolution=by_source_root",
]
)
rule_runner.write_files(
{
"root1/aa/bb/BUILD": "python_sources()",
"root1/aa/bb/foo.py": "",
"root1/aa/cc/BUILD": "python_sources()",
"root1/aa/cc/bar.py": "from aa.bb import foo",
"root2/aa/bb/BUILD": "python_sources()",
"root2/aa/bb/foo.py": "",
"root2/aa/dd/baz.py": "from aa.bb import foo",
"root3/aa/ee/BUILD": "python_sources()",
"root3/aa/ee/foo.py": "from aa.bb import foo",
}
)
owners = rule_runner.request(
PythonModuleOwners, [PythonModuleOwnersRequest("aa.bb.foo", None, locality=None)]
)
assert list(owners.unambiguous) == []
assert list(owners.ambiguous) == [
Address("root1/aa/bb", relative_file_path="foo.py"),
Address("root2/aa/bb", relative_file_path="foo.py"),
]
owners = rule_runner.request(
PythonModuleOwners, [PythonModuleOwnersRequest("aa.bb.foo", None, locality="root1/")]
)
assert list(owners.unambiguous) == [Address("root1/aa/bb", relative_file_path="foo.py")]
assert list(owners.ambiguous) == []
owners = rule_runner.request(
PythonModuleOwners, [PythonModuleOwnersRequest("aa.bb.foo", None, locality="root2/")]
)
assert list(owners.unambiguous) == [Address("root2/aa/bb", relative_file_path="foo.py")]
assert list(owners.ambiguous) == []
owners = rule_runner.request(
PythonModuleOwners, [PythonModuleOwnersRequest("aa.bb.foo", None, locality="root3/")]
)
assert list(owners.unambiguous) == []
assert list(owners.ambiguous) == [
Address("root1/aa/bb", relative_file_path="foo.py"),
Address("root2/aa/bb", relative_file_path="foo.py"),
]
def test_map_module_considers_resolves(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
# Note that both `python_requirements` have the same `dep`, which would normally
# result in ambiguity.
python_requirement(
name="dep1",
resolve="a",
requirements=["dep"],
)
python_requirement(
name="dep2",
resolve="b",
requirements=["dep"],
)
"""
)
}
)
rule_runner.set_options(["--python-resolves={'a': '', 'b': ''}", "--python-enable-resolves"])
def get_owners(resolve: str | None) -> PythonModuleOwners:
return rule_runner.request(PythonModuleOwners, [PythonModuleOwnersRequest("dep", resolve)])
assert get_owners("a").unambiguous == (Address("", target_name="dep1"),)
assert get_owners("b").unambiguous == (Address("", target_name="dep2"),)
assert get_owners(None).ambiguous == (
Address("", target_name="dep1"),
Address("", target_name="dep2"),
)
def test_issue_15111(rule_runner: RuleRunner) -> None:
"""Ensure we can handle when a single address provides multiple modules.
This is currently only possible with third-party targets.
"""
rule_runner.write_files(
{"BUILD": "python_requirement(name='req', requirements=['docopt', 'types-docopt'])"}
)
rule_runner.set_options(["--python-enable-resolves"])
result = rule_runner.request(ThirdPartyPythonModuleMapping, [])
assert result == ThirdPartyPythonModuleMapping(
FrozenDict(
{
"python-default": FrozenDict(
{
"docopt": (
ModuleProvider(Address("", target_name="req"), ModuleProviderType.IMPL),
ModuleProvider(
Address("", target_name="req"), ModuleProviderType.TYPE_STUB
),
),
}
)
}
)
)
|
"""
================================
Digits Classification Exercise
================================
A tutorial exercise regarding the use of classification techniques on
the Digits dataset.
This exercise is used in the :ref:`clf_tut` part of the
:ref:`supervised_learning_tut` section of the
:ref:`stat_learn_tut_index`.
"""
print(__doc__)
from sklearn import datasets, neighbors, linear_model
import cv2
import numpy as np
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
img4=cv2.imread('2.jpg')
img4=(255-img4)
gray4 = cv2.cvtColor(img4,cv2.COLOR_BGR2GRAY)
cellsz= [np.hsplit(row,1) for row in np.vsplit(gray4,1)]
z = np.array(X_digits)
# test = z[0,0].reshape(-1,400).astype(np.dtype(np.double)) # Size = (2500,7)
# cv2.imshow('res',z[:,:])
n_samples = len(X_digits)
X_train = X_digits[:.9 * n_samples]
y_train = y_digits[:.9 * n_samples]
X_test = X_digits[.9 * n_samples:]
y_test = y_digits[.9 * n_samples:]
print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape
knn = neighbors.KNeighborsClassifier()
logistic = linear_model.LogisticRegression()
print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))
# print('LogisticRegression score: %f'
# % logistic.fit(X_train, y_train).score(X_test, y_test))
cv2.waitKey() |
from rest_framework import serializers
from .models import Event
class EventSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(read_only=True)
title = serializers.CharField()
description = serializers.CharField()
start = serializers.CharField()
end = serializers.CharField()
class Meta:
model = Event
fields = ('id', 'title', 'description', 'start', 'end',)
|
from DPjudge import Power
class XtalballPower(Power):
# ----------------------------------------------------------------------
def __init__(self, game, name, type = None):
Power.__init__(self, game, name, type)
# ----------------------------------------------------------------------
def __repr__(self):
text = Power.__repr__(self).decode('latin-1')
for listName, orders in self.list.items():
if orders: text += '%s\n%s\n' % (listName, '\n'.join(orders))
return text.encode('latin-1')
# ----------------------------------------------------------------------
def reinit(self, includeFlags = 6):
Power.reinit(self, includeFlags)
# -----------------------------------
# Initialize the transient parameters
# -----------------------------------
if includeFlags & 5:
self.list, self.notes = {'SOONER': [], 'LATER': []}, {}
# ----------------------------------------------------------------------
def isEliminated(self, public = False, personal = False):
if not Power.isEliminated(self, public, personal): return False
if not (self.homes and self.game.phase == 'M' and
'GARRISON' in self.game.rules): return True
save = next = self.game.phase
while next not in 'AM':
self.game.phase = self.game.findNextPhase()
next = self.game.phase.split()[-1][0]
self.game.phase = save
return next != 'A'
# ----------------------------------------------------------------------
def movesSubmitted(self):
if self.name not in self.game.map.powers: return 1
if (not self.game.skip
and [x for x in self.game.powers if x.units and not x.list['SOONER']]):
return self.list['SOONER'] or not self.units
if self.game.skip: return self.list['LATER']
return self.list['LATER'] or not self.units
# ----------------------------------------------------------------------
|
from django.db import models
from django_countries.fields import CountryField
from phone_field import PhoneField
from django.conf import settings
gender_choices = (
('M', 'Male'),
('F', 'Female'),
('O', 'Other'),
)
class_choices = (
('6', 'Class 6'),
('7', 'Class 7'),
('8', 'Class 8'),
('9', 'Class 9'),
('10', 'Class 10'),
('11', 'Class 11'),
('12', 'Class 12'),
)
class_type_choices = (
('1-1', 'One-to-One Class'),
('1-n', 'One-to-Many Class'),
)
class languages(models.Model):
language = models.CharField(max_length = 50)
def __str__(self):
return self.language
class Meta:
verbose_name_plural = 'Languages Spoken'
class subjects(models.Model):
subject_name = models.CharField(max_length = 50)
sub_detail = models.CharField(max_length = 500, blank = True, null = True)
def __str__(self):
return self.subject_name
class Meta:
verbose_name_plural = 'Subjects'
class notifications_type(models.Model):
notifications_title = models.CharField(max_length = 20)
def __str__(self):
return self.notifications_title
class Meta:
verbose_name_plural = 'Notification Types'
class StudentProfile(models.Model):
student = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default = 1)
name = models.CharField(max_length = 100)
dob = models.DateField(blank = True, null = True)
gender = models.CharField(max_length = 1, choices = gender_choices)
email = models.EmailField()
phone_number = PhoneField(blank = True, null = True)
skype_id = models.CharField(max_length = 15, blank = True, null = True)
profile_pic = models.ImageField(blank = True, null = True)
active = models.BooleanField(default=1)
student_class = models.CharField(max_length = 5, choices = class_choices)
school = models.CharField(max_length = 100, blank = True, null = True)
board = models.CharField(max_length = 50, blank = True, null = True)
notifications = models.ManyToManyField(notifications_type, blank = True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Students'
#name, dob, gender email phone_number skype_id profile_pic acctive class school board
class TutorProfile(models.Model):
tutor = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default = 1)
name = models.CharField(max_length = 100)
dob = models.DateField(blank = True, null = True)
gender = models.CharField(max_length = 1, choices = gender_choices)
email = models.EmailField()
phone_number = PhoneField()
skype_id = models.CharField(max_length = 15)
profile_pic = models.ImageField()
active = models.BooleanField(default=1)
identity_document = models.FileField()
curriculum_vitae = models.TextField()
about = models.TextField()
languages_spoken = models.ManyToManyField(languages)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Tutors'
class TimeSlots(models.Model):
start_time = models.CharField(max_length = 10)
end_time = models.CharField(max_length = 10)
def __str__(self):
slot = str(self.start_time) + ' - ' + str(self.end_time)
return slot
class Meta:
verbose_name_plural = 'Time Slots'
class listings(models.Model):
title = models.CharField(max_length = 100)
tutor = models.ForeignKey(TutorProfile, on_delete=models.CASCADE)
subject = models.ManyToManyField(subjects)
student_class = models.CharField(max_length = 2, choices = class_choices)
class_type = models.CharField(max_length = 5, choices = class_type_choices)
hourly_rate = models.IntegerField()
methodology = models.TextField(blank= True, null = True)
details = models.TextField(blank= True, null = True)
class_slot = models.ManyToManyField(TimeSlots)
def __str__(self):
return self.tutor.name
class Meta:
verbose_name_plural = 'Class Listings'
class class_request(models.Model):
student = models.ForeignKey(StudentProfile, on_delete=models.CASCADE)
listing = models.ForeignKey(listings, on_delete=models.CASCADE)
time_slot = models.ForeignKey(TimeSlots, on_delete=models.CASCADE)
date_time = models.DateTimeField()
accepted_status = models.BooleanField(default=0)
def __str__(self):
lsiting_name = self.student.name + ' - ' + str(self.listing)
return lsiting_name
class Meta:
verbose_name_plural = 'Class Requests'
class payment(models.Model):
student = models.ForeignKey(StudentProfile, on_delete=models.SET_NULL, blank= True, null = True)
listing = models.ForeignKey(listings, on_delete=models.SET_NULL, blank= True, null = True)
date_time = models.DateTimeField()
payment_id = models.CharField(max_length = 20)
amount = models.IntegerField()
def __str__(self):
return self.student.name
class Meta:
verbose_name_plural = 'Payments' |
from django.urls import path
from . import views
urlpatterns = [
#main paig
path("", views.index, name="index"),
#groupe list page
path("groups",views.groups_list, name="list"),
path('group/<str:slug>/',views.detail_group, name="detail_group_url"),
#user page , list post , vews and edit post
path('<str:user>/',views.profile,name='profile'),
path('<str:username>/<int:post_id>/',views.post_views,name='post'),
path(
'<str:username>/<int:post_id>/edit/',
views.post_edit,
name='post_edit'),
#create post
path("create",views.CreatePost.as_view(), name="create_post_url"),
]
|
"""
Python types on which database engine operates using public interfaces.
"""
from typing import Union
__all__ = [
'DB_TYPE',
'BYTEORDER',
'SIGNED',
'ENCODING',
'INVALID_ID',
'DFS_CONFIG_PATH',
'WORKER_PATH',
'REPLICA_PATH',
'NODE_STORAGE',
'RELATIONSHIP_STORAGE',
'PROPERTY_STORAGE',
'LABEL_STORAGE',
'DYNAMIC_STORAGE',
'NODE_RECORD_SIZE',
'RELATIONSHIP_RECORD_SIZE',
'PROPERTY_RECORD_SIZE',
'LABEL_RECORD_SIZE',
'DYNAMIC_RECORD_SIZE',
'DYNAMIC_RECORD_PAYLOAD_SIZE'
]
DB_TYPE = Union[str, int, float, bool]
BYTEORDER = 'big'
SIGNED = True
ENCODING = 'utf-8'
INVALID_ID = -1
DFS_CONFIG_PATH = 'configs/config.json'
WORKER_PATH = 'worker_instance_'
REPLICA_PATH = 'replica_'
NODE_STORAGE = 'node_storage.db'
RELATIONSHIP_STORAGE = 'relationship_storage.db'
PROPERTY_STORAGE = 'property_storage.db'
LABEL_STORAGE = 'label_storage.db'
DYNAMIC_STORAGE = 'dynamic_storage.db'
NODE_RECORD_SIZE = 13
RELATIONSHIP_RECORD_SIZE = 33
PROPERTY_RECORD_SIZE = 13
LABEL_RECORD_SIZE = 5
DYNAMIC_RECORD_SIZE = 32
DYNAMIC_RECORD_PAYLOAD_SIZE = 27
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
journeys_df = pd.read_csv('data/clean_journeys.csv', parse_dates=[14, 15])
clean_df = pd.read_csv('data/clean_pred.csv')
station_df = pd.read_csv('data/stations.csv')
# create time column
time_columns = ['Start Date', 'Start Month', 'Start Year', 'Start Hour',
'Start Minute', 'End Date', 'End Month', 'End Year', 'End Hour', 'End Minute']
journeys_df = journeys_df.drop(columns=time_columns)
supply_class_count = pd.crosstab(clean_df['Station ID'], clean_df['Supply'])
top_undersupplied_stations = supply_class_count.sort_values([1], ascending=False)[:10]
top_undersupplied_stations
station_id = 66
station_capacity = int(station_df[station_df['Station ID'] == station_id]['Capacity'])
station_journeys = []
for flow, prefix in zip(['In', 'Out'], ['Start', 'End']):
uni_df = journeys_df[journeys_df[prefix + ' Station ID'] == station_id]
uni_df = uni_df.rename(columns={prefix + ' Time': 'Time'})
uni_df['Flow'] = flow
station_journeys.append(uni_df)
station_journeys = pd.concat(station_journeys, sort=False)[['Time', 'Flow']].set_index('Time')
trips_by_hour = [(n.hour, g) for n, g in station_journeys.groupby(pd.Grouper(freq='H'))]
dist_ratios = np.arange(0, 1, 0.1)
results = pd.DataFrame(columns=['Capacity', 'Dist Ratio', 'Missed In', 'Missed Out'])
for capacity in range(0, 500, 50):
for dist_ratio in dist_ratios:
missed_out = 0
missed_in = 0
supply = round(capacity * dist_ratio)
for hour, hour_trips in trips_by_hour:
if hour in [5, 17]:
supply = round(capacity * dist_ratio)
for index, trip in hour_trips.iterrows():
if trip['Flow'] == 'In':
if supply < capacity:
supply += 1
else:
missed_in += 1
else:
if supply > 0:
supply -= 1
else:
missed_out += 1
print(capacity, dist_ratio, missed_in, missed_out)
results = results.append({'Capacity': capacity, 'Dist Ratio': dist_ratio, 'Missed In': missed_in, 'Missed Out': missed_out}, ignore_index=True)
results = results.astype(float)
fig = plt.figure(figsize=(24, 10))
for i, dist_ratio in enumerate(dist_ratios):
plot_df = results[results['Dist Ratio'] == dist_ratio]
plt.subplot(2, 5, i+1)
sns.lineplot(plot_df['Capacity'], plot_df['Missed Out'], color='red', label='No Bikes')
sns.lineplot(plot_df['Capacity'], plot_df['Missed In'], color='skyblue', label='No Space')
plt.title('Distribution Ratio of {:.2f}'.format(dist_ratio))
plt.xlabel('Capacity')
plt.ylabel('Trips')
fig.savefig('images/capacity_optimised')
|
__author__ = 'toby'
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
data = pd.read_csv("D:\\Documents\\GitHub\\pyqt_groundstation\\logs\\07-24-2022_09-29-13\\PROP_DATA_0.txt")
other = open("D:\\Documents\\GitHub\\pyqt_groundstation\\logs\\07-24-2022_09-29-13\\PROP_OTHER_MSGS.txt").readlines()
transitions = []
for line in other:
if line.startswith("NON-DATA"):
line = line.split(" : ")[2] #extract json
line = json.loads(line)
print(line)
transitions.append(line["timeStamp"])
times = (min(transitions) - 50000, max(transitions))
plt.figure()
plt.subplot(311)
plt.plot(data["timeStamp"], data["tank1Thermo"], label="tank 1 thermo")
[plt.axvline(t, color="r") for t in transitions]
plt.xlim(times)
plt.legend()
plt.subplot(312)
plt.plot(data["timeStamp"], data["loxTankDucer"], label="lox tank, psi")
plt.plot(data["timeStamp"], data["loxVenturi"], label="lox venturi, psi")
# plt.plot(data["timeStamp"], data["loxRegDucer"], label="lox reg, psi")
[plt.axvline(t, color="r") for t in transitions]
plt.xlim(times)
plt.legend()
valveNames = "kerDrip kerFlow kerPressurant kerPurge kerVent loxDrip loxFlow loxPressurant loxPurge loxVent".split(" ")
plt.subplot(313)
for v in valveNames:
series = data[v].apply(lambda x: 1 if x == "OPEN" else 0)
plt.plot(data["timeStamp"], series, label=v)
plt.legend()
plt.xlim(times)
plt.show()
|
import wx
import re
from mcp21.package import MCPPackageBase
class MCPPackage(MCPPackageBase):
def __init__(self, mcp):
MCPPackageBase.__init__(self, mcp)
self.package = 'dns-com-vmoo-smartcomplete'
self.min = '1.0'
self.max = '1.0'
self.callbacks = {}
mcp.register(self, ['dns-com-vmoo-smartcomplete-result'])
def mcp_negotiate_end(self):
# TODO - is there a less intrusive way to do this?
self.mcp.connection.input_pane.tab_completion.completers = self
def request(self, callback, prefix, suffix = ""):
request_id = str(wx.NewId())
self.mcp.server_notify('dns-com-vmoo-smartcomplete-request', {
'id' : request_id,
'prefix' : prefix,
'suffix' : suffix,
'channel' : '0',
})
self.callbacks[request_id] = (prefix, callback)
def dispatch(self, msg):
if msg.message == 'dns-com-vmoo-smartcomplete-result': self.do_result(msg)
def do_result(self, msg):
request_id = msg.data['id']
to_complete, callback = self.callbacks.pop(request_id, None)
if callback:
completions = msg.data.get('options')
if completions:
completions = list(set(completions))
completions.sort()
callback(msg.data.get('startpos'), to_complete, completions)
|
import glob
import re
from pprint import pprint
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import time
import numpy as np
import pandas as pd
import seaborn as sns
best_roc_aucs = {
'accuracy': [],
'accuracy_baseline': [],
'auc': [],
'steps': [],
'time': [],
}
best_accuracies = {
'accuracy': [],
'accuracy_baseline': [],
'auc': [],
'steps': [],
'time': [],
}
data_dir = 'psa_research'
sentences_filepaths = glob.glob(
"labeled_sentences/{}/*.csv".format(data_dir)
)
data = None
count = 100
for i, path in enumerate(sentences_filepaths):
if i > count:
break
if data is None:
data = pd.read_csv(path, encoding='utf-8')
else:
data = pd.concat([data, pd.read_csv(path, encoding='utf-8')])
tf.logging.set_verbosity('ERROR')
fold = KFold(n_splits=5, shuffle=True, random_state=0)
for i, (train_index, test_index) in enumerate(fold.split(data, data.has_citation)):
print('Fold Number:', i)
best_roc_auc = {}
best_accuracy = {}
test_df = data.iloc[test_index]
train_df = data.iloc[train_index]
# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.estimator.inputs.pandas_input_fn(
train_df, train_df["has_citation"], num_epochs=None, shuffle=True)
# Prediction on the whole training set.
predict_train_input_fn = tf.estimator.inputs.pandas_input_fn(
train_df, train_df["has_citation"], shuffle=False)
# Prediction on the test set.
predict_test_input_fn = tf.estimator.inputs.pandas_input_fn(
test_df, test_df["has_citation"], shuffle=False)
embedded_text_feature_column = hub.text_embedding_column(
key="processed_text",
#module_spec="https://tfhub.dev/google/nnlm-en-dim128/1"
module_spec="https://tfhub.dev/google/Wiki-words-250/1"
)
estimator = tf.estimator.DNNClassifier(
hidden_units=[250, 100],
feature_columns=[embedded_text_feature_column],
n_classes=2,
optimizer=tf.train.AdagradOptimizer(learning_rate=0.003),
dropout=0.1,
model_dir='tf_model_{}'.format(i)
)
start = time.time()
out = []
for epoch in range(25):
print('epoch', epoch)
estimator.train(input_fn=train_input_fn, steps=1000)
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)
out.append(test_eval_result)
print(test_eval_result)
tic = time.time()
if test_eval_result['auc'] > best_roc_auc.get('auc', 0):
best_roc_auc = {
'auc': test_eval_result['auc'],
'accuracy': test_eval_result['accuracy'],
'accuracy_baseline': test_eval_result['accuracy_baseline'],
'steps': test_eval_result['global_step'],
'time': tic - start,
}
if test_eval_result['accuracy'] > best_accuracy.get('accuracy', 0):
best_accuracy = {
'auc': test_eval_result['auc'],
'accuracy': test_eval_result['accuracy'],
'accuracy_baseline': test_eval_result['accuracy_baseline'],
'steps': test_eval_result['global_step'],
'time': tic - start,
}
for d, d_o_l in (
(best_roc_auc, best_roc_aucs),
(best_accuracy, best_accuracies),
):
for key in ['accuracy', 'accuracy_baseline', 'auc', 'steps', 'time']:
d_o_l[key].append(d[key])
pprint(out)
print(best_roc_aucs)
print(best_accuracies)
best_roc_auc_row = {
'accuracy': np.mean(best_roc_aucs['accuracy']),
'accuracy_std': np.std(best_roc_aucs['accuracy']),
'algo_name': 'DNN',
'max_features': None,
'feature_selector': None,
'weights': None,
'name': 'DNN_{}steps'.format(int(np.mean(best_roc_aucs['steps']))),
'roc_auc': np.mean(best_roc_aucs['auc']),
'roc_auc_std': np.std(best_roc_aucs['auc']),
'time': np.mean(best_roc_aucs['time']),
}
best_accuracy_row = {
'accuracy': np.mean(best_accuracies['accuracy']),
'accuracy_std': np.std(best_accuracies['accuracy']),
'algo_name': 'DNN',
'max_features': None,
'feature_selector': None,
'weights': None,
'name': 'DNN_{}steps'.format(int(np.mean(best_accuracies['steps']))),
'roc_auc': np.mean(best_accuracies['auc']),
'roc_auc_std': np.std(best_accuracies['auc']),
'time': np.mean(best_accuracies['time']),
}
pd.DataFrame([best_roc_auc_row]).to_csv(
'results/{}/dnn_best_roc_auc.csv'.format(data_dir)
)
pd.DataFrame([best_accuracy_row]).to_csv(
'results/{}/dnn_best_accuracy.csv'.format(data_dir)
) |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
pos = 0
jump_count = 0
while pos < len(c)-1:
pos = pos+2 if len(c) - pos -1 >= 2 else pos+1
if c[pos] == 1:
pos -= 1
jump_count += 1
return jump_count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
|
__author__ = 'maguowei'
|
# Copyright (c) 2017-2020, University of Tennessee. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# This program is free software: you can redistribute it and/or modify it under
# the terms of the BSD 3-Clause license. See the accompanying LICENSE file.
'''
Tags project with version based on current date, and creates tar file.
Tag is yyyy.mm.rr, where yyyy.mm is current year and month,
and rr is a release counter within current month, starting at 0.
Version is an integer yyyymmrr, to allow simple comparisons.
Requires Python >= 3.7.
Usage:
#!/usr/bin/env python
import release
release.make( 'project', 'version.h', 'version.c' )
'project' is the name of the project, used for the tar filename.
'version.h' is a header containing the following #define for the version,
with PROJECT changed to the project's name.
// Version is updated by make_release.py; DO NOT EDIT.
// Version 2020.02.00
#define PROJECT_VERSION 20200200
'version.c' is a source file containing the following #define for the id:
// PROJECT_ID is the Mercurial or git commit hash ID, either
// defined by `hg id` or `git rev-parse --short HEAD` in Makefile,
// or defined here by make_release.py for release tar files. DO NOT EDIT.
#ifndef PROJECT_ID
#define PROJECT_ID "unknown"
#endif
const char* id() {
return PROJECT_ID;
}
int version() {
return PROJECT_VERSION;
}
Currently, this assumes Mercurial (hg). Porting to git should be simple.
Steps this takes:
1. Marks version in repo.
- Saves the Version to version.h.
- Updates copyright year in all files.
- Commits that change.
- Tags that commit.
2. Prepares archive in directory project-tag.
- Saves the `hg id` to version.c.
- Generates Doxygen docs.
3. Generates tar file project-tag.tar.gz
'''
from __future__ import print_function
import sys
MIN_PYTHON = (3, 7)
assert sys.version_info >= MIN_PYTHON, "requires Python >= %d.%d" % MIN_PYTHON
import os
import datetime
import re
import subprocess
from subprocess import PIPE
#-------------------------------------------------------------------------------
def myrun( cmd, **kwargs ):
'''
Simple wrapper around subprocess.run(), with check=True.
If cmd is a str, it is split on spaces before being passed to run.
Prints cmd.
kwargs are passed to run(). Set `stdout=PIPE, text=True` if you want the
output returned.
'''
if (type(cmd) is str):
cmd = cmd.split(' ')
print( '\n>>', ' '.join( cmd ) )
return subprocess.run( cmd, check=True, **kwargs ).stdout
# end
#-------------------------------------------------------------------------------
def file_sub( filename, search, replace, **kwargs ):
'''
Replaces search regexp with replace in file filename.
'''
#print( 'reading', filename )
txt = open( filename ).read()
txt2 = re.sub( search, replace, txt, **kwargs )
if (txt != txt2):
#print( 'writing', filename )
open( filename, mode='w' ).write( txt2 )
# end
#-------------------------------------------------------------------------------
def make( project, version_h, version_c ):
'''
Makes project release.
'''
today = datetime.date.today()
year = today.year
month = today.month
release = 0
# Search for latest tag this month and increment release if found.
tags = myrun( 'hg tags -q', stdout=PIPE, text=True ).rstrip().split( '\n' )
tags.sort( reverse=True )
pattern = r'%04d\.%02d\.(\d+)' % (year, month)
for tag in tags:
s = re.search( pattern, tag )
if (s):
release = int( s.group(1) ) + 1
break
tag = '%04d.%02d.%02d' % (year, month, release)
version = '%04d%02d%02d' % (year, month, release)
print( '\n>> Tag '+ tag +', Version '+ version )
#--------------------
# Update version in version_h.
# TODO update in CMakeLists.txt?
print( '\n>> Updating version in:', version_h )
file_sub( version_h,
r'// Version \d\d\d\d.\d\d.\d\d\n(#define \w+_VERSION) \d+',
r'// Version %s\n\1 %s' % (tag, version), count=1 )
# Update copyright in all files.
files = myrun( 'hg status -acmn', stdout=PIPE, text=True ).rstrip().split( '\n' )
print( '\n>> Updating copyright in:', end=' ' )
for file in files:
print( file, end=', ' )
file_sub( file,
r'Copyright \(c\) (\d+)(-\d+)?, University of Tennessee',
r'Copyright (c) \1-%04d, University of Tennessee' % (year) )
# end
print()
myrun( 'hg diff' )
print( '>> Do changes look good? Continue building release [yn]? ', end='' )
response = input()
if (response != 'y'):
print( '>> Release aborted. Please revert changes as desired.' )
exit(1)
myrun( ['hg', 'commit', '-m', 'Version '+ tag] )
myrun( ['hg', 'tag', tag] )
#--------------------
# Prepare tar file.
dir = project +'-'+ tag
print( '\n>> Preparing files in', dir )
myrun( 'hg archive -r '+ tag +' '+ dir )
os.chdir( dir )
# Update hash ID in version_c.
id = myrun( 'hg id -i -r '+ tag, stdout=PIPE, text=True ).strip()
print( '\n>> Setting ID in:', version_c )
file_sub( version_c,
r'^(#define \w+_ID) "unknown"',
r'\1 "'+ id +'"', count=1, flags=re.M )
# Build Doxygen docs. Create dummy 'make.inc' to avoid 'make config'.
open( 'make.inc', mode='a' ).close()
myrun( 'make docs' )
os.unlink( 'make.inc' )
os.chdir( '..' )
tar = dir + '.tar.gz'
print( '\n>> Creating tar file', tar )
myrun( 'tar -zcvf '+ tar +' '+ dir )
# end
|
from re import compile, match
REGEX = compile(r'^(?:([a-zA-Z]+) ?(\d+)|(\d+) ?([a-zA-Z]+))$')
VALUES = {
'USD': [1, 2, 5, 10, 20, 50, 100], 'CUP': [1, 3, 5, 10, 20, 50, 100],
'RUB': [10, 50, 100, 500, 1000, 5000], 'UAH': [1, 2, 5, 10, 50, 100, 500],
'SOS': [1000], 'EUR': [5, 10, 20, 50, 100, 200, 500]}
def atm(value):
amount, currency = sorted(a for a in match(REGEX, value).groups() if a)
currency = currency.upper()
result = []
try:
denoms = VALUES[currency]
except KeyError:
return 'Sorry, have no {}.'.format(currency)
rem = int(amount)
for denom in reversed(denoms):
quo, rem = divmod(rem, denom)
if quo:
result.append('{} * {} {}'.format(quo, denom, currency))
if rem:
return 'Can\'t do {} {}. Value must be divisible by {}!'\
.format(amount, currency, denoms[0])
return ', '.join(result)
|
"""
<Function 2: Duplication Check>
Author: Osiel Ramirez
Authored on: 12/22/2020
1. Receive the dictionary from the 1st function.
2. Check if the dictionary value [Mac address] exists more than once.
3. If a duplication is found, print that machine is being ARP spoofed
4. Exit elegantly without fail.
"""
import extract
import logger
# import the other functions from the thier respective
# files.
spacer = "----------------------------------------------------------------------"
# Spacer is to clean up the look of thr output.
def spoofDetect(arpTable):
macList = list(arpTable.values())
# Convert the received into a list with only the mac-addresses.
detectList = []
# Instantiate the empty list that will be used to detect duplications.
for item in macList:
# Each element from macList will be iterated through.
if item in detectList:
# Here the item from the macList will be compared to the detectList
# to check for duplicates
print("Status: ATTACK DETECTED - ARP SPOOF <MAC: {}>".format(item))
logger.logs("Status: ATTACK DETECTED - ARP SPOOF <MAC: {}>".format(item))
break
# Lines 29-31 print the detected attack to the console and send a copy
# to the log file via the logger.log function.
else:
detectList.append(item)
# If an item is not duplicated, it adds it to the detectList.
if len(macList) == len(detectList):
# If the the 2 lists are equal in length, it signals that
# the arp table is completely scanned.
print(spacer)
print("Status: SYSTEM SECURE - ARP SAFE")
print(spacer)
logger.logs("Status: SYSTEM SECURE - ARP SAFE")
# When there are no duplicated MACs detected after running
# through the entire macList address, print and log that
# the system is secure.
if __name__ == '__main__':
spoofDetect(extract.arpExtract())
# Magic main is used here to force the code to be executed from this file.
# SpoofDetect function is called with the arpExtract method as an arguent. |
from Node import Node
class BST:
def __init__(self):
self.__root = None
def isEmpty(self):
return self.__root == None
def insertNodes(self,other):
new_node = Node()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Hynes Stephen, sthynes8@gmail.com
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""This Class is a plugin for Logentries which will forward data to your Logentris account.
A free 30 Day Trial can be started here https://logentries.com/quick-start/
"""
import urllib2
import json
import re
import time
import datetime
from collections import deque
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['broker'],
'type': 'log_data',
'external': False,
}
def get_instance(plugin):
logger.debug("Get a Logentries broker for plugin %s" % plugin.get_name())
instance = Logentries_Broker(plugin)
return instance
class Logentries_Broker(BaseModule):
# Class for Logentries Broker
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.uuid_regex = '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
self.host = getattr(
modconf, 'host', 'https://js.logentries.com/v1/logs/')
self.token = getattr(modconf, 'token', None)
if self.token is None:
self.is_uuid = bool(re.match(self.uuid_regex, self.token))
if not self.is_uuid:
raise Exception
self.endpoint = self.host + self.token
self.queue_size = getattr(modconf, 'queue_size', 10)
self.queue = deque([])
def init(self):
logger.info(
"[Logentries Broker] I init the %s server connection to %s" %
(self.get_name(), str(self.endpoint)))
def send_data(self):
while len(self.queue) > 0:
data = self.queue.popleft()
timestamp = datetime.datetime.fromtimestamp(
time.getime()).strftime('%H:%M:%S %d-%m-%Y')
msg = json.dumps(
{"event": {'timestamp': timestamp, 'data': data['log']}})
req = urllib2.Request(self.endpoint, msg)
try:
urllib2.urlopen(req)
except urllib2.URLError as e:
logger.error("Can't send log message to Logentries %s", e)
def manage_logentries_brok(self, b):
data = b.data
if data is None:
return
self.queue.append(data)
if len(self.queue) >= self.queue_size:
logger.debug("Queue is full, sending logs to Logentries")
self.send_data()
|
import os
from os.path import dirname, basename, join, abspath, relpath
import platform
from datetime import date, datetime
from .json_uploader import json_uploader
DEBUG = os.environ['DEBUG'] == 'True' if 'DEBUG' in os.environ else False
EMULATE_UCONTROLLERS = DEBUG
VERSION = '1.0.3.6'
PROJECT_PATH = dirname(dirname(abspath(__file__)))
CONFIG_RELPATH = relpath(abspath(__file__), PROJECT_PATH)
MAIN_RELPATH = 'start.py'
STATION_INFO_RELPATH = 'station_info.cfg'
SECURITY_TOKEN_RELPATH = join(dirname(CONFIG_RELPATH), 'security_token.cfg')
# In minutes
if DEBUG:
WAKEUP_PERIOD_MIN = 0.05
WAKEUP_PERIOD_MAX = 0.10
else:
WAKEUP_PERIOD_MIN = 20
WAKEUP_PERIOD_MAX = 30
# hh:mm format
def GET_NIGHT_INTERVAL(when):
Y = 2000 # dummy leap year to allow input X-02-29 (leap day)
SEASONS = [('winter', (date(Y, 1, 1), date(Y, 3, 20)), ('16:00', '08:00')),
('spring', (date(Y, 3, 21), date(Y, 6, 20)), ('18:00', '07:00')),
('summer', (date(Y, 6, 21), date(Y, 9, 22)), ('19:00', '06:00')),
('autumn', (date(Y, 9, 23), date(Y, 12, 20)), ('17:00', '07:00')),
('winter', (date(Y, 12, 21), date(Y, 12, 31)), ('16:00', '08:00'))]
if isinstance(when, datetime):
when = when.date()
when = when.replace(year=Y)
return next(nighttime for season, (start, end), nighttime in SEASONS if start <= when <= end)
# Preserve the following station specific files after update
PRESERVE_FILES = [
STATION_INFO_RELPATH,
join(CONFIG_RELPATH, 'json_uploader', json_uploader.JsonUploader.DB_FILENAME),
SECURITY_TOKEN_RELPATH
]
if DEBUG:
SERVER_URL = 'http://0.0.0.0:8000'
else:
if platform.node() == 'pmg-001':
SERVER_URL = 'https://10.51.0.54'
else:
SERVER_URL = 'https://meteori.petnica.rs:1143'
URL_REGISTER = SERVER_URL + '/station_register'
URL_DATA = SERVER_URL + '/station_data'
URL_VERSION = SERVER_URL + '/station_version'
URL_CODE_DOWNLOAD = SERVER_URL + '/station_code_download'
WELCOME_MESSAGE = """
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* ______ ___ ___ _____ *
* | ___ \| \/ || __ \ *
* | |_/ /| . . || | \/ *
* | __/ | |\/| || | __ *
* | | | | | || |_\ \ *
* \_| \_| |_/ \____/ *
* Meteor network station *
* *
* v{} *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
""".format(VERSION)
|
import numpy as np
import math
import matplotlib.pyplot as plt
#plt.switch_backend('Qt4Agg')
# read in and plot
rg = np.loadtxt("langevin/rg.txt", skiprows=2)
rg2 = np.loadtxt("dpd/rg.txt", skiprows=2)
fig, ax = plt.subplots()
ax.plot(rg[:,0], rg[:,1], "r-", label="Rg, Langevin")
ax.plot(rg2[:,0], rg2[:,1], "b-", label="Rg, DPD")
plt.legend(loc="best", frameon=False)
ax.set_xlabel("time steps")
ax.set_ylabel("Rg")
plt.savefig("rg.png", dpi=200, bbox_inches="tight")
plt.show()
|
from fractions import gcd
def nbr_of_laps(x, y):
lcm = (x * y) / gcd(x, y)
return [lcm / x, lcm / y]
|
# Import all libraries and classes
import os
from random import randint
import pygame
from pygame.locals import *
from Collision import GameCheck
from Fish import Fish
from Fruit import Fruit
from Slither import Slither
# Position of game screen
x_pos = 300
y_pos = 120
cmd = 'wmic desktopmonitor get screenheight, screenwidth'
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x_pos, y_pos)
pygame.init()
clock = pygame.time.Clock()
# Create Class with game mechanic
class App:
windowWidth = 880
windowHeight = 616
# Init main parameters to work with
def __init__(self):
self._running = True
self._display_surf = None
self._background_surf = None
self._snake_easy_surf = None
self._snake_normal_surf = None
self._snake_hard_surf = None
self._image_surf = None
self._apple_surf = None
self._orange_surf = None
self._fish_surf = None
self.game = GameCheck()
self.slither = Slither(3)
self.orange = Fruit(10, 5)
self.apple = Fruit(10, 5)
self.fish = Fish(1)
self.FPS = 0
self.score = 0
# Load photos of all elements
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode((self.windowWidth, self.windowHeight))
self._background_surf = pygame.image.load("background.jpg")
pygame.display.set_caption('Snake game')
self._running = True
self._snake_easy_surf = pygame.image.load("body_easy.png").convert()
self._snake_normal_surf = pygame.image.load("body_normal.png").convert()
self._snake_hard_surf = pygame.image.load("body_hard.png").convert()
self._image_surf = pygame.image.load("snake.png").convert()
self._apple_surf = pygame.image.load("apple.png").convert()
self._orange_surf = pygame.image.load("orange.jpg").convert()
self._fish_surf = pygame.image.load("fish.png").convert()
# staticmethod don`t require class commands
@staticmethod
# Define size and type of text
def text_objects(text, colour, size="small"):
global text_surface
font_name = pygame.font.match_font('arial')
small_font = pygame.font.SysFont(font_name, 35)
med_font = pygame.font.SysFont(font_name, 45)
large_font = pygame.font.SysFont(font_name, 95)
if size == "small":
text_surface = small_font.render(text, True, colour)
if size == "medium":
text_surface = med_font.render(text, True, colour)
if size == "large":
text_surface = large_font.render(text, True, colour)
return text_surface, text_surface.get_rect()
# Set position of text on Screen
def message_to_screen(self, msg, colour, y_displace=0, size="small"):
text_surface, text_rectangle = self.text_objects(msg, colour, size)
text_rectangle.center = (int(self.windowWidth / 2), int(self.windowHeight / 2) + y_displace)
self._display_surf.blit(text_surface, text_rectangle)
# Show Window When you Loose
def after_game(self, way):
after = True
while after:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
os.system("python game.py")
pygame.quit()
quit()
if event.key == pygame.K_m:
os.system("python start.py")
pygame.quit()
quit()
if event.key == pygame.K_q:
pygame.quit()
quit()
pygame.display.set_caption('Snake game')
self._display_surf = pygame.display.set_mode((self.windowWidth, self.windowHeight))
self._background_surf = pygame.image.load("start_font.jpg")
self._display_surf.blit(self._background_surf, (0, 0))
if way == 1:
self.message_to_screen("You hit the board", (43, 88, 12), -170, "large")
elif way == 2:
self.message_to_screen("You eat yourself", (43, 88, 12), -170, "large")
else:
self.message_to_screen("You was eaten by Fish", (43, 88, 12), -170, "large")
self.message_to_screen('Your score: ' + str(self.score), (0, 0, 0), -100, "medium")
# To show players score
HiScore = 'Score.txt'
with open(HiScore, 'r') as file:
try:
high_score = str(file.read())
except:
high_score = 0
if self.score < int(high_score):
self.message_to_screen('High Score: ' + str(high_score), (0, 0, 0), -50, "medium")
else:
high_score = self.score
self.message_to_screen('New High Score: ' + str(high_score), (0, 0, 0), -50, "medium")
with open(HiScore, 'w') as file:
file.write(str(self.score))
self.message_to_screen("Can you beat your score?", (0, 0, 0), 0)
self.message_to_screen("It`s time to show your best!", (0, 0, 0), 40)
self.message_to_screen("Go to main Menu: Press M", (0, 0, 0), 100)
self.message_to_screen("Press 'Space' to Start again or Q to quit.", (0, 0, 0), 140)
pygame.display.update()
# Check if game is on loop
def on_event(self, event):
if event.type == QUIT:
self._running = False
#
def on_loop(self):
self.slither.update()
self.fish.update()
# does snake eat apple?
for i in range(0, self.slither.length):
if self.game.isCollision(self.apple.x, self.apple.y, self.slither.x[i], self.slither.y[i], 30):
self.apple.x = randint(2, 18) * 44
self.apple.y = randint(2, 12) * 44
self.slither.length += 1
self.score += 1
# does snake eat orange
for i in range(0, self.slither.length):
if self.game.isCollision(self.orange.x, self.orange.y, self.slither.x[i], self.slither.y[i], 30):
self.orange.x = randint(2, 18) * 44
self.orange.y = randint(2, 12) * 44
self.slither.length += 5
self.score += 5
# does snake collide with fish?
for i in range(0, self.slither.length - 5):
if self.game.isCollision(self.fish.x[0], self.fish.y[0], self.slither.x[i], self.slither.y[i], 30):
if self.fish.x[0] == self.slither.x[0]:
self.after_game(3)
else:
for lenght in range(0, self.slither.x[i]):
self.slither.length = i
# does snake collide with itself?
if self.slither.length <= 5:
for i in range(2, self.slither.length):
if self.game.isCollision(self.slither.x[0], self.slither.y[0], self.slither.x[i], self.slither.y[i],30):
self.after_game(2)
else:
for i in range(2, self.slither.length - 5):
if self.game.isCollision(self.slither.x[0], self.slither.y[0], self.slither.x[i], self.slither.y[i],30):
self.after_game(2)
# does snake collide with board?
if (0 > self.slither.x[0] or self.slither.x[0] > 836) or (0 > self.slither.y[0] or self.slither.y[0] > 572):
self.after_game(1)
pass
# Define Pause in Game
def pause(self):
paused = True
self.message_to_screen("Paused", (0, 0, 0), -100, size="large")
self.message_to_screen("Press C to continue playing or Q to quit", (0, 0, 0), 25)
pygame.display.update()
while paused:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
paused = False
elif event.key == pygame.K_q:
pygame.quit()
quit()
# Draw Game Score
def draw_score(self, surf, text, size, x, y):
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
if text == '0':
text = 0
curr_score = int(text)
HiScore = 'Score.txt'
with open(HiScore, 'r') as file:
try:
high_score = str(file.read())
except:
high_score = 0
frame = pygame.Rect(10, 8, 145, 46)
pygame.draw.rect(self._display_surf, [0, 0, 0], frame, 3)
text_surface = font.render('Current score: ' + str(curr_score), True, (0, 0, 0))
surf = self._display_surf.blit(text_surface, (x, y))
if self.score >= int(high_score):
high_score = curr_score
text_surface = font.render('New High score: ' + str(high_score), True, (0, 0, 0))
surf = self._display_surf.blit(text_surface, (x, y + 20))
else:
text_surface = font.render('Last high score: ' + str(high_score), True, (0, 0, 0))
surf = self._display_surf.blit(text_surface, (x, y + 20))
# Show all Elements of Game
def on_render(self):
self._display_surf.blit(self._background_surf, (0, 0))
# If Score become Higher Game Become Harder
if 0 <= self.score < 150:
self.slither.draw(self._display_surf, self._snake_easy_surf)
self.FPS = 10
elif 150 <= self.score < 300:
self.slither.draw(self._display_surf, self._snake_normal_surf)
self.FPS = 17
else:
self.slither.draw(self._display_surf, self._snake_hard_surf)
self.FPS = 25
self.orange.draw(self._display_surf, self._orange_surf)
self.apple.draw(self._display_surf, self._apple_surf)
self.fish.draw(self._display_surf, self._fish_surf)
self.draw_score(self._display_surf, str(self.score), 18, 20, 10)
pygame.display.flip()
# Close all process
def on_cleanup(self):
pygame.quit()
# Main Loop
def on_execute(self):
if self.on_init() == False:
self._running = False
while (self._running):
pygame.event.pump()
keys = pygame.key.get_pressed()
if (keys[K_RIGHT] or keys[K_d]):
self.slither.moveRight()
if (keys[K_LEFT] or keys[K_a]):
self.slither.moveLeft()
if (keys[K_UP] or keys[K_w]):
self.slither.moveUp()
if (keys[K_DOWN] or keys[K_s]):
self.slither.moveDown()
if (keys[K_ESCAPE] or keys[K_q]):
self._running = False
if (keys[K_p]):
self.pause()
self.on_loop()
self.on_render()
clock.tick(self.FPS)
self.on_cleanup()
# It`s just wishes of Pycharm
@property
def display_surf(self):
return self._display_surf
@property
def background_surf(self):
return self._background_surf
# Init Game
if __name__ == "__main__":
theApp = App()
theApp.on_execute()
|
#Time Complexity: O(n)
#Space Complexity: O(1)
#Did this code successfully run on Leetcode : Yes
#Any problem you faced while coding this : No
class Solution:
def rob(self, nums: List[int]) -> int:
total=0
n=len(nums)
n = len(nums)
if n == 0:
return 0
if n == 1:
return nums[0]
d = [0 for _ in range(n)]
d[0] = nums[0]
d[1] = max(nums[0], nums[1])
for i in range(2, n):
d[i] = max(d[i - 1], d[i - 2] + nums[i])
print(d[i])
return d[n - 1] |
from django.contrib import admin
from .models import Profile, FollowList
admin.site.register(Profile)
admin.site.register(FollowList)
|
from fabric.api import env
env.shell = '/bin/sh -c '
DEBUG = 1
import setup
import deploy
import rollback
import hostinfo
#TODO: shuold make some install/register mechanism
T = [setup.setup,
deploy.deploy,
deploy.ideploy,
deploy.check,
rollback.rollback,
hostinfo.hostinfo,
]
if DEBUG:
T.append(setup.exterminate)
tasks = [ cls() for cls in T ]
for i, t in enumerate(tasks):
t.name = t.__class__.__name__
exec "task_%d = tasks[%d]" % (i, i) #TODO: it's weird
|
#-*- coding: utf-8 -*-
"""referrence:
1. https://blog.csdn.net/zhupenghui176/article/details/109097737
2. https://www.jb51.net/LINUXjishu/457748.html ##如何解决僵尸进程及其原理
"""
import os
import time
import signal
print("main main pid")
print(os.getppid())
print("main pid:%d" % os.getpid())
def fork(cmd, times=3):
r, w = os.pipe()
pid = os.fork()
if pid:
os.close(w)
return pid, r
else:
os.close(r)
os.close(0)
os.dup2(w, 1)
os.dup2(w, 2)
cmd = cmd.split()
print("child pid: %d" % os.getpid())
print("the parent pid of child: %d" % os.getppid())
for _ in range(times):
time.sleep(0.1)
os._exit(0) # 如果不退出子进程,子进程会继续执行pid, r = fork("ping 127.0.0.1"),从而报错
#raise Exception("afsaf")
#os.execlp(cmd[0], cmd[0], *cmd[1:])
res = []
res.append(fork("ping 127.0.0.1", 5))
res.append(fork("ping 127.0.0.1"))
# 如果没有子进程会报错, 有子进程但是没有停止的,返回(0, 0),有停止的子进程(pid, 256) WNOHANG(wait no hung) 其值为1
# option=os.WNOHANG, 如果有两个子进程结束了会如何? 答:会返回pid较小的那一个
# option=0, 如果有两个子进程结束了会如何? 答:会返回pid较小的那一个
# waitpid返回的第二个参数表示的是进程结束的方式 o: os._exit(0)正常退出, 256:os._exit(1)异常退出,对应python err
# wait_res = os.waitpid(-1, os.WNOHANG)
# print(wait_res)
# time.sleep(1) #wait for all subprocess stop
for _ in res:
ret = os.waitpid(-1, os.WNOHANG)
print("detect subprocess over%s" % str(ret))
try:
ret = os.waitpid(-1, 0)
print(ret)
except:
print("无正在运行的子程序")
for i in res:
print(os.read(i[1], 1024))
# if not res[0]:
# os.kill(pid, signal.SIGINT) |
from flaskbox.helpers import create_init_file
def test_init_file(tmpdir):
"""Check if flaskbox.yml file is created correct
"""
file = tmpdir.join('flaskbox.yml')
create_init_file()
assert file
|
# Find the number of composite integers, n < 10^8, that have precisely two,
# not necessarily distinct, prime factors.
from math import floor, sqrt
LIMIT = 100000000
# A semiprime is a composite number that has precisely two, not necessarily
# distinct prime factors.
def countSemiPrime():
numPrimeFactorSieve = findNumPrimeFactor()
numSemi = 0
for n in range(2, LIMIT):
if numPrimeFactorSieve[n] == 2:
numSemi += 1
return numSemi
# Let f(n) be the number of prime factors (not necessarily distinct) of all integers
# 0 <= n < 10^8.
# We find min(f(n), 3) for each 0 <= n < 10^8.
# Methodology: Modify Sieve of Eratosthenes Algorithm.
def findNumPrimeFactor():
numPrimeFactorSieve = [0]*LIMIT
squareLimit = floor(sqrt(LIMIT))
cubeLimit = floor(LIMIT ** (1/3))
for n in range(2, LIMIT):
if numPrimeFactorSieve[n] == 0:
nextNum = n
powerTwo = 0
powerThree = 0
if n < squareLimit:
powerTwo = n * n
if n < cubeLimit:
powerThree = powerTwo * n
while nextNum < LIMIT:
if numPrimeFactorSieve[nextNum] <= 2:
if powerThree != 0 and nextNum % powerThree == 0:
numPrimeFactorSieve[nextNum] += 3
elif powerTwo != 0 and nextNum % powerTwo == 0:
numPrimeFactorSieve[nextNum] += 2
else:
numPrimeFactorSieve[nextNum] += 1
nextNum += n
return numPrimeFactorSieve
|
t=int(raw_input())
s=0
while t:
ip=int(raw_input())
if ip>0:
s+=ip
t-=1
print s
|
Python 3.6.2 |Anaconda custom (64-bit)| (default, Sep 19 2017, 08:03:39) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> obj={'day':1,'date':1,'week':7,'fortnight':14,'month':1,'year':1,'decade':10,'century':100}
multi={'before':-1,'after':1,'later':1,'next':1,'previous':-1,'this':0,'last':-1}
drx_obj={'today':0,'tomorrow':1,'yesterday':-1}
days={'sunday':0,'monday':1,'tuesday':2,'wednesday':3,'thursday':4,'friday':5,'saturday':6}
|
#!/usr/bin/env python
"""
ox.py : quick checks on photons
===================================
::
In [36]: ox.view(np.int32)[:,3]
Out[36]:
array([[-1507329, 3159, 0, 6400],
[ 2752511, 4430, 1, 6152],
[ 1441791, 4425, 2, 6656],
...,
[ 1376255, 3155, 4997, 6272],
[-1376257, 3157, 4998, 6416],
[ 1376255, 3155, 4999, 6272]], dtype=int32)
bnd_sidx
two int16 nidx phidx flags
In [37]: np.all( ox.view(np.int32)[:,3,2] == np.arange(5000) )
Out[37]: True
In [38]: ox_flags[np.where(ox_flags[:,3] & hismask.code("SD"))]
Out[38]:
array([[-1965950, 3981, 19, 6208],
[-1965941, 4035, 74, 6208],
[-1965892, 4329, 217, 6208],
[-1966004, 3657, 406, 6224],
[-1965891, 4335, 546, 6208],
[-1965899, 4287, 586, 7232],
[-1965913, 4203, 690, 6208],
In [41]: ox_flags[np.where(ox_flags[:,3] & hismask.code("SD"))][:,0] & 0xffff
Out[41]:
array([130, 139, 188, 76, 189, 181, 167, 185, 152, 150, 29, 89, 97, 160, 183, 37, 132, 50, 13, 169, 141, 84, 73, 85, 144, 128, 87, 19, 187, 174, 76, 180, 101, 82, 116, 66, 29, 63,
88, 165, 36, 169, 9, 121, 23, 129, 143], dtype=int32)
In [42]: ox_flags[np.where(ox_flags[:,3] & hismask.code("SD"))][:,0] >> 16
Out[42]:
array([-30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, -30, 30, 30, 30, -30, -30, -30, -30, -30, -30, -30, -30, 30, -30, -30,
-30, -30, -30, -30, 30, -30, -30, -30, -30], dtype=int32)
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.histype import HisType
from opticks.ana.mattype import MatType
from opticks.ana.hismask import HisMask
from opticks.ana.blib import BLib
from opticks.ana.ggeo import GGeo
histype = HisType()
mattype = MatType()
hismask = HisMask()
blib = BLib()
ggeo = GGeo()
def dump_boundaries(ox):
bndidx = (ox[:,3,0].view(np.uint32) >> 16).view(np.int16)[0::2]
u_bndidx, u_bndidx_counts = np.unique(bndidx, return_counts=True)
tot = 0
print("dump_boundaries")
for bnd,bnd_count in sorted(zip(u_bndidx,u_bndidx_counts), key=lambda _:_[1], reverse=True):
name = blib.bname(np.abs(bnd)-1) # subtract 1 to get index as signed boundaries are 1-based
print("%4d : %7d : %s " % (bnd, bnd_count, name))
tot += bnd_count
pass
print("%4s : %7d " % ("TOT",tot))
def dump_sensorIndex(ox):
sidx = (ox[:,3,0].view(np.uint32) & 0xffff).view(np.int16)[0::2]
u_sidx, u_sidx_counts = np.unique(sidx, return_counts=True)
tot = 0
print("dump_sensorIndex")
for sid,sid_count in sorted(zip(u_sidx,u_sidx_counts), key=lambda _:_[1], reverse=True):
print("%4d : %7d : %s " % (sid, sid_count, ""))
tot += sid_count
pass
print("%4s : %7d " % ("TOT",tot))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
os.chdir(sys.argv[1])
log.info("chdir %s " % os.getcwd())
pass
np.set_printoptions(suppress=True, linewidth=200)
ox = np.load("ox.npy")
ph = np.load("ph.npy") # seqhis, seqmat sequence histories for all photons
seqhis = ph[:,0,0]
seqmat = ph[:,0,1]
ox_flags = ox.view(np.int32)[:,3]
ox_lander = ox_flags[ox_flags[:,1] != -1]
print("ox_flags : %s " % repr(ox_flags.shape) )
print("ox_lander : %s : photons landing on sensor volumes " % repr(ox_lander.shape))
dump_boundaries(ox)
#dump_sensorIndex(ox)
for i, oxr in enumerate(ox):
oxf = oxr[3].view(np.int32)
# see okc/OpticksPhotonFlags optixrap/cu/generate.cu
bnd_sidx,nidx,idx,pflg = oxf ## nidx3 will soon become "the one"
nrpo = ggeo.get_triplet_index(nidx)
nidx2,ridx,pidx,oidx = nrpo
assert nidx2 == nidx
#if ridx > 0: continue # skip photons with last intersect on instanced geometry
if ridx == 0: continue # skip photons with last intersect on remainder geometry
bnd = np.int16(bnd_sidx >> 16)
sidx = np.int16(bnd_sidx & 0xffff)
sqh = seqhis[idx] # photon index
sqm = seqmat[idx]
msk = " %15s " % hismask.label(pflg)
his = "( %16x : %30s ) " % (sqh, histype.label(sqh))
mat = "( %16x : %30s ) " % (sqm, mattype.label(sqm))
print(" %5d : %6s %6s : %15s : %s %s %s : %s " % (i, bnd, sidx, oxf[1:], msk,his,mat, nrpo) )
pass
dump_boundaries(ox)
#dump_sensorIndex(ox)
|
from rest_framework import serializers
from . import models
from artuium_server.artwork import serializers as artwork_serializers
from artuium_server.statics import models as statics_models
class RegionSerializer(serializers.ModelSerializer):
class Meta:
model = models.Region
fields = ['name']
class GallerySerializer(serializers.ModelSerializer):
region = RegionSerializer()
class Meta:
model = models.Gallery
fields = ['id', 'name', 'location', 'region', 'address', 'scale', 'website']
class ExhibitionImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.ExhibitionImage
fields = ['id', 'image', 'size']
class ExhibitionSerializer(serializers.ModelSerializer):
artists = artwork_serializers.ArtistSerializer(many = True)
artworks = artwork_serializers.ArtworkSerializer(many = True)
gallery = GallerySerializer()
images = ExhibitionImageSerializer(many = True)
is_liked = serializers.SerializerMethodField()
is_reviewed = serializers.SerializerMethodField()
class Meta:
model = models.Exhibition
fields = ['id', 'name', 'content', 'open_date', 'close_date', 'open_time', 'close_time', 'notopendate', 'fee', 'artists', 'artworks', 'gallery', 'images', 'review_count', 'like_count', 'is_liked', 'total_rate', 'is_reviewed']
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
like_check = statics_models.Like.objects.filter(user = user, exhibition = obj)
if like_check.count() > 0:
return True
else:
return False
return False
def get_is_reviewed(self, obj):
if 'request' in self.context:
request = self.context['request']
user = request.user
review_check = statics_models.Review.objects.filter(author = user, exhibition = obj, deleted = False)
if review_check.count() > 0:
return True
else:
return False
return False |
if __name__ == '__main__':
num = [int(x) for x in raw_input().split(" ")]
count = 0
num.sort()
for i in xrange(0, 3):
if num[i] == num[i+1]:
count += 1
print count
|
import copy
from unittest import mock, skipIf
import pandas as pd
from django.conf import settings
from django.test import TestCase, RequestFactory, Client as Browser, override_settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from core.models import Address
from core.tests.test_helpers import create_address_example
from core.utils import OPERATIONAL_STATES, EXPANSION_STATES, OTHER_STATES
from consent.models import SmsConsent
from custom_auth.models import Client
from custom_auth.tests.test_helpers import (
create_client_example, CLIENT_EXAMPLE_DATA, create_reviewer_example
)
from fit_quiz.views import FIT_QUIZ_SESSION_DATA_PREFIX
from inquiry.forms import InquiryFirstForm, InquiryHomeForm, WizardClientUserCreationForm
from inquiry.models import Inquiry
from inquiry.outcomes import INQUIRY_OUTCOME_SLUG_MAP
from inquiry.tests.test_forms import FIRST_FORM_EXAMPLE_DATA, HOME_FORM_EXAMPLE_DATA
from inquiry.tests.test_utils import create_inquiry_example, UNDESIRABLE_ZIP_CODES
from inquiry.views import InquiryApplyWizard, InquirySubmitted, _get_inquiry_segment_event_data
from stages.models import InquiryInReview
FIRST_DATA = {
"inquiry_apply_wizard-current_step": "first",
"first-street": "20 University Rd",
"first-unit": "Suite 100",
"first-city": "Cambridge",
"first-state": "MA",
"first-zip_code": "02138",
"first-use_case_debts": "on",
"first-use_case_renovate": "on",
"first-use_case_other": "",
"first-email": "",
"submit": "Next",
}
HOME_DATA = {
"inquiry_apply_wizard-current_step": "home",
"home-property_type": "sf",
"home-rent_type": "no",
"home-primary_residence": "True",
"home-ten_year_duration_prediction": "over_10",
"home-home_value": "1000000",
"home-household_debt": "500000",
"submit": "Next",
}
HOMEOWNER_DATA = {
"inquiry_apply_wizard-current_step": "homeowner",
"homeowner-first_name": "Bob",
"homeowner-last_name": "Smith",
"homeowner-referrer_name": "Sarah Dekin",
"homeowner-notes": "I sure hope I get approved!",
'homeowner-when_interested': '7_to_12_months',
"submit": "Next",
}
def submit_inquiry_forms(
browser,
email,
first_overrides=None,
home_overrides=None,
homeowner_overrides=None,
signup_overrides=None
):
first_data = copy.deepcopy(FIRST_DATA)
# always override the email
first_data.update({"first-email": email})
if first_overrides is not None:
first_data.update(first_overrides)
response = browser.post('/inquiry/data/first/', first_data)
if response.context is not None:
return response
home_data = copy.deepcopy(HOME_DATA)
if home_overrides is not None:
home_data.update(home_overrides)
response = browser.post('/inquiry/data/home/', home_data)
if response.context is not None:
return response
homeowner_data = copy.deepcopy(HOMEOWNER_DATA)
if homeowner_overrides is not None:
homeowner_data.update(homeowner_overrides)
response = browser.post('/inquiry/data/homeowner/', homeowner_data)
if response.context is not None:
return response
signup_data = {
"inquiry_apply_wizard-current_step": "signup",
"signup-phone_number": "617-399-0604",
"signup-password1": "testpassword1",
"signup-password2": "testpassword1",
"signup-sms_opt_in": "on",
"signup-agree_to_terms": "on",
"submit": "Finish",
}
if signup_overrides is not None:
signup_data.update(signup_overrides)
response = browser.post('/inquiry/data/signup/', signup_data)
if response.context is not None:
return response
return browser.get('/inquiry/data/done/')
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
class InquiryTemplateTests(TestCase):
def test_first_template(self):
response = self.client.get('/inquiry/data/first/')
self.assertTemplateUsed(response, 'inquiry/first.html')
def test_home_template(self):
response = self.client.get('/inquiry/data/home/')
self.assertTemplateUsed(response, 'inquiry/home.html')
def test_homeowner_template(self):
response = self.client.get('/inquiry/data/homeowner/')
self.assertTemplateUsed(response, 'inquiry/homeowner.html')
def test_signup_template(self):
response = self.client.get('/inquiry/data/signup/')
self.assertTemplateUsed(response, 'inquiry/signup.html')
def test_submitted_template(self):
response = self.client.get('/inquiry/submitted/')
self.assertRedirects(response, '/auth/login/')
@override_settings(
SEGMENT_ENABLED=True,
ZIP_CODE_FORECAST=pd.
read_csv(settings.PROJECT_PATH + '/apps/inquiry/tests/test_data/test_zip_codes.csv')
)
class InquiryApplyWizardTests(TestCase):
def _get_request(self):
request = RequestFactory().get('/fake-path')
SessionMiddleware().process_request(request)
request.session.save() # add session middleware to request
return request
def _check_created_objects(self, email):
self.assertEqual(Client.objects.count(), 1)
client = Client.objects.get()
self.assertEqual(client.user.email, email)
self.assertEqual(
client.full_name_short, '{0} {1}'.format(
HOMEOWNER_DATA['homeowner-first_name'], HOMEOWNER_DATA['homeowner-last_name']
)
)
state = FIRST_DATA['first-state']
self.assertEqual(client.friendly_id[:2], state)
self.assertEqual(Address.objects.count(), 1)
address = Address.objects.get()
self.assertEqual(address.state, state)
self.assertEqual(Inquiry.objects.count(), 1)
inquiry = Inquiry.objects.get()
self.assertEqual(client.inquiry, inquiry)
self.assertEqual(inquiry.address, address)
self.assertTrue(isinstance(client.current_stage, InquiryInReview))
return client
def test_vet_based_on_form_not_first_step(self):
""" tests that only the first step vets based on state """
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_form = mock.Mock()
for form in ['home', 'homeowner', 'signup']:
with mock.patch.object(
mocked_form, 'cleaned_data', {
'state': 'MT',
'zip_code': '01234'
}
):
(outcome_slug, url_name,
vetted_message) = view._vet_based_on_form(form, mocked_form)
self.assertIsNone(outcome_slug)
self.assertEqual(url_name, '')
self.assertEqual(vetted_message, '')
def test_vet_based_on_form_other_state(self):
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_form = mock.Mock()
for state in OTHER_STATES:
with mock.patch.object(
mocked_form, 'cleaned_data', {
'state': state,
'zip_code': '01234'
}
):
(outcome_slug, url_name,
vetted_message) = view._vet_based_on_form('first', mocked_form)
self.assertEqual(outcome_slug, INQUIRY_OUTCOME_SLUG_MAP['1_other_states'])
self.assertEqual(url_name, 'inquiry:outcome')
self.assertEqual(vetted_message, 'rejected other states')
def test_vet_based_on_form_expansion_state(self):
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_form = mock.Mock()
for state in EXPANSION_STATES:
with mock.patch.object(
mocked_form, 'cleaned_data', {
'state': state,
'zip_code': '01234'
}
):
(outcome_slug, url_name,
vetted_message) = view._vet_based_on_form('first', mocked_form)
self.assertEqual(outcome_slug, INQUIRY_OUTCOME_SLUG_MAP['2_expansion_states'])
self.assertEqual(url_name, 'inquiry:outcome')
self.assertEqual(vetted_message, 'rejected expansion states')
def test_vet_based_on_form_undesirable_zip_code(self):
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_form = mock.Mock()
for undesirable_zip_code in UNDESIRABLE_ZIP_CODES:
with mock.patch.object(
mocked_form, 'cleaned_data', {
'state': 'MA',
'zip_code': undesirable_zip_code
}
):
(outcome_slug, url_name,
vetted_message) = view._vet_based_on_form('first', mocked_form)
self.assertEqual(outcome_slug, INQUIRY_OUTCOME_SLUG_MAP['3_undesirable_zip_code'])
self.assertEqual(url_name, 'inquiry:outcome')
self.assertEqual(vetted_message, 'rejected undesirable zip code')
def test_vet_based_on_form(self):
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_form = mock.Mock()
with mock.patch.object(mocked_form, 'cleaned_data', {'state': 'MA', 'zip_code': '02138'}):
(outcome_slug, url_name, vetted_message) = view._vet_based_on_form(
'first', mocked_form
) # yapf: disable
self.assertIsNone(outcome_slug)
self.assertEqual(url_name, '')
self.assertEqual(vetted_message, '')
@mock.patch('inquiry.views.get_state_zip_code_outcome_key')
def test_vet_based_on_form_calls_get_state_zip_code_outcome_key(
self, mocked_get_state_zip_code_outcome_key
):
view = InquiryApplyWizard()
view.request = self._get_request()
mocked_get_state_zip_code_outcome_key.return_value = (None, '')
mocked_form = mock.Mock()
with mock.patch.object(mocked_form, 'cleaned_data', {'state': 'MA', 'zip_code': '02138'}):
(outcome_slug, url_name, vetted_message) = view._vet_based_on_form(
'first', mocked_form
) # yapf: disable
cleaned_data = mocked_form.cleaned_data
mocked_get_state_zip_code_outcome_key.assert_called_once_with(
cleaned_data['state'], cleaned_data['zip_code']
)
def test_first_form_other_state(self):
browser = Browser()
first_data = copy.deepcopy(FIRST_DATA)
for state in OTHER_STATES:
first_data.update({"first-email": 'test+client1@hometap.com', "first-state": state})
response = browser.post('/inquiry/data/first/', first_data)
self.assertRedirects(
response,
'/inquiry/results/rros/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
def test_first_form_expansion_state(self):
browser = Browser()
first_data = copy.deepcopy(FIRST_DATA)
for state in EXPANSION_STATES:
first_data.update({"first-email": 'test+client1@hometap.com', "first-state": state})
response = browser.post('/inquiry/data/first/', first_data)
self.assertRedirects(
response,
'/inquiry/results/rres/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
def test_first_form_operational_state(self):
browser = Browser()
first_data = copy.deepcopy(FIRST_DATA)
for state in OPERATIONAL_STATES:
first_data.update({"first-email": 'test+client1@hometap.com', "first-state": state})
response = browser.post('/inquiry/data/first/', first_data)
self.assertEqual(response.status_code, 302)
def test_first_form_undesirable_zip_code(self):
browser = Browser()
first_data = copy.deepcopy(FIRST_DATA)
for undesirable_zip_code in UNDESIRABLE_ZIP_CODES:
first_data.update({
"first-email": 'test+client1@hometap.com',
'first-zip_code': undesirable_zip_code
})
response = browser.post('/inquiry/data/first/', first_data)
self.assertEqual(response.status_code, 302)
def test_first_form_ok(self):
browser = Browser()
first_data = copy.deepcopy(FIRST_DATA)
first_data.update({"first-email": 'test+client1@hometap.com'})
response = browser.post('/inquiry/data/first/', first_data)
self.assertEqual(response.status_code, 302)
def test_get_email_no_client_session_or_form(self):
# no fit quiz and the user messed up on the first step
view = InquiryApplyWizard()
view.initial_dict = {}
view.request = self._get_request()
self.assertEqual(view._get_email({}, 'first'), '')
def test_get_email_from_first_form(self):
# no fit quiz but the user entered the email before messing up
view = InquiryApplyWizard()
email = 'fit_quiz@ht.com'
# yapf: disable
self.assertEqual(
view._get_email({'email': email, 'password1': 'testpassword1'}, 'first'),
email
)
# yapf: enable
def test_get_email_from_first_form_not_fit_quiz(self):
# user filled a fit quiz but changed the email in the first step
view = InquiryApplyWizard()
request = self._get_request()
fit_quiz_email = 'fit_quiz@ht.com'
request.session['fit_email'] = fit_quiz_email
request.session.save()
view.request = request
first_email = 'test+client1@hometap.com'
# yapf: disable
self.assertEqual(
view._get_email({'email': first_email, 'password1': 'testpassword1'}, 'first'),
first_email
)
# yapf: enable
def test_get_email_from_fit_quiz(self):
# user filled a fit quiz
view = InquiryApplyWizard()
view.initial_dict = {}
# add the fit quiz email cookie to the session
email = 'fit_quiz@ht.com'
request = self._get_request()
request.session['fit_email'] = email
request.session.save()
view.request = request
self.assertEqual(view._get_email({}, 'first'), email)
def test_get_wizard_step_event_data_empty(self):
data = InquiryApplyWizard._get_wizard_step_event_data({}, [], 'first', 'submitted')
self.assertEqual(data, {'tracking_status': 'first screen submitted'})
def test_get_wizard_step_event_data_first(self):
exported_fields = [
'street',
'unit',
'city',
'state',
'zip_code',
'use_case_debts',
'use_case_education',
'use_case_diversify',
'use_case_buy_home',
'use_case_renovate',
'use_case_other',
'use_case_business',
'use_case_emergency',
'use_case_retirement',
'email',
]
data = InquiryApplyWizard._get_wizard_step_event_data({
'street': 'One First Street'
}, exported_fields, 'first', 'submitted')
self.assertEqual(
data, {
'street': 'One First Street',
'tracking_status': 'first screen submitted'
}
)
def test_get_wizard_step_event_data_home(self):
exported_fields = [
'property_type', 'primary_residence', 'rent_type', 'ten_year_duration_prediction',
'home_value', 'household_debt'
]
data = InquiryApplyWizard._get_wizard_step_event_data({
'fake': 'fake'
}, exported_fields, 'home', 'submitted')
self.assertEqual(data, {'tracking_status': 'home screen submitted'})
def test_get_wizard_step_event_data_signup(self):
""" should not include the passwords """
cleaned_data = {
'phone_number': '617-399-0604',
'password1': 'testpassword1',
'password2': 'testpassword1',
'sms_opt_in': 'on',
'agree_to_terms': 'on',
}
exported_fields = ['phone_number', 'sms_opt_in', 'agree_to_terms']
data = InquiryApplyWizard._get_wizard_step_event_data(
cleaned_data, exported_fields, 'signup', 'submitted'
)
cleaned_data.pop('password1')
cleaned_data.pop('password2')
cleaned_data.update({'tracking_status': 'signup screen submitted'})
self.assertDictEqual(data, cleaned_data)
@mock.patch('inquiry.views.InquiryApplyWizard._get_wizard_step_event_data')
@mock.patch('inquiry.views.segment_event')
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
def test_send_wizard_step_segment_event_first(
self, mocked_segment_event, mocked_get_wizard_step_event_data
):
view = InquiryApplyWizard()
form = InquiryFirstForm(data=FIRST_FORM_EXAMPLE_DATA)
self.assertTrue(form.is_valid())
data = dict(FIRST_FORM_EXAMPLE_DATA, **{'tracking_status': 'first screen submitted'})
mocked_get_wizard_step_event_data.return_value = data
view._send_wizard_step_segment_event(
'first', form, form.cleaned_data['email'], 'submitted'
) # yapf: disable
exported_fields = [
'street',
'unit',
'city',
'state',
'zip_code',
'use_case_debts',
'use_case_education',
'use_case_diversify',
'use_case_buy_home',
'use_case_renovate',
'use_case_other',
'use_case_business',
'use_case_emergency',
'use_case_retirement',
'email',
]
cleaned_data = form.cleaned_data
mocked_get_wizard_step_event_data.assert_called_once_with(
cleaned_data, exported_fields, 'first', 'submitted'
)
mocked_segment_event.assert_called_once_with(
cleaned_data['email'], 'investment inquiry - first screen submitted', data
)
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
def test_send_wizard_step_segment_event(self):
view = InquiryApplyWizard()
step_data = {
'first': (InquiryFirstForm, FIRST_FORM_EXAMPLE_DATA),
'home': (InquiryHomeForm, HOME_FORM_EXAMPLE_DATA),
}
email = FIRST_FORM_EXAMPLE_DATA['email']
for step, (form_class, form_data) in step_data.items():
form = form_class(data=form_data)
self.assertTrue(form.is_valid())
view._send_wizard_step_segment_event(step, form, email, 'submitted')
def test_fill_form_initial_empty(self):
session = {}
initial = {}
InquiryApplyWizard.fill_form_initial(session, initial)
self.assertEqual(initial, {})
def test_fill_form_initial(self):
session = {
'not_fit_test': 'not_fit_test',
FIT_QUIZ_SESSION_DATA_PREFIX + 'test': 'fit_test',
}
initial = {}
InquiryApplyWizard.fill_form_initial(session, initial)
self.assertEqual(initial, {'test': 'fit_test'})
initial = {'something': 'something'}
InquiryApplyWizard.fill_form_initial(session, initial)
self.assertEqual(initial, {
'test': 'fit_test',
'something': 'something',
})
def test_multiple_inquiry_submissions_same_email(self):
create_client_example()
data = copy.deepcopy(CLIENT_EXAMPLE_DATA)
data.update({
"password1": "testpassword1",
"password2": "testpassword1",
"agree_to_terms": "on",
})
form = WizardClientUserCreationForm(data=data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['email'][0], 'User with this Email address already exists.')
@mock.patch('inquiry.views._get_inquiry_segment_event_data')
@mock.patch('inquiry.views.segment_event')
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
def test_event_e69_sent(self, mocked_segment_event, mocked_get_inquiry_segment_event_data):
mocked_get_inquiry_segment_event_data.return_value = {'foo': 'bar'}
browser = Browser()
submit_inquiry_forms(browser, 'test+client1@hometap.com')
client = Client.objects.get(user__email='test+client1@hometap.com')
mocked_get_inquiry_segment_event_data.assert_called_once_with(client)
# first four calls are to send the wizard step events
self.assertEqual(mocked_segment_event.call_count, 5)
mocked_segment_event.assert_called_with(
'test+client1@hometap.com', 'investment inquiry - created account - server',
mocked_get_inquiry_segment_event_data.return_value
)
@mock.patch('inquiry.views.InquiryApplyWizard._create_address')
def test_done_address_fails(self, mocked_create_address):
mocked_create_address.side_effect = ValidationError({
'state': ['This field cannot be blank.']
})
browser = Browser()
email = 'test+client1@hometap.com'
response = submit_inquiry_forms(browser, email)
self.assertEqual(response.status_code, 500)
for _class in [Client, Address, Inquiry, InquiryInReview, SmsConsent]:
self.assertFalse(_class.objects.exists())
@mock.patch('inquiry.views.InquiryApplyWizard._create_inquiry')
def test_done_inquiry_fails(self, mocked_create_inquiry):
mocked_create_inquiry.side_effect = ValidationError({
'client': ['Inquiry with this Client already exists.']
})
browser = Browser()
email = 'test+client1@hometap.com'
response = submit_inquiry_forms(browser, email)
self.assertEqual(response.status_code, 500)
for _class in [Client, Address, Inquiry, InquiryInReview, SmsConsent]:
self.assertFalse(_class.objects.exists())
@mock.patch('inquiry.views.transitions.ClientSubmitInquiry')
def test_done_submit_inquiry_fails(self, mocked_client_submit_inquiry):
mocked_client_submit_inquiry.side_effect = ValueError(
"Invalid init value 'client' for Transition object"
)
browser = Browser()
email = 'test+client1@hometap.com'
response = submit_inquiry_forms(browser, email)
self.assertEqual(response.status_code, 500)
for _class in [Client, Address, Inquiry, InquiryInReview, SmsConsent]:
self.assertFalse(_class.objects.exists())
def test_done_sms_consent_true(self):
browser = Browser()
email = 'test+client1@hometap.com'
response = submit_inquiry_forms(browser, email)
self.assertEqual(response.status_code, 302)
client = self._check_created_objects(email)
self.assertEqual(SmsConsent.objects.count(), 1)
sms_consent = SmsConsent.objects.get()
self.assertEqual(client.sms_consent, sms_consent)
def test_done_sms_consent_false(self):
browser = Browser()
email = 'test+client1@hometap.com'
response = submit_inquiry_forms(
browser, email, signup_overrides={
"signup-sms_opt_in": "",
}
)
self.assertEqual(response.status_code, 302)
self._check_created_objects(email)
self.assertFalse(SmsConsent.objects.exists())
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
class SubmitInquiryFormsTests(TestCase):
""" tests the submit_inquiry_forms() helper function """
def _verify_form_error(self, override_data, missing_field):
browser = Browser()
response = submit_inquiry_forms(browser, "test+client1@hometap.com", **override_data)
self.assertIsNotNone(response.context)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', missing_field, 'This field is required.')
def test_incomplete_first(self):
self._verify_form_error({'first_overrides': {'first-street': ''}}, 'street')
def test_incomplete_home(self):
self._verify_form_error({'home_overrides': {'home-rent_type': ''}}, 'rent_type')
def test_incomplete_homeowner(self):
self._verify_form_error(
{'homeowner_overrides': {'homeowner-first_name': ''}}, 'first_name'
) # yapf:disable
def test_incomplete_signup(self):
self._verify_form_error({'first_overrides': {'first-email': ''}}, 'email')
def test_agree_to_terms_off(self):
self._verify_form_error({
'signup_overrides': {
'signup-agree_to_terms': ''
}
}, 'agree_to_terms')
def test_ok(self):
browser = Browser()
response = submit_inquiry_forms(browser, "test+client1@hometap.com")
self.assertRedirects(
response,
'/inquiry/submitted/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
client = Client.objects.get(user__email="test+client1@hometap.com")
self.assertFalse(client.email_confirmed)
@skipIf(settings.REMOTE_ENVIRONMENT, "Remote environments require an IP address")
class InquiryViewTests(TestCase):
def test_multiple_inquiry_submissions_same_session(self):
"""
This tests submitting multiple inquiry submissions during the same session. This is
possible if someone submits one, does not confirm their email, and submits another. (After
confirming, clients are unable to access the inquiry page). CustomFormToolsSessionStorage
is used to fix bug EN-308 which would normally cause a KeyError in this same scenario
(two inquiry wizard session view submissions in the same session). This test makes sure
that that bug no longer occurs.
"""
browser = Browser()
response = submit_inquiry_forms(browser, "test+client1@hometap.com")
self.assertRedirects(
response,
'/inquiry/submitted/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
response = submit_inquiry_forms(browser, "test+client2@hometap.com")
self.assertRedirects(
response,
'/inquiry/submitted/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
def test_inquiry_submissions_same_email(self):
"""
This tests submitting two inquiry submissions with the same email. This is possible
if someone submits one, does not confirm their email, and submits another.
"""
browser = Browser()
response = submit_inquiry_forms(browser, "test+client1@hometap.com")
self.assertRedirects(
response,
'/inquiry/submitted/',
status_code=302,
target_status_code=200,
fetch_redirect_response=False
)
response = submit_inquiry_forms(browser, "test+client1@hometap.com")
self.assertEqual(response.status_code, 200)
def test_event_25_in_context(self):
"""
Test that the datalayer event dict for event 25 is in the context of the inquiry submitted
view when appropriate
"""
browser = Browser()
response = submit_inquiry_forms(browser, "test+client1@hometap.com")
self.assertEqual(response.status_code, 302)
# Note: do not use assertRedirects because it does a GET which would conflict with
# how DataLayerViewMixing.send_event_once_per_session works
self.assertEqual(response.url, '/inquiry/submitted/')
event_partial_string = (
"[{\\u0022city\\u0022: \\u0022Cambridge\\u0022,"
" \\u0022email\\u0022: \\u0022test+client1@hometap.com\\u0022"
)
# test that the event is in the html on the first load to be sent via datalayer to GTM
response = browser.get('/inquiry/submitted/')
self.assertContains(response, event_partial_string)
# TODO(Charlie): restore after completing EN-331
# # test that the event is not in the html on a second load of the page
# response = browser.get('/inquiry/submitted/')
# self.assertNotContains(response, event_partial_string)
class InquirySubmittedMethodTests(TestCase):
def setUp(self):
self.client = create_client_example()
address = create_address_example()
self.inquiry = create_inquiry_example(self.client, address)
request = RequestFactory().get('/fake-path')
request.user = self.client.user
self.view = InquirySubmitted()
self.view.request = request
@mock.patch('inquiry.views._get_inquiry_segment_event_data')
def test_get_event(self, mocked_get_inquiry_segment_event_data):
mocked_get_inquiry_segment_event_data.return_value = {"foo": "bar"}
event_data = self.view.get_event()
mocked_get_inquiry_segment_event_data.assert_called_once_with(self.client)
self.assertDictEqual(
event_data, {
"event": "investment inquiry - created account",
"foo": "bar"
}
)
class GetInquirySegmentEventDataTests(TestCase):
""" Tests for the _get_inquiry_segment_event_data function """
def setUp(self):
self.client = create_client_example()
address = create_address_example()
self.inquiry = create_inquiry_example(self.client, address)
self.expected_event_data = {
'tracking_status': 'investment inquiry submitted',
'email': self.client.email,
'phone': self.client.phone_number,
'email_confirmed': self.client.email_confirmed,
'friendly_id': self.client.friendly_id,
'first_name': self.client.user.first_name,
'last_name': self.client.user.last_name,
'use_case_debts': self.inquiry.use_case_debts,
'use_case_diversify': self.inquiry.use_case_diversify,
'use_case_renovate': self.inquiry.use_case_renovate,
'use_case_education': self.inquiry.use_case_education,
'use_case_buy_home': self.inquiry.use_case_buy_home,
'use_case_business': self.inquiry.use_case_business,
'use_case_emergency': self.inquiry.use_case_emergency,
'use_case_retirement': self.inquiry.use_case_retirement,
'when_interested': self.inquiry.when_interested,
'household_debt': self.inquiry.household_debt,
'referrer_name': self.inquiry.referrer_name,
'property_type': self.inquiry.property_type,
'primary_residence': self.inquiry.primary_residence,
'home_value': self.inquiry.home_value,
'ten_year_duration_prediction': self.inquiry.ten_year_duration_prediction,
'street': self.inquiry.address.street,
'unit': self.inquiry.address.unit,
'city': self.inquiry.address.city,
'state': self.inquiry.address.state,
'zip_code': self.inquiry.address.zip_code,
'sms_allowed': True,
}
def test_success_sms_allowed_true(self):
event_data = _get_inquiry_segment_event_data(self.client)
self.assertDictEqual(event_data, self.expected_event_data)
def test_success_sms_allowed_false(self):
self.client.sms_consent = None
self.client.save()
expected_event_data = copy.deepcopy(self.expected_event_data)
expected_event_data['sms_allowed'] = False
event_data = _get_inquiry_segment_event_data(self.client)
self.assertDictEqual(event_data, expected_event_data)
def test_no_inquiry(self):
client_2 = create_client_example(overrides={"email": "test+client2@hometap.com"})
with self.assertRaises(ObjectDoesNotExist):
_get_inquiry_segment_event_data(client_2)
def test_not_client(self):
reviewer = create_reviewer_example()
with self.assertRaises(AttributeError):
_get_inquiry_segment_event_data(reviewer)
|
import boto3
import json
def get_key_information():
conn = boto3.client('ec2')
regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
key_info = []
for region in regions:
client = boto3.client('kms', region_name=region)
response = client.list_keys()['Keys']
key_names = []
for res in response:
key_names.append(res['KeyId'])
for ids in key_names:
response = client.describe_key(
KeyId=ids
)['KeyMetadata']
req_info = []
req_info.append(response)
key_info.append(req_info)
key_dict = {'Keys': key_info}
key_json = json.dumps(key_dict, indent=4, default=str)
print(key_json)
get_key_information()
|
import json
from django.http import HttpResponse
from django.views import View
from django.contrib.contenttypes.models import ContentType
from django.views.generic import TemplateView
from home.forms import HomeForm, CommentForm
from django.shortcuts import redirect, render
from home.models import Post, Comment, Like, LikeDislike
from friendship.models import Friend
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django_ajax.decorators import ajax
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
@method_decorator(login_required, name='dispatch')
class Home(TemplateView):
template_name = 'home/home.html'
def get(self, request):
form = HomeForm()
friends_and_user1 = Friend.objects.filter(\
Q(from_user=request.user)).values_list('to_user', flat=True)
friends_and_user2 = Friend.objects.filter( \
Q(to_user=request.user)).values_list('from_user', flat=True)
posts = Post.objects.filter( \
Q(user_id__in=friends_and_user1) | \
Q(user_id__in=friends_and_user2) | \
Q(user=request.user)).order_by('-created')
users = User.objects.exclude(id=request.user.id)[1:]
args = {'form': form, 'posts': posts, 'users': users}
return render(request, self.template_name, args)
def post(self, request):
form = HomeForm(request.POST or None, request.FILES or None)
if form.is_valid():
post = form.save(commit=False)
post.user = request.user
post.save()
text = form.cleaned_data['post']
form = HomeForm()
return redirect('home:home')
args = {'form': form, 'text': form.cleaned_data['post']}
return render(request, self.template_name, args)
@login_required()
def comments(request, pk=None):
template_name = 'home/all_comments.html'
cmnt = Comment.objects.filter(post_id=pk).order_by('-created')
post = Post.objects.get(pk=pk)
count = Comment.objects.filter(post_id=pk).count()
context = {'cmnt': cmnt, 'post': post, 'count': count}
return render(request, template_name, context)
# def already_liked_post(user, post):
# return Like.objects.filter(user=user, post=post).exists()
# @ajax
# def likes(request, pk=None):
# if request.method == "POST":
# post = Post.objects.get(id=pk)
#
# if not already_liked_post(request.user, post):
# Like.objects.create(user=request.user, post=post)
# else:
# Like.objects.filter(user=request.user, post=post).delete()
#
# likecount = Like.objects.filter(post=post).count()
# return {'likecount': likecount}
# else:
# post = Post.objects.get(id=pk)
#
# if not already_liked_post(request.user, post):
# Like.objects.create(user=request.user, post=post)
# else:
# Like.objects.filter(user=request.user, post=post).delete()
#
# likecount = Like.objects.filter(post=post).count()
# return {'likecount': likecount}
#
#
# class Write_comments(TemplateView):
# template_name = 'home/write_comments.html'
#
# def get(self, request):
# form = CommentForm()
#
# cmnt = User.comment_set.filter(id=request.user.id)
# args = {'form': form, 'cmnt': cmnt, 'user': request.user}
# return render(request, self.template_name, args)
#
# def post(self, request):
# form = CommentForm(request.POST)
# if form.is_valid():
#
#
# text = form.cleaned_data['comment']
# form = CommentForm()
# return redirect('home:write_comment')
# args = {'form': form}
# return render(request, self.template_name, args)
@login_required()
def write_comments(request, pk=None):
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.user = request.user
comment.post = Post.objects.get(id=pk)
comment.save()
return redirect('home:comments', pk=pk)
else:
return redirect('home:write_comments')
else:
form = CommentForm()
cmnt = Comment.objects.filter(post_id=pk, user_id=request.user.id).order_by('-created')
post = Post.objects.get(pk=pk)
args = {'form': form, 'cmnt': cmnt, 'post': post}
return render(request, 'home/write_comments.html', args)
@login_required
def delete_comment(request, pk=None):
if request.method == 'POST':
cmnt = Comment.objects.get(pk=pk)
post_id = cmnt.post_id
cmnt.delete()
return redirect('home:comments', pk=post_id)
else:
cmnt = Comment.objects.get(pk=pk)
post_id = cmnt.post_id
cmnt.delete()
return redirect('home:write_comments', pk=post_id)
@login_required
def post_by_me(request):
posts = request.user.post_set.all().order_by('-created')
context = {'posts': posts}
return render(request, 'home/post_by_me.html', context)
# @login_required
# def post_liked_by_me(request):
# likes = Like.objects.filter(user=request.user).order_by('-timestamp')
#
# context = {'likes': likes}
# return render(request, 'home/post_liked_by_me.html', context)
@login_required
def delete_post(request, pk=None):
print("bye")
pst = Post.objects.filter(pk=pk)
print(pst)
pst.delete()
print("bye3")
return redirect('home:post_by_me')
class VotesView(View):
model = None # Data Model - Articles or Comments
vote_type = None # Vote type Like/Dislike
def post(self, request, pk):
obj = self.model.objects.get(pk=pk)
# GenericForeignKey does not support get_or_create
try:
likedislike = LikeDislike.objects.get(content_type=ContentType.objects.get_for_model(obj), object_id=obj.id,
user=request.user)
if likedislike.vote is not self.vote_type:
likedislike.vote = self.vote_type
likedislike.save(update_fields=['vote'])
result = True
else:
likedislike.delete()
result = False
except LikeDislike.DoesNotExist:
obj.votes.create(user=request.user, vote=self.vote_type)
result = True
return HttpResponse(
json.dumps({
"result": result,
"like_count": obj.votes.likes().count(),
"dislike_count": obj.votes.dislikes().count(),
"sum_rating": obj.votes.sum_rating()
}),
content_type="application/json"
) |
from selenium.webdriver import Chrome
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def test_KeyBoard():
path = "C:\\chromedriver\\chromedriver.exe"
driver = Chrome(executable_path=path)
driver.get("https://www.theTestingWorld.com/testings")
driver.maximize_window()
#Sending text to textbox
driver.find_element_by_name("fld_username").send_keys("hello")
act=ActionChains(driver)
#act.send_keys(Keys.CONTROL).send_keys("a").perform() # Its not working.
act.send_keys(Keys.TAB).perform()
|
from board2 import *
class Minmax():
def __init__(self, eval):
self._eval = eval
def minmax(self, board, depth=2):
if depth == 0 or board.get_winner() != 0:
return (self._eval(board) * board.playerTurn, None)
children = ((move, board.apply_move(move[0], *move[1])) for move in board.moves())
return max(
(
# should be running negamax since we negate the score from 1 level down.
-self.minmax(board, depth - 1)[0],
move
) for move, board in children
)
def __call__(self, board):
return self.minmax(board, depth=2)
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals # Python提供了__future__模块,把下一个新版本的特性导入到当前版本,于是我们就可以在当前版本中测试一些新版本的特性
import datetime #导入时间模块
import requests
import feedparser #此模块可以方便的获取RSS订阅源的信息
from flask import Flask, render_template, request, make_response
#render_template模块根据用户模板返回信息给templates里面的页面
#make_response用于创建自定义响应类 比默认的响应类Response更灵活
#request用于提取请求中用post或者get提交的参数
app = Flask(__name__) #创建程序实例 即Flask类的对象
RSS_FEED = {
"zhihu": "https://www.zhihu.com/rss",
"netease": "http://news.163.com/special/00011K6L/rss_newsattitude.xml",
"songshuhui": "http://songshuhui.net/feed",
"ifeng": "http://news.ifeng.com/rss/index.xml"} #可以选择的RSS网站
DEFAULTS = {'city': '北京',
'publication': 'songshuhui'} #默认的选择
WEATHERS = {"北京": 101010100,
"上海": 101020100,
"广州": 101280101,
"深圳": 101280601}
#利用request获取参数 若参数为空则返回默认值
def get_value_with_fallback(key):
if request.args.get(key):
return request.args.get(key)
if request.cookies.get(key):
return request.cookies.get(key)
return DEFAULTS[key]
@app.route('/')
def home():
publication = get_value_with_fallback('publication')
city = get_value_with_fallback('city')
weather = get_weather(city)
articles = get_news(publication)
# 此处创建自定义响应类,并利用render_template渲染templates中的模板,将数据传递过去
response = make_response(render_template('home.html', articles=articles, weather=weather))
expires = datetime.datetime.now() +datetime.timedelta(days=365)
response.set_cookie('publication', publication, expires=expires)
response.set_cookie('city', city, expires=expires)
return response
def get_weather(city):
code = WEATHERS.get('city', 101010100)
url = "http://www.weather.com.cn/data/sk/{0}.html".format(code)
r = requests.get(url)
r.encoding = "utf-8"
data = r.json()["weatherinfo"]
return dict(city=data['city'], temperature=data['temp'], description=data['WD'])
def get_news(publication):
feed = feedparser.parse(RSS_FEED[publication]) #对网页进行解析
return feed['entries'] #返回一组文章条目 是list
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True) |
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
import pandas as pd
def importNewregToSQLite():
"""excel"""
with sqlite3.connect('C:\sqlite\db\hxdata.db') as db:
#ExcelDocument('..\input\营销人员和营业部列表.xlsx') as src:
insert_template_4 = "INSERT INTO newreg " \
"(usrmobile, marketcode, departmentid, createtime) " \
"VALUES (?, ?, ?, ?);"
insert_template_9 = "INSERT INTO newreg "\
"(usrmobile, marketcode, departmentid, createtime, refid, refnickname, refrealname, refphone, pageindex) "\
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?);"
#清空的数据库遗留的数据
db.execute('DELETE FROM newreg;')
#对于EXCEL文档里的每一个SHEET都导入数据库(simTrade中只有一个名为simTrade的SHEET)
df = pd.read_excel('..\input\\newReg.xlsx', sheetname = 'newReg')
print("df Column headings:")
print(df.columns)
#for sheet in src:
# if sheet.name == 'SQL Results':
#判断这个excel有几列
df1 = None
if df.shape[1] == 4:
df1 = df[['MOBILENO','MARKET_CODE','DEPARTMENT_ID','CREATETIME']]
else:
if df.shape[1] == 9 or df.shape[1] == 10:
print("There are " + "9" + " columns")
df1 = df[['MOBILENO', 'MARKET_CODE', 'DEPARTMENT_ID', 'CREATETIME', 'REFERRER_ID', 'NICK_NAME', 'REAL_NAME', 'PHONE', 'PAGE_INDEX']]
print("df1 Column headings:")
print(df1.columns)
print(df1)
try:
print('3')
if df.shape[1] == 4:
db.executemany(insert_template_4, df1.values) #iter_rows() 自动跳过了抬头首行
else:
if df.shape[1] == 9:
db.executemany(insert_template_9, df1.values)
else:
if df.shape[1] == 10:
db.executemany(insert_template_9, df1.values)
except sqlite3.Error as e:
print('2')
print(e)
db.rollback()
else:
db.commit()
#检查是不是所有的数据都被加载了
select_stmt = 'SELECT usrmobile FROM newreg;'
total = 0
for row in db.execute(select_stmt).fetchall():
print(str(row))
total = total + 1
print(total)
#importNewregToSQLite()
|
#-*- coding: utf-8 -*-
from models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseBadRequest
from django.http.response import HttpResponseNotAllowed
from django.utils import simplejson
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
from django import forms
from django.shortcuts import get_object_or_404
def get_news(request):
items = News.objects.all()
items = serializers.serialize('json', items, indent=4, use_natural_keys=True)
return HttpResponse(items, mimetype='application/json')
def get_news_by_id(request, news_id):
items = News.objects.filter(id=news_id)
items = serializers.serialize('json', items, indent=4, use_natural_keys=True)
return HttpResponse(items, mimetype='application/json')
def get_last_x_news(request, x, y):
items = News.objects.all()[x:y]
items = serializers.serialize('json', items, indent=4, use_natural_keys=True)
return HttpResponse(items, mimetype='application/json')
def get_members(request):
items = Author.objects.all()
items = serializers.serialize('json', items, indent=4, use_natural_keys=True)
return HttpResponse(items, mimetype='application/json') |
from flask import request
from kernel.signal import http_request_signal, http_response_signal
def init_event(core):
app = core.app
app.before_request(_before_each_request)
app.after_request(_after_each_request)
def _before_each_request():
http_request_signal.send(request=request)
def _after_each_request(response):
http_response_signal.send(response=response)
return response
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
import time
import csv
import sys
from lab_config import boto_args
def import_csv(tableName, fileName):
dynamodb = boto3.resource(**boto_args)
dynamodb_table = dynamodb.Table(tableName)
count = 0
time1 = time.time()
with open(fileName, 'r', encoding="utf-8") as csvfile:
myreader = csv.reader(csvfile, delimiter=',')
for row in myreader:
count += 1
newEmployee = {}
#primary keys
newEmployee['PK'] = "e#{}".format(row[0])
newEmployee['SK'] = 'root'
newEmployee['GSI_1_PK'] = 'root'
newEmployee['GSI_1_SK'] = row[1]
newEmployee['GSI_3_PK'] = "state#{}".format(row[5])
newEmployee['GSI_3_SK'] = "{}#{}".format(row[4], row[3])
newEmployee['employeeid'] = int(row[0])
newEmployee['name'] = row[1]
newEmployee['title'] = row[2]
newEmployee['dept'] = row[3]
newEmployee['city'] = row[4]
newEmployee['state'] = row[5]
newEmployee['city_dept'] = newEmployee['GSI_3_SK']
newEmployee['dob'] = row[6]
newEmployee['hire_date'] = row[7]
newEmployee['previous_title'] = row[8]
newEmployee['previous_title_end'] = row[9]
newEmployee['lock'] = '0'
if len(row) == 11:
newEmployee['is_manager'] = row[10]
newEmployee['GSI_2_PK'] = str(newEmployee['is_manager'])
newEmployee['GSI_2_SK'] = "root"
item = dynamodb_table.put_item(Item=newEmployee)
newCurrentTitle = {}
newCurrentTitle['employeeid'] = newEmployee['employeeid']
newCurrentTitle['name'] = newEmployee['name']
newCurrentTitle['hire_date'] = newEmployee['hire_date']
newCurrentTitle['PK'] = newEmployee['PK']
newCurrentTitle['SK'] = 'current_title#' + newEmployee['title']
newCurrentTitle['GSI_1_PK'] = newCurrentTitle['SK']
newCurrentTitle['GSI_1_SK'] = newCurrentTitle['name']
item = dynamodb_table.put_item(Item=newCurrentTitle)
newPreviousTitle = {}
newPreviousTitle['employeeid'] = newEmployee['employeeid']
newPreviousTitle['name'] = newEmployee['name']
newPreviousTitle['hire_date'] = newEmployee['hire_date']
newPreviousTitle['PK'] = newEmployee['PK']
newPreviousTitle['SK'] = 'previous_title#' + newEmployee['previous_title']
newPreviousTitle['GSI_1_PK'] = newPreviousTitle['SK']
newPreviousTitle['GSI_1_SK'] = newPreviousTitle['name']
item = dynamodb_table.put_item(Item=newPreviousTitle)
newLocation = {}
newLocation['employeeid'] = newEmployee['employeeid']
newLocation['name'] = newEmployee['name']
newLocation['hire_date'] = newEmployee['hire_date']
newLocation['city_dept'] = newEmployee['city_dept']
newLocation['PK'] = newEmployee['PK']
newLocation['SK'] = 'state#' + newEmployee['state']
newLocation['GSI_1_PK'] = newLocation['SK']
newLocation['GSI_1_SK'] = newLocation['name']
item = dynamodb_table.put_item(Item=newLocation)
if count % 100 == 0:
time2 = time.time() - time1
print("employee count: %s in %s" % (count, time2))
time1 = time.time()
return count
if __name__ == "__main__":
args = sys.argv[1:]
tableName = args[0]
fileName = args[1]
begin_time = time.time()
count = import_csv(tableName, fileName)
# print summary
print('RowCount: %s, Total seconds: %s' %(count, (time.time() - begin_time)))
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Meeting(models.Model):
meeting_title = models.CharField(max_length = 50, blank = True, default = 'Title is not given')
meeting_estimated_time = models.DurationField(blank = False, null = False)
meeting_date = models.DateTimeField(blank = False, null = False)
meeting_ending_time = models.DateTimeField(blank = True, null = True) # It is always set on serializer.
meeting_id = models.AutoField(primary_key = True)
creator = models.ForeignKey('User', related_name = 'created_meetings', on_delete = models.CASCADE,)
meeting_room = models.ForeignKey('MeetingRoom',related_name = 'meeting_room', on_delete = models.CASCADE,)
class MeetingRoom(models.Model):
meeting_room_id = models.AutoField(primary_key = True)
meeting_room_availability = models.BooleanField(default = True, null = False)
meeting_room_capacity = models.IntegerField(null = False, blank = False)
meeting_room_name = models.CharField(max_length = 50, null = False)
creator = models.ForeignKey('User', related_name = 'created_rooms', on_delete = models.CASCADE,) # Admin user can create new rooms, update rooms.
class User(AbstractUser):
position = models.CharField(max_length = 50, null = False)
meetings = models.ManyToManyField(Meeting, blank = True, null = True)
|
from flask import Flask
from flask_restful import Api
from flaskext.mysql import MySQL |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-11-06 11:52:31
# Project: PySpider_MaFengWo
from pyspider.libs.base_handler import *
import json
from pyspider.libs.utils import md5string
import re
from fake_useragent import UserAgent
default_headers = {
'Accept':'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
'Connection':'keep-alive',
'Content-Length':68,
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
'Connection':'keep-alive',
'Upgrade-Insecure-Requests':1,
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36',
'X-Requested-With':'XMLHttpRequest',
}
class Handler(BaseHandler):
crawl_config = {
'headers':default_headers,
'timeout':300,
'proxy':'192.168.1.1:8080',
}
@every(minutes=24 * 60)
def on_start(self):
url = 'http://www.mafengwo.cn/jd/10088/gonglve.html'
self.crawl(url,callback=self.get_index,fetch_type='js')
def get_taskid(self, task):
return md5string(task['url']+json.dumps(task['fetch'].get('data', '')))
def get_index(self,response):
url = 'http://www.mafengwo.cn/ajax/router.php'
total = int(response.doc('.pi.pg-last').attr('data-page'))
for i in range(1,total+1):
data = {
'sAct':'KMdd_StructWebAjax|GetPoisByTag',
'iMddid':'10088',
' iTagId':0,
'iPage':i
}
self.crawl(url,method='POST',data=data,callback=self.index_page)
# 每一个回调detail_page的任务有效期是10天
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
print(response.text)
result = response.text.replace('\\','')
print(result)
pattern = re.compile('<a href="(.*?)" target=')
urls = re.findall(pattern,result)
for url in urls:
mainurl = 'https://www.mafengwo.cn'
self.crawl(mainurl + url, callback=self.detail_page,fetch_type='js')
# 数字越大优先级越高
@config(priority=2)
def detail_page(self, response):
return {
"name": response.doc('h1').text(),
"location":response.doc('.mhd > p').text(),
"price":response.doc('.mod-detail dd > div').text(),
"phone":response.doc('.tel > .content').text(),
"visittime":response.doc('.item-time > .content').text(),
"website":response.doc('.content > a').text(),
}
|
from PyML import *
from PyML import ker
from PyML.classifiers import multi
from PyML.demo import demo2d
import csv
from PyML.datagen import sample
import matplotlib.pyplot as plt
def read_data(file_name):
if file_name== 'train':
data = vectorDatasets.VectorDataSet("train_new_train.data")
data.attachLabels(Labels("train_new_label.data"))
return data
if file_name =='test':
data = vectorDatasets.VectorDataSet("test.csv")
return data
def multi_train(c,data,kernel_name):
s=svm.SVM()
#mc = multi.OneAgainstRest(svm.SVM(),C=1)
mc = multi.OneAgainstOne(svm.SVM(),C=1)
mc.cv(data,2)
def test_and_print(s,test):
r=s.test(test)
result_file = open("result.csv","wb")
result_ob = csv.writer(result_file)
result_ob.writerow(['ImageId','Label'])
j=1
for i in r.L:
l=[j]
l.append(i)
result_ob.writerow(l)
print l
j=j+1
'''if __name__ == "__main__":
data=read_data('train')
data.normalize()
test=read_data('test')
s=svm.SVM()
mc = multi.OneAgainstRest(svm.SVM(ker.Gaussian()),C=10,gamma=0.77387)
mc.train(data)
test_and_print(mc,test)'''
|
'''
Arduino
requirement:
pip3 install pymata-aio --user
'''
import zmq
import subprocess
import pathlib
import platform
import time
import threading
from codelab_adapter import settings
from codelab_adapter.core_extension import Extension
def get_python3_path():
# If it is not working, Please replace python3_path with your local python3 path. shell: which python3
if (platform.system() == "Darwin"):
# which python3
# 不如用PATH python
# 不确定
path = "/usr/local/bin/python3"
if platform.system() == "Windows":
path = "python3"
if platform.system() == "Linux":
path = "/usr/bin/python3"
return path
python3_path = get_python3_path()
class arduinoExtension(Extension):
def __init__(self):
name = type(self).__name__ # class name
super().__init__(name)
self.scratch3_message = {}
self.TOPIC = "eim/arduino"
self.first_start = 1
def run(self):
# 抽象掉这部分 Class
port = 38782 # todo 随机分配
context = zmq.Context.instance()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % port)
codelab_adapter_server_dir = pathlib.Path.home(
) / "codelab_adapter" / "servers"
script = "{}/arduino_server.py".format(codelab_adapter_server_dir)
cmd = [python3_path, script]
arduino_server = subprocess.Popen(cmd)
settings.running_child_procs.append(arduino_server)
lock = threading.Lock()
def request():
while self._running:
lock.acquire()
self.scratch3_message = self.read()
lock.release()
bg_task = threading.Thread(target=request)
self.logger.debug("thread start")
bg_task.daemon = True
bg_task.start()
while self._running:
scratch3_message = self.scratch3_message
self.logger.debug("scratch3_message {}".format(scratch3_message))
self.scratch3_message = {}
if scratch3_message == {}:
scratch3_message = {"topic": self.TOPIC, "payload": ""}
topic = scratch3_message.get('topic')
arduino_code = scratch3_message.get("payload")
if topic == self.TOPIC:
socket.send_json({"arduino_code": arduino_code})
result = socket.recv_json().get("result")
if self.first_start == 1:
self.publish({"topic": "eim/arduino/init","payload": ""})
self.first_start = 0
# 发往scratch3.0
self.publish({"topic": self.TOPIC,"payload": result})
time.sleep(0.05)
# release socket
socket.send_json({"arduino_code": "quit!"})
result = socket.recv_json().get("result")
arduino_server.terminate()
arduino_server.wait()
socket.close()
context.term()
export = arduinoExtension
|
def reverse(st):
return ' '.join(reversed(st.split(' ')))
|
#!/usr/bin/python
#from pylab import plot,show,norm
#from pylab import plot,show,norm
#import numpy
import sys
from csv import reader, writer
#from sklearn import preprocessing
from decimal import *
betas = []
betas.append(0.0)
betas.append(0.0)
betas.append(0.0)
def load_csv(filename):
samples = list()
with open(filename, 'r') as fd:
csv_reader = reader(fd)
for row in csv_reader:
#row.insert(0, '1') # bias
samples.append(row)
return samples
# pos 0 has feature 1
# pos 1 has feature 2
# pos 2 has true label
def convert_to_float(samples, column):
for row in samples:
row[column] = float(row[column].strip())
return
def mean(rows):
#total = 0
#for x in rows:
# total += x
#elements = len(rows)
#return total / elements
return sum(rows) / float(len(rows))
def funct(rows):
m = mean(rows)
diff = sum((i-m)**2 for i in rows)
#diff = sum(i**2 for i in rows)
return diff
def stdev(rows):
"""Calculates the population standard deviation"""
dev = funct(rows) / float(len(rows))
return dev**0.5
def scale(samples):
trainset = []
#xraw = []
for row in samples:
features = []
features.append(row[0])
features.append(row[1])
#z1 = (row[0] - mean(features)) / stdev(features)
#z2 = (row[1] - mean(features)) / stdev(features)
z1 = row[0] / stdev(features)
z2 = row[1] / stdev(features)
#print "custom = ", z1, z2
par = []
par.append(1.0) # bias
par.append(z1) # feature 1 scaled
par.append(z2) # feature 2 scaled
par.append(row[2]) # label
trainset.append(par)
#
#xpar = []
#xpar.append(row[0])
#xpar.append(row[1])
#xraw.append(xpar)
"""
norm = preprocessing.normalize(xraw)
trainset = []
i = 0;
for row in samples:
par = []
par.append(1.0) # bias
par.append(norm[i][0]) # feature 1 scaled
par.append(norm[i][1]) # feature 2 scaled
par.append(row[2]) # label
trainset.append(par)
print "sklearn = ", preprocessing.normalize(xraw)
#trainset = preprocessing.scale(xraw)
"""
return trainset
def f(features):
"""Receives the betas (betas) and one row of features, plus label (features).
"""
result = 0.0
i = 0
for x in features[:-1]: # ignore the label
result += betas[i] * x
i += 1
return result
def gradient_descent(alpha, features):
"""Receives the betas (betas) and one row of features, plus label (features).
"""
tlabel = features[len(features)-1]
n = float(len(features) - 1)
f_x = f(features)
i = 0
for x in features[:-1]:
betas[i] = betas[i] - alpha * 1.0 / n * (f_x - tlabel) * x
i += 1
return
#return summa
def risk(features):
"""Receives the betas (betas) and one row of features, plus label (features)
"""
f_x = f(features)
tlabel = features[len(features)-1]
summa = Decimal(0)
for x in features[:-1]:
partial = Decimal(f_x - tlabel)
#print partial
summa = partial**Decimal(2.0) # minus label
n = Decimal(len(features) - 1)
return Decimal(summa * 1 / (2*n))
def main(script, *args):
if len(sys.argv) != 3:
print "Error in arguments!"
sys.exit()
trainset = load_csv(sys.argv[1])
columns = len(trainset[0])
for i in range(columns):
convert_to_float(trainset, i)
scaled_trainset = scale(trainset) # bias, features plus label
fd = open(sys.argv[2],'w')
output = writer(fd)
learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 0.55]
#min_risk = Decimal(9999.0)
for alpha in learning_rates:
iterations = 0
risk_betas = Decimal(0)
ant_risk = Decimal(9999.0)
#betas = [0.0 for _ in range(len(scaled_trainset[0])-1)]
betas[0] = 0.0
betas[1] = 0.0
betas[2] = 0.0
convergence = False
while iterations < 100 and not convergence:
iterations += 1
#if iterations == 99 and alpha != 0.55:
# convergence = True
#if risk_betas == ant_risk and alpha == 0.55:
# convergence = True
# print "convergence with alpha=", alpha, "iterations=", iterations - 1, "risk=", risk_betas
#else:
# ant_risk = risk_betas
for row in scaled_trainset:
risk_betas = risk(row)
gradient_descent(alpha, row)
#if risk_betas < min_risk:
# print "***alpha=", alpha, " risk=", risk_betas
# min_risk = risk_betas
temp = []
temp.append(alpha)
temp.append(iterations)
temp.append(betas[0])
temp.append(betas[1])
temp.append(betas[2])
output.writerow(temp)
fd.close()
"""
[Executed at: Thu Jun 22 10:47:47 PDT 2017]
alpha = 0.001: alpha passed [1/1]
alpha = 0.001: iterations passed [1/1]
alpha = 0.001: b_intercept failed [0/1]
alpha = 0.001: b_age failed [0/1]
alpha = 0.001: b_weight failed [0/1]
alpha = 0.005: alpha passed [1/1]
alpha = 0.005: iterations passed [1/1]
alpha = 0.005: b_intercept failed [0/1]
alpha = 0.005: b_age failed [0/1]
alpha = 0.005: b_weight failed [0/1]
alpha = 0.01: alpha passed [1/1]
alpha = 0.01: iterations passed [1/1]
alpha = 0.01: b_intercept failed [0/1]
alpha = 0.01: b_age failed [0/1]
alpha = 0.01: b_weight failed [0/1]
alpha = 0.05: alpha passed [1/1]
alpha = 0.05: iterations passed [1/1]
alpha = 0.05: b_intercept failed [0/1]
alpha = 0.05: b_age failed [0/1]
alpha = 0.05: b_weight failed [0/1]
alpha = 0.1: alpha passed [1/1]
alpha = 0.1: iterations passed [1/1]
alpha = 0.1: b_intercept failed [0/1]
alpha = 0.1: b_age failed [0/1]
alpha = 0.1: b_weight failed [0/1]
alpha = 0.5: alpha passed [1/1]
alpha = 0.5: iterations passed [1/1]
alpha = 0.5: b_intercept failed [0/1]
alpha = 0.5: b_age failed [0/1]
alpha = 0.5: b_weight failed [0/1]
alpha = 1: alpha passed [1/1]
alpha = 1: iterations passed [1/1]
alpha = 1: b_intercept failed [0/1]
alpha = 1: b_age failed [0/1]
alpha = 1: b_weight failed [0/1]
alpha = 5: alpha passed [1/1]
alpha = 5: iterations passed [1/1]
alpha = 5: b_intercept passed [1/1]
alpha = 5: b_age passed [1/1]
alpha = 5: b_weight passed [1/1]
alpha = 10: alpha passed [1/1]
alpha = 10: iterations passed [1/1]
alpha = 10: b_intercept passed [1/1]
alpha = 10: b_age passed [1/1]
alpha = 10: b_weight passed [1/1]
alpha = free: alpha passed [1/1]
alpha = free: iterations passed [1/1]
alpha = free: b_intercept failed [0/1]
alpha = free: b_age failed [0/1]
alpha = free: b_weight failed [0/1]
"Linear Regression",26
"""
if __name__ == '__main__':
main(*sys.argv)
|
from __future__ import print_function
import os,imp
import pprint as pp
import socket
import sys
import datetime as dt
import errno
import traceback
from socket import error as socket_error
e=sys.exit
#builtins: init, config
def formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.__dict__["args"]
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return (excName, excArgs, excTb)
def create_symlink(from_dir, to_dir, home):
global log
os.chdir(home)
#print(home)
if (os.name == "posix"):
#os.unlink(to_dir)
#if not os.path.isdir(to_dir):
os.symlink(from_dir, to_dir)
#print (from_dir)
#print (to_dir)
#e(0)
elif (os.name == "nt"):
from subprocess import Popen, PIPE, STDOUT
wget = Popen(('mklink /J %s %s' % (to_dir, from_dir)).split(' '), stdout=PIPE, stderr=STDOUT, shell=True)
stdout, nothing = wget.communicate()
log.info(stdout, extra=d)
#print stdout
#os.system('mklink /J %s %s' % (to_dir, from_dir))
else:
log.error('Cannot create symlink. Unknown OS.', extra=d)
def unlink(dirname):
if (os.name == "posix"):
os.unlink(dirname)
elif (os.name == "nt"):
#print('deleting', os.getpid(), dirname)
try:
os.rmdir( dirname )
except:
pass
else:
log.error('Cannot unlink. Unknown OS.', extra=d)
def import_module(filepath):
class_inst = None
mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
assert os.path.isfile(filepath), 'File %s does not exists.' % filepath
if file_ext.lower() == '.py':
py_mod = imp.load_source(mod_name.replace('.','_'), filepath)
elif file_ext.lower() == '.pyc':
py_mod = imp.load_compiled(mod_name, filepath)
return py_mod
def netcat_write(*args, **kargs):
(msg, job_status) =args
log=kargs['log']
#n1=dt.datetime.now()
try:
s = socket.socket()
host = 'WHKWDCTGABUZUN1'
port = 12347 # Reserve a port for your service.
s.connect((host, port))
#f = open('/dump2/oats/101316/rawdata/pr1lsmars11.20161013.TransOrd.dat.gz') #('/dump2/oats/101316/rawdata/MatchIt_20161013.dat.gz','rb')
print ('Sending..', end=" ")
#l = f.read(1024*100)
#while (l):
# print '.',
s.sendall(msg)
#l = f.read(1024*100)
#f.close()
print ("Done Sending")
s.shutdown(socket.SHUT_WR)
s.close
except socket_error as serr:
job_status['socket_error']=formatExceptionInfo()
print(serr)
except Exception:
print(formatExceptionInfo())
job_status['Exception']=formatExceptionInfo()
#n2=dt.datetime.now()
#diff=(n2-n1)
#print (diff.seconds)
#e(0) |
#!/usr/bin/python3
# Written by Michael Gillett, 2020
# github.com/gillettmi
# A simple morse code conversion program
import time
import os
from playsound import playsound as ps
from cipher import *
morse = {
'a': '.-', 'b': '-...', 'c': '-.-.',
'd': '-..', 'e': '.', 'f': '..-.',
'g': '--.', 'h': '....', 'i': '..',
'j': '.---', 'k': '-.-', 'l': '.-..',
'm': '--', 'n': '-.', 'o': '---',
'p': '.--.', 'q': '--.-', 'r': '.-.',
's': '...', 't': '-', 'u': '..-',
'v': '...-', 'w': '.--', 'x': '-..-',
'y': '-.--', 'z': '--..',
# '1': '.----', '2': '..---', '3': '...---',
# '4': '....-', '5': '.....', '6': '-....',
# '7': '--...', '8': '---..', '9': '----.',
# '0': '-----',
# '.': '.-.-.-', ',': '--..--', '?': '..--..',
# '!': '-.-.--', '&': '.-...', ':': '---...',
# ';': '-.-.-.', '@': '.--.-.', '$': '...-..-'
}
menu = '''
================================
MORSE CODE GENERATOR
================================
1. Convert and Transmit
2. Convert, Encrypt, and Transmit
3. Deconvert Morse Message
4. Quit
'''
output = ''
errors = ''
current_directory = os.getcwd()
# input_file = os.path.join(current_directory, 'input.txt')
output_file = os.path.join(current_directory, 'output.txt')
def press_enter():
input('Press enter to continue.')
def error(error_message):
print(error_message)
press_enter()
# PROGRAM BEGINS HERE
def encode_morse(message, encrypt=False, encrypt_key=0):
global output, errors
# split the words sand save to line list
words = message.split(' ')
# convert each word in line list
for word in words:
for letter in list(word):
# make each letter lowercase
letter = letter.lower()
# If encrypt == True : encrypt input
if encrypt:
letter = caesar_cypher(letter=letter, mode='encrypt', key=encrypt_key)
# convert the letter to morse and add to global output string
if letter in morse:
output += morse[letter] + ' '
# if the character is unrecognized, print it
else:
errors += letter
# print all of the errors
if errors != '':
print('Unrecognized characters omitted: {0}'.format(errors))
write_file(output, output_file)
transmit(output)
def decode_morse(message='', encrypted=False, decrypt_key=0):
output_message = ''
decrypted_message = ''
morse_letters = message.split(' ')
for letter in morse_letters:
try:
output_message += (list(morse.keys())[list(morse.values()).index(letter.lower())])
except ValueError:
print('Unable to convert letter:', letter)
continue
print('Your message:', output_message)
write_file(output_message, output_file)
if encrypted:
for letter in output_message:
decrypted_message += caesar_cypher(letter=letter, mode='decrypt', key=decrypt_key)
print('Your decrypted message:', decrypted_message)
write_file(decrypted_message, output_file)
# Still working on this section
# def import_text_file(save_location):
# with open(save_location, 'r') as f:
# file_contents = f.readlines()
# return file_contents
def write_file(message_output, save_location):
# write output file
with open(save_location, 'w') as o:
o.write(message_output)
def transmit(input_morse):
dot = os.path.join(current_directory, 'audio', 'dot.mp3')
dash = os.path.join(current_directory, 'audio', 'dash.mp3')
print('Transmitting message:', input_morse)
for t in input_morse:
if t == '.':
ps(dot)
elif t == '-':
ps(dash)
else:
time.sleep(0.2)
print('\nTransmission complete')
def main():
while True:
print(menu)
try:
choice = int(input('Input:'))
except ValueError:
pass
# Convert input from text to morse code (no encryption)
if choice == 1:
encode_morse(str(input('Message:')))
# Convert and encrypt input
elif choice == 2:
try:
encode_morse(str(input('Message:')),
encrypt=True,
encrypt_key=int(input('Encryption Key:')))
except ValueError:
error('Invalid input. Please input an integer for the Encryption Key.')
# Decrypt input
elif choice == 3:
while True:
# select if the message is encrypted or not
is_it_encrypted = str(input('Is the message encrypted? (Y/N)')).lower()
# if the message is encrypted:
if is_it_encrypted == 'y':
try:
decode_morse(message=str(input('Message:')),
encrypted=True,
decrypt_key=int(input('Encryption Key:')))
break
except ValueError:
error('Please input an integer for the Encryption Key.')
# if the image isn't encrypted:
elif is_it_encrypted == 'n':
decode_morse(str(input('Message:')))
break
# wrong input
else:
error('Please input Y or N')
# Quit
elif choice == 4:
break
# Error
else:
error('Please input an integer from the menu.')
if __name__ == '__main__':
main()
|
from tour import Tour
class Population(object):
def __init__(self, size, initialize, tour_manager):
self._tours = []
self.tour_manager = tour_manager
if initialize:
for i in range(0, size):
tour = Tour(tour_manager)
tour.generate_individual()
self._tours.append(tour)
else:
for i in range(0, size):
self._tours.append(None)
def save_tour(self, i, tour):
self._tours[i] = tour
def get_tour(self, i):
return self._tours[i]
def get_fittest(self):
fittest = self._tours[0]
for i in range (0, len(self._tours)):
if fittest.get_fitness() <= self.get_tour(i).get_fitness():
fittest = self.get_tour(i)
return fittest
def size(self):
return len(self._tours) |
from django.apps import AppConfig
class PioperateConfig(AppConfig):
name = 'PiOperate'
|
import scipy
from numpy import *
import scipy.integrate
from fractions import Fraction
# finding the volume integral of divergence
def Dv(x,y,z):
return 2*(x+y)
D1, errt = scipy.integrate.tplquad(Dv, 0, 1, lambda z: 0, lambda z: 1, lambda z,y: 0, lambda z,y: 1)
# finding the surface integral of 6 surfaces of a cube, here it is a unit cube
# declaring the functions for all sides
def S1(y,z):
return y**2 # side 1
def S2(y,z):
return -(y**2) # side 2
def S3(x,z):
return (2*x + z**2) # side 3
def S4(x,z):
return -(z**2) # side 4
def S5(x,y):
return 2*y # side 5
def S6(x,y):
return 0 # side 6
Si1, errt1 = scipy.integrate.dblquad(lambda z, y: S1(y,z), 0, 1, lambda z: 0, lambda z: 1) # evaluating the surface integral of side 1
SI1 = (Fraction(Si1).limit_denominator(100))
Si2, errt2 = scipy.integrate.dblquad(lambda z, y: S2(y,z), 0, 1, lambda z: 0, lambda z: 1) # evaluating the surface integral of side 2
SI2 = (Fraction(Si2).limit_denominator(100))
Si3, errt3 = scipy.integrate.dblquad(lambda z, x: S3(x,z), 0, 1, lambda z: 0, lambda z: 1) # evaluating the surface integral of side 3
SI3 = (Fraction(Si3).limit_denominator(100))
Si4, errt4 = scipy.integrate.dblquad(lambda z, x: S4(x,z), 0, 1, lambda z: 0, lambda z: 1) # evaluating the surface integral of side 4
SI4 = (Fraction(Si4).limit_denominator(100))
Si5, errt5 = scipy.integrate.dblquad(lambda y, x: S5(x,y), 0, 1, lambda y: 0, lambda y: 1) # evaluating the surface integral of side 5
SI5 = (Fraction(Si5).limit_denominator(100))
Si6, errt6 = scipy.integrate.dblquad(lambda y, x: S6(x,y), 0, 1, lambda x: 0, lambda y: 1) # evaluating the surface integral of side 6
SI6 = (Fraction(Si6).limit_denominator(100))
# Calculating the total flux
SI = SI1 + SI2 + SI3 + SI4 + SI5 + SI6
print 'surface integral of side 1 =',SI1
print 'surface integral of side 2 =',SI2
print 'surface integral of side 3 =',SI3
print 'surface integral of side 4 =',SI4
print 'surface integral of side 5 =',SI5
print 'surface integral of side 6 =',SI6
print 'The total flux is =',SI
print 'The output from the volume integral =',D1
print 'Hence both LHS and RHS are equal do we successfully checked the Divergence theorem'
|
from argparse import ArgumentParser
from types import SimpleNamespace
import os
import yaml
def convert_dict_namespace(dict_convert):
"""
Convert params from dictionary to SimpleNamespace type (in order to use dotted notation)
:param dict_convert: dictionary to convert
:return: SimpleNamespace
"""
for key in dict_convert:
if isinstance(dict_convert[key], dict):
dict_convert[key] = convert_dict_namespace(dict_convert[key])
return SimpleNamespace(**dict_convert)
def parse_arguments():
"""
The function reads files with settings ('param file').
The param files are stored in PROJECT_ROOT/experiments/cfgs
The param file provides default values for all parameters.
A common line arguments are used to set specific values, different from default.
:return: paraters in SimpleNamespace format
"""
parser = ArgumentParser(description=__doc__) # TODO: check what is mean
parser.add_argument('--param_file', type=str, help='configure file with parameters')
parser.add_argument('--device', default=None, type=str, help='device')
parser.add_argument('--workers', default=None, type=int, help='number of data loading workers')
parser.add_argument('--output_dir', default=None, type=str, help='path where to save')
parser.add_argument('--test_only', default=None, type=bool, help="Only test the model")
parser.add_argument('--pretrained_path', default=None, type=str, help="Use pre-trained models from the modelzoo")
parser.add_argument('--world_size', default=None, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default=None, type=str, help='url used to set up distributed training')
parser.add_argument('--DATASET_name', default=None, type=str, help='dataset')
parser.add_argument('--DATASET_path', default=None, type=str, help='path to dataset')
parser.add_argument('--DATASET_batch_size', default=None, type=int, help='train batch size')
parser.add_argument('--DATASET_aspect_ratio_group_factor', default=None, type=int)
parser.add_argument('--TRAIN_epochs', default=None, type=int, help='number of total epochs to run')
parser.add_argument('--TRAIN_lr', default=None, type=float, help='initial learning rate')
parser.add_argument('--TRAIN_momentum', default=None, type=float, help='momentum')
parser.add_argument('--TRAIN_weight_decay', default=None, type=float, help='weight decay')
parser.add_argument('--TRAIN_lr_step_size', default=None, type=int, help='decrease lr every step-size epochs')
parser.add_argument('--TRAIN_lr_steps', default=None, type=int, help='decrease lr every step-size epochs')
parser.add_argument('--TRAIN_lr_gamma', default=None, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--TRAIN_resume', default=None, type=str, help='resume from checkpoint')
parser.add_argument('--EVAL_eval_metric', default=None, type=str, help='figure of merit to select best model')
parser.add_argument('--EVAL_save_results', default=None, type=bool, help='save or not results')
parser.add_argument('--EVAL_make_log', default=None, type=bool, help='make log durint eval')
parser.add_argument('--MODEL_name', default=None, type=str, help='model name')
parser.add_argument('--LOG_print_freq', default=None, type=int, help='print frequency')
parser.add_argument('--LOG_print_freq_test', default=None, type=int, help='print frequency test')
parser.add_argument('--LOG_with_tensorboard', default=None, type=bool, help='tensorboard on')
parser.add_argument('--LOG_smooth_window', default=None, type=int, help='smooth window for meters')
args = parser.parse_args()
# parse param file
with open(args.param_file, 'r') as f:
params = yaml.safe_load(f)
# add host & id to params
params['host'] = os.uname()[1]
params['user_id'] = os.getlogin()
# addition configurations (check if something was set using common line)
for k in args.__dict__:
if args.__dict__[k] is not None:
# check capital letter (this indicates that parameter is folded)
# note: this code only handles the 'two-foldness' of parameters, like param[TRAIN][lr]
if k[0].isupper():
part1, part2 = k.split('_', 1) # split at first occurrence of '_'
params[part1][part2] = args.__dict__[k]
else:
params[k] = args.__dict__[k]
# convert params from dictionary to SimpleNamespace type
params = convert_dict_namespace(params)
return params
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 3 - Problem B. Square Free
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000436142/0000000000813e1a
#
# Time: O(R^2 * C^2)
# Space: O(R + C)
#
def inplace_counting_sort(nums, reverse=False): # Time: O(len(nums)+max(nums)), Space: O(max(nums))
count = [0]*(max(nums)+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
if nums[i] < 0: # processed
continue
while i != count[nums[i]]-1:
count[nums[i]] -= 1
nums[count[nums[i]]], nums[i] = ~nums[i], nums[count[nums[i]]]
count[nums[i]] -= 1
nums[i] = ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
def possible(S, D): # Time: O(R * C), Space: O(R + C)
inplace_counting_sort(S, reverse=True) # Time: O(R + C), Space: O(C)
inplace_counting_sort(D, reverse=True) # Time: O(R + C), Space: O(R)
S_prefix = [0]
for i in xrange(len(S)): # Time: O(R), Space: O(R)
S_prefix.append(S_prefix[-1] + S[i])
D_suffix = [0]
for i in reversed(xrange(len(D))): # Time: O(C), Space: O(C)
D_suffix.append(D_suffix[-1] + D[i])
D_suffix.reverse()
# consider a graph running max flow algorithm where edge from source to each Sx is with capacity S[x], edge from each Sx to each Dy is with capacity 1, edge from each Dy to sink is with capacity D[y],
# if sum(S) != sum(D), it is impossible,
# otherwise, we want all nodes with full capacity,
# it is possible
# <=> sum(S[x] for x in X)-sum(D[y] for y in Y) <= |X|*(C-|Y|) for all 0 <= |X| <= R and 0 <= |Y| <= C
# <=> sum(S[x] for x in X')-sum(D[y] for y in Y') <= |X|*|Y| for all 0 <= |X| <= R and 0 <= |Y| <= C
# and X' is the biggist |X| of S and Y' is the smallest C-|Y| of D
# <=> -(sum(S)-sum(S[x] for x in X'))+(sum(D)-sum(D[y]) for y in Y') <= |X|*|Y| for all 0 <= |X| <= R and 0 <= |Y| <= C
# and X' is the biggist |X| of S and Y' is the smallest C-|Y| of D
# <=> sum(D[y] for y in Y'')-sum(S[x] for x in X'') <= |X|*|Y| for all 0 <= |X| <= R and 0 <= |Y| <= C
# and Y'' is the biggest |Y| of D and X'' is the smallest R-|X| of S
return S_prefix[-1] == D_suffix[0] and \
all(S_prefix[i]-D_suffix[j] <= i*j for i in xrange(len(S_prefix)) for j in xrange(len(D_suffix))) # Time: O(R * C)
def square_free():
R, C = map(int, raw_input().strip().split())
S = map(lambda x: C-int(x), raw_input().strip().split())
D = map(lambda x: R-int(x), raw_input().strip().split())
if not possible(S[:], D[:]):
return "IMPOSSIBLE"
result = [['/']*C for _ in xrange(R)]
for i in xrange(R):
for j in xrange(C):
if not (S[i] >= 1 and D[j] >= 1 and possible([S[k]-int(k == i) for k in xrange(len(S))], [D[k]-int(k == j) for k in xrange(len(D))])):
continue
result[i][j] = '\\' # lexicographically smallest, assumed '\\' < '/'
S[i], D[j] = S[i]-1, D[j]-1
return "POSSIBLE\n"+"\n".join("".join(row) for row in result)
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, square_free())
|
import json
import csv
import requests
from requests.auth import HTTPBasicAuth
import DiscoveryDetails as dt
from ibm_cloud_sdk_core.api_exception import ApiException
def delete_and_add_example(query_id, document_id, relevance):
deleteResult = dt.discovery.delete_training_example(dt.environment_id, dt.collection_id, query_id, document_id)
print("Delete result = " + json.dumps(deleteResult.get_result()))
add_example_result = dt.discovery.create_training_example(dt.environment_id, dt.collection_id, query_id, document_id=document_id, cross_reference=None, relevance=relevance)
print("add_example_result = " + json.dumps(add_example_result.get_result()))
def create_training_example(query_id, document_id, relevance):
print("---")
print("document_id = " + str(document_id))
print("relevance = " + str(relevance))
try:
create_result = dt.discovery.create_training_example(dt.environment_id, dt.collection_id, query_id, document_id=document_id, cross_reference=None, relevance=relevance)
print("create_result = " + json.dumps(create_result))
except ApiException as apiE:
if( apiE.code == 409 ): #Example already exists. Delete it and add
print("Example exists. Delete example and add example with new relevancy score")
delete_and_add_example(query_id, document_id, relevance)
#function for posting to training data endpoint
def training_post(training_obj):
nlQuery = training_obj["natural_language_query"]
examples = training_obj["examples"]
add_training_data_result = None
try:
add_training_data_result = dt.discovery.add_training_data(dt.environment_id, dt.collection_id, natural_language_query=nlQuery, filter=None, examples=examples)
except ApiException as apiE:
# Check if the query already exists
try:
if( apiE.code == 409 ): # Query already exists
error = apiE.message
partAfterQueryId = error.split("id ",1)[1]
query_id = partAfterQueryId.split(" ",1)[0]
print("Query already exists. Add examples")
print("query_id = " + str(query_id))
for example in training_obj["examples"]:
create_training_example(query_id, example["document_id"], example["relevance"])
else:
print("ApiException occurred in training_post when calling discovery.add_training_data api with error code = " + str(apiE.code))
print(apiE)
raise Exception(apiE)
except Exception as e:
print("Exception occurred in training_post when calling discovery.add_training_data api")
print(e)
raise Exception(e)
#open the training file and create new training data objects
with open("./training_file.tsv",'r') as training_doc:
training_csv = csv.reader(training_doc, delimiter='\t')
#create a new object for each example
noOfQuestions = 0
for row in training_csv:
noOfExamples = int((len(row) - 1)/3)
training_obj = {}
training_obj["examples"] = []
training_obj["natural_language_query"] = row[0]
noOfQuestions = noOfQuestions + 1
print("Question No. " + str(noOfQuestions))
print("Question = " + training_obj["natural_language_query"])
i = 1
for j in range(1, noOfExamples + 1):
example_obj = {}
if( row[i+2] and row[i+2].strip() == ""):
row[i+2] = 0
example_obj["relevance"] = row[i+2]
example_obj["document_id"] = row[i]
training_obj["examples"].append(example_obj)
i = i + 3
training_post(training_obj)
print("----------------")
print("Number of questions = " + str(noOfQuestions))
print("**************")
print("************** RELEVANCY TRAINING COMPLETED **************")
print("**************")
|
import urllib.request
from bs4 import BeautifulSoup
def getDOBBoilerData( boroNum, houseNum, houseStreet ):
url = requestToDOBUrl( boroNum, houseNum, houseStreet )
soup = urlToSoup( url )
if hasDOBData( soup ):
return extractDOBDataFromSoup( soup )
else:
return "Invalid Query"
def requestToDOBUrl( boroNum, houseNum, houseStreet ):
return ("http://a810-bisweb.nyc.gov/bisweb/PropertyProfileOverviewServlet" +
"?boro=" + str(boroNum) +
"&houseno=" + str(houseNum) +
"&street=" + houseStreet.replace(' ','+'))
def urlToSoup( url ):
"Takes in URL and returns a soup object of the contents."
webpage = urllib.request.urlopen( url )
soup = BeautifulSoup( webpage.read(), "html.parser" )
# soup.unicode
return soup
def hasDOBData( soup ):
"Checks to see whether DEP data exist for a given application number."
tables = soup.find_all("table")
return tables[1].get_text().find("NO RECORD") == -1
def extractDOBDataFromSoup( soup ):
"""
Takes in data structure from BeautifulSoup and parses for DOB Boiler Data.
We assume that the soup has been prescreened to ensure that data exist.
"""
allUrls = soup.find_all('a')
#get the url with the reference to the "BoilerComplianceQueryServlet".
#There should be exactly one such url.
for i in allUrls:
if i['href'].find("BoilerComplianceQueryServlet") != -1:
url = "http://a810-bisweb.nyc.gov/bisweb/" + i['href']
soup2 = urlToSoup(url)
boilerTables = soup2.find_all('table')
records = list()
for row in boilerTables[3].find_all('tr'): #grab the table with boiler data
records.append(row.get_text().strip('\n').split('\n'))
return records
|
def binarySearch(array, start, end, needle):
if start > end:
return -1
mid = (start + end)/2;
if array[mid] == needle:
return mid
if needle < array[mid]:
end = mid-1
else:
start = mid+1
return binarySearch(array, start, end, needle)
array = [2, 3, 14, 25, 36, 47];
start = 0
end = len(array) - 1
needle = 13
index = binarySearch(array, start, end, needle);
if index != -1:
print str(index) + ' ' + str(array[index])
print 'Not found: ' + str(index) |
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0,5.0,0.01)
y = np.cos(2*np.pi*t)
plt.plot(t,y,'r--')
#注释函数
#xy 注释坐标
#xytext 注释文字坐标
plt.annotate('local max',xy = (2,1), xytext = (3,1.5),arrowprops=dict(facecolor='black', shrink=0.05),)
#y范围
plt.ylim(-2,2)
plt.show()
|
import os
os.system("rm ks_cpp.so")
import numpy as np
from KS_Sampling import ks_sampling, ks_sampling_mem
np.set_printoptions(precision=6, linewidth=120, suppress=True)
np.random.seed(0)
if __name__ == '__main__':
# -- Example 1 -- 5000 data points, feature vector length 100
n_sample = 5000
n_feature = 100
X = np.random.randn(n_sample, n_feature)
X *= 100
print(ks_sampling(X, seed=[345, 456], n_result=4990))
print(ks_sampling(X, seed=[345, 456], n_result=4990, backend="Python"))
print(ks_sampling_mem(X, seed=[345, 456], n_result=4990))
print(ks_sampling_mem(X, seed=[345, 456], n_result=4990, backend="Python"))
# (array([ 345, 456, 450, ..., 1696, 4495, 4400]),
# array([1388.464734, 1649.251576, 1633.396292, ..., 959.175021, 956.828118, 0. ]))
# -- Example 2 -- data with sets of same values
X = np.array([[1], [1], [2], [2], [2], [3], [3]])
print(ks_sampling(X))
print(ks_sampling(X, backend="Python"))
print(ks_sampling_mem(X))
print(ks_sampling_mem(X, backend="Python"))
|
import collections
import os
import urllib
import pytest
import torch
import torchvision
from pytest import approx
from torchvision.datasets.utils import download_url
from torchvision.io import _HAS_VIDEO_OPT, VideoReader
# WARNING: these tests have been skipped forever on the CI because the video ops
# are never properly available. This is bad, but things have been in a terrible
# state for a long time already as we write this comment, and we'll hopefully be
# able to get rid of this all soon.
try:
import av
# Do a version test too
torchvision.io.video._check_av_available()
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
CheckerConfig = ["duration", "video_fps", "audio_sample_rate"]
GroundTruth = collections.namedtuple("GroundTruth", " ".join(CheckerConfig))
def backends():
backends_ = ["video_reader"]
if av is not None:
backends_.append("pyav")
return backends_
def fate(name, path="."):
"""Download and return a path to a sample from the FFmpeg test suite.
See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_
"""
file_name = name.split("/")[1]
download_url("http://fate.ffmpeg.org/fate-suite/" + name, path, file_name)
return os.path.join(path, file_name)
test_videos = {
"RATRACE_wave_f_nm_np1_fr_goo_37.avi": GroundTruth(duration=2.0, video_fps=30.0, audio_sample_rate=None),
"SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi": GroundTruth(
duration=2.0, video_fps=30.0, audio_sample_rate=None
),
"TrumanShow_wave_f_nm_np1_fr_med_26.avi": GroundTruth(duration=2.0, video_fps=30.0, audio_sample_rate=None),
"v_SoccerJuggling_g23_c01.avi": GroundTruth(duration=8.0, video_fps=29.97, audio_sample_rate=None),
"v_SoccerJuggling_g24_c01.avi": GroundTruth(duration=8.0, video_fps=29.97, audio_sample_rate=None),
"R6llTwEh07w.mp4": GroundTruth(duration=10.0, video_fps=30.0, audio_sample_rate=44100),
"SOX5yA1l24A.mp4": GroundTruth(duration=11.0, video_fps=29.97, audio_sample_rate=48000),
"WUzgd7C1pWA.mp4": GroundTruth(duration=11.0, video_fps=29.97, audio_sample_rate=48000),
}
@pytest.mark.skipif(_HAS_VIDEO_OPT is False, reason="Didn't compile with ffmpeg")
class TestVideoApi:
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("test_video", test_videos.keys())
@pytest.mark.parametrize("backend", backends())
def test_frame_reading(self, test_video, backend):
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
with av.open(full_path) as av_reader:
if av_reader.streams.video:
av_frames, vr_frames = [], []
av_pts, vr_pts = [], []
# get av frames
for av_frame in av_reader.decode(av_reader.streams.video[0]):
av_frames.append(torch.tensor(av_frame.to_rgb().to_ndarray()).permute(2, 0, 1))
av_pts.append(av_frame.pts * av_frame.time_base)
# get vr frames
video_reader = VideoReader(full_path, "video")
for vr_frame in video_reader:
vr_frames.append(vr_frame["data"])
vr_pts.append(vr_frame["pts"])
# same number of frames
assert len(vr_frames) == len(av_frames)
assert len(vr_pts) == len(av_pts)
# compare the frames and ptss
for i in range(len(vr_frames)):
assert float(av_pts[i]) == approx(vr_pts[i], abs=0.1)
mean_delta = torch.mean(torch.abs(av_frames[i].float() - vr_frames[i].float()))
# on average the difference is very small and caused
# by decoding (around 1%)
# TODO: asses empirically how to set this? atm it's 1%
# averaged over all frames
assert mean_delta.item() < 2.55
del vr_frames, av_frames, vr_pts, av_pts
# test audio reading compared to PYAV
with av.open(full_path) as av_reader:
if av_reader.streams.audio:
av_frames, vr_frames = [], []
av_pts, vr_pts = [], []
# get av frames
for av_frame in av_reader.decode(av_reader.streams.audio[0]):
av_frames.append(torch.tensor(av_frame.to_ndarray()).permute(1, 0))
av_pts.append(av_frame.pts * av_frame.time_base)
av_reader.close()
# get vr frames
video_reader = VideoReader(full_path, "audio")
for vr_frame in video_reader:
vr_frames.append(vr_frame["data"])
vr_pts.append(vr_frame["pts"])
# same number of frames
assert len(vr_frames) == len(av_frames)
assert len(vr_pts) == len(av_pts)
# compare the frames and ptss
for i in range(len(vr_frames)):
assert float(av_pts[i]) == approx(vr_pts[i], abs=0.1)
max_delta = torch.max(torch.abs(av_frames[i].float() - vr_frames[i].float()))
# we assure that there is never more than 1% difference in signal
assert max_delta.item() < 0.001
@pytest.mark.parametrize("stream", ["video", "audio"])
@pytest.mark.parametrize("test_video", test_videos.keys())
@pytest.mark.parametrize("backend", backends())
def test_frame_reading_mem_vs_file(self, test_video, stream, backend):
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
reader = VideoReader(full_path)
reader_md = reader.get_metadata()
if stream in reader_md:
# Test video reading from file vs from memory
vr_frames, vr_frames_mem = [], []
vr_pts, vr_pts_mem = [], []
# get vr frames
video_reader = VideoReader(full_path, stream)
for vr_frame in video_reader:
vr_frames.append(vr_frame["data"])
vr_pts.append(vr_frame["pts"])
# get vr frames = read from memory
f = open(full_path, "rb")
fbytes = f.read()
f.close()
video_reader_from_mem = VideoReader(fbytes, stream)
for vr_frame_from_mem in video_reader_from_mem:
vr_frames_mem.append(vr_frame_from_mem["data"])
vr_pts_mem.append(vr_frame_from_mem["pts"])
# same number of frames
assert len(vr_frames) == len(vr_frames_mem)
assert len(vr_pts) == len(vr_pts_mem)
# compare the frames and ptss
for i in range(len(vr_frames)):
assert vr_pts[i] == vr_pts_mem[i]
mean_delta = torch.mean(torch.abs(vr_frames[i].float() - vr_frames_mem[i].float()))
# on average the difference is very small and caused
# by decoding (around 1%)
# TODO: asses empirically how to set this? atm it's 1%
# averaged over all frames
assert mean_delta.item() < 2.55
del vr_frames, vr_pts, vr_frames_mem, vr_pts_mem
else:
del reader, reader_md
@pytest.mark.parametrize("test_video,config", test_videos.items())
@pytest.mark.parametrize("backend", backends())
def test_metadata(self, test_video, config, backend):
"""
Test that the metadata returned via pyav corresponds to the one returned
by the new video decoder API
"""
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
reader = VideoReader(full_path, "video")
reader_md = reader.get_metadata()
assert config.video_fps == approx(reader_md["video"]["fps"][0], abs=0.0001)
assert config.duration == approx(reader_md["video"]["duration"][0], abs=0.5)
@pytest.mark.parametrize("test_video", test_videos.keys())
@pytest.mark.parametrize("backend", backends())
def test_seek_start(self, test_video, backend):
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
video_reader = VideoReader(full_path, "video")
num_frames = 0
for _ in video_reader:
num_frames += 1
# now seek the container to 0 and do it again
# It's often that starting seek can be inprecise
# this way and it doesn't start at 0
video_reader.seek(0)
start_num_frames = 0
for _ in video_reader:
start_num_frames += 1
assert start_num_frames == num_frames
# now seek the container to < 0 to check for unexpected behaviour
video_reader.seek(-1)
start_num_frames = 0
for _ in video_reader:
start_num_frames += 1
assert start_num_frames == num_frames
@pytest.mark.parametrize("test_video", test_videos.keys())
@pytest.mark.parametrize("backend", ["video_reader"])
def test_accurateseek_middle(self, test_video, backend):
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
stream = "video"
video_reader = VideoReader(full_path, stream)
md = video_reader.get_metadata()
duration = md[stream]["duration"][0]
if duration is not None:
num_frames = 0
for _ in video_reader:
num_frames += 1
video_reader.seek(duration / 2)
middle_num_frames = 0
for _ in video_reader:
middle_num_frames += 1
assert middle_num_frames < num_frames
assert middle_num_frames == approx(num_frames // 2, abs=1)
video_reader.seek(duration / 2)
frame = next(video_reader)
lb = duration / 2 - 1 / md[stream]["fps"][0]
ub = duration / 2 + 1 / md[stream]["fps"][0]
assert (lb <= frame["pts"]) and (ub >= frame["pts"])
def test_fate_suite(self):
# TODO: remove the try-except statement once the connectivity issues are resolved
try:
video_path = fate("sub/MovText_capability_tester.mp4", VIDEO_DIR)
except (urllib.error.URLError, ConnectionError) as error:
pytest.skip(f"Skipping due to connectivity issues: {error}")
vr = VideoReader(video_path)
metadata = vr.get_metadata()
assert metadata["subtitles"]["duration"] is not None
os.remove(video_path)
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("test_video,config", test_videos.items())
@pytest.mark.parametrize("backend", backends())
def test_keyframe_reading(self, test_video, config, backend):
torchvision.set_video_backend(backend)
full_path = os.path.join(VIDEO_DIR, test_video)
av_reader = av.open(full_path)
# reduce streams to only keyframes
av_stream = av_reader.streams.video[0]
av_stream.codec_context.skip_frame = "NONKEY"
av_keyframes = []
vr_keyframes = []
if av_reader.streams.video:
# get all keyframes using pyav. Then, seek randomly into video reader
# and assert that all the returned values are in AV_KEYFRAMES
for av_frame in av_reader.decode(av_stream):
av_keyframes.append(float(av_frame.pts * av_frame.time_base))
if len(av_keyframes) > 1:
video_reader = VideoReader(full_path, "video")
for i in range(1, len(av_keyframes)):
seek_val = (av_keyframes[i] + av_keyframes[i - 1]) / 2
data = next(video_reader.seek(seek_val, True))
vr_keyframes.append(data["pts"])
data = next(video_reader.seek(config.duration, True))
vr_keyframes.append(data["pts"])
assert len(av_keyframes) == len(vr_keyframes)
# NOTE: this video gets different keyframe with different
# loaders (0.333 pyav, 0.666 for us)
if test_video != "TrumanShow_wave_f_nm_np1_fr_med_26.avi":
for i in range(len(av_keyframes)):
assert av_keyframes[i] == approx(vr_keyframes[i], rel=0.001)
if __name__ == "__main__":
pytest.main([__file__])
|
import re
from pathlib import Path
from bs4 import BeautifulSoup
from django.core.management import BaseCommand
from psqlextra.query import ConflictAction
from psqlextra.util import postgres_manager
# https://www.goodreads.com/work/quotes/1494157
from web.models import GoodreadsQuote
class Command(BaseCommand):
def handle(self, *args, **kwargs):
quotes_dir = Path("data/goodreads_scrape/quotes").absolute().glob("*")
for file in quotes_dir:
book_id, page_number = re.match("(\d+)\?page=(\d+)", file.name).groups()
print(book_id, page_number)
html_doc = open(file, 'r').read()
soup = BeautifulSoup(html_doc, 'html.parser')
# kill all script and style elements
for script in soup(["script", "style"]):
script.decompose()
quotes = []
for quote_html in soup.find_all(class_='quoteText'):
quote = str(quote_html)
start_quote_index = quote.index('>') + 1
end_quote_index = quote.index('<br/>')
quote_text = quote[start_quote_index:end_quote_index].strip()
quotes.append({
"text": quote_text,
"source_book_id": book_id
})
with postgres_manager(GoodreadsQuote) as manager:
manager.on_conflict(['text', 'book'], ConflictAction.NOTHING).bulk_insert(quotes)
print(GoodreadsQuote.objects.count())
|
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
from DynamicSchedulerGeneric import Utils as DynSchedUtils
class GLUE2Exception(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
share_regex = re.compile("dn:\s*GLUE2ShareID\s*=\s*.+")
pol_regex = re.compile("dn:\s*GLUE2PolicyID\s*=\s*.+")
attr_regex = re.compile("(GLUE2\w+)\s*:\s*(.+)")
class ShareContainer:
def __init__(self):
self.id = None
self.mqueue = None
def check(self):
if not self.id:
raise GLUE2Exception("Missing mandatory attribute GLUE2ShareID")
if not self.mqueue:
raise GLUE2Exception("Missing mandatory attribute GLUE2ComputingShareMappingQueue")
class PolicyContainer:
def __init__(self):
self.vo = None
self.fkey = None
def check(self):
if not self.vo:
raise GLUE2Exception("Missing mandatory attribute GLUE2PolicyUserDomainForeignKey")
if not self.fkey:
raise GLUE2Exception("Missing mandatory attribute GLUE2MappingPolicyShareForeignKey")
def parseGLUETemplate(ldifFilename, shareTable, policyTable, share_fkeys):
static_file = None
try:
currShare = None
currPolicy = None
static_file = open(ldifFilename)
for line in static_file:
parsed = share_regex.match(line)
if parsed:
currShare = line.strip()
shareTable[currShare] = ShareContainer()
continue
parsed = pol_regex.match(line)
# Ignore policy for endpoint in case
if parsed and line.find('GLUE2Share') > 0:
currPolicy = line.strip()
policyTable[currPolicy] = PolicyContainer()
continue
parsed = attr_regex.match(line)
if parsed:
key = parsed.group(1)
value = parsed.group(2).strip()
if key == 'GLUE2ShareID' and currShare:
shareTable[currShare].id = value
elif key == 'GLUE2ComputingShareMappingQueue' and currShare:
shareTable[currShare].mqueue = value
elif key == 'GLUE2PolicyUserDomainForeignKey' and currPolicy:
policyTable[currPolicy].vo = value
elif key == 'GLUE2MappingPolicyShareForeignKey' and currPolicy:
policyTable[currPolicy].fkey = value
continue
if len(line.strip()) == 0:
if currShare:
shareTable[currShare].check()
if currPolicy:
policyTable[currPolicy].check()
share_fkeys[policyTable[currPolicy].fkey] = policyTable[currPolicy]
currShare = None
currPolicy = None
#close cycle
if currShare:
shareTable[currShare].check()
if currPolicy:
policyTable[currPolicy].check()
share_fkeys[policyTable[currPolicy].fkey] = policyTable[currPolicy]
for shareID in share_fkeys:
missing = True
for shareData in shareTable.values():
if shareID == shareData.id:
missing = False
if missing:
raise GLUE2Exception("Invalid foreign key " + shareID)
finally:
if static_file:
static_file.close()
def process(config, collector, out=sys.stdout):
shareTable = dict()
policyTable = dict()
share_fkeys = dict()
ldifList = DynSchedUtils.getLDIFFilelist(config, 'ComputingShare.ldif')
for ldifFilename in ldifList:
parseGLUETemplate(ldifFilename, shareTable, policyTable, share_fkeys)
for shareDN in shareTable:
shareData = shareTable[shareDN]
policyData = share_fkeys[shareData.id]
out.write("%s\n" % shareDN)
nwait = collector.queuedJobsOnQueueForVO(shareData.mqueue, policyData.vo)
nrun = collector.runningJobsOnQueueForVO(shareData.mqueue, policyData.vo)
out.write("GLUE2ComputingShareRunningJobs: %d\n" % nrun)
out.write("GLUE2ComputingShareWaitingJobs: %d\n" % nwait)
out.write("GLUE2ComputingShareTotalJobs: %d\n" % (nrun + nwait))
if collector.isSetERT(shareData.mqueue):
out.write("GLUE2ComputingShareEstimatedAverageWaitingTime: %d\n" % collector.getERT(shareData.mqueue))
else:
out.write("GLUE2ComputingShareEstimatedAverageWaitingTime: 0\n")
if collector.isSetWRT(shareData.mqueue):
out.write("GLUE2ComputingShareEstimatedWorstWaitingTime: %d\n" % collector.getWRT(shareData.mqueue))
else:
out.write("GLUE2ComputingShareEstimatedWorstWaitingTime: 0\n")
nfreeSlots = collector.freeSlots(shareData.mqueue, policyData.vo)
if nfreeSlots >= 0:
out.write("GLUE2ComputingShareFreeSlots: %d\n" % nfreeSlots)
out.write("\n")
|
packList = ['a', 'a', 'b', 'c', 'd', 'e', 'e', 'f', 'a', 'a', 'a', 'q', 'q', 'r']
bufferList = []
runList = []
element = 0
sameChar = 0
bounds = len(packList) - 1
while sameChar <= bounds:
bufferList.append(packList[element])
sameChar = element + 1
while sameChar <= bounds:
if packList[element] == packList[sameChar]:
bufferList.append(packList[sameChar])
sameChar = sameChar + 1
else:
if len(bufferList) == 1:
runList.append(bufferList[0])
bufferList = []
element = sameChar
break
else:
myTuple = (len(bufferList), packList[element])
runList.append(myTuple)
bufferList = []
element = sameChar
break
else:
if len(bufferList) == 1:
runList.append(bufferList[0])
else:
yourTuple = (len(bufferList), packList[element])
runList.append(yourTuple)
print(runList) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.