text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
import scrapy
from dingxiangyuan import settings
from dingxiangyuan.items import DingxiangyuanItemLoader, BoardItem
def get_start_urls():
    """Return the crawl entry points: every board URL configured in settings.BOARD_MAP.

    Iterating a dict yields its keys, so list() replaces the redundant
    ``[url for url in ...]`` identity comprehension.
    """
    return list(settings.BOARD_MAP)
class BoardSpider(scrapy.Spider):
    """Spider that scrapes per-board metadata (id, name, topic count, moderator
    links) from dxy.cn forum board pages listed in settings.BOARD_MAP."""
    name = 'board'
    allowed_domains = ['dxy.cn']
    # start_urls = ['http://www.dxy.cn/bbs/board/112', 'http://neuro.dxy.cn/bbs/board/46']

    def start_requests(self):
        # Seed one request per configured board URL instead of using start_urls.
        for url in get_start_urls():
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Build one BoardItem from a board page.

        BOARD_MAP is keyed by board URL; value[0] appears to be the board id and
        value[1] the board name — confirm against settings.BOARD_MAP.
        """
        item_loader = DingxiangyuanItemLoader(item=BoardItem(), response=response)
        board_id = settings.BOARD_MAP[response.url][0]
        board_name = settings.BOARD_MAP[response.url][1]
        board_url = response.url
        item_loader.add_value("board_id", board_id)
        item_loader.add_value("board_name", board_name)
        item_loader.add_value("board_url", board_url)
        # Topic counter and moderator profile links scraped straight off the page.
        item_loader.add_xpath("topic_num", '//div[@id="board"]/div/div/span[3]/text()')
        item_loader.add_xpath("moderator_url_list", '//div[@id="moderator"]/ul/li[2]/div/ul/li/a/@href')
        board_item = item_loader.load_item()
        yield board_item
|
# -*- coding:gbk -*-
import sys
import signal
import pyuv
import client_info, utils
#import gc
#gc.disable()
def startServer():
    """Run a pyuv TCP server on 0.0.0.0:2222 until SIGINT.

    The signal callback drops all tracked clients and closes our own handles,
    which lets ``loop.run()`` return so the function can finish.
    """
    def signal_cb(handle, signum):
        # Graceful shutdown: clients first, then our signal and listen handles.
        client_info.closeAllClients()
        signal_h.close()
        server.close()

    utils.log("PyUV version %s" % pyuv.__version__)
    loop = utils.getLibUVLoop()
    server = pyuv.TCP(loop)
    server.bind(("0.0.0.0", 2222))
    server.listen(client_info.on_connection)  # client_info owns the accept logic
    signal_h = pyuv.Signal(loop)
    signal_h.start(signal_cb, signal.SIGINT)
    loop.run()  # blocks until every handle is closed
    utils.log("Stopped!")
def main():
    """Entry point: install the project-wide exception hook, then serve forever."""
    sys.excepthook = utils.exceptHook
    startServer()

if __name__ == "__main__":
    main()
|
import struct
import sys
import pprint
# Path of the binary data file to parse is the first CLI argument.
filename = sys.argv[1]
# Whole file as a list of byte values; every reader below indexes into it.
# NOTE(review): the file handle is never closed — harmless for a one-shot script.
read = list(open(filename, "rb").read())
# Global cursor into `read`; every read_* helper advances it.
pos = 0
palettes = []
def print_pos():
    """Print the current cursor position in hex (debug helper)."""
    print('0x%x'%(pos))

# NOTE: all readers below name their local `short` regardless of actual width;
# they are big-endian and advance the global cursor `pos`.

def read_boolean():
    """Read one byte as a boolean and advance the cursor."""
    global pos
    short = struct.unpack('>?', bytes(read[pos:pos+1]))[0]
    pos += 1
    return short

def read_byte():
    """Read one signed byte."""
    global pos
    short = struct.unpack('>b', bytes(read[pos:pos+1]))[0]
    pos += 1
    return short

def read_unsigned_byte():
    """Read one unsigned byte."""
    global pos
    short = struct.unpack('>B', bytes(read[pos:pos+1]))[0]
    pos += 1
    return short

def read_short():
    """Read a big-endian signed 16-bit integer."""
    global pos
    short = struct.unpack('>h', bytes(read[pos:pos+2]))[0]
    pos += 2
    return short

def read_unsigned_short():
    """Read a big-endian unsigned 16-bit integer."""
    global pos
    short = struct.unpack('>H', bytes(read[pos:pos+2]))[0]
    pos += 2
    return short

def read_int():
    """Read a big-endian signed 32-bit integer."""
    global pos
    short = struct.unpack('>i', bytes(read[pos:pos+4]))[0]
    pos += 4
    return short

def read_utf():
    """Read a 16-bit-length-prefixed string; returns raw bytes (not decoded)."""
    global pos
    length = read_short()
    string = bytes(read[pos:pos+length])
    pos += length
    return string
def read_palette():
    """Read the palette count and skip over each palette's payload (unparsed)."""
    global palettes, pos
    palette_amt = read_short()
    palettes = []
    for i in range(palette_amt):
        size = read_int()
        # parse palettes
        pos += size  # payload is skipped, not decoded
        #print(size)
read_palette()
# Font table accumulators filled in by read_fonts().
font_amt = 0
font_images = []
font_sizes = []
font_offsets = []
iarr = []
def read_fonts():
    """Parse the font table: per font an image name plus, per size variant,
    256 glyph offsets and 256 glyph widths (interleaved shorts)."""
    global font_images, font_sizes, font_offsets, iarr, pos
    font_amt = read_short()
    for i in range(font_amt):
        font_images.append(read_utf())
        somelen = read_short()  # number of size variants for this font
        font_sizes.append([])
        font_offsets.append([])
        for j in range(somelen):
            #somelen1 = struct.unpack('>I', bytes(read[pos:pos+4]))[0]
            pos += 4  # skip an unidentified 4-byte field per variant
            font_sizes[i].append([])
            font_offsets[i].append([])
            iarr.append(read_short())
            for k in range(256):
                font_offsets[i][j].append(read_short())
                font_sizes[i][j].append(read_short())
"""print(font_images)
print(font_amt)
print(font_sizes[0])
print(font_sizes[1])
print(font_offsets[0])
print(font_offsets[1])
print(iarr)
print("0x%x"%pos)"""
# Count of 8-byte records that precede the font table; they are skipped.
readShort3 = read_short()
#print(readShort3)
#print(font_offsets[0])
#X = new image[readShort3]
# more
for i in range(readShort3):
    pos += 8  # skip each 8-byte record
read_fonts()
print_pos()
languages = []  # languages[i] = list of byte-strings for language i
items = []      # filled later by read_items()

def read_strings():
    """Parse the per-language string tables into the global `languages` list."""
    global languages
    unknowns = []
    languages_amt = read_short()
    for i in range(languages_amt):
        read_int()  # unidentified per-language field, discarded
        strings_amt = read_short()
        strings = []
        for j in range(strings_amt):
            strings.append(read_utf())
        languages.append(strings)
        unknowns.append(read_short())
    print(unknowns)
read_strings()

def lps(x):
    """Print string with id *x* from the first language (debug helper)."""
    print(languages[0][x])

def lp(*args):
    """Print several string ids at once (debug helper; currently unused)."""
    for i in args:
        lps(i)

# Any extra CLI arguments are treated as string ids to dump.
for i in range(len(sys.argv) - 2):
    lps(int(sys.argv[i+2]))
def read_images():
    """Parse sprite bounding boxes, the sprite-info table and image name strings.

    Everything stays local; the early ``return`` keeps the debug prints below
    it dead (left in place so they can be re-enabled).
    """
    images_len = read_short()
    arr1 = [] # sprite_info_offset
    arr2 = [] # boundingbox
    i = 1
    while i <= images_len:
        arr2.append(read_short())
        arr2.append(read_short())
        arr2.append(read_short())
        arr2.append(read_short())
        arr1.append(read_short())
        i += 1
    arr3 = [] # sprite_info
    print(len(arr1))
    # NOTE(review): arr1[images_len - 1] raises IndexError when images_len == 0.
    for i in range(arr1[images_len - 1]):
        arr3.append(read_short())
    images_strings = []
    images_strings_len = read_short()
    for i in range(images_strings_len):
        images_strings.append(read_utf())
    return
    # dead debug dump
    print(arr1)
    print("")
    print(arr2)
    print("")
    print(arr3)
    print("")
    print(images_strings)
    print(len(images_strings))
read_images()
def read_clips():
    """Parse the clip table: clips -> frames -> shorts (sprite references)."""
    clips_len = read_short()
    print('%i clips' % clips_len)
    clips = []
    for i in range(clips_len):
        clips_sub_len = read_short()
        clips_sub = []
        for j in range(clips_sub_len):
            clips_ssub_len = read_short()
            clips_ssub = []
            for k in range(clips_ssub_len):
                clips_ssub.append(read_short())
            clips_sub.append(clips_ssub)
        clips.append(clips_sub)
    #return
    # debug dump of every clip
    for i in range(len(clips)):
        print(i)
        print(clips[i])
read_clips()
def read_sound():
    """Parse sound metadata: file name, mime type, priority and preload flag.

    The early ``return`` keeps the debug prints below dead.
    """
    sounds_len = read_short()
    sound_files = []
    sound_mimes = []
    sound_priority = []
    sound_load = []
    for i in range(sounds_len):
        sound_files.append(read_utf())
        sound_mimes.append(read_utf())
        sound_priority.append(read_int())
        sound_load.append(read_boolean())
    return
    print(sound_files)
    print(sound_mimes)
    print(sound_priority)
    print(sound_load)
read_sound()
def read_items():
    """Parse the item table into the global `items` list (6 ints + 1 short each)."""
    global items
    items_len = read_short()
    items = []
    for i in range(items_len):
        item = []
        # 0 = type? 0 = weapon, 1 = food, 2 = addon? (gear, durability, colors)
        # 1 = price?
        # 2 = increment
        # 3 = max owned
        # 4 = item name id
        # 5 = item desc id
        # 6 = sprite id
        for x in range(6):
            item.append(read_int())
        item.append(read_short())
        items.append(item)
        #continue
        # debug: item name (resolved via the string table) plus raw fields
        print('%i %s %s' % (i, str(languages[0][item[4]]), str(item)))
read_items()
def read_quests():
    """Parse the quest table; results stay in a local list (side effect: cursor)."""
    quests_len = read_short()
    quests = []
    for i in range(quests_len):
        quest = []
        # 0 = currently active?
        # 1 = ?
        # 2 = active
        # 4 = complete
        # 1 = person giving the quest (tid)
        # 2 = is mission start?
        # 3 = person sprite id
        # 4 = quest name (tid)
        # 5 = quest description (tid)
        # 6 = level id
        quest.append(0)
        quest.append(read_int())
        if read_boolean():
            quest.append(1)
        else:
            quest.append(0)
        quest.append(read_short())
        quest.append(read_int())
        quest.append(read_int())
        quest.append(read_int())
        quests.append(quest)
        continue
        # dead debug dump (the `continue` above always skips it)
        pprint.pprint([
            languages[0][quest[1]],
            languages[0][quest[4]],
            languages[0][quest[5]],
            quest
        ])
read_quests()
def read_gangs():
    """Parse the gang table (name tid, sprite, two unknowns, default notoriety)."""
    gangs_len = read_short()
    gangs = []
    for i in range(gangs_len):
        gang = []
        # 0 = gang name
        # 1 = sprite id
        # 2 = ?
        # 3 = default notoriety
        # 4 = ?
        gang.append(read_int())
        gang.append(read_short())
        gang.append(read_short())
        gang.append(read_byte())
        gang.append(read_int())
        gangs.append(gang)
        continue
        # dead debug dump (the `continue` above always skips it)
        pprint.pprint([
            languages[0][gang[0]],
            gang
        ])
read_gangs()
print(pos)
def read_effects():
    """Parse visual-effect records; the payload layout depends on effect_type:
    0 = clip reference, 1 = spawner list, 2 = linked effect + operation arrays,
    3 = colored rect, 4 = colored sized shape."""
    effects_len = read_short()
    for effect_i in range(effects_len):
        # effect_i 9 = gore
        effect = {}
        effect["effect_type"] = read_int()
        effect_type = effect["effect_type"]
        effect["should_be_2"] = read_int()
        effect["unk1"] = read_int()
        effect["animation_time"] = read_unsigned_short()
        #print(pos)
        if effect_type == 0: # clip
            effect["clip"] = read_int()
        if effect_type == 1: # spawner
            spawners_len = read_short()
            spawners = []
            for i in range(spawners_len):
                newarray = []
                for j in range(5):
                    newarray.append(0)
                # effect id
                newarray[3] = read_int()
                # increment amount (n / this = number of effects added)
                newarray[4] = read_unsigned_short()
                # 0-2: pos-orientation
                newarray[0] = read_int()
                newarray[1] = read_int()
                newarray[2] = read_int()
                spawners.append(newarray)
            effect["spawners"] = spawners
        if effect_type == 2:
            effect["linked_effect"] = read_int()
            array_len = read_short()
            array1 = []
            array2 = []
            for i in range(array_len):
                array2.append([
                    # 0: operation (0-3)
                    read_int(),
                    # 1: always 0?, adds to the current time elapsed for the operation
                    read_int(),
                    # 2: variable 0
                    read_int(),
                    # 3: variable 1
                    read_int()
                ])
                newarray = []
                newarray_len = read_short()
                for j in range(newarray_len * 2):
                    newarray.append(read_int())
                array1.append(newarray)
            effect["array1"] = array1
            effect["array2"] = array2
        if effect_type == 3:
            effect["color"] = '%x'%(read_int())
            effect["rect_size"] = read_unsigned_byte()
        if effect_type == 4:
            effect["color"] = '%x'%(read_int())
            effect["size"] = read_int()
        #print(str(effect_i) + " " + str(effect_type))
        #pprint.pprint(effect)
read_effects()
def read_classes():
    """Parse object classes; the /65536.0 fields decode 16.16 fixed-point floats."""
    classes_len = read_short()
    print(str(classes_len) + " classes")
    classes = []
    for i in range(classes_len):
        classes.append([
            # 0: object type
            read_int(),
            # 1: clip id
            read_int(),
            # 2: default health
            read_short(),
            # 3: weight (float)
            read_int() / 65536.0,
            # 4: x or y (float), width?
            read_int() / 65536.0,
            # 5: y or x (float), height?
            read_int() / 65536.0,
            # 6:
            read_int(),
            # 7:
            read_int()
        ])
        #continue
        print(i)
        pprint.pprint(classes[i])
read_classes()
def read_weapons():
    """Parse the weapon table; each record references an item id."""
    weapons_len = read_short()
    print(str(weapons_len) + " weapons")
    weapons = []
    for i in range(weapons_len):
        weapons.append([
            # 0: item id
            read_int(),
            # 1: weapon class?
            # 0 = melee
            # 1 = pistol
            # 2 = smg
            # 3 = assault rifle
            # 4 = special/heavy (sniper, rpg)
            read_int(),
            # 2: damage
            read_short(),
            # 3: animation time?
            read_short(),
            # 4: area of effect (float)
            read_int(),
            # 5: increment (n * this)
            read_byte(),
            # 6: sound id
            read_int()
        ])
        continue
        # dead debug dump (the `continue` above always skips it)
        print(i)
        pprint.pprint(weapons[i])
        print(str(languages[0][items[weapons[i][0]][4]]))
        print(str(languages[0][items[weapons[i][0]][5]]))
read_weapons()
def read_gears():
    """Parse the gear table: seven undocumented ints per gear entry."""
    gears_len = read_short()
    gears = []
    for i in range(gears_len):
        gears.append([
            read_int(),
            read_int(),
            read_int(),
            read_int(),
            read_int(),
            read_int(),
            read_int()
        ])
        continue
        # dead debug dump (the `continue` above always skips it)
        print(i)
        pprint.pprint(gears[i])
read_gears()
def read_businesses():
    """Parse business sprite ids; the early return keeps the debug dump dead."""
    businesses_len = read_short()
    businesses = []
    for i in range(businesses_len):
        businesses.append(read_short()) # sprite id
    return
    pprint.pprint(businesses)
read_businesses()
def read_robbery_items():
    """Parse robbery items: per item a worth and, per rotation, sprite + worth + 5 ints."""
    robbery_items_len = read_short()
    robbery_items = []
    for i in range(robbery_items_len):
        worth = read_int()
        len1 = read_short()
        array1 = []
        # rotations
        for j in range(len1):
            array2 = []
            array2.append(read_short()) # sprite id
            array2.append(worth)
            for k in range(5):
                array2.append(read_int())
            array1.append(array2)
        robbery_items.append(array1)
    return
    pprint.pprint(robbery_items)
read_robbery_items()
def read_dialog_texts():
    """Parse dialog scripts: two flags plus (speaker, text, sprite) triples,
    with speaker/text resolved through the first language's string table."""
    texts_len = read_short()
    texts = []
    for i in range(texts_len):
        can_redraw = read_boolean()
        is_tutorial = read_boolean()
        conversation_len = read_short()
        conversation = []
        for j in range(conversation_len):
            current_conversation = {
                "name": languages[0][read_int()],
                "text": languages[0][read_int()],
                "sprite": read_short()
            }
            conversation.append(current_conversation)
        texts.append({
            "can_redraw": can_redraw,
            "is_tutorial": is_tutorial,
            "conversation": conversation
        })
    return
    pprint.pprint(texts)
read_dialog_texts()
|
#!/usr/bin/env python
import numpy as np
def ltr_parts(parts_dict):
    """Map left/right body-part names to their ids for horizontal-flip swapping.

    When an image is mirrored, each left keypoint becomes the corresponding
    right keypoint and vice versa; the two returned id lists are index-aligned
    so element k of one swaps with element k of the other.
    """
    sides = ["sho", "elb", "wri", "hip", "kne", "ank", "eye", "ear"]
    left_ids = [parts_dict["L" + s] for s in sides]
    right_ids = [parts_dict["R" + s] for s in sides]
    return left_ids, right_ids
class RmpeGlobalConfig:
    """Global pose-estimation configuration: canonical part/limb layout and
    the tensor shapes derived from input size and network stride."""
    width = 368
    height = 368
    stride = 8  # network output is downscaled by this factor
    parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye", "Rear", "Lear"]
    num_parts = len(parts)
    parts_dict = dict(zip(parts, range(num_parts)))
    parts += ["background"]  # background channel appended after parts_dict is built
    num_parts_with_background = len(parts)
    leftParts, rightParts = ltr_parts(parts_dict)
    # this numbers probably copied from matlab they are 1.. based not 0.. based
    limb_from = [2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16]
    limb_to = [9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18]
    limbs_conn = zip(limb_from, limb_to)
    limbs_conn = [(fr - 1, to - 1) for (fr, to) in limbs_conn]  # convert to 0-based pairs
    paf_layers = 2*len(limbs_conn)  # one x- and one y-channel per limb
    heat_layers = num_parts
    num_layers = paf_layers + heat_layers + 1  # +1 for the background layer
    paf_start = 0
    heat_start = paf_layers
    bkg_start = paf_layers + heat_layers
    data_shape = (3, height, width) # 3, 368, 368
    mask_shape = (height//stride, width//stride) # 46, 46
    parts_shape = (num_layers, height//stride, width//stride) # 57, 46, 46

    class TransformationParams:
        """Data-augmentation parameters for training-time transforms."""
        target_dist = 0.6;
        scale_prob = 1; # TODO: this is actually scale unprobability, i.e. 1 = off, 0 = always, not sure if it is a bug or not
        scale_min = 0.5;
        scale_max = 1.1;
        max_rotate_degree = 40.
        center_perterb_max = 40.
        flip_prob = 0.5
        sigma = 7.  # gaussian radius for heatmap targets
        paf_thre = 8. # it is original 1.0 * stride in this program
class RmpeCocoConfig:
    """Keypoint layout of COCO annotations and conversion to the global layout."""

    parts = ['nose', 'Leye', 'Reye', 'Lear', 'Rear', 'Lsho', 'Rsho', 'Lelb',
             'Relb', 'Lwri', 'Rwri', 'Lhip', 'Rhip', 'Lkne', 'Rkne', 'Lank',
             'Rank']
    num_parts = len(parts)
    # for COCO neck is calculated like mean of 2 shoulders.
    parts_dict = dict(zip(parts, range(num_parts)))

    @staticmethod
    def convert(joints):
        """Re-index COCO joints (N, 17, 3) into the global part order.

        Returns an (N, RmpeGlobalConfig.num_parts, 3) float array. Visibility
        convention here: 0 - invisible, 1 - visible, 2 - absent (values were
        pre-processed by generate_hdf5, not raw COCO flags). The neck, absent
        in COCO, is synthesized as the shoulder midpoint when both shoulders
        are known.
        """
        # np.float was a deprecated alias of builtin float and was removed in
        # NumPy 1.24; np.float64 is the same dtype, spelled explicitly.
        result = np.zeros((joints.shape[0], RmpeGlobalConfig.num_parts, 3), dtype=np.float64)
        result[:, :, 2] = 2.  # 2 - absent, 1 visible, 0 - invisible
        for p in RmpeCocoConfig.parts:
            coco_id = RmpeCocoConfig.parts_dict[p]
            global_id = RmpeGlobalConfig.parts_dict[p]
            assert global_id != 1, "neck shouldn't be known yet"
            result[:, global_id, :] = joints[:, coco_id, :]
        neckG = RmpeGlobalConfig.parts_dict['neck']
        RshoC = RmpeCocoConfig.parts_dict['Rsho']
        LshoC = RmpeCocoConfig.parts_dict['Lsho']
        # no neck in coco database, we calculate it as average of shoulders
        both_shoulders_known = (joints[:, LshoC, 2] < 2) & (joints[:, RshoC, 2] < 2)
        result[both_shoulders_known, neckG, 0:2] = (joints[both_shoulders_known, RshoC, 0:2] +
                                                    joints[both_shoulders_known, LshoC, 0:2]) / 2
        # neck visibility is the worse (smaller) of the two shoulder flags
        result[both_shoulders_known, neckG, 2] = np.minimum(joints[both_shoulders_known, RshoC, 2],
                                                            joints[both_shoulders_known, LshoC, 2])
        return result
class RpmeMPIIConfig:
    """Keypoint layout for the MPII dataset (14 annotated joints)."""

    parts = ["HeadTop", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "RHip", "RKnee",
             "RAnkle", "LHip", "LKnee", "LAnkle"]
    numparts = len(parts)
    #14 - Chest is calculated like "human center location provided by the annotated data"

    @staticmethod
    def convert(joints):
        """Convert MPII joints to the global layout — not implemented yet.

        The original ``raise "Not implemented"`` raised a plain string, which
        is itself a TypeError on Python 3; raise the proper exception type.
        """
        raise NotImplementedError("MPII joint conversion is not implemented")
# more information on keypoints mapping is here
# https://github.com/ZheC/Realtime_Multi-Person_Pose_Estimation/issues/7
def check_layer_dictionary():
    """Sanity-check that PAF x/y layer slots line up with limb connections.

    Builds a layer-name table: the first paf_layers slots get "from->to:x/:y"
    pairs, the trailing slots the part names; asserts each limb writes into a
    previously unused slot, then prints the table for visual inspection.
    """
    dct = RmpeGlobalConfig.parts[:]
    # pad the front with None so the list has num_layers slots in total
    dct = [None]*(RmpeGlobalConfig.num_layers-len(dct)) + dct
    for (i,(fr,to)) in enumerate(RmpeGlobalConfig.limbs_conn):
        name = "%s->%s" % (RmpeGlobalConfig.parts[fr], RmpeGlobalConfig.parts[to])
        print(i, name)
        x = i*2      # even slot: x component of this limb's PAF
        y = i*2+1    # odd slot: y component
        assert dct[x] is None
        dct[x] = name + ":x"
        assert dct[y] is None
        dct[y] = name + ":y"
    print(dct)

if __name__ == "__main__":
    check_layer_dictionary()
|
# Exercise: extend the previous program by letting the user supply the initial
# populations and growth rates, validating the input and allowing repetition.
# NOTE(review): populations are still hard-coded and there is no input
# validation or repeat loop, so the stated requirements are only partially met.
pais_a = 80000   # country A initial population
pais_b = 200000  # country B initial population
anos = 0         # years elapsed
taxa_a = float(input('Taxa de crescimento pais A: ')) / 100 + 1
taxa_b = float(input('Taxa de crescimento pais B: ')) / 100 + 1
# Grow both populations until country A catches up with country B.
# NOTE(review): if taxa_a <= taxa_b the loop never terminates.
while pais_a < pais_b:
    anos = anos + 1
    pais_a = pais_a * taxa_a
    pais_b = pais_b * taxa_b
print(anos)
print(f'Foram necessarios {anos} anos')
import socket, time
from collections import defaultdict
from threading import RLock
from paramiko import SSHClient, AutoAddPolicy
from contextlib import contextmanager
class SafeModeError(Exception):
    """Raised when entering MikroTik safe mode fails."""
    # The docstring is a sufficient class body; the redundant `pass` is removed.
class AlreadyConnectedError(Exception):
    """Raised when another connection to the device already holds the lock."""
    # The docstring is a sufficient class body; the redundant `pass` is removed.
class Ssh:
    """Paramiko-based interactive SSH session helper for MikroTik RouterOS.

    A class-level RLock per hostname serialises access so that only one Ssh
    instance talks to a given device at a time.
    """

    # hostname -> RLock guarding the single allowed connection per device
    connected_hosts = defaultdict(RLock)

    def __init__(self, hostname: str, username: str, password: str, colored=True):
        self.username = username
        self.password = password
        self.hostname = hostname
        self.colored = colored  # request an ANSI-colored terminal from RouterOS
        self.client = SSHClient()
        self.client.set_missing_host_key_policy(AutoAddPolicy())
        self.shell = None  # interactive channel, created in connect()

    def connect(self):
        """Acquire the per-host lock and open an interactive shell.

        Raises AlreadyConnectedError if the lock cannot be taken within 10 s.
        """
        if not Ssh.connected_hosts[self.hostname].acquire(timeout=10):
            raise AlreadyConnectedError(
                f'Device {self.hostname} is busy by another ssh conn'
            )
        transport = self.client.get_transport()
        if not (transport and transport.active):
            # RouterOS username suffixes select terminal features:
            # +t = colored terminal, +c = no colors; 300w = terminal width.
            modificator = '+t300w' if self.colored else '+c300w'
            self.client.connect(
                hostname=self.hostname,
                username=self.username + modificator,
                password=self.password,
                look_for_keys=False,
                allow_agent=False)
            self.shell = self.client.invoke_shell()
            self.read_all()  # drain the login banner

    def read_all(self, timeout=1) -> str:
        """Drain whatever output is currently buffered on the shell channel."""
        if self.shell.gettimeout() != timeout:
            self.shell.settimeout(timeout)
        try:
            time.sleep(timeout/10)  # give the device a moment to produce output
            res = self.shell.recv(1000000)
        except socket.timeout:
            res = b''
        return res.decode('utf-8', errors='replace')

    def send(self, string: str):
        """Send *string* after cancelling any pending input with Ctrl-C."""
        self.shell.send(chr(3)) # Ctrl-C
        self.read_all()
        self.shell.send(string)

    def __enter__(self):
        # Disable console echo logging so command output stays clean for parsing.
        self.connect()
        self.client.exec_command(
            '/system logging disable [find where action=echo disabled=no]')
        self.read_all()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.client.exec_command(
            '/system logging enable [find where action=echo disabled=yes]')
        self.close()

    def close(self):
        """Close the SSH client and give up the per-host lock.

        NOTE(review): deleting the lock entry while another thread may still be
        blocked acquiring it lets defaultdict create a fresh lock for the same
        host, weakening the single-connection guarantee; calling close() twice
        would also raise KeyError — confirm intended usage.
        """
        self.client.close()
        Ssh.connected_hosts[self.hostname].release()
        del Ssh.connected_hosts[self.hostname]

    @contextmanager
    def safe_mode(self):
        """Context manager wrapping commands in RouterOS safe mode (Ctrl-X).

        Raises SafeModeError if the '<SAFE>' prompt never appears after three
        attempts; always sends a final Ctrl-X in ``finally`` to leave safe mode.
        """
        try:
            time.sleep(3)
            self.send(chr(3))
            self.read_all()
            time.sleep(3)
            for _ in range(3):
                self.send(chr(0x18)) # Ctrl-X toggles safe mode
                time.sleep(0.1)
                prompt = self.read_all()
                if '<SAFE>' in prompt:
                    break
            else:
                raise SafeModeError('Unable to get safe mode')
            yield
        finally:
            self.send(chr(0x18))
            self.read_all()
|
# coding: utf-8
from django.contrib import admin
from the_tale.linguistics import models
class WordAdmin(admin.ModelAdmin):
    """Admin list view for linguistics Word records."""
    list_display = ('id', 'type', 'normal_form', 'state', 'created_at', 'updated_at')
    list_filter = ('type', 'state',)
class TemplateAdmin(admin.ModelAdmin):
    """Admin list view for linguistics Template records."""
    list_display = ('id', 'key', 'state', 'author', 'raw_template', 'created_at', 'updated_at')
    list_filter = ('state', 'key')
class ContributionAdmin(admin.ModelAdmin):
    """Admin list view for linguistics Contribution records."""
    list_display = ('id', 'type', 'source', 'account', 'entity_id', 'created_at')
    list_filter = ('type',)
class RestrictionAdmin(admin.ModelAdmin):
    """Admin list view for linguistics Restriction records."""
    list_display = ('id', 'name', 'group', 'external_id')
    list_filter = ('group',)

# Hook the linguistics models into the Django admin site.
admin.site.register(models.Word, WordAdmin)
admin.site.register(models.Template, TemplateAdmin)
admin.site.register(models.Contribution, ContributionAdmin)
admin.site.register(models.Restriction, RestrictionAdmin)
|
from lml.plugin import PluginInfoChain

# Register the "test_io" plugin under tag "x" with lml's plugin chain,
# using this module's name as the plugin source.
__test_plugins__ = PluginInfoChain(__name__).add_a_plugin("test_io", "x")
|
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Yi Li, Gu Wang
# --------------------------------------------------------
from __future__ import print_function, division
import numpy as np
from lib.utils.mkdir_if_missing import *
import scipy.io as sio
import cv2
from tqdm import tqdm
if __name__ == "__main__":
GEN_EGGBOX = False
# GEN_EGGBOX = True # change this line for eggbox, because eggbox in the first version is wrong
big_idx2class = {
1: "ape",
2: "benchvise",
4: "camera",
5: "can",
6: "cat",
8: "driller",
9: "duck",
10: "eggbox",
11: "glue",
12: "holepuncher",
13: "iron",
14: "lamp",
15: "phone",
}
class_name_list = big_idx2class.values()
class_name_list = sorted(class_name_list)
class2big_idx = {}
for key in big_idx2class:
class2big_idx[big_idx2class[key]] = key
cur_path = os.path.abspath(os.path.dirname(__file__))
# config for Yu's results
keyframe_path = "%s/{}_test.txt" % (
os.path.join(
cur_path, "../data/LINEMOD_6D/LM6d_converted/LM6d_render_v1/image_set/real"
)
)
if not GEN_EGGBOX:
yu_pred_dir = os.path.join(cur_path, "../data/LINEMOD_6D/results_frcnn_linemod")
else:
yu_pred_dir = os.path.join(
cur_path, "../data/LINEMOD_6D/frcnn_LM6d_eggbox_yu_val_v02_fix"
)
# config for renderer
width = 640
height = 480
K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])
ZNEAR = 0.25
ZFAR = 6.0
# output_path
version = "mask_Yu_v02"
real_root_dir = os.path.join(
cur_path, "../data/LINEMOD_6D/LM6d_converted/LM6d_render_v1/data/real"
)
real_meta_path = "%s/{}-meta.mat" % (real_root_dir)
rendered_root_dir = os.path.join(
cur_path, "../data/LINEMOD_6D/LM6d_converted/LM6d_render_v1/data", version
)
pair_set_dir = os.path.join(
cur_path, "../data/LINEMOD_6D/LM6d_converted/LM6d_render_v1/image_set"
)
mkdir_if_missing(rendered_root_dir)
mkdir_if_missing(pair_set_dir)
all_pair = []
for small_class_idx, class_name in enumerate(tqdm(class_name_list)):
if GEN_EGGBOX:
if class_name != "eggbox": # eggbox is wrong in the first version
continue
else:
if class_name == "eggbox":
continue
big_class_idx = class2big_idx[class_name]
with open(keyframe_path.format(class_name)) as f:
real_index_list = [x.strip() for x in f.readlines()]
video_name_list = [x.split("/")[0] for x in real_index_list]
real_prefix_list = [x.split("/")[1] for x in real_index_list]
# init render
model_dir = os.path.join(
cur_path, "../data/LINEMOD_6D/LM6d_converted/models/{}".format(class_name)
)
all_pair = []
for idx, real_index in enumerate(real_index_list):
rendered_dir = os.path.join(
rendered_root_dir, video_name_list[idx], class_name
)
mkdir_if_missing(rendered_dir)
label_file = os.path.join(
rendered_dir, "{}-label.png".format(real_prefix_list[idx])
)
yu_idx = idx
yu_pred_file = os.path.join(
yu_pred_dir, class_name, "{:04d}.mat".format(yu_idx)
)
yu_pred = sio.loadmat(yu_pred_file)
labels = np.zeros((height, width))
rois = yu_pred["rois"]
if len(rois) != 0:
pred_roi = np.squeeze(rois[:, 1:5])
x1 = int(pred_roi[0])
y1 = int(pred_roi[1])
x2 = int(pred_roi[2])
y2 = int(pred_roi[3])
labels[y1:y2, x1:x2] = big_class_idx
else:
print("no roi in {}".format(yu_idx))
cv2.imwrite(label_file, labels)
print(big_class_idx, class_name, " done")
|
"""The application models"""
from Player import Player
from GameDate import GameDate
from Comment import Comment |
#!/usr/bin/python
# coding=utf-8
# Daemon that keeps the network alive in the NUPT student activity center:
# every 15 s it pings the campus gateway and restarts networking when packet
# loss shows the link is down.
import re
import time
from os import system

while True:
    time.sleep(15)
    system("ping -c 5 202.119.236.20 >/tmp/ping_tmp")
    # Read the ping report and close the handle promptly
    # (the original opened it every cycle and never closed it).
    with open("/tmp/ping_tmp", 'r') as f:
        s = f.read()
    # Parse the "...X% packet loss" summary line. The original test
    # s.find("0%") == -1 was broken: "0%" is a substring of "10%", "20%",
    # ..., "100%", and with -c 5 every loss value is a multiple of 20,
    # so the restart branch could never fire.
    m = re.search(r'(\d+)% packet loss', s)
    if m is None or int(m.group(1)) != 0:
        system("/etc/init.d/networking restart")
|
# -*- coding: gbk -*-
import datetime
# Global system variable definitions
STCode = '600000'  # stock code currently under analysis
STName = ''
todaydate = datetime.datetime.now().strftime("%Y-%m-%d")
# Output folders (Chinese names: raw data / averaged data / rule results /
# chart results / e-mail log); a dated subfolder is used per day.
path_data_origin = '原始数据\\原始数据' + todaydate + '\\'
path_data_avg = '均值整理数据\\均值整理数据' + todaydate + '\\'
path_rule_rst = '规则分析结果\\规则分析结果' + todaydate + '\\'
path_view_rst = '图片结果\\图片结果' + todaydate + '\\'
path_email_rst = '邮件记录\\'
Fig_Cnt = 1  # running figure counter for chart output
# Signal-message buckets keyed by pattern category/importance.
信息dict = {'大形态重要': [], '大形态一般': [], '基本形态重要': [], '基本形态一般': [], '形态3个一般': []}
'''ruleAnly的天数设置'''
# Day-count settings for ruleAnly:
# number of days read from the DB for analysis / of candlestick chart history
# (must be >= each individual rule's day count).
Analyse_days_date = 60
Anly_days_add = 1  # analyse n additional day(s)
# Minimum number of analysis days per rule
Anly_days_1 = 2
Anly_days_2 = 2
Anly_days_3 = 2
Anly_days_4 = 2
Anly_days_5 = 1
Anly_days_6 = 1
Anly_days_7 = 3
Anly_days_8 = 3
Anly_days_9 = 3
Anly_days_10 = 2
Anly_days_11 = 5
Anly_days_12 = 2
Anly_days_13 = 2
Anly_days_14 = 3
Anly_days_15 = 3
Anly_days_16 = 2
Anly_days_17 = 2
Anly_days_18 = 2
Anly_days_19 = 1
Anly_days_50 = 6
Anly_days_51 = 7
Anly_days_52 = 16
Anly_days_53 = 21
Anly_days_60 = 7
Anly_days_61 = 7
Anly_days_62 = 10
Anly_days_63 = 10
Anly_days_64 = 10
Anly_days_65 = 4
Anly_days_70 = 25
Anly_days_80 = 36
Anly_days_81 = 32
#Anly_days_82 = 6-30
Anly_days_121 = 2  #### limit-up within 1 day, excluding Anly_days_add
'''whlAnly的天数设置'''
# Day-count settings for whlAnly:
whlAnly_1 = 5  #### excluding Anly_days_add
'''whlAnly相关变量初始化'''
# whlAnly counters: counts (and N-counts / ratios) of stocks rising 5% within
# N days after a limit-up (ZT_*) or after being selected (XG_*).
当天涨停数 = 0
当天选股数= 0
ZT_1天涨5数 = 0
ZT_1天涨5数N = 0
ZT_1天涨5比例 = 0.0
ZT_2天涨5数 = 0
ZT_2天涨5数N = 0
ZT_2天涨5比例 = 0.0
ZT_1天买进2天涨5数 = 0
ZT_1天买进2天涨5数N = 0
ZT_1天买进2天涨5比例 = 0.0
ZT_1天买进3天涨5数 = 0
ZT_1天买进3天涨5数N = 0
ZT_1天买进3天涨5比例 = 0.0
XG_1天涨5数 = 0
XG_1天涨5数N = 0
XG_1天涨5比例 = 0.0
XG_2天涨5数 = 0
XG_2天涨5数N = 0
XG_2天涨5比例 = 0.0
#### Used to filter the date currently being processed
Date0 = ''
Cnt_0 = 1
#! python3
#!/usr/bin/python
# -*- coding: utf-8 -*-
import discord
import urllib.request
import urllib.error
import urllib.parse
import json
from discord.ext import commands
import time
class test:
    """Cog exposing a $test command that compiles and runs code via rextester.com.

    NOTE(review): written against the pre-1.0 discord.py API (bot.say,
    bot.edit_message, bot.send_typing). `test` shadows the method/class name
    and `input` shadows a builtin; the time.sleep calls block the event loop
    (asyncio.sleep would be the non-blocking choice).
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def test(self , ctx, language, *, input : str):  # usage: $test <language #> <code>
        """Usage: $test <language #> <code> language #s, visit rextester.com/main"""
        try:
            # embed creation: show a "Loading" embed first, then progressively
            # edit it as each result field (warnings/errors/stats/result) arrives.
            await self.bot.send_typing(ctx.message.channel)
            author = ctx.message.author
            create=discord.Embed(description="Loading", colour=discord.Colour(value=0xff7373))
            create.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            create.set_footer(text="Command issued by {}".format(author))
            start = await self.bot.say(embed=create)
            time.sleep(1)
            # POST the program to the rextester run API.
            url = 'http://rextester.com/rundotnet/api'
            postdata = urllib.parse.urlencode({
                'LanguageChoice': language,
                'Program': input,
                'Input': "",
                'CompilerArgs': "",
            })
            postdatabytes = str.encode(postdata)
            req = urllib.request.Request(url, postdatabytes)
            response = urllib.request.urlopen(req)
            output = response.read()
            #print 'API response: ' + output
            response_decoded = json.loads(output)
            warns = response_decoded["Warnings"]
            er = response_decoded["Errors"]
            re = response_decoded["Result"]
            st = response_decoded["Stats"]
            #print "Decoded JSON:"
            #print response_decoded
            author2 = ctx.message.author.mention
            # Skeleton embed with every field still "Not started".
            build5=discord.Embed(colour=discord.Colour(value=0xffb200))
            build5.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            build5.add_field(name="Warnings", value="Not started", inline=False)
            build5.add_field(name="Errors", value="Not started", inline=False)
            build5.add_field(name="Stats", value="Not started", inline=False)
            build5.add_field(name="Result", value="Not started", inline=False)
            build5.set_footer(text="Command issued by {}".format(author))
            buildprint5 = await self.bot.edit_message(start, embed=build5)
            await self.bot.send_typing(ctx.message.channel)
            #Adding results
            time.sleep(0.5)
            add=discord.Embed(colour=discord.Colour(value=0xffb200))
            add.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            add.add_field(name="Warnings", value="```{}```".format(warns), inline=False)
            add.add_field(name="Errors", value="Not started", inline=False)
            add.add_field(name="Stats", value="Not started", inline=False)
            add.add_field(name="Result", value="Not started", inline=False)
            add.set_footer(text="Command issued by {}".format(author))
            addprint = await self.bot.edit_message(buildprint5, embed=add)
            await self.bot.send_typing(ctx.message.channel)
            time.sleep(0.5)
            add2=discord.Embed(colour=discord.Colour(value=0xffb200))
            add2.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            add2.add_field(name="Warnings", value="```{}```".format(warns), inline=False)
            add2.add_field(name="Errors", value="```{}```".format(er), inline=False)
            add2.add_field(name="Stats", value="Not started", inline=False)
            add2.add_field(name="Result", value="Not started", inline=False)
            add2.set_footer(text="Command issued by {}".format(author))
            addprint2 = await self.bot.edit_message(addprint, embed=add2)
            await self.bot.send_typing(ctx.message.channel)
            time.sleep(0.5)
            add3=discord.Embed(colour=discord.Colour(value=0xffb200))
            add3.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            add3.add_field(name="Warnings", value="```{}```".format(warns), inline=False)
            add3.add_field(name="Errors", value="```{}```".format(er), inline=False)
            add3.add_field(name="Stats", value="```{}```".format(st), inline=False)
            add3.add_field(name="Result", value="Not started", inline=False)
            add3.set_footer(text="Command issued by {}".format(author))
            addprint3 = await self.bot.edit_message(addprint2, embed=add3)
            await self.bot.send_typing(ctx.message.channel)
            time.sleep(0.5)
            add4=discord.Embed(colour=discord.Colour(value=0xffb200))
            add4.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            add4.add_field(name="Warnings", value="```{}```".format(warns), inline=False)
            add4.add_field(name="Errors", value="```{}```".format(er), inline=False)
            add4.add_field(name="Stats", value="```{}```".format(st), inline=False)
            add4.add_field(name="Result", value="```{}```".format(re), inline=False)
            add4.set_footer(text="Command issued by {}".format(author))
            addprint4 = await self.bot.edit_message(addprint3, embed=add4)
            await self.bot.send_typing(ctx.message.channel)
            time.sleep(0.3)
            # Final green embed signals completion.
            add5=discord.Embed(colour=discord.Colour(value=0x35bf4d))
            add5.set_author(name="Rex Tester", icon_url="https://i.imgur.com/n9q9nBU.png")
            add5.add_field(name="Warnings", value="```{}```".format(warns), inline=False)
            add5.add_field(name="Errors", value="```{}```".format(er), inline=False)
            add5.add_field(name="Stats", value="```{}```".format(st), inline=False)
            add5.add_field(name="Result", value="```{}```".format(re), inline=False)
            add5.set_footer(text="Command issued by {}".format(author))
            addprint5 = await self.bot.edit_message(addprint4, embed=add5)
            await self.bot.say("**{}, Done :thumbsup:**".format(author2))
            await self.bot.send_typing(ctx.message.channel)
        except Exception as e:
            # Report the failure to the channel, then re-raise for the logs.
            await self.bot.say("I have ran into a error :x:")
            raise
#It is a dict called response. I have to call specific keys from within the dict
#I would think just replace the "print" with the bot.say
def setup(bot):
    """discord.py extension entry point: register the `test` cog on *bot*."""
    n = test(bot)
    bot.add_cog(n)
|
('test_precisely',
[('F401', 14, 8, "'gensim.models.AuthorTopicModel' imported but unused", None),
('F401', 15, 8, "'gensim.corpora.mmcorpus' imported but unused", None),
('F401', 16, 8, "'gensim.test.utils.common_dictionary' imported but unused", None),
('F401', 16, 8, "'gensim.test.utils.datapath' imported but unused", None),
('F401', 16, 8, "'gensim.test.utils.temporary_file' imported but unused", None)],
{'logical lines': 4, 'physical lines': 8, 'tokens': 85})
|
import requests
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on any request failure.

    The original signature took no parameter while the call site passed *url*
    (a TypeError); accepting the argument fixes that. The bare ``except`` is
    narrowed to requests' own exception base so programming errors still surface.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException:
        return ""
url="http//m.kdnet.net/cluster/list?clusterid=1"
print(getHTMLText(url)) |
#coding=utf-8
from kafka import KafkaConsumer
# Consume the Kubernetes audit topic; the bootstrap address is one of the
# broker's two exposed ports.
consumer = KafkaConsumer('k8saudit',
                         group_id='my-group',
                         bootstrap_servers=['47.105.135.136:32772'])
# print message.value
# Print each record's location (topic:partition:offset) plus its key and value.
for message in consumer:
    print ("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                          message.offset, message.key,
                                          message.value))
|
import numpy as np
import math
def delta(x_hat_1, x_hat_2, M1, M2):
    """Signed mean difference oriented from the larger sample toward the smaller.

    Returns ``x_hat_2 - x_hat_1`` when M1 >= M2, otherwise ``x_hat_1 - x_hat_2``
    (used by the pairwise mean/variance merge helpers below).
    """
    return (x_hat_2 - x_hat_1) if M1 >= M2 else (x_hat_1 - x_hat_2)
# Combined sum of squared deviations when merging two sample partitions
# (Chan et al. pairwise update): S = S1 + S2 + delta^2 * M1*M2 / (M1+M2).
# Converted from a named lambda to a def (PEP 8 E731) for clearer tracebacks.
def Sfunc(M1, M2, S_1, S_2, delta):
    """Return the merged sum of squares for partitions of sizes M1 and M2."""
    return S_1 + S_2 + delta ** 2 * M1 * M2 / (M1 + M2)
def x_hat(M1, M2, x_hat_1, x_hat_2, Delta):
    """Merged mean of two partitions, nudged from the larger partition's mean.

    Delta is the oriented difference produced by delta(); the correction is
    weighted by the smaller partition's share of the combined size.
    """
    total = M1 + M2
    if M1 >= M2:
        return x_hat_1 + Delta * M2 / total
    return x_hat_2 + Delta * M1 / total
|
import telepot
from pprint import pprint
import sys
import time
import datetime
import random
data = time.strftime("[%A] %d/%m/%Y - %H:%M:%S")
def handle(msg):
    """Telegram message callback: answers /mamaemandou with a random match
    prediction and /data with the bot start timestamp.

    Relies on the module-level `bot` and `data` globals.
    """
    resposta = bot.getUpdates()
    print(resposta)  # debug: dump pending updates
    chat_id = msg['chat']['id']
    command = msg['text']  # NOTE(review): non-text messages lack 'text' -> KeyError
    print('Comando recebido: %s' % command)
    if command == '/mamaemandou':
        rand = random.randint(1, 3)
        rand_casa_fora = random.randint(1, 2)
        # Randomly announce home vs. away side...
        if rand_casa_fora == 1:
            bot.sendMessage(chat_id, 'Time da casa: ')
        else:
            bot.sendMessage(chat_id, 'Time visitante:')
        # ...then a random outcome: win / draw / loss.
        if rand == 1:
            bot.sendMessage(chat_id, 'Vitória')
        elif rand == 2:
            bot.sendMessage(chat_id, 'Empate')
        else:
            bot.sendMessage(chat_id, 'Derrota')
    if command == '/data':
        bot.sendMessage(chat_id, str(data))
# SECURITY(review): the bot token is hard-coded in source control; it should
# be revoked and loaded from an environment variable or config file instead.
bot = telepot.Bot('879354760:AAF4JiZOadBOx4cLg3O2CstEui2SAE0h1-A')
bot.message_loop(handle)  # starts a background thread dispatching to handle()
# Keep the main thread alive so the listener thread keeps running.
while True:
    time.sleep(10)
|
import boto3
import json
def send_message(handle, queue_URL, message):
    """Publish *message* to the SQS queue at *queue_URL* using client *handle*.

    Returns:
        The raw response dict from the SQS ``send_message`` call.
    """
    return handle.send_message(
        QueueUrl=queue_URL,
        DelaySeconds=1,        # deliver after a one-second delay
        MessageAttributes={},  # no custom attributes
        MessageBody=message,
    )
if __name__ == '__main__':
    # NOTE(review): relies on ambient AWS credentials/region from the environment.
    sqs = boto3.client('sqs')
    queue_url = 'https://eu-west-1.queue.amazonaws.com/363553477801/aws-primer'
    # contacts.json is expected to hold a JSON array; each element becomes one
    # SQS message (re-serialized individually).
    with open('contacts.json') as f:
        contacts = json.load(f)
    for line in contacts:
        resp = send_message(sqs, queue_url, json.dumps(line))
        print(resp)
|
# FOR BETTER CODING PRACTICE, START USING __init__ FUNCTION AND SELF FOR CLASSES AND ITS PARAMETERS
# CREATE hasAce FUNCTION IN BOTH DEALER AND PLAYER CLASSES
# this will determine if their hand has aces
# might make a hand list that has their current hand (THIS MIGHT SOLVE THE WHOLE ISSUE)
# Blackjack game
import random
class deck:
    """Factory for a shoe of playing cards.

    Cards live in a dict keyed by a unique string per physical card; each
    value is a [rank_name, suit, blackjack_value] list (face cards count 10,
    aces start at 1 and are promoted to 11 elsewhere).
    """

    # creates a deck with # of decks (deck_size) times 52
    @staticmethod
    def create_deck(deck_size):
        """Build a shoe of *deck_size* standard 52-card decks.

        Args:
            deck_size: number of 52-card decks to include.

        Returns:
            dict: unique key -> [card name, suit, blackjack value].
        """
        suits = ("Diamond", "Clubs", "Hearts", "Spades")
        face_names = {1: "Ace", 11: "Jack", 12: "Queen", 13: "King"}
        actual_deck = {}
        for copy_idx in range(deck_size):
            for card_idx in range(52):
                # converts card_idx into a rank between 1 and 13, inclusive
                rank = card_idx // 4 + 1
                # Jack/Queen/King (11-13) are all worth 10 in blackjack
                value = min(rank, 10)
                suit = suits[card_idx % 4]
                card = face_names.get(rank, str(rank))
                # BUG FIX: the original key str(copy)+str(card_idx) collides
                # for deck_size >= 13 (e.g. "1"+"23" == "12"+"3"), silently
                # dropping cards; a separator makes keys unique.
                actual_deck["%d-%d" % (copy_idx, card_idx)] = [card, suit, value]
        return actual_deck

    # prints the whole deck
    @staticmethod
    def print_deck(actual_deck):
        """Dump the raw deck dict to stdout (debug helper)."""
        print(actual_deck)
class Dealer:
    """The house: draws cards into a shared hand and reports its total."""

    # current_hand is the dealer's current hand, whenever they hit it adds on to this hand
    def __init__(self, current_deck, num_value, my_turn, current_hand):
        """Bind the shared deck and seed the running hand total.

        An initial value of 1 counts as a soft ace (11); any cards already in
        current_hand are then added on top.
        """
        self.current_deck = current_deck
        self.num_value = 11 if num_value == 1 else num_value
        self.my_turn = my_turn
        self.current_hand = current_hand
        for card in self.current_hand:
            self.num_value += card[2]

    def hit(self):
        """Draw one random card, re-score the hand, and return the new total."""
        print("The dealer has %d." % self.num_value)
        drawn = self.current_deck.pop(random.choice(list(self.current_deck.keys())))
        self.current_hand.append(drawn)
        print(drawn)
        # Re-total the hand, flipping each ace to 11 or 1 based on the
        # pre-draw total (soft/hard adjustment; unchanged when exactly 11).
        total = 0
        for card in self.current_hand:
            if card[0] == 'Ace':
                if self.num_value < 11:
                    card[2] = 11
                elif self.num_value > 11:
                    card[2] = 1
            total += card[2]
        self.num_value = total
        print("The dealer hits and now has %d." % self.num_value)
        return self.num_value

    def stand(self):
        """Announce bust or stand and end the dealer's turn."""
        if self.num_value > 21:
            print("The dealer has %d. The dealer busts." % self.num_value)
        else:
            print("The dealer has %d. The dealer stands." % self.num_value)
        self.my_turn = False
class Chickens:
    # creates cpu players to play with player: Ester, Petra and Elinor
    # Three types: wildcard, aggressive, and safe
    # NOTE(review): placeholder only — no CPU-player behavior is implemented yet.
    def type():
        # TODO: choose and return one of the planned personality types
        pass
    # random.choice(type)
    pass
class Player:
    """A human-controlled seat at the table."""

    # current_hand is the player's current hand, whenever they hit it adds on to this hand
    def __init__(self, current_deck, name, num_value, my_turn, current_hand):
        """Bind the shared deck and seed the hand total (an initial 1 counts as 11)."""
        self.current_deck = current_deck
        self.name = name
        self.num_value = 11 if num_value == 1 else num_value
        self.my_turn = my_turn
        self.current_hand = current_hand
        for card in self.current_hand:
            self.num_value += card[2]

    def _rescore(self):
        """Re-total the hand, flipping aces to 11/1 based on the current total."""
        total = 0
        for card in self.current_hand:
            if card[0] == 'Ace':
                if self.num_value < 11:
                    card[2] = 11
                elif self.num_value > 11:
                    card[2] = 1
            total += card[2]
        self.num_value = total

    def second_card(self, num_players):
        """Deal this player's second card and print the running total.

        num_players is accepted for signature compatibility but unused.
        Returns the [name, suit, value] card that was dealt.
        """
        drawn = self.current_deck.pop(random.choice(list(self.current_deck.keys())))
        self.current_hand.append(drawn)
        self.num_value += drawn[2]
        self._rescore()
        print("%s's second card is %s of %s. %s has %d." % (self.name, drawn[0], drawn[1], self.name, self.num_value))
        return drawn

    def hit(self):
        """Draw a card, re-score, and end the turn on a bust. Returns the new total."""
        print("%s have %d." % (self.name, self.num_value))
        drawn = self.current_deck.pop(random.choice(list(self.current_deck.keys())))
        self.current_hand.append(drawn)
        print(drawn)
        self._rescore()
        print("%s hits and now have %d." % (self.name, self.num_value))
        if self.num_value > 21:
            print("%s busted with %d." % (self.name, self.num_value))
            self.my_turn = False
        return self.num_value

    def stand(self):
        """Lock in the current total and end the turn."""
        print("%s stands with %d." % (self.name, self.num_value))
        self.my_turn = False
# plays the game by going through each player + dealer, deals the cards, etc.
# NOTE(review): everything below executes in the *class body*, i.e. once at
# class-definition time; the trailing main() call only builds an empty instance.
class main:
    print("Welcome to Blackjack")
    # User inputs # of decks but is converted to a string, int converts string to integer
    temp_decks = input("Please enter the number of decks you would like to use: ")
    num_decks = int(temp_decks)
    trial = deck()
    trial_deck = trial.create_deck(num_decks).copy()
    # trial.print_deck(trial_deck)
    # User inputs # of players using same code as above to convert to integer
    temp_players = input("Please enter the number of players that would like to play: ")
    num_players = int(temp_players)
    # creates players
    list_players = []
    for player_name in range(1, num_players + 1):
        players_hand = []
        temp_name = input("Please enter Player %d's name: " % player_name)
        list_players.append(Player(trial_deck, temp_name, 0, True, players_hand))
    # creates dealer
    dealers_hand = []
    current_dealer = Dealer(trial_deck, 0, True, dealers_hand)
    current_dealer.temp_deck = trial_deck
    # deals first cards (one per player, then one for the dealer)
    for first_card in range(1, num_players + 1):
        player_first_card = trial_deck.pop(random.choice(list(trial_deck.keys())))
        list_players[first_card - 1].num_value = player_first_card[2]
        list_players[first_card - 1].current_hand.append(player_first_card)
        print("%s starts with: %s of %s." % (list_players[first_card - 1].name, player_first_card[0], player_first_card[1]))
    dealer_first_card = trial_deck.pop(random.choice(list(trial_deck.keys())))
    dealers_hand.append(dealer_first_card)
    current_dealer.num_value = dealer_first_card[2]
    print("Dealer starts with: %s of %s." % (dealer_first_card[0], dealer_first_card[1]))
    # deals second cards
    for second_card in range(1, num_players + 1):
        list_players[second_card - 1].second_card(num_players)
    # game starts with first player and ends with the dealer
    for turn in range(0, num_players):
        while(list_players[turn].my_turn):
            action = input("Would %s like to Hit 'h' or Stand 's': " % list_players[turn].name)
            if(action == "h"):
                list_players[turn].hit()
            elif(action == "s"):
                list_players[turn].stand()
    # dealer's actions noting their conditions of hitting below 17, standing otherwise
    while(current_dealer.my_turn):
        while(current_dealer.num_value < 17):
            current_dealer.hit()
        if(current_dealer.num_value >= 17):
            current_dealer.stand()
main()
|
"""
Implementation of TSN-RGB branch for JHMDB Classification;
1. For each video, sparsely sample 3 segments, each segment contains 5 optical-flow frames, 10 in total for x & y directions ==> Input Shape as [batch * num_segments, H, W, 10];
2. Re-write sample indice, only do segmenting and sampling from [20%, 80%] frames, to erase-out the confusion of starting and ending portion of actions;
3. Load Pre-trained FLOW weights, rewrite the final fc layer to 8 classes;
4. Using Consensus Method for 3 different segments;
TSN model Reference:
https://github.com/yjxiong/tsn-pytorch/model;
TSN Dataset Reference:
https://github.com/yjxiong/tsn-pytorch/dataset;
RGB & Optical Flow Extraction:
https://github.com/yjxiong/temporal-segment-networks;
DataLoader Augmentation for RGB, Optical-Flow and RGBDiff:
/home/zhufl/Workspace/tsn-pytorch/main.py & test_model.py
Update:
1. Modify dataset_JHMDB to sample fixed frame index when testing;
2. Adding confusion matrix ploting func;
"""
import os, sys, cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
from collections import OrderedDict
''' Use class/package from tsn-pytorch'''
cur_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_path, '../../tsn-pytorch'))
from models import TSN
from transforms import *
from ops import ConsensusModule
from dataset_JHMDB import TSNDataSet
sys.path.append(os.path.join(cur_path, '../'))
from confusion_matrix_utils import *
''' Config '''
arch = 'BNInception'          # backbone architecture name passed to TSN
num_class = 51                # output classes of the pre-trained TSN trunk
modality = 'RGB'              # input modality (not Flow/RGBDiff)
crop_fusion_type= 'avg'       # consensus function across segments
num_segments = 25             # segments sampled per video by the loader
flow_prefix = 'flow_'         # filename prefix used for optical-flow frames
batch_size = 32
workers = 1                   # DataLoader worker processes
data_length = 1               # frames per segment (1 for RGB)
# actions = os.listdir('/home/zhufl/Data2/JHMDB')
# actions.remove('train_test_splits')
# print(actions)
p = 14                        # number of leading segments actually fed to the network
print("Using {} out of {} for testing".format(p, num_segments))
class TSN_BIT(nn.Module):
    """TSN trunk (51-way output) followed by a 51->32->21 classifier head.

    NOTE(review): the module docstring mentions 8 classes but the head emits
    21 logits — confirm against the target label set.
    """
    def __init__(self):
        super(TSN_BIT, self).__init__()
        # Pre-trained TSN backbone; consumes `p` segments per video at test time.
        self.tsn = TSN(num_class, num_segments=p, modality=modality,
                       base_model=arch,
                       consensus_type=crop_fusion_type,
                       dropout=0.7)
        self.activation = nn.LeakyReLU()
        self.fc1 = nn.Linear(51, 32)
        self.fc2 = nn.Linear(32, 21)
    def forward(self, input):
        # TSN logits -> LeakyReLU -> fc1 -> LeakyReLU -> fc2 (raw logits out)
        x = self.activation(self.tsn(input))
        x = self.activation(self.fc1(x))
        x = self.fc2(x)
        return x
net = TSN_BIT().cuda()
net.eval()  # inference mode: disables dropout/batch-norm updates
''' Load Dataset '''
model_name = 'TSN_RGB_2019-01-24_12-26-11.pth'
checkpoint = torch.load(os.path.join(cur_path, model_name))
print("Number of parameters recovered from modeo {} is {}".format(model_name, len(checkpoint)))
# Merge checkpoint weights into the current state dict, keeping only keys that
# exist in this model; report anything the checkpoint did not provide.
model_state = net.state_dict()
base_dict = {k:v for k, v in checkpoint.items() if k in model_state}
missing_dict = {k:v for k, v in model_state.items() if k not in base_dict}
for key, value in missing_dict.items():
    print("Missing motion branch param {}".format(key))
model_state.update(base_dict)
net.load_state_dict(model_state)
''' data_length can control how many segments can we get from individual video '''
train_list = os.path.join(cur_path, '../data/JHMDB_train.txt')
test_list = os.path.join(cur_path, '../data/JHMDB_test.txt')
""" Apply normalize onto input data """
input_mean = net.tsn.input_mean
input_std = net.tsn.input_std
if modality != 'RGBDiff':
    normalize = GroupNormalize(input_mean, input_std)
else:
    normalize = IdentityTransform()
# NOTE(review): despite its name, this loader reads test_list (test_mode=True).
train_loader = torch.utils.data.DataLoader(
    TSNDataSet("", test_list, num_segments=num_segments,
               new_length=data_length,
               modality=modality,
               image_tmpl="img_{:05d}.jpg" if modality in ["RGB", "RGBDiff"] else flow_prefix+"{}_{:05d}.jpg",
               test_mode=True,
               transform=torchvision.transforms.Compose([
                   GroupCenterCrop([224, 224]),
                   Stack(roll=arch == 'BNInception'),
                   ToTorchFormatTensor(div=arch != 'BNInception'),
                   normalize,
               ])
           ),
    batch_size=batch_size, shuffle=False,
    num_workers=workers, pin_memory=True,
    drop_last=False)
print("Length of dataset is {}".format(len(train_loader)))
# net = nn.DataParallel(net, device_ids=[1,2,3]).cuda(1)
# net = nn.DataParallel(net, device_ids=[1,2,3]).cuda(1)
''' Start Testing Process '''
accur = []  # predicted class ids, accumulated over all batches
gt = []     # ground-truth class ids, same order
for epoch in range(1):
    for idx, (input, target, indice) in enumerate(train_loader):
        # import pdb;pdb.set_trace()
        # print(indice)
        with torch.no_grad():
            b_shape = input.shape[0]
            '''
            Selecting front 2 frames;
            Commen out if use all sampled frames;
            '''
            # Keep only the first p of num_segments segments per video, then
            # flatten (batch, p, C, H, W) -> (batch*p, C, H, W) for the TSN.
            if p is not None:
                input = input.view(b_shape, num_segments, 3, 224, 224)
                input_var = input[:, :p, :, :, :]
                input_var = input_var.contiguous().view(-1, 3, 224, 224)
            # NOTE(review): Variable(volatile=True) is deprecated (removed in
            # PyTorch >= 0.4); torch.no_grad() above already disables autograd.
            input_var = torch.autograd.Variable(input_var, volatile=True).cuda()
            target = target.detach()
            out = net(input_var).detach()
            out = out.data.cpu().numpy().copy()
            pred = np.argmax(out, 1)  # arg-max over class logits
            accur += pred.tolist()
            gt += target.numpy().tolist()
            print("For epoch {}, batch {}".format(epoch, idx))
''' count the over all accuracy & confusion matrix '''
# Per-class accuracy from the confusion matrix (rows = ground-truth classes).
cf = confusion_matrix(gt, accur).astype(float)
cls_cnt = cf.sum(axis=1)  # samples per ground-truth class
cls_hit = np.diag(cf)     # correctly classified per class
# BUG FIX: the original used Python 2 `print` statements here — a SyntaxError
# under Python 3, which the rest of this file targets with print() calls.
print('cls_hit:')
print(cls_hit)
print('cls_cnt:')
print(cls_cnt)
cls_acc = cls_hit / cls_cnt  # NOTE(review): divides by zero for classes absent from gt
print(cls_acc)
print('Accuracy {:.02f}%'.format(np.mean(cls_acc) * 100))
''' draw the confusion matrix '''
# draw_cnf(gt, accur, actions)
|
# Baby Nap Time Predictor
# This program predicts when a 8-12 month old baby should take their 2 naps a day and bedtime.
# Assumption: 3-3.5 hours of wake time between sleep
def welcome():
    """Print the program banner and input-format instructions."""
    banner = (
        "*** WELCOME TO THE BABY NAP TIME PREDICTOR! ***",
        "This program will help you predict when it's time for your baby to go to sleep.",
        "This program is currently for babies on 2 naps a day.",
        "Please enter (i) hours in 24 hours (ii) minutes converted to 2 decimal points.",
        "",
    )
    for line in banner:
        print(line)
def predict_nap1(firstname):
    """Prompt for last night's sleep/wake times, rate the night, predict nap 1.

    Times are entered as 24-hour decimal numbers (e.g. 19.5 for 19:30).
    Returns the number of hours slept overnight (float).
    """
    print ("FIRST NAP TIME PREDICTION")
    # prompt baby sleep & wake times
    bedtime_sleep = float(input("What time did " + str(firstname) + " go to bed the night before? "))
    bedtime_wake = float(input("What time today did " + str(firstname) + " wake up? "))
    # calc how many hours baby slept at night
    # Afternoon/evening bedtimes (12-24) wrap around midnight.
    if 12 <= bedtime_sleep <= 24:
        sleep_time = (24 - float(bedtime_sleep)) + bedtime_wake
    else:
        sleep_time = bedtime_wake - bedtime_sleep
    # comment + state how many hours baby slept
    if sleep_time < 9:
        print ("Oh no! " + str(firstname) + " did not get enough sleep at only " + str(sleep_time) + " hours. Try to encourage " + str(firstname) + " to sleep between 9-11 hours at night.")
    elif 9 <= sleep_time <= 11:
        print ("Wonderful! " + str(firstname) + " had a good night sleep with " + str(sleep_time) + " hours.")
    else:
        print (str(firstname) + " overslept at " + str(sleep_time) + " hours. Try to limit bedtime sleep between 9-11 hours max.")
    # predict nap1: assumed 3-3.5 hour wake window after waking up
    predict_nap1_window_start = float(bedtime_wake + 3)
    predict_nap1_window_end = float(bedtime_wake + 3.5)
    # state nap1 prediction
    print (str(firstname) + "'s first nap window will be between " + str(predict_nap1_window_start) + " and " + str(
        predict_nap1_window_end) + ". Good luck!")
    print ("")
    return sleep_time
def predict_nap2(firstname):
    """Prompt for nap-1 sleep/wake times, rate the nap, and predict nap 2.

    Times are entered as 24-hour decimal numbers.
    Returns nap-1 length in hours (float).
    """
    print ("SECOND NAP TIME PREDICTION")
    # prompt baby sleep & wake times for nap1
    nap1_sleep = float(input("What time did " + str(firstname) + " fall asleep for nap 1? "))
    nap1_wake = float(input("What time did " + str(firstname) + " wake up from nap 1? "))
    # calc how many hours baby slept for nap1
    nap1_time = nap1_wake - nap1_sleep
    # comment + state how many hours baby slept
    if nap1_time < 0.5:
        # BUG FIX: added the missing space after "Oh no!" for consistency with predict_nap1
        print ("Oh no! " + str(firstname) + " had a light first nap at only " + str(nap1_time) + " hours. Try to encourage baby to sleep at least 30 mins.")
    elif 0.5 <= nap1_time <= 2:
        print ("Wonderful! " + str(firstname) + " had a good first nap at " + str(nap1_time) + " hours.")
    else:
        print (str(firstname) + " overslept at " + str(nap1_time) + " hours. Try to limit naps to 2 hours max.")
    # predict nap2: assumed 3-3.5 hour wake window after nap 1
    predict_nap2_window_start = float(nap1_wake + 3)
    predict_nap2_window_end = float(nap1_wake + 3.5)
    # state nap2 prediction
    print (str(firstname) + "'s second nap window will be between " + str(predict_nap2_window_start) + " and " + str(
        predict_nap2_window_end) + ". Good luck!")
    print ("")
    return nap1_time
def predict_bedtime(firstname):
    """Prompt for nap-2 sleep/wake times, rate the nap, and predict bedtime.

    Times are entered as 24-hour decimal numbers.
    Returns nap-2 length in hours (float).
    """
    print ("BEDTIME PREDICTION")
    # prompt baby sleep & wake times for nap2
    nap2_sleep = float(input("What time did " + str(firstname) + " fall asleep for nap 2? "))
    nap2_wake = float(input("What time did " + str(firstname) + " wake up from nap 2? "))
    # calc how many hours baby slept for nap2
    nap2_time = nap2_wake - nap2_sleep
    # comment + state how many hours baby slept
    if nap2_time < 0.5:
        print ("Oh no! " + str(firstname) + " had a light second nap at only " + str(nap2_time) + " hours. Try to encourage " + str(firstname) + " to sleep at least 30 mins.")
    elif 0.5 <= nap2_time <= 2:
        # BUG FIX: added the missing space after "Wonderful!" for consistency with the other predictors
        print ("Wonderful! " + str(firstname) + " had a good second nap at " + str(nap2_time) + " hours.")
    else:
        print (str(firstname) + " overslept at " + str(nap2_time) + " hours. Try to limit naps to 2 hours max.")
    # predict bedtime: assumed 3-3.5 hour wake window after nap 2
    predict_bedtime_window_start = float(nap2_wake + 3)
    predict_bedtime_window_end = float(nap2_wake + 3.5)
    # state bedtime prediction
    print (str(firstname) + "'s bedtime window will be between " + str(predict_bedtime_window_start) + " and " + str(
        predict_bedtime_window_end) + ". Good luck!")
    print ("")
    return nap2_time
def total_sleep(firstname, sleep_time, nap1_time, nap2_time):
    """Print the combined night + nap sleep hours for the day."""
    combined = float(sleep_time + nap1_time + nap2_time)
    print ("TOTAL SLEEP")
    print ("Over the last day, " + str(firstname) + " slept a total of " + str(combined) + " hours.")
def main():
    """Run the full prediction flow: banner, naps 1-2, bedtime, daily total."""
    welcome()
    # BUG FIX: raw_input() exists only in Python 2; every other prompt in this
    # file uses the Python 3 built-in input(), so main() crashed on Python 3.
    firstname = input("What is your baby's name? ")
    print ("")
    sleep_time = predict_nap1(firstname)
    nap1_time = predict_nap2(firstname)
    nap2_time = predict_bedtime(firstname)
    total_sleep(firstname, sleep_time, nap1_time, nap2_time)
if __name__ == '__main__':
main()
|
import cv2 as cv
import numpy as np
import socket
#python3 rcwl.py & python3 pir.py & python3 laser.py & python3 s360.py& python3 s180.py
# Target machine running the servo/laser listener scripts (see script list above).
host = "192.168.43.136" # set to IP address of target computer
#send_rcwl = 8085
#send_pir =8086
# UDP ports for the 360-degree servo, 180-degree servo, and laser controllers.
send_s360 =8087
send_s180 =8088
send_laser =8089
#send_addr_r = (host, send_rcwl)
#send_addr_p = (host, send_pir)
send_addr_s360 = (host, send_s360)
send_addr_s180 = (host, send_s180)
send_addr_l = (host, send_laser)
UDPSock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Initialize the parameters
confThreshold = 0.5 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
inpWidth = 640 #Width of network's input image
inpHeight = 480 #Height of network's input image
# Load names of classes
classesFile = "./data/coco.names";
# Process inputs
winName = 'Detection in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')
# Give the configuration and weight files for the model and load the network using them.
#acurate version but slow
#modelConfiguration = "./cfg/yolov3.cfg";
#modelWeights = "./weights/yolov3.weights";
#modelConfiguration = "./cfg/tiny-yolo.cfg";
#modelWeights = "./weights/tiny-yolo.weights";
#fast version
modelConfiguration = "./cfg/yolov2-tiny.cfg";
modelWeights = "./weights/yolov2-tiny.weights";
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# NOTE(review): `global` at module level is a no-op; these lines just define
# the shared tracking state used by cal_center()/mul_person().
global pers_now
pers_now = 0
global count
count = 0
def cal_center(left,top,l,h):
    """Steer the turret toward a person's bounding box; fire laser when centred.

    Args:
        left, top: top-left corner of the detection box, in pixels.
        l, h: box width and height, in pixels.

    Compares the fixed frame centre (320, 240) against the central quarter of
    the box; sends UDP commands to the 360/180 servos, throttled by the global
    `count` so a move command goes out only every 3rd (or 5th) call.
    """
    pct=top+(h/2) #person frame center top
    pcl=left+(l/2) #person frame center left
    #pc=(pct,pcl) #center of person frame
    # Tolerance window: the middle quarter of the box around its centre.
    pl=pcl-(l/4)
    pr=pcl+(l/4)
    pt=pct-(h/4)
    pb=pct+(h/4)
    fcl=320
    fct=240
    # Frame centre inside the window on both axes -> target locked, fire laser.
    if(fcl>pl and fcl<pr and fct<pb and fct>pt):
        global pers_now
        global count
        print("all condn satisfied turn on laser")
        data1="laserTrue"
        UDPSock_send.sendto(data1.encode(), send_addr_l)
        pers_now=pers_now+1
    # Vertically aligned only -> nudge the 360 servo left/right.
    elif (fct<pb and fct>pt):
        print("Height satisfied and stop servo 180")
        #code for moving along the y axis vertically
        if(pcl<fcl):
            if(count==2):
                print("move left")
                data="move_left"
                UDPSock_send.sendto(data.encode(), send_addr_s360)
            count=(count+1)%3
        else:
            if(count==2):
                print("move right")
                data="move_right"
                UDPSock_send.sendto(data.encode(), send_addr_s360)
            count=(count+1)%3
    # Horizontally aligned only -> nudge the 180 servo up/down.
    elif(fcl>pl and fcl<pr):
        print("length satisfied and stop servo movement 360")
        #code for moving along the x axis horizontally
        if(pct<fct):
            if(count==2):
                print("move top")
                data="move_top"
                UDPSock_send.sendto(data.encode(), send_addr_s180)
            count=(count+1)%3
        else:
            if(count==2):
                print("move bottom")
                data="move_bottom"
                UDPSock_send.sendto(data.encode(), send_addr_s180)
            count=(count+1)%3
    # Aligned on neither axis -> step both servos toward the target.
    # NOTE(review): this branch mixes %3 and %5 throttles and checks count==4
    # for "move bottom" only — confirm the cadence is intentional.
    else:
        if(pcl<fcl):
            if(count==2):
                print("move left")
                data="move_left"
                UDPSock_send.sendto(data.encode(), send_addr_s360)
            count=(count+1)%3
        else:
            if(count==2):
                print("move right")
                data="move_right"
                UDPSock_send.sendto(data.encode(), send_addr_s360)
            count=(count+1)%3
        if(pct<fct):
            if(count==2):
                print("move top")
                data="move_top"
                UDPSock_send.sendto(data.encode(), send_addr_s180)
            count=(count+1)%5
        else:
            if(count==4):
                print("move bottom")
                data="move_bottom"
                UDPSock_send.sendto(data.encode(), send_addr_s180)
            count=(count+1)%5
# Get the names of the output layers
def getOutputsNames(net):
    """Return the names of the network's unconnected (i.e. output) layers."""
    all_names = net.getLayerNames()
    out_ids = net.getUnconnectedOutLayers()
    # Layer ids are 1-based and wrapped in single-element arrays by this cv build.
    return [all_names[idx[0] - 1] for idx in out_ids]
# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
    """Draw one detection box and its class/confidence label on the global frame.

    Coordinates are pixel values; relies on the module-level `frame` and
    `classes` set up by the capture loop.
    """
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (255, 135, 161), 3)
    # Fixed marker at the frame centre (320, 240) used by the aiming logic.
    cv.rectangle(frame, (318, 238), (322, 242), (255, 135, 161), 3)
    #cv.rectangle(frame, (0, 50), (323, 244), (255, 135, 161), 3)
    label = '%.2f' % conf
    # Get the label for the class name and its confidence
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)
    #Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
    #cv.rectangle(frame, (left, top )), (left , top), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1)
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
    """Filter raw YOLO outputs, feed the person tracker, and draw kept boxes.

    Args:
        frame: current BGR image (drawn on in place via drawPred).
        outs: YOLO output arrays; each detection row holds relative
              [cx, cy, w, h, objectness, class scores...] — TODO confirm layout
              matches this cv/darknet build.
    """
    frameHeight = frame.shape[0]
    frameWidth = frame.shape[1]
    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign the box's class label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                # Convert relative centre/size to absolute top-left + size.
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])
                #cal_center(left,top,width,height)
    #print(boxes,classIds)
    data=[classIds,boxes]
    data=str(data)  # NOTE(review): built but never sent — leftover from the UDP debug path below
    mul_person(boxes,classIds)
    #UDPSock.sendto(data.encode(), addr)
    # Perform non maximum suppression to eliminate redundant overlapping boxes with
    # lower confidences.
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
        #print(box)
def mul_person(boxes,classIds):
    """Track the `pers_now`-th detection; aim at it if it is a person (class 0).

    Falls back to telling the 360 servo to resume its regular sweep when the
    tracked index is out of range or not a person.
    """
    global pers_now
    no=len(classIds)
    # Clamp the tracked index; when pers_now == no the aiming branch below is
    # skipped and the servos resume sweeping.
    if( pers_now >= no):
        pers_now=no
    print(pers_now,"current person",no)
    # COCO class id 0 is "person" — only aim at people.
    if(pers_now<no and classIds[pers_now]==0):
        print(boxes[pers_now],"boxes[no]")
        lt=boxes[pers_now][0]
        tp=boxes[pers_now][1]
        l=boxes[pers_now][2]
        h=boxes[pers_now][3]
        cal_center(lt,tp,l,h)
    else:
        #send regular rotation
        data="none"
        UDPSock_send.sendto(data.encode(), send_addr_s360)
# Webcam input
cap = cv.VideoCapture("http://192.168.43.136:8082/")
#cap = cv.VideoCapture(0)
# Main capture loop: runs until a key is pressed in the display window.
while cv.waitKey(1) < 0:
    # get frame from the video
    hasFrame, frame = cap.read(0)
    frame = cv.flip(frame,1)  # mirror the image horizontally
    # Stop the program if reached end of video
    if not hasFrame:
        print("Done processing !!!")
        cv.waitKey(3000)
        # Release device
        cap.release()
        break
    # Create a 4D blob from a frame.
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
    # Sets the input to the network
    net.setInput(blob)
    # Runs the forward pass to get output of the output layers
    outs = net.forward(getOutputsNames(net))
    # Remove the bounding boxes with low confidence
    postprocess(frame, outs)
    # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
    t, _ = net.getPerfProfile()
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    # Write the frame with the detection boxes
    cv.imshow(winName, frame)
UDPSock_send.close()
|
from abc import abstractmethod
from typing import Any
from meiga import AnyResult, NotImplementedMethodError
from petisco.base.misc.interface import Interface
class AsyncAppService(Interface):
    """
    A base class for creating async app services.

    Subclasses override `execute`; the abstract base returns the meiga
    NotImplementedMethodError failure result rather than raising.
    """
    @abstractmethod
    async def execute(self, *args: Any, **kwargs: Any) -> AnyResult:
        return NotImplementedMethodError
|
import asyncio
import logging
import os
import random
import re
import traceback
from typing import Optional, Tuple, List
from tgintegration import InteractionClient, BotController, ReplyKeyboard
# Stop after this many automation rounds; None means loop (effectively) forever.
MAX_RUNS: Optional[int] = None
# Session file name handed to the InteractionClient (see examples/README).
SESSION_NAME: str = "my_account"
async def main():
    """Drive the DinoParkGame automation loop until MAX_RUNS rounds or Ctrl-C."""
    # This example uses the configuration of `config.ini` (see examples/README)
    game = DinoParkGame(session_name=SESSION_NAME, log_level=logging.DEBUG)
    await game.start()
    for _ in range(MAX_RUNS or 999999):
        try:
            await asyncio.sleep(1.5)
            await game.buy_dinosaurs()
            await game.collect_diamonds()
            await game.sell_diamonds()
            await game.play_lucky_number()
            await game.get_bonus()
            await asyncio.sleep(90)
            await game.controller.clear_chat()
        except KeyboardInterrupt:
            break
        # BUG FIX: a bare `except:` also swallowed SystemExit, GeneratorExit and
        # asyncio.CancelledError; catching Exception lets those propagate while
        # still logging ordinary errors and continuing the loop.
        except Exception:
            traceback.print_exc()
    await game.controller.client.stop()
# Examples root (two levels up from this file), used to locate config.ini.
examples_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class DinoParkGame:
    """Automation wrapper around the @DinoParkNextBot Telegram game.

    Drives the bot's reply/inline keyboards via tgintegration and mirrors the
    in-game balances locally so purchases can be planned between queries.
    """
    # Matches "<label>: <number>" lines where the number may use spaces as
    # thousands separators, e.g. "Purchases: 1 234".
    VALUE_PATTERN = re.compile(r"^.*?\s*(\w+): ([\d ]+).*$", re.MULTILINE)
    NUMBERS_ONLY_PATTERN = re.compile(r"\b(\d[\d ]+)\b")
    def __init__(self, session_name, log_level=logging.INFO):
        # Balances are unknown until update_balance() first runs.
        self.purchase_balance = None
        self.withdrawal_balance = None
        self.diamonds = None
        self.menu: Optional[ReplyKeyboard] = None
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(log_level)
        client = InteractionClient(
            session_name=session_name,
            global_action_delay=1.0,  # space out requests to avoid flood limits
            config_file=os.path.join(examples_dir, "config.ini"),
        )
        self.controller = BotController(
            bot_under_test="@DinoParkNextBot", client=client
        )
    async def start(self):
        """Connect, fetch the main menu keyboard, and read initial balances."""
        await self.controller.start()
        await self._update_keyboard()
        await self.update_balance()
    async def _update_keyboard(self):
        # /start always answers with the game's main reply keyboard.
        start = await self.controller.send_command_await("start")
        self.menu = start.reply_keyboard
    def _extract_values(self, text):
        """Parse "label: number" lines into a {label_lowercase: int} dict."""
        groups = self.VALUE_PATTERN.findall(text)
        try:
            return {g[0].lower(): str_to_int(g[1]) for g in groups}
        # NOTE(review): this comprehension cannot raise KeyError — if anything,
        # str_to_int would raise ValueError; confirm which failure this guard
        # was meant to swallow.
        except KeyError:
            return {}
    async def update_balance(self):
        """Refresh purchase/withdrawal balances and the diamond count."""
        balance_menu = await self.menu.press_button_await(r".*Balance")
        values = self._extract_values(balance_menu.full_text)
        self.purchase_balance = values["purchases"]
        self.withdrawal_balance = values["withdrawals"]
        diamonds_menu = await self.menu.press_button_await(r".*Farm")
        diamonds_values = self._extract_values(diamonds_menu.full_text)
        self.diamonds = diamonds_values["total"]
        self.logger.debug(
            "Balance updated: +{} for purchases, +{} for withdrawals, +{} diamonds.".format(
                self.purchase_balance, self.withdrawal_balance, self.diamonds
            )
        )
    async def collect_diamonds(self):
        """Press the farm's "Collect diamonds" button and tally the haul."""
        farm = await self.menu.press_button_await(".*Farm")
        collected = await farm.inline_keyboards[0].press_button_await(
            ".*Collect diamonds"
        )
        num_collected = self._extract_values(collected.full_text).get("collected", 0)
        self.diamonds += num_collected
        self.logger.info(
            "{} diamonds collected.".format(
                num_collected if num_collected > 0 else "No"
            )
        )
    async def sell_diamonds(self):
        """Sell diamonds on the marketplace if selling is currently offered."""
        market = await self.menu.press_button_await(r".*Marketplace")
        if not market.inline_keyboards:
            self.logger.debug("No selling available at the moment.")
            return
        await market.inline_keyboards[0].press_button_await(r"Sell diamonds.*")
        await self.update_balance()
    async def buy_dinosaurs(self):
        """Greedily buy the most expensive affordable dinosaur until broke."""
        dinosaurs_menu = (
            await self.menu.press_button_await(r".*Dinosaurs")
        ).inline_keyboards[0]
        dinos = await dinosaurs_menu.press_button_await(r".*Buy dinosaurs")
        dino_costs: List[Tuple[int, int]] = []  # (KeyboardIndex, Cost)
        for n, msg in enumerate(dinos.messages):
            # "Worth" in the message has no colon (:) before the number, therefore we use the numbers only pattern
            values = self.NUMBERS_ONLY_PATTERN.findall(msg.caption)
            cost = str_to_int(values[0])
            dino_costs.append((n, cost))
        while True:
            affordable_dinos = (x for x in dino_costs if x[1] <= self.purchase_balance)
            most_expensive_affordable: Optional[Tuple[int, int]] = max(
                affordable_dinos, key=lambda v: v[1], default=None
            )
            if most_expensive_affordable is None:
                break
            dino_msg_index, dino_cost = most_expensive_affordable
            bought = await dinos.inline_keyboards[dino_msg_index].press_button_await(
                r".*Buy"
            )
            self.purchase_balance -= dino_cost
            self.logger.info(
                f"Bought dinosaur: {bought.full_text} -- Remaining balance: {self.purchase_balance}"
            )
    async def play_lucky_number(self):
        """Place a random 1-30 bet in the Lucky Number game (once per period)."""
        lucky_number = await (
            await self.menu.press_button_await(r".*Games")
        ).reply_keyboard.press_button_await(r".*Lucky number")
        bet = await lucky_number.reply_keyboard.press_button_await(r".*Place your bet")
        if "only place one bet per" in bet.full_text.lower():
            await bet.delete_all_messages()
            return
        await self.controller.send_message_await(str(random.randint(1, 30)))
        self.logger.debug("Bet placed.")
    async def get_bonus(self):
        """Claim the periodic bonus; tidy up the chat if already claimed."""
        bonus = await (
            await self.menu.press_button_await(r".*Games")
        ).reply_keyboard.press_button_await(r".*Bonus.*")
        if "already claimed" in bonus.full_text.lower():
            # Clean up
            await bonus.delete_all_messages()
def str_to_int(value: str) -> int:
    """Parse an integer rendered with spaces as thousands separators, e.g. '1 234'."""
    digits = value.replace(" ", "")
    return int(digits)
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
|
# First try
def part1(numbers):
    """Print the product of the first index-ordered pair summing to 2020."""
    i, j = 0, 1
    while numbers[i] + numbers[j] != 2020:
        if j == len(numbers) - 1:
            # Exhausted partners for i; advance i and restart j just after it.
            i += 1
            j = i + 1
        else:
            j += 1
    print(numbers[i] * numbers[j])
# Improved to O(n) amortized
def part1_alt(numbers):
    """O(n): print x * (2020 - x) for each element whose complement is present."""
    seen = set(numbers)
    for value in seen:
        complement = 2020 - value
        if complement in seen:
            print(value * complement)
# First try
def part2(numbers):
    """Print the product of the first triple this naive scan finds summing to 2020.

    NOTE: the pointer-advance structure mirrors the original "first try"
    exactly (including its quirky if/if-else pairing) — behavior preserved.
    """
    a, b, c = 0, 1, 2
    while numbers[a] + numbers[b] + numbers[c] != 2020:
        if c == len(numbers) - 1:
            b += 1
            c = b + 1
        if b == len(numbers) - 2:
            a += 1
            b = a + 1
        else:
            c += 1
    print(numbers[a] * numbers[b] * numbers[c])
# Improved to O(n^2) amortized
def part2_alt(numbers):
    """O(n^2): print x*y*z for every ordered set pair whose complement is present."""
    pool = set(numbers)
    for first in pool:
        for second in pool:
            third = 2020 - first - second
            if third in pool:
                print(first * second * third)
if __name__ == "__main__":
with open("input.txt") as f:
lines = f.readlines()
numbers = map(lambda x: int(x), lines)
part1_alt(numbers)
part2_alt(numbers) |
from collections import deque
from prioritized_exp import RL_brain
import numpy
class Memory(object):
    """Experience-replay buffer for (state, action, reward, next_state, terminal) tuples.

    Backed either by a prioritized sum-tree (RL_brain.Memory) or a plain
    FIFO deque, and converts sampled transitions into the feature arrays the
    learner consumes. Raw transitions (not feature vectors) are stored
    because pre-extracted feature vectors may take too much memory.
    """

    def __init__(self, capacity, prioritized, planning, n_features, n_actions, batch_size,
                 qsa_feature_extractor, qsa_feature_extractor_for_all_acts):
        """
        :param capacity: maximum number of stored transitions
        :param prioritized: use prioritized replay (sum-tree) if True
        :param planning: whether planning samples will be drawn
        :param n_features: length of a Q(s, a) feature vector
        :param n_actions: number of discrete actions
        :param batch_size: transitions per sampled batch
        :param qsa_feature_extractor: (state, action) -> feature vector for Q(s, a)
        :param qsa_feature_extractor_for_all_acts: state -> per-action feature matrix for Q(s', a')
        """
        self.size = 0  # current memory size: 0 ~ capacity
        self.virtual_size = 0  # the size of memory if there were no cap
        self.capacity = capacity
        self.prioritized = prioritized
        self.planning = planning
        self.n_actions = n_actions
        self.n_features = n_features
        self.batch_size = batch_size
        self.qsa_feature_extractor = qsa_feature_extractor
        self.qsa_feature_extractor_for_all_acts = qsa_feature_extractor_for_all_acts
        if self.prioritized:
            self.memory = RL_brain.Memory(self.capacity)
        else:
            self.memory = deque(maxlen=self.capacity)

    def store(self, transition):
        """Append one (state, action, reward, next_state, terminal) tuple."""
        if self.prioritized:
            self.memory.store(transition)
        else:
            self.memory.append(transition)
        self.size = min(self.size + 1, self.capacity)
        self.virtual_size += 1  # number of transitions saved historically

    def sample(self):
        """Draw one training batch; see _build_batch for the array layout."""
        if self.prioritized:
            return self._prioritized_sample()
        else:
            return self._no_prioritized_sample()

    def planning_sample(self, n):
        """Draw n planning samples (prioritized backend only).

        :param n: the number of planning samples
        """
        if self.prioritized:
            return self._prioritized_planning_sample(n)
        else:
            pass  # not implemented yet

    def _prioritized_planning_sample(self, n):
        pass

    def _build_batch(self, samples):
        """Convert raw transitions into dense batch arrays.

        Shared by both samplers (the loop was previously duplicated).
        :return: (qsa_feature, qsa_next_features, rewards, terminal_weights)
        """
        qsa_feature = numpy.zeros((self.batch_size, self.n_features))  # features for Q(s, a)
        qsa_next_features = numpy.zeros((self.batch_size, self.n_actions,
                                         self.n_features))  # features for Q(s', a') for all a'
        rewards = numpy.zeros(self.batch_size)
        terminal_weights = numpy.ones(self.batch_size)
        for i, (state, action, reward, next_state, terminal) in enumerate(samples):
            rewards[i] = reward
            terminal_weights[i] = 0. if terminal else 1.  # mask bootstrapping on terminal states
            qsa_feature[i] = self.qsa_feature_extractor(state, action)
            qsa_next_features[i] = self.qsa_feature_extractor_for_all_acts(next_state)
        return qsa_feature, qsa_next_features, rewards, terminal_weights

    def _prioritized_sample(self):
        tree_idx, samples, is_weights = self.memory.sample(self.batch_size)
        is_weights = numpy.squeeze(is_weights)  # backend returns a 2-d is_weights array
        batch = self._build_batch(samples)
        return batch + (is_weights, tree_idx)

    def _no_prioritized_sample(self):
        assert self.batch_size <= len(self.memory)
        sample_mem_idxs = numpy.random.choice(len(self.memory), self.batch_size, replace=False)
        batch = self._build_batch(self.memory[idx] for idx in sample_mem_idxs)
        # every sample is equally important without prioritization
        is_weights = numpy.ones(self.batch_size)
        return batch + (is_weights, sample_mem_idxs)

    def update_priority(self, e_ids, abs_errors):
        """Feed TD errors back so the sum-tree can reweight its entries."""
        assert self.prioritized
        self.memory.batch_update(e_ids, abs_errors)
|
import logging
import torch
import numpy as np
from torch.distributions import multivariate_normal
from dataset import generate_clusters
# Log to stderr (uncomment `filename` to log to a file instead); DEBUG level
# is used throughout to trace EM progress.
logging.basicConfig(#filename='example.log',
                    format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    level=logging.DEBUG)
# Fixed seed so the random cluster initialisation is reproducible.
np.random.seed(seed=42)
# Small constant available to guard against division by / log of zero.
EPS = 1e-6
def initialize_params(data, k, var=1):
    """Pick k random data points as initial means, with constant variances
    and a uniform prior over cluster ownership.

    :param data: design matrix (examples, features)
    :param k: number of clusters
    :param var: initial per-dimension variance
    :return: (mu, var, pi) tensors of shapes (k, d), (k, d) and (m, k)
    """
    m, d = data.size()
    # choose k distinct rows of the data to seed the means
    chosen = torch.from_numpy(np.random.choice(m, k, replace=False))
    means = data[chosen]
    variances = torch.Tensor(k, d).fill_(var)
    # every point starts with equal probability of belonging to each cluster
    ownership = torch.Tensor(m, k).fill_(1. / k)
    return means, variances, ownership
def log_likelihood(data, mus, vars, log_expected_pis):
    """Compute the expected log likelihood of the GMM with parameters mus, vars.

    Each cluster's diagonal Gaussian scores every data point; the scores are
    weighted by the ownership probabilities (exp of `log_expected_pis`) and
    summed over points and clusters. Intuitively, a point far from a cluster
    has near-zero ownership there, so its (very negative) log probability
    barely affects the total.
    """
    # back to probability space
    weights = torch.exp(log_expected_pis)
    # one weighted-likelihood entry per cluster; summed at the end
    per_cluster = torch.Tensor(mus.size(0), 1)
    for idx, (mean, variance) in enumerate(zip(mus, vars)):
        # diagonal-covariance Gaussian for cluster `idx`
        gaussian = multivariate_normal.MultivariateNormal(mean, torch.diag(variance))
        # @ = pointwise multiply then sum: ownership-weighted log probs
        weighted = weights[:, idx] @ gaussian.log_prob(data)
        per_cluster[idx] = weighted.sum()
    return per_cluster.sum()
# def argmax_cluster_ownership(pis):
# h, w = pis.size()
# v, idx = torch.max(pis, dim=1)
# argmax_pis = torch.zeros(h, w, dtype=torch.uint8)
#
# for i in range(w):
# argmax_pis[idx == i, i] = 1
# return argmax_pis
def expectation_step(data, mus, vars):
    """Expectation step of the EM algorithm.

    Computes the expected value of the latent ownership variables: for each
    cluster, score all points under its diagonal Gaussian, then normalise
    across clusters in log space.

    :param data: (n, d) design matrix
    :param mus: (k, d) cluster means
    :param vars: (k, d) per-dimension cluster variances
    :return: (n, k) tensor of log ownership probabilities
    """
    n, _ = data.size()
    k = mus.size(0)
    log_ownership = torch.zeros(n, k)
    for idx, (mean, variance) in enumerate(zip(mus, vars)):
        gaussian = multivariate_normal.MultivariateNormal(mean, torch.diag(variance))
        log_ownership[:, idx] = gaussian.log_prob(data)
    # normalise over clusters (still in log space)
    log_ownership -= torch.logsumexp(log_ownership, dim=1).view(n, 1)
    return log_ownership
def maximization_step(data, mus, vars, log_expected_pis):
    """Maximization step of the EM algorithm.

    Re-estimates the Gaussian parameters given the expected cluster
    ownership: each mean is the ownership-weighted average of the data, each
    variance the weighted mean squared deviation.

    :return: (mu_star, var_star) with the same shapes as mus / vars
    """
    new_mus = torch.zeros(mus.size())
    new_vars = torch.zeros(vars.size())
    # back to probability space
    ownership = torch.exp(log_expected_pis)
    for idx in range(mus.size(0)):
        weights = ownership[:, idx]
        # effective number of points owned by this cluster (if ownership were
        # a binary matrix, this would be an exact count)
        n_k = weights.sum(dim=0)
        new_mus[idx, :] = (weights @ data) / n_k
        new_vars[idx, :] = (weights @ (data - new_mus[idx, :]) ** 2) / n_k
    return new_mus, new_vars
def _distance(x1, x2):
# compute euclidean distance between 2 data points
return torch.sqrt(torch.sum((x1 - x2) ** 2))
def degenerative_check(mus, dist_thresh=0.5):
    """Perturb near-coincident cluster means to break degenerate solutions.

    Whenever two centroids sit within `dist_thresh` of each other, the
    second one is nudged by standard-normal noise (in place).
    Note: this heuristic has not shown to work very well yet.

    :param mus: (k, d) tensor of cluster means (modified in place)
    :param dist_thresh: distance below which two centroids count as degenerate
    :return: the (possibly perturbed) means tensor
    """
    k = mus.size(0)
    for a in range(k):
        for b in range(a + 1, k):
            gap = _distance(mus[a, :], mus[b, :])
            if gap > dist_thresh:
                continue
            logging.debug("distance[{}, {}] = {}".format(a, b, gap))
            previous = mus[b, :].clone()
            # randomly perturb the second centroid of the degenerate pair
            mus[b, :] += torch.randn(mus[b, :].size())
            logging.info("degenerative cluster centroids found, randomly perturbing cluster ({}) -> ({})".format(previous,
                                                                                                                 mus[b, :]))
    return mus
def expectation_maximization(data, mus, vars, max_iters=1000, converge_thresh=1e-3):
    """Fit a diagonal GMM with EM until the log likelihood stops changing.

    :param data: (n, d) design matrix
    :param mus: (k, d) initial means
    :param vars: (k, d) initial variances
    :param max_iters: hard cap on EM iterations
    :param converge_thresh: minimum log-likelihood change to keep iterating
    :return: (mu_star, var_star, log_expected_pis) from the final iteration
    """
    history = [-np.Inf]  # log likelihoods over time, for convergence + logging
    iteration = 0
    converged = False
    while not converged:
        # E-step: ownership expectations under the current parameters
        log_expected_pis = expectation_step(data, mus, vars)
        # M-step: parameters maximising the expected complete-data likelihood
        mu_star, var_star = maximization_step(data, mus, vars, log_expected_pis)
        # Track model fit; this should improve over time.
        log_like = log_likelihood(data, mu_star, var_star, log_expected_pis)
        if np.abs(history[-1] - log_like) < converge_thresh:
            converged = True
        history.append(log_like)
        # adopt the new parameters for the next round
        mus = mu_star
        vars = var_star
        # try to pull apart clusters that are collapsing onto one another
        mus = degenerative_check(mus)
        iteration += 1
        logging.debug("EM iteration[{}] = {}".format(iteration, log_like))
        if iteration % 10 == 0:
            logging.debug("MU")
            logging.debug(mus)
            logging.debug("VAR")
            logging.debug(vars)
        if iteration > max_iters:
            logging.debug('Breaking because we hit {} iterations'.format(max_iters))
            break
    return mu_star, var_star, log_expected_pis
if __name__ == '__main__':
    # generate sample data: K synthetic Gaussian clusters with known parameters
    K = 3
    clusters, true_mus, true_vars = generate_clusters(K, samples_per_cluster=100)
    X = torch.cat(clusters)
    m, k = X.size()
    logging.debug("m = {}, k = {}".format(m, k))
    # initialize model parameters (random means, unit variances, uniform pis)
    mu, var, pi = initialize_params(X, K)
    # learn model parameters using expectation maximization
    mu_star, var_star, log_expected_pis = expectation_maximization(X, mu, var, pi)
    # print the ground truth next to the learned parameters for eyeballing
    print("True parameters")
    print("Cluster means")
    print(true_mus)
    print("\nCluster variances")
    print(true_vars)
    print("\n")
    print("Learned params:")
    print(mu_star)
    print("----\n ")
    # sqrt converts learned variances to standard deviations for comparison
    print(torch.sqrt(var_star))
    print("----\n ")
    # hard cluster assignment: most probable cluster per data point
    cluster_prob, cluster_idx = torch.max(torch.exp(log_expected_pis), dim=1)
    print(cluster_idx)
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.org/
@software: PyCharm
@file: urls.py
@time: 2016/11/2 下午7:15
"""
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from . import views
from haystack.forms import ModelSearchForm
from haystack.query import SearchQuerySet
from haystack.views import SearchView
# Bug fix: the '.' before 'html' is now escaped ('\.html') in every pattern.
# An unescaped dot matches any character, so e.g. '/category/fooXhtml'
# resolved just like '/category/foo.html'.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^page/(?P<page>\d+)$', views.IndexView.as_view(), name='index_page'),
    url(r'^article/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<article_id>\d+)\.html$',
        views.ArticleDetailView.as_view(),
        name='detailbyid'),
    url(r'^blogpage/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<page_id>\d+)-(?P<slug>[\w-]+)\.html$',
        views.ArticleDetailView.as_view(),
        name='pagedetail'),
    url(r'^category/(?P<category_name>[\w-]+)\.html$', views.CategoryDetailView.as_view(), name='category_detail'),
    url(r'^category/(?P<category_name>[\w-]+)/(?P<page>\d+)\.html$', views.CategoryDetailView.as_view(),
        name='category_detail_page'),
    url(r'^author/(?P<author_name>\w+)\.html$', views.AuthorDetailView.as_view(), name='author_detail'),
    url(r'^author/(?P<author_name>\w+)/(?P<page>\d+)\.html$', views.AuthorDetailView.as_view(),
        name='author_detail_page'),
    url(r'^tag/(?P<tag_name>.+)\.html$', views.TagDetailView.as_view(), name='tag_detail'),
    url(r'^tag/(?P<tag_name>.+)/(?P<page>\d+)\.html$', views.TagDetailView.as_view(), name='tag_detail_page'),
    url(r'^upload', views.fileupload, name='upload'),
    url(r'^refresh', views.refresh_memcache, name='refresh')
]
|
import argparse
import concurrent.futures
import sys
import time as cpytime
import os
from os.path import exists, join
from glob import glob
import numpy as np
import pandas as pd
import xarray as xr
import yaml
from scipy import interpolate
from soundings.preprocessing import goesimager, rtmaloader, raploader
class DataHolder(object):
    """Mutable container for one radiosonde launch and its co-located data.

    Holds the sonde profile plus the matching NWP profile, GOES imagery
    patch and RTMA analysis patch. Fields are populated piecemeal by the
    set_* helper functions; `save` serializes everything to NetCDF.
    """

    def __init__(self, sonde_time):
        # sonde_time: release time of the radiosonde (pd.Timestamp); all
        # other fields are filled in later by the various set_* helpers.
        self.sonde_time = sonde_time
        self.sonde_lon = None
        self.sonde_lat = None
        self.sonde_file = None
        self.sonde_pres = None
        self.sonde_tdry = None
        self.sonde_dp = None
        self.sonde_alt = None
        self.sonde_site_id = None
        self.nwp_file = None
        self.nwp_lon = None
        self.nwp_lat = None
        self.nwp_pres = None
        self.nwp_tdry = None
        self.nwp_dp = None
        self.nwp_alt = None
        self.goes_files = None
        self.goes_patches = None
        self.goes_patch_lons = None
        self.goes_patch_lats = None
        self.rtma_files = None
        self.rtma_patches = None
        self.rtma_patch_lons = None
        self.rtma_patch_lats = None

    def save(self, processed_dir):
        """Write all collected fields to `<processed_dir>/<site>_<time>.nc`.

        Creates the directory if needed and overwrites any existing file.
        Dimension sizes come from the module-level `config` (set in main),
        so `save` must only be called after the config has been loaded.
        """
        patch_ds = xr.Dataset(data_vars={'sonde_rel_time': (self.sonde_time),
                                         'sonde_file': (self.sonde_file),
                                         'sonde_site_id': (self.sonde_site_id),
                                         'sonde_lon': (self.sonde_lon),
                                         'sonde_lat': (self.sonde_lat),
                                         'sonde_pres': (('profile_dims'), self.sonde_pres),
                                         'sonde_tdry': (('profile_dims'), self.sonde_tdry),
                                         'sonde_dp': (('profile_dims'), self.sonde_dp),
                                         'sonde_alt': (('profile_dims'), self.sonde_alt),
                                         'nwp_file': (self.nwp_file),
                                         'nwp_lon': (self.nwp_lon),
                                         'nwp_lat': (self.nwp_lat),
                                         'nwp_pres': (('nwp_dims'), self.nwp_pres),
                                         'nwp_tdry': (('nwp_dims'), self.nwp_tdry),
                                         'nwp_dp': (('nwp_dims'), self.nwp_dp),
                                         'nwp_alt': (('nwp_dims'), self.nwp_alt),
                                         'goes_files': (('band'), self.goes_files),
                                         'goes_abi': (('band', 'goes_y', 'goes_x'), self.goes_patches),
                                         'goes_lon': (('goes_y', 'goes_x'), self.goes_patch_lons),
                                         'goes_lat': (('goes_y', 'goes_x'), self.goes_patch_lats),
                                         'rtma_files': (('rtma_type'), self.rtma_files),
                                         'rtma_values': (('rtma_type', 'rtma_y', 'rtma_x'), self.rtma_patches),
                                         'rtma_lon': (('rtma_y', 'rtma_x'), self.rtma_patch_lons),
                                         'rtma_lat': (('rtma_y', 'rtma_x'), self.rtma_patch_lats)
                                         },
                              coords={'goes_y': np.arange(config['goes']['patch_y_length_pixels']),
                                      'goes_x': np.arange(config['goes']['patch_x_length_pixels']),
                                      'band': config['goes']['bands'],
                                      'rtma_y': np.arange(config['rtma']['patch_y_length_pixels']),
                                      'rtma_x': np.arange(config['rtma']['patch_x_length_pixels']),
                                      'rtma_type': config['rtma']['rtma_type'],
                                      'profile_dims': np.arange(config['raob']['profile_dims']),
                                      'nwp_dims': np.arange(config['nwp']['nwp_dims'])})
        patch_ds['sonde_pres'].attrs['units'] = 'hectopascals'
        patch_ds['sonde_tdry'].attrs['units'] = 'celsius'
        patch_ds['sonde_dp'].attrs['units'] = 'celsius'
        patch_ds['sonde_alt'].attrs['units'] = 'meters'
        # GOES values are radiances unless brightness-temperature conversion was requested
        patch_ds['goes_abi'].attrs['units'] = 'rad' if config['goes']['bt'] == False else 'bt'
        patch_ds['rtma_values'].attrs['units'] = 'LPI: something, LTI: something, LRI: something'
        out_file = join(
            processed_dir, f"{self.sonde_site_id}_{self.sonde_time.strftime('%Y_%m_%d_%H%M')}.nc")
        print(out_file)
        if not exists(processed_dir):
            os.makedirs(processed_dir)
        # remove any stale output so to_netcdf writes a fresh file
        try:
            os.remove(out_file)
        except OSError:
            pass
        patch_ds.to_netcdf(out_file, engine='netcdf4')
        patch_ds.close()
def interpolate_to_height_intervals(alt, y, altitude_intervals):
    """Linearly resample y (sampled at heights `alt`) onto `altitude_intervals`.

    Altitude does not always increase monotonically; `assume_sorted=True`
    tells scipy to skip sorting anyway, mirroring the original behaviour.
    """
    interpolant = interpolate.interp1d(alt, y, assume_sorted=True)
    return interpolant(altitude_intervals)
def nwp_querry_sgp(time, locations, dataset):
    """Fetch the NWP profile for a single SGP location and store it on `dataset`."""
    (nwp_file, pres, temp, spec,
     height, lons, lats) = extract_nwp_values(time, locations)
    # single-location query: unpack the first (only) profile
    set_nwp_profile(nwp_file, pres[0], temp[0], spec[0], height[0],
                    lons[0], lats[0], dataset)
def extract_nwp_values(time, locations):
    """Load the RAP analysis closest to `time` and extract profiles at `locations`.

    :return: (rap_file, pres, temp, spec, height, lons, lats)
    :raises FileNotFoundError: when no RAP file covers the requested time.
        (The previous try/except that caught FileNotFoundError only to
        re-raise the same exception added nothing and was removed.)
    """
    rap_timestep = raploader.RAPLoader(config['nwp']['path'], time,
                                       time_range_minutes=config['nwp']['time_range_minutes'])
    pres, temp, spec, height, \
        lons, lats = rap_timestep.extract_rap_profile(locations, config['nwp']['wgrib2'])
    return rap_timestep.rap_file, pres, temp, spec, height, lons, lats
def set_nwp_profile(nwp_file, p, t, q, h, lon, lat, dataset):
    """
    Set the RAP data by first converting specific humidity to dew point temperature,
    then linearly interpolating onto the fixed altitude grid.
    ---
    params:
        p : np.array
            pressure in Pa
        t : np.array
            temperature in K
        q : np.array
            specific humidity
        h : np.array
            height in m
    raises:
        ValueError when the profile location is flagged invalid (999.0 sentinel).
    """
    if lon == 999.0 or lat == 999.0:
        raise ValueError(f'[NWP] invalid lon {lon} lat {lat}.')
    altitude_intervals = np.linspace(h[0], h[0] + config['top_window_boundary'], config['nwp']['nwp_dims'])
    t -= 273.15  # convert K to deg C
    pres = interpolate_to_height_intervals(h, p/100., altitude_intervals)  # convert Pa to hPa
    tdry = interpolate_to_height_intervals(h, t, altitude_intervals)
    # Magnus-formula constants for dew point from vapor pressure
    epsilon = 0.622
    A = 17.625
    B = 243.04  # deg C
    C = 610.94  # Pa
    # vapor pressure
    e = p*q / (epsilon + (1 - epsilon)*q)
    if e[0] == 0:  # replace first value with eps if zero
        e[0] = np.finfo(float).eps
    if not e.all():  # forward-fill wherever zeros remain
        # (was `e.all() == 0`, a confusing bool-to-int comparison with the
        # same truth value: True exactly when some element is zero)
        prev = np.arange(len(e))
        prev[e == 0] = 0
        prev = np.maximum.accumulate(prev)
        e = e[prev]
    # dewpoint temperature
    td = B * np.log(e/C) / (A - np.log(e/C))
    td = interpolate_to_height_intervals(h, td, altitude_intervals)
    dataset.nwp_file = nwp_file
    dataset.nwp_lon = lon
    dataset.nwp_lat = lat
    dataset.nwp_pres = pres
    dataset.nwp_tdry = tdry
    dataset.nwp_dp = td
    dataset.nwp_alt = altitude_intervals
def set_noaa_profile(xar, path, s, dataset):
    """Populate `dataset` with sounding `s` from a NOAA multi-station file.

    Merges mandatory and significant-temperature levels, drops NaNs, sorts
    by height, converts temperatures to Celsius, and interpolates onto the
    fixed altitude grid defined by the module-level `config`.

    :param xar: opened NOAA xarray dataset (staName, htMan, prMan, ...)
    :param path: source file path, recorded on the dataset
    :param s: record index of the sounding within `xar`
    :raises ValueError: when the profile tops out below the window boundary
    """
    def _remove_unsorted_vals(arr):
        # assumes that the first value is correct.
        # NOTE(review): despite its name, `is_valid` marks entries that get
        # NaN'd out (v < running value), and `mini` is updated even from a
        # value that was just invalidated — confirm this is intended; the
        # later argsort partially masks any leftover disorder.
        mini = arr[0]
        is_valid = np.zeros(len(arr), dtype=bool)
        for i, v in enumerate(arr[1:]):
            if v < mini:
                is_valid[i+1] = True
            else:
                is_valid[i+1] = False
            mini = v
        arr[is_valid] = np.nan
        return arr
    # counts of mandatory and significant-temperature levels for this record
    numMand = xar.numMand.values[s]
    numSigT = xar.numSigT.values[s]
    htMan = _remove_unsorted_vals(xar.htMan.values[s, :numMand])
    htSigT = _remove_unsorted_vals(xar.htSigT.values[s, :numSigT])
    ht = np.concatenate([htMan, htSigT])
    p = np.concatenate([xar.prMan.values[s, :numMand], xar.prSigT.values[s, :numSigT]])
    t = np.concatenate([xar.tpMan.values[s, :numMand], xar.tpSigT.values[s, :numSigT]])
    # td* variables hold dew point *depression*; subtract from temperature
    td = t - np.concatenate([xar.tdMan.values[s, :numMand], xar.tdSigT.values[s, :numSigT]])
    ht_nans = np.isnan(ht)
    p_nans = np.isnan(p)
    t_nans = np.isnan(t)
    td_nans = np.isnan(td)
    # remove nans
    nans = ht_nans | p_nans | t_nans | td_nans
    ht = ht[~nans]; p = p[~nans]; t = t[~nans]; td = td[~nans]
    # sort by height
    order = ht.argsort()
    ht = ht[order]; p = p[order]; t = t[order]; td = td[order]
    t -= 273.15  # convert K to C
    td -= 273.15
    if max(ht) < config['top_window_boundary']:
        raise ValueError(f"[RAOB] unable to interpolate top boundary layers. " \
                         f"data has max of {max(ht):.3f} < {config['top_window_boundary']} for defined value.")
    altitude_intervals = np.linspace(
        ht[0], ht[0] + config['top_window_boundary'], config['raob']['profile_dims'])
    dataset.sonde_pres = interpolate_to_height_intervals(ht, p, altitude_intervals)
    dataset.sonde_tdry = interpolate_to_height_intervals(ht, t, altitude_intervals)
    dataset.sonde_dp = interpolate_to_height_intervals(ht, td, altitude_intervals)
    dataset.sonde_alt = altitude_intervals
    dataset.sonde_file = path
    dataset.sonde_site_id = xar.staName.values[s].decode('UTF-8').strip().lower()
def set_sgp_profile(sonde, path, dataset):
    """Populate `dataset` with an interpolated SGP radiosonde profile.

    Reads pressure/temperature/dewpoint/altitude from the NetCDF sonde,
    skips duplicate surface-level altitude samples, and linearly interpolates
    each variable onto the fixed altitude grid from `config`.
    """
    pressures = sonde.pres.values
    temps = sonde.tdry.values
    dewpoints = sonde.dp.values
    heights = sonde.alt.values
    # skip any leading samples that repeat the surface altitude
    surface = heights[0]
    start = 0
    for idx in range(1, len(heights)):
        if heights[idx] == surface:
            start = idx
        else:
            break
    altitude_intervals = np.linspace(
        heights[start], heights[start] + config['top_window_boundary'], config['raob']['profile_dims'])
    dataset.sonde_pres = interpolate_to_height_intervals(
        heights[start:], pressures[start:], altitude_intervals)
    dataset.sonde_tdry = interpolate_to_height_intervals(
        heights[start:], temps[start:], altitude_intervals)
    dataset.sonde_dp = interpolate_to_height_intervals(
        heights[start:], dewpoints[start:], altitude_intervals)
    dataset.sonde_alt = altitude_intervals
    dataset.sonde_file = path
    dataset.sonde_site_id = sonde.site_id
def set_rtma_data(time, lon, lat, dataset):
    """Attach an RTMA analysis patch centred on (lon, lat) at `time` to `dataset`.

    Bug fix: the loader is now closed in a `finally` block — previously
    `close()` was skipped whenever patch extraction raised, leaking the
    open RTMA files.
    :raises FileNotFoundError: when no RTMA file covers the requested time
    :raises ValueError: when the lon/lat cannot be mapped into the grid
    """
    # FileNotFoundError from the loader propagates unchanged (likely a
    # missing file for all types).
    rtma_timestep = rtmaloader.RTMALoader(config['rtma']['path'], time, config['rtma']['rtma_type'],
                                          time_range_minutes=config['rtma']['time_range_minutes'])
    try:
        patches, patch_lons, \
            patch_lats = rtma_timestep.extract_image_patch(lon, lat, config['rtma']['patch_x_length_pixels'],
                                                           config['rtma']['patch_y_length_pixels'])
        dataset.rtma_patches = patches[0]
        dataset.rtma_patch_lons = patch_lons
        dataset.rtma_patch_lats = patch_lats
        dataset.rtma_files = np.array(rtma_timestep.rtma_files)
    except ValueError as ve:  # likely invalid lon/lat
        raise ValueError(f'[RTMA] {ve}')
    finally:
        rtma_timestep.close()
def set_goes_data(time, lon, lat, dataset):
    """Attach a GOES-16 ABI patch centred on (lon, lat) at `time` to `dataset`.

    Bug fix: the imager is now closed in a `finally` block — previously
    `close()` was skipped whenever patch extraction raised, leaking the
    open channel files.
    :raises FileNotFoundError: when a channel file is missing for the time
    :raises ValueError: when the lon/lat cannot be mapped into the image
    """
    # FileNotFoundError from the imager propagates unchanged (likely a
    # missing file for all bands).
    goes16_abi_timestep = goesimager.GOES16ABI(config['goes']['path'], time, config['goes']['bands'],
                                               time_range_minutes=config['goes']['time_range_minutes'])
    try:
        patches, patch_lons, \
            patch_lats = goes16_abi_timestep.extract_image_patch(lon, lat, config['goes']['patch_x_length_pixels'],
                                                                 config['goes']['patch_y_length_pixels'],
                                                                 bt=config['goes']['bt'])
        dataset.goes_patches = patches[0]
        dataset.goes_patch_lons = patch_lons
        dataset.goes_patch_lats = patch_lats
        dataset.goes_files = np.array(goes16_abi_timestep.channel_files)
    except ValueError as ve:  # likely invalid lon/lat
        raise ValueError(f'[GOES] {ve}')
    finally:
        goes16_abi_timestep.close()
def extract_sgp_information():
    """Process the SGP radiosondes listed in the valid-files index.

    For each sonde file whose path matches `config['date_regex']` and that
    has not already been processed, gathers the sonde profile plus GOES,
    RTMA and NWP data concurrently, then saves one NetCDF per launch.
    Failures for a single launch are logged and skipped.
    """
    already_processed = glob(join(config['output_path'], '*'))
    with open(config['raob']['valid_sgp_files_path']) as fp:
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
            path = fp.readline().rstrip('\n')
            while path:
                # only process launches matching the configured date filter
                if str(config['date_regex']) not in path:
                    path = fp.readline().rstrip('\n')
                    continue
                # index paths look like: arm-sgp / year / file.cdf
                sonde = xr.open_dataset(
                    join(config['raob']['path'], *path.split('/')[-3:]))
                dataset = DataHolder(pd.Timestamp(sonde['time'].values[0], unit='s', tz='UTC'))
                dataset.sonde_lon = sonde['lon'].values[0]
                dataset.sonde_lat = sonde['lat'].values[0]
                # skip launches whose output file already exists
                if f"{config['output_path']}/sgp_{dataset.sonde_time.strftime('%Y_%m_%d_%H%M')}.nc" in already_processed:
                    path = fp.readline().rstrip('\n')
                    continue
                # fetch the four data sources concurrently
                futures = []
                futures.append(pool.submit(set_sgp_profile, sonde, path, dataset))
                futures.append(pool.submit(set_goes_data, dataset.sonde_time, dataset.sonde_lon,
                                           dataset.sonde_lat, dataset))
                futures.append(pool.submit(set_rtma_data, dataset.sonde_time, dataset.sonde_lon,
                                           dataset.sonde_lat, dataset))
                futures.append(pool.submit(nwp_querry_sgp, dataset.sonde_time,
                                           [(dataset.sonde_lon, dataset.sonde_lat)], dataset))
                try:
                    for future in concurrent.futures.as_completed(futures, timeout=20):
                        try:
                            _ = future.result()
                        except Exception as e:
                            raise e
                    # only save when every piece was collected successfully
                    dataset.save(config['output_path'])
                except Exception as e:
                    print(f"ERROR: {dataset.sonde_site_id} {path.split('/')[-1]}, {e}")
                sonde.close()
                del dataset
                path = fp.readline().rstrip('\n')
def _group_by_times(inds, timestamps):
"""Group radiosondes by release time rounded by day+hour."""
rounded_timestamps = timestamps.round('H')
days = rounded_timestamps.day.values
hours = rounded_timestamps.hour.values
groups = []
for day in np.unique(days):
for hour in np.unique(hours):
group_mask = np.logical_and.reduce([rounded_timestamps.day.values == day,
rounded_timestamps.hour.values == hour])
if group_mask.any():
groups.append(inds[group_mask])
return groups
def _process_station_groups(f, xar, rel_times, group, pool):
    """Process one group of co-timed radiosonde records from NOAA file `f`.

    Fetches the NWP profiles for the whole group in one call (one analysis
    file read), then builds and saves one DataHolder per sonde, collecting
    the NOAA/GOES/RTMA/NWP pieces concurrently on `pool`.
    """
    # locations are different for each record in the group
    locations = list(zip(xar.staLon.values[group], xar.staLat.values[group]))
    # all dates in the group are rounded to the same hour; grab the first
    group_time = pd.Timestamp(rel_times[group[0]], unit='s', tz='UTC')
    try:
        nwp_file, pres, temp, spec, height, \
            lons, lats = extract_nwp_values(group_time, locations)
    except Exception as e:
        # was `except (FileNotFoundError, Exception)` — redundant, since
        # Exception already covers FileNotFoundError
        print(f"ERROR: [NWP] {f.split('/')[-1]}, {e}")
        return
    for i, s in enumerate(group):
        time = pd.Timestamp(rel_times[s], unit='s', tz='UTC')
        dataset = DataHolder(time)
        dataset.sonde_lon = locations[i][0]
        dataset.sonde_lat = locations[i][1]
        futures = []
        futures.append(pool.submit(set_nwp_profile, nwp_file, pres[i], temp[i], spec[i],
                                   height[i], lons[i], lats[i], dataset))
        futures.append(pool.submit(set_noaa_profile, xar, f, s, dataset))
        futures.append(pool.submit(set_goes_data, dataset.sonde_time, dataset.sonde_lon,
                                   dataset.sonde_lat, dataset))
        futures.append(pool.submit(set_rtma_data, dataset.sonde_time, dataset.sonde_lon,
                                   dataset.sonde_lat, dataset))
        try:
            for future in concurrent.futures.as_completed(futures, timeout=20):
                _ = future.result()  # re-raises any worker exception
            # only save when every piece was collected successfully
            dataset.save(config['output_path'])
        except Exception as e:
            print(f"ERROR: {dataset.sonde_site_id} {f.split('/')[-1]}, {e}")
        del dataset
def extract_noaa_information():
    """Process the NOAA multi-station radiosonde files.

    For every NOAA file matching the configured date regex: masks invalid
    release times, filters already-processed and blacklisted sites, groups
    the remaining records by rounded release time, and processes each group
    via _process_station_groups on a shared thread pool.
    """
    # station ids whose data/locations are known to be unusable
    invalid_location_ids = ['9999','adq','akn','anc','ann','bet','bna','brw','cdb',
                            'fai','ito','jsj','lih','mcg','ome','otz','sle','snp','sya','yak']
    already_processed = glob(join(config['output_path'], '*'))
    files = glob(join(f"{config['raob']['noaa_mutli_path']}", '*', f"*{config['date_regex']}*"))
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
        for f in files:
            print('processing:', f)
            try:
                xar = xr.open_dataset(f, decode_times=False)
            except Exception as e:
                print(e)
                continue
            rel_times = xar.relTime.values
            # should the mask look at other values? e.g., top-sfc > 17,000?
            mask = np.logical_and(rel_times != 99999, rel_times < 1e+20)
            rel_times[~mask] = 0  # avoid `pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime` for invalid dates.
            timestamps = pd.to_datetime(rel_times, unit='s')
            # filter out already processed files and invalid sites
            for i, t in enumerate(timestamps):
                site_id = xar.staName.values[i].decode('UTF-8').strip().lower()
                output_file = f"{site_id}_{t.strftime('%Y_%m_%d_%H%M')}.nc"
                if f"{config['output_path']}/{output_file}" in already_processed or site_id in invalid_location_ids:
                    mask[i] = False
            inds = xar.recNum.values[mask]
            timestamps = timestamps[mask]
            groups = _group_by_times(inds, timestamps)
            # thread this to have all groups be processed at the same time.
            for group in groups:
                _process_station_groups(f, xar, rel_times, group, pool)
            xar.close()
def main(config_path):
    """Entry point: load the YAML config, then run the configured extractions."""
    global config
    np.set_printoptions(threshold=sys.maxsize, suppress=True)
    t0 = cpytime.time()
    with open(config_path, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            sys.exit(1)
    if config['raob']['valid_sgp_files_path'] is not None:
        print('sgp')
        extract_sgp_information()
    if config['raob']['noaa_mutli_path'] is not None:
        print('noaa')
        extract_noaa_information()
    print(f"runtime: {cpytime.time()-t0}")
if __name__ == "__main__":
    """
    Usage: python -m soundings.preprocessing.preprocess -c ./soundings/preprocessing/config.yaml
    """
    arg_parser = argparse.ArgumentParser(description='data preprocessing')
    arg_parser.add_argument('-c', '--config', metavar='path', type=str,
                            required=True, help='the path to config file')
    cli_args = arg_parser.parse_args()
    main(config_path=cli_args.config)
|
class Solution:
    def flipgame(self, fronts, backs):
        """
        :type fronts: List[int]
        :type backs: List[int]
        :rtype: int

        Return the smallest value that can face down on every card, or 0.
        A value is impossible only when some card shows it on BOTH sides
        (flipping that card cannot hide it); any other value on any card
        can be made to face down.

        Rewritten from the original O(n^2) list-copy scan to the standard
        O(n) approach, and the stray debug `print` calls were removed —
        library code should not write to stdout.
        """
        # values that can never win: they appear on both sides of one card
        blocked = {front for front, back in zip(fronts, backs) if front == back}
        candidates = [value for value in fronts + backs if value not in blocked]
        return min(candidates, default=0)
# Quick manual check of Solution.flipgame.
a = Solution()
# print(a.flipgame([1,2,4,4,7],[1,3,4,1,3]))
print(a.flipgame([1,1],[2,1]))
|
import turtle

# Draw a wireframe "cube" (two offset squares with joined corners) on a red
# background, then stamp the caption repeatedly at growing sizes.
pen = turtle.Pen()
turtle.hideturtle()
turtle.speed(0)
turtle.bgcolor("red")
turtle.title("cursedddcube")
pen.width(3)

# front face
for _ in range(4):
    pen.forward(100)
    pen.left(90)

# back face, offset by (50, 50)
pen.penup()
pen.sety(50)
pen.setx(50)
pen.pendown()
for _ in range(4):
    pen.forward(100)
    pen.left(90)

# connect the corresponding corners of the two faces
pen.goto(0, 0)
pen.goto(50, 50)
pen.goto(50, 150)
pen.goto(0, 100)
pen.goto(100, 100)
pen.goto(150, 150)
pen.goto(150, 50)
pen.goto(100, 0)

# overlay the caption in ever-larger italic Courier
for size in range(30, 100, 5):
    turtle.color('black')
    turtle.write('CURSED!', font=('Courier', size, 'italic'), align='center')

turtle.hideturtle()
turtle.Screen().exitonclick()
|
import json
import networkx as nx
import random
import matplotlib.pyplot as plt
from strategy import kill, kill_worker, safe, safe_worker
from multiprocessing import freeze_support
from parallel import multiprocess_calc
from config import CAPACITY
class MyGraph(nx.MultiDiGraph):
    """Directed multigraph of idioms: nodes are pinyin syllables, and each
    edge is keyed by the idiom linking its first syllable to its last."""

    def __init__(self, data):
        '''
        Build the digraph from an idiom dict mapping
        idiom -> {'first': pinyin, 'last': pinyin}.
        '''
        super().__init__()
        for name, info in data.items():
            self.add_edge(info['first'], info['last'], key=name)

    def get_outing_edge(self, node):
        '''
        node is a pinyin string; return all outgoing edges as a mapping of
        successor pinyin -> list of idioms.
        example: if node is 'di',
        then return {'yi':['低回不已'], 'yan':['低眉顺眼'], ...}
        '''
        return {succ: list(self.adj[node][succ]) for succ in self.successors(node)}

    def show_graph(self):
        # don't use this: with this many nodes and edges the drawing hangs
        nx.draw(self, with_labels=True)
        plt.show()
class Player():
    """An idiom-chain player; `mode` selects how the next move is chosen."""

    def __init__(self, name, mode):
        self.name = name
        assert mode in ['manual', 'random',
                        'smart'], "mode must be 'manual', 'random' or 'smart'"
        self.mode = mode  # 'manual', 'random', 'smart'

    def emit(self, current, graph):
        '''
        Give a new pinyin string according to graph and 'current' pinyin,
        then make the chosen edge unavailable (remove it from the graph).
        '''
        target_pinyin, idiom = self.choose(graph, current)
        graph.remove_edge(current, target_pinyin, key=idiom)
        print('{}\t: {}'.format(self.name, idiom))
        return target_pinyin

    def choose(self, graph, current):
        '''
        Pick (target_pinyin, idiom) among the outgoing edges of `current`.
        manual: prompt the user; random: uniform pick; smart: rank moves by
        the killing/safe searches enabled via the CAPACITY flags.
        '''
        outing_edge = graph.get_outing_edge(current)
        assert len(outing_edge) > 0, 'No available choice for {}'.format(current)
        if self.mode == 'manual':
            # flatten the per-successor idiom lists for display/validation
            all_idioms = outing_edge.values()
            all_idioms = [item for sublist in all_idioms for item in sublist]
            print('Current available choice: ', all_idioms)
            while True:
                choice = input('Input your choice (idiom): ')
                if choice not in all_idioms:
                    print('Your choice {} is not in {}'.format(
                        choice, list(all_idioms)))
                else:
                    break
            # map the chosen idiom back to its target syllable
            for x in list(outing_edge):
                if choice in outing_edge[x]:
                    target_pinyin = x
                    break
            return target_pinyin, choice
        elif self.mode == 'random':
            target_pinyin = random.choice(list(outing_edge))
            idiom = outing_edge[target_pinyin][0]
            return target_pinyin, idiom
        elif self.mode == 'smart':
            # candidate successor syllables, later ranked best-first:
            # kN = moves that kill the opponent within N extra plies,
            # sN = moves that stay safe for N extra plies.
            all_list = list(outing_edge)
            s0_list = []
            s1_list = []
            s2_list = []
            k0_list = []
            k1_list = []
            k2_list = []
            if CAPACITY[0]:
                # tentatively play each move, test kill(0), then undo it
                for x in all_list:
                    idiom_temp = list(graph.adj[current][x])[0]
                    graph.remove_edge(current, x, key=idiom_temp)
                    if kill(0, x, graph):
                        k0_list.append(x)
                    graph.add_edge(current, x, key=idiom_temp)
                print('k0', k0_list)
            if CAPACITY[1]:
                for x in all_list:
                    idiom_temp = list(graph.adj[current][x])[0]
                    graph.remove_edge(current, x, key=idiom_temp)
                    if safe(0, x, graph):
                        s0_list.append(x)
                    graph.add_edge(current, x, key=idiom_temp)
                print('s0', s0_list)
            if CAPACITY[2]:
                for x in all_list:
                    idiom_temp = list(graph.adj[current][x])[0]
                    graph.remove_edge(current, x, key=idiom_temp)
                    if kill(1, x, graph):
                        k1_list.append(x)
                    graph.add_edge(current, x, key=idiom_temp)
                print('k1', k1_list)
            # k0, s0, k1 do not use multiprocessing
            # while s1, k2, s2 use it
            # TODO
            # BUG in multiprocessing
            # May possibly cause wrong results when calculating the Kn and Sn lists
            # with multiprocessing: the graph is shared between processes, so a
            # modification made by one process can influence the others.
            #
            # In addition, in multiprocess_calc(), terminating the remaining
            # processes once one of them finds a desirable result can leave a
            # process killed after removing an edge but before re-adding it.
            # I realized this when my s2_list was incorrect
            # (s2_list was non-empty yet I lost the game the very next round).
            # Since this bug occurs rarely and I'm busy with homework, it is
            # left as is for now.
            #
            # How to solve? Pass a copy of the graph instead of a reference
            # (passing the graph passes a reference, so callees can mutate the
            # shared graph).
            if CAPACITY[3]:
                s1_list += multiprocess_calc(current,
                                             graph, s0_list, safe_worker, 1)
                print('s1', s1_list)
            if CAPACITY[4]:
                k2_list += multiprocess_calc(current,
                                             graph, all_list, kill_worker, 2, one_only=True)
                print('k2', k2_list)
            if CAPACITY[5]:
                s2_list += multiprocess_calc(current,
                                             graph, s1_list, safe_worker, 2, one_only=True)
                print('s2', s2_list)
            # best-first preference: immediate kills, deeper kills, then safety
            final_list = k0_list + k1_list + k2_list + s2_list + s1_list + s0_list + all_list
            target_pinyin = final_list[0]
            idiom = outing_edge[target_pinyin][0]
            return target_pinyin, idiom
class Game():
    """Runs an idiom-solitaire match between two players.

    ``idiom_data`` maps each idiom to its first/last pinyin syllables,
    e.g. {"阿鼻地狱": {"first": "a", "last": "yu"}, ...}.
    ``player_one``/``player_two`` are (name, strategy) tuples such as
    ('Me', 'smart') passed straight to the Player constructor.
    """

    def __init__(self, idiom_data, initial_idiom, player_one, player_two):
        """Build the idiom graph, consume the opening idiom and seat players."""
        self.graph = MyGraph(idiom_data)
        assert initial_idiom in idiom_data, '{} is not in idiom dataset.'.format(
            initial_idiom)
        entry = idiom_data[initial_idiom]
        # The game continues from the last syllable of the opening idiom.
        self.current = entry['last']
        # The opening idiom is used up, so drop its edge from the graph.
        self.graph.remove_edge(
            entry['first'], self.current, key=initial_idiom)
        self.players = (Player(*player_one), Player(*player_two))

    def run(self):
        """Alternate turns until one player cannot produce an idiom."""
        round_no = 1
        finished = False
        while not finished:
            print('Round {}'.format(round_no))
            round_no += 1
            # Players take turns within each round.
            for contender in self.players:
                try:
                    self.current = contender.emit(self.current, self.graph)
                except Exception as err:
                    # A player that cannot answer loses the game.
                    print(err)
                    print('{} Lost!'.format(contender.name))
                    finished = True
                    break
                print()
def play(player_one, player_two):
    """Load the idiom dataset and run one interactive game.

    Args:
        player_one: (name, strategy) tuple for the first player.
        player_two: (name, strategy) tuple for the second player.
    """
    # Use a context manager so the file handle is closed promptly;
    # the original open(...).read() leaked the handle until GC.
    with open('idiom.json', encoding='utf-8') as f:
        idiom_data = json.load(f)
    game = Game(idiom_data, input('Initial idiom: '), player_one, player_two)
    game.run()
if __name__ == "__main__":
    # Required on Windows when multiprocessing spawns children from a
    # frozen executable; harmless elsewhere.
    freeze_support()
    play(('Me', 'smart'), ('Robot', 'random'))
|
"""Test the entire package; an underscore precedes this file name
so it does not include itself in the test discovery."""
import os.path as osp
from tests_basic import run_tests
test_dir = osp.join(osp.dirname(__file__), "tests_basic")
run_tests(test_dir)
|
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from yaramanager.db.base_class import Base
class Meta(Base):
    """Key/value metadata entry attached to a YARA rule."""
    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    key = Column(String(255), index=True)
    value = Column(String(255), index=True)
    order = Column(Integer)
    rule_id = Column(Integer, ForeignKey("rule.id"))
    rule = relationship("Rule", back_populates="meta")

    def __repr__(self):
        """Human-readable representation, naming the owning rule if set."""
        base = f"<Meta {self.key}={self.value}"
        if self.rule:
            return base + f" (attached to {self.rule.name})>"
        return base + ">"
|
YC = 199.8 # matrix compression strength
GSL = 0.788  # NOTE(review): presumably the mode-II fracture toughness used in the dissipation check -- confirm
length = 0.15  # specimen length (LC1 in the dissipation check below)
thickness = 0.1  # specimen thickness (LC3 in the dissipation check below)
# Verification checks for the simulation output: each entry compares one
# result quantity against a reference value within a tolerance.
parameters = {
	"results": [
        # Peak compressive transverse stress should reach -YC (0.5% tolerance).
        {
            "type": "min",
            "identifier":
                {
                    "symbol": "S22",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": -YC,
            "tolerance": YC * 0.005
        },
        # State variable d2 must fully develop (reach exactly 1.0).
        {
            "type": "max",
            "identifier":
                {
                    "symbol": "SDV_CDM_d2",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": 1.0,
            "tolerance": 0.0
        },
        # d1T must remain exactly zero throughout the run.
        {
            "type": "max",
            "identifier":
                {
                    "symbol": "SDV_CDM_d1T",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": 0.0,
            "tolerance": 0.0
        },
        # d1C must remain exactly zero throughout the run.
        {
            "type": "max",
            "identifier":
                {
                    "symbol": "SDV_CDM_d1C",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": 0.0,
            "tolerance": 0.0
        },
        # Final value of the alpha state variable (expected 53 +/- 0.4).
        {
            "type": "finalValue",
            "identifier":
                {
                    "symbol": "SDV_CDM_alpha",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": 53.0,
            "tolerance": 0.4
        },
        {
            "type": "max",
            "identifier": "Plastic dissipation: ALLPD for Whole Model",
            "referenceValue": GSL * (length * thickness),  # Unrecoverable energy dissipation from fracture * fracture area: GSL*LC1*LC3
            "tolerance": GSL * (length * thickness) * 0.01
        },
        # Stress history must be continuous (no jumps larger than 2.0).
        {
            "type": "continuous",
            "identifier":
                {
                    "symbol": "S22",
                    "elset": "ALL",
                    "position": "Element 1 Int Point 1"
                },
            "referenceValue": 0.0,
            "tolerance": 2.0
        }
    ]
}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import socket

# Interactive UDP sender: reads lines from stdin and sends them to the
# target address until the user types 'exit'.
ip_port = ('127.0.0.1',9997)

sk = socket.socket(socket.AF_INET,socket.SOCK_DGRAM,0)
try:
    while True:
        # Python 3: input() replaces the Python 2 raw_input() used before,
        # and the redundant str(str(...)) double conversion is gone.
        inp = input('输出: ').strip()
        if inp == 'exit':
            break
        # UDP is connectionless, so every sendto() must name the target;
        # in Python 3 the payload must be bytes with an explicit encoding
        # (the original bytes(inp) raises TypeError on Python 3).
        sk.sendto(inp.encode('utf-8'), ip_port)
finally:
    # Close the socket even if input() raises (EOF / Ctrl-C).
    sk.close()
|
#Exercise E.41
import numpy as np
import matplotlib.pyplot as plt
from ODESolver import *
def SIR(u, t):
    """Right-hand side of the SIR ODE system.

    u = [S, I, R] (susceptibles, infected, recovered); t is required by
    the solver interface but unused.  The infection rate ``beta`` is
    read from module scope.
    """
    S, I, R = u
    nu = 0.1  # recovery rate parameter of the ODE system
    return [-beta * S * I,            # dS/dt: susceptibles become infected
            beta * S * I - nu * I,    # dI/dt: infections minus recoveries
            nu * I]                   # dR/dt: recoveries
def terminate(u, t, i):
    """Stop the integration when S+I+R stops being (numerically) conserved.

    Returns True once the population total at step ``i`` differs from
    the step-0 total by more than a small tolerance.
    """
    tol = 1e-6
    total_now = u[i, 0] + u[i, 1] + u[i, 2]
    total_start = u[0, 0] + u[0, 1] + u[0, 2]
    return abs(total_now - total_start) > tol
def solve_SIR():
    """Integrate the SIR model with RK4 over 60 days and return (u, t)."""
    integrator = RungeKutta4(SIR)
    integrator.set_initial_condition([1500, 1, 0])  # S0, I0, R0
    total_days = 60       # simulated time span [days]
    step = 0.5            # minimum time interval between two iterations
    n_steps = int(total_days / step)
    time_points = np.linspace(0, total_days, n_steps + 1)
    # Stops early via terminate() if the population total drifts.
    return integrator.solve(time_points, terminate)
def plot_SIR(t, u, filename):
    """Plot S, I and R against time and save the figure to ``filename``."""
    susceptible, infected, recovered = u[:, 0], u[:, 1], u[:, 2]
    plt.plot(t, susceptible, label='Susceptibles')
    plt.plot(t, infected, label='infected')
    plt.plot(t, recovered, label='Recovered')
    plt.legend()
    plt.xlabel('Time')
    plt.ylabel('Spreading of a disease')
    plt.suptitle('Spreading of a disease by a SIR model')
    plt.savefig(filename)
    plt.show()
# Run the simulation for two infection rates and save one figure each.
beta = 0.0005
u,t = solve_SIR()
plot_SIR(t,u,'SIR_0005.png')
# Lower infection rate for comparison.
beta = 0.0001
u,t = solve_SIR()
plot_SIR(t,u,'SIR_0001.png')
'''
run SIR.py

According to the equation for S:
    S(t+dt) = S(t) - beta*S*I*dt
we can conclude that increasing beta moves people from the S category to
the I category faster: the number of infected grows and S decreases
rapidly. Conversely, with a small beta the infection spreads slowly and
S is only slightly affected.
'''
|
#!/usr/bin/python
import ConfigParser
import subprocess
import socket
import time
import shutil
import os
import sys
import glob
def main():
#parse the config file
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.join(sys.path[0], 'mysqlClusterwareBackup.cfg')))
#set path to different Oracle Homes and binaries
xag_home = config.get('clusterware','xag_home')
grid_home = config.get('clusterware','grid_home')
agctl = xag_home + '/bin/agctl'
#Get list of instance names to attempt backup of
instances = config.get('clusterware', 'instances')
#Get hostname of machine
hostname = socket.gethostname().split('.')[0]
#Loop through instances to attempt backup of and see if they are running on this node
#If they are, back them up
#Otherwise, skip them (they are stopped or on another node)
for instance in instances.split(','):
instance_name = instance.strip()
xag_status = subprocess.check_output([agctl, 'status', 'mysql_server', instance_name]).strip()
if 'is running on' in xag_status:
if hostname in xag_status:
print 'Instance', instance_name, 'is running on this node - continuing'
instance_config = subprocess.check_output([agctl, 'config', 'mysql_server', instance_name]).strip()
for line in instance_config.split('\n'):
if line.startswith('Mysql home:'):
mysql_home = line.split(':')[1].strip()
if line.startswith('Datadir:'):
datadir = line.split(':')[1].strip()
if mysql_home is None:
print ' Could not find MySQL Home for', instance_name, '- skipping it'
elif datadir is None:
print ' Could not find MySQL Home for', instance_name, '- skipping it'
#"We got one!"
else:
print ' Attempting to backup instance', instance_name
#set backup options based upon instance name and the config file
mysql_socket = datadir + '/mysql.sock'
use_osb = config.getboolean('mysqlbackup', 'use_osb')
backup_dir = mysql_home + '/' + config.get('mysqlbackup', 'backup-dir')
backup_user = config.get('mysqlbackup', 'user')
verbose_output = config.getboolean('mysqlbackup', 'verbose')
use_compression = config.getboolean('mysqlbackup','compression')
meb = mysql_home + '/meb/mysqlbackup'
socket_arg = '--socket='+mysql_socket
user_arg = '--user='+backup_user
bdir_arg = '--backup-dir='+backup_dir
meb_cmd = []
meb_cmd.extend([meb, socket_arg, user_arg, bdir_arg])
if use_compression is True:
meb_cmd.append('--compress')
if config.has_option('mysqlbackup', 'compression_method'):
compression_type = '--compress-method=' + config.get('mysqlbackup', 'compression_method')
meb_cmd.append(compression_type)
#Attempt backup using Oracle Secure Backup integration
if use_osb is True:
sbt_db = config.get('mysqlbackup', 'sbt-database-name')
#set sbt options for mysqlbackup
bi_arg = '--backup-image=sbt:'+instance_name+'-'+time.strftime('%Y-%m-%d')
sbt_db_arg = '--sbt-database-name='+sbt_db
btype_arg = 'backup-to-image'
meb_cmd.extend([bi_arg, sbt_db_arg, btype_arg])
#Start the backup job and monitor the result
try:
backup_run = subprocess.check_output(meb_cmd, stderr=subprocess.STDOUT).strip()
print ' mysqlbackup return code was 0 - backup appears to have succeeded'
if verbose_output is True:
print ' verbose output enabled - output from mysqlbackup follows'
print '**************************************************************'
print backup_run
print '**************************************************************'
else:
if backup_run.endswith('mysqlbackup completed OK!'):
print ' last line of output was "mysqlbackup completed OK!" - success!'
else:
print ' unable to verify backup was successful - output from mysqlbackup follows'
print '**************************************************************'
print backup_run
print '**************************************************************'
except subprocess.CalledProcessError as e:
print ' mysqlbackup return code was', e.returncode, '- backup failed'
print ' The output from mysqlbackup follows'
print '**************************************************************'
print e.output.strip()
print '**************************************************************'
#If specified in the config, keep the log file from the job
if config.has_option('mysqlbackup', 'log-dir'):
meb_log_dir = mysql_home + '/' + config.get('mysqlbackup', 'log-dir')
meb_src_log = backup_dir + '/meta/MEB*.log'
print ' copying MEB log to', meb_log_dir
for data in glob.glob(meb_src_log):
shutil.copy(data, meb_log_dir)
#Data was backed up using SBT so local metadata from backup is not needed anymore
print ' removing', backup_dir
shutil.rmtree(backup_dir)
else:
print ' Backups not using Oracle Secure Backup not implemented yet - skipping'
else:
print 'Instance', instance_name, 'is not running on this node - skipping it'
elif 'is not running' in xag_status:
print 'Instance', instance_name, 'is not running at all - skipping it'
else:
print 'Instance', instance_name, 'appears to have an unusual status - skipping and displaying output from agctl'
print ' ', xag_status
print
return
if __name__ == '__main__':
    # Entry point when run from cron or the shell.
    main()
|
# Generated by Django 2.0.2 on 2018-03-10 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds two many-to-many fields on the ``table`` model:

    - ``galerie``: re-added (see dependency 0007_remove_table_galerie),
      now pointing at galerie_photo.Theme,
    - ``visite``: new link to visite_virtuelle.Visite.
    """

    dependencies = [
        ('galerie_photo', '0009_auto_20180306_1611'),
        ('visite_virtuelle', '0004_auto_20180306_1624'),
        ('gestion_table', '0007_remove_table_galerie'),
    ]

    operations = [
        migrations.AddField(
            model_name='table',
            name='galerie',
            field=models.ManyToManyField(to='galerie_photo.Theme'),
        ),
        migrations.AddField(
            model_name='table',
            name='visite',
            field=models.ManyToManyField(to='visite_virtuelle.Visite'),
        ),
    ]
|
# BitCity Studios:
# Cameron O'Leary <coleary9@jhu.edu>
# Steve Griffin <sgriff27@jhu.edu>
# Jeremy Dolinko <j.dolinko@gmail.com>
# Jonathan Rivera <jriver21@jhu.edu>
# Michael Shavit <shavitmichael@gmail.com>
from enemy import Enemy
class MeleeEnemy(Enemy):
    """Enemy that starts attacking when the player comes into melee range."""

    def __init__(self, x, y, width, height, id,
                 nodes, mapWidth, mapHeight, spriteName):
        # All construction is delegated to the Enemy base class.
        super(MeleeEnemy, self).__init__(
            x, y, width, height, id, nodes, mapWidth, mapHeight,
            spriteName)

    def think(self, playerPosition):
        # Let the base class update pathing/state first.
        super(MeleeEnemy, self).think(playerPosition)
        # Attack when the Manhattan distance to the player is under 200
        # units and the attack cooldown has expired.  (The old comment
        # said "100 blocks", which did not match the code.)
        if abs(playerPosition[0] - self.body.x) + \
            abs(playerPosition[1] - self.body.y) < 200 and not \
            self.attackCoolDown.getState():
            self.isAttacking = True
        else:
            self.isAttacking = False
|
"""
Prototype1
Neural Network Using Keras
Predicts next day values. 48 steps ahead forecast.
Uses daytype,temperature,holidays and utilises autocorrelation.
Main Program
"""
#===========================================================================
from cleancsv import clean_csv
from traintest import data_split,predict_data
from data import datas
import datetime
from network import model_build,predict
from analysis import plot_values, errors
import pandas as pd
import time as time
import keras
import warnings
import tensorflow as tf
warnings.filterwarnings("ignore") #comment out to display warnings
#===========================================================================
start_time = time.time()
#===========================================================================
# Training window and the day to forecast (48 half-hour steps ahead).
date_begin='16-Jul-2017'
date_predict='01-Feb-2019' #important for day ahead prediction
# date_end is the day immediately before the prediction day.
date_end=datetime.datetime.strftime(datetime.datetime.strptime(date_predict,
                '%d-%b-%Y')-datetime.timedelta(days=1),'%d-%b-%Y')
clean_csv(date_begin,date_end)
#===========================================================================
df_train=datas(date_begin,date_end)
#===========================================================================
# 48 half-hour steps = one day ahead; val_preappended_data is the number
# of rows reserved/pre-appended for the validation split.
prediction_step=48
val_preappended_data=8046
#---------------------------------------------------------------------------
df_train,df_test=data_split(df_train,val_preappended_data)
#---------------------------------------------------------------------------
'''
data_split for analysis
predict_data for real life
tempmax=23
df_train,df_predict=predict_data(df_train,date_end,date_predict,tempmax_predict)
'''
#---------------------------------------------------------------------------
#===========================================================================
# Build the network, then restore the best weights from a prior run.
model=model_build(df_train,epochs=5,batch_size=32,prediction_step=prediction_step)
model.load_weights("weights.best.hdf5")
model.compile(optimizer='adam',loss='mean_squared_error',metrics=[keras.losses.mean_absolute_percentage_error])
#===========================================================================
predictions=predict(model,df_test,prediction_step)
df_test.drop(df_test.head(val_preappended_data+prediction_step).index,inplace=True) #8112 corresponds to the removal of values used to make prediction
df_test['Predicted']=predictions.values
#===========================================================================
# Plot predictions against actuals and print the error metrics.
plot_values(df_test)
errors(df_test)
#===========================================================================
print(" RUNTIME \n --- %s seconds ---" % (time.time() - start_time))
#===========================================================================
|
import re
class TelefonesBr:
    """Validates and formats Brazilian phone numbers.

    Accepted input examples: "11 97456-7832", "(11) 97456-7832",
    "+55 (11) 97456-7832", "55 11 97456-7832".
    """

    # Raw strings avoid Python's invalid-escape-sequence warnings.
    # Groups: 1=country code, 2=area code (DDD), 3=prefix, 4=suffix.
    _PADRAO_FORMATACAO = r"[+]?(\d{2,3})?[ ]?[(]?(\d{2})[)]?[ ]?(\d{4,5})[-]?(\d{4})"
    _PADRAO_VALIDACAO = r"[+]?(\d{2,3})?[ ]?[(]?\d{2}[)]?[ ]?\d{4,5}[-]?\d{4}"

    def __init__(self, telefone):
        """Store ``telefone`` if it is valid.

        Raises:
            ValueError: if the number does not match the expected format.
        """
        if self.valida_telefone(telefone):
            self.telefone = telefone
        else:
            raise ValueError("Telefone incorreto")

    def __str__(self):
        """Return the number formatted as "+CC (AA) PPPPP-SSSS".

        Defaults the country code to Brazil's "55" when absent.
        """
        resposta = re.match(self._PADRAO_FORMATACAO, self.telefone)
        codigo_pais = resposta.group(1) if resposta.group(1) is not None else "55"
        return f"+{codigo_pais} ({resposta.group(2)}) {resposta.group(3)}-{resposta.group(4)}"

    def valida_telefone(self, telefone):
        """Return True when ``telefone`` fully matches the expected pattern."""
        return re.fullmatch(self._PADRAO_VALIDACAO, telefone) is not None
|
import asyncio
import sys
import os
# Point the service at the production config before any vita imports.
os.environ['VITA_CONFIG'] = '/var/www/config.py'
# Add the ptdraft folder path to the sys.path list
# NOTE(review): hard-coded personal checkout path -- breaks on other machines.
sys.path.append('/Users/disturber/Documents/git/pharmex/')
sys.path.append(os.path.abspath(__file__ + "/../"))
sys.path.append(os.path.abspath(__file__ + "/../modules/vita/"))
sys.path.append(os.path.abspath(__file__ + "/../modules/business/"))
import logging
from modules.kinetic_core import Logger
from elasticsearch_async import AsyncElasticsearch
from datetime import datetime
from agents.AgentExecutorClient import AgentExecutorClient
from supply.ProvisionExecutorClient import ProvisionExecutorClient
from warehouse.ProductsExecutorClient import ProductsExecutorClient
from warehouse.ProductsExecutor import ProductsExecutor
from modules.kinetic_core.Connector import db
from transliterate import translit
Logger.init(logging.DEBUG)
# NOTE(review): host and credentials are hard-coded in source -- move them
# to configuration or environment variables.
es = AsyncElasticsearch(hosts=['52.19.43.93'],
                        http_auth=('vita', 'g6t7y7h98hyg67'), port=8080)
async def clean():
    """Remove price lists whose revision has been deactivated.

    Deletes the matching rows from ``pharmex_pricelists`` and the
    mirrored documents from the ``pricelist2executor-index`` ES index.
    """
    wrong = await db.list("select price_list_id, current_timestamp from pharmex_pricelists where key in (select key from pharmex_pricelist_revision where deactivated_at is not null)")
    for data in wrong:
        price_list_id = int(data["price_list_id"])
        print(price_list_id)
        await db.query("delete from pharmex_pricelists where price_list_id = $1", (int(data["price_list_id"]),))
        try:
            await es.delete(index='pricelist2executor-index', doc_type='item',
                        id=price_list_id)
        except:  # NOTE(review): bare except also swallows CancelledError -- narrow if possible
            pass  # best-effort: the ES document may already be gone
        print("deleted " + str(price_list_id))
    # NOTE(review): on AsyncElasticsearch this returns a coroutine that is
    # never awaited here -- confirm the refresh actually executes.
    es.indices.refresh(index="pricelist2executor-index")
    print("finished")
# Schedule the cleanup task on the event loop.  run_forever() never
# returns, so the process must be stopped manually once "finished" prints.
loop = asyncio.get_event_loop()
loop.create_task(clean())
loop.run_forever()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-04 14:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a watermark ("marca de agua") choice field to the company
    photo gallery model, defaulting to the orange watermark (2)."""

    dependencies = [
        ('web_empresa', '0007_auto_20171004_0923'),
    ]

    operations = [
        migrations.AddField(
            model_name='galeriafotoempresa',
            name='marca_agua',
            field=models.PositiveIntegerField(choices=[(0, 'Ninguna'), (1, 'Blanca'), (2, 'Naranja')], default=2),
        ),
    ]
|
from flask import Blueprint
# Blueprint for player-related views, mounted under /players.
bp = Blueprint('players', __name__, url_prefix='/players')
# Imported last so the routes module can import ``bp`` from this package
# without causing a circular import.
from .import routes
from movieManager.models import Comment, Movie, Viewer
from spiders.movieSpider.movieSpider.spiders.maoYan_comment import MaoyanCommentSpider
class Pipeline(object):
    """Persists scraped Maoyan movie comments into the Django models."""

    def process_item(self, item, spider):
        """Upsert viewer and comment records for MaoyanCommentSpider items.

        Items from other spiders pass through untouched.
        """
        # Pipeline handling for MovieCommentSpider
        if spider.name == MaoyanCommentSpider.name:
            # Upsert the viewer record, keyed by user_id.
            viewerItem = item["viewer"]
            viewer = Viewer.objects.filter(user_id=viewerItem["user_id"])
            viewer_dict = dict(viewerItem)
            if viewer.first():
                # Existing viewer: update in place.
                viewer.update(**viewer_dict)
                viewer = viewer.first()
            else:
                # New viewer: create it, then re-fetch the saved row.
                viewer = Viewer.objects.create(**viewer_dict)
                viewer = Viewer.objects.filter(**viewer_dict).first()
            # Only store the comment if the referenced Movie exists.
            movie = Movie.objects.filter(movie_id=item["movie"])
            if movie.first():
                item_tmp = item
                item_tmp["movie"] = movie.first()
                item_tmp["viewer"] = viewer
                item_tmp_dict = dict(item_tmp)
                comment = Comment.objects.filter(comment_id=item_tmp["comment_id"])
                if comment:
                    # Existing comment: update it.
                    comment.update(**item_tmp_dict)
                    comment = comment.first()
                else:
                    # New comment: create it, then re-fetch the saved row.
                    Comment.objects.create(**item_tmp_dict)
                    comment = Comment.objects.filter(**item_tmp_dict).first()
        return item
import cv2
import numpy as np
def calibrate_camera(images, nx, ny):
    """Compute camera calibration coefficients from chessboard photos.

    Args:
        images: chessboard images taken with the camera to calibrate.
        nx, ny: number of inner chessboard corners per row / column.

    Returns:
        (ret, mtx, dist, rvecs, tvecs) as produced by cv2.calibrateCamera.
    """
    imgpoints, objpoints = process_calibration_images(images, nx, ny)
    height, width = images[0].shape[:2]
    return cv2.calibrateCamera(objpoints, imgpoints, (width, height), None, None)
def process_calibration_images(images, nx, ny):
    """Detect chessboard corners in each calibration image.

    Returns:
        (imgpoints, objpoints): detected 2-D corner positions and the
        matching 3-D object-space grid points (z=0 plane), one entry per
        image where the full chessboard was found.
    """
    imgpoints = []
    objpoints = []
    # One canonical (x, y, 0) grid of corner coordinates, shared by all images.
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0: nx, 0: ny].T.reshape(-1, 2)
    for frame in images:
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # Only keep images where every corner was located.
        if found == True:
            objpoints.append(objp)
            imgpoints.append(corners)
    return imgpoints, objpoints
def undistortImage(image, mtx, dist):
    """Return ``image`` with lens distortion removed using mtx/dist."""
    return cv2.undistort(image, mtx, dist, None, mtx)
def unwrap(image, source, destination):
    """Perspective-warp ``image`` mapping ``source`` points onto ``destination``.

    Returns:
        (warped, M, Minv): the warped image, the forward transform M,
        and Minv, the inverse transform (destination back to source).
    """
    rows, cols = image.shape[:2]
    M = cv2.getPerspectiveTransform(source, destination)
    Minv = cv2.getPerspectiveTransform(destination, source)
    warped = cv2.warpPerspective(image, M, (cols, rows), flags=cv2.INTER_LINEAR)
    return warped, M, Minv
|
import sys
n = int(sys.stdin.readline())
# 4-D DP table: m[x][y][z][s] where x, y, z count the items remaining in
# three pools of n each.  NOTE(review): s appears to track which pool the
# most recent pick came from (index 0 doubling as the initial state) --
# confirm against the original problem statement.
m = [[[[0,0,0] for i in range(n+1)] for i in range(n+1)] for i in range(n+1)]
m[n][n][n][0] = 1
# Fill the table by total picks i = 1..3n, visiting only the (x, y, z)
# states reachable after exactly i picks.
for i in range(1,3*n+1):
    for j in range(max(0,i-2*n),min(n,i)+1):
        for k in range(max(0,i-j-n),min(n,i-j)+1):
            x, y, z = n-j, n-k, n-i+j+k
            for s in range(3):
                # A transition from pool p is allowed only when s != p,
                # i.e. the same pool is never used twice in a row.
                if s != 0 and x+1 <= n:
                    m[x][y][z][s] += m[x+1][y][z][0]
                if s != 1 and y+1 <= n:
                    m[x][y][z][s] += m[x][y+1][z][1]
                if s != 2 and z+1 <= n:
                    m[x][y][z][s] += m[x][y][z+1][2]
ans = m[0][0][0][0]
# NOTE(review): the closing product and the final division by 2 encode
# problem-specific labelling/symmetry factors -- confirm their meaning.
for i in range(2,n+1):
    ans *= i*i*(i-1)
print ans/2
|
#!/usr/bin/python
import time
from datetime import *
# Example timestamp: 2013-10-08 14:30:00 packed as YYYYMMDDHHMMSS.
start_time='20131008143000'
def format_time(start_time):
    """Parse a 'YYYYMMDDHHMMSS' timestamp string into a datetime.

    The result is also stored in the module-level ``fmat_time`` for
    backward compatibility with callers that read the global.

    Args:
        start_time: 14-character packed timestamp, e.g. '20131008143000'.

    Returns:
        datetime.datetime for the given instant.
    """
    global fmat_time
    # strptime understands the packed layout directly, so the original
    # slice-apart / rebuild via '%s-%s-%s %s:%s:%s' (and its Python 2
    # debug prints) is unnecessary.
    fmat_time = datetime.strptime(start_time, "%Y%m%d%H%M%S")
    return fmat_time
# Demo invocation when the script is imported or run (no __main__ guard).
format_time(start_time)
|
import os
# Twitter API credentials if not using ENV variables
# Add credentials here:
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_KEY = ""
ACCESS_SECRET = ""
# DO NOT CHANGE
# Check for twitter credentials in ENV variable
# Fall back on what is entered above
CONSUMER_KEY = os.getenv("T_CONSUMER_KEY", CONSUMER_KEY)
CONSUMER_SECRET = os.getenv("T_CONSUMER_SECRET", CONSUMER_SECRET)
ACCESS_KEY = os.getenv("T_ACCESS_KEY", ACCESS_KEY)
ACCESS_SECRET = os.getenv("T_ACCESS_SECRET", ACCESS_SECRET)
|
"""Unit tests for the JaCoCo source."""
from .base import JaCoCoCommonCoverageTestsMixin, JaCoCoCommonTestsMixin, JaCoCoTestCase
class JaCoCoUncoveredBranchesTest(JaCoCoCommonCoverageTestsMixin, JaCoCoCommonTestsMixin, JaCoCoTestCase):
    """Unit tests for the JaCoCo uncovered-branches metric."""

    METRIC_TYPE = "uncovered_branches"  # metric under test
    JACOCO_XML = "<report><counter type='BRANCH' missed='2' covered='4'/></report>"  # default fixture used by the mixins

    async def test_uncovered_branches_without_branches(self):
        """Test that a JaCoCo XML without branches results in 100% coverage."""
        response = await self.collect(get_request_text="<report><counter type='LINE' missed='4' covered='6'/></report>")
        self.assert_measurement(response, value="0", total="0")
|
class Solution:
    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the diameter (longest path, counted in edges) of the tree.

        Every node is visited once, so time is O(n); the recursion stack
        can reach the tree height H <= n, so space is O(n) worst case.
        """
        def depth(node):
            # Depth (in nodes) of the subtree rooted at ``node``; as a
            # side effect, updates self.dia with the best node-count
            # path passing through ``node``.
            if not node:
                return 0
            left_depth = depth(node.left)
            right_depth = depth(node.right)
            # left subtree path + right subtree path + this node itself.
            self.dia = max(self.dia, left_depth + right_depth + 1)
            return 1 + max(left_depth, right_depth)

        self.dia = 0
        if root is None:
            return 0
        depth(root)
        # self.dia counts nodes on the best path; the diameter is in
        # edges, hence the -1 (e.g. A->B->C has 3 nodes but length 2).
        return self.dia - 1
|
import csv
import numpy as np
def main():
    """Print a LaTeX tabular comparing baseline vs. temporal-scattering accuracy.

    Reads one evaluation CSV per method and emits a per-class row plus an
    average row, each formatted as mean +/- std over the cross-validation
    folds.  (Python 2 script: uses print statements.)
    """
    fold_ct = 4
    eval_scattering = read_eval_csv('eval_scattering.csv', fold_ct)
    eval_baseline = read_eval_csv('eval_baseline.csv', fold_ct)
    print '''
\\begin{tabular}{lcc}
\\toprule
Scene & Baseline & Temporal scattering \\\\
\\midrule
'''[1:-1]
    labels = eval_scattering['labels']
    for i in range(len(labels)):
        row = ''
        row = row + labels[i]
        row = row + ' & '
        # NOTE(review): accuracies appear to be stored as fractions
        # (scaled by 100 here to get percentages) -- confirm upstream.
        per_fold_accuracies = 100*eval_baseline['class_accuracies'][i]
        row = row + '${:04.1f} \pm {:04.1f}$'.format(per_fold_accuracies.mean(), per_fold_accuracies.std())
        row = row + ' & '
        per_fold_accuracies = 100*eval_scattering['class_accuracies'][i]
        row = row + '${:04.1f} \pm {:04.1f}$'.format(per_fold_accuracies.mean(), per_fold_accuracies.std())
        row = row + ' \\\\'
        # Replace zero-padding with \phantom so columns align in print,
        # and escape underscores for LaTeX.
        row = row.replace(' 0', ' \\phantom{0}')
        row = row.replace('_', '\_')
        print row
    print '''
\\bottomrule
'''[1:-1]
    # Final row: fold accuracies averaged across all classes.
    row = ''
    row = row + 'Average'
    row = row + ' & '
    per_fold_accuracies = 100*eval_baseline['mean_accuracies']
    row = row + '${:04.1f} \pm {:04.1f}$'.format(per_fold_accuracies.mean(), per_fold_accuracies.std())
    row = row + ' & '
    per_fold_accuracies = 100*eval_scattering['mean_accuracies']
    row = row + '${:04.1f} \pm {:04.1f}$'.format(per_fold_accuracies.mean(), per_fold_accuracies.std())
    row = row + ' \\\\'
    row = row.replace(' 0', ' \\phantom{0}')
    row = row.replace('_', '\_')
    print row
    print '''
\\end{tabular}
'''[1:-1]
def read_eval_csv(filename, fold_ct):
    """Read a per-class evaluation CSV into numpy arrays.

    Each row has the shape ``label, acc_fold1, ..., acc_fold<fold_ct>``.

    Args:
        filename: path of the CSV file to read.
        fold_ct: number of cross-validation folds (accuracy columns).

    Returns:
        dict with keys:
            'labels': list of class labels (column 0),
            'class_accuracies': list of np.ndarray, one per class, each
                holding the fold_ct per-fold accuracies,
            'mean_accuracies': per-fold accuracies averaged over classes.
    """
    eval_csv = {'labels': [], 'class_accuracies': []}
    # ``with`` guarantees the file is closed even if a row is malformed;
    # the original leaked the handle on any parsing error.
    with open(filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
        for row in csvreader:
            eval_csv['labels'].append(row[0])
            accuracies = np.zeros(fold_ct)
            for fold in range(fold_ct):
                accuracies[fold] = float(row[1 + fold])
            eval_csv['class_accuracies'].append(accuracies)
    eval_csv['mean_accuracies'] = np.mean(eval_csv['class_accuracies'], axis=0)
    return eval_csv
# Run immediately on import/execution (no __main__ guard in this script).
main()
|
import tensorflow as tf
import os
def image_data():
    """Read JPEG files from ./dog and batch them as tensors.

    Uses the legacy TF1 queue-runner input pipeline:
    file-name queue -> WholeFileReader -> decode_jpeg -> resize -> batch.

    :return: None; prints the tensors and the evaluated batch.
    """
    # Build the list of file names
    filename_list = os.listdir("./dog")
    # Join directory and file names into full paths
    file_list = [os.path.join("./dog/", file) for file in filename_list]
    print(filename_list)
    # Build the file-name queue
    file_quenue = tf.train.string_input_producer(file_list)
    # Read one whole file per step
    reader = tf.WholeFileReader()
    key, value = reader.read(file_quenue)
    # Decode the JPEG bytes into an image tensor
    image = tf.image.decode_jpeg(value)
    print("image", image)
    # Adjust the image's shape and dtype
    image_resized = tf.image.resize_images(image, [200, 200])
    print("image_resized", image_resized)
    # Pin the static shape so batching knows the element size
    image_resized.set_shape(shape=[200, 200, 3])
    # Batch the resized images
    image_batch = tf.train.batch([image_resized], batch_size=100, num_threads=1, capacity=100)
    print("image_batch", image_batch)
    with tf.Session() as sess:
        # Create the thread coordinator and start the queue-runner threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        key_new , value_new, image_batch = sess.run([key, value, image_batch])
        print("key \n", key_new)
        # print("value \n", value_new)
        print("image_batch", image_batch)
        # Stop and join the worker threads
        coord.request_stop()
        coord.join(threads)
if __name__=="__main__":
image_data() |
import ui
import mouseModule
import player
import net
import snd
import upgradeStorage
import chat
import app
import localeInfo
import uiScriptLocale
import ime
import uiPickMoney
class UpgradeItemsStorageWindow(ui.ScriptWindow):
BOX_WIDTH = 176
dlgPickMoney = None
def __init__(self):
ui.ScriptWindow.__init__(self)
self.tooltipItem = None
self.sellingSlotNumber = -1
self.pageButtonList = []
self.curPageIndex = 0
self.isLoaded = 0
self.AttachedItemCount = 0
# self.__LoadWindow()
def __del__(self):
ui.ScriptWindow.__del__(self)
upgradeStorage.ClearVector()
def Open(self):
self.__LoadWindow()
self.ShowWindow()
ui.ScriptWindow.Show(self)
def Destroy(self):
upgradeStorage.ClearVector()
self.ClearDictionary()
self.tooltipItem = None
self.wndBoard = None
self.wndItem = None
self.dlgPickMoney.Destroy()
self.dlgPickMoney = None
self.pageButtonList = []
def __LoadWindow(self):
if self.isLoaded == 1:
return
self.isLoaded = 1
pyScrLoader = ui.PythonScriptLoader()
pyScrLoader.LoadScriptFile(self, "UIScript/UpgradeItemsStorageWindow.py")
from _weakref import proxy
## Item
wndItem = ui.GridSlotWindow()
wndItem.SetParent(self)
wndItem.SetPosition(8, 35)
wndItem.SetSelectEmptySlotEvent(ui.__mem_func__(self.SelectEmptySlot))
wndItem.SetSelectItemSlotEvent(ui.__mem_func__(self.SelectItemSlot))
wndItem.SetUnselectItemSlotEvent(ui.__mem_func__(self.UseItemSlot))
wndItem.SetUseSlotEvent(ui.__mem_func__(self.UseItemSlot))
wndItem.SetOverInItemEvent(ui.__mem_func__(self.OverInItem))
wndItem.SetOverOutItemEvent(ui.__mem_func__(self.OverOutItem))
wndItem.Show()
## Close Button
self.GetChild("TitleBar").SetCloseEvent(ui.__mem_func__(self.Close))
self.GetChild("ExitButton").SetEvent(ui.__mem_func__(self.Close))
self.wndItem = wndItem
self.wndBoard = self.GetChild("board")
dlgPickMoney = uiPickMoney.PickMoneyDialog()
dlgPickMoney.LoadDialog()
dlgPickMoney.Hide()
self.dlgPickMoney = dlgPickMoney
## Initialize
self.SetTableSize(2)
def ShowWindow(self):
self.SetTableSize(2)
self.Show()
def __MakePageButton(self, pageCount):
self.curPageIndex = 0
self.pageButtonList = []
text = "I"
pos = -int(float(pageCount-1)/2 * 52)
for i in xrange(pageCount):
button = ui.RadioButton()
button.SetParent(self)
button.SetUpVisual("d:/ymir work/ui/game/windows/tab_button_middle_01.sub")
button.SetOverVisual("d:/ymir work/ui/game/windows/tab_button_middle_02.sub")
button.SetDownVisual("d:/ymir work/ui/game/windows/tab_button_middle_03.sub")
button.SetWindowHorizontalAlignCenter()
button.SetWindowVerticalAlignBottom()
button.SetPosition(pos, 75)
button.SetText(text)
button.SetEvent(lambda arg=i: self.SelectPage(arg))
button.Show()
self.pageButtonList.append(button)
pos += 52
text += "I"
self.pageButtonList[0].Down()
def SelectPage(self, index):
self.curPageIndex = index
for btn in self.pageButtonList:
btn.SetUp()
self.pageButtonList[index].Down()
self.RefreshUpgradeItemsStorage()
def __LocalPosToGlobalPos(self, local):
return self.curPageIndex*upgradeStorage.UPGRADE_ITEMS_STORAGE_PAGE_SIZE + local
def SetTableSize(self, size):
size = upgradeStorage.UPGRADE_ITEMS_STORAGE_SLOT_Y_COUNT
self.__MakePageButton(2)
self.wndItem.ArrangeSlot(0, upgradeStorage.UPGRADE_ITEMS_STORAGE_SLOT_X_COUNT, size, 32, 32, 0, 0)
self.wndItem.RefreshSlot()
self.wndItem.SetSlotBaseImage("d:/ymir work/ui/public/Slot_Base.sub", 1.0, 1.0, 1.0, 1.0)
wnd_height = 130 + 32 * size
self.wndBoard.SetSize(self.BOX_WIDTH, wnd_height)
self.SetSize(self.BOX_WIDTH, wnd_height)
self.UpdateRect()
def RefreshUpgradeItemsStorage(self):
getItemID=upgradeStorage.GetItemID
getItemCount=upgradeStorage.GetItemCount
setItemID=self.wndItem.SetItemSlot
for i in xrange(upgradeStorage.UPGRADE_ITEMS_STORAGE_PAGE_SIZE):
slotIndex = self.__LocalPosToGlobalPos(i)
itemCount = getItemCount(slotIndex)
if itemCount <= 1:
itemCount = 0
setItemID(i, getItemID(slotIndex), itemCount)
self.wndItem.RefreshSlot()
def SetItemToolTip(self, tooltip):
self.tooltipItem = tooltip
def Close(self):
self.Hide()
self.OverOutItem()
if self.dlgPickMoney:
self.dlgPickMoney.Close()
else:
self.dlgPickMoney = None
upgradeStorage.ClearVector()
## Slot Event
def SelectEmptySlot(self, selectedSlotPos):
selectedSlotPos = self.__LocalPosToGlobalPos(selectedSlotPos)
if mouseModule.mouseController.isAttached():
attachedSlotType = mouseModule.mouseController.GetAttachedType()
attachedSlotPos = mouseModule.mouseController.GetAttachedSlotNumber()
if player.SLOT_TYPE_UPGRADE_ITEMS_STORAGE == attachedSlotType:
net.SendUpgradeItemsStorageItemMovePacket(attachedSlotPos, selectedSlotPos, self.AttachedItemCount)
self.AttachedItemCount = 0
else:
attachedInvenType = player.SlotTypeToInvenType(attachedSlotType)
if player.RESERVED_WINDOW == attachedInvenType:
return
net.SendUpgradeItemsStorageCheckinPacket(attachedInvenType, attachedSlotPos, selectedSlotPos)
mouseModule.mouseController.DeattachObject()
def SelectItemSlot(self, selectedSlotPos):
selectedSlotPos = self.__LocalPosToGlobalPos(selectedSlotPos)
if mouseModule.mouseController.isAttached():
attachedSlotType = mouseModule.mouseController.GetAttachedType()
if player.SLOT_TYPE_INVENTORY == attachedSlotType:
attachedSlotPos = mouseModule.mouseController.GetAttachedSlotNumber()
elif player.SLOT_TYPE_UPGRADE_ITEMS_STORAGE == attachedSlotType:
attachedSlotPos = mouseModule.mouseController.GetAttachedSlotNumber()
net.SendUpgradeItemsStorageItemMovePacket(attachedSlotPos, selectedSlotPos, 0)
mouseModule.mouseController.DeattachObject()
else:
curCursorNum = app.GetCursor()
if app.SELL == curCursorNum:
chat.AppendChat(chat.CHAT_TYPE_INFO, localeInfo.UPGRADE_ITEMS_STORAGE_SELL_DISABLE_SAFEITEM)
elif app.BUY == curCursorNum:
chat.AppendChat(chat.CHAT_TYPE_INFO, localeInfo.SHOP_BUY_INFO)
elif app.IsPressed(app.DIK_LALT):
link = upgradeStorage.GetItemLink(selectedSlotPos)
ime.PasteString(link)
elif app.IsPressed(app.DIK_LSHIFT):
itemCount = upgradeStorage.GetItemCount(selectedSlotPos)
if itemCount > 1:
self.dlgPickMoney.SetTitleName(localeInfo.PICK_ITEM_TITLE)
self.dlgPickMoney.SetAcceptEvent(ui.__mem_func__(self.OnPickItem))
self.dlgPickMoney.Open(itemCount)
self.dlgPickMoney.itemGlobalSlotIndex = selectedSlotPos
else:
selectedItemID = upgradeStorage.GetItemID(selectedSlotPos)
itemCount = player.GetItemCount(selectedSlotPos)
mouseModule.mouseController.AttachObject(self, player.SLOT_TYPE_UPGRADE_ITEMS_STORAGE, selectedSlotPos, selectedItemID, itemCount)
snd.PlaySound("sound/ui/pick.wav")
def OnPickItem(self, count):
    """Count-picker dialog callback: attach `count` items from the
    previously chosen storage slot to the mouse cursor."""
    itemSlotIndex = self.dlgPickMoney.itemGlobalSlotIndex
    selectedItemVNum = upgradeStorage.GetItemID(itemSlotIndex)
    mouseModule.mouseController.AttachObject(self, player.SLOT_TYPE_UPGRADE_ITEMS_STORAGE, itemSlotIndex, selectedItemVNum, count)
    # Remembered so the later move packet carries the picked count.
    self.AttachedItemCount = count
def UseItemSlot(self, attachedSlotPos):
    """Use/double-click on a storage slot: request checkout into the inventory.

    NOTE(review): a checkout packet is sent for every inventory slot index on
    one page; presumably the server accepts the first free slot -- confirm.
    """
    attachedSlotPos = self.__LocalPosToGlobalPos(attachedSlotPos)
    for i in xrange(player.INVENTORY_PAGE_SIZE):
        net.SendUpgradeItemsStorageCheckoutPacket(attachedSlotPos, i)
def __ShowToolTip(self, slotIndex):
    """Show the item tooltip for the given (global) storage slot, if a
    tooltip widget is bound to this window."""
    if self.tooltipItem:
        self.tooltipItem.SetUpgradeItemsStorageItem(slotIndex)
def OverInItem(self, slotIndex):
    """Mouse entered a slot: show that item's tooltip (index is window-local)."""
    globalIndex = self.__LocalPosToGlobalPos(slotIndex)
    self.__ShowToolTip(globalIndex)
def OverOutItem(self):
    """Mouse left the slot area: hide any visible item tooltip."""
    tooltip = self.tooltipItem
    if tooltip:
        tooltip.HideToolTip()
def OnPressEscapeKey(self):
    """ESC pressed while this window has focus: close it and report the
    key as handled (True)."""
    self.Close()
    return True
|
import struct
import pandas as pd
import pymongo
from pymongo import MongoClient
import datetime
import time
from os import listdir
from os.path import isfile, join
import ciso8601
# MT4 terminal history importer: parse .hst files (fixed-size header followed
# by fixed 60-byte bar records) and append any bars missing from MongoDB.
head_s = 148       # .hst file header size in bytes
struct_size = 60   # one bar record; matches <Qddddqiq (presumably v401 layout -- confirm)
#script body
if __name__ == "__main__":
    # NOTE(review): backslashes in a non-raw literal; fine on Python 2, but a
    # raw string would be required if this ever moves to Python 3.
    path_to_history = "C:\Users\USERNAME\AppData\Roaming\MetaQuotes\Terminal\88A7C6C356B9D73AC70BD2040F0D9829\history\Ava-Real 1\\"
    filenames = [f for f in listdir(path_to_history) if isfile(join(path_to_history, f))]
    client = MongoClient()
    db = client['FIN_DATA']
    #do it for all files
    for filename in filenames:
        try:
            read = 0  # bytes consumed from the current file
            openTime = []
            openPrice = []
            lowPrice = []
            highPrice = []
            closePrice = []
            volume = []
            with open(path_to_history+filename, 'rb') as f:
                while True:
                    if read >= head_s:
                        # Past the header: read one 60-byte bar per iteration.
                        buf = f.read(struct_size)
                        read += struct_size
                        if not buf:
                            break
                        # <Qddddqiq: time, open, high, low, close, volume, spread, real_volume
                        bar = struct.unpack("<Qddddqiq", buf)
                        openTime.append(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(bar[0])))
                        openPrice.append(bar[1])
                        highPrice.append(bar[2])
                        lowPrice.append(bar[3])
                        closePrice.append(bar[4])
                        volume.append(bar[5])
                    else:
                        # Consume (skip) the file header once.
                        buf = f.read(head_s)
                        read += head_s
            # Digit prefixes only force column ordering; renamed right below.
            data = {'0_openTime':openTime, '1_open':openPrice,'2_high':highPrice,'3_low':lowPrice,'4_close':closePrice,'5_volume':volume}
            result = pd.DataFrame.from_dict(data)
            result = result.set_index('0_openTime')
            result.index.name = "DATE_TIME"
            result.columns = ["OPEN", "HIGH", "LOW", "CLOSE", "VOLUME"]
            print "-------------------------------------------------------"
            tableName = filename[:-4]  # drop the ".hst" extension
            print tableName
            print "Data rows for "+tableName+": %s" %len(result)
            rows = db[tableName]
            #last N rows from table
            if db[tableName].count() > 0:
                #last date in collection
                last_col_date = list(db[tableName].find().skip(db[tableName].count() - 1))
            else:
                # Sentinel: 0 means the collection is empty (index gets created later).
                last_col_date = 0
            #if data exists
            if db[tableName].count() > 0:
                print "last date in collection:"
                dbdte_str = last_col_date[0]['DATE_TIME']
                t = ciso8601.parse_datetime(dbdte_str)
                dbdte = time.mktime(t.timetuple())
                print dbdte_str
                #----------------------------------------------------------------
                print "Last date in file:"
                fldte_str = list(result.tail(1).index)[0]
                fldte_str1 = list(result.iloc[-2:].index)[0]
                t = ciso8601.parse_datetime(fldte_str)
                t1 = ciso8601.parse_datetime(fldte_str1)
                fldte = time.mktime(t.timetuple())
                fldte1 = time.mktime(t1.timetuple())
                print fldte_str
                #----------------------------------------------------------------
                #get estimate of how many bars are missing
                # Bar period is inferred from the spacing of the last two bars.
                period = fldte - fldte1
                print "Period %s" %period
                dif = int((fldte - dbdte)/period)
                print "Difference %s" %dif
                d = result.tail(dif)
                #if len(lst) == 0:
                # NOTE(review): range(0, len(d)-1) skips the newest bar -- confirm intended.
                for b in range(0, len(d)-1):
                    row = {"DATE_TIME": d.ix[b].name,
                           "OPEN": d.ix[b].OPEN,
                           "HIGH": d.ix[b].HIGH,
                           "LOW": d.ix[b].LOW,
                           "CLOSE": d.ix[b].CLOSE,
                           "VOLUME": d.ix[b].VOLUME}
                    print row
                    try:
                        rows.insert_one(row)
                        print "Passed"
                        if last_col_date == 0:
                            #index it for future use
                            db[tableName].create_index([('DATE_TIME', pymongo.ASCENDING)], unique=True)
                    except Exception as e:
                        # Duplicate DATE_TIME (unique index) and similar errors are skipped.
                        print e
                        continue
            else:
                #if len(lst) == 0:
                for b in range(0, len(result)-1):
                    row = {"DATE_TIME": result.ix[b].name,
                           "OPEN": result.ix[b].OPEN,
                           "HIGH": result.ix[b].HIGH,
                           "LOW": result.ix[b].LOW,
                           "CLOSE": result.ix[b].CLOSE,
                           "VOLUME": result.ix[b].VOLUME}
                    try:
                        rows.insert_one(row)
                        if last_col_date == 0:
                            #index it for future use
                            db[tableName].create_index([('DATE_TIME', pymongo.ASCENDING)], unique=True)
                    except Exception as e:
                        print e
                        continue
        except Exception as e:
            # Per-file guard: a corrupt file must not abort the whole import.
            print e
            continue
    print "\nAll done"
|
import unittest
from users import User
class UserTest(unittest.TestCase):
    """Unit tests for the User model (users.User)."""
    def setUp(self):
        '''
        method run before each user test
        '''
        self.new_user = User("Mary", "1234")
    def tearDown(self):
        '''
        method called after each user test
        '''
        # Reset the class-level store so tests remain independent.
        User.users_list = []
    def test_init(self):
        '''
        test method to check if user class is initialize
        '''
        self.assertEqual(self.new_user.init_username,"Mary")
        self.assertEqual(self.new_user.init_password, "1234")
    def test_save_user(self):
        '''
        test method to test if user has been saved
        '''
        self.new_user.save_users()
        self.assertEqual(len(User.users_list),1)
    def test_user(self):
        '''
        test method to test if user has been save
        NOTE(review): exact duplicate of test_save_user -- consider removing
        it or extending it to cover a distinct behavior.
        '''
        self.new_user.save_users()
        self.assertEqual(len(User.users_list),1)
import os, pickle, numpy as np
import keras
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.models import Sequential, Model, Input
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import ImageDataGenerator
# import matplotlib.pyplot as plt
# Maps class index -> class name (filename without extension); filled by read_data.
final = {}

def read_data(link):
    """Load every .npy file under `link` and stack them into one dataset.

    link: directory path ending with a separator; each file holds the samples
    of one class. Files are processed in sorted-name order, and the class
    index of each file is recorded in the module-level `final` dict.

    Returns (train, m_lbl): stacked sample array and the per-row int labels.
    Raises ValueError if the directory contains no .npy files.
    """
    files = sorted(os.listdir(link))
    chunks = []
    label_chunks = []
    for idx, fname in enumerate(files):
        final[idx] = fname.split(".")[0]
        data = np.load(link + fname)
        chunks.append(data)
        label_chunks.append(np.full(data.shape[0], idx))
    # Stack once at the end: repeated np.vstack inside the loop was O(n^2)
    # in total copied data.
    train = np.vstack(chunks)
    m_lbl = np.hstack(label_chunks)
    print(final)
    print(train.shape)
    return train, m_lbl
# Load the training classes and the held-out test set.
train, m_lbl = read_data("../col-774-spring-2018/train/")
test = np.load("../col-774-spring-2018/test/test.npy")
# Shuffle samples and labels together, then one-hot encode the 20 classes.
train_x, m_lbl = shuffle(train, m_lbl, random_state=0)
train_y = to_categorical(m_lbl, num_classes=20)
# train_x -= 255
# test -= 255
# Scale pixel values to [0, 1].
train_x = np.divide(train_x, 255)
test_x = np.divide(test, 255)
# Reshape flat pixel rows into 28x28x1 images, in place.
train_x.resize(train_x.shape[0], 28, 28, 1)
test_x.resize(test_x.shape[0], 28, 28, 1)
# def vgg(input_tensor):
# def two_conv_pool(x, F1, F2, name):
# x = Conv2D(F1, (3, 3), activation=None, padding='same', name='{}_conv1'.format(name))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
# x = Conv2D(F2, (3, 3), activation=None, padding='same', name='{}_conv2'.format(name))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
# x = MaxPooling2D((2, 2), strides=(2, 2), name='{}_pool'.format(name))(x)
# return x
# def three_conv_pool(x, F1, F2, F3, name):
# x = Conv2D(F1, (3, 3), activation=None, padding='same', name='{}_conv1'.format(name))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
# x = Conv2D(F2, (3, 3), activation=None, padding='same', name='{}_conv2'.format(name))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
# x = Conv2D(F3, (3, 3), activation=None, padding='same', name='{}_conv3'.format(name))(x)
# x = BatchNormalization()(x)
# x = Activation('relu')(x)
# x = MaxPooling2D((2, 2), strides=(2, 2), name='{}_pool'.format(name))(x)
# return x
# net = input_tensor
# net = two_conv_pool(net, 64, 64, "block1")
# net = two_conv_pool(net, 128, 128, "block2")
# net = three_conv_pool(net, 256, 256, 256, "block3")
# net = three_conv_pool(net, 512, 512, 512, "block4")
# net = Flatten()(net)
# net = Dense(512, activation='relu', name='fc')(net)
# net = Dense(20, activation='softmax', name='predictions')(net)
# return net
def vgg():
    """Build the VGG-style Sequential classifier for 28x28x1 sketch images.

    Four conv blocks (2, 2, 3, 3 conv layers with 64/128/256/512 filters);
    every conv is followed by BatchNorm + LeakyReLU, every block ends in a
    2x2 max pool. A 512-unit FC layer feeds the 20-way softmax head.
    """
    model = Sequential()

    def add_conv_unit(filters, layer_name, first=False):
        # One conv + BN + LeakyReLU unit; only the first conv declares input_shape.
        if first:
            model.add(Conv2D(filters, (3, 3), activation='linear', padding='same',
                             input_shape=(28, 28, 1), name=layer_name))
        else:
            model.add(Conv2D(filters, (3, 3), activation='linear', padding='same',
                             name=layer_name))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.1))

    block_plan = [("block1", (64, 64)),
                  ("block2", (128, 128)),
                  ("block3", (256, 256, 256)),
                  ("block4", (512, 512, 512))]
    for block_name, filter_sizes in block_plan:
        for conv_idx, filters in enumerate(filter_sizes, start=1):
            add_conv_unit(filters,
                          '{}_conv{}'.format(block_name, conv_idx),
                          first=(block_name == "block1" and conv_idx == 1))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), name='{}_pool'.format(block_name)))

    model.add(Flatten())
    model.add(Dense(512, activation='relu', name='fc'))
    model.add(Dense(20, activation='softmax', name='predictions'))
    return model
# Training hyperparameters.
epoch = 15
batch_size = 100
learning_rate = 0.001
# (Functional-API + augmentation variant kept for reference.)
# X = Input(shape=[28, 28, 1])
# y = vgg(X)
# model = Model(X, y, "VGG")
# opt = optimizers.Adam(lr=learning_rate)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# model.summary()
# data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, vertical_flip=True, horizontal_flip=True)
# data_generator.fit(train_x)
model = vgg()
opt = optimizers.Adam(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
# model.fit_generator(data_generator.flow(train_x, train_y, batch_size=100), steps_per_epoch=1000, epochs=50)
history = model.fit(train_x, train_y, epochs=epoch, batch_size=batch_size, validation_split=0.15, verbose=1)
model.save("vgg16.h5")
# Predict class indices for the test set and write the submission CSV
# using the index->name map built by read_data.
y_classes = model.predict(test_x)
y_classes = np.argmax(y_classes, axis=-1)
print(y_classes[:30])
with open("submit_VGG16.csv", "w") as outfile:
    outfile.writelines("ID,CATEGORY\n")
    for i in range(y_classes.shape[0]):
        outfile.writelines(str(i) + ',' + final[y_classes[i]] + '\n')
print("done")
|
from datetime import datetime, timedelta
from datas_br import DatasBr
#hoje = datetime.today()
#amanha = datetime.today() + timedelta(days=1, hours=20)
#print(hoje - amanha)
# Instantiate the project date wrapper (presumably defaults to "today" --
# confirm in datas_br.DatasBr) and print the elapsed registration time.
hoje = DatasBr()
print(hoje.tempo_cadastro())
from rest_framework import serializers
from napio.models import AlbumInfo, ArtistInfo, PlaylistInfo
class AlbumInfoSerializer(serializers.ModelSerializer):
    """Serializes an AlbumInfo to its name and URL."""
    class Meta:
        model = AlbumInfo
        fields = ['name', 'url']
class ArtistInfoSerializer(serializers.ModelSerializer):
    """Serializes an ArtistInfo to its name and URL."""
    class Meta:
        model = ArtistInfo
        fields = ['name', 'url']
class PlaylistInfoSerializer(serializers.ModelSerializer):
    """Serializes a PlaylistInfo with nested, read-only album/artist lists."""
    albums = AlbumInfoSerializer(many=True, read_only=True)
    artists = ArtistInfoSerializer(many=True, read_only=True)
    class Meta:
        model = PlaylistInfo
        fields = ['name', 'artists', 'albums']
|
from flask import Flask, request, jsonify, render_template
import tensorflow as tf
import time
from selenium import webdriver
import numpy as np
import pandas as pd
import os
from tensorflow.keras.preprocessing.text import Tokenizer
from selenium.webdriver.chrome.options import Options
from tensorflow.keras.preprocessing.sequence import pad_sequences
### SCRAPING PART
# Headless Chrome configured for containerized deployment; binary and driver
# paths come from environment variables (Heroku buildpack convention).
chrome_options = Options()
# NOTE(review): os.environ.get returns None when the variable is unset --
# confirm both GOOGLE_CHROME_BIN and CHROMEDRIVER_PATH are always defined.
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path = os.environ.get("CHROMEDRIVER_PATH"),
                          chrome_options = chrome_options
                          )
def scraping(url, page_list, element):
    """Collect review texts from `page_list` paginated pages of `url`.

    url: base URL ending in a page-number prefix (the page index is appended).
    page_list: number of pages to visit.
    element: CSS selector matching the review text nodes.

    Uses the module-level selenium `driver`; sleeps 10s per page so the
    content can load. Returns a flat list of review strings; for Petco the
    interleaved "Helpful?" labels and the first node are filtered out.
    """
    review_text = []
    check_text = []
    for i in range(page_list):
        if 'petco' in url:
            # Petco pagination URLs end with ".../pg:<n>/ct:r".
            driver.get((url+str(i)+'/ct:r'))
            time.sleep(10)
            text_scrape_1 = driver.find_elements_by_css_selector(element)
            text_list = [i.text for i in text_scrape_1]
            review_text.append(text_list)
        else:
            driver.get((url+str(i)))
            time.sleep(10)
            text_scrape_1 = driver.find_elements_by_css_selector(element)
            text_list = [i.text for i in text_scrape_1]
            review_text.append(text_list)
    # Flatten the per-page lists into one list of strings.
    review_text_1 = [y for x in review_text for y in x]
    if 'petco' in url:
        # Drop the leading node and the "Helpful?" vote labels.
        for index, j in enumerate(review_text_1):
            if (index != 0) and (j != 'Helpful?'):
                check_text.append(j)
        return check_text
    else:
        return review_text_1
### VARIABLES FOR PADDING
vocab_size = 10000
embedding_dim = 60
max_length = 200          # sequences longer than this are truncated
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"         # token for out-of-vocabulary words
### IMPORT OF TRAINING SENTENCES TO CREATE THE TOKENIZER
# The tokenizer must be refit on the original training sentences so the
# word index matches what the saved model was trained with.
training_file = pd.read_csv('training_sentences_df.csv')
training_sentences = training_file.iloc[:,-1]
# print(training_sentences)
trial_list = []
for j in training_sentences:
    # print(j)
    # Strip stray bracket wrapping left over from list serialization.
    if ('[' in j):
        trial_list.append(j[2:-2])
    else:
        trial_list.append(j)
### TOKENIZER and padding
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(trial_list)
### IMPORTING THE MODEL
# Recreate the exact same model, including its weights and the optimizer
model = tf.keras.models.load_model('lemmma_aug_model_0.h5')
def check_genuine_reviews(sentences_raw, model):
    """Deduplicate the scraped reviews, score each with the classifier, and
    return (genuine_count, total_count, genuine_percentage).

    A rounded prediction of 1 marks a review as fake, so the genuine count
    is total minus the sum of the rounded predictions.
    """
    deduped = pd.DataFrame()
    deduped['review_text'] = pd.Series(sentences_raw).unique()
    texts = np.array([review for review in deduped['review_text']])
    sequences = tokenizer.texts_to_sequences(texts)
    padded = pad_sequences(sequences, maxlen=max_length,
                           padding=padding_type, truncating=trunc_type)
    print()
    print(model.predict(x = padded))
    deduped['result'] = np.round(model.predict(x = padded))
    fake_total = deduped['result'].sum()
    total = len(deduped)
    genuine = total - fake_total
    percentage = round(((genuine / (int(total))) * 100), 2)
    return genuine, total, percentage
### FLASK APP
application = Flask(__name__, template_folder='template')

@application.route('/')
def home():
    """Serve the landing page containing the product-link form."""
    return render_template('trial_index.html')
@application.route('/predict',methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI.

    Builds a paginated review URL and CSS selector for the submitted store
    link (Amazon / Petco / Chewy), scrapes up to 10 pages of reviews, scores
    them with the model, and renders the genuine-review percentage.
    '''
    weblink = request.form['product_web_link']
    if 'amazon' in weblink:
        url = 'https://www.amazon.com/' + weblink.split('/')[-4] + '/product-reviews/' + weblink.split('/')[
            -2] + '/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='
        element = 'span.a-size-base.review-text.review-text-content'
    elif 'petco' in weblink:
        url = weblink + '?bvstate=pg:'
        element = '#BVRRContainer p'
    elif 'chewy' in weblink:
        url = 'https://www.chewy.com/' + weblink.split('/')[-3] + '/product-reviews/' + weblink.split('/')[
            -1] + '?reviewSort=NEWEST&reviewFilter=ALL_STARS&pageNumber='
        print(url)
        element = 'span.ugc-list__review__display'
    else:
        # BUG FIX: previously an unrecognized store left `url`/`element`
        # unbound and crashed with NameError (HTTP 500).
        return render_template('trial_index.html',
                               prediction_text='Unsupported store link; please submit an Amazon, Petco or Chewy product URL.')
    page_numbers = 10
    review_list_from_function = scraping(url,
                                         page_numbers,
                                         element)
    genuine_review, total_review, genuine_review_percentage = check_genuine_reviews(sentences_raw = review_list_from_function, model = model)
    # Typo fix in the user-facing message: "Appoximately" -> "Approximately".
    return render_template('trial_index.html', prediction_text ='Approximately {}% of the reviews are genuine for this product.'.format(genuine_review_percentage))
if __name__ == "__main__":
    # Flask debug server -- local development only.
    application.run(debug=True)
import json
from itertools import groupby
from pyramid.view import view_config
from sqlalchemy.orm import aliased, eagerload
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy import and_, or_, desc, func
from bizarro.models.people import *
from bizarro.models.media import Writer, Source
from connect_db import create_session
def get_or_create_writer(session, writer_name, source):
    """Look up a Writer by full name, creating one (with a fresh Person and
    PersonName) when none exists; attach `source` and return the Writer.

    NOTE(review): the source is appended unconditionally, so calling this
    twice with the same source may duplicate the association -- confirm.
    May raise MultipleResultsFound if several writers share the name.
    """
    try:
        writer = session.query(Writer)\
            .join(Person, PersonName)\
            .filter(PersonName.full_name==writer_name)\
            .one()
    except NoResultFound:
        # Create and commit first so the Writer/Person rows get primary keys,
        # then attach the name record.
        writer = Writer(person=Person())
        session.add(writer)
        session.commit()
        writer.person.names.append(PersonName(full_name=writer_name))
    writer.sources.append(source)
    session.commit()
    return writer
|
# Print a right-aligned triangle: rows of 1..6 stars padded with spaces.
for row in range(6):
    print(' ' * (5 - row) + '*' * (row + 1))
#draw letter 'CODE' - user chose font size
|
import gym
from stable_baselines import GAIL, SAC, PPO1
from stable_baselines.gail import ExpertDataset, generate_expert_traj
import gym_donkeycar
# Load the pretrained GAIL policy and drive the Donkey Car simulator with it
# indefinitely (Ctrl-C to stop).
model = GAIL.load("gail_donkeycar")
env = gym.make('donkey-generated-track-v0')
obs = env.reset()
while True:
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    # NOTE(review): the env is never reset when an episode ends -- confirm
    # the simulator auto-resets on done.
    env.render()
|
# Generated by Django 3.1.7 on 2021-10-08 00:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames cvetable.cvename to cve_id and adds a cve_description column."""
    dependencies = [
        ('home', '0011_bluetoothdevice_location'),
    ]
    operations = [
        migrations.RenameField(
            model_name='cvetable',
            old_name='cvename',
            new_name='cve_id',
        ),
        migrations.AddField(
            model_name='cvetable',
            name='cve_description',
            # One-off default used only to backfill existing rows.
            field=models.CharField(default='CVE-1000', max_length=500),
            preserve_default=False,
        ),
    ]
|
import sys
sys.stdin = open('input.txt')  # redirect stdin so input() reads the test file
# SWEA-style stock trading: maximum profit when you may hold any number of
# shares and sell them all at any later day.
for t in range(1, int(input())+1):
    n = int(input())
    prices = list(map(int, input().split()))
    result = 0
    highest = 0
    # Scan right-to-left: any price below the running future maximum is a
    # profitable buy at (highest - price).
    for i in range(n-1, -1, -1):
        if prices[i] < highest:
            result += highest - prices[i]
        else:
            highest = prices[i]
    print(f'#{t} {result}')
"""
풀기 전에 swea 댓글을 봐버렸어요.........
"""
|
from django.db import models
from django.urls import reverse
class Group(models.Model):
    """A collection group (category) that coins belong to."""
    title = models.CharField(max_length=255, verbose_name='Наименование группы')
    slug = models.SlugField(unique=True, verbose_name='URL')
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Resolved against the named URL pattern 'group'.
        return reverse('group', kwargs={"slug": self.slug})
    class Meta:
        verbose_name = 'Группа'
        verbose_name_plural = 'Группы'
class Coin(models.Model):
    """A single coin in the collection, attached to a Group.

    Most descriptive fields are optional (null/blank) because catalog data
    is often incomplete.
    """
    group = models.ForeignKey(Group, on_delete=models.CASCADE, verbose_name='Группа')
    title = models.CharField(max_length=255, verbose_name='Наименование монеты')
    slug = models.SlugField(unique=True, verbose_name='URL')
    price = models.DecimalField(verbose_name="Цена", max_digits=9, decimal_places=2,null=True, blank=True)
    img = models.ImageField(verbose_name='Фото', upload_to='collection/photo/%Y/%m/%d/',null=True, blank=True)
    year = models.CharField(verbose_name='Год выпуска', max_length=255,null=True, blank=True)
    denomination = models.CharField(max_length=255, verbose_name='Номинал', null=True, blank=True)
    diameter = models.DecimalField(verbose_name="Диаметр", max_digits=9, decimal_places=2,null=True, blank=True)
    edition = models.IntegerField(verbose_name='Тираж', null=True, blank=True)
    material = models.CharField(max_length=255, verbose_name='Материал',null=True, blank=True)
    weight = models.DecimalField(verbose_name='Вес',max_digits=9, decimal_places=2,null=True, blank=True)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = 'Монета'
        verbose_name_plural = 'Монеты'
    def get_absolute_url(self):
        # Resolved against the named URL pattern 'coin'.
        return reverse('coin', kwargs={'slug': self.slug})
# _*_ coding: utf-8 _*_
# @Time : 2019/8/29 1:15
# @Author : Ole211
# @Site :
# @File : camera calibration.py
# @Software : PyCharm
import cv2
import os
import numpy as np
import glob
os.chdir('d:\\img\\cal\\test')
# Termination criteria for the sub-pixel corner refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points like (0, 0, 0), (1, 0, 0), (2, 0, 0) ...,(6, 5, 0)
objp = np.zeros((6*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
# Arrays to store object points (3D, board frame) and image points (2D pixels).
objpoints = []
imgpoints = []
images = glob.glob('*.jpg')
for fname in images:
    print(fname)
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    print(ret)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # BUG FIX: store the sub-pixel refined corners2 -- the raw `corners`
        # were being appended, silently discarding the refinement above.
        imgpoints.append(corners2)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
        cv2.imshow('img', img)
        # Pump the GUI event loop so each detection actually renders
        # (imshow alone never updates the window inside a loop).
        cv2.waitKey(500)
# Calibration
# So now we have our object points and image points we are ready to go for calibration.
# For that we use the function, cv2.calibrateCamera(). It return the camera
# matrix, distortion coeffcients, rotation and translation vectors etc
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# Undistortion
'''
We have got what we were trying. Now we can take an image and undistort it. OpenCV comes with two methods,
we will see both. But before that, we can refine the camera matrix based on a free scaling parameter
using cv2.getOptimalNewCameraMatrix(). If the scaling parameter alpha=0, it returns undistorted image with minimum unwanted pixels.
So it may even remove some pixels at image corners. If alpha=1, all pixels are retained with some extra black images.
It also returns an image ROI which can be used to crop the result.
'''
# Undistortion
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Conv2D, MaxPooling2D, UpSampling3D, Conv3D, Conv3DTranspose, Dropout
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.models import Model
from constants import *
import preprocess
# Weight grid for the loss: arr[i,j,k] = i+j+k, then res takes the
# elementwise minimum over reflections along each single axis and each axis
# pair (the triple flip over (0,1,2) was not part of the original set).
# Vectorized: the original triple Python loop over 64^3 voxels was slow.
coords = np.indices((64, 64, 64))
arr = coords.sum(axis=0).astype(np.float64)  # same values/dtype as the loop version
res = arr
for axes in (0, 1, 2, (0, 1), (0, 2), (1, 2)):
    res = np.minimum(res, np.flip(arr, axis=axes))
def weighted_bce(y_true, y_pred):
    """Binary cross-entropy weighted 3x on occupied voxels and scaled by the
    precomputed reflection-minimum grid `res` (module global)."""
    weights = (y_true * 2) + 1.  # 1 for empty voxels, 3 for filled ones
    # NOTE(review): assumes `res` (64,64,64) broadcasts over the batch axis
    # of y_true -- confirm the tensor shapes.
    weights *= res
    bce = K.binary_crossentropy(y_true, y_pred)
    weighted_bce = K.mean(bce * weights)
    return weighted_bce
def unison_shuffled_copies(a, b):
    """Return copies of a and b reordered by one shared random permutation.

    Both inputs must support fancy indexing (e.g. numpy arrays) and have the
    same length, so row i of each output comes from the same source row.
    """
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
def make_dnn(**kwargs):
    """Build the 2D-encoder / 3D-decoder reconstruction network.

    Input: stacked 2D projections of shape (PROJ_SHAPE[0], PROJ_SHAPE[1],
    NUM_VIEWS). Output: a (64, 64, 64) occupancy volume with sigmoid
    activations. `kwargs` is accepted for call-site compatibility but unused.
    """
    inputs = Input(shape=(PROJ_SHAPE[0], PROJ_SHAPE[1], NUM_VIEWS))
    x = inputs

    # Encoder: conv -> 2x2 max-pool -> dropout stages; only the first conv
    # uses a 7x7 kernel, the rest are 3x3.
    for n_filters, kernel_size in ((32, 7), (64, 3), (64, 3),
                                   (128, 3), (128, 3), (256, 3)):
        x = Conv2D(n_filters, kernel_size, activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.2)(x)
    x = Flatten()(x)

    # FC bottleneck reshaped into a small 3D feature volume.
    x = Dense(4320, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Reshape((6, 6, 6, 20))(x)

    # Decoder: upsample + transposed 3D convolutions up to 64^3.
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(256, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(128, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(64, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(1, 3, activation='sigmoid', padding='valid')(x)
    outputs = Reshape((64, 64, 64))(x)

    return Model(inputs=inputs, outputs=outputs)
def main():
    """Entry point: build the network and print its summary.

    The data-loading / compile / fit wiring is intentionally left commented
    out so the architecture can be inspected without the dataset present.
    """
    # x_train, y_train = preprocess.load_data()
    # x_train, y_train = unison_shuffled_copies(x_train, y_train)
    dnn = make_dnn()
    # dnn.compile(optimizer='Adam', loss=weighted_bce)
    dnn.summary()
    # history = dnn.fit(x_train, y_train, batch_size=None, epochs=1, validation_split=0.1)

if __name__ == '__main__':
    main()
import pygame
from pygame.locals import *
# Colors
# Color constants (RGB).
COLOR_BLACK = pygame.Color( 0, 0, 0)
COLOR_WHITE = pygame.Color(255, 255, 255)
COLOR_RED = pygame.Color(255, 0, 0)
COLOR_DARKGRAY = pygame.Color( 50, 50, 50)
# Game logic
(PLAYER_LEFT, PLAYER_RIGHT) = (1, 2)
# Vertical movement: up is negative y in screen coordinates.
(DIR_UP, DIR_DOWN, DIR_NONE) = (-1, 1, 0)
# Key bindings: left player a/y, right player k/m.
(KEY_LEFT_UP, KEY_LEFT_DOWN, KEY_RIGHT_UP, KEY_RIGHT_DOWN) = (K_a, K_y, K_k, K_m)
|
from math import floor, log, pow
def generador_de_primos(limite_alto):
    """Return the list of primes in [2, limite_alto) via trial division.

    2 is always included; odd candidates are tested only against primes up
    to their square root.
    """
    primos = [2]
    for candidato in range(3, limite_alto, 2):
        es_primo = True
        idx = 0
        while primos[idx] * primos[idx] <= candidato:
            if candidato % primos[idx] == 0:
                es_primo = False
                break
            idx += 1
        if es_primo:
            primos.append(candidato)
    return primos
# Project Euler 5: smallest number evenly divisible by all of 1..divisorMaximo.
# Answer = product over primes p <= N of p^floor(log N / log p).
divisorMaximo = 20
p = generador_de_primos(divisorMaximo)
print p
resultado = 1
for i in range(0, len(p)):
    # Largest exponent a with p[i]**a <= divisorMaximo.
    a = int(floor(log(divisorMaximo) / log(p[i])))
    print a
    resultado = resultado * int(pow(p[i], a))
    print resultado
print "El numero mas pequeño que puede ser dividido entre el"\
    "rango 1-20 es: %s" % (resultado)
|
import tkinter as tk
import webbrowser
def callback(url):
    """Open `url` in a new browser window/tab."""
    webbrowser.open_new(url)

root = tk.Tk()
# specify size of window.
root.geometry("600x300")
subject_grades = [36,67,80]
subject = ["Mathematics","Science","Social Science"]
#finding the minimun grades
index = subject_grades.index(min(subject_grades))
# Create text widget and specify size.
T = tk.Text(root, height = 30, width = 69)
# Create label
l = tk.Label(root, text = "Feedback")
l.config(font =("Courier", 29))
# Feedback message targets the subject with the lowest grade.
Fact = "Grades were not upto to the mark for "+ subject[index] +". Here are the Resources to progress further."
# Clickable link label (styled as a hyperlink) to remedial resources.
link1 = tk.Label(root, text="Link", fg="blue", cursor="hand2",)
link1.bind("<Button-1>", lambda e: callback("http://www.algebra4children.com/eigth-grade-math-worksheets.html"))
l.pack()
link1.pack()
T.pack()
# Insert The Fact.
T.insert(tk.END, Fact)
tk.mainloop()
from openmdao.utils.assert_utils import assert_check_partials
import numpy as np
import pytest
def test_reshape_tensor2vector():
    """Flattening the rank-4 arange tensor must reproduce the plain arange
    vector, and the reshape component's partials must check out."""
    import omtools.examples.valid.ex_reshape_tensor2_vector as example
    # Removed unused locals (i/j/k/l and the unused tensor) -- only the
    # flattened vector is compared here.
    shape = (2, 3, 4, 5)
    desired_output = np.arange(np.prod(shape))
    np.testing.assert_almost_equal(example.prob['reshape_tensor2vector'],
                                   desired_output)
    partials_error = example.prob.check_partials(
        includes=['comp_reshape_tensor2vector'],
        out_stream=None,
        compact_print=True)
    assert_check_partials(partials_error, atol=1.e-6, rtol=1.e-6)
def test_reshape_vector2tensor():
    """Reshaping the arange vector must reproduce the rank-4 arange tensor,
    and the reshape component's partials must check out."""
    import omtools.examples.valid.ex_reshape_vector2_tensor as example
    # Removed unused locals (i/j/k/l and the unused vector) -- only the
    # reshaped tensor is compared here.
    shape = (2, 3, 4, 5)
    desired_output = np.arange(np.prod(shape)).reshape(shape)
    np.testing.assert_almost_equal(example.prob['reshape_vector2tensor'],
                                   desired_output)
    partials_error = example.prob.check_partials(
        includes=['comp_reshape_vector2tensor'],
        out_stream=None,
        compact_print=True)
    assert_check_partials(partials_error, atol=1.e-5, rtol=1.e-5)
|
from . import visitors
import numbers
class node(object):
    """A node in an expression tree.

    node.value is a subclass of GeneralOperator, a subclass of Var, or a
    plain int/float/complex; node.children holds the operand subtrees.
    expr_type stays None until assign_types() is called.
    """
    def __init__(self, v = None):
        '''
        If v is a node, then this copies the tree rooted at v. This will
        copy expr_type attributes as well.
        Otherwise this constructs a new node with value v and no children.
        node.value should be some subclass of GeneralOperator,
        a subclass of Var, or an int/float/complex number.
        By default, n.expr_type is None. To assign the types for n and all
        children you must explicitly call n.assign_types().
        '''
        # copy constructor
        if isinstance(v, node):
            self.value = v.value
            self.expr_type = v.expr_type
            self.children = [node(i) for i in v.children]
        # ordinary constructor
        else:
            self.value = v
            self.children = []
            self.expr_type = None
    def accept(self, visitor):
        ''' Dispatch this node to the given visitor (visitor pattern). '''
        return visitor.visit(self)
    def assign_types(self):
        ''' Assign expr_type for this node and all of its children. '''
        self.accept(visitors.Recognizer(assign_types = True))
    def copy(self, value = None, recursive = False):
        '''
        This allows us to use node instances to create new nodes
        without actually importing the node module elsewhere.
        This avoids a circular import between node.py and visitors.py.
        If value is not None, then recursive is assumed to be False.
        The result will have no children.
        Otherwise, if recursive is True then all children will be copied.
        If recursive is False, then the result will have no children.
        If n is a node, and X is anything:
            n.copy()
            <==> node(n.value)
            n.copy(recursive = True)
            <==> node(n)
            n.copy(value = X)
            <==> node(X)
        '''
        # BUG FIX: use an identity check. `value != None` invokes __eq__ on
        # arbitrary values and can misfire for objects with custom equality.
        if value is not None:
            return node(value)
        elif not recursive:
            return node(self.value)
        return node(self)
    def construct(self, left_child, value, right_child = None, assign_types = False):
        '''
        Return a new node with the given value, and
        left_child and right_child as children.
        If right_child is None, then the result will have one child.
        Otherwise, the result will have two children.
        This does no other checks on the input, except for those done
        by the node constructor when node(value) is called.
        '''
        result = node(value)
        result.children.append(left_child)
        # Identity check, for the same reason as in copy().
        if right_child is not None:
            result.children.append(right_child)
        if assign_types:
            result.assign_types()
        return result
    def strict_match(self, other):
        ''' Return True iff both trees print identically (same structure and values). '''
        return str(self) == str(other)
    def __repr__(self):
        ''' Return a string representation of this tree in RPN. '''
        # This is is used for evaluating test results.
        # Be careful if you want to change it.
        return self.accept(visitors.Printer(mode = visitors.Printer.POSTFIX_MODE))
    def __str__(self, mode = visitors.Printer.INFIX_MODE):
        '''
        Produce a string representation of this tree's structure.
        mode is a string that corresponds to a static member of visitors.Printer:
        mode == "prefix" produces an unparenthesized prefix representation
        mode == "postfix" produces an unparenthesized postfix representation
        mode == "infix" produces a fully parenthesized infix representation
        mode == "tree" produces a multiline tree-like representation, with
        children indented according to their depth.
        '''
        return self.accept(visitors.Printer(mode = mode))
    def reduce(self, replace_constants = False):
        '''
        Computes and returns the value of this node.
        This does not alter this instance at all.
        replace_constants = True will replace Constant objects with an
        approximate numeric values, e.g., E() becomes 2.7182818...
        replace_constants = False will leave E() in the tree.
        '''
        return self.accept(visitors.Reducer(replace_constants = replace_constants))
    def replace(self, symbol, expr):
        ''' Replace all occurrences of symbol with expr. '''
        return self.accept(visitors.Replacer(symbol, expr))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-19 11:36
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates ExamInfoModel with a unique
    (subject, exam_time) pair."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ExamInfoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=30, verbose_name='科目名称')),
                ('exam_time', models.DateTimeField(verbose_name='考试开始时间')),
                # Generated default captured at makemigrations time.
                ('exam_time_end', models.DateTimeField(default=datetime.datetime(2016, 4, 19, 11, 36, 29, 295564, tzinfo=utc), verbose_name='考试结束时间')),
                ('register_deadline', models.DateTimeField(default=django.utils.timezone.now, verbose_name='报名截至时间')),
                ('fee', models.IntegerField(verbose_name='考试费用')),
                ('notes', models.CharField(default='', max_length=512, verbose_name='备注')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='examinfomodel',
            unique_together=set([('subject', 'exam_time')]),
        ),
    ]
|
#!/usr/bin/python3
"""
Gather data from an API
"""
import requests
from sys import argv
if __name__ == '__main__':
    # Fetch the employee record and the employee's TODO list, then report
    # how many tasks are completed and list their titles.
    employee_id = argv[1]
    base_url = 'https://jsonplaceholder.typicode.com'
    user_out = requests.get('{}/users/{}'.format(base_url, employee_id)).json()
    task_out = requests.get('{}/todos?userId={}'.format(base_url, employee_id)).json()
    completed = [t for t in task_out if t.get('completed') is True]
    print('Employee {} is done with tasks({}/{}):'
          .format(user_out.get('name'), len(completed), len(task_out)))
    for done in completed:
        print('\t {}'.format(done.get('title')))
|
from nvdomain.constants import LOCATIONS
from util.spelling import OldToModernSpellingPair
def extract_location(text):
    """Return the spelling pair whose old spelling occurs in *text*.

    When several old spellings match, delegate to
    get_last_matching_location to pick the right-most one; return ""
    when nothing matches.
    """
    candidates = [
        OldToModernSpellingPair(old, modern)
        for old, modern in sorted(LOCATIONS.items(), reverse=True)
        if old in text
    ]
    if not candidates:
        return ""
    if len(candidates) == 1:
        return candidates[0]
    return get_last_matching_location(candidates, text)
def get_last_matching_location(matching_locations, text):
    """Return the location whose first occurrence in *text* is right-most.

    Returns "" when *matching_locations* is empty. Raises ValueError if a
    location's original spelling is unexpectedly absent from *text*
    (previously this printed debug output and killed the process).
    """
    # Start below any valid index so a match at position 0 can win.
    # (The original started at 0 with a strict '>', so a location found at
    # index 0 could never be selected and "" was returned instead.)
    highest_index = -1
    winning_location = ""
    for location in matching_locations:
        try:
            index = text.index(location.original)
        except ValueError:
            # text.index is the only call here that raises; keep the
            # exception narrow instead of a bare except + exit().
            raise ValueError(
                "location %r not found in text" % (location.original,))
        if index > highest_index:
            highest_index = index
            winning_location = location
    return winning_location
|
def cevre_bul(uk,kk):
    """Return the perimeter of a rectangle with edges uk and kk (accepts digit strings)."""
    return 2 * (int(uk) + int(kk))
def alan_bul(uk,kk):
    """Return the area of a rectangle with edges uk and kk (accepts digit strings)."""
    return int(uk) * int(kk)
# Interactive driver: read both edge lengths (digits only), re-prompting
# until the long edge is really the longer one, then print area/perimeter.
uk=input("uzun kenar: ")
while not uk.isdigit():
    print("bir sayı değeri giriniz")
    uk=input("uzun kenar: ")
kk=input("kısa kenar: ")
while not kk.isdigit():
    print("bir sayı değeri giriniz")
    kk=input("kısa kenar: ")
# BUG FIX: compare numerically. The original compared the raw input
# strings, so e.g. "9" > "10" held lexicographically and valid input
# could be rejected (or invalid input accepted).
while int(kk) > int(uk):
    print("uzun kenar, kısa kenardan büyük olmalıdır...")
    uk=input("uzun kenar: ")
    while not uk.isdigit():
        print("bir sayı değeri giriniz")
        uk=input("uzun kenar: ")
    kk=input("kısa kenar: ")
    while not kk.isdigit():
        print("bir sayı değeri giriniz")
        kk=input("kısa kenar: ")
cevre= cevre_bul(uk, kk)
alan= alan_bul(uk, kk)
print("alan: {0}\nçevre: {1}".format(alan,cevre))
#Aisha Siddiq 81047072 Project 4
import OthelloGame
def ROW_NUM():
    '''Prompts the user for the row number
    '''
    while True:
        try:
            row = int(input())
        except ValueError:
            print('Please print an integer to specify the row number')
            continue
        if 4 <= row <= 16 and row % 2 == 0:
            return row
        print('Please enter an even integer from 4-16')
def COLUMN_NUM():
    '''Prompts the user for the column number
    '''
    while True:
        try:
            column = int(input())
        except ValueError:
            print('Please print an integer to specify the column number')
            continue
        if 4 <= column <= 16 and column % 2 == 0:
            return column
        print('Please enter an even integer from 4-16')
def first_move():
    '''Asks the user to specify who should be the first player
    '''
    while True:
        choice = input().upper()
        if choice in ('B', 'W'):
            return choice
        print('Please enter "B" or "W" for the beginning player')
def start_piece_position():
    '''Asks the player who will be the top left player and the bottom left player
    '''
    while True:
        corner = input().upper()
        if corner == 'B':
            return ('B', 'W')
        if corner == 'W':
            return ('W', 'B')
        print('Please enter "B" or "W" to specify to top left player')
def row_column_prompt():
    '''Prompts the user for a row and column pair.

    Reads one line, splits it, and returns the first two fields converted
    to zero-based integers. Re-prompts with INVALID on malformed input.
    '''
    while True:
        try:
            parts = input().split()
            # int() raises ValueError on non-numeric fields; indexing
            # raises IndexError when fewer than two fields were entered.
            return (int(parts[0]) - 1, int(parts[1]) - 1)
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so EOFError/KeyboardInterrupt
            # still propagate instead of spinning forever on INVALID.
            print('INVALID')
def insert_row_column(row, column):
    '''Prompts the user to input a row and column number
    '''
    while True:
        r, c = row_column_prompt()
        if 0 <= r < row and 0 <= c < column:
            return (r, c)
        print('INVALID')
def print_board(game_state):
    '''Print every cell of the game board, row by row.'''
    for board_row in game_state.game_board:
        # Each cell is followed by a single space, then a newline per row.
        line = ''.join('{} '.format(cell) for cell in board_row)
        print(line)
def print_turn(game_state):
    '''Print which player moves next.'''
    print('TURN: {}'.format(game_state._turn))
def print_score(game_state):
    '''Print the running score for both players.'''
    score_line = '{}: {} {}: {}'.format(
        game_state.Black, game_state.total_black,
        game_state.White, game_state.total_white)
    print(score_line)
def play_Othello():
    '''Run one complete interactive game of Othello.

    Prompts for board size, starting player, corner layout and the winning
    rule ('>' = most points wins, '<' = least points wins), then loops over
    moves until the game reports a winner.
    '''
    row = ROW_NUM()
    column = COLUMN_NUM()
    who_plays_first = first_move()
    top_piece, bottom_piece = start_piece_position()
    game_state = OthelloGame.OthelloGame(row, column, who_plays_first, top_piece, bottom_piece)
    game_state.create_board()
    # Ask for the winning rule until a valid symbol is entered.
    while True:
        winning_player = input()
        if winning_player != '>' and winning_player != '<':
            print('Please enter ">" or "<" to specify the winning method')
            continue
        break
    game_state.player_score()
    print_score(game_state)
    print_board(game_state)
    print_turn(game_state)
    # Main move loop: any move rejected by the engine re-prompts.
    while True:
        (row_number, column_number) = insert_row_column(row, column)
        try:
            game_state.make_move(row_number, column_number)
            if game_state.winning_player():
                break
            print('VALID')
            print_score(game_state)
            print_board(game_state)
            print_turn(game_state)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
            # end the program instead of being reported as INVALID moves.
            print('INVALID')
    if winning_player == '>':
        winner = game_state.winner_most_points()
    elif winning_player == '<':
        winner = game_state.winner_least_points()
    print_score(game_state)
    print_board(game_state)
    print('WINNER:', winner)
# Run the interactive game only when executed as a script.
if __name__ == '__main__':
    play_Othello()
|
## From the Corey Schafer YouTube video: Python Tutorial: Automate Parsing and Renaming of Multiple Files
## Break down a file name into variables, then rearange them and rename the file.
## original test files were in this format: 20190712_IMG073803545.jpg
import os

# Preview new names for files of the form 20190712_IMG073803545.jpg,
# rearranged to month_day_year_number.ext (rename call left disabled).
os.chdir('/home/chuck/Python/TestFiles')
for entry in os.listdir():
    base_name, extension = os.path.splitext(entry)   # name vs ".jpg"
    date_part, image_part = base_name.split('_')     # "20190712" vs "IMG073..."
    image_num = image_part.strip()[3:]               # drop the 'IMG' prefix
    year = date_part[:4]
    month = date_part[4:6]
    day = date_part[6:]
    renamed = '{}_{}_{}_{}{}'.format(month, day, year, image_num, extension)
    print(renamed)
    ## os.rename(entry, renamed)
|
def encode(str):
    """Encode *str* with the GADERYPOLUKI substitution cipher."""
    return ende(str)


def decode(str):
    """Decode *str*; the cipher is an involution, so this equals encode."""
    return ende(str)


def ende(str):
    """Swap each letter with its GADERYPOLUKI partner; other characters pass through."""
    plain = "gaderypolukiGADERYPOLUKI"
    cipher = "agedyropulikAGEDYROPULIK"
    mapping = dict(zip(plain, cipher))
    return "".join(mapping.get(ch, ch) for ch in str)
from __future__ import absolute_import
# Package version and the exact VCS commit this release was built from.
__version__ = '2.3.3'
__commit__ = '1294ac9126cee3a9ae316764f63cb46fe5666ccd'
# Re-export the core sbol API at package level; unit tests ship with the package.
from sbol.sbol import *
import sbol.unit_tests
|
import os
# Shared configuration: colour palette, dataset paths and hyper-parameters
# used by data_serialize.py, data_loader.py, augmentation.py and training.py.
# Color Palette - RGB : Integer dictionary
# data_Serialize.py
palette = {(1,64,128):0,
           (3,143,255):1,
           (2,255,128):2,
           (255,140,0):3,
           (0,0,0):4}
# Paths
# data_serialize.py
img_path = './dataset/training/images/'
# data_serialize.py
label_path = './dataset/training/labels/'
# data_serialize.py
img_val_path = './dataset/validation/images/'
# data_serialize.py
label_val_path = './dataset/validation/labels/'
# data_serialize.py, training.py
hdf5_path ='./hdf5_container/serialized_data.hdf5'
# data_serialize.py, training.py
hdf5_val_path ='./hdf5_container/validation_data.hdf5'
# data_serialize.py
dataset_mean = './hdf5_container/mean_values.json'
# Image Parameters
# data_serialize.py
image_shape = (512, 288) # augmentation takes info directly from image
# data_serialize.py
width = image_shape[0]
# data_serialize.py
height = image_shape[1]
# number of unique classes depends on how many dictionary keys palette has
# data_loader.py
num_classes = len(palette) # seg_aug in augmentation.py uses: nb_classes=np.max(seg)+1
# Serialization
# data_serialize.py
# NOTE(review): bufferSize=None semantics not visible here — presumably
# "unbuffered/whole dataset"; confirm in data_serialize.py.
bufferSize = None
# NOTE(review): distinct from the training batch_size below.
batchSize=32
# Training Parameters
validation_split = 0.1
# Dataset size is derived from the training image folder at import time.
dataset_length = len(os.listdir(img_path))
# data_serialize.py
val_size = int( dataset_length * validation_split )
# augmentation.py
seed_value = 1
# training.py
batch_size = 2
# training.py
val_batch_size = 2
# training.py
val_steps = int(val_size // val_batch_size)
steps_per_epoch = int( dataset_length // batch_size )
# training.py
epochs = 150
# training.py
one_hot=True
# training.py
preprocessors=None # give a list in the training.py to change it
# training.py
do_augment=True
# training.py
gpu_count = 1
# training.py
show_summary=True
|
import math
def solution(n, a, b):
    """Return the round in which contestants a and b first meet in a
    single-elimination bracket of n players (n itself is unused by the
    computation; kept for the published signature)."""
    lo, hi = min(a, b), max(a, b)
    rounds = 1
    # They meet when they occupy an adjacent odd/even pair of slots.
    while not (lo % 2 == 1 and hi - lo == 1):
        lo = (lo + 1) // 2   # integer ceil(lo / 2)
        hi = (hi + 1) // 2
        rounds += 1
    return rounds


print(solution(8,4,7)) # 3
import tkinter as tk
from tkinter import messagebox, Button
from nonogram import Nonogram
from image import Img
import numpy as np
# ##### DEFINE GRID HERE ###### #
ROWS = 10
COLS = 10
# Visual size of grid box
GRID_SIZE = 40
# Initialize
nonogram = Nonogram()
img = Img()
# tiles[row][col] holds the canvas rectangle id of a filled cell; 0 (later
# None) marks an empty cell.
tiles = [[0 for _ in range(COLS)] for _ in range(ROWS)]
def create_grid(event=None):
    """Redraw the grid lines to match the canvas' current size."""
    width = grid.winfo_width()
    height = grid.winfo_height()
    grid.delete('grid_line')  # remove only previously drawn grid lines
    # Vertical then horizontal lines, one every GRID_SIZE pixels.
    for x in range(0, width, GRID_SIZE):
        grid.create_line([(x, 0), (x, height)], tag='grid_line')
    for y in range(0, height, GRID_SIZE):
        grid.create_line([(0, y), (width, y)], tag='grid_line')
def callback_grid(event):
    """Toggle the clicked tile: fill it with a black rectangle, or clear it."""
    cell_w = int(grid.winfo_width() / COLS)
    cell_h = int(grid.winfo_height() / ROWS)
    col = event.x // cell_w
    row = event.y // cell_h
    if tiles[row][col]:
        # Already filled: remove the rectangle and mark the cell empty.
        grid.delete(tiles[row][col])
        tiles[row][col] = None
    else:
        tiles[row][col] = grid.create_rectangle(
            col * cell_w, row * cell_h,
            (col + 1) * cell_w, (row + 1) * cell_h, fill="black")
def callback_generate():
    """Build the nonogram from the drawn grid, save it, notify the user and
    close the editor window."""
    nonogram_definition = nonogram.generate_nonogram_from_matrix(np.array(tiles))
    # if nonogram.solve(nonogram_definition): TODO: Implement solver
    img.draw_nonogram(nonogram_definition)
    # Fixed user-facing typo "Succes!" -> "Success!".
    # NOTE(review): ".bpm" looks like a typo for ".bmp" — confirm the actual
    # filename written by Img.draw_nonogram before changing the message.
    messagebox.showinfo("Completed", "Success! \nYour Nonogram puzzle is saved as Nonogram.bpm in this directory.")
    root.destroy()
if __name__ == "__main__":
    # Build the editor window: a clickable canvas plus a Generate button.
    root = tk.Tk()
    grid = tk.Canvas(root, width=COLS * GRID_SIZE, height=ROWS * GRID_SIZE, background='white')
    button = Button(root, text="Generate", command=callback_generate)
    grid.pack()
    button.pack()
    # Redraw grid lines on resize; toggle tiles on left click.
    grid.bind('<Configure>', create_grid)
    grid.bind("<Button-1>", callback_grid)
    root.mainloop()
|
import datetime
import pandas as pd
# Determine the weekday name from a date
def get_weekday(date):
    """Return the three-letter English weekday name for *date*."""
    weekday = ["Mon","Tue","Wed","Thu","Fri","Sat","Sun"]
    return weekday[date.weekday()]

# weekend_exclude=True drops Saturdays and Sundays from the result.
def make_weekday_array(start, end, weekend_exclude):
    """Build per-day (Start, End) timestamp pairs covering [start, end].

    Each row spans one calendar day: the first row runs from *start* to
    23:59 of the same day, intermediate rows cover full days (00:00 to
    23:59), and the last row runs from midnight of end's day to *end*.
    With weekend_exclude=True, Saturday and Sunday rows are omitted.
    Returns a pandas DataFrame with 'Start' and 'End' columns.
    """
    # Midnight of the first full day after the start day.
    start_temp_for = start + datetime.timedelta(days = 1)
    start_temp_for = datetime.datetime.strftime(start_temp_for,'%Y-%m-%d 00:00:00')
    start_temp_for = datetime.datetime.strptime(start_temp_for,'%Y-%m-%d %H:%M:%S')
    # End of the start day (23:59).
    start_temp = datetime.datetime.strftime(start,'%Y-%m-%d 23:59:00')
    start_temp = datetime.datetime.strptime(start_temp,'%Y-%m-%d %H:%M:%S')
    # Midnight of the end day.
    end_temp_for = datetime.datetime.strftime(end,'%Y-%m-%d 00:00:00')
    end_temp_for = datetime.datetime.strptime(end_temp_for, '%Y-%m-%d %H:%M:%S')
    weekday_array_start = []
    weekday_array_end = []
    if weekend_exclude == True:
        # BUG FIX: the original used `or`, which is always true, so weekend
        # start days were never excluded. `and` keeps weekday starts only.
        if get_weekday(start) != 'Sat' and get_weekday(start) != 'Sun':
            weekday_array_start.append(start)
            weekday_array_end.append(start_temp)
        while(1) :
            if get_weekday(start_temp_for) == 'Fri':
                weekday_array_start.append(start_temp_for)
                weekday_array_end.append(start_temp_for + datetime.timedelta(days = 1) - datetime.timedelta(minutes= 1))
            elif get_weekday(start_temp_for) == 'Sat' or get_weekday(start_temp_for) == 'Sun':
                pass
            else :
                weekday_array_start.append(start_temp_for)
                weekday_array_end.append(start_temp_for + datetime.timedelta(days = 1) - datetime.timedelta(minutes= 1))
            if start_temp_for == end_temp_for :
                break
            start_temp_for = start_temp_for + datetime.timedelta(days = 1)
        # Append the partial final day unless it falls on a weekend.
        if (get_weekday(end) == 'Sat') or (get_weekday(end) == 'Sun'):
            pass
        else:
            weekday_array_start.append(end_temp_for)
            weekday_array_end.append(end)
    else :
        weekday_array_start.append(start)
        weekday_array_end.append(start_temp)
        while(1) :
            weekday_array_start.append(start_temp_for)
            weekday_array_end.append(start_temp_for + datetime.timedelta(days = 1) - datetime.timedelta(minutes= 1))
            if start_temp_for == end_temp_for :
                break
            start_temp_for = start_temp_for + datetime.timedelta(days = 1)
        if end != end_temp_for:
            weekday_array_start.append(end_temp_for)
            weekday_array_end.append(end)
    return pd.DataFrame({'Start' : weekday_array_start,'End' : weekday_array_end})
if __name__ == "__main__":
    # BUG FIX: `null` is not a Python name and raised NameError. Pass an
    # explicit flag; True exercises the weekend-excluding branch —
    # NOTE(review): confirm which value the author intended.
    make_weekday_array(datetime.datetime(2019,10,31,3,45),datetime.datetime(2019,11,15,8,1,0), True)
from datetime import datetime
import serial.tools.list_ports
import sys
import psutil
import serial
import time
import math
import platform
from gpu.GpuDevice import GpuDevice
if platform.system() == 'Windows':
import wmi
class PCMetric:
    """Collects host metrics (CPU, RAM, GPU, uptime) and ships them to an
    external display device over a serial connection."""
    def __init__(self, port, config) -> None:
        # Human-readable timestamp format shared by startup/current time.
        self._time_format = "%H:%M %d/%m/%Y"
        # Sampling interval forwarded to psutil.cpu_percent().
        self._timeout_readings = config.timeout_readings
        # Vendor-name lists used to classify detected video controllers.
        self._amd_names = config.amd_names
        self._nvidia_names = config.nvidia_names
        self._intel_names = config.intel_names
        self.port = port
        self.mem_stats = self.cpu_stats_total = self.trimmed_stats = None
        self.cpu_count_real = psutil.cpu_count(False)  # physical cores only
        self.cpu_count = psutil.cpu_count()  # logical cores
        self.connection = None
        self.is_amd_card = self.is_nvidia_card = self.is_intel_card = False
        self.startup_time = time.strftime(self._time_format, time.localtime(psutil.boot_time()))
        self.current_time = self.uptime = self.day = None
        self.sensors = self.cpu_fan = None
        # NOTE(review): `[GpuDevice]` is not a valid typing annotation
        # (List[GpuDevice] was presumably meant); it still evaluates, so
        # left unchanged here.
        self.gpu_devices: [GpuDevice] = []
        self.video_controllers = []
        self.video_controllers_count = 0
    @staticmethod
    def convert_size(size_bytes, with_prefix=True):
        """Format *size_bytes* using binary units.

        Returns e.g. "1.5GB" when with_prefix is True, otherwise the scaled
        number as a string; a size of 0 yields "0B" (or the int 0 when
        with_prefix is False).
        """
        if size_bytes == 0:
            if with_prefix:
                return "0B"
            else:
                return 0
        size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
        i = int(math.floor(math.log(size_bytes, 1024)))
        p = math.pow(1024, i)
        s = round(size_bytes / p, 1)
        if with_prefix:
            return "%s%s" % (s, size_name[i])
        else:
            return str(s)
    def get_cpu_mem_stats(self):
        """Sample per-core CPU usage and virtual-memory stats into
        self.trimmed_stats and self.mem_stats (comma-joined strings)."""
        self.trimmed_stats = ""
        # Blocking sample over self._timeout_readings seconds, one value per core.
        cpu_stats = psutil.cpu_percent(interval=self._timeout_readings, percpu=True)
        for stat in cpu_stats:
            self.trimmed_stats += str(int(stat)) + ","
        average = int(sum(cpu_stats) / float(len(cpu_stats)))
        self.trimmed_stats += str(average) + "%"
        mem_stats = psutil.virtual_memory()
        # total,used,free,percent — used is emitted without a unit suffix.
        self.mem_stats = PCMetric.convert_size(mem_stats.total) + "," \
                         + PCMetric.convert_size(mem_stats.used, with_prefix=False) + "," \
                         + PCMetric.convert_size(mem_stats.free) + "," \
                         + str(int(mem_stats.percent)) + "%"
    def get_os_specific_stats(self):
        """Detect video controllers and temperature info.

        Uses WMI on Windows; on other systems falls back to psutil's
        sensor interfaces.
        """
        if platform.system() == 'Windows':
            w = wmi.WMI()
            # for el in w.Win32_TemperatureProbe():
            #     print(el)
            self.video_controllers = w.Win32_VideoController()
            self.video_controllers_count = len(self.video_controllers)
            #
            # for el in w.Win32_Processor():
            #     print(el)
            #
            # for el in w.CIM_TemperatureSensor():
            #     print(el)
            # for el in w.Win32_Fan():
            #     print(el)
            # Classify each controller by vendor string.
            for el in w.CIM_PCVideoController():
                if any(el.AdapterCompatibility in s for s in self._amd_names):
                    self.is_amd_card = True
                if any(el.AdapterCompatibility in s for s in self._nvidia_names):
                    self.is_nvidia_card = True
                if any(el.AdapterCompatibility in s for s in self._intel_names):
                    self.is_intel_card = True
            w2 = wmi.WMI(namespace="root\\wmi")
            try:
                # NOTE(review): the WMI query returns a sequence; accessing
                # .CurrentTemperature directly on it looks suspect — verify.
                temperature_info = w2.MSAcpi_ThermalZoneTemperature()
                print(temperature_info.CurrentTemperature)
            except wmi.x_wmi as wmi_e:
                print("Sorry, cannot handle wmi on this machine")
                print(wmi_e.com_error)
            except Exception:
                print(sys.exc_info())
        else:
            # nix systems only
            self.sensors = psutil.sensors_temperatures()
            self.cpu_fan = psutil.sensors_fans()
            pass
    def get_gpu_stats(self):
        """Refresh self.gpu_devices for each detected GPU vendor."""
        self.gpu_devices.clear()
        if self.is_amd_card:
            # Imported lazily so non-AMD machines never load the module.
            from gpu import AmdVideoCard
            self.gpu_devices.extend(AmdVideoCard.AmdVideoCard.get_stats())
        if self.is_nvidia_card:
            # TODO.md handle this
            pass
        if self.is_intel_card:
            # TODO.md handle this
            pass
    def connect(self):
        """Open the serial connection; return True on success, None on failure."""
        try:
            self.connection = serial.Serial(self.port, 115200, timeout=.1)
            time.sleep(1)  # give the device time to settle after opening
            return True
        except serial.SerialException as e:
            # NOTE(review): print() does not interpolate %s — this message
            # prints literally; str.format or logging was likely intended.
            print('Cannot connect to device %s %s', self.port, e)
    def send_via_serial(self, type_name, data, set_type_name_ending=True):
        """Send a type tag followed by *data* (int, str, or raw bytes).

        For str payloads, an end marker "<type_name>_end" is appended
        unless set_type_name_ending is False.
        """
        self.connection: serial.Serial
        self.connection.write(type_name.encode())
        if type(data) is int:
            self.connection.write(bytes([data]))
        elif type(data) is str:
            if set_type_name_ending:
                data += type_name + "_end"
            self.connection.write(data.encode())
        else:
            self.connection.write(data)
    def get_time(self):
        """Update current time, uptime (HH:MM, minute resolution) and weekday."""
        self.current_time = time.strftime(self._time_format, time.localtime(time.time()))
        # Uptime = now - boot, with the trailing ":00" seconds trimmed off.
        self.uptime = str(datetime.strptime(self.current_time, self._time_format) \
                          - datetime.strptime(self.startup_time, self._time_format))[:-3]
        self.day = str(time.strftime('%a', time.localtime(time.time())))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests
import urllib
import hashlib
from bs4 import BeautifulSoup
import json
import os
import time
import datetime
# from mpl_toolkits import basemap
# myak='DhzUPOCzgD3zrfRChfombTGZh59v9fGG'
# url = 'http://api.map.baidu.com/geocoder/v2/?address={}&output=json&ak=DhzUPOCzgD3zrfRChfombTGZh59v9fGG'
# s=requests.get(url.format('北京'))
# print(s.json())
# Report how long ago the weather file was last modified.
n=datetime.datetime.now()
s=os.stat(r'D:\学习\GitHub\china_wea\wea.txt').st_mtime
# BUG FIX: datetime.timedelta(n, s) raised TypeError (timedelta takes
# numbers, not a datetime). The intent — given n=now and s=mtime — is the
# elapsed time since modification.
print(n - datetime.datetime.fromtimestamp(s))
# coding: utf-8
import numpy as np
import pandas as pd
def Get_Real_coordinates_Dataframe(Data_frame):
    """Return a copy of *Data_frame* shifted to the real coordinate frame.

    Adds fixed offsets (+60 to column X, +54 to column Y) to every row.
    The input frame is left untouched.
    """
    Data_frame_N = Data_frame.copy()
    # Vectorized column arithmetic replaces the original per-row Python
    # loop and avoids pandas chained-assignment (Data_frame_N.X[i] = ...)
    # which triggers SettingWithCopyWarning and may silently fail.
    Data_frame_N['X'] = Data_frame_N['X'] + 60
    Data_frame_N['Y'] = Data_frame_N['Y'] + 54
    return Data_frame_N
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 5
"""
### YOUR CODE HERE for part 1i
import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
class CNN(nn.Module):
    """1-D convolution + max-over-time pooling over character embeddings.

    Maps (batch, reshape_size, max_word_length) inputs to (batch, embed_size)
    word embeddings.
    """
    def __init__(self, reshape_size, embed_size, max_word_length, kernel_size=5):
        super(CNN, self).__init__()
        self.convolution = nn.Conv1d(in_channels=reshape_size, out_channels=embed_size, kernel_size=kernel_size, bias=True)
        # Conv output length is max_word_length - kernel_size + 1; pooling
        # over that whole length implements max-over-time.
        self.max_pool = nn.MaxPool1d(kernel_size = max_word_length-kernel_size+1)
    def forward(self, x):
        x = F.relu(self.convolution(x))
        # BUG FIX: squeeze only the pooled (last) dimension. A bare
        # .squeeze() also removed the batch dimension when batch size was 1,
        # returning shape (embed_size,) instead of (1, embed_size).
        x = self.max_pool(x).squeeze(-1)
        return x
### END YOUR CODE
|
# @Date: 2019-05-13
# @Email: thc@envs.au.dk Tzu-Hsin Karen Chen
# @Last modified time: 2020-10-07
import sys
#sys.path.insert(0, '/work/qiu/data4Keran/code/modelPredict')
sys.path.insert(0, '/home/xx02tmp/code3/modelPredict')
from img2mapC05 import img2mapC
import numpy as np
import time
sys.path.insert(0, '/home/xx02tmp/code3/dataPrepare')
import basic4dataPre
import h5py
import os
import glob2
import scipy.io as sio
from scipy import stats
import scipy.ndimage
import numpy.matlib
from numpy import argmax
from keras.utils import to_categorical
import skimage.measure
#image folder
imgFile_s2='/home/xx02tmp/image/to run49/'
#gt file folder
foldRef_LCZ=imgFile_s2
#class number
num_lcz=3
#stride to cut patches
step=24
# Patch geometry: 48x48 pixels, 6 spectral bands.
patch_shape = (48, 48, 6)
#new line
img_shape = (48, 48)
#save folder
foldS='/home/xx02tmp/patch/patch50_11_02_48/'
# Keyword arguments for the img2mapC patch cutter.
params = {'dim_x': patch_shape[0],
          'dim_y': patch_shape[1],
          'dim_z': patch_shape[2],
          'step': step,
          'Bands': [0,1,2,3,4,5],
          'scale':1.0,
          'ratio':1,
          'isSeg':0,
          'nanValu':0,
          'dim_x_img': img_shape[0],#the actuall extracted image patch
          'dim_y_img': img_shape[1]}
#name of images
cities = ['summerrs2014_segA150sd']
#names of gt files
cities_ = ['class14_segA5530vp02n1_tra']
citiesval = ['summerrs2014_segA150sd']
cities_val = ['class14_segA5530vp02n1_val']
#tra and vali patch numbers of each images
patchNum = np.zeros((2,len(cities)), dtype= np.int64) ;
#class number of each class
classNum = np.zeros((len(cities),3), dtype= np.int64) ; #change here
# Create output folders for validation and training patches.
if not os.path.exists(foldS+'vali/'):
    os.makedirs(foldS+'vali/')
if not os.path.exists(foldS+'trai/'):
    os.makedirs(foldS+'trai/')
###########training patch#################
# For each training image: load the label raster and the 6-band image,
# cut both into aligned patches, drop invalid/unlabelled patches,
# remap the labels, and save the result under foldS/trai/.
for idCity in np.arange(len(cities)):
    # Label raster: single band, unscaled.
    params['Bands'] = [0]
    params['scale'] = 1
    img2mapCLass=img2mapC(**params);
    ###lcz to patches
    #load file
    prj0, trans0, ref0= img2mapCLass.loadImgMat(foldRef_LCZ+cities_[idCity]+'.tif')
    print('ref0 size', ref0.shape)
    ref = np.int8(ref0)
    #print('lcz file size', ref.shape, trans0, ref.dtype)
    # to patches
    patchLCZ, R, C = img2mapCLass.label2patches_all(ref, 1)
    print('lcz patches, beginning', patchLCZ.shape, patchLCZ.dtype)
    #load img
    file =imgFile_s2 + cities[idCity] + '.tif'
    # Image raster: all 6 bands.
    params['Bands'] = [0,1,2,3,4,5]
    params['scale'] = 1.0#!!!!!!!!!!!!!!!!!!!
    img2mapCLass=img2mapC(**params);
    prj0, trans0, img_= img2mapCLass.loadImgMat(file)
    print('img size', img_.shape)
    #image to patches
    patch_summer, R, C, idxNan = img2mapCLass.Bands2patches(img_, 1)
    print('image patches', patch_summer.shape, patch_summer.dtype)
    #try not delete idxNan (by Karen)
    print('lcz patches, before delete idxNan', patchLCZ.shape, patchLCZ.dtype)
    # Keep label patches aligned with image patches: drop the ones whose
    # image patch contained NaN values.
    patchLCZ = np.delete(patchLCZ, idxNan, axis=0)
    print('lcz patches, after delete idxNan', patchLCZ.shape, patchLCZ.dtype)
    ############manupulate the patches############
    #delete patches without lcz
    #change here, try 0.5
    c3Idx=basic4dataPre.patch2labelInx_lt(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.shape[2]*patchLCZ.shape[1]*0.044*1)
    patchLCZ = np.delete(patchLCZ, c3Idx, axis=0)
    print('lcz patches, after delete noLCZ', patchLCZ.shape, patchLCZ.dtype)
    patch_summer = np.delete(patch_summer, c3Idx, axis=0)
    print('image patches, after delete noLCZ', patch_summer.shape, patch_summer.dtype)
    #print('delete no lcz patch: ', patchHSE.shape, patch_summer.shape, patchLCZ.shape)
    #NOT downsample to have a 90m gt
    #keep original 90m because of the new inputs of label has resoluiton at 90m
    #patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,3,3,1), np.mean)
    # NOTE(review): block size (1,1,1,1) leaves the values unchanged; kept
    # presumably for the dtype conversion np.mean performs — confirm.
    patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,1,1,1), np.mean)
    print('downsampled patchHSE:', patchLCZ.shape)
    ###statistic of class number
    tmp=patchLCZ.reshape((-1,1))
    # Count pixels per class (classes 1..3) for this image.
    for c in np.arange(1,4): #change here class=1, 2, 3,4
        idx_=np.where(tmp==c)
        idx = idx_[0]
        classNum[idCity, c-1]=idx.shape[0]
    #reset the labels
    patchLCZ=patchLCZ-1; #0123, -1012
    #print('print(np.unique(patchHSE))',np.unique(patchLCZ))
    patchLCZ[patchLCZ==-1 ] = 3 #change here the low density class (0123)
    #patchLCZ=basic4dataPre.patchIndex2oneHot(patchLCZ, num_lcz)
    #print('final LCZ:', patchLCZ.shape, np.unique(patchLCZ))
    print('print(np.unique(patchLCZ))',np.unique(patchLCZ))
    print('shape', patchLCZ.shape, patch_summer.shape)
    patchNum_tra =basic4dataPre.savePatch_fold_single(patch_summer, patchLCZ, foldS+'trai/', cities[idCity])
    patchNum[0,idCity]=patchNum_tra
    #patchNum[1,idCity]=patchNum_val
    print(patchNum, classNum)
##############validation patch##############
# Same pipeline as the training loop above, but reading the validation
# label rasters and saving under foldS/vali/.
# NOTE(review): this duplicates the training loop almost line-for-line;
# a shared helper would remove the drift risk.
print('start validation patch')
for idCity in np.arange(len(citiesval)):
    # Label raster: single band, unscaled.
    params['Bands'] = [0]
    params['scale'] = 1
    img2mapCLass=img2mapC(**params);
    ###lcz to patches
    #load file
    prj0, trans0, ref0= img2mapCLass.loadImgMat(foldRef_LCZ+cities_val[idCity]+'.tif')
    print('ref0 size', ref0.shape)
    ref = np.int8(ref0)
    #print('lcz file size', ref.shape, trans0, ref.dtype)
    # to patches
    patchLCZ, R, C = img2mapCLass.label2patches_all(ref, 1)
    print('lcz patches, beginning', patchLCZ.shape, patchLCZ.dtype)
    #load img
    file =imgFile_s2 + citiesval[idCity] + '.tif'
    params['Bands'] = [0,1,2,3,4,5]
    params['scale'] = 1.0#!!!!!!!!!!!!!!!!!!!
    img2mapCLass=img2mapC(**params);
    prj0, trans0, img_= img2mapCLass.loadImgMat(file)
    print('img size', img_.shape)
    #image to patches
    patch_summer, R, C, idxNan = img2mapCLass.Bands2patches(img_, 1)
    print('image patches', patch_summer.shape, patch_summer.dtype)
    #try not delete idxNan (by Karen)
    print('lcz patches, before delete idxNan', patchLCZ.shape, patchLCZ.dtype)
    patchLCZ = np.delete(patchLCZ, idxNan, axis=0)
    print('lcz patches, after delete idxNan', patchLCZ.shape, patchLCZ.dtype)
    ############manupulate the patches############
    #delete patches without lcz
    #change here
    c3Idx=basic4dataPre.patch2labelInx_lt(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.shape[2]*patchLCZ.shape[1]*0.044*1)
    patchLCZ = np.delete(patchLCZ, c3Idx, axis=0)
    print('lcz patches, after delete noLCZ', patchLCZ.shape, patchLCZ.dtype)
    patch_summer = np.delete(patch_summer, c3Idx, axis=0)
    print('image patches, after delete noLCZ', patch_summer.shape, patch_summer.dtype)
    #print('delete no lcz patch: ', patchHSE.shape, patch_summer.shape, patchLCZ.shape)
    #NOT downsample to have a 90m gt
    #keep original 90m because of the new inputs of label has resoluiton at 90m
    #patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,3,3,1), np.mean)
    patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,1,1,1), np.mean)
    print('downsampled patchHSE:', patchLCZ.shape)
    ###statistic of class number
    tmp=patchLCZ.reshape((-1,1))
    for c in np.arange(1,4): #change here
        idx_=np.where(tmp==c)
        idx = idx_[0]
        #classNum[idCity, c-1]=idx.shape[0]
    #reset the labels
    patchLCZ=patchLCZ-1;
    #print('print(np.unique(patchHSE))',np.unique(patchLCZ))
    patchLCZ[patchLCZ==-1 ] = 3 #change here
    #patchLCZ=basic4dataPre.patchIndex2oneHot(patchLCZ, num_lcz)
    #print('final LCZ:', patchLCZ.shape, np.unique(patchLCZ))
    print('print(np.unique(patchLCZ))',np.unique(patchLCZ))
    print('shape', patchLCZ.shape, patch_summer.shape)
    patchNum_val =basic4dataPre.savePatch_fold_singlev(patch_summer, patchLCZ, foldS+'vali/', cities[idCity])
    #patchNum[0,idCity]=patchNum_tra
    patchNum[1,idCity]=patchNum_val
    print(patchNum, classNum)
# Persist patch and class counts for later inspection.
sio.savemat((foldS +'patchNum.mat'), {'patchNum': patchNum, 'classNum':classNum})
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def parse_sequence_example(serialized, image_feature, caption_feature):
  """Deserializes one SequenceExample into its image bytes and caption ids.

  Args:
    serialized: A scalar string Tensor; a single serialized SequenceExample.
    image_feature: Name of the context feature holding the encoded image.
    caption_feature: Name of the feature list holding the integer caption.

  Returns:
    A pair (encoded_image, caption): a scalar string Tensor with the JPEG
    bytes and a 1-D int64 Tensor of dynamic length.
  """
  context_spec = {image_feature: tf.FixedLenFeature([], dtype=tf.string)}
  sequence_spec = {caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64)}
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features=context_spec,
      sequence_features=sequence_spec)
  return context[image_feature], sequence[caption_feature]
def prefetch_input_data(reader,
                        file_pattern,
                        is_training,
                        batch_size,
                        values_per_shard,
                        input_queue_capacity_factor=16,
                        num_reader_threads=1,
                        shard_queue_name="filename_queue",
                        value_queue_name="input_queue"):
  """Prefetches string values from disk into an input queue.

  In training the capacity of the queue is important because a larger queue
  means better mixing of training examples between shards. The minimum number of
  values kept in the queue is values_per_shard * input_queue_capacity_factor,
  where input_queue_memory factor should be chosen to trade-off better mixing
  with memory usage.

  Args:
    reader: Instance of tf.ReaderBase.
    file_pattern: Comma-separated list of file patterns (e.g.
        /tmp/train_data-?????-of-00100).
    is_training: Boolean; whether prefetching for training or eval.
    batch_size: Model batch size used to determine queue capacity.
    values_per_shard: Approximate number of values per shard.
    input_queue_capacity_factor: Minimum number of values to keep in the queue
      in multiples of values_per_shard. See comments above.
    num_reader_threads: Number of reader threads to fill the queue.
    shard_queue_name: Name for the shards filename queue.
    value_queue_name: Name for the values input queue.

  Returns:
    A Queue containing prefetched string values.
  """
  data_files = []
  for pattern in file_pattern.split(","):
    data_files.extend(tf.gfile.Glob(pattern))
  if not data_files:
    tf.logging.fatal("Found no input files matching %s", file_pattern)
  else:
    tf.logging.info("Prefetching values from %d files matching %s",
                    len(data_files), file_pattern)
  if is_training:
    # Training: shuffle filenames and use a RandomShuffleQueue so examples
    # from different shards are mixed before batching.
    filename_queue = tf.train.string_input_producer(
        data_files, shuffle=True, capacity=16, name=shard_queue_name)
    min_queue_examples = values_per_shard * input_queue_capacity_factor
    capacity = min_queue_examples + 100 * batch_size
    values_queue = tf.RandomShuffleQueue(
        capacity=capacity,
        min_after_dequeue=min_queue_examples,
        dtypes=[tf.string],
        name="random_" + value_queue_name)
  else:
    # Eval: deterministic order, minimal capacity.
    filename_queue = tf.train.string_input_producer(
        data_files, shuffle=False, capacity=1, name=shard_queue_name)
    capacity = values_per_shard + 3 * batch_size
    values_queue = tf.FIFOQueue(
        capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
  # One enqueue op per reader thread; a QueueRunner keeps the queue filled.
  enqueue_ops = []
  for _ in range(num_reader_threads):
    _, value = reader.read(filename_queue)
    enqueue_ops.append(values_queue.enqueue([value]))
  tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
      values_queue, enqueue_ops))
  # NOTE(review): tf.scalar_summary is the pre-1.0 API (tf.summary.scalar now).
  tf.scalar_summary(
      "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
      tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
  return values_queue
def batch_with_dynamic_pad(images_and_captions,
                           batch_size,
                           queue_capacity,
                           add_summaries=True):
  """Batches input images and captions.

  This function splits the caption into an input sequence and a target sequence,
  where the target sequence is the input sequence right-shifted by 1. Input and
  target sequences are batched and padded up to the maximum length of sequences
  in the batch. A mask is created to distinguish real words from padding words.

  Example:
    Actual captions in the batch ('-' denotes padded character):
      [
        [ 1 2 5 4 5 ],
        [ 1 2 3 4 - ],
        [ 1 2 3 - - ],
      ]
    input_seqs:
      [
        [ 1 2 3 4 ],
        [ 1 2 3 - ],
        [ 1 2 - - ],
      ]
    target_seqs:
      [
        [ 2 3 4 5 ],
        [ 2 3 4 - ],
        [ 2 3 - - ],
      ]
    mask:
      [
        [ 1 1 1 1 ],
        [ 1 1 1 0 ],
        [ 1 1 0 0 ],
      ]

  Args:
    images_and_captions: A list of pairs [image, caption], where image is a
      Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
      any length. Each pair will be processed and added to the queue in a
      separate thread.
    batch_size: Batch size.
    queue_capacity: Queue capacity.
    add_summaries: If true, add caption length summaries.

  Returns:
    images: A Tensor of shape [batch_size, height, width, channels].
    input_seqs: An int32 Tensor of shape [batch_size, padded_length].
    target_seqs: An int32 Tensor of shape [batch_size, padded_length].
    mask: An int32 0/1 Tensor of shape [batch_size, padded_length].
  """
  enqueue_list = []
  for image, caption in images_and_captions:
    caption_length = tf.shape(caption)[0]
    # NOTE(review): tf.sub is the pre-1.0 name of tf.subtract.
    input_length = tf.expand_dims(tf.sub(caption_length, 1), 0)
    # Input drops the last token; target drops the first (right shift by 1).
    input_seq = tf.slice(caption, [0], input_length)
    target_seq = tf.slice(caption, [1], input_length)
    indicator = tf.ones(input_length, dtype=tf.int32)
    enqueue_list.append([image, input_seq, target_seq, indicator])
  # dynamic_pad=True pads every sequence up to the batch maximum length.
  images, input_seqs, target_seqs, mask = tf.train.batch_join(
      enqueue_list,
      batch_size=batch_size,
      capacity=queue_capacity,
      dynamic_pad=True,
      name="batch_and_pad")
  if add_summaries:
    # True caption length = number of real (unmasked) tokens + 1.
    lengths = tf.add(tf.reduce_sum(mask, 1), 1)
    tf.scalar_summary("caption_length/batch_min", tf.reduce_min(lengths))
    tf.scalar_summary("caption_length/batch_max", tf.reduce_max(lengths))
    tf.scalar_summary("caption_length/batch_mean", tf.reduce_mean(lengths))
  return images, input_seqs, target_seqs, mask
|
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TestBuildElevationMosaic.py
# Description: Test Build Elevation Mosaic Toolbox
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
import arcpy
import sys
import traceback
import TestUtilities
import os
class LicenseError(Exception):
    """Raised when a required ArcGIS extension license is unavailable."""
try:
    # Import the toolbox under test and allow existing outputs to be replaced.
    arcpy.ImportToolbox(TestUtilities.toolbox,"elevationmosaics")
    arcpy.env.overwriteOutput = True
    # Set tool parameter variables.
    inputElevationFolderPath = os.path.join(TestUtilities.elevSourcePath)
    inputRasterType = "DTED"
    # Older (pre-10.3) raster function templates, kept for reference:
    #inputAspectFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","Aspect.rft.xml")
    #inputPercentSlopeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","PercentSlope.rft.xml")
    #inputHillshadeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","Hillshade.rft.xml")
    # 10.3-era raster function templates actually used by the test.
    inputAspectFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","10.3","AspectNumericValues.rft.xml")
    inputPercentSlopeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","10.3","SlopePercentRise.rft.xml")
    inputHillshadeFunctionTemplateFile = os.path.join(TestUtilities.toolboxesPath,"Raster Functions","10.3","GreyScaleHillshade.rft.xml")
    # Output mosaic names; the DTM mosaic name is relative to outputGDB,
    # the other three are full paths inside outputGDB.
    outputDTMMosaic = "DigitalTerrainModel"
    outputHillshadeMosaic = os.path.join(TestUtilities.outputGDB, "Hillshade")
    outputAspectMosaic = os.path.join(TestUtilities.outputGDB,"Aspect")
    outputPercentSlopeMosaic = os.path.join(TestUtilities.outputGDB,"PercentSlope")
    # Testing Build Elevation Mosaics - DTED input.
    arcpy.AddMessage("Starting Test: Build Elevation Mosaic Tools")
    arcpy.BuildElevationMosaics_elevationmosaics(TestUtilities.outputGDB,inputElevationFolderPath,inputRasterType,inputAspectFunctionTemplateFile,
                                                 inputPercentSlopeFunctionTemplateFile,inputHillshadeFunctionTemplateFile, outputDTMMosaic,
                                                 outputHillshadeMosaic,outputAspectMosaic,outputPercentSlopeMosaic)
    # Verify results: every generated mosaic dataset must contain at least
    # one footprint row, otherwise the tool produced nothing.
    countDTMFootprints = int(arcpy.GetCount_management(os.path.join(TestUtilities.outputGDB,outputDTMMosaic)).getOutput(0))
    print "DTM Footprint count: " + str(countDTMFootprints)
    countSlopeFootprints = int(arcpy.GetCount_management(outputPercentSlopeMosaic).getOutput(0))
    print "PercentSlope Footprint count: " + str(countSlopeFootprints)
    countHillshadeFootprints = int(arcpy.GetCount_management(outputHillshadeMosaic).getOutput(0))
    print "Hillshade Footprint count: " + str(countHillshadeFootprints)
    countAspectFootprints = int(arcpy.GetCount_management(outputAspectMosaic).getOutput(0))
    print "Aspect Footprint count: " + str(countAspectFootprints)
    if (countDTMFootprints < 1) or (countSlopeFootprints < 1) or (countHillshadeFootprints < 1) or (countAspectFootprints < 1):
        print "Invalid output footprint count!"
        raise Exception("Test Failed")
    print("Test Passed")
except LicenseError:
    # NOTE(review): nothing in the try block raises LicenseError explicitly;
    # presumably an earlier revision checked out the Spatial Analyst license.
    print "Spatial Analyst license is unavailable"
except arcpy.ExecuteError:
    # Get the arcpy error messages
    msgs = arcpy.GetMessages()
    arcpy.AddError(msgs)
    print(msgs)
    # return a system error code
    sys.exit(-1)
except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
    # Print Python error messages for use in Python / Python Window
    print(pymsg + "\n")
    print(msgs)
    # return a system error code
    sys.exit(-1)
finally:
    # Check the Spatial Analyst extension back in if it is checked out.
    if arcpy.CheckExtension("Spatial") == "Available":
        arcpy.CheckInExtension("Spatial")
import numpy as np
import pandas as pd
from hmmlearn import hmm
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.preprocessing import PolynomialFeatures
from config import (TIME_SEQUENCE_LENGTH, HMM_TRANSITION_MATRIX, HMM_EMISSION_MATRIX, HMM_START_MATRIX, N_COMPONENTS)
from utilities import (convert_to_words, print_full, get_position_stats, combine_csv, resolve_acc_gyro,
blank_filter, concat_data, update_df)
# Human-readable classifier names; kept parallel (by position) to the
# `classifiers` list below for zip() in test_classifiers().
names = ['Logistic Regression', 'Nearest Neighbors', 'RBF SVM', 'Decision Tree',
         'Random Forest', 'Naive Bayes', 'AdaBoost']
# Candidate models compared by test_classifiers(); order matches `names`.
classifiers = [
    LogisticRegression(dual=False, solver='lbfgs', multi_class='multinomial', max_iter=500),
    KNeighborsClassifier(3),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                           max_depth=None, max_features='auto', max_leaf_nodes=None,
                           min_samples_leaf=8, min_samples_split=4,
                           min_weight_fraction_leaf=0.0, n_estimators=5000, n_jobs=-1,
                           oob_score=False, random_state=None, verbose=0,
                           warm_start=False),
    GaussianNB(),
    AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=2),
                       n_estimators=100,
                       learning_rate=0.03)
]
# For optimal performance, set degree to 3. For speed, set to 1
polynomial_features = PolynomialFeatures(interaction_only=False, include_bias=True, degree=3)
# Shared random forest used by trial() and test_model(); smaller ensemble
# (200 trees) than the 5000-tree one in `classifiers` above.
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                            max_depth=None, max_features='auto', max_leaf_nodes=None,
                            min_samples_leaf=8, min_samples_split=4,
                            min_weight_fraction_leaf=0.0, n_estimators=200, n_jobs=-1,
                            oob_score=False, random_state=None, verbose=0,
                            warm_start=False)
def test_classifiers(df_train):
"""check different classifier accuracy, rank features"""
y = df_train['state'].values
X = df_train.drop(['state', 'index'], axis=1)
if X.isnull().values.any() == False:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
X = polynomial_features.fit_transform(X)
else:
print "Abort: Found NaN values"
# iterate over classifiers
for name, clf in zip(names, classifiers):
clf.fit(X_train, y_train)
clf_prediction = clf.predict(X_test)
clf_scores = cross_validation.cross_val_score(clf, X, df_train.state, cv=10, scoring='accuracy')
# report on the accuracy
print '{} prediction accuracy this time: {}'.format(name, accuracy_score(y_test, clf_prediction))
print('%s general accuracy: %0.2f (+/- %0.2f)' % (name, clf_scores.mean(), clf_scores.std() * 2))
# Print the feature ranking
try:
importances = clf.feature_importances_
std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
print "Feature importance ranking:"
for f in range(X.shape[1]):
print("%d. feature %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
except AttributeError:
print "Feature importance not available"
def trial(df_train, test_data):
"""The trial is for running predictions on test data."""
#my_test_data = test_data.drop(['avg_stand'], axis=1)
y = df_train['state'].values
X = df_train.drop(['avg_stand', 'stand', 'state', 'index'], axis=1)
if X.isnull().values.any() == False:
#X = polynomial_features.fit_transform(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
else:
print "Found NaN values"
rf.fit(X_train, y_train)
#polynomial_test_data = polynomial_features.fit_transform(my_test_data)
rf_pred2 = rf.predict(test_data)
print rf_pred2
test_data['state'] = rf_pred2
final_prediction = convert_to_words(rf_pred2)
print_full(final_prediction)
get_position_stats(final_prediction)
return test_data
def trial_standup(df_train, test_data):
"""
Test 1: 1s followed by 3s
"""
y = df_train['avg_stand'].values
X = df_train.drop(['avg_stand', 'stand', 'state', 'index'], axis=1)
if X.isnull().values.any() == False:
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=500, n_jobs=-1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
X = polynomial_features.fit_transform(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
else:
print "Found NaN values"
rf.fit(X_train, y_train)
p_test_data = polynomial_features.fit_transform(test_data)
rf_pred2 = rf.predict(p_test_data)
print rf_pred2
test_data['avg_stand'] = rf_pred2
final_prediction = convert_to_words(rf_pred2)
print_full(final_prediction)
get_position_stats(final_prediction)
# Now we have the estimated stand_up values, we use them to create a new feature
# in the original df
# rf_pred3 = rf_pred2.astype(int)
return test_data
def test_model(df_train):
"""check model accuracy, rank features"""
y = df_train['state'].values
X = df_train.drop(['state', 'index'], axis=1)
if X.isnull().values.any() == False:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
#X = polynomial_features.fit_transform(X)
rf.fit(X_train, y_train)
rf_prediction = rf.predict(X_test)
rf_scores = cross_validation.cross_val_score(
rf, X, df_train.state, cv=10, scoring='accuracy')
# report on the accuracy
print 'Random Forest prediction accuracy this time: {}'.format(accuracy_score(y_test, rf_prediction))
print("Random Forest general accuracy: %0.2f (+/- %0.2f)" % (rf_scores.mean(), rf_scores.std() * 2))
# Print the feature ranking
try:
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
print "Feature importance ranking:"
for f in range(X.shape[1]):
print("%d. feature %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
except AttributeError:
print "Feature importance not available"
def test_model_stand(df_train):
"""check model accuracy"""
y = df_train['avg_stand'].values
X = df_train.drop(['avg_stand', 'stand', 'state', 'index'], axis=1)
if X.isnull().values.any() == False:
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=500, n_jobs=-1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
rf_scores = cross_validation.cross_val_score(
rf, X, df_train.state, cv=10, scoring='accuracy')
print 'rf prediction: {}'.format(accuracy_score(y_test, rf_pred))
print("Random Forest Accuracy: %0.2f (+/- %0.2f)" % (rf_scores.mean(), rf_scores.std() * 2))
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
def hmm_comparison(df_train, df_test, label_test=False):
"""check different classifier accuracy, rank features"""
y = df_train['state'].values
X = df_train.drop(['state', 'index', 'stand', 'avg_stand'], axis=1)
if X.isnull().values.any() == False:
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.8)
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=500, n_jobs=-1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
else:
print "Abort: Found NaN values"
rf.fit(X_train, y_train)
df_test_ready = df_test
df_test_ready = df_test_ready.drop(['state'], axis=1)
rf_prediction = rf.predict(df_test_ready)
rf_scores = cross_validation.cross_val_score(rf, X, df_train.state, cv=10, scoring='accuracy')
# report on the accuracy
print 'Random Forest prediction accuracy this time: {}'.format(accuracy_score(df_test['state'], rf_prediction))
print('Random Forest general accuracy: %0.2f (+/- %0.2f)' % (rf_scores.mean(), rf_scores.std() * 2))
# Enhance with the HMM
# Hidden Markov Model with multinomial (discrete) emissions
model = hmm.MultinomialHMM(n_components=N_COMPONENTS,
n_iter=10,
verbose=False)
model.startprob_ = HMM_START_MATRIX
model.transmat_ = HMM_TRANSITION_MATRIX
model.emissionprob_ = HMM_EMISSION_MATRIX
observations = np.array(rf_prediction)
n_samples = len(observations)
hmm_input_data = observations.reshape((n_samples, -1))
hmm_result = model.decode(hmm_input_data, algorithm='viterbi')
# NOW THE COMPARISON
print "COMPARISON"
print 'HMM final result: {}'.format(hmm_result[1])
print "RF prediction result: {}".format(rf_prediction)
print "y test values: {}".format(df_test['state'].values)
rf_comparison_accuracy = 1 - np.mean(rf_prediction != df_test['state'])
hmm_comparison_accuracy = 1 - np.mean(hmm_result[1] != df_test['state'])
print (rf_prediction != df_test['state']).sum()/float(rf_prediction.size)
print "RF accuracy: {}".format(rf_comparison_accuracy)
print "HMM accuracy: {}".format(hmm_comparison_accuracy)
return hmm_result, rf_prediction
|
import django_filters
from ...app import models
from ..utils.filters import filter_by_query_param
def filter_app_search(qs, _, value):
    """Filter the queryset by the free-text search value over the app name.

    An empty/falsy value leaves the queryset unchanged.
    """
    if not value:
        return qs
    return filter_by_query_param(qs, value, ("name",))
class AppFilter(django_filters.FilterSet):
    """FilterSet for App querysets: free-text name search + active flag."""

    # Free-text search over the app name (delegates to filter_app_search).
    search = django_filters.CharFilter(method=filter_app_search)
    # Exact match on App.is_active.
    is_active = django_filters.BooleanFilter()

    class Meta:
        model = models.App
        fields = ["search", "is_active"]
|
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumFileModules(AnsibleMetric):
    """This class implements the metric 'Number Of file Modules' in an Ansible script."""

    def count(self):
        """Return the number of tasks in the playbook that use the 'file' module."""
        file_tasks = [task for task in self.tasks if task and 'file' in task]
        return len(file_tasks)
def features(feature_set_name):
    '''Return dictionary[column_name] = transformation for the feature set.

    feature_set_name in [act, actlog, ct, ctlog, t, tlog,
                         bestNN, pcaNN,
                         id, prices,
                         best15{census|city|zip}]

    The transformation is one of 'none', 'log', or 'log1p'.

    Raises RuntimeError for an unknown feature_set_name and
    NotImplementedError for feature sets that are declared but not yet
    implemented (bestNN, best15*).

    See Features.R for original implementation.
    '''
    # these dictionaries define the features in each feature set and
    # whether and how to convert the feature into the log domain
    id_features = {'RECORDING.DATE': 'none',
                   'SALE.DATE': 'none',
                   'sale.year': 'none',
                   'sale.month': 'none',
                   'sale.day': 'none',
                   'apn.recoded': 'none',
                   'CENSUS.TRACT': 'none',
                   'zip5': 'none',
                   'PROPERTY.CITY': 'none'}
    price_features = {'SALE.AMOUNT': 'none'}
    predictors_assessment = {'IMPROVEMENT.VALUE.CALCULATED': 'log',
                             'LAND.VALUE.CALCULATED': 'log',
                             'fraction.improvement.value': 'none'}
    predictors_census = {'avg.commute': 'none',
                         'census.tract.has.industry': 'none',
                         'census.tract.has.park': 'none',
                         'census.tract.has.retail': 'none',
                         'census.tract.has.school': 'none',
                         'fraction.owner.occupied': 'none',
                         'median.household.income': 'none'}
    predictors_taxroll = {'EFFECTIVE.YEAR.BUILT': 'none',
                          'has.pool': 'none',
                          'is.new.construction': 'none',
                          'YEAR.BUILT': 'none',
                          'zip5.has.industry': 'none',
                          'zip5.has.park': 'none',
                          'zip5.has.retail': 'none',
                          'zip5.has.school': 'none',
                          'LAND.SQUARE.FOOTAGE': 'log',
                          'LIVING.SQUARE.FEET': 'log',
                          'BASEMENT.SQUARE.FEET': 'log1p',
                          'TOTAL.BATHS.CALCULATED': 'log1p',
                          'BEDROOMS': 'log1p',
                          'FIREPLACE.NUMBER': 'log1p',
                          'PARKING.SPACES': 'log1p',
                          'STORIES.NUMBER': 'log1p',
                          'TOTAL.ROOMS': 'log1p'}

    def transform(predictors, use_log):
        '''Return dict keeping log-domain conversions only when use_log.'''
        # BUG FIX: use .items() — iteritems() does not exist on Python 3,
        # while .items() works on both 2 and 3.
        return {k: (v if use_log else 'none') for k, v in predictors.items()}

    def pca(n):
        '''Return feature set of first n pca features.'''
        pca_features_all = ('median.household.income',
                            'land.square.footage',
                            'living.area',
                            'basement.square.feet')
        # no transformations for pca features
        return {feature: 'none' for feature in pca_features_all[:n]}

    def best(n):
        '''Return feature set of first n best features.

        Mimic pca(n), eventually.
        The best features will end up in a file and will need to be read in.
        '''
        # BUG FIX: previously this *returned* the NotImplementedError class
        # instead of raising it, silently handing callers a class object.
        raise NotImplementedError('best feature sets are not yet implemented')

    def combined(*dicts):
        '''Return a new dict merging the given dicts (no template mutation).'''
        merged = {}
        for d in dicts:
            merged.update(d)
        return merged

    pca_feature_set_names = ('pca01', 'pca02', 'pca03', 'pca04')
    best_feature_set_names = ('best01', 'best02', 'best03', 'best04', 'best05',
                              'best06', 'best07', 'best08', 'best09', 'best10',
                              'best11', 'best12', 'best13', 'best14', 'best15',
                              'best16', 'best17', 'best18', 'best19', 'best20')
    if feature_set_name in pca_feature_set_names:
        return pca(pca_feature_set_names.index(feature_set_name) + 1)
    elif feature_set_name in best_feature_set_names:
        return best(best_feature_set_names.index(feature_set_name) + 1)
    elif feature_set_name == 'act':
        # combined() copies, so the template dicts are no longer mutated.
        return transform(combined(predictors_assessment, predictors_census,
                                  predictors_taxroll), use_log=False)
    elif feature_set_name == 'actlog':
        return transform(combined(predictors_assessment, predictors_census,
                                  predictors_taxroll), use_log=True)
    elif feature_set_name == 'ct':
        return transform(combined(predictors_census, predictors_taxroll),
                         use_log=False)
    elif feature_set_name == 'ctlog':
        return transform(combined(predictors_census, predictors_taxroll),
                         use_log=True)
    elif feature_set_name == 't':
        return transform(predictors_taxroll, use_log=False)
    elif feature_set_name == 'tlog':
        return transform(predictors_taxroll, use_log=True)
    elif feature_set_name == 'id':
        return id_features
    elif feature_set_name == 'prices':
        return price_features
    elif feature_set_name in ('best15census', 'best15city', 'best15zip'):
        # BUG FIX: these branches also returned the exception class.
        raise NotImplementedError(feature_set_name + ' is not implemented')
    else:
        raise RuntimeError('invalid feature_set_name ' + feature_set_name)
if __name__ == '__main__':
    # unit test
    # for now, just print
    import unittest

    def get(feature_set_name):
        '''Get features and possibly print them (flip the flag to debug).'''
        f = features(feature_set_name)
        if False:
            print(feature_set_name)
            print(f)
        return f

    class TestFeatures(unittest.TestCase):
        """Sanity checks on the size of each feature set."""

        def setUp(self):
            pass

        def test_id(self):
            f = get('id')
            self.assertEqual(len(f), 9)

        def test_prices(self):
            f = get('prices')
            self.assertEqual(len(f), 1)

        def test_pca02(self):
            f = get('pca02')
            self.assertEqual(len(f), 2)

        def test_best20(self):
            # BUG FIX: self.failIf is a deprecated alias that was removed
            # from unittest in Python 3.12; assertFalse is the supported
            # spelling on both Python 2 and 3.
            self.assertFalse(False)  # not yet implemented
            # f = get('best20')
            # self.assertEqual(len(f), 21)

        def test_act(self):
            f = get('act')
            self.assertEqual(len(f), 27)

        def test_actlog(self):
            f = get('actlog')
            self.assertEqual(len(f), 27)

        def test_ct(self):
            f = get('ct')
            self.assertEqual(len(f), 24)

        def test_ctlog(self):
            f = get('ctlog')
            self.assertEqual(len(f), 24)

        def test_t(self):
            f = get('t')
            self.assertEqual(len(f), 17)

        def test_tlog(self):
            f = get('tlog')
            self.assertEqual(len(f), 17)

    unittest.main()
|
# Generated by Django 2.2.3 on 2019-07-18 10:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for this app: creates the Story model.
    # Depends on the services app's initial migration because of the
    # ForeignKey to services.Service below.

    dependencies = [
        ('services', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Story',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # `code` is the service-side identifier; uniqueness is
                # enforced per (service, code, date) via unique_together.
                ('code', models.CharField(max_length=255)),
                ('title', models.CharField(blank=True, max_length=500, null=True)),
                ('url', models.URLField(blank=True, max_length=2000, null=True)),
                ('content', models.TextField(blank=True, null=True)),
                ('content_type', models.CharField(blank=True, choices=[('T', 'text'), ('U', 'url'), ('I', 'image')], max_length=1, null=True)),
                # start_* columns capture the counters when the story was
                # first seen; the unprefixed columns hold the latest values.
                ('start_comments', models.IntegerField(default=0)),
                ('comments', models.IntegerField(default=0)),
                ('start_score', models.IntegerField(default=0)),
                ('score', models.IntegerField(default=0)),
                ('date', models.DateField(auto_now_add=True, db_index=True)),
                ('status', models.CharField(choices=[('N', 'new'), ('O', 'ok'), ('E', 'error')], default='N', max_length=1)),
                ('top_ten', models.BooleanField(default=False)),
                ('nsfw', models.BooleanField(default=False)),
                ('description', models.CharField(blank=True, max_length=2000, null=True)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stories', to='services.Service')),
            ],
            options={
                'verbose_name': 'story',
                'verbose_name_plural': 'stories',
                'ordering': ('-score',),
                'unique_together': {('service', 'code', 'date')},
            },
        ),
    ]
|
"""Dusty box analysis."""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from . import exact as exact_solution
def get_dust_properties(snap):
    """Get dust properties.

    Calculate the dust-to-gas ratio and stopping times.

    Parameters
    ----------
    snap
        The Snap object (a single snapshot; the previous docstring
        incorrectly documented a ``sim`` parameter).

    Returns
    -------
    dust_fraction
        The dust fraction for each dust species.
    stopping_time
        The stopping time for each dust species.
    """
    subsnaps = [snap['gas']] + snap['dust']
    # Mean density in g/cm^3: index 0 is gas, 1: are the dust species.
    density = np.array(
        [subsnap['density'].to('g/cm^3').magnitude.mean() for subsnap in subsnaps]
    )
    c_s = np.mean(snap['sound_speed'].to('cm/s').magnitude)
    ɣ = snap.properties['adiabatic_index']
    s = snap.properties['grain_size'].to('cm').magnitude
    rho_m = snap.properties['grain_density'].to('g/cm^3').magnitude
    rho_g = density[0]
    rho_d = density[1:]
    # Per-species drag coefficient; NOTE(review): this looks like the
    # Epstein drag expression — confirm against the simulation setup.
    drag_coeff = rho_g * rho_d * c_s / (np.sqrt(np.pi * ɣ / 8) * s * rho_m)
    dust_fraction = density[1:] / np.sum(density)
    stopping_time = density.sum() / drag_coeff
    return dust_fraction, stopping_time
def calculate_differential_velocity(sim):
    """Calculate differential velocity.

    Parameters
    ----------
    sim
        The simulation object.

    Returns
    -------
    The velocity differential DataFrame, with a 'time' column and one
    'differential_velocity.<n>' column per dust species.
    """
    n_dust = sim.snaps[0].num_dust_species
    # Snapshot times (assumed to match sim.snaps one-to-one).
    times = sim.properties['time'].magnitude

    # One row per snapshot: mean dust x-velocity minus mean gas x-velocity.
    rows = np.zeros((len(times), n_dust))
    for row, snap in zip(rows, sim.snaps):
        subsnaps = [snap['gas']] + snap['dust']
        mean_vx = np.array(
            [sub['velocity_x'].to('cm/s').magnitude.mean() for sub in subsnaps]
        )
        row[:] = mean_vx[1:] - mean_vx[0]

    # Assemble the DataFrame.
    columns = ['time'] + [
        f'differential_velocity.{idx}' for idx in range(1, n_dust + 1)
    ]
    return pd.DataFrame(np.column_stack([times, rows]), columns=columns)
def calculate_differential_velocity_exact(
    sim, times=None, n_points=1000, backreaction=True
):
    """Calculate differential velocity exact.

    Parameters
    ----------
    sim
        The simulation object.
    times
        Times to evaluate solution. If None, use n_points evenly spaced
        times spanning the simulation snapshots.
    n_points
        Number of evaluation times when *times* is None. Default is 1000.
    backreaction
        Whether the exact solution includes dust back-reaction on the gas.
        True or False.

    Returns
    -------
    The velocity differential DataFrame.
    """
    n_dust = sim.snaps[0].num_dust_species
    # Velocity differential: initial data (from the first snapshot).
    snap = sim.snaps[0]
    subsnaps = [snap['gas']] + snap['dust']
    vx = np.array(
        [subsnap['velocity_x'].to('cm/s').magnitude.mean() for subsnap in subsnaps]
    )
    delta_vx_init = vx[1:] - vx[0]
    # Time
    if times is None:
        _time = sim.properties['time'].magnitude
        time = np.linspace(_time[0], _time[-1], n_points)
    else:
        time = times
    # Velocity differential: analytical solutions
    dust_fraction, stopping_time = get_dust_properties(sim.snaps[0])
    exact = np.zeros((len(time), n_dust))
    if backreaction:
        # Coupled solution: all species passed to delta_vx together.
        for idxi, t in enumerate(time):
            exact[idxi, :] = exact_solution.delta_vx(
                t, stopping_time, dust_fraction, delta_vx_init
            )
    else:
        # Independent single-species solution for each dust species.
        for idxi, t in enumerate(time):
            for idxj in range(n_dust):
                exact[idxi, idxj] = exact_solution.delta_vx(
                    t, stopping_time[idxj], dust_fraction[idxj], delta_vx_init[idxj]
                )
    # Generate DataFrame
    arrays = np.hstack([time[:, np.newaxis], exact])
    columns = ['time'] + [
        f'differential_velocity.{idx}' for idx in range(1, n_dust + 1)
    ]
    dataframe = pd.DataFrame(arrays, columns=columns)
    return dataframe
def calculate_error(sim, relative=False):
    """Calculate the error between simulated and exact differential velocity.

    Parameters
    ----------
    sim
        The simulation object.
    relative
        If True, return |data - exact| / |exact|; otherwise the absolute
        error |data - exact|. Default is False.

    Returns
    -------
    A DataFrame with a 'time' column and one 'error.<n>' column per dust
    species.
    """
    _data = calculate_differential_velocity(sim)
    time = _data['time'].to_numpy()
    # startswith('d') selects the 'differential_velocity.<n>' columns.
    data = [_data[col].to_numpy() for col in _data.columns if col.startswith('d')]
    # Evaluate the exact solution at the simulation snapshot times.
    _exact = calculate_differential_velocity_exact(sim, times=time)
    exact = [_exact[col].to_numpy() for col in _exact.columns if col.startswith('d')]
    if relative:
        error = np.array([np.abs((yd - ye) / ye) for yd, ye in zip(data, exact)]).T
    else:
        error = np.array([np.abs(yd - ye) for yd, ye in zip(data, exact)]).T
    n_dust = len(data)
    # Generate DataFrame
    arrays = np.hstack([time[:, np.newaxis], error])
    columns = ['time'] + [f'error.{idx}' for idx in range(1, n_dust + 1)]
    dataframe = pd.DataFrame(arrays, columns=columns)
    return dataframe
def plot_differential_velocity(data, exact1, exact2, ax):
    """Plot differential velocity.

    Plot the data as circle markers, the analytical solution with back
    reaction as solid lines, and the analytical solution without back
    reaction as dashed lines.

    Parameters
    ----------
    data
        A DataFrame with the differential velocity.
    exact1
        A DataFrame with the differential velocity exact solution with
        backreaction.
    exact2
        A DataFrame with the differential velocity exact solution
        without backreaction.
    ax
        Matplotlib Axes.

    Returns
    -------
    ax
        Matplotlib Axes.
    """
    # startswith('d') selects the 'differential_velocity.<n>' columns.
    y_data = [data[col].to_numpy() for col in data.columns if col.startswith('d')]
    y_exact1 = [exact1[col].to_numpy() for col in exact1.columns if col.startswith('d')]
    y_exact2 = [exact2[col].to_numpy() for col in exact2.columns if col.startswith('d')]
    for yd, ye1, ye2 in zip(y_data, y_exact1, y_exact2):
        # Solid line for the backreaction solution; reuse its color for the
        # dashed (no-backreaction) line and the data markers.
        [line] = ax.plot(exact1['time'], ye1)
        ax.plot(exact2['time'], ye2, '--', color=line.get_color(), alpha=0.33)
        ax.plot(data['time'], yd, 'o', ms=4, fillstyle='none', color=line.get_color())
    # BUG FIX: pass the flag positionally — the 'b' keyword was deprecated
    # in Matplotlib 3.5 and removed in 3.6 (renamed to 'visible').
    ax.grid(True)
    return ax
def plot_differential_velocity_all(
    data, exact1, exact2, ncols=3, figsize=(15, 8), transpose=False
):
    """Plot differential velocity for each simulation.

    Parameters
    ----------
    data
        A dictionary of DataFrames with the differential velocity.
    exact1
        A dictionary of DataFrames with the differential velocity exact
        solution with backreaction.
    exact2
        A dictionary of DataFrames with the differential velocity exact
        solution without backreaction.
    ncols
        The number of columns of axes in the figure. Default is 3.
    figsize
        The figsize like (x, y). Default is (15, 8).
    transpose
        Whether to run along columns or rows. Default (False) is to run
        along rows.

    Returns
    -------
    fig
        Matplotlib Figure.
    """
    nrows = int(np.ceil(len(data) / ncols))
    # BUG FIX: squeeze=False keeps axs 2-D even when nrows == 1, so the
    # axs[-1, :] / axs[:, 0] labelling below cannot fail on a 1-D array.
    fig, axs = plt.subplots(
        ncols=ncols, nrows=nrows, sharex=False, sharey=True,
        figsize=figsize, squeeze=False
    )
    fig.subplots_adjust(hspace=0.1, wspace=0.1)
    _axs = axs.T.flatten() if transpose else axs.flatten()
    for d, e1, e2, ax in zip(data.values(), exact1.values(), exact2.values(), _axs):
        plot_differential_velocity(d, e1, e2, ax)
    # Label only the outer axes.
    for ax in axs[-1, :]:
        ax.set(xlabel='Time')
    for ax in axs[:, 0]:
        ax.set(ylabel='Differential velocity')
    return fig
def plot_error(df, plot_type, ax):
    """Plot differential velocity error.

    Parameters
    ----------
    df
        A DataFrame with the differential velocity error.
    plot_type
        Plot type: 'linear' or 'log'.
    ax
        Matplotlib Axes.

    Returns
    -------
    ax
        Matplotlib Axes.

    Raises
    ------
    ValueError
        If plot_type is neither 'linear' nor 'log'.
    """
    x = df['time'].to_numpy()
    y_error = [df[col].to_numpy() for col in df.columns if col.startswith('error')]
    for y in y_error:
        if plot_type == 'log':
            ax.semilogy(x, y)
        elif plot_type == 'linear':
            ax.plot(x, y)
        else:
            # Previously raised a bare ValueError with no message.
            raise ValueError(f"plot_type must be 'linear' or 'log', got {plot_type!r}")
    # BUG FIX: pass the flag positionally — the 'b' keyword was deprecated
    # in Matplotlib 3.5 and removed in 3.6 (renamed to 'visible').
    ax.grid(True)
    return ax
def plot_error_all(
    dataframes, plot_type='log', ncols=3, figsize=(15, 8), transpose=False
):
    """Plot differential velocity error for each simulation.

    Parameters
    ----------
    dataframes
        A dictionary of DataFrames, one per simulation.
    plot_type
        Plot type: 'linear' or 'log'.
    ncols
        The number of columns of axes in the figure. Default is 3.
    figsize
        The figsize like (x, y). Default is (15, 8).
    transpose
        Whether to run along columns or rows. Default (False) is to run
        along rows.

    Returns
    -------
    fig
        Matplotlib Figure.
    """
    nrows = int(np.ceil(len(dataframes) / ncols))
    # BUG FIX: squeeze=False keeps axs 2-D even when nrows == 1, so the
    # axs[-1, :] / axs[:, 0] labelling below cannot fail on a 1-D array.
    fig, axs = plt.subplots(
        ncols=ncols, nrows=nrows, sharex=True, sharey=True,
        figsize=figsize, squeeze=False
    )
    fig.subplots_adjust(hspace=0.1, wspace=0.1)
    _axs = axs.T.flatten() if transpose else axs.flatten()
    for df, ax in zip(dataframes.values(), _axs):
        plot_error(df=df, plot_type=plot_type, ax=ax)
    # Label only the outer axes.
    for ax in axs[-1, :]:
        ax.set(xlabel='Time')
    for ax in axs[:, 0]:
        ax.set(ylabel='Differential velocity error')
    return fig
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.